xonsh-0.6.0/.appveyor.yml

version: 0.6.0.{build}
os: Windows Server 2012 R2
environment:
  matrix:
    # http://www.appveyor.com/docs/installed-software#python
    - PYTHON: "C:\\Python34"
    - PYTHON: "C:\\Python35"
    - PYTHON: "C:\\Python36"
    - PYTHON: "C:\\Python34-x64"
      DISTUTILS_USE_SDK: "1"
    - PYTHON: "C:\\Python35-x64"
    - PYTHON: "C:\\Python36-x64"
# TODO: Miniconda
install:
  - "%PYTHON%\\Scripts\\pip install -r requirements-tests.txt"
build: off
test_script:
  - "%PYTHON%\\Scripts\\py.test"

xonsh-0.6.0/.circle.yml

machine:
  environment:
    PATH: /home/ubuntu/miniconda/envs/test_env/bin:/home/ubuntu/miniconda/bin:$PATH
    XONSH_DEBUG: 1
  post:
    - pyenv global 3.4.4 3.5.2
dependencies:
  pre:
    - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
    - bash miniconda.sh -b -p $HOME/miniconda
    - conda config --set always_yes yes --set changeps1 no
    - conda update -q conda
    - bash .circle_miniconda.sh
    - rm -rf ~/.pyenv
    - rm -rf ~/virtualenvs
  post:
    - case $CIRCLE_NODE_INDEX in 0) pip install -r requirements-tests.txt ;; esac:
        parallel: false
    - case $CIRCLE_NODE_INDEX in 0) python setup.py install ;; 1) python setup.py install ;; esac:
        parallel: true
test:
  override:
    - case $CIRCLE_NODE_INDEX in 0) py.test --flake8 --timeout=10 --cov=./xonsh;; 1) py.test --timeout=10 ;; esac:
        parallel: true
  post:
    - if [[ $CIRCLE_NODE_INDEX -eq 0 ]]; then codecov; fi

xonsh-0.6.0/.circle_miniconda.sh

if [[ $CIRCLE_NODE_INDEX == 0 ]]
then
    conda create -q -n test_env python=3.4 pygments prompt_toolkit ply pytest pytest-timeout psutil numpy matplotlib=1.5.3
fi
if [[ $CIRCLE_NODE_INDEX == 1 ]]
then
    conda create -q -n test_env python=3.5 pygments prompt_toolkit ply pytest pytest-timeout psutil numpy matplotlib=1.5.3
fi

xonsh-0.6.0/.coveragerc

[run]
branch = true
source = xonsh/
omit =
    */__amalgam__.py
    xonsh/lazyasd.py
    xonsh/parser_table.py
    xonsh/ply/*

xonsh-0.6.0/.gitattributes

# Set the default behavior, in case people don't have core.autocrlf set.
# This should be first, because the git documentation says "When more
# than one pattern matches the path, a later line overrides an earlier line."
* text=auto

# Text files
*.bat text
*.css_t text
*.in text
*.json text
*.py text
*.rst text
*.sh text
*.txt text
*.xsh text
*.yaml text
*.yml text
CONTRIBUTING text
license text
LICENSE text
Makefile text
README text

# Files in the lazyjson format require LF line endings
tests/histories/*.json text eol=lf

# Binary files
*.ico binary
*.gif binary
*.gz binary
*.png binary
*.webm binary

xonsh-0.6.0/.landscape.yaml

doc-warnings: yes
test-warnings: yes
strictness: veryhigh
max-line-length: 90
autodetect: yes
ignore-paths:
  - docs/conf.py
  - xonsh/ply
python-targets:
  - 3
pylint:
  disable:
    - super-on-old-class
    - old-style-class
    - global-variable-not-assigned
    - pointless-except
    - unused-argument
    - protected-access
    - global-statement
    - eval-used
    - exec-used
    - too-many-lines
    - import-self
    - redefined-outer-name
    - invalid-name
    - too-few-public-methods
    - no-init
    - star-args
mccabe:
  disable:
    - MC0000  # not Py3k compatible
    - MC0001  # silly cyclomatic complexity

xonsh-0.6.0/.travis.yml

language: python
env:
  global:
    - secure: "pvQHCsdcIRjwNvsBrZxP8cZWEwug0+PLg1T8841ZLkMdCaO3YheqmxF1xGjAqty6hLppz6vX1LFEKmPjKurLL0/i+be6MhT8/ZikFpSan7TdNUqISxeFx31ls+QpuFKzCV7ZEx7C1ms8LPWEGmzMMN6bCtOBVtGznD9KKWZmLlA="
matrix:
  include:
    - os: linux
      python: 3.5
      env:
        - MINICONDA_OS="Linux"
        - CI=true
        - TRAVIS=true
    - os: linux
      python: 3.6
      env:
        - MINICONDA_OS="Linux"
        - BUILD_DOCS=true
    - os: linux
      python: "nightly"
    - os: osx
      language: generic
      env: PYTHON="3.4" MINICONDA_OS="MacOSX"
    - os: osx
      language: generic
      env: PYTHON="3.5" MINICONDA_OS="MacOSX"
  allow_failures:
    - python: "nightly"
before_install:
  - if [[ ! ("$TRAVIS_PYTHON_VERSION" == "nightly" || "$TRAVIS_PYTHON_VERSION" == "3.6-dev") && ! $BUILD_DOCS ]]; then
      URL="https://repo.continuum.io/miniconda/Miniconda3-latest-${MINICONDA_OS}-x86_64.sh";
      wget "${URL}" -O miniconda.sh;
      bash miniconda.sh -b -p $HOME/miniconda;
      export PATH="$HOME/miniconda/bin:$PATH";
      hash -r;
      conda config --set always_yes yes --set changeps1 no;
      conda update -q conda;
      conda info -a;
    fi
install:
  - if [[ $BUILD_DOCS = true ]]; then
      python setup.py install;
      pip install -r requirements-docs.txt;
      pip install pygments prompt_toolkit ply psutil ipykernel matplotlib;
      pip install doctr;
    else
      pip install -r requirements-tests.txt;
    fi
before_script:
  - rvm get head || true
script:
  - set -e
  - if [[ $BUILD_DOCS = true ]]; then
      cd docs;
      make html;
      cd ..;
      doctr deploy --deploy-repo xonsh/xonsh-docs dev;
      git checkout $(git describe --tags `git rev-list --tags --max-count=1`);
      cd docs;
      make clean html;
      cd ..;
      doctr deploy --deploy-repo xonsh/xonsh-docs .;
    else
      py.test --timeout=10;
    fi

xonsh-0.6.0/CHANGELOG.rst

====================
Xonsh Change Log
====================

.. current developments

v0.6.0
====================

**Added:**

* Added an alias command, matching bash's implementation, available as part of bashisms.
* New ``$AUTO_SUGGEST_IN_COMPLETIONS`` environment variable that controls whether the auto-suggestion result appears in the tab completions.
* Added ``__add__()`` and ``__radd__()`` methods to ``EnvPath``.
* Xonsh now supports f-strings, as in Python v3.6+.
* Added ``ipython`` as unthreadable in command cache threadability predictors.
* Added ``whole_word_jumping`` xontrib.
* Added ``$XONSH_APPEND_NEWLINE`` environment variable.
* Support for PEP 515: Underscores in Numeric Literals.
* ``xonsh.color_tools.make_palette()``: a simple rename of the pre-existing ``xonsh.color_tools.make_pallete()`` function.
* ``xonsh.tools.decorator()`` function/method decorator. This allows an API function to be annotated with a decorator that documents its deprecation, warns the user that the function has been deprecated, and raises an ``AssertionError`` if the function has passed its expiry date.
* New xontrib ``schedule`` (Xonsh Task Scheduler).

**Changed:**

* ``on_pre_prompt`` is now fired before prompt calculations are made, allowing modifications to the prompt.
* ``emacsclient`` will now return false in the threadable predictors.
* Improved the autopair behavior to match that of popular code editors.
* Moved the lazy ``pkg_resources`` package back to its original place. This will hopefully address some of the slowdown issues experienced on some platforms.
* When xonsh is used to run an ``xsh`` script, the ``xonshrc`` is not loaded.
* Changed the behavior of the default predictor with binary analysis. The pattern ``libgpm`` is used, assuming that when ``gpm`` is used the program is not threadable. This change solves issues with programs such as ``links``.
* Error messages added to the ``source`` command if it is used with a language that is not xonsh or Python.

**Deprecated:**

* ``xonsh.color_tools.make_pallete()``: deprecated in release 0.5.10 and will be removed in release 0.6.0.

**Fixed:**

* Now f-strings can be used inside ``@()`` without an explicit enclosing command in ``![]`` (a short sketch follows at the end of this section).
* Fix for ``x, y, *z = ...`` unpacking.
* Git branch detection now correctly passes the environment down to the subprocess call. This allows for branch detection when git is installed into a non-standard location.
* Escape regex characters in ``path_complete`` to avoid regex parsing errors for certain combinations of characters in the path completer.
* gitstatus: Fixed hash not being shown when in detached HEAD and there are no tags.
* Fix branch colorization when ``git`` or ``hg`` are aliases.
* Fixed leftover ``.git/index.lock`` in ``gitstatus``.
* Made JSON history loading more robust to corrupt files.
* Starting a new command with an open parenthesis will no longer throw a traceback when ``$UPDATE_COMPLETIONS_ON_KEYPRESS`` is ``True``.
* Automatically wrapping subprocess calls would sometimes include semicolons and other line-ending tokens, rather than stopping at them. This has been fixed.
* Numerous spelling errors in documentation, docstrings/comments, text strings and local variable names.
* Spelling error in the ``xonsh.color_tools.make_pallete()`` public function declaration. This was fixed by renaming the function to ``xonsh.color_tools.make_palette()`` while maintaining a binding of ``make_pallete()`` to the new ``make_palette()`` in case users are already used to this API.
* Fixed issue with starting triple quote strings being run as a command.
* Fixed a problem with escaping the caret (^) character for cmd.exe in the source-cmd function.
* ``EOF in multi-line statement`` errors were misreported as being on line 0. Now they are correctly reported as being on the last line of the file.
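
As a rough illustration of the f-string entries above, the following is a minimal sketch of the intended usage; the variable name ``user`` and the ``echo`` invocations are illustrative assumptions, not examples taken from the xonsh sources::

    user = 'world'
    # f-strings work in Python mode, as in CPython 3.6+
    greeting = f'hello {user}'
    # ...and f-strings may now also appear inside @() in subprocess mode,
    # for example within a captured ![] expression
    ![echo @(f'hi {user}')]
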
v0.5.12
====================

**Fixed:**

* Fixed ``release.xsh`` to prevent it from dirtying the repo on release and leading to an unwanted ``.dev`` suffix on the version number.


v0.5.11
====================

**Added:**

* ``release.xsh`` creates a GitHub release with the merged news entries as the release body.

**Fixed:**

* ``xonfig`` now displays the proper value for "on linux".


v0.5.10
====================

**Added:**

* Added ``xclip`` and ``repo`` to default threadable predictors (Issues #2355 and #2348).
* Pretty printing of the $PATH variable.
* Added the "fzf-widgets" xontrib, which provides fuzzy-search productivity widgets with custom keybindings, to the xontrib list.
* New ``free_cwd`` xontrib for Windows, which prevents the current directory from being locked when the prompt is shown. This allows other programs or Windows Explorer to delete the current or parent directory. This is accomplished by resetting the CWD to the user's home directory temporarily while the prompt is displayed. The directory is still locked while any commands are processed, so xonsh still can't remove its own working directory.

**Changed:**

* Codecov threshold to 2%.

**Removed:**

* On Windows, environment variables wrapped like ``%foo%`` are no longer expanded automatically.

**Fixed:**

* Fixed the ``--rc`` option so it now runs xonsh with the specified rc file.
* The ``@$`` operator now functions properly when the returned command is an alias.
* Fixed line continuations not working correctly on Windows when used in the ``xonshrc`` file.
* Fixed a regression in the Windows ``sudo`` command that allows users to run elevated commands in xonsh.
* Fixed the ``echo`` command from xoreutils.
* Fixed a bug on Windows where xonsh wasn't using the ``PATH`` environment variable but instead relying on a default value from the Windows registry.


v0.5.9
====================

**Added:**

* Added an ``Alt .`` keybinding to ``bashisms-xontrib`` to insert the last argument of the previous command into the current buffer.

**Fixed:**

* Fixed a crash when the OpenSSH version of bash is on PATH on Windows.
* Added missing ensurers to make sure that ``bool`` env_vars are bools and ``int`` env_vars are integers:

  * ``DIRSTACK_SIZE``
  * ``EXPAND_ENV_VARS``
  * ``PUSHD_MINUS``
  * ``PUSHD_SILENT``
  * ``SUGGEST_COMMANDS``
  * ``SUGGEST_MAX_NUM``
  * ``SUGGEST_THRESHOLD``


v0.5.8
====================

**Changed:**

* The ``xonsh.platform.os_environ`` wrapper is now case-insensitive and case-preserving on Windows.
* The private ``_TeeStd`` class will no longer attempt to write to a standard buffer after the tee has been 'closed' and the standard buffer returned to the system.

**Fixed:**

* Fixed a bug on py34 where ``os.scandir`` was used by accident.
* Line continuations (``\\``) in subproc mode will no longer consume the surrounding whitespace.
* Fixed a bug when the foreign_shell name was not written in lower case in the static configuration file ``config.json``.
* Fixed a regression on Windows that caused ``which`` to report that the ``PATH`` environment variable could not be found.
* Fixed issue with foregrounding jobs that were started in the background.
* Fixed ``Ctrl-C`` crashing xonsh after running an invalid command.
* Fixed a potential ``ProcessLookupError`` issue, see #2288.


v0.5.7
====================

**Added:**

* New ``color_tools`` module provides basic color tools for converting to and from various formats as well as creating palettes from color strings.
* Redirections may now be used in string and list-of-strings aliases.
* Subprocess redirection may now forego the whitespace between the redirection and a file name. For example, ``echo hello world >/dev/null``.
* Added a ``-P`` flag to the ``cd`` function in order to change directory and follow symlinks.
* ``xonfig tutorial`` command to launch http://xon.sh/tutorial in the browser.
* ``@(...)`` syntax now supports generators and tuples without parentheses.
* Sourcing foreign shells now has the ``--show`` option, which lets you see what script will be run, and the ``--dryrun`` option, which prevents the source from actually taking place. Xonsh's foreign shell API also added these keyword arguments.
* Subprocess mode now supports subshells. Place any xonsh code between two parentheses, e.g. ``(cmd)``, to run this command in a separate xonsh subprocess.
* Foreign shell aliases now have the ability to take extra arguments, if needed.
* Xonsh will issue a warning message when the current working directory has been removed out from under it and not replaced prior to running the next command.
* Line continuation backslashes are respected on Windows in the PTK shell if the backslash is preceded by a space.
* Added ``ponysay`` as a command which will usually not run in a threaded mode in the commands cache.
* New ``jsonutils`` module available for serializing special xonsh objects to JSON.

**Changed:**

* The literal tokens ``and`` and ``or`` must be surrounded by whitespace to delimit subprocess mode. If they do not have whitespace on both sides in subproc mode, they are considered to be part of a command argument.
* The ``xontrib`` command is now flagged as unthreadable and will be run on the main Python thread. This allows xontribs to set signal handlers and perform other operations that require the main thread.
* nvim (Neovim) has been flagged as unthreadable.
* The interactive prompt will now catch ``SystemExit`` and, instead of exiting the session, will refresh the prompt. This is the same process as for keyboard interrupts.
* Xonsh no longer launches the wizard for new users. Instead a welcome screen is shown which says how to launch the wizard.
* Added a Windows ``expanduser()``-like function which prevents the expansion of ``~`` when it is not followed by a path separator.
* Collecting xonsh history files was reported to have random runtime OSError failures. This exception is now handled, just in case. The exception will still be printed in debug mode.
* ``Shell.stype`` has been renamed to ``Shell.shell_type``.
* The configuration wizard now displays the proper control sequence to leave the wizard at the start of the wizard itself. Note that this is Ctrl+D for readline and Ctrl+C for prompt-toolkit.
* Callable alias proxy functions are now more friendly to ``functools.partial()``.
* ``prompt.vc.get_hg_branch`` now uses ``os.scandir`` to walk up the filetree looking for a ``.hg`` directory. This results in (generally) faster branch resolution compared to the subprocess call to ``hg root``.
* Xonsh's script and code caches are now invalidated whenever the xonsh version changes for a given Python version.
* Autowrapping of subprocess globs has been improved to cover more cases that are ambiguous with Python syntax.
* Job control info when foregrounding or backgrounding jobs will now only be displayed when xonsh is in interactive mode.
* Enabled virtual terminal processing in the prompt-toolkit shell for Windows.

**Fixed:**

* 3rd party pygments styles (like solarized or monokailight) are now able to be used in xonsh.
These styles are dynamically created upon first use, rather than being lazily loaded by xonsh. * On Windows, ``os.environ`` is case insensitive. This would potentially change the case of environment variables set into the environment. Xonsh now uses ``nt.environ``, the case sensitive counterpart, to avoid these issues on Windows. * Fix how ``$PWD`` is managed in order to work with symlinks gracefully * ``history replay`` no longer barfs on ``style_name`` when setting up the environment * ``Shell.shell_type`` is now properly set to the same value as ``$SHELL_TYPE``. * Fixed ``source-zsh`` to work with zsh v5.2. * Fixed issue where ``del (x, y)`` would raise a syntax error. * Certain vim commands issue commands involving subshells, and this is now supported. * Null bytes handed to Popen are now automatically escaped prior to running a subprocess. This prevents Popen from issuing embedded null byte exceptions. * Xonsh will no longer crash is the current working directory is removed out from under it. * Multiline strings can now be written in subprocess mode. * PTK completions will now correctly deduplicate autosuggest completions and display completions values based on the cursor position. * Fixed bug where trailing backspaces on Windows paths could be interpreted as line continuations characters. Now line continuation characters must be preceded by a space on Windows. This only applies to xonsh in interactive mode to ensure scripts are portable. * Importing ``*.xsh`` files will now respect the encoding listed in that file and properly fallback to UTF-8. This behaviour follows the rules described in PEP 263. * Wizard is now able to properly serialize environment paths. v0.5.6 ==================== **Added:** * New core utility function aliases (written in pure Python) are now available in ``xonsh.xoreutils``. These include: ``cat``, ``echo``, ``pwd``, ``tee``, ``tty``, and ``yes``. These are not enabled by default. Use the new ``coreutils`` xontrib to load them. * CircleCI test post codecov run * The ``trace`` will automatically disable color printing when stdout is not a TTY or stdout is captured. * New ``jedi`` xontrib enables jedi-based tab completions when it is loaded. This supersedes xonsh's default Python-mode completer. * The lexer has a new ``split()`` method which splits strings according to xonsh's rules for whitespace and quotes. * New events for hooking into the Python import process are now available. You can now provide a handler for: - ``on_import_pre_find_spec`` - ``on_import_post_find_spec`` - ``on_import_pre_create_module`` - ``on_import_post_create_module`` - ``on_import_pre_exec_module`` - ``on_import_post_exec_module`` **Changed:** * The prompt toolkit shell's first completion will now be the current token from the auto-suggestion, if available. * Sourcing foreign shells will now safely skip applying aliases with the same name as existing xonsh aliases by default. This prevents accidentally overwriting important xonsh standard aliases, such as ``cd``. **Fixed:** * Threadable prediction for subprocesses will now consult both the command as it was typed in and any resolved aliases. * The first prompt will no longer print in the middle of the line if the user has already started typing. * Windows consoles will now automatically enable virtual terminal processing with the readline shell, if available. This allows the full use of ANSI escape sequences. * On the Windows readline shell, the tab-completion suppression prompt will no longer error out depending on what you press. 
* Fixed issue with subprocess mode wrapping not respecting line continuation backslashes. * Handle a bug where Bash On Windows causes platform.windows_bash_command() to raise CalledProcessError. * Fixed issues pertaining to completing from raw string paths. This is particularly relevant to Windows, where raw strings are inserted in path completion. * Replace deprecated calls to ``time.clock()`` by calls to ``time.perf_counter()``. * Use ``clock()`` to set the start time of ``_timings`` in non-windows instead of manually setting it to ``0.0``. * The ``trace`` utility will now correctly color output and not print extraneous newlines when called in a script. * The ``@$(cmd)`` operator now correctly splits strings according to xonsh semantics, rather than just on whitespace using ``str.split()``. * The ``mpl`` xontrib has been updated to improve matplotlib handling. If ``xontrib load mpl`` is run before matplotlib is imported and xonsh is in interactive mode, matplotlib will automatically enter interactive mode as well. Additionally, ``pyplot.show()`` is patched in interactive mode to be non-blocking. If a non-blocking show fails to draw the figure for some reason, a regular blocking version is called. * Fixed issues like ``timeit ls`` causing OSError - "Inappropriate ioctl for device". * Fixed a potential "OSError: [Errno 22] Invalid argument" to increase job control stability. v0.5.5 ==================== **Added:** * New ``--rc`` command line option allows users to specify paths to run control files from the command line. This includes both xonsh-based and JSON-based configuration. * New ``$UPDATE_COMPLETIONS_ON_KEYPRESS`` controls whether or not completions will automatically display and update while typing. This feature is only available in the prompt-toolkit shell. **Changed:** * Xonsh scripts now report ``__file__`` and ``__name__`` when run as scripts or sourced. These variables have the same meaning as they do in Python scripts. * ``$XONSHRC`` and related configuration variables now accept JSON-based static configuration file names as elements. This unifies the two methods of run control to a single entry point and loading system. * The ``xonsh.shell.Shell()`` class now requires that an Execer instance be explicitly provided to its init method. This class is no longer responsible for creating an execer an its dependencies. * Moved decorators ``unthreadable``, ``uncapturable`` from ``xonsh.proc`` to ``xonsh.tools``. * Some refactorings on jobs control. **Deprecated:** * The ``--config-path`` command line option is now deprecated in favor of ``--rc``. **Removed:** * ``xonsh.environ.DEFAULT_XONSHRC`` has been removed due to deprecation. For this value, please check the environment instead, or call ``xonsh.environ.default_xonshrc(env)``. **Fixed:** * Command pipelines that end in a callable alias are now interruptable with ``^C`` and the processes that are piped into the alias have their file handles closed. This should ensure that the entire pipeline is closed. * Fixed issue where unthreadable subprocs were not allowed to be captured with the ``$(cmd)`` operator. * The ``ProcProxy`` class (unthreadable aliases) was not being executed and would hang if the alias was capturable. This has been fixed. * Fixed a ``tcsetattr: Interrupted system call`` issue when run xonsh scripts. * Fixed issue with ``ValueError`` being thrown from ``inspect.signature()`` when called on C-extension callables in tab completer. * Fixed issue that ``ls | less`` crashes on Mac. 
* Threadable prediction was incorrectly based on the user input command, rather than the version where aliases have been resolved. This has been corrected. v0.5.4 ==================== **Added:** * Add alias ``xip`` ("kip") so that xonsh's Python environment (whatever that is) can be modified. * HistoryEntry, a SimpleNamespace object that represents a command in history. * ``xonsh.completers.bash_completion`` module * Added option to report timing information of xonsh startup times. Start xonsh with the ``--timings`` flag to use the feature. * The Python tab completer will now complete the argument names of functions and other callables. * Uptime module added to ``xonsh.xoreutils``. This can report the system boot time and up time. * The environment variable ``XONSH_HISTORY_BACKEND`` now also supports a value of class type or a History Backend instance. * ``on_envvar_new`` event that fires after a new envvar is created. * ``on_envvar_change`` event that fires after an envvar is changed. **Changed:** * history indexing api to be more simple, now returns HistoryEntry. * Decoupled ``bash_completion`` from xonsh project and added shim back to xonsh. * The JSON history backend will now unlock history files that were created prior to the last reboot. **Fixed:** * Fixed broken bash completions on Windows if 'Windows Subsystem for Linux' is installed. * Readline history would try to read the first element of history prior to actually loading any history. This caused an exception to be raised on Windows at xonsh startup when using pyreadline. * Fixed issue with readline tab completer overwriting initial prefix in some instances. * Fixed issue wherein if ``git`` or (presumably) ``hg`` are aliased, then branch information no longer appears in the ``$PROMPT`` * Fixed an issue with commands that background themselves (such as ``gpg-connect-agent``) not being able to be run from within xonshrc. v0.5.3 ==================== **Added:** * Tab completion xontrib for python applications based on click framework. * Added ``on_transform_command`` event for pre-processing that macros can't handle. * Autodetection of backgroundability by binary analysis on POSIX. * New argument ``expand_user=True`` to ``tools.expand_path``. * New ``$COMPLETION_QUERY_LIMIT`` environment variable for setting the number of completions above which the user will be asked if they wish to see the potential completions. * Users may now redirect stdout to stderr in subprocess mode. **Changed:** * The ``Block`` and ``Functor`` context managers from ``xonsh.contexts`` have been rewritten to use xonsh's macro capabilities. You must now enter these via the ``with!`` statement, e.g. ``with! Block(): pass``. * The ``distributed`` xontrib now needs to use the ``with!`` statement, since it relies on ``Functor``. * ``telnet`` has been flagged as unthreadable. * When ``$DYNAMIC_CWD_ELISION_CHAR`` is non empty and the last dir of cwd is too long and shortened, the elision char is added at the end. * ``pygments`` is no longer a strict dependency of the ``prompt_toolkit`` backend. If ``pygments`` is not installed, the PTK backend will use the default ansi color settings from the terminal. Syntax highlighting requires that ``pygments`` is installed. * Events are now keyword arguments only * Restored ``on_precommand`` to its original signature. * Move ``built_ins.expand_path`` to ``tools.expand_path``. * Rename ``tools.expandpath`` to ``tools._expandpath``. * Added ``gvim`` command to unthreadable predictors. 
* The ``source`` alias now passes ``$ARGS`` down to file it is sourcing. **Removed:** * ``XonshBlockError`` has been removed, since it no longer serves a purpose. **Fixed:** * ``PopenThread`` will now re-issue SIGINT to the main thread when it is received. * Fixed an issue that using sqlite history backend does not kill unfinished jobs when quitting xonsh with a second "exit". * Fixed an issue that xonsh would fail over to external shells when running .xsh script which raises exceptions. * Fixed an issue with ``openpty()`` returning non-unix line endings in its buffer. This was causing git and ssh to fail when xonsh was used as the login shell on the server. See https://mail.python.org/pipermail/python-list/2013-June/650460.html for more details. * Restored the ability to ^Z and ``fg`` processes on posix platforms. * CommandPipelines were not guaranteed to have been ended when the return code was requested. This has been fixed. * Introduce path expansion in ``is_writable_file`` to fix ``$XONSH_TRACEBACK_LOGFILE=~/xonsh.log``. * Backgrounding a running process (^Z) now restores ECHO mode to the terminal in cases where the subprocess doesn't properly restore itself. A major instance of this behaviour is Python's interactive interpreter. * Readline backend would not ask the user to confirm the printing of completion options if they numbered above a certain value. Instead they would be dumped to the screen. This has been fixed. * Jupyter kernel was no longer properly running subprocess commands. This has been fixed. * The filename is applied to the target of the ``source`` alias, providing better tracebacks. v0.5.2 ==================== **Added:** * Added ``weechat`` to default predictors * ``$DYNAMIC_CWD_ELISION_CHAR`` environment variable to control how a shortened path is displayed. **Changed:** * ``_ret_code`` function of ``prompt_ret_code`` xontrib return now ``None`` when return code is 0 instead of empty string allowing more customization of prompt format. **Fixed:** * Minor Python completer token counting bug fix. * multiline syntax error in PTK shell due to buffer not being reset * Segfaults and other early exit signals are now reported correctly, again. * ``tests/bin/{cat,pwd,wc}`` shebang changed to python3 v0.5.1 ==================== **Fixed:** * Fixed xonfig raising error when xonsh is not installed from source. v0.5.0 ==================== **Added:** * $XONTRIB_MPL_MINIMAL environment variable can be set to change if plots are minimalist or as-seen * xontrib-mpl now supports iTerm2 inline image display if iterm2_tools python package is installed * Xonsh now will fallback to other shells if encountered errors when starting up. * Added entry to customization faq re: ``dirs`` alias (#1452) * Added entry to customization faq re: tab completion selection (#1725) * Added entry to customization faq re: libgcc core dump (#1160) * Section about quoting in the tutorial. * The ``$VC_HG_SHOW_BRANCH`` environment variable to control whether to hide the hg branch in the prompt. * xonfig now contains the latest git commit date if xonsh installed from source. * Alt+Enter will execute a multiline code block irrespective of cursor position * Windows now has the ability to read output asynchronously from the console. * Use `doctr `_ to deploy dev docs to github pages * New ``xonsh.proc.uncapturable()`` decorator for declaring that function aliases should not be run in a captured subprocess. * New history backend sqlite. 
* Prompt user to install xontrib package if they try to load an uninstalled xontrib * Callable aliases may now take a final ``spec`` argument, which is the corresponding ``SubprocSpec`` instance. * New ``bashisms`` xontrib provides additional Bash-like syntax, such as ``!!``. This xontrib only affects the command line, and not xonsh scripts. * Tests that create testing repos (git, hg) * New subprocess specification class ``SubprocSpec`` is used for specifying and manipulating subprocess classes prior to execution. * New ``PopenThread`` class runs subprocesses on a a separate thread. * New ``CommandPipeline`` and ``HiddenCommandPipeline`` classes manage the execution of a pipeline of commands via the execution of the last command in the pipeline. Instances may be iterated and stream lines from the stdout buffer. These pipelines read from the stdout & stderr streams in a non-blocking manner. * ``$XONSH_STORE_STDOUT`` is now available on all platforms! * The ``CommandsCache`` now has the ability to predict whether or not a command must be run in the foreground using ``Popen`` or may use a background thread and can use ``PopenThread``. * Callable aliases may now use the full gamut of functions signatures: ``f()``, ``f(args)``, ``f(args, stdin=None)``, ``f(args, stdin=None, stdout=None)``, and ` ``f(args, stdin=None, stdout=None, stderr=None)``. * Uncaptured subprocesses now receive a PTY file handle for stdout and stderr. * New ``$XONSH_PROC_FREQUENCY`` environment variable that specifies how long loops in the subprocess framework should sleep. This may be adjusted from its default value to improved performance and mitigate "leaky" pipes on slower machines. * ``Shift+Tab`` moves backwards in completion dropdown in prompt_toolkit * PromptFormatter class that holds all the related prompt methods * PromptFormatter caching when building the prompt * p-strings: ``p'/foo/bar'`` is short for ``pathlib.Path('/foo/bar')`` * byte strings: prefixes other than ``b'foo'`` (eg, ``RB'foo'``) now work * Backticks for regex or glob searches now support an additional modifier ``p``, which causes them to return Path objects instead of strings. * New ``BOTTOM_TOOLBAR`` environment variable to control a bottom toolbar as specified in prompt-toolkit * New ``$XONSH_STDERR_PREFIX`` and ``$XONSH_STDERR_POSTFIX`` environment variables allow the user to print a prompt-like string before and after all stderr that is seen. For example, say that you would like stderr to appear on a red background, you might set ``$XONSH_STDERR_PREFIX = "{BACKGROUND_RED}"`` and ``$XONSH_STDERR_PREFIX = "{NO_COLOR}"``. * New ``xonsh.pyghooks.XonshTerminal256Formatter`` class patches the pygments formatter to understand xonsh color token semantics. * Load events are now available * New events added: ``on_post_init``, ``on_pre_cmdloop``, ``on_pre_rc``, ``on_post_rc``, ``on_ptk_create`` * Completion for ``xonsh`` builtin functions ``xontrib`` and ``xonfig`` * Added a general customization FAQ page to the docs to collect various tips/tricks/fixes for common issues/requests * ``test_single_command`` and ``test_redirect_out_to_file`` tests in ``test_integrations`` * Add note that the target of redirection should be separated by a space. **Changed:** * CircleCI now handles flake8 checks * Travis doesn't allow failures on nightly * ``get_hg_branch`` runs ``hg root`` to find root dir and check if we're in repo * The default style will now use the color keywords (#ansired, #ansidarkred) to set colors that follow the terminal color schemes. 
Currently, this requires prompt_toolkit master (>1.0.8) and pygments master (2.2) to work correctly. * ``vox activate`` now accepts relative directories. * Updated the effectivity of ``$XONSH_DEBUG`` on debug messages. * Better documentation on how to get nice colors in Windows' default console * All custom prompt_toolkit key binding filters now declared with the ``@Condition`` decorator * The style for the prompt toolkit completion menu is now lightgray/darkgray instead of turquoise/teal * landscape.io linting now ignores ply directory * ``history`` help messages to reflect subcommand usage * Quote all paths when completion if any of the paths needs be quoted, so that bash can automatically complete to the max prefix of the paths. * Tee'd reads now occur in 1kb chunks, rather than character-by-character. * The ``which`` alias no longer has a trailing newline if it is captured. This means that ``$(which cmd)`` will simply be the path to the command. * The following commands are, by default, predicted to be not threadable in some circumstances: * bash * csh * clear * clear.exe * cls * cmd * ex * fish * htop * ksh * less * man * more * mutt * nano * psql * ranger * rview * rvim * scp * sh * ssh * startx * sudo * tcsh * top * vi * view * vim * vimpager * xo * xonsh * zsh * The ``run_subproc()`` function has been replaced with a new implementation. * Piping between processes now uses OS pipes. * ``$XONSH_STORE_STDIN`` now uses ``os.pread()`` rather than ``tee`` and a new file. * The implementation of the ``foreground()`` decorator has been moved to ``unthreadable()``. * ``voxapi.Vox`` now supports ``pathlib.Path`` and ``PathLike`` objects as virtual environment identifiers * Renamed FORMATTER_DICT to PROMPT_FIELDS * BaseShell instantiates PromptFormatter * readline/ptk shells use PromptFormatter * Updated the bundled version of ``ply`` to current master available * vended ``ply`` is now a git subtree to help with any future updates * ``WHITE`` color keyword now means lightgray and ``INTENSE_WHITE`` completely white * Removed ``add_to_shell`` doc section from ``*nix`` install pages and instead relocated it to the general customization page * Moved a few ``*nix`` customization tips from the linux install page to the general customization page **Removed:** * coverage checks * ``CompletedCommand`` and ``HiddenCompletedCommand`` classes have been removed in favor of ``CommandPipeline`` and ``HiddenCommandPipeline``. * ``SimpleProcProxy`` and ``SimpleForegroundProcProxy`` have been removed in favor of a more general mechanism for dispatching callable aliases implemented in the ``ProcProxyThread`` and ``ProcProxy`` classes. * ``test_run_subproc.py`` in favor of ``test_integrations.py`` * Unused imports in many tests * Many duplicated tests (copypasta) **Fixed:** * xontrib-mpl now preserves the figure and does not permanently alter it for viewing * Fix up small pep8 violations * Fixed a bug where some files are not showing using bash completer * Fixed some issues with subprocess capturing aliases that it probably shouldn't. * ``safe_readable()`` now checks for ``ValueError`` as well. * The scroll bars in the PTK completions menus are back. * Jupyter kernel installation now respects the setuptools ``root`` parameter. * Fix ``__repr__`` and ``__str__`` methods of ``SubprocSpec`` so they report correctly * Fixed the message printed when which is unable to find the command. 
* Fixed a handful of sphinx errors and warnings in the docs * Fixed many PEP8 violations that had gone unnoticed * Fix failure to detect an Anaconda python distribution if the python was install from the conda-forge channel. * current_branch will try and locate the vc binary once * May now Crtl-C out of an infinite loop with a subprocess, such as ```while True: sleep 1``. * Fix for stdin redirects. * Backgrounding works with ``$XONSH_STORE_STDOUT`` * ``PopenThread`` blocks its thread from finishing until command has completed or process is suspended. * Added a minimum time buffer time for command pipelines to check for if previous commands have executed successfully. This is helpful for pipelines where the last command takes a long time to start up, such as GNU Parallel. This also checks to make sure that output has occurred. This includes piping 2+ commands together and pipelines that end in unthreadable commands. * ``curr_branch`` reports correctly when ``git config status.short true`` is used * ``pip`` completion now filters results by prefix * Fixed streaming ``!(alias)`` repr evaluation where bytes where not streamed. * Aliases that begin with a comma now complete correctly (no spurious comma) * Use ``python3`` in shebang lines for compatibility with distros that still use Python 2 as the default Python * STDOUT is only stored when ``$XONSH_STORE_STDOUT=True`` * Fixed issue with alias redirections to files throwing an OSError because the function ProcProxies were not being waited upon. * Fixed issue with callable aliases that happen to call sys.exit() or raise SystemExit taking out the whole xonsh process. * Safely flushes file handles on threaded buffers. * Proper default value and documentation for ``$BASH_COMPLETIONS`` * Fixed readline completer issues on paths with spaces * Fix bug in ``argvquote()`` functions used when sourcing batch files on Windows. The bug meant an extra backslash was added to UNC paths. Thanks to @bytesemantics for spotting it, and @janschulz for fixing the issue. * pep8, lint and refactor in pytest style of ``test_ptk_multiline.py``, ``test_replay.py`` * Tab completion of aliases returned a upper cased alias on Windows. * History show all action now also include current session items. * ``proc.stream_stderr`` now handles stderr that doesn't have buffer attribute * Made ``history show`` result sorted. * Fixed issue that ``history gc`` does not delete empty history files. * Standard stream tees have been fixed to accept the possibility that they may not be backed by a binary buffer. This includes the pipeline stdout tee as well as the shell tees. * Fixed a bug when the pygments plugin was used by third party editors etc. * CPU usage of ``PopenThread`` and ``CommandPipeline`` has been brought down significantly. v0.4.7 ==================== **Added:** * Define alias for 'echo' on startup for Windows only. * New coredev `astronouth7303 `_ added * ``which -a`` now searches in ``__xonsh_ctx__`` too * Info about the xontrib cookiecutter template on xontrib tutorial * xonsh's optional dependencies may now be installed with the pip extras ``ptk``, ``proctitle``, ``linux``, ``mac``, and ``win``. 
* Env ``help`` method to format and print the vardocs for an envvar * test_news fails if no empty line before a category * more info on test_news failures * Added ``on_precommand`` and ``on_postcommand`` `events `_ * New ``FORMATTER_DICT`` entry ``gitstatus`` to provides informative git status * FOREIGN_ALIASES_OVERRIDE envvar to control whether foreign aliases should override xonsh aliases with the same name. * Warning on tutorial about foreign aliases being ignored if a xonsh alias exist with the same name if not FOREIGN_ALIASES_OVERRIDE. * The prompt-toolkit shell now auto-inserts matching parentheses, brackets, and quotes. Enabled via the ``XONSH_AUTOPAIR`` environment variable * Better syntax highlights in prompt-toolkit, including valid command / path highlighting, macro syntax highlighting, and more * More info on tutorial about history interaction * Entry on bash_to_xsh * Macro context managers are now available via the ``with!`` syntax. **Changed:** * Devguide reflects the current process of releasing through ``release.xsh`` * moved ``which`` from ``xonsh.aliases`` into ``xoreutils.which`` * ``xonsh.prompt.gitstatus.gitstatus`` now returns a namedtuple * implementation of ``xonsh.prompt.vc_branch.get_git_branch`` and ``xonsh.prompt.vc_branch.git_dirty_working_directory`` to use 'git status --procelain' * moved prompt formatting specific functions from ``xonsh.environ`` to ``xonsh.prompt.base`` * All prompt formatter functions moved to ``xonsh.prompt`` subpackage * Printing the message about foreign aliases being ignored happens only if XONSH_DEBUG is set. * Use ``SetConsoleTitleW()`` on Windows instead of a process call. * Tutorial to reflect the current history command argument functionality * Macro function arguments now default to ``str``, rather than ``eval``, for consistency with other parts of the macro system. **Removed:** * aliases that use '!' in their name cause they clash with the macro syntax **Fixed:** * Fix regression where bash git completions where not loaded automatically when GitForWindows is installed. * More tokens are now supported in subproc args, such as ``==``. * Python completions now work without space delimiters, e.g. ``a=matpl`` will complete to ``a=matplotlib`` * Parser would fail on nested, captured suprocess macros. Now, it works, hooray!? * now fires chdir event if OS change in working directory is detected. * ``xonsh.prompt.vc_branch.git_dirty_working_directory`` uses ``porcelain`` option instead of using the bytestring ``nothing to commit`` to find out if a git directory is dirty * Fix bug where know commands where not highlighted on windows. * Fixed completer showing executable in upper case on windows. * Fixed issue where tilde expansion was occurring more than once before an equals sign. * test_dirstack test_cdpath_expansion leaving stray testing dirs * Better completer display for long completions in prompt-toolkit * Automatically append newline to target of ``source`` alias, so that it may be exec'd. * test_news fails when single graves around word * Slashes in virtual environment names work in vox * non string type value in $FORMATTER_DICT turning prompt ugly * whole prompt turning useless when one formatting function raises an exception * Fix completion after alias expansion * Fix hard crash when foreign shell functions fails to run. 
#1715 * Bug where non-default locations for ``XDG_DATA_HOME`` and ``XONSH_DATA_DIR`` would not expand ``~`` into the home directory * Auto quote path completions if path contains 'and' or 'or' * Completion now works on subcommands after pipe, ``&&``, ``||`` and so on. * cd . and cd now work. Fix indexerror in AUTO_PUSHD case, too. * Fixed issue with accidentally wrapping generators inside of function calls. * History indexing with string returns most recent command. v0.4.6 ==================== **Added:** * New option ``COMPLETIONS_CONFIRM``. When set, ```` is used to confirm completion instead of running command while completion menu is displayed. * NetBSD is now supported. * Macro function calls are now available. These use a Rust-like ``f!(arg)`` syntax. * Macro subprocess call now available with the ``echo! x y z`` syntax. * A new `event subsystem `_ has been added. * howto install sections for Debian/Ubuntu and Fedora. * ``History`` methods ``__iter__`` and ``__getitem__`` * ``tools.get_portions`` that yields parts of an iterable * Added a py.test plugin to collect ``test_*.xsh`` files and run ``test_*()`` functions. * ``__repr__`` and ``__str__`` magic method on LazyObject **Changed:** * ``create_module`` implementation on XonshImportHook * Results of the ``bash`` tab completer are now properly escaped (quoted) when necessary. * Foreign aliases that match xonsh builtin aliases are now ignored with a warning. * ``prompt_toolkit`` completions now only show the rightmost portion of a given completion in the dropdown * The value of ``'none'`` is no longer allowed for ``$SHELL_TYPE`` just during the initial load from the environment. ``-D``, later times, and other sources still work. * ``yacc_debug=True`` now load the parser on the same thread that the Parser instance is created. ``setup.py`` now uses this synchronous form as it was causing the parser table to be missed by some package managers. * Tilde expansion for the home directory now has the same semantics as Bash. Previously it only matched leading tildes. * Context sensitive AST transformation now checks that all names in an expression are in scope. If they are, then Python mode is retained. However, if even one is missing, subprocess wrapping is attempted. Previously, only the left-most name was examined for being within scope. * ``dirstack.pushd`` and ``dirstack.popd`` now handle UNC paths (of form ``\\\\...``), but only on Windows. They emulate behavior of `CMD.EXE` by creating a temporary mapped drive letter (starting from z: down) to replace the ``\\\`` portion of the path, on the ``pushd`` and unmapping the drive letter when all references to it are popped. * And ``dirstack`` suppresses this temporary drive mapping funky jive if registry entry ``HKCU\software\microsoft\command processor\DisableUNCCheck`` (or HKLM\...) is a DWORD value 1. This allows Xonsh to show the actual UNC path in your prompt string and *also* allows subprocess commands invoking `CMD.EXE` to run in the expected working directory. See https://support.microsoft.com/en-us/kb/156276 to satisfy any lingering curiosity. * ``lazy_locate_binary`` handles binary on different drive letter than current working directory (on Windows). * ``_curr_session_parser`` now iterates over ``History`` * New implementation of bash completer with better performance and compatibility. * ``$COMPLETIONS_BRACKETS`` is now available to determine whether or not to include opening brackets in Python completions * ``xonsh.bat`` tries to use `pylauncher `_ when available. 
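
To make the macro entries in the v0.4.6 "Added" list above more concrete, here is a minimal, hedged sketch of the two syntaxes; the ``identity`` function is a hypothetical example, and the exact argument handling is described more fully in the xonsh tutorial::

    def identity(x : str):
        # in a macro call, an argument annotated with ``str`` receives
        # the raw source text of whatever was passed in
        return x

    identity!(42)       # returns the string '42'
    identity!(2 + 2)    # returns the string '2 + 2', not 4

    # subprocess macro: everything after the ``!`` is passed to the
    # command as a single, unparsed string argument
    echo! unparsed text, with spaces and 'quotes'
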
**Removed:** * ``History`` method ``show`` * ``_hist_get_portion`` in favor of ``tools.get_portions`` * Unused imports in proc, flake8. **Fixed:** * xonsh modules imported now have the __file__ attribute * Context sensitive AST transformer was not adding argument names to the local scope. This would then enable extraneous subprocess mode wrapping for expressions whose leftmost name was function argument. This has been fixed by properly adding the argument names to the scope. * Foreign shell functions that are mapped to empty filenames no longer receive aliases since they can't be found to source later. * Correctly preserve arguments given to xon.sh, in case there are quoted ones. * Environment variables in subprocess mode were not being expanded unless they were in a sting. They are now expanded properly. * Fixed a bug that prevented xonsh from running scripts with code caching disabled. * Text of instructions to download missing program now does not get off and appears in whole. * Fix some test problems when win_unicode_console was installed on windows. * Fixed bug that prompt string and ``$PWD`` failed to track change in actual working directory if the invoked Python function happened to change it (e.g via ```os.chdir()```. Fix is to update ``$PWD`` after each command in ```BaseShell.default()```. * The interactive prompt now correctly handles multiline strings. * ``cd \\\`` now works when $AUTO_PUSHD is set, either creating a temporary mapped drive or simply setting UNC working directory based on registry ``DisableUNCCheck``. However, if $AUTO_PUSHD is not set and UNC checking is enabled (default for Windows), it issues an error message and fails. This improves on prior behavior, which would fail to change the current working directory, but would set $PWD and prompt string to the UNC path, creating false expectations. * fix parsing for tuple of tuples (like `(),()`) * ``sys.stdin``, ``sys.stdout``, ``sys.stderr`` no longer complete with opening square brackets * xonsh now properly handles syntax error messages arising from using values in inappropriate contexts (e.g., ``del 7``). v0.4.5 ==================== **Added:** * ``_hist_get`` that uses generators to filter and fetch the history commands of each session. * ``-n`` option to the show subcommand to choose to numerate the commands. * The ``exec`` command is now a first class alias that acts the same way as in sh-based languages. It replaces the current process with the command and argument that follows it. This allows xonsh to be used as a default shell while maintaining functionality with SSH, gdb, and other third party programs that assume the default shell supports raw ``exec command [args]`` syntax. This feature introduces some ambiguity between exec-as-a-subprocess and exec-as-a-function (the inescapable Python builtin). Though the two pieces of syntax do not overlap, they perform very different operations. Please see the xonsh FAQ for more information on trade-offs and mitigation strategies. * ``which -v`` now calls superhelp, which will print highlighted source. * Added xontribs: * `z (Tracks your most used directories, based on 'frecency'.) `_ * amalgamate.py now supports relative imports. 
* ``history show`` args ``-t``, ``-f``, ``-T`` ``+T`` to filter commands by timestamp * ``ensure_timestamp`` in xonsh.tools to try and convert an object to a timestamp a.k.a float * ``$XONSH_DATETIME_FORMAT`` envvar, the default format to be used with ``datetime.datetime.strptime()`` * ``xon.sh`` script now sets ``$LANG=C.UTF8`` in the event that no encoding is detected. * amalgamate.py now properly handles ``from __future__`` imports. **Changed:** * ``_hist_show`` now uses ``_hist_get`` to print out the commands. * ``xonsh.completers`` sub-package is now fully lazy. * The vox xontrib now takes flags very similar to Python's venv tool. Use ``vox --help `` to learn more. * Xontribs may now define ``__all__`` as a module top-level to limit what gets exported to the shell context * xon.sh uses the interpreter used to install instead of the default python3. * ``imphooks`` now checks directory access rights. * $TITLE now changes both icon (tab) and window title * Moved ``amalgamate_source`` outside ``build_tables`` * Disable amalgamation on setup develop * ``_hist_parse_args`` implementation refactor * moved all parameter checking in ``_hist_get`` * ``_hist_show`` to handle numeration and timestamp printing of commands * ``xonsh.imphooks`` does not install the import hooks automatically, you now need to explicitly call the `install_hook()` method defined in this module. For example: ``from xonsh.imphooks import install_hook; install_hook()``. The ``install_hook`` method can safely be called several times. If you need compatibility with previous versions of Xonsh you can use the following:: from xonsh import imphooks getattr(imphooks, 'install_hook', lambda:None)() * xonfig command now dumps more encoding related settings. **Removed:** * Anaconda Build is shutting down so we can no longer build conda development packages. All references to these packages are removed from the documentation. * Removed conda build recipe since the it is no longer used for Anaconda Build. The recipe used to build xonsh on conda-forge can be found here: https://github.com/conda-forge/xonsh-feedstock/blob/master/recipe/meta.yaml **Fixed:** * ``_zsh_hist_parser`` not parsing history files without timestamps. * Fixed amalgamation of aliased imports that are already in ``sys.modules``. * Xonsh will no longer fail to start in directories where the user doesn't have read access. * Fixed parser error line number exception from being raised while trying to raise a SyntaxError. * Made pip completer more robust to when pip is not installed. * Fix a startup problem on windows caused by a refactor of Prompt_toolkit. https://github.com/jonathanslenders/python-prompt-toolkit/commit/a9df2a2 * ``ensure_slice`` bugfix for -1 index/slice * Alias tab completion works again * Version number reported by bundled PLY * ``xonfig`` no longer breaks if PLY is externally installed and version 3.8 * LazyObject supports set union * Fixed error with not sourcing files with ``$XONSH_ENCODING`` and ``$XONSH_ENCODING_ERRORS``. * ``$IGNOREEOF`` envrionment variable now works properly in the prompt-toolkit shell. * Completions in ``jupyter_kernel.py`` now use updated completion framework v0.4.4 ==================== **Added:** * New ``lazyobject()``, ``lazydict()``, and ``lazybool()`` decorators to turn functions into lazy, global objects. * ``vox remove`` command can remove multiple environments at once. * Added FreeBSD support. * Tab completion for pip python package manager. * Regular expressions for environment variable matching. 
* __contains__ method on Env * Added news tests to enforce changelog conformity. * A new way to add optional items to the prompt format string has been added. Instead of relying on formatter dict items being padded with a space, now the padding characters are specified in the format string itself, in place of the format spec (after a ``:``). For example, previously the prompt string ``{cwd}{curr_branch} $`` would rely on ``curr_branch`` giving its output prepended with a space for separation, or outputting nothing if it is not applicable. Now ``curr_branch`` just outputs a value or ``None``, and the prompt string has to specify the surrounding characters: ``{cwd}{curr_branch: {}} $``. Here the value of ``curr_branch`` will be prepended with a space (``{}`` is a placeholder for the value itself). The format string after ``:`` is applied only if the value is not ``None``. * ``xonsh.completers`` subpackage is now amalgamated. * amalgamate.py will now warn if the same name is defined across multiple different files. * xonsh_builtins, xonsh_execer fixtures in conftest.py * Docs on how to tweak the Windows ConHost for a better color scheme. * Docs: how to fix Thunar's "Open Terminal Here" action. * A new API class was added to Vox: ``xontrib.voxapi.Vox``. This allows programmatic access to the virtual environment machinery for other xontribs. See the API documentation for details. * History now accepts multiple slices arguments separated by spaces **Changed:** * amalgamate now works on Python 2 and allows relative imports. * Top-level xonsh package now more lazy. * Show conda environment name in prompt in parentheses similar what conda does. * Implementation of expandvars now uses regex * Because of the addition of "optional items" to the prompt format string, the functions ``xonsh.environ.current_branch``, ``xonsh.environ.env_name`` and formatter dict items ``curr_branch``, ``current_job``, ``env_name`` are no longer padded with a separator. * many test cases to use fixtures and parametrization * Public interface in ``xonsh.ansi_colors`` module now has ``ansi_`` prefix to prevent name conflicts with other parts of xonsh. * Vox was moved to xontrib. Behaves exactly the same as before, just need to add it to your xontribs. * is_int_as_str and is_slice_as_str are now reimplemented in EAFP style **Deprecated:** * yield statements (nose style) and for loops in tests * is_int_or_slice **Removed:** * _is_in_env, _get_env_string functions on tools * ``xonsh.environ.format_prompt`` has been dropped; ``partial_format_prompt`` can be used instead. * for loops and yield statements in test cases, unused imports * is_int_or_slice **Fixed:** * Fixed bug on Windows preventing xonsh from changing the console title. * Unrecognized ``$XONSH_COLOR_STYLE`` values don't crash terminal. * Writing the window title will no longer accidentally answer interactive questions, eg ``rm -i`` now works as expected. * more matching cases for envvar reference * Certain linux VTE terminals would not start new tabs in the previous CWD. This may now be rectified by adding ``{vte_new_tab_cwd}`` somewhere to the prompt. * Unqualified usage of Unstorable in xonsh setup wizard that was causing the wizard to crash and burn * Bare ``except:`` was replaced with ``except Exception`` to prevent accidentally catching utility exceptions such as KeyboardInterrupt, which caused unexpected problems like printing out the raw $PROMPT string. * Fixed multiple definition of ``EQUAL``. * Fixed multiple definition of ``pprint``. 
* Fixed multiple definition of ``pyghooks``. * Fixed multiple definition of ``pygments``. * Fixed multiple definition of ``tokenize``. * redundant and 'leaky' tests in nose * Fix bug that prevented disabling $INTENSIFY_COLORS_ON_WIN in ``xonshrc`` * ``LazyJSON`` will now hide failures to close, and instead rely on reference counting if something goes wrong. * Fixed maximum recursion error with color styles. * Parser tables will no longer be generated in the current directory by accident. * Error messages when zsh or bash history file is not found v0.4.3 ==================== **Added:** * The results of glob expressions are sorted if ``$GLOB_SORTED`` is set. * LazyObjects will now load themselves on ``__getitem__()`` * New tools in ``xonsh.lazyasd`` module for loading modules in background threads. **Changed:** * ``GLOB_SORTED`` is enabled by default. * Sped up loading of pygments by ~100x by loading ``pkg_resources`` in background. * Sped up loading of prompt-toolkit by ~2x-3x by loading ``pkg_resources`` in background. * ``setup.py`` will no longer git checkout to replace the version number. Now it simply stores and reuses the original version line. **Removed:** * Removed the ``xonsh.built_ins.ENV`` global instance of the Env class. **Fixed:** * Bug with setting hist size not being settable due to lazy object loading has been resolved. * Minor amalgamate bug with ``import pkg.mod`` amalgamated imports. * No longer raises an error if a directory in ``$PATH`` does not exist on Python v3.4. * Fixed a readline shell completion issue that caused by inconsistency between ``$CASE_SENSITIVE_COMPLETIONS`` and readline's inputrc setting. v0.4.2 ==================== **Added:** * dev versions now display a ``devN`` counter at the end and ``xonfig info`` also displays the git sha of the current build **Changed:** * `prompt_toolkit` completion no longer automatically selects the first entry on first tab-press when completing multiple directories at once **Fixed:** * Sourcing foreign shells now allow fully capture environment variables that contain newlines as long as they also don't contain equal signs. * Added scripts directory to MANIFEST.in v0.4.1 ==================== **Fixed:** * ``setup.py`` will only amalgamate source files if ``amalgamate.py`` is available. This fixes issues with installing from pip. v0.4.0 ==================== **Added:** * A new class, ``xonsh.tools.EnvPath`` has been added. This class implements a ``MutableSequence`` object and overrides the ``__getitem__`` method so that when its entries are requested (either explicitly or implicitly), variable and user expansion is performed, and relative paths are resolved. ``EnvPath`` accepts objects (or lists of objects) of ``str``, ``bytes`` or ``pathlib.Path`` types. * New amalgamate tool collapses modules inside of a package into a single ``__amalgam__.py`` module. This tool glues together all of the code from the modules in a package, finds and removes intra-package imports, makes all non-package imports lazy, and adds hooks into the ``__init__.py``. This helps makes initial imports of modules fast and decreases startup time. Packages and sub-packages must be amalgamated separately. * New lazy and self-destructive module ``xonsh.lazyasd`` adds a suite of classes for delayed creation of objects. - A ``LazyObject`` won't be created until it has an attribute accessed. - A ``LazyDict`` will load each value only when a key is accessed. - A ``LazyBool`` will only be created when ``__bool__()`` is called. 
Additionally, when fully loaded, the above objects will replace themselves by name in the context that they were handed, thus dereferencing themselves. This is useful for global variables that may be expensive to create, should only be created once, and may not be used in any particular session. * New ``xon.sh`` script added for launching xonsh from a sh environment. This should be used if the normal ``xonsh`` script does not work for some reason. * Normal globbing is now available in Python mode via ``g```` * Backticks were expanded to allow searching using arbitrary functions, via ``@```` * ``xonsh.platform`` now has a new ``PATH_DEFAULT`` variable. * Tab completers can now raise ``StopIteration`` to prevent consideration of remaining completers. * Added tab completer for the ``completer`` alias. * New ``Block`` and ``Functor`` context managers are now available as part of the ``xonsh.contexts`` module. * ``Block`` provides support for turning a context body into a non-executing list of string lines. This is implement via a syntax tree transformation. This is useful for creating remote execution tools that seek to prevent local execution. * ``Functor`` is a subclass of the ``Block`` context manager that turns the block into a callable object. The function object is available via the ``func()`` attribute. However, the ``Functor`` instance is itself callable and will dispatch to ``func()``. * New ``$VC_BRANCH_TIMEOUT`` environment variable is the time (in seconds) of how long to spend attempting each individual version control branch information command during ``$PROMPT`` formatting. This allows for faster prompt resolution and faster startup times. * New lazy methods added to CommandsCache allowing for testing and inspection without the possibility of recomputing the cache. * ``!(command)`` is now usefully iterable, yielding lines of stdout * Added XonshCalledProcessError, which includes the relevant CompletedCommand. Also handles differences between Py3.4 and 3.5 in CalledProcessError * Tab completion of paths now includes zsh-style path expansion (subsequence matching), toggleable with ``$SUBSEQUENCE_PATH_COMPLETION`` * Tab completion of paths now includes "fuzzy" matches that are accurate to within a few characters, toggleable with ``$FUZZY_PATH_COMPLETION`` * Provide ``$XONSH_SOURCE`` for scripts in the environment variables pointing to the currently running script's path * Arguments '+' and '-' for the ``fg`` command (job control) * Provide ``$XONSH_SOURCE`` for scripts in the environment variables pointing to the currently running script's path * ``!(command)`` is now usefully iterable, yielding lines of stdout * Added XonshCalledProcessError, which includes the relevant CompletedCommand. Also handles differences between Py3.4 and 3.5 in CalledProcessError * XonshError and XonshCalledProcessError are now in builtins: - ``history session`` - ``history xonsh`` - ``history all`` - ``history zsh`` - ``history bash`` - ``__xonsh_history__.show()`` * New ``pathsep_to_set()`` and ``set_to_pathsep()`` functions convert to/from ``os.pathsep`` separated strings to a set of strings. **Changed:** * Changed testing framework from nose to pytest * All ``PATH``-like environment variables are now stored in an ``EnvPath`` object, so that non-absolute paths or paths containing environment variables can be resolved properly. * In ``VI_MODE``, the ``v`` key will enter character selection mode, not open the editor. 
``Ctrl-X Ctrl-E`` will still open an editor in any mode * ``$XONSH_DEBUG`` will now suppress amalgamated imports. This usually needs to be set in the calling environment or prior to *any* xonsh imports. * Restructured ``xonsh.platform`` to be fully lazy. * Restructured ``xonsh.ansi_colors`` to be fully lazy. * Ensured the ``pygments`` and ``xonsh.pyghooks`` are not imported until actually needed. * Yacc parser is now loaded in a background thread. * Cleaned up argument parsing in ``xonsh.main.premain`` by removing the ``undo_args`` hack. * Now complains on invalid arguments. * ``Env`` now guarantees that the ``$PATH`` is available and mutable when initialized. * On Windows the ``PROMPT`` environment variable is reset to `$P$G` before sourcing ``*.bat`` files. * On Windows the ``PROMPT`` environment variable is reset to `$P$G` before starting subprocesses. This prevents the unformatted xonsh ``PROMPT`` template from showing up when running batch files with ``ECHO ON``` * ``@()`` now passes through functions as well as strings, which allows for the use of anonymous aliases and aliases not explicitly added to the ``aliases`` mapping. * Functions in ``Execer`` now take ``transform`` kwarg instead of ``wrap_subproc``. * Provide ``$XONSH_SOURCE`` for scripts in the environment variables pointing to the currently running script's path * XonshError and XonshCalledProcessError are now in builtins * ``__repr__`` on the environment only shows a short representation of the object instead of printing the whole environment dictionary * More informative prompt when configuring foreign shells in the wizard. * ``CommandsCache`` is now a mapping from command names to a tuple of (executable locations, has alias flags). This enables faster lookup times. * ``locate_bin()`` now uses the ``CommandsCache``, rather than scanning the ``$PATH`` itself. * ``$PATHEXT`` is now a set, rather than a list. * Ignore case and leading a quotes when sorting completions **Removed:** * The ``'console_scripts'`` option to setuptools has been removed. It was found to cause slowdowns of over 150 ms on every startup. * Bash is no longer loaded by default as a foreign shell for initial configuration. This was done to increase stock startup times. This behaviour can be recovered by adding ``{"shell": "bash"}`` to your ``"foreign_shells"`` in your config.json file. For more details, see http://xon.sh/xonshconfig.html#foreign-shells * ``ensure_git()`` and ``ensure_hg()`` decorators removed. * ``call_hg_command()`` function removed. **Fixed:** * Issue where ``xonsh`` did not expand user and environment variables in ``$PATH``, forcing the user to add absolute paths. * Fixed a problem with aliases not always being found. * Fixed issue where input was directed to the last process in a pipeline, rather than the first. * Bug where xonfig wizard can't find ENV docs * Fixed ``xonsh.environ.locate_binary()`` to handle PATH variable are given as a tuple. * Fixed missing completions for ``cd`` and ```rmdir`` when directories had spaces in their names. * Bug preventing `xonsh` executable being installed on macOS. * Strip leading space in commands passed using the "-c" switch * Fixed xonfig wizard failing on Windows due to colon in created filename. * Ensured that the prompt_toolkit shell functions, even without a ``completer`` attribute. * Fixed crash resulting from malformed ``$PROMPT`` or ``$TITLE``. * xonsh no longer backgrounds itself after every command on Cygwin. 
* Fixed an issue about ``os.killpg()`` on Cygwin which caused xonsh to crash occasionally * Fix crash on startup when Bash Windows Subsystem for Linux is on the Path. * Fixed issue with setting and signaling process groups on Linux when the first process is a function alias and has no pid. * Fixed ``_list_completers`` such that it does not throw a ValueError if no completer is registered. * Fixed ``_list_completers`` such that it does not throw an AttributeError if a completer has no docstring. * Bug that caused command line argument ``--config-path`` to be ignored. * Bug that caused xonsh to break on startup when prompt-toolkit < 1.0.0. v0.3.4 ==================== **Changed:** * ``$PROMPT`` from foreign shells is now ignored. * ``$RC_FILES`` environment variable now stores the run control files we attempted to load. * Only show the prompt for the wizard if we did not attempt to load any run control files (as opposed to if none were successfully loaded). * Git and mercurial branch and dirty function refactor to improve run times. **Fixed:** * Fixed an issue whereby attempting to delete a literal value (e.g., ``del 7``) in the prompt_toolkit shell would cause xonsh to crash. * Fixed broken behavior when using ``cd ..`` to move into a nonexistent directory. * Partial workaround for Cygwin where ``pthread_sigmask`` appears to be missing from the ``signal`` module. * Fixed crash resulting from malformed ``$PROMPT``. * Fixed regression on Windows with the locate_binary() function. The bug prevented `source-cmd` from working correctly and broke the ``activate``/``deactivate`` aliases for the conda environments. * Fixed crash resulting from errors other than syntax errors in run control file. * On Windows if bash is not on the path look in the registry for the defaults install directory for GitForWindows. v0.3.3 ==================== **Added:** * Question mark literals, ``?``, are now allowed as part of subprocess argument names. * IPython style visual pointer to show where syntax error was detected * Pretty printing of output and syntax highlighting of input and output can now be controlled via new environment variables ``$COLOR_INPUT``, ``$COLOR_RESULTS``, and ``$PRETTY_PRINT_RESULTS``. * In interactive mode, if there are stopped or background jobs, Xonsh prompts for confirmations rather than just killing all jobs and exiting. **Changed:** * ``which`` now gives a better verbose report of where the executables are found. * Tab completion now uses a different interface, which allows new completers to be implemented in Python. * Most functions in the ``Execer`` now take an extra argument ``transform``, indicating whether the syntax tree transformations should be applied. * ``prompt_toolkit`` is now loaded lazily, decreasing load times when using the ``readline`` shell. * RC files are now executed directly in the appropriate context. * ``_`` is now updated by ``![]``, to contain the appropriate ``CompletedCommand`` object. **Removed:** * Fixed bug on Windows where ``which`` did not include current directory **Fixed:** * Fixed crashed bash-completer when bash is not available on Windows * Fixed bug on Windows where tab-completion for executables would return all files. 
* Fixed bug on Windows which caused the bash $PROMPT variable to be used when no no $PROMPT variable was set in .xonshrc * Improved start-up times by caching information about bash completion functions * The --shell-type CLI flag now takes precedence over $SHELL_TYPE specified in .xonshrc * Fixed an issue about ``os.killpg()`` on OS X which caused xonsh crash with occasionally. v0.3.2 ==================== **Fixed:** * Fixed PermissionError when tab completions tries to lookup executables in directories without read permissions. * Fixed incorrect parsing of command line flags v0.3.1 ==================== **Added:** * When a subprocess exits with a signal (e.g. SIGSEGV), a message is printed, similar to Bash. * Added comma literals to subproc mode. * ``@$(cmd)`` has been added as a subprocess-mode operator, which replaces in the subprocess command itself with the result of running ``cmd``. * New ``showcmd`` alias for displaying how xonsh interprets subprocess mode commands and arguments. * Added ``$DYNAMIC_CWD_WIDTH`` to allow the adjusting of the current working directory width in the prompt. * Added ``$XONSH_DEBUG`` environment variable to help with debugging. * The ``${...}`` shortcut for ``__xonsh_env__`` now returns appropriate completion options **Changed:** * On Windows the default bash completions files ``$BASH_COMPLETIONS`` now points to the default location of the completions files used by 'Git for Windows' * On Cygwin, some tweaks are applied to foreign shell subprocess calls and the readline import, in order to avoid hangs on launch. **Removed:** * Special cased code for handling version of prompt_toolkit < v1.0.0 **Fixed:** * Show sorted bash completions suggestions. * Fix bash completions (e.g git etc.) on windows when completions files have spaces in their path names * Fixed a bug preventing ``source-bash`` from working on Windows * Numerous improvements to job control via a nearly-complete rewrite. * Addressed issue with finding the next break in subproc mode in context sensitive parsing. * Fixed issue with loading readline init files (inputrc) that seems to be triggered by libedit. * ``$MULTILINE_PROMPT`` now allows colors, as originally intended. * Rectified install issue with Jupyter hook when installing with pyenv, Jupyter install hook now respects ``--prefix`` argument. * Fixed issue with the xonsh.ply subpackage not being installed. * Fixed a parsing bug whereby a trailing ``&`` on a line was being ignored (processes were unable to be started in the background) v0.3.0 ==================== **Added:** * ``and``, ``or``, ``&&``, ``||`` have been added as subprocess logical operators, by popular demand! * Subprocesses may be negated with ``not`` and grouped together with parentheses. * New framework for writing xonsh extensions, called ``xontribs``. * Added a new shell type ``'none'``, used to avoid importing ``readline`` or ``prompt_toolkit`` when running scripts or running a single command. * New: `sudo` functionality on Windows through an alias * Automatically enhance colors for readability in the default terminal (cmd.exe) on Windows. This functionality can be enabled/disabled with the $INTENSIFY_COLORS_ON_WIN environment variable. * Added ``Ellipsis`` lookup to ``__xonsh_env__`` to allow environment variable checks, e.g. ``'HOME' in ${...}`` * Added an option to update ``os.environ`` every time the xonsh environment changes. This is disabled by default but can be enabled by setting ``$UPDATE_OS_ENVIRON`` to True. * Added Windows 'cmd.exe' as a foreign shell. 
This gives xonsh the ability to source Windows Batch files (.bat and .cmd). Calling ``source-cmd script.bat`` or the alias ``source-bat script.bat`` will call the bat file and changes to the environment variables will be reflected in xonsh. * Added an alias for the conda environment activate/deactivate batch scripts when running the Anaconda python distribution on Windows. * Added a menu entry to launch xonsh when installing xonsh from a conda package * Added a new ``which`` alias that supports both regular ``which`` and also searches through xonsh aliases. A pure python implementation of ``which`` is used. Thanks to Trent Mick. https://github.com/trentm/which/ * Added support for prompt toolkit v1.0.0. * Added ``$XONSH_CACHE_SCRIPTS`` and ``$XONSH_CACHE_EVERYTHING`` environment variables to control caching of scripts and interactive commands. These can also be controlled by command line options ``--no-script-cache`` and ``--cache-everything`` when starting xonsh. * Added a workaround to allow ctrl-c to interrupt reverse incremental search in the readline shell **Changed:** * Running scripts through xonsh (or running a single command with ``-c``) no longer runs the user's rc file, unless the ``--login`` option is specified. Also avoids loading aliases and environments from foreign shells, as well as loading bash completions. * rc files are now compiled and cached, to avoid re-parsing when they haven't changed. Scripts are also compiled and cached, and there is the option to cache interactive commands. * Left and Right arrows in the ``prompt_toolkit`` shell now wrap in multiline environments * Regexpath matching with backticks, now returns an empty list in python mode. * Pygments added as a dependency for the conda package * Foreign shells now allow for setting exit-on-error commands before and after all other commands via the ``seterrprevcmd`` and ``seterrpostcmd`` arguments. Sensinble defaults are provided for existing shells. * PLY is no longer a external dependency but is bundled in xonsh/ply. Xonsh can therefore run without any external dependencies, although having prompt-toolkit recommended. * Provide better user feedback when running ``which`` in a platform that doesn't provide it (e.g. Windows). * The lexer now uses a custom tokenizer that handles regex globs in the proper way. **Fixed:** * Fixed bug with loading prompt-toolkit shell < v0.57. * Fixed bug with prompt-toolkit completion when the cursor is not at the end of the line. * Aliases will now evaluate environment variables and other expansions at execution time rather than passing through a literal string. * Fixed environment variables from os.environ not being loaded when a running a script * The readline shell will now load the inputrc files. * Fixed bug that prevented `source-alias` from working. * Now able to ``^C`` the xonfig wizard on start up. * Fixed deadlock on Windows when running subprocess that generates enough output to fill the OS pipe buffer. * Sourcing foreign shells will now return a non-zero exit code if the source operation failed for some reason. 
* Fixed PermissionError when running commands in directories without read permissions * Prevent Windows fixups from overriding environment vars in static config * Fixed Optional Github project status to reflect added/removed files via git_dirty_working_directory() * Fixed xonsh.exe launcher on Windows, when Python install directory has a space in it * Fixed `$CDPATH` to support `~` and environments variables in its items v0.2.7 ==================== **Added:** * Added new valid ``$SHELL_TYPE`` called ``'best'``. This selects the best value for the concrete shell type based on the availability on the user's machine. * New environment variable ``$XONSH_COLOR_STYLE`` will set the color mapping for all of xonsh. * New ``XonshStyle`` pygments style will determine the appropriate color mapping based on ``$XONSH_COLOR_STYLE``. The associated ``xonsh_style_proxy()`` is intended for wrapping ``XonshStyle`` when actually being used by pygments. * The functions ``print_color()`` and ``format_color()`` found in ``xonsh.tools`` dispatch to the approriate shell color handling and may be used from anywhere. * ``xonsh.tools.HAVE_PYGMENTS`` flag now denotes if pygments is installed and available on the users system. * The ``ansi_colors`` module is now available for handling ANSI color codes. * ``?`` and ``??`` operator output now has colored titles, like in IPython. * ``??`` will syntax highlight source code if pygments is available. * Python mode output is now syntax highlighted if pygments is available. * New ``$RIGHT_PROMPT`` environment variable for displaying right-aligned text in prompt-toolkit shell. * Added ``!(...)`` operator, which returns an object representing the result of running a command. The truth value of this object is True if the return code is equal to zero and False otherwise. * Optional dependency on the win_unicode_console package to enable unicode support in cmd.exe on Windows. This can be disabled/enabled with the ``$WIN_UNICODE_CONSOLE`` environment variable. **Changed:** * Updated ``$SHELL_TYPE`` default to ``'best'``. * Shell classes are now responsible for implementing their own color formatting and printing. * Prompt coloring, history diffing, and tracing uses new color handling capabilities. * New ``Token.Color`` token for xonsh color names, e.g. we now use ``Token.Color.RED`` rather than ``Token.RED``. * Untracked files in git are ignored when determining if a git workdir is is dirty. This affects the coloring of the branch label. * Regular expression globbing now uses ``re.fullmatch`` instead of ``re.match``, and the result of an empty regex glob does not cause the argument to be deleted. **Removed:** * The ``xonsh.tools.TERM_COLORS`` mapping has been axed, along with all references to it. This may cause a problem if you were using a raw color code in your xonshrc file from ``$FORMATTER_DICT``. To fix, simply remove these references. **Fixed:** * Multidimensional slicing, as in numpy, no longer throws SyntaxErrors. * Some minor zsh fixes for more platforms and setups. * The ``BaseShell.settitle`` method no longer has its commands captured by ``$(...)`` v0.2.6 ==================== **Added:** * ``trace`` alias added that enables users to turn on and off the printing of source code lines prior to their execution. This is useful for debugging scripts. * New ability to force callable alias functions to be run in the foreground, i.e. the main thread from which the function was called. This is useful for debuggers and profilers which may require such access. 
Use the ``xonsh.proc.foreground`` decorator on an alias function to flag it. ``ForegroundProcProxy`` and ``SimpleForegroundProcProxy`` classes have been added to support this feature. Normally, forcing a foreground alias is not needed. * Added boolean ``$RAISE_SUBPROC_ERROR`` environment variable. If true and a subprocess command exits with a non-zero return code, a CalledProcessError will be raised. This is useful in scripts that should fail at the first error. * If the ``setproctitle`` package is installed, the process title will be set to ``'xonsh'`` rather than the path to the Python interpreter. * zsh foreign shell interface now supported natively in xonsh, like with Bash. New ``source-zsh`` alias allows easy access to zsh scripts and functions. * Vox virtual environment manager added. **Changed:** * The ``foreign_shell_data()`` keyword arguments ``envcmd`` and ``aliascmd`` now default to ``None``. * Updated alias docs to pull in usage from the commands automatically. **Fixed:** * Hundreds of bugs related to line and column numbers have been addressed. * Fixed path completion not working for absolute paths or for expanded paths on Windows. * Fixed issue with hg dirty branches and $PATH. * Fixed issues related to foreign shell data in files with whitespace in the names. * Worked around bug in ConEmu/cmder which prevented ``get_git_branch()`` from working in these terminal emulators on Windows. v0.2.5 =========== **Added:** * New configuration utility 'xonfig' which reports current system setup information and creates config files through an interactive wizard. * Toolkit for creating wizards now available * timeit and which aliases will now complete their arguments. * $COMPLETIONS_MENU_ROWS environment variable controls the size of the tab-completion menu in prompt-toolkit. * Prompt-toolkit shell now supports true multiline input with the ability to scroll up and down in the prompt. **Changed:** * The xonfig wizard will run on interactive startup if no configuration file is found. * BaseShell now has a singleline() method for prompting a single input. * Environment variable docs are now auto-generated. * Prompt-toolkit shell will now dynamically allocate space for the tab-completion menu. * Looking up nonexistent environment variables now generates an error in Python mode, but produces a sane default value in subprocess mode. * Environments are now considered to contain all manually-adjusted keys, and also all keys with an associated default value. **Removed:** * Removed ``xonsh.ptk.shortcuts.Prompter.create_prompt_layout()`` and ``xonsh.ptk.shortcuts.Prompter.create_prompt_application()`` methods to reduce portion of xonsh that forks prompt-toolkit. This may require users to upgrade to prompt-toolkit v0.57+. **Fixed:** * First prompt in the prompt-toolkit shell now allows for up and down arrows to search through history. * Made obtaining the prompt-toolkit buffer thread-safe. * Now always set non-detypable environment variables when sourcing foreign shells. * Fixed issue with job management if a TTY existed but was not controlled by the process, posix only. * Jupyter kernel no longer times out when using foreign shells on startup. * Capturing redirections, e.g. ``$(echo hello > f.txt)``, no longer fails with a decoding error. * Evaluation in a Jupyter cell will return pformatted object. * Jupyter with redirect uncaptured subprocs to notebook. * Tab completion in Jupyter fixed. v0.2.1 - v0.2.4 =============== You are reading the docs...but you still feel hungry. 
v0.2.0 ============= **Added:** * Rich history recording and replaying v0.1.0 ============= **Added:** * Naturally typed environment variables * Inherits the environment from BASH * Uses BASH completion for subprocess commands * Regular expression filename globbing * Its own PLY-based lexer and parser * xonsh code parses into a Python AST * You can do all the normal Python things, like arithmetic and importing * Captured and uncaptured subprocesses * Pipes, redirection, and non-blocking subprocess syntax support * Help and superhelp with ? and ?? * Command aliasing * Multiline input, unlike ed * History matching like in IPython * Color prompts * Low system overhead `_, it imports Python modules straight from ``__amalgam__.py``, which decreases startup times by eliminating the cost of runtime imports. But setting ``$ $XONSH_DEBUG=1`` will suppress amalgamated imports. Reloading the xonsh shell (``$ xonsh``) won't simply import the stale ``__amalgam__.py`` file that doesn't contain your new change, but will instead import the unamalgamated source code which does contain your change. You can now load every subsequent change by reloading xonsh, and if your code changes don't seem to have any effect, make sure you check ``$XONSH_DEBUG`` first! Changelog ========= Pull requests will often have CHANGELOG entries associated with. However, to avoid excessive merge conflicts, please follow the following procedure: 1. Go into the ``news/`` directory, 2. Copy the ``TEMPLATE.rst`` file to another file in the ``news/`` directory. We suggest using the branchname:: $ cp TEMPLATE.rst branch.rst 3. Add your entries as a bullet pointed lists in your ``branch.rst`` file in the appropriate category. It is OK to leave the ``None`` entries for later use. 4. Commit your ``branch.rst``. Feel free to update this file whenever you want! Please don't use someone else's file name. All of the files in this ``news/`` directory will be merged automatically at release time. The ``None`` entries will be automatically filtered out too! Style Guide =========== xonsh is a pure Python project, and so we use PEP8 (with some additions) to ensure consistency throughout the code base. ---------------------------------- Rules to Write By ---------------------------------- It is important to refer to things and concepts by their most specific name. When writing xonsh code or documentation please use technical terms appropriately. The following rules help provide needed clarity. ********** Interfaces ********** * User-facing APIs should be as generic and robust as possible. * Tests belong in the top-level ``tests`` directory. * Documentation belongs in the top-level ``docs`` directory. ************ Expectations ************ * Code must have associated tests and adequate documentation. * User-interaction code (such as the Shell class) is hard to test. Mechanism to test such constructs should be developed over time. * Have *extreme* empathy for your users. * Be selfish. Since you will be writing tests you will be your first user. ------------------- Python Style Guide ------------------- xonsh uses `PEP8`_ for all Python code. The following rules apply where `PEP8`_ is open to interpretation. * Use absolute imports (``import xonsh.tools``) rather than explicit relative imports (``import .tools``). Implicit relative imports (``import tools``) are never allowed. * Use ``'single quotes'`` for string literals, and ``"""triple double quotes"""`` for docstrings. Double quotes are allowed to prevent single quote escaping, e.g. 
``"Y'all c'mon o'er here!"`` * We use sphinx with the numpydoc extension to autogenerate API documentation. Follow the `numpydoc`_ standard for docstrings. * Simple functions should have simple docstrings. * Lines should be at most 80 characters long. The 72 and 79 character recommendations from PEP8 are not required here. * All Python code should be compliant with Python 3.4+. At some unforeseen date in the future, Python 2.7 support *may* be supported. * Tests should be written with pytest using a procedural style. Do not use unittest directly or write tests in an object-oriented style. * Test generators make more dots and the dots must flow! You can easily check for style issues, including some outright bugs such as mispelled variable names, using pylint. If you're using Anaconda you'll need to run "conda install pylint" once. You can easily run pylint on the edited files in your uncommited git change:: $ pylint $(git status -s | awk '/\.py$$/ { print $$2 }' | sort) If you want to lint the entire code base run:: $ pylint $(find tests xonsh -name \*.py | sort) ********** Imports ********** Xonsh source code may be amalgamated into a single file (``__amalgam__.py``) to speed up imports. The way the code amalgamater works is that other modules that are in the same package (and amalgamated) should be imported with:: from pkg.x import a, c, d This is because the amalgamater puts all such modules in the same globals(), which is effectively what the from-imports do. For example, ``xonsh.ast`` and ``xonsh.execer`` are both in the same package (``xonsh``). Thus they should use the above from from-import syntax. Alternatively, for modules outside of the current package (or modules that are not amalgamated) the import statement should be either ``import pkg.x`` or ``import pkg.x as name``. This is because these are the only cases where the amalgamater is able to automatically insert lazy imports in way that is guaranteed to be safe. This is due to the ambiguity that ``from pkg.x import name`` may import a variable that cannot be lazily constructed or may import a module. So the simple rules to follow are that: 1. Import objects from modules in the same package directly in using from-import, 2. Import objects from modules outside of the package via a direct import or import-as statement. How to Test ================ ---------------------------------- Docker ---------------------------------- If you want to run your "work in progress version" without installing and in a fresh environment you can use Docker. If Docker is installed you just have to run this:: $ python xonsh-in-docker.py This will build and run the current state of the repository in an isolated container (it may take a while the first time you run it). There are two additional arguments you can pass this script. * The version of python * the version of ``prompt_toolkit`` Example:: $ python docker.py 3.4 0.57 Ensure your cwd is the root directory of the project (i.e., the one containing the .git directory). ---------------------------------- Dependencies ---------------------------------- Prep your environment for running the tests:: $ pip install -r requirements-tests.txt ---------------------------------- Running the Tests - Basic ---------------------------------- Run all the tests using pytest:: $ py.test -q Use "-q" to keep pytest from outputting a bunch of info for every test. 
---------------------------------- Running the Tests - Advanced ---------------------------------- To perform all unit tests:: $ py.test If you want to run specific tests you can specify the test names to execute. For example to run test_aliases:: $ py.test test_aliases.py Note that you can pass multiple test names in the above examples:: $ py.test test_aliases.py test_environ.py ---------------------------------- Writing the Tests - Advanced ---------------------------------- (refer to pytest documentation) With the Pytest framework you can use bare `assert` statements on anything you're trying to test, note that the name of the test function has to be prefixed with `test_`:: def test_whatever(): assert is_true_or_false The conftest.py in tests directory defines fixtures for mocking various parts of xonsh for more test isolation. For a list of the various fixtures:: $ py.test --fixtures when writing tests it's best to use pytest features i.e parametrization:: @pytest.mark.parametrize('env', [test_env1, test_env2]) def test_one(env, xonsh_builtins): xonsh_builtins.__xonsh_env__ = env ... this will run the test two times each time with the respective `test_env`. This can be done with a for loop too but the test will run only once for the different test cases and you get less isolation. With that in mind, each test should have the least `assert` statements, preferably one. At the moment, xonsh doesn't support any pytest plugins. Happy Testing! How to Document ==================== Documentation takes many forms. This will guide you through the steps of successful documentation. ---------- Docstrings ---------- No matter what language you are writing in, you should always have documentation strings along with you code. This is so important that it is part of the style guide. When writing in Python, your docstrings should be in reStructured Text using the `numpydoc`_ format. ------------------------ Auto-Documentation Hooks ------------------------ The docstrings that you have written will automatically be connected to the website, once the appropriate hooks have been setup. At this stage, all documentation lives within xonsh's top-level ``docs`` directory. We uses the sphinx tool to manage and generate the documentation, which you can learn about from `the sphinx website `_. If you want to generate the documentation, first xonsh itself must be installed and then you may run the following command from the ``docs`` dir: .. code-block:: console ~/xonsh/docs $ make html For each new module, you will have to supply the appropriate hooks. This should be done the first time that the module appears in a pull request. From here, call the new module ``mymod``. The following explains how to add hooks. ------------------------ Python Hooks ------------------------ Python documentation lives in the ``docs/api`` directory. First, create a file in this directory that represents the new module called ``mymod.rst``. The ``docs/api`` directory matches the structure of the ``xonsh/`` directory. So if your module is in a sub-package, you'll need to go into the sub-package's directory before creating ``mymod.rst``. The contents of this file should be as follows: **mymod.rst:** .. code-block:: rst .. _xonsh_mymod: ======================================= My Awesome Module -- :mod:`xonsh.mymod` ======================================= .. currentmodule:: xonsh.mymod .. automodule:: xonsh.mymod :members: This will discover all of the docstrings in ``mymod`` and create the appropriate webpage. 
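The docstrings that ``automodule`` discovers should follow the `numpydoc`_
convention described earlier. As a purely illustrative sketch (the function,
its parameters, and its behavior are hypothetical and not part of xonsh), a
documented function in ``mymod`` might look like:

.. code-block:: python

    def count_widgets(path, recursive=False):
        """Count the widgets stored under a path.

        Parameters
        ----------
        path : str
            Directory to search for widgets.
        recursive : bool, optional
            Whether to descend into subdirectories, defaults to False.

        Returns
        -------
        n : int
            The number of widgets found.
        """

Sphinx will render the ``Parameters`` and ``Returns`` sections on the
generated API page without any further hooks.
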
Now, you need to hook this page up to the rest of the website. Go into the ``index.rst`` file in ``docs/xonsh`` or other subdirectory and add ``mymod`` to the appropriate ``toctree`` (which stands for table-of-contents tree). Note that every sub-package has its own ``index.rst`` file. Building the Website =========================== Building the website/documentation requires the following dependencies: #. `Sphinx `_ #. `Cloud Sphinx Theme `_ ----------------------------------- Procedure for modifying the website ----------------------------------- The xonsh website source files are located in the ``docs`` directory. A developer first makes necessary changes, then rebuilds the website locally by executing the command:: $ make html This will generate html files for the website in the ``_build/html/`` folder. The developer may view the local changes by opening these files with their favorite browser, e.g.:: $ google-chrome _build/html/index.html Once the developer is satisfied with the changes, the changes should be committed and pull-requested per usual. Once the pull request is accepted, the developer can push their local changes directly to the website by:: $ make push-root Branches and Releases ============================= Mainline xonsh development occurs on the ``master`` branch. Other branches may be used for feature development (topical branches) or to represent past and upcoming releases. All releases should have a release candidate ('-rc1') that comes out 2 - 5 days prior to the scheduled release. During this time, no changes should occur to a special release branch ('vX.X.X-release'). The release branch is there so that development can continue on the develop branch while the release candidates (rc) are out and under review. This is because otherwise any new developments would have to wait until post-release to be merged into develop to prevent them from accidentally getting released early. As such, the 'vX.X.X-release' branch should only exist while there are release candidates out. They are akin to a temporary second level of staging, and so everything that is in this branch should also be part of master. Every time a new release candidate comes out the vX.X.X-release should be tagged with the name 'X.X.X-rcX'. There should be a 2 - 5 day period of time in between release candidates. When the full and final release happens, the 'vX.X.X-release' branch is merged into master and then deleted. If you have a new fix that needs to be in the next release candidate, you should make a topical branch and then pull request it into the release branch. After this has been accepted, the topical branch should be merged with master as well. The release branch must be quiet and untouched for 2 - 5 days prior to the full release. The release candidate procedure here only applies to major and minor releases. Micro releases may be pushed and released directly without having a release candidate. -------------------- Maintenance Tasks -------------------- You can cleanup your local repository of transient files such as \*.pyc files created by unit testing by running:: $ rm -f xonsh/parser_table.py $ rm -f xonsh/*.pyc tests/*.pyc $ rm -fr build ----------------------- Performing the Release ----------------------- This is done through the ``release.xsh`` script. 
To get a list of the valid options use:: $ xonsh release.xsh --help You can perform a full release:: $ xonsh release.xsh Or only a specific one:: $ xonsh release.xsh --only-pip You can also exclude a release:: $ xonsh release.xsh --no-conda Document History =================== Portions of this page have been forked from the PyNE documentation, Copyright 2011-2015, the PyNE Development Team. All rights reserved. .. _PEP8: https://www.python.org/dev/peps/pep-0008/ .. _numpydoc: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt xonsh-0.6.0/MANIFEST.in000066400000000000000000000000631320541242300144050ustar00rootroot00000000000000include logo.txt include scripts/* include tests/* xonsh-0.6.0/Makefile000066400000000000000000000012571320541242300143150ustar00rootroot00000000000000# Make GNU Make xonshy SHELL=xonsh .SHELLFLAGS=-c .ONESHELL: .SILENT: # Unlike normal makefiles: executes the entire body in one go under xonsh, and doesn't echo .PHONY: help help: print(""" Utility file for xonsh project. Try these targets: * amalgamate: Generate __amalgam__.py files * clean: Remove generated files (namely, the amalgamations) * xonsh/ply: Pull down most recent ply """) xonsh/ply: git subtree pull --prefix xonsh/ply https://github.com/dabeaz/ply.git master --squash .PHONY: clean clean: find xonsh -name __amalgam__.py -delete -print .PHONY: amalgamate amalgamate: sys.path.insert(0, '.') import setup setup.amalgamate_source() _ = sys.path.pop(0) xonsh-0.6.0/README.rst000066400000000000000000000025221320541242300143400ustar00rootroot00000000000000xonsh ===== .. image:: https://badges.gitter.im/xonsh/xonsh.svg :alt: Join the chat at https://gitter.im/xonsh/xonsh :target: https://gitter.im/xonsh/xonsh?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge .. image:: https://travis-ci.org/xonsh/xonsh.svg?branch=master :target: https://travis-ci.org/xonsh/xonsh .. image:: https://ci.appveyor.com/api/projects/status/github/xonsh/xonsh?svg=true :target: https://ci.appveyor.com/project/xonsh/xonsh .. image:: https://circleci.com/gh/xonsh/xonsh.svg?style=shield :target: https://circleci.com/gh/xonsh/xonsh .. image:: https://landscape.io/github/xonsh/xonsh/master/landscape.svg?style=flat :target: https://landscape.io/github/xonsh/xonsh/master :alt: Code Health .. image:: https://codecov.io/gh/xonsh/xonsh/branch/master/graph/badge.svg :target: https://codecov.io/gh/xonsh/xonsh xonsh is a Python-powered, cross-platform, Unix-gazing shell language and command prompt. The language is a superset of Python 3.4+ with additional shell primitives. xonsh (pronounced *conch*) is meant for the daily use of experts and novices alike. Please visit http://xon.sh for more information. Projects that use xonsh *********************** - `gitsome `_: A supercharged Git/shell autocompleter with GitHub integration. xonsh-0.6.0/amalgamate.py000077500000000000000000000415631320541242300153270ustar00rootroot00000000000000#!/usr/bin/env python3 """A package-based, source code amalgamater.""" import os import sys import pprint from itertools import repeat from collections import namedtuple from collections.abc import Mapping from ast import parse, walk, Import, ImportFrom __version__ = '0.1.2' ModNode = namedtuple('ModNode', ['name', 'pkgdeps', 'extdeps', 'futures']) ModNode.__doc__ = """Module node for dependency graph. Attributes ---------- name : str Module name. pkgdeps : frozenset of str Module dependencies in the same package. 
extdeps : frozenset of str External module dependencies from outside of the package. futures : frozenset of str Import directive names antecedent to 'from __future__ import' """ class SourceCache(Mapping): """Stores / loads source code for files based on package and module names.""" def __init__(self, *args, **kwargs): self._d = dict(*args, **kwargs) def __getitem__(self, key): d = self._d if key in d: return d[key] pkg, name = key pkgdir = pkg.replace('.', os.sep) fname = pkgdir + os.sep + name + '.py' with open(fname, encoding='utf-8', errors='surrogateescape') as f: raw = f.read() d[key] = raw return raw def __iter__(self): yield from self._d def __len__(self): return len(self._d) SOURCES = SourceCache() class GlobalNames(object): """Stores globally defined names that have been seen on ast nodes.""" impnodes = frozenset(['import', 'importfrom']) def __init__(self, pkg=''): self.cache = {} self.pkg = pkg self.module = '' self.topnode = None def warn_duplicates(self): s = '' for key in sorted(self.cache.keys()): val = self.cache[key] if len(val) < 2: continue val = sorted(val) if all([val[0][0] == x[0] for x in val[1:]]): continue s += 'WARNING: {0!r} defined in multiple locations:\n'.format(key) for loc in val: s += ' {}:{} ({})\n'.format(*loc) if len(s) > 0: print(s, end='', flush=True, file=sys.stderr) def entry(self, name, lineno): if name.startswith('__'): return topnode = self.topnode e = (self.pkg + '.' + self.module, lineno, topnode) if name in self.cache: if topnode in self.impnodes and \ all([topnode == x[2] for x in self.cache[name]]): return self.cache[name].add(e) else: self.cache[name] = set([e]) def add(self, node, istopnode=False): """Adds the names from the node to the cache.""" nodename = node.__class__.__name__.lower() if istopnode: self.topnode = nodename meth = getattr(self, '_add_' + nodename, None) if meth is not None: meth(node) def _add_name(self, node): self.entry(node.id, node.lineno) def _add_tuple(self, node): for x in node.elts: self.add(x) def _add_assign(self, node): for target in node.targets: self.add(target) def _add_functiondef(self, node): self.entry(node.name, node.lineno) def _add_classdef(self, node): self.entry(node.name, node.lineno) def _add_import(self, node): lineno = node.lineno for target in node.names: if target.asname is None: name, _, _ = target.name.partition('.') else: name = target.asname self.entry(name, lineno) def _add_importfrom(self, node): pkg, _ = resolve_package_module(node.module, self.pkg, node.level) if pkg == self.pkg: return lineno = node.lineno for target in node.names: if target.asname is None: name = target.name else: name = target.asname self.entry(name, lineno) def _add_with(self, node): for item in node.items: if item.optional_vars is None: continue self.add(item.optional_vars) for child in node.body: self.add(child, istopnode=True) def _add_for(self, node): self.add(node.target) for child in node.body: self.add(child, istopnode=True) def _add_while(self, node): for child in node.body: self.add(child, istopnode=True) def _add_if(self, node): for child in node.body: self.add(child, istopnode=True) for child in node.orelse: self.add(child, istopnode=True) def _add_try(self, node): for child in node.body: self.add(child, istopnode=True) def module_is_package(module, pkg, level): """Returns whether or not the module name refers to the package.""" if level == 0: return module == pkg elif level == 1: return module is None else: return False def module_from_package(module, pkg, level): """Returns whether or not a module is 
from the package.""" if level == 0: return module.startswith(pkg + '.') elif level == 1: return True else: return False def resolve_package_module(module, pkg, level, default=None): """Returns a 2-tuple of package and module name, even for relative imports """ if level == 0: p, _, m = module.rpartition('.') elif level == 1: p = pkg m = module or default else: p = m = None return p, m def make_node(name, pkg, allowed, glbnames): """Makes a node by parsing a file and traversing its AST.""" raw = SOURCES[pkg, name] tree = parse(raw, filename=name) # we only want to deal with global import statements pkgdeps = set() extdeps = set() futures = set() glbnames.module = name for a in tree.body: glbnames.add(a, istopnode=True) if isinstance(a, Import): for n in a.names: p, dot, m = n.name.rpartition('.') if p == pkg and m in allowed: pkgdeps.add(m) else: extdeps.add(n.name) elif isinstance(a, ImportFrom): if module_is_package(a.module, pkg, a.level): pkgdeps.update(n.name for n in a.names if n.name in allowed) elif module_from_package(a.module, pkg, a.level): p, m = resolve_package_module(a.module, pkg, a.level, default=a.names[0].name) if p == pkg and m in allowed: pkgdeps.add(m) else: extdeps.add(a.module) elif a.module == '__future__': futures.update(n.name for n in a.names) return ModNode(name, frozenset(pkgdeps), frozenset(extdeps), frozenset(futures)) def make_graph(pkg, exclude=None): """Create a graph (dict) of module dependencies.""" graph = {} pkgdir = pkg.replace('.', os.sep) allowed = set() files = os.listdir(pkgdir) for fname in files: base, ext = os.path.splitext(fname) if base.startswith('__') or ext != '.py': continue allowed.add(base) if exclude: allowed -= exclude glbnames = GlobalNames(pkg=pkg) for base in allowed: graph[base] = make_node(base, pkg, allowed, glbnames) glbnames.warn_duplicates() return graph def depsort(graph): """Sort modules by dependency.""" remaining = set(graph.keys()) seder = [] solved = set() while 0 < len(remaining): nodeps = {m for m in remaining if len(graph[m].pkgdeps - solved) == 0} if len(nodeps) == 0: msg = ('\nsolved order = {0}\nremaining = {1}\nCycle detected in ' 'module graph!').format(pprint.pformat(seder), pprint.pformat(remaining)) raise RuntimeError(msg) solved |= nodeps remaining -= nodeps seder += sorted(nodeps) return seder LAZY_IMPORTS = """ from sys import modules as _modules from types import ModuleType as _ModuleType from importlib import import_module as _import_module class _LazyModule(_ModuleType): def __init__(self, pkg, mod, asname=None): '''Lazy module 'pkg.mod' in package 'pkg'.''' self.__dct__ = { 'loaded': False, 'pkg': pkg, # pkg 'mod': mod, # pkg.mod 'asname': asname, # alias } @classmethod def load(cls, pkg, mod, asname=None): if mod in _modules: key = pkg if asname is None else mod return _modules[key] else: return cls(pkg, mod, asname) def __getattribute__(self, name): if name == '__dct__': return super(_LazyModule, self).__getattribute__(name) dct = self.__dct__ mod = dct['mod'] if dct['loaded']: m = _modules[mod] else: m = _import_module(mod) glbs = globals() pkg = dct['pkg'] asname = dct['asname'] if asname is None: glbs[pkg] = m = _modules[pkg] else: glbs[asname] = m dct['loaded'] = True return getattr(m, name) """ def get_lineno(node, default=0): """Gets the lineno of a node or returns the default.""" return getattr(node, 'lineno', default) def min_line(node): """Computes the minimum lineno.""" node_line = get_lineno(node) return min(map(get_lineno, walk(node), repeat(node_line))) def format_import(names): """Format 
an import line""" parts = [] for _, name, asname in names: if asname is None: parts.append(name) else: parts.append(name + ' as ' + asname) line = 'import ' + ', '.join(parts) + '\n' return line def format_lazy_import(names): """Formats lazy import lines""" lines = '' for _, name, asname in names: pkg, _, _ = name.partition('.') if asname is None: line = '{pkg} = _LazyModule.load({pkg!r}, {mod!r})\n' else: line = '{asname} = _LazyModule.load({pkg!r}, {mod!r}, {asname!r})\n' lines += line.format(pkg=pkg, mod=name, asname=asname) return lines def format_from_import(names): """Format a from import line""" parts = [] for _, module, name, asname in names: if asname is None: parts.append(name) else: parts.append(name + ' as ' + asname) line = 'from ' + module line += ' import ' + ', '.join(parts) + '\n' return line def rewrite_imports(name, pkg, order, imps): """Rewrite the global imports in the file given the amalgamation.""" raw = SOURCES[pkg, name] tree = parse(raw, filename=name) replacements = [] # list of (startline, stopline, str) tuples # collect replacements in forward direction for a, b in zip(tree.body, tree.body[1:] + [None]): if not isinstance(a, (Import, ImportFrom)): continue start = min_line(a) - 1 stop = len(tree.body) if b is None else min_line(b) - 1 if isinstance(a, Import): keep = [] for n in a.names: p, dot, m = n.name.rpartition('.') if p == pkg and m in order: msg = ('Cannot amalgamate import of amalgamated module:' '\n\n import {0}.{1}\n\nin {0}/{2}.py').format( pkg, n.name, name) raise RuntimeError(msg) imp = (Import, n.name, n.asname) if imp not in imps: imps.add(imp) keep.append(imp) if len(keep) == 0: s = ', '.join(n.name for n in a.names) s = '# amalgamated ' + s + '\n' else: s = format_lazy_import(keep) replacements.append((start, stop, s)) elif isinstance(a, ImportFrom): p, m = resolve_package_module(a.module, pkg, a.level, default='') if module_is_package(a.module, pkg, a.level): for n in a.names: if n.name in order: msg = ('Cannot amalgamate import of ' 'amalgamated module:\n\n from {0} import {1}\n' '\nin {0}/{2}.py').format(pkg, n.name, name) raise RuntimeError(msg) elif p == pkg and m in order: replacements.append((start, stop, '# amalgamated ' + p + '.' 
+ m + '\n')) elif a.module == '__future__': replacements.append((start, stop, '# amalgamated __future__ directive\n')) else: keep = [] for n in a.names: imp = (ImportFrom, a.module, n.name, n.asname) if imp not in imps: imps.add(imp) keep.append(imp) if len(keep) == len(a.names): continue # all new imports elif len(keep) == 0: s = ', '.join(n.name for n in a.names) s = '# amalgamated from ' + a.module + ' import ' + s + '\n' else: s = format_from_import(keep) replacements.append((start, stop, s)) # apply replacements in reverse lines = raw.splitlines(keepends=True) for start, stop, s in replacements[::-1]: lines[start] = s for i in range(stop - start - 1): del lines[start+1] return ''.join(lines) def sorted_futures(graph): """Returns a sorted, unique list of future imports.""" f = set() for value in graph.values(): f |= value.futures return sorted(f) def amalgamate(order, graph, pkg): """Create amalgamated source.""" src = ('\"\"\"Amalgamation of {0} package, made up of the following ' 'modules, in order:\n\n* ').format(pkg) src += '\n* '.join(order) src += '\n\n\"\"\"\n' futures = sorted_futures(graph) if len(futures) > 0: src += 'from __future__ import ' + ', '.join(futures) + '\n' src += LAZY_IMPORTS imps = set() for name in order: lines = rewrite_imports(name, pkg, order, imps) src += '#\n# ' + name + '\n#\n' + lines + '\n' return src def write_amalgam(src, pkg): """Write out __amalgam__.py file""" pkgdir = pkg.replace('.', os.sep) fname = os.path.join(pkgdir, '__amalgam__.py') with open(fname, 'w', encoding='utf-8', errors='surrogateescape') as f: f.write(src) def _init_name_lines(pkg): pkgdir = pkg.replace('.', os.sep) fname = os.path.join(pkgdir, '__init__.py') with open(fname, encoding='utf-8', errors='surrogateescape') as f: raw = f.read() lines = raw.splitlines() return fname, lines def read_exclude(pkg): """reads in modules to exclude from __init__.py""" _, lines = _init_name_lines(pkg) exclude = set() for line in lines: if line.startswith('# amalgamate exclude'): exclude.update(line.split()[3:]) return exclude FAKE_LOAD = """ import os as _os if _os.getenv('{debug}', ''): pass else: import sys as _sys try: from {pkg} import __amalgam__ {load} del __amalgam__ except ImportError: pass del _sys del _os """.strip() def rewrite_init(pkg, order, debug='DEBUG'): """Rewrites the init file to insert modules.""" fname, lines = _init_name_lines(pkg) start, stop = -1, -1 for i, line in enumerate(lines): if line.startswith('# amalgamate end'): stop = i elif line.startswith('# amalgamate'): start = i t = ("{1} = __amalgam__\n " "_sys.modules['{0}.{1}'] = __amalgam__") load = '\n '.join(t.format(pkg, m) for m in order) s = FAKE_LOAD.format(pkg=pkg, load=load, debug=debug) if start + 1 == stop: lines.insert(stop, s) else: lines[start+1] = s lines = lines[:start+2] + lines[stop:] init = '\n'.join(lines) + '\n' with open(fname, 'w', encoding='utf-8', errors='surrogateescape') as f: f.write(init) def main(args=None): if args is None: args = sys.argv debug = 'DEBUG' for pkg in args[1:]: if pkg.startswith('--debug='): debug = pkg[8:] continue print('Amalgamating ' + pkg) exclude = read_exclude(pkg) print(' excluding {}'.format(pprint.pformat(exclude or None))) graph = make_graph(pkg, exclude=exclude) order = depsort(graph) src = amalgamate(order, graph, pkg) write_amalgam(src, pkg) rewrite_init(pkg, order, debug=debug) print(' collapsed {} modules'.format(len(order))) if __name__ == '__main__': main() 
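# Illustrative usage note (not part of the original module): ``main()`` treats
# its argument list like ``sys.argv``, so a hypothetical package ``mypkg``
# could be amalgamated programmatically with something like
#
#     import amalgamate
#     amalgamate.main(['amalgamate', '--debug=MYPKG_DEBUG', 'mypkg'])
#
# This writes ``mypkg/__amalgam__.py`` and rewrites ``mypkg/__init__.py``
# between its ``# amalgamate`` / ``# amalgamate end`` markers.  The value
# passed via ``--debug=`` names the environment variable that, when set,
# makes ``__init__.py`` skip the amalgamated imports and fall back to the
# regular per-module sources.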
xonsh-0.6.0/appveyor.yml000077700000000000000000000000001320541242300177012.appveyor.ymlustar00rootroot00000000000000xonsh-0.6.0/circle.yml000077700000000000000000000000001320541242300166712.circle.ymlustar00rootroot00000000000000xonsh-0.6.0/codecov.yml000066400000000000000000000001041320541242300150100ustar00rootroot00000000000000coverage: status: patch: default: threshold: 2% xonsh-0.6.0/conftest.py000066400000000000000000000002121320541242300150420ustar00rootroot00000000000000# empty file to trick py.test into adding the root folder to sys.path # see https://github.com/pytest-dev/pytest/issues/911 for more info xonsh-0.6.0/docs/000077500000000000000000000000001320541242300136005ustar00rootroot00000000000000xonsh-0.6.0/docs/Makefile000066400000000000000000000103541320541242300152430ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build RELEASE = v0.1 # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . DOCREPONAME = xonsh-docs DOCREPOURL = git@github.com:scopatz/xonsh-docs.git DOCREPOBRANCH = gh-pages .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/metasci.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/metasci.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." 
linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." push-latest: cd $(BUILDDIR) && \ test -d $(DOCREPONAME) || git clone $(DOCREPOURL) $(DOCREPONAME) && \ cd $(DOCREPONAME) && \ git pull origin $(DOCREPOBRANCH) && \ test -d latest || mkdir latest && touch latest/_ && \ rm -r latest/* && \ cp -r ../html/* latest/ && \ git add latest/ && \ git commit -am "Pushed latest docs at $(date)" && \ git push push-release: cd $(BUILDDIR) && \ test -d $(DOCREPONAME) || git clone $(DOCREPOURL) $(DOCREPONAME) && \ cd $(DOCREPONAME) && \ git pull origin $(DOCREPOBRANCH) && \ test -d $(RELEASE) || mkdir $(RELEASE) && touch $(RELEASE)/_ && \ rm -r $(RELEASE)/* && \ cp -r ../html/* $(RELEASE)/ && \ git add $(RELEASE)/ && \ git commit -am "Pushed $(RELEASE) docs at $(date)" && \ git push push-root: cd $(BUILDDIR) && \ test -d $(DOCREPONAME) || git clone $(DOCREPOURL) $(DOCREPONAME) && \ cd $(DOCREPONAME) && \ git pull origin $(DOCREPOBRANCH) && \ rm -rf api previous _sources _static *.html *.js *.inv && \ cp -r ../html/* . && \ git add . && \ git commit -am "Pushed root-level docs at $(date)" && \ git push xonsh-0.6.0/docs/_templates_overwrite/000077500000000000000000000000001320541242300200435ustar00rootroot00000000000000xonsh-0.6.0/docs/_templates_overwrite/layout.html000066400000000000000000000002301320541242300222410ustar00rootroot00000000000000{% extends '!layout.html' %} {% block extrahead %} {{ super() }} {% endblock %} xonsh-0.6.0/docs/advanced_events.rst000066400000000000000000000033021320541242300174610ustar00rootroot00000000000000.. _events: ******************** Advanced Events ******************** If you haven't, go read the `events tutorial `_ first. This documents the messy details of the event system. You may also find the `events API reference `_ useful. Why Unordered? ============== Yes, handler call order is not guaranteed. Please don't file bugs about this. This was chosen because the order of handler registration is dependent on load order, which is stable in a release but not something generally reasoned about. In addition, xontribs mean that we don't know what handlers could be registered. So even an "ordered" event system would be unable to make guarantees about ordering because of the larger system. Because of this, the event system is not ordered; this is a form of abstraction. Order-dependent semantics are not encouraged by the built-in methods. So how do I handle results? =========================== ``Event.fire()`` returns a list of the returns from the handlers. You should merge this list in an appropriate way. What are Species? ================= In xonsh, events come in species. Each one may look like an event and quack like an event, but they behave differently. This was done because load hooks look like events and quack like events, but they have different semantics. See `LoadEvents `_ for details. In order to turn an event from the default ``Event`` species into another, you must transmogrify it, using ``events.transmogrify()``. The class the event is turned into must be a subclass of ``AbstractEvent``. (Under the hood, transmogrify creates a new instance and copies the handlers and docstring from the old instance to the new one.)
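As a concrete sketch of the "So how do I handle results?" advice above, the snippet below registers two handlers on a hypothetical event and flattens the per-handler returns from ``fire()`` into one list; the event name and handlers are illustrative only and are not part of xonsh itself:

.. code-block:: xonsh

    events.doc('on_suggestions_requested',
               'Hypothetical event; each handler returns a list of strings.')

    @events.on_suggestions_requested
    def _history_suggestions(**kwargs):
        return ['check the history']

    @events.on_suggestions_requested
    def _alias_suggestions(**kwargs):
        return ['define an alias']

    # fire() yields one entry per handler, in no guaranteed order; merge them
    # however makes sense for the event -- here by flattening into one list.
    results = events.on_suggestions_requested.fire()
    merged = [s for handler_result in results for s in (handler_result or [])]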
xonsh-0.6.0/docs/aliases.rst000066400000000000000000000122611320541242300157550ustar00rootroot00000000000000.. _aliases: ******************** Built-in Aliases ******************** This page describes the xonsh built-in commands and aliases. ``cd`` =================== Changes the directory. If no directory is specified (i.e. if there are no arguments) then this changes to the current user's home directory. ``pushd`` =================== Adds a directory to the top of the directory stack, or rotates the stack, making the new top of the stack the current working directory. .. command-help:: xonsh.dirstack.pushd ``popd`` =================== Removes entries from the directory stack. .. command-help:: xonsh.dirstack.popd ``dirs`` =================== Displays the list of currently remembered directories. Can also be used to clear the directory stack. .. command-help:: xonsh.dirstack.dirs ``jobs`` =================== Display a list of all current jobs. ``fg`` =================== Bring the currently active job to the foreground, or, if a single number is given as an argument, bring that job to the foreground. ``bg`` ==================== Resume execution of the currently active job in the background, or, if a single number is given as an argument, resume that job in the background. ``EOF``, ``exit``, and ``quit`` =================================== The commands ``EOF``, ``exit``, and ``quit`` all alias the same action, which is to leave xonsh in a safe manner. Typing ``Crtl-d`` is the same as typing ``EOF`` and pressing enter. ``exec`` and ``xexec`` ========================= .. command-help:: xonsh.aliases.xexec ``source`` ==================== Executes the contents of the provided files in the current context. This, of course, only works on xonsh and Python files. ``source-bash`` ==================== Like the ``source`` command but for Bash files. This is a thin wrapper around the ``source-foreign`` alias where the ``shell`` argument is automatically set to ``bash``. ``source-foreign`` ==================== Like the ``source`` command but for files in foreign (non-xonsh) languages. It will pick up the environment and any aliases. .. command-help:: xonsh.aliases.source_foreign ``history`` ==================== Tools for dealing with xonsh history. See `the history tutorial `_ for more information all the history command and all of its sub-commands. .. command-help:: xonsh.history.main.history_main ``replay`` ===================== Replays a xonsh history file. See `the replay section of the history tutorial `_ for more information about this command. .. command-help:: xonsh.replay.replay_main ``timeit`` =============== Runs timing study on arguments. Similar to IPython's ``%timeit`` magic. ``scp-resume`` ================= Simple alias defined as ``['rsync', '--partial', '-h', '--progress', '--rsh=ssh']``. ``showcmd`` ============ Displays how commands and arguments are evaluated. ``ipynb`` ================= Simple alias defined as ``['ipython', 'notebook', '--no-browser']``. ``trace`` ================= Provides an interface to printing lines of source code prior to their execution. .. command-help:: xonsh.tracer.tracermain ``xpip`` ================= Runs the ``pip`` package manager for xonsh itself. Useful for installations where xonsh is in an isolated environment (eg conda, homebrew). In general, use ``xpip`` if you're configuring or adding features to xonsh, and use ``pip`` if you're doing Python development. ``xonfig`` ================= Manages xonsh configuration information. .. 
command-help:: xonsh.xonfig.xonfig_main Windows cmd Aliases ======================= The following aliases on Windows are expanded to ``['cmd', '/c', alias]``: .. code-block:: python {'cls': ['cmd', '/c', 'cls'], 'copy': ['cmd', '/c', 'copy'], 'del': ['cmd', '/c', 'del'], 'dir': ['cmd', '/c', 'dir'], 'erase': ['cmd', '/c', 'erase'], 'md': ['cmd', '/c', 'md'], 'mkdir': ['cmd', '/c', 'mkdir'], 'mklink': ['cmd', '/c', 'mklink'], 'move': ['cmd', '/c', 'move'], 'rd': ['cmd', '/c', 'rd'], 'ren': ['cmd', '/c', 'ren'], 'rename': ['cmd', '/c', 'rename'], 'rmdir': ['cmd', '/c', 'rmdir'], 'time': ['cmd', '/c', 'time'], 'type': ['cmd', '/c', 'type'], 'vol': ['cmd', '/c', 'vol'], } ``activate``/``deactivate`` on Windows with Anaconda ========================================================= On Windows with an Anaconda Python distribution, ``activate`` and ``deactivate`` are aliased to ``['source-bat activate']`` and ``['source-bat deactivate']``. This makes it possible to use the same commands to activate/deactivate conda environments as in cmd.exe. ``sudo`` on Windows ==================== On Windows, if no executables named ``sudo`` are found, Xonsh adds a ``sudo`` alias that poly fills the "run as Admin" behavior with the help of ``ShellExecuteEx`` and ``ctypes``. It doesn't support any actual ``sudo`` parameters and just takes the command to run. ``ls`` ==================== The ``ls`` command is aliased to ``['ls', '--color=auto', '-v']`` normally. On Mac OSX it is instead aliased to ``['ls', '-G']``. ``grep`` ==================== The ``grep`` command is aliased to ``['grep', '--color=auto']``. ``xontrib`` ============== Manages xonsh extensions. xonsh-0.6.0/docs/api/000077500000000000000000000000001320541242300143515ustar00rootroot00000000000000xonsh-0.6.0/docs/api/aliases.rst000066400000000000000000000003671320541242300165320ustar00rootroot00000000000000.. _xonsh_aliases: ****************************************************** Aliases (``xonsh.aliases``) ****************************************************** .. automodule:: xonsh.aliases :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/ast.rst000066400000000000000000000003701320541242300156720ustar00rootroot00000000000000.. _xonsh_ast: ****************************************************** Abstract Syntax Tree (``xonsh.ast``) ****************************************************** .. automodule:: xonsh.ast :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/base_shell.rst000066400000000000000000000003611320541242300172040ustar00rootroot00000000000000.. _xonsh_base_shell: ****************************************************** Base Shell Class (``xonsh.base_shell``) ****************************************************** .. automodule:: xonsh.base_shell :members: :undoc-members: xonsh-0.6.0/docs/api/blank.rst000066400000000000000000000004311320541242300161700ustar00rootroot00000000000000.. _xonsh_mod: ******************************************************************************** (``xonsh.mod``) ******************************************************************************** .. automodule:: xonsh.mod :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/built_ins.rst000066400000000000000000000003771320541242300171020ustar00rootroot00000000000000.. _xonsh_built_ins: ****************************************************** Built-Ins (``xonsh.built_ins``) ****************************************************** .. 
automodule:: xonsh.built_ins :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/codecache.rst000066400000000000000000000004401320541242300167770ustar00rootroot00000000000000.. _xonsh_codecache: ********************************************************* Compiling and Caching of Xonsh Code (``xonsh.codecache``) ********************************************************* .. automodule:: xonsh.codecache :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/color_tools.rst000066400000000000000000000003601320541242300174400ustar00rootroot00000000000000.. _xonsh_color_tools: ****************************************** Color Tools (``xonsh.color_tools``) ****************************************** .. automodule:: xonsh.color_tools :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/commands_cache.rst000066400000000000000000000004231320541242300200260ustar00rootroot00000000000000.. _xonsh_commands_cache: ****************************************************** Commands Cache (``xonsh.commands_cache``) ****************************************************** .. automodule:: xonsh.commands_cache :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completer.rst000066400000000000000000000004031320541242300170720ustar00rootroot00000000000000.. _xonsh_completer: ****************************************************** Tab Completer (``xonsh.completer``) ****************************************************** .. automodule:: xonsh.completer :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completers/000077500000000000000000000000001320541242300165265ustar00rootroot00000000000000xonsh-0.6.0/docs/api/completers/base.rst000066400000000000000000000004111320541242300201660ustar00rootroot00000000000000.. _xonsh_completers_base: *********************************************** Base Completer (``xonsh.completers.base``) *********************************************** .. automodule:: xonsh.completers.base :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completers/bash.rst000066400000000000000000000004121320541242300201720ustar00rootroot00000000000000.. _xonsh_completers_bash: *********************************************** Bash Completers (``xonsh.completers.bash``) *********************************************** .. automodule:: xonsh.completers.bash :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completers/commands.rst000066400000000000000000000004561320541242300210660ustar00rootroot00000000000000.. _xonsh_completers_commands: ******************************************************* First Command Completer (``xonsh.completers.commands``) ******************************************************* .. automodule:: xonsh.completers.commands :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completers/dirs.rst000066400000000000000000000004211320541242300202160ustar00rootroot00000000000000.. _xonsh_completers_dirs: ************************************************ Directory Completers (``xonsh.completers.dirs``) ************************************************ .. automodule:: xonsh.completers.dirs :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completers/index.rst000066400000000000000000000004151320541242300203670ustar00rootroot00000000000000.. _api_completers: ================= Completers API ================= All of the ways that xonsh completes you. **Stock Xonsh COmpleters:** .. 
toctree:: :maxdepth: 2 base bash commands dirs man path python tools xompletions xonsh-0.6.0/docs/api/completers/man.rst000066400000000000000000000004541320541242300200360ustar00rootroot00000000000000.. _xonsh_completers_man: *********************************************************** Manual Page Based Completers (``xonsh.completers.man``) *********************************************************** .. automodule:: xonsh.completers.man :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completers/path.rst000066400000000000000000000004531320541242300202160ustar00rootroot00000000000000.. _xonsh_completers_path: ********************************************************** File System Path Completer (``xonsh.completers.path``) ********************************************************** .. automodule:: xonsh.completers.path :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completers/python.rst000066400000000000000000000004211320541242300205760ustar00rootroot00000000000000.. _xonsh_completers_python: *********************************************** Python Completer (``xonsh.completers.python``) *********************************************** .. automodule:: xonsh.completers.python :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completers/tools.rst000066400000000000000000000004161320541242300204210ustar00rootroot00000000000000.. _xonsh_completers_tools: *********************************************** Completion Tools (``xonsh.completers.tools``) *********************************************** .. automodule:: xonsh.completers.tools :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/completers/xompletions.rst000066400000000000000000000004721320541242300216440ustar00rootroot00000000000000.. _xonsh_completers_xompletions: ********************************************************* Completion Xompletions (``xonsh.completers.xompletions``) ********************************************************* .. automodule:: xonsh.completers.xompletions :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/contexts.rst000066400000000000000000000005021320541242300167470ustar00rootroot00000000000000.. _xonsh_contexts: ******************************************************************************** Context Managers for Xonsh (``xonsh.contexts``) ******************************************************************************** .. automodule:: xonsh.contexts :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/diff_history.rst000066400000000000000000000004161320541242300175750ustar00rootroot00000000000000.. _xonsh_diff_history: ****************************************************** History Diff'er (``xonsh.diff_history``) ****************************************************** .. automodule:: xonsh.diff_history :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/dirstack.rst000066400000000000000000000004021320541242300167030ustar00rootroot00000000000000.. _xonsh_dirstack: ****************************************************** Directory Stack (``xonsh.dirstack``) ****************************************************** .. automodule:: xonsh.dirstack :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/environ.rst000066400000000000000000000003731320541242300165660ustar00rootroot00000000000000.. _xonsh_environ: ****************************************************** Environment (``xonsh.environ``) ****************************************************** .. 
automodule:: xonsh.environ :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/events.rst000066400000000000000000000004511320541242300164070ustar00rootroot00000000000000.. _xonsh_events: ******************************************************************************** Events (``xonsh.events``) ******************************************************************************** .. automodule:: xonsh.events :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/execer.rst000066400000000000000000000004351320541242300163600ustar00rootroot00000000000000.. _xonsh_execer: *********************************************************** Compiliation, Evaluation, & Execution (``xonsh.execer``) *********************************************************** .. automodule:: xonsh.execer :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/foreign_shells.rst000066400000000000000000000004301320541242300201030ustar00rootroot00000000000000.. _xonsh_foreign_shells: ****************************************************** Foreign Shell Tools (``xonsh.foreign_shells``) ****************************************************** .. automodule:: xonsh.foreign_shells :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/history/000077500000000000000000000000001320541242300160525ustar00rootroot00000000000000xonsh-0.6.0/docs/api/history/base.rst000066400000000000000000000004021320541242300175120ustar00rootroot00000000000000.. _xonsh_history_base: =============================================== History Base Class -- :mod:`xonsh.history.base` =============================================== .. currentmodule:: xonsh.history.base .. automodule:: xonsh.history.base :members: xonsh-0.6.0/docs/api/history/index.rst000066400000000000000000000002431320541242300177120ustar00rootroot00000000000000.. _api_history: ================= Xonsh History API ================= **History Modules:** .. toctree:: :maxdepth: 2 base main json sqlite xonsh-0.6.0/docs/api/history/json.rst000066400000000000000000000004101320541242300175500ustar00rootroot00000000000000.. _xonsh_history_json: ================================================= History Backend JSON -- :mod:`xonsh.history.json` ================================================= .. currentmodule:: xonsh.history.json .. automodule:: xonsh.history.json :members: xonsh-0.6.0/docs/api/history/main.rst000066400000000000000000000004341320541242300175310ustar00rootroot00000000000000.. _xonsh_history_main: ====================================================== History Main Entry Points -- :mod:`xonsh.history.main` ====================================================== .. automodule:: xonsh.history.main :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/history/sqlite.rst000066400000000000000000000004321320541242300201040ustar00rootroot00000000000000.. _xonsh_history_sqlite: ===================================================== History Backend Sqlite -- :mod:`xonsh.history.sqlite` ===================================================== .. currentmodule:: xonsh.history.sqlite .. automodule:: xonsh.history.sqlite :members: xonsh-0.6.0/docs/api/imphooks.rst000066400000000000000000000003471320541242300167400ustar00rootroot00000000000000.. _xonsh_imphooks: ****************************************************** Import Hooks (``xonsh.imphooks``) ****************************************************** .. 
automodule:: xonsh.imphooks :members: :undoc-members: xonsh-0.6.0/docs/api/index.rst000066400000000000000000000016521320541242300162160ustar00rootroot00000000000000.. _api: ================= Xonsh API ================= For those of you who want the gritty details. **Language:** .. toctree:: :maxdepth: 1 lexer parser ast execer imphooks **Command Prompt:** .. toctree:: :maxdepth: 1 built_ins environ aliases dirstack jobs proc inspectors history/index completer completers/index prompt/index shell base_shell readline_shell ptk/shell ptk/history ptk/completer ptk/key_bindings pretty replay diff_history xoreutils/index **Helpers:** .. toctree:: :maxdepth: 1 events tools platform jsonutils lazyjson lazyasd openpy foreign_shells commands_cache tracer main color_tools pyghooks jupyter_kernel wizard xonfig codecache contexts **Xontribs:** .. toctree:: :maxdepth: 1 mplhooks vox xonsh-0.6.0/docs/api/inspectors.rst000066400000000000000000000004121320541242300172710ustar00rootroot00000000000000.. _xonsh_inspectors: ****************************************************** Object Inspectors (``xonsh.inspectors``) ****************************************************** .. automodule:: xonsh.inspectors :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/jobs.rst000066400000000000000000000003621320541242300160410ustar00rootroot00000000000000.. _xonsh_jobs: ****************************************************** Job Control (``xonsh.jobs``) ****************************************************** .. automodule:: xonsh.jobs :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/jsonutils.rst000066400000000000000000000003251320541242300171350ustar00rootroot00000000000000.. _xonsh_jsonutils: ****************************************** JSON Utilities (``xonsh.jsonutils``) ****************************************** .. automodule:: xonsh.jsonutils :members: :undoc-members: xonsh-0.6.0/docs/api/jupyter_kernel.rst000066400000000000000000000003441320541242300201460ustar00rootroot00000000000000.. _xonsh_jupyter_kernel: ****************************************** Jupyter Kernel (``xonsh.jupyter_kernel``) ****************************************** .. automodule:: xonsh.jupyter_kernel :members: :undoc-members: xonsh-0.6.0/docs/api/lazyasd.rst000066400000000000000000000004541320541242300165550ustar00rootroot00000000000000.. _xonsh_lazyasd: ******************************************************************************** Lazy & Self-destructive Objects (``xonsh.lazyasd``) ******************************************************************************** .. automodule:: xonsh.lazyasd :members: :undoc-members: xonsh-0.6.0/docs/api/lazyjson.rst000066400000000000000000000003231320541242300167520ustar00rootroot00000000000000.. _xonsh_lazyjson: ****************************************** Lazy JSON Files (``xonsh.lazyjson``) ****************************************** .. automodule:: xonsh.lazyjson :members: :undoc-members: xonsh-0.6.0/docs/api/lexer.rst000066400000000000000000000005561320541242300162300ustar00rootroot00000000000000.. _xonsh_lexer: ****************************************************** Lexer (``xonsh.lexer``) ****************************************************** .. automodule:: xonsh.lexer :members: :undoc-members: :inherited-members: :exclude-members: t_NAME, t_BYTES_LITERAL, t_RAW_STRING_LITERAL, t_UNICODE_LITERAL, t_STRING_LITERALxonsh-0.6.0/docs/api/main.rst000066400000000000000000000003751320541242300160340ustar00rootroot00000000000000.. 
_xonsh_main: ****************************************************** Command Line Interface (``xonsh.main``) ****************************************************** .. automodule:: xonsh.main :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/mplhooks.rst000066400000000000000000000003611320541242300167370ustar00rootroot00000000000000.. _xontrib_mplhooks: ****************************************************** Matplotlib Hooks (``xontrib.mplhooks``) ****************************************************** .. automodule:: xontrib.mplhooks :members: :undoc-members: xonsh-0.6.0/docs/api/openpy.rst000066400000000000000000000003761320541242300164230ustar00rootroot00000000000000.. _xonsh_openpy: ****************************************************** Open Python Files (``xonsh.openpy``) ****************************************************** .. automodule:: xonsh.openpy :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/parser.rst000066400000000000000000000003631320541242300164010ustar00rootroot00000000000000.. _xonsh_parser: ****************************************************** Parser (``xonsh.parser``) ****************************************************** .. automodule:: xonsh.parser :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/platform.rst000066400000000000000000000012221320541242300167240ustar00rootroot00000000000000.. _xonsh_platform: Platform-specific constants and implementations (``xonsh.platform``) ==================================================================== .. automodule:: xonsh.platform :members: :undoc-members: .. py:function:: scandir This is either `os.scandir` on Python 3.5+ or a function providing a compatibility layer for it. It is recommended for iterations over directory entries at a significantly higher speed than `os.listdir` on Python 3.5+. It also caches properties that are commonly used for filtering. :param str path: The path to scan for entries. :return: A generator yielding `DirEntry` instances. xonsh-0.6.0/docs/api/pretty.rst000066400000000000000000000004051320541242300164310ustar00rootroot00000000000000.. _xonsh_pretty: **************************************************************** Pretty printing (``xonsh.pretty``) **************************************************************** .. currentmodule:: xonsh.pretty .. automodule:: xonsh.pretty :members: xonsh-0.6.0/docs/api/proc.rst000066400000000000000000000004441320541242300160500ustar00rootroot00000000000000.. _xonsh_proc: ********************************************************** Python Procedures as Subprocess Commands (``xonsh.proc``) ********************************************************** .. automodule:: xonsh.proc :members: :undoc-members: :exclude-members: msvcrt _winapi xonsh-0.6.0/docs/api/prompt/000077500000000000000000000000001320541242300156725ustar00rootroot00000000000000xonsh-0.6.0/docs/api/prompt/base.rst000066400000000000000000000004041320541242300173340ustar00rootroot00000000000000.. _xonsh_prompt_base: *********************************************** Base prompt formatter (``xonsh.prompt.base``) *********************************************** .. automodule:: xonsh.prompt.base :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/prompt/cwd.rst000066400000000000000000000004241320541242300172010ustar00rootroot00000000000000.. _xonsh_prompt_cwd: ***************************************************** CWD related prompt formatter (``xonsh.prompt.cwd``) ***************************************************** .. 
automodule:: xonsh.prompt.cwd :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/prompt/env.rst000066400000000000000000000004211320541242300172110ustar00rootroot00000000000000.. _xonsh_prompt_env: **************************************************** Virtualenv prompt formatter (``xonsh.prompt.env``) **************************************************** .. automodule:: xonsh.prompt.env :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/prompt/gitstatus.rst000066400000000000000000000005251320541242300204550ustar00rootroot00000000000000.. _xonsh_prompt_gitstatus: *********************************************************************** Informative git status prompt formatter (``xonsh.prompt.gitstatus``) *********************************************************************** .. automodule:: xonsh.prompt.gitstatus :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/prompt/index.rst000066400000000000000000000003241320541242300175320ustar00rootroot00000000000000.. _api_prompt: ====================== Prompt formatter API ====================== Modules that provides ``FORMATTER_DICT``. .. toctree:: :maxdepth: 1 base cwd env gitstatus job vc xonsh-0.6.0/docs/api/prompt/job.rst000066400000000000000000000004021320541242300171720ustar00rootroot00000000000000.. _xonsh_prompt_jobs: *********************************************** Jobs prompt formatter (``xonsh.prompt.job``) *********************************************** .. automodule:: xonsh.prompt.job :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/prompt/vc.rst000066400000000000000000000005151320541242300170350ustar00rootroot00000000000000.. _xonsh_prompt_vc: *************************************************************************** Version control branch info prompt formatter (``xonsh.prompt.vc``) *************************************************************************** .. automodule:: xonsh.prompt.vc :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/ptk/000077500000000000000000000000001320541242300151475ustar00rootroot00000000000000xonsh-0.6.0/docs/api/ptk/completer.rst000066400000000000000000000004501320541242300176720ustar00rootroot00000000000000.. _xonsh_ptk_completer: ************************************************************* Prompt Toolkit Completer (``xonsh.ptk.completer``) ************************************************************* .. automodule:: xonsh.ptk.completer :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/ptk/history.rst000066400000000000000000000004551320541242300174060ustar00rootroot00000000000000.. _xonsh_ptk_history: **************************************************************** Prompt Toolkit History Object (``xonsh.ptk.history``) **************************************************************** .. automodule:: xonsh.ptk.history :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/ptk/key_bindings.rst000066400000000000000000000004561320541242300203530ustar00rootroot00000000000000.. _xonsh_ptk_key_bindings: ********************************************************** Prompt Toolkit Key Bindings (``xonsh.ptk.key_bindings``) ********************************************************** .. automodule:: xonsh.ptk.key_bindings :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/ptk/shell.rst000066400000000000000000000004121320541242300170050ustar00rootroot00000000000000.. 
_xonsh_ptk_shell: ****************************************************** Prompt Toolkit Shell (``xonsh.ptk.shell``) ****************************************************** .. automodule:: xonsh.ptk.shell :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/pyghooks.rst000066400000000000000000000003511320541242300167450ustar00rootroot00000000000000.. _xonsh_pyghooks: ****************************************************** Pygments Hooks (``xonsh.pyghooks``) ****************************************************** .. automodule:: xonsh.pyghooks :members: :undoc-members: xonsh-0.6.0/docs/api/readline_shell.rst000066400000000000000000000004231320541242300200540ustar00rootroot00000000000000.. _xonsh_readline_shell: ****************************************************** Readline Shell (``xonsh.readline_shell``) ****************************************************** .. automodule:: xonsh.readline_shell :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/replay.rst000066400000000000000000000003731320541242300164020ustar00rootroot00000000000000.. _xonsh_replay: ****************************************************** Replay History (``xonsh.replay``) ****************************************************** .. automodule:: xonsh.replay :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/shell.rst000066400000000000000000000003571320541242300162170ustar00rootroot00000000000000.. _xonsh_shell: ****************************************************** Main Shell Command Prompt (``xonsh.shell``) ****************************************************** .. automodule:: xonsh.shell :members: :inherited-members: xonsh-0.6.0/docs/api/tools.rst000066400000000000000000000003301320541242300162370ustar00rootroot00000000000000.. _xonsh_tools: ****************************************** Tools (``xonsh.tools``) ****************************************** .. automodule:: xonsh.tools :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/tracer.rst000066400000000000000000000003461320541242300163660ustar00rootroot00000000000000.. _xonsh_tracer: *********************************************** Tracer (``xonsh.tracer``) *********************************************** .. automodule:: xonsh.tracer :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/vox.rst000066400000000000000000000003611320541242300157170ustar00rootroot00000000000000.. _xonsh_vox: ****************************************************** Vox (``xontrib.voxapi``) ****************************************************** .. automodule:: xontrib.voxapi :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/wizard.rst000066400000000000000000000003511320541242300164020ustar00rootroot00000000000000.. _xonsh_wizard: ****************************************** Wizard Making Tools (``xonsh.wizard``) ****************************************** .. automodule:: xonsh.wizard :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/xonfig.rst000066400000000000000000000003731320541242300164000ustar00rootroot00000000000000.. _xonsh_xonfig: *********************************************** Xonsh Configuration Utility (``xonsh.xonfig``) *********************************************** .. automodule:: xonsh.xonfig :members: :undoc-members: :inherited-members: xonsh-0.6.0/docs/api/xoreutils/000077500000000000000000000000001320541242300164075ustar00rootroot00000000000000xonsh-0.6.0/docs/api/xoreutils/cat.rst000066400000000000000000000003771320541242300177170ustar00rootroot00000000000000.. 
_xonsh_xoreutils_cat: =============================================== Cat Command -- :mod:`xonsh.xoreutils.cat` =============================================== .. currentmodule:: xonsh.xoreutils.cat .. automodule:: xonsh.xoreutils.cat :members: xonsh-0.6.0/docs/api/xoreutils/echo.rst000066400000000000000000000004041320541242300200550ustar00rootroot00000000000000.. _xonsh_xoreutils_echo: =============================================== Echo Command -- :mod:`xonsh.xoreutils.echo` =============================================== .. currentmodule:: xonsh.xoreutils.echo .. automodule:: xonsh.xoreutils.echo :members: xonsh-0.6.0/docs/api/xoreutils/index.rst000066400000000000000000000003761320541242300202560ustar00rootroot00000000000000.. _api_xoreutils: ================== Core Utilities API ================== **Command Modules:** .. toctree:: :maxdepth: 1 cat echo pwd tee tty which **Helper Modules:** .. toctree:: :maxdepth: 1 uptime util xonsh-0.6.0/docs/api/xoreutils/pwd.rst000066400000000000000000000003771320541242300177420ustar00rootroot00000000000000.. _xonsh_xoreutils_pwd: =============================================== Pwd Command -- :mod:`xonsh.xoreutils.pwd` =============================================== .. currentmodule:: xonsh.xoreutils.pwd .. automodule:: xonsh.xoreutils.pwd :members: xonsh-0.6.0/docs/api/xoreutils/tee.rst000066400000000000000000000003771320541242300177250ustar00rootroot00000000000000.. _xonsh_xoreutils_tee: =============================================== Tee Command -- :mod:`xonsh.xoreutils.tee` =============================================== .. currentmodule:: xonsh.xoreutils.tee .. automodule:: xonsh.xoreutils.tee :members: xonsh-0.6.0/docs/api/xoreutils/tty.rst000066400000000000000000000003771320541242300177700ustar00rootroot00000000000000.. _xonsh_xoreutils_tty: =============================================== TTY Command -- :mod:`xonsh.xoreutils.tty` =============================================== .. currentmodule:: xonsh.xoreutils.tty .. automodule:: xonsh.xoreutils.tty :members: xonsh-0.6.0/docs/api/xoreutils/uptime.rst000066400000000000000000000004151320541242300204440ustar00rootroot00000000000000.. _xonsh_xoreutils_uptime: =============================================== System Uptime -- :mod:`xonsh.xoreutils.uptime` =============================================== .. currentmodule:: xonsh.xoreutils.uptime .. automodule:: xonsh.xoreutils.uptime :members: xonsh-0.6.0/docs/api/xoreutils/util.rst000066400000000000000000000004351320541242300201200ustar00rootroot00000000000000.. _xonsh_xoreutils_util: ====================================================== Core Utilites Utilities -- :mod:`xonsh.xoreutils.util` ====================================================== .. currentmodule:: xonsh.xoreutils.util .. automodule:: xonsh.xoreutils.util :members: xonsh-0.6.0/docs/api/xoreutils/which.rst000066400000000000000000000004111320541242300202370ustar00rootroot00000000000000.. _xonsh_xoreutils_which: =============================================== Which Command -- :mod:`xonsh.xoreutils.which` =============================================== .. currentmodule:: xonsh.xoreutils.which .. automodule:: xonsh.xoreutils.which :members: xonsh-0.6.0/docs/bash_to_xsh.rst000066400000000000000000000044401320541242300166350ustar00rootroot00000000000000Bash to Xonsh Translation Guide ================================ As you have probably figured out by now, xonsh is not ``sh``-lang compliant. 
If your muscles have memorized all of the Bash prestidigitations, this page will help you put a finger on how to do the equivalent task in xonsh. .. list-table:: :widths: 30 30 40 :header-rows: 1 * - Bash - Xonsh - Notes * - ``$NAME`` or ``${NAME}`` - ``$NAME`` - Look up an environment variable by name. * - ``echo "$HOME/hello"`` - ``echo "$HOME/hello"`` - Construct an argument using an environment variable. * - ``something/$SOME_VAR/$(some_command)`` - ``@('something/' + $SOME_VAR + $(some_command).strip())`` - Concatenate a variable or text with the result of running a command. * - ``${!VAR}`` - ``${var or expr}`` - Look up an environment variable via another variable name. In xonsh, this may be any valid expression. * - ``$(cmd args)`` or ```cmd args``` - ``@$(cmd args)`` - Command substitution (allow the output of a command to replace the command itself). Tokenizes and executes the output of a subprocess command as another subprocess. * - ``set -e`` - ``$RAISE_SUBPROC_ERROR = True`` - Cause a failure after a non-zero return code. Xonsh will raise a ``subprocess.CalledProcessError``. * - ``set -x`` - ``trace on`` - Turns on tracing of source code lines during execution. * - ``&&`` - ``and`` or ``&&`` - Logical-and operator for subprocesses. * - ``||`` - ``or`` as well as ``||`` - Logical-or operator for subprocesses. * - ``$?`` - ``_.rtn`` - Returns the exit code, or status, of the previous command. * - ``N=V command`` - ``with ${...}.swap(N=V): command`` - Set temporary environment variable(s) and execute the command. Use an indented block to execute many commands in the same context. * - ``!$`` - ``__xonsh_history__[-1, -1]`` - Get the last argument of the last command * - ``$<n>`` - ``$ARG<n>`` - Command line argument at index ``n``, so ``$ARG1`` is the equivalent of ``$1``. * - ``$@`` - ``$ARGS`` - List of all command line argument and parameter strings. xonsh-0.6.0/docs/changelog.rst000066400000000000000000000000351320541242300162570ustar00rootroot00000000000000.. include:: ../CHANGELOG.rstxonsh-0.6.0/docs/cmdhelp.py000066400000000000000000000042441320541242300155720ustar00rootroot00000000000000"""This module adds a reST directive to sphinx that generates alias documentation. For example:: .. command-help:: xonsh.aliases.source_foreign .. command-help:: xonsh.aliases.source_foreign -h will create help for aliases. """ import io import textwrap import importlib from docutils import nodes, statemachine, utils try: from docutils.utils.error_reporting import ErrorString # the new way except ImportError: from docutils.error_reporting import ErrorString # the old way from docutils.parsers.rst import Directive, convert_directive_function from docutils.parsers.rst import directives, roles, states from docutils.parsers.rst.roles import set_classes from docutils.transforms import misc from docutils.statemachine import ViewList from sphinx.util.nodes import nested_parse_with_titles from xonsh.tools import redirect_stdout, redirect_stderr class CommandHelp(Directive): """The command-help directive, which is based on constructing a list of string lines of restructured text and then parsing it into its own node. Note that this will add the '--help' flag automatically. """ required_arguments = 1 optional_arguments = 1 final_argument_whitespace = True option_spec = {} has_content = False def run(self): arguments = self.arguments lines = ['.. 
code-block:: none', ''] m, f = arguments[0].rsplit('.', 1) mod = importlib.import_module(m) func = getattr(mod, f) args = ['--help'] if len(arguments) == 1 else arguments[1:] stdout = io.StringIO() stderr = io.StringIO() with redirect_stdout(stdout), redirect_stderr(stderr): try: func(args) except SystemExit: pass stdout.seek(0) s = stdout.read() lines += textwrap.indent(s, ' ').splitlines() # hook to docutils src, lineno = self.state_machine.get_source_and_line(self.lineno) vl = ViewList(lines, source=src) node = nodes.paragraph() nested_parse_with_titles(self.state, vl, node) return node.children def setup(app): app.add_directive('command-help', CommandHelp) xonsh-0.6.0/docs/comparison.rst000066400000000000000000000025141320541242300165060ustar00rootroot00000000000000 Comparison ========== Xonsh is significantly different from most other shells or shell tools. The following table lists features and capabilities that various tools may or may not share. .. list-table:: :widths: 3 1 1 1 1 1 1 :header-rows: 1 :stub-columns: 1 * - - Bash - zsh - plumbum - fish - IPython - xonsh * - Sane language - - - ✓ - ✓ - ✓ - ✓ * - Easily scriptable - ✓ - ✓ - ✓ - ✓ - - ✓ * - Native cross-platform support - - - ✓ - ✓ - ✓ - ✓ * - Meant as a shell - ✓ - ✓ - - ✓ - - ✓ * - Tab completion - ✓ - ✓ - - ✓ - ✓ - ✓ * - Man-page completion - - - - ✓ - - ✓ * - Large standard library - - ✓ - - - ✓ - ✓ * - Typed variables - - - ✓ - ✓ - ✓ - ✓ * - Syntax highlighting - - - - ✓ - in notebook - w/ prompt-toolkit * - Pun in name - ✓ - - ✓ - - - ✓ * - Rich history - - - - - - ✓ xonsh-0.6.0/docs/conf.py000066400000000000000000000311421320541242300151000ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys import builtins import inspect import importlib os.environ['XONSH_DEBUG'] = '1' from xonsh import __version__ as XONSH_VERSION from xonsh.environ import DEFAULT_DOCS, Env from xonsh.xontribs import xontrib_metadata from xonsh import main from xonsh.commands_cache import CommandsCache spec = importlib.util.find_spec('prompt_toolkit') if spec is not None: # hacky runaround to import PTK-specific events builtins.__xonsh_env__ = Env() from xonsh.ptk.shell import events else: from xonsh.events import events sys.path.insert(0, os.path.dirname(__file__)) def setup(sphinx): from xonsh.pyghooks import XonshConsoleLexer sphinx.add_lexer("xonshcon", XonshConsoleLexer()) # -- General configuration ----------------------------------------------------- # Documentation is being built on readthedocs, this will be true. on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.imgmath', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.viewcode', #'sphinx.ext.autosummary', 'numpydoc', 'cmdhelp', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'sidebar' # General information about the project. 
project = u'xonsh' copyright = u'2015, Anthony Scopatz' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = XONSH_VERSION.rsplit('.',1)[0] # The full version, including alpha/beta/rc tags. release = XONSH_VERSION # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. exclude_patterns = ['api/blank.rst'] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' #pygments_style = 'friendly' #pygments_style = 'bw' #pygments_style = 'fruity' #pygments_style = 'manni' #pygments_style = 'tango' #pygments_style = 'pastie' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['xonsh.'] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. #html_theme = 'default' #html_theme = 'altered_nature' #html_theme = 'sphinxdoc' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. if not on_rtd: import cloud_sptheme as csp html_theme = 'cloud' html_theme_options = { 'max_width': '1250px', 'minimal_width': '700px', 'relbarbgcolor': '#000000', 'footerbgcolor': '#FFFFE7', 'sidebarwidth': '322px', 'sidebarbgcolor': '#e7e7ff', #'googleanalytics_id': 'UA-41934829-1', 'stickysidebar': False, 'highlighttoc': False, 'externalrefs': False, 'collapsiblesidebar': True, 'default_layout_text_size': "100%", # prevents division by zero error } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ["_theme", csp.get_theme_dir()] templates_path = ["_templates_overwrite"] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = '_static/ascii_conch_part_transparent_tight.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/magic_conch.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_style = "numpy_friendly.css" # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'xonshdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'xonsh.tex', u'xonsh documentation', u'Anthony Scopatz', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True #Autodocumentation Flags autodoc_member_order = "groupwise" autoclass_content = "both" autosummary_generate = [] # Prevent numpy from making silly tables numpydoc_show_class_members = False # # Auto-generate some docs # def make_envvars(): env = Env() vars = sorted(DEFAULT_DOCS.keys()) s = ('.. list-table::\n' ' :header-rows: 0\n\n') table = [] ncol = 3 row = ' {0} - :ref:`${1} <{2}>`' for i, var in enumerate(vars): star = '*' if i%ncol == 0 else ' ' table.append(row.format(star, var, var.lower())) table.extend([' -']*((ncol - len(vars)%ncol)%ncol)) s += '\n'.join(table) + '\n\n' s += ('Listing\n' '-------\n\n') sec = ('.. _{low}:\n\n' '{title}\n' '{under}\n' '{docstr}\n\n' '**configurable:** {configurable}\n\n' '**default:** {default}\n\n' '**store_as_str:** {store_as_str}\n\n' '-------\n\n') for var in vars: title = '$' + var under = '.' 
* len(title) vd = env.get_docs(var) s += sec.format(low=var.lower(), title=title, under=under, docstr=vd.docstr, configurable=vd.configurable, default=vd.default, store_as_str=vd.store_as_str) s = s[:-9] fname = os.path.join(os.path.dirname(__file__), 'envvarsbody') with open(fname, 'w') as f: f.write(s) def make_xontribs(): md = xontrib_metadata() names = sorted(d['name'] for d in md['xontribs'] if 'name' in d) s = ('.. list-table::\n' ' :header-rows: 0\n\n') table = [] ncol = 5 row = ' {0} - :ref:`{1} <{2}>`' for i, name in enumerate(names): star = '*' if i%ncol == 0 else ' ' table.append(row.format(star, name, name.lower())) table.extend([' -']*((ncol - len(names)%ncol)%ncol)) s += '\n'.join(table) + '\n\n' s += ('Information\n' '-----------\n\n') sec = ('.. _{low}:\n\n' '{title}\n' '{under}\n' ':Website: {url}\n' ':Package: {pkg}\n\n' '{desc}\n\n' '{inst}' '-------\n\n') for name in names: for d in md['xontribs']: if d.get('name', None) == name: break title = name under = '.' * len(title) desc = d.get('description', '') if not isinstance(desc, str): desc = ''.join(desc) pkgname = d.get('package', None) if pkgname is None: pkg = 'unknown' inst = '' else: pd = md['packages'].get(pkgname, {}) pkg = pkgname if 'url' in pd: pkg = '`{0} website <{1}>`_'.format(pkg, pd['url']) if 'license' in pd: pkg = pkg + ', ' + pd['license'] inst = '' installd = pd.get('install', {}) if len(installd) > 0: inst = ('**Installation:**\n\n' '.. code-block:: xonsh\n\n') for k, v in sorted(pd.get('install', {}).items()): cmd = "\n ".join(v.split('\n')) inst += (' # install with {k}\n' ' {cmd}\n\n').format(k=k, cmd=cmd) s += sec.format(low=name.lower(), title=title, under=under, url=d.get('url', 'unknown'), desc=desc, pkg=pkg, inst=inst) s = s[:-9] fname = os.path.join(os.path.dirname(__file__), 'xontribsbody') with open(fname, 'w') as f: f.write(s) def make_events(): names = sorted(vars(events).keys()) s = ('.. list-table::\n' ' :header-rows: 0\n\n') table = [] ncol = 3 row = ' {0} - :ref:`{1} <{2}>`' for i, var in enumerate(names): star = '*' if i%ncol == 0 else ' ' table.append(row.format(star, var, var.lower())) table.extend([' -']*((ncol - len(names)%ncol)%ncol)) s += '\n'.join(table) + '\n\n' s += ('Listing\n' '-------\n\n') sec = ('.. _{low}:\n\n' '``{title}``\n' '{under}\n' '{docstr}\n\n' '-------\n\n') for name in names: event = getattr(events, name) title = name docstr = inspect.getdoc(event) if docstr.startswith(name): # Assume the first line is a signature title, docstr = docstr.split('\n', 1) docstr = docstr.strip() under = '.' * (len(title) + 4) s += sec.format(low=name.lower(), title=title, under=under, docstr=docstr) s = s[:-9] fname = os.path.join(os.path.dirname(__file__), 'eventsbody') with open(fname, 'w') as f: f.write(s) make_envvars() make_xontribs() make_events() builtins.__xonsh_history__ = None builtins.__xonsh_env__ = {} builtins.__xonsh_commands_cache__ = CommandsCache() xonsh-0.6.0/docs/contact.rst000066400000000000000000000004201320541242300157610ustar00rootroot00000000000000 Contact Us ========== If you have questions or comments, please send them to the mailing list xonsh@googlegroups.com, page us on IRC, contact the author directly, or open an issue on GitHub. `Join the mailing list here! `_ xonsh-0.6.0/docs/contributing.rst000066400000000000000000000007151320541242300170440ustar00rootroot00000000000000 Contributing ============ We highly encourage contributions to xonsh! 
If you would like to contribute, it is as easy as forking the repository on GitHub, making your changes, and issuing a pull request. If you have any questions about this process don't hesitate to ask the mailing list (xonsh@googlegroups.com) or the `Gitter `_ channel. See the `Developer's Guide `_ for more information about contributing. xonsh-0.6.0/docs/customization.rst000066400000000000000000000133141320541242300172440ustar00rootroot00000000000000===================== Customizing ``xonsh`` ===================== .. contents:: :local: How do I... =========== .. _change_theme: ...change the current color theme? ---------------------------------- You can view the available styles by typing .. code-block:: console $ xonfig styles For a quick peek at the theme's colors you can do .. code-block:: console $ xonfig colors To set a new theme, do .. code-block:: console $ $XONSH_COLOR_STYLE='' .. _import_local_modules: ...import python modules from a local directory? ------------------------------------------------ The modules available for import in a given ``xonsh`` session depend on what's available in ``sys.path``. If you want to be able to import a module that resides in the current directory, ensure that there is an empty string as the first element of your ``sys.path`` .. code-block:: xonshcon $ import sys $ sys.path.insert(0, '') .. _default_shell: ...set ``xonsh`` as my default shell? ------------------------------------- If you want to use xonsh as your default shell, you will first have to add xonsh to ``/etc/shells``. First ensure that xonsh is on your ``$PATH`` .. code-block:: console $ which xonsh Then, as root, add xonsh to the shell list .. code-block:: console # which xonsh >> /etc/shells To change shells, run .. code-block:: console $ chsh -s $(which xonsh) You will have to log out and log back in before the changes take effect. .. _select_completion_result: ...select a tab completion result without executing the current line? --------------------------------------------------------------------- In the ``prompt_toolkit`` shell, you can cycle through possible tab-completion results using the TAB key and use ENTER to select the completion you want. By default, ENTER will also execute the current line. If you would prefer to not automatically execute the line (say, if you're constructing a long pathname), you can set .. code-block:: xonshcon $COMPLETIONS_CONFIRM=True in your ``xonshrc`` .. _add_args_builtin_alias: ...add a default argument to a builtin ``xonsh`` alias? ------------------------------------------------------- If you want to add a default argument to a builtin alias like ``dirs`` the standard alias definition method will fail. In order to handle this case you can use the following solution in your ``xonshrc``: .. code-block:: python from xonsh.dirstack import dirs def _verbose_dirs(args, stdin=None): return dirs(['-v'] + args, stdin=stdin) aliases['dirs'] = _verbose_dirs .. _terminal_tabs: ...make terminal tabs start in the correct directory? ----------------------------------------------------- If you use Gnome Terminal or another VTE terminal and it doesn't start new tabs in the CWD of the original TAB, this is because of a custom VTE interface. To fix this, please add ``{vte_new_tab_cwd}`` somewhere to you prompt: .. code-block:: xonsh $PROMPT = '{vte_new_tab_cwd}' + $PROMPT This will issue the proper escape sequence to the terminal without otherwise affecting the displayed prompt. .. _open_terminal_here: ...set up the "Open Terminal Here" action in Thunar? 
---------------------------------------------------- If you use Thunar and "Open Terminal Here" action does not work, you can try to replace a command for this action by the following: .. code-block:: sh exo-open --working-directory %f --launch TerminalEmulator xonsh --shell-type=best In order to do this, go to ``Edit > Configure custom actions...``, then choose ``Open Terminal Here`` and click on ``Edit currently selected action`` button. .. _unicode_troubles: ...use utf-8 characters in xonsh? --------------------------------- If you are unable to use utf-8 (ie. non-ascii) characters in xonsh. For example if you get the following output .. code-block:: xonsh echo "ßðđ" xonsh: For full traceback set: $XONSH_SHOW_TRACEBACK = True UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-2: ordinal not in range(128) The problem might be: - Your locale is not set to utf-8, to check this you can set the content of the environment variable ``LC_TYPE`` - Your locale is correctly set but **after** xonsh started. This is typically the case if you set your ``LC_TYPE`` inside your ``.xonshrc`` and xonsh is your default/login shell. To fix this you should see the documentation of your operating system to know how to correctly setup environment variables before the shell start (``~/.pam_environment`` for example) .. _fix_libgcc_core_dump: ...fix a ``libgcc_s.so.1`` error? --------------------------------- On certain flavors of Linux you may periodically encounter this error message when starting ``xonsh``: .. code-block:: xonshcon libgcc_s.so.1 must be installed for pthread_cancel to work Aborted (core dumped) This is due to an upstream Python problem and can be fixed by setting ``LD_PRELOAD``: .. code-block:: bash $ env LD_PRELOAD=libgcc_s.so.1 xonsh ...color my man pages? ---------------------- You can add add `man page color support`_ using ``less`` environment variables: .. code-block:: xonsh # Coloured man page support # using 'less' env vars (format is '\E[;m') $LESS_TERMCAP_mb = "\033[01;31m" # begin blinking $LESS_TERMCAP_md = "\033[01;31m" # begin bold $LESS_TERMCAP_me = "\033[0m" # end mode $LESS_TERMCAP_so = "\033[01;44;36m" # begin standout-mode (bottom of screen) $LESS_TERMCAP_se = "\033[0m" # end standout-mode $LESS_TERMCAP_us = "\033[00;36m" # begin underline $LESS_TERMCAP_ue = "\033[0m" # end underline .. _man page color support: https://wiki.archlinux.org/index.php/Color_output_in_console#less xonsh-0.6.0/docs/dependencies.rst000066400000000000000000000024031320541242300167570ustar00rootroot00000000000000Dependencies ============ Xonsh currently has the following external dependencies, *Run Time:* #. Python v3.4+ #. PLY (optional, included with xonsh) Pip supports "extra" dependencies in the form of ``xonsh[ptk,linux]``, where the list in the brackets identify the optional features Xonsh currently has the following extras #. ``ptk``: prompt-toolkit: *advanced readline library, line-editing* #. ``pygments``: *syntax-highlighting* #. ``proctitle``: setproctitle: *change the title of terminal to reflect the current subprocess* #. ``linux``: distro: *linux specific platform information* #. ``mac``: gnureadline: *GNU's featureful version of readline* #. 
``win``: win_unicode_console: *enables the use of Unicode in windows consoles* In addition, xonsh integrates with Jupyter, an in-browser REPL, enabling the use of xonsh in jupyter notebooks Development Dependencies ======================== If you want to develop xonsh, it is extremely recommended to install the dependencies listed in `requirements-docs.txt `_ (to generate documentation) and `requirements-tests.txt `_ (to run the test suite). xonsh-0.6.0/docs/development.rst000066400000000000000000000002451320541242300166550ustar00rootroot00000000000000 Development Spiral ================== .. toctree:: :titlesonly: :maxdepth: 2 api/index advanced_events devguide changelog faq todoxonsh-0.6.0/docs/devguide.rst000077700000000000000000000000001320541242300207752../CONTRIBUTING.rstustar00rootroot00000000000000xonsh-0.6.0/docs/envvars.rst000066400000000000000000000003471320541242300160220ustar00rootroot00000000000000Environment Variables ===================== The following displays information about the environment variables that affect xonsh performance in some way. It also lists their default values, if applicable. .. include:: envvarsbody xonsh-0.6.0/docs/events.rst000066400000000000000000000002711320541242300156360ustar00rootroot00000000000000Core Events =========== The following events are defined by xonsh itself. For more information about events, see `the events tutorial `_. .. include:: eventsbody xonsh-0.6.0/docs/faq.rst000066400000000000000000000205131320541242300151020ustar00rootroot00000000000000 Frequently Asked Questions ========================== Ok, so, maybe no one actually asked them. 1. Why xonsh? ------------- The idea for xonsh first struck while I was reviewing the Bash chapter (written by my co-author `Katy Huff `_) of `Effective Computation in Physics `_. In the book, we spend a bunch of time describing important, but complex ideas, such as piping. However, we don't even touch on more 'basic' aspects of the Bash language, such as if-statements or loops. Even though I have been using Bash for well over a decade, I am not even sure I *know how* to add two numbers together in it or consistently create an array. This is normal. If the tool is so bad, then maybe we need a new tool. So xonsh is really meant to solve the problem that other shells don't "fit your brain." In some programming situations this is OK because of what you get (an optimizing compiler, type safety, provable correctness, register access). But a shell that doesn't fit your brain is only a liability. Coincidentally, within the week, `an article floated to the top of Hacker News `_ that teaches you how to write a shell in C. So I thought, "It can't be that hard..." And thus, `again `_, I entered the danger zone. 2. Why not another exotic shell, such as ``fish``? ----------------------------------------------------- While many other alternative shells have an amazing suite of features as well as much improved syntax of traditional options, none of them are quite as beautiful as Python. In xonsh, you get the best of all possible worlds. A syntax that already fits your brain and any features that you desire. 3. Why not just use the IPython command line interface? ------------------------------------------------------- There are two serious drawbacks to this approach - though, believe me, I have tried it. The first is that typing ``!`` before every subprocess command is extremely tedious. 
I think that this is because it is a prefix operator and thus gets in the way of what you are trying to do right as you start to try to do it. Making ``!`` a postfix operator could address some of this, but would probably end up being annoying, though not nearly as jarring. The second reason is that tab completion of subprocess commands after an ``!`` does not work. This is a deal breaker for day-to-day use. 4. So how does this all work? ----------------------------- We use `PLY `_ to tokenize and parse xonsh code. This is heavily inspired by how `pycparser `_ uses PLY. From our parser, we construct an abstract syntax tree (AST) only using nodes found in the Python ``ast`` standard library module. This allows us to compile and execute the AST using the normal Python tools. Of course, xonsh has special builtins, so the proper context (builtins, globals, and locals) must be set up prior to actually executing any code. However, the AST can be constructed completely independently of any context...mostly. While the grammar of the xonsh language is context-free, it was convenient to write the executer in a way that is slightly context sensitive. This is because certain expressions are ambiguous as to whether they belong to Python-mode or subprocess-mode. For example, most people will look at ``ls -l`` and see a listing command. However, if ``ls`` and ``l`` were Python variables, this could be transformed to the equivalent (Python) expressions ``ls - l`` or ``ls-l``, neither of which is a valid listing command. What xonsh does to overcome such ambiguity is to check if the names in the expression (``ls`` and ``l`` above) are in the present Python context. If they are, then it takes the line to be valid xonsh as written. If one of the names cannot be found, then xonsh assumes that the left-most name is an external command. It thus attempts to parse the line after wrapping it in an uncaptured subprocess call ``![]``. If the wrapped version successfully parses, the ``![]`` version stays. Otherwise, the original line is retained. All of the context sensitive parsing occurs as an AST transformation before any code is executed. This ensures that code will never be partially executed before failing. It is critical to note that the context sensitive parsing is a convenience meant for humans. If ambiguity remains or exactness is required, simply manually use the ``![]``, ``!()``, ``$[]`` or ``$()`` operators on your code. 5. Context-sensitive parsing is gross -------------------------------------- Yes, context-sensitive parsing is gross. But the point of xonsh is that it uses xontext-sensitive parsing and is ultimately a lot less gross than other shell languages, such as Bash. Furthermore, its use is heavily limited here. 6. My Branches are Timing Out?! ------------------------------- Depending on your system, setup, and repository sizes, computing branch names and colors (i.e. whether the branch is dirty or not) can be a pretty slow operation. This is bad news because xonsh can try to compute these each time it formats the ``$PROMPT``. In order to keep xonsh snappy, we have implemented branch computation timeouts. This is set to a nominal value (usually 0.1 sec) via the ``$VC_BRANCH_TIMEOUT`` environment variable. Feel free to set this to any limit that you feel comfortable with. So if you don't mind a potentially slow prompt, set it to 1, 5, 20, or 100 seconds!
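For example, a more generous limit can be set right from the prompt or in your ``xonshrc``; the two-second value below is only an illustration, so pick whatever limit suits your repositories:

.. code-block:: xonsh

    # give branch name/dirty-state lookups up to 2 seconds before timing out
    $VC_BRANCH_TIMEOUT = 2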
However, if you never want to deal with a slow prompt or seeing this timeout message, you can remove the ``{curr_branch}``, ``{branch_color}`` and ``{branch_bg_color}`` portions of your ``$PROMPT``, and these values will never be computed. It is also worth noting that ``{branch_color}`` is usually the slow poke. Just removing the color lookup from the ``$PROMPT`` can still provide the branch name while being fast enough. 7. exec ------- The notion of ``exec`` is a bit of a tricky beast in xonsh. Both Python and basically every other shell language have an exec, but the two perform radically different operations. * In Python, ``exec`` is a builtin function that executes strings, ASTs, or code objects in a provided namespace. * In sh-langs (and elsewhere), ``exec`` is a command that runs another command directly in the current process. These two ideas are central to both languages; without them, most programs cannot be run. Luckily, even though they share a name, they have distinct syntax and don't share a namespace. Therefore, in xonsh, .. code-block:: xonshcon # exec() as a function is run as Python's exec >>> exec('x = 41; x += 1', globals(), locals()) # while exec as a statement is like bash's exec >>> exec gdb (gdb) Yes, this is potentially confusing. This is particularly true since earlier versions of Python *had* an exec statement whose syntax would have clashed with the sh-lang command form. Yes, we are sorry. But the alternative is that important programs that use exec under the covers, such as SSH and gdb, would not be usable when xonsh is set as the default shell. (Note that we can't rename the exec() function since Python would fail.) As usability is the most important aspect of a shell, xonsh trades a small amount of potential confusion for a large class of important commands. All of the above being true, if the exec duality is causing you problems there are a few things that you can do to mitigate the confusion. The first is that you can remove the ``exec`` alias and use the ``xexec`` alias instead: .. code-block:: xonshcon >>> del aliases['exec'] >>> xexec ssh Alternatively, you can always be sure to run the exec command explicitly in subprocess mode with ``![]`` or ``!()``: .. code-block:: xonshcon >>> ![exec bash] Lastly, you can assign the result of the exec() function to a throwaway variable (since the return is always None): .. code-block:: xonshcon >>> _ = exec('x = 42') Hopefully, though, this trade-off makes sense and you never have to worry about it...unless chimera slaying is your bag. 8. Gotchas ---------- There are a few gotchas when using xonsh across multiple versions of Python, since behavior can differ depending on what the underlying Python supports. For example, double star globbing ``**`` will only work on Python 3.5+ (i.e. not on 3.4), as recursive globbing is `new in Python 3.5 `_ xonsh-0.6.0/docs/guides.rst000066400000000000000000000004211320541242300156070ustar00rootroot00000000000000 Guides ====== .. toctree:: :titlesonly: :maxdepth: 2 tutorial tutorial_hist tutorial_macros tutorial_xontrib tutorial_events tutorial_completers tutorial_history_backend tutorial_ptk bash_to_xsh python_virtual_environmentsxonsh-0.6.0/docs/index.rst000066400000000000000000000004521320541242300154420ustar00rootroot00000000000000 .. include:: intro.rst .. include:: comparison.rst .. include:: installation.rst .. include:: dependencies.rst .. include:: news.rst .. include:: guides.rst .. include:: setup.rst .. include:: contributing.rst .. include:: contact.rst ..
include:: links.rst .. include:: development.rst xonsh-0.6.0/docs/installation.rst000066400000000000000000000002141320541242300170300ustar00rootroot00000000000000 Installation ============ .. toctree:: :titlesonly: :maxdepth: 2 dependencies linux osx windows customizationxonsh-0.6.0/docs/intro.rst000066400000000000000000000067761320541242300155050ustar00rootroot00000000000000the xonsh shell =============== .. raw:: html


Xonsh is a Python-powered, cross-platform, Unix-gazing shell language and command prompt. The language is a superset of Python 3.4+ with additional shell primitives that you are used to from Bash and IPython. It works on all major systems including Linux, Mac OSX, and Windows. Xonsh is meant for the daily use of experts and novices alike. **Try it out!** .. raw:: html

.. xonsh-0.6.0/docs/links.rst000066400000000000000000000037431320541242300154610ustar00rootroot00000000000000 Helpful Links ============= * `Documentation `_ * `Gitter `_ * `Mailing list `_ * `IRC: channel #xonsh on OFTC `_ * `GitHub Repository `_ * :ref:`genindex` * :ref:`modindex` * :ref:`search` .. raw:: html Fork me on GitHub xonsh-0.6.0/docs/linux.rst000066400000000000000000000063211320541242300154730ustar00rootroot00000000000000========================== Linux Guide ========================== Installation ============ You can install xonsh using ``conda``, ``pip``, or from source. **conda:** .. code-block:: console $ conda config --add channels conda-forge $ conda install xonsh **pip:** .. code-block:: console $ pip install xonsh **source:** Download the source `from github `_ (`zip file `_), then run the following from the source directory, .. code-block:: console $ python setup.py install Debian/Ubuntu users can install xonsh from the repository with: **apt:** .. code-block:: console $ apt install xonsh Fedora users can install xonsh from the repository with: **dnf:** .. code-block:: console $ dnf install xonsh Arch Linux users can install xonsh from the Arch User Repository with: **yaourt:** .. code-block:: console $ yaourt -Sa xonsh **aura:** .. code-block:: console $ aura -A xonsh **pacaur:** .. code-block:: console $ pacaur -S xonsh Note that some of these may require ``sudo``. If you run into any problems, please let us know! .. include:: dependencies.rst Customization ============= See the `xonsh customization guide `_ for more details on setting up ``xonsh``! Possible conflicts with Bash ============================ Depending on how your installation of Bash is configured, Xonsh may have trouble loading certain shell modules. Particularly if you see errors similar to this when launching Xonsh: .. code-block:: console bash: module: line 1: syntax error: unexpected end of file bash: error importing function definition for `BASH_FUNC_module' bash: scl: line 1: syntax error: unexpected end of file bash: error importing function definition for `BASH_FUNC_scl' bash: module: line 1: syntax error: unexpected end of file bash: error importing function definition for `BASH_FUNC_module' bash: scl: line 1: syntax error: unexpected end of file bash: error importing function definition for `BASH_FUNC_scl' ...You can correct the problem by unsetting the modules, by adding the following lines to your ``~/.bashrc file``: .. code-block:: console unset module unset scl Default Ubuntu .bashrc breaks Foreign Shell Functions ===================================================== Xonsh supports importing functions from foreign shells using the `ForeignShellFunctionAlias` class, which calls functions as if they were aliases. This is implemented by executing a command that sources the file containing the function definition and then immediately calls the function with any necessary arguments. The default user `~/.bashrc` file in Ubuntu 15.10 has the following snippet at the top, which causes the script to exit immediately if not run interactively. .. code-block:: bash # If not running interactively, don't do anything case $- in *i*) ;; *) return;; esac This means that any function you have added to the file after this point will be registered as a xonsh alias but will fail on execution. Previous versions of Ubuntu have a different test for interactivity at the top of the file that yields the same problem. 
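One possible workaround (sketched below for a typical Ubuntu-style ``~/.bashrc``; the ``my_helper`` function is just a hypothetical example) is to define any functions you want xonsh to pick up *before* the interactivity guard, or to keep them in a separate file that has no such guard:

.. code-block:: bash

    # ~/.bashrc -- functions defined above the guard stay importable by xonsh
    my_helper () {
        # hypothetical example function
        echo "hello from bash"
    }

    # If not running interactively, don't do anything
    case $- in
        *i*) ;;
          *) return;;
    esac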
xonsh-0.6.0/docs/news.rst000066400000000000000000000001551320541242300153070ustar00rootroot00000000000000 News & Media ============ .. toctree:: :titlesonly: :maxdepth: 2 talks_and_articles quotesxonsh-0.6.0/docs/osx.rst000066400000000000000000000027161320541242300151510ustar00rootroot00000000000000========================== OSX Guide ========================== Installation ============ You can install xonsh using homebrew, conda, pip, or from source. **homebrew:** .. code-block:: console $ brew install xonsh **conda:** .. code-block:: console $ conda config --add channels conda-forge $ conda install xonsh **pip:** .. code-block:: console $ pip3 install xonsh **source:** Download the source `from github `_ (`zip file `_), then run the following from the source directory, .. code-block:: console $ python3 setup.py install Extras for OSX ============== On Mac OSX, it is *strongly* recommended to install the ``gnureadline`` library if using the readline shell. ``gnureadline`` can be installed via pip: .. code-block:: console $ pip3 install gnureadline Xonsh has support for using bash completion files on the shell, to use it you need to install the bash-completion package. The regular bash-completion package uses v1 which mostly works, but `occasionally has rough edges `_ so we recommend using bash-completion v2 which is installed using the bash-completion2 package. .. code-block:: console $ brew install bash-completion2 .. include:: dependencies.rst Customization ============= See the `xonsh customization guide `_ for more details on setting up ``xonsh``! xonsh-0.6.0/docs/python_virtual_environments.rst000066400000000000000000000050041320541242300222270ustar00rootroot00000000000000.. highlight:: bash .. _python_virtual_environments: =========================== Python Virtual Environments =========================== The usual tools for creating Python virtual environments—``venv``, ``virtualenv``, ``pew``—don't play well with xonsh. We won't dig deeper into why it is so, but the general gist is that these tools are hacky and hard-coded for bash, zsh, and other mainstream shells. Luckily, xonsh ships with its own virtual environments manager called **Vox**. Vox === First, load the vox xontrib:: $ xontrib load vox To create a new environment with vox, run ``vox new ``:: $ vox new myenv Creating environment... Environment "myenv" created. Activate it with "vox activate myenv". By default, environments are stored in ``~/.virtualenvs``, but you can override it by setting the ``$VIRTUALENV_HOME`` environment variable. To see all existing environments, run ``vox list``:: $ vox list Available environments: eggs myenv spam To activate an environment, run ``vox activate ``:: $ vox activate myenv Activated "myenv". Instead of ``activate``, you can call ``workon`` or ``enter``. To exit the currently active environment, run ``vox deactivate`` or ``vox exit``:: $ vox deactivate Deactivated "myenv". To remove an environment, run ``vox remove ``:: $ vox remove myenv Environment "myenv" removed. Instead of ``remove``, you can call ``rm``, ``delete``, or ``del``. To see all available commands, run ``vox help``, ``vox --help``, or ``vox -h``:: Vox is a virtual environment manager for xonsh. Available commands: vox new Create new virtual environment in $VIRTUALENV_HOME vox activate (workon, enter) Activate virtual environment vox deactivate (exit) Deactivate current virtual environment vox list (ls) List all available environments vox remove (rm, delete, del) ... 
Remove virtual environments vox help (-h, --help) Show help ``virtualenv`` like prompt -------------------------- Although it's included in the default prompt, you can customize your prompt to automatically update in the same way as ``virtualenv``. Simply add the ``'{env_name}'`` variable to your ``$PROMPT``:: $PROMPT = '{env_name: {}}' + restofmyprompt Note that you do **not** need to load the ``vox`` xontrib for this to work. For more details see :ref:`customprompt`. xonsh-0.6.0/docs/quotes.rst000066400000000000000000000011571320541242300156560ustar00rootroot00000000000000========== Quotes ========== `@gilforsyth `_ **says,** .. epigraph:: “Just stumbled across xonsh by @scopatz -- holy cow it's amazing. I've never been so happy to rewrite a .rc file” `@wbuthod `_ **says,** .. epigraph:: “I've tweeted about Xonsh before, and finally spent a day using it exclusively. I must have it on ALL PLATFORMS now.” `@biochemistries `_ **says,** .. epigraph:: “@pathogenomenick @btnaughton @lexnederbragt the dark wizardry of @scopatz :-) check out xon.sh” xonsh-0.6.0/docs/setup.rst000066400000000000000000000002611320541242300154710ustar00rootroot00000000000000 Config Files and Settings ========================= .. toctree:: :titlesonly: :maxdepth: 2 xonshrc xonshconfig envvars aliases xontribs eventsxonsh-0.6.0/docs/sidebar.rst000066400000000000000000000004101320541242300157360ustar00rootroot00000000000000:orphan: .. title:: Documentation Index .. toctree:: :maxdepth: 3 the xonsh shell intro comparison installation dependencies news guides setup contributing development API contact linksxonsh-0.6.0/docs/talks_and_articles.rst000066400000000000000000000016651320541242300201700ustar00rootroot00000000000000========================== Talks & Articles ========================== Here are some talks, articles, and other sundry about your favorite shell. Talks ============ **Python Nordeste 2016:** presented by Lucas Inojosa http://lucasicf.github.io/talks/shell_python/ **PyCon 2016:** presented by Anthony Scopatz .. raw:: html
Articles ========= * `First Impressions of Xonsh `_, FusionBox, June 20, 2016. * `InfoWorld on xonsh `_, Paul Krill, June 2nd, 2016. * `Slashdot on xonsh `_, June 2016. xonsh-0.6.0/docs/todo.rst000066400000000000000000000017311320541242300153010ustar00rootroot00000000000000========================== Wishlist & To-Dos ========================== Here is what is targeted for future versions of xonsh. Any one wishing to tackle any of these or add their own is encouraged to do so! 1. Tab completion from man pages --------------------------------- One of the more genius ideas I first encountered from ``fish`` is the idea that man pages can be used to supply matches to tab-completion. In principle this is not that hard. First, we just need to use ``man2html`` and then parse the html. 2. urwid based command prompt ----------------------------- Moving to urwid would allow for a whole new depth of user interaction. There could be syntax highlighting as you type, a real interface for environment variables, and so on. The command prompt is only the start! 3. Support and testing for other platforms ------------------------------------------- This includes: * Support for Python 2.7 * Support for future versions of Python * Testing on Mac OSX xonsh-0.6.0/docs/tutorial.rst000066400000000000000000001534361320541242300162110ustar00rootroot00000000000000.. _tutorial: ******************* Tutorial ******************* xonsh is a shell language and command prompt. Unlike other shells, xonsh is based on Python, with additional syntax added that makes calling subprocess commands, manipulating the environment, and dealing with the file system easy. The xonsh command prompt gives users interactive access to the xonsh language. While all Python code is also xonsh, not all Bash code can be used in xonsh. That would defeat the purpose, and Python is better anyway! Still, xonsh is Bash-wards compatible in the ways that matter, such as for running commands, reading in the Bash environment, and utilizing Bash tab completion. The purpose of this tutorial is to teach you xonsh. There are many excellent guides out there for learning Python, and this will not join their ranks. Similarly, you'd probably get the most out of this tutorial if you have already used a command prompt or interactive interpreter. Let's dive in! Starting xonsh ======================== Assuming you have successfully installed xonsh (see http://xon.sh), you can start up the xonsh interpreter via the ``xonsh`` command. Suppose you are in a lesser terminal: .. code-block:: console $ xonsh snail@home ~ $ Now we are in a xonsh shell. Our username happens to be ``snail``, our hostname happens to be ``home``, and we are in our home directory (``~``). Alternatively, you can setup your terminal emulator (xterm, gnome-terminal, etc) to run xonsh automatically when it starts up. This is recommended. Basics ======================= The xonsh language is based on Python, and the xonsh shell uses Python to interpret any input it receives. This makes simple things, like arithmetic, simple: .. code-block:: xonshcon >>> 1 + 1 2 .. note:: From here on we'll be using ``>>>`` to prefix (or prompt) any xonsh input. This follows the Python convention and helps trick syntax highlighting, though ``$`` is more traditional for shells. Since this is just Python, we are able import modules, print values, and use other built-in Python functionality: .. 
code-block:: xonshcon >>> import sys >>> print(sys.version) 3.4.2 |Continuum Analytics, Inc.| (default, Oct 21 2014, 17:16:37) [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] We can also create and use literal data types, such as ints, floats, lists, sets, and dictionaries. Everything that you are used to if you already know Python is there: .. code-block:: xonshcon >>> d = {'xonsh': True} >>> d.get('bash', False) False The xonsh shell also supports multi-line input for more advanced flow control. The multi-line mode is automatically entered whenever the first line of input is not syntactically valid on its own. Multi-line mode is then exited when enter (or return) is pressed when the cursor is in the first column. .. code-block:: xonshcon >>> if True: ... print(1) ... else: ... print(2) ... 1 Flow control, of course, includes loops. .. code-block:: xonshcon >>> for i, x in enumerate('xonsh'): ... print(i, x) ... 0 x 1 o 2 n 3 s 4 h We can also define and call functions and classes. I'll mostly spare you the details, but this *is* pretty cool: .. code-block:: xonshcon >>> def f(): ... return "xonsh" ... >>> f() 'xonsh' For easier indentation, Shift+Tab will enter 4 spaces. And that about wraps it up for the basics section. It is just like Python. Environment Variables ======================= Environment variables are written as ``$`` followed by a name. For example, ``$HOME``, ``$PWD``, and ``$PATH``. .. code-block:: xonshcon >>> $HOME '/home/snail' You can set (and export) environment variables like you would set any other variable in Python. The same is true for deleting them too. .. code-block:: xonshcon >>> $GOAL = 'Become the Lord of the Files' >>> print($GOAL) Become the Lord of the Files >>> del $GOAL Very nice. The Environment Itself ``${...}`` --------------------------------- All environment variables live in the built-in ``${...}`` (aka ``__xonsh_env__``) mapping. You can access this mapping directly, but in most situations, you shouldn’t need to. If you want for example to check if an environment variable is present in your current session (say, in your awesome new ``xonsh`` script) you can use the membership operator: .. code-block:: xonshcon >>> 'HOME' in ${...} True To get information about a specific environment variable you can use the :func:`~xonsh.environ.Env.help` method. .. code-block:: xonshcon >>> ${...}.help('XONSH_DEBUG') One helpful method on the ``${...}`` is :func:`~xonsh.environ.Env.swap`. It can be used to temporarily set an environment variable: .. code-block:: xonshcon >>> with ${...}.swap(SOMEVAR='foo'): ... echo $SOMEVAR ... ... foo >>> echo $SOMEVAR >>> Environment Lookup with ``${}`` ------------------------------------- The ``$NAME`` is great as long as you know the name of the environment variable you want to look up. But what if you want to construct the name programmatically, or read it from another variable? Enter the ``${}`` operator. .. warning:: In Bash, ``$NAME`` and ``${NAME}`` are syntactically equivalent. In xonsh, they have separate meanings. We can place any valid Python expression inside of the curly braces in ``${}``. This result of this expression will then be used to look up a value in the environment. Here are a couple of examples in action: .. code-block:: xonshcon >>> x = 'USER' >>> ${x} 'snail' >>> ${'HO' + 'ME'} '/home/snail' Not bad, xonsh, not bad. Environment Types ----------------- Like other variables in Python, environment variables have a type. Sometimes this type is imposed based on the variable name. 
The current rules are pretty simple: * ``\w*PATH``: any variable whose name ends in PATH is a list of strings. * ``\w*DIRS``: any variable whose name ends in DIRS is a list of strings. * ``XONSH_HISTORY_SIZE``: this variable is an int. * ``CASE_SENSITIVE_COMPLETIONS``: this variable is a boolean. xonsh will automatically convert back and forth to untyped (string-only) representations of the environment as needed (mostly by subprocess commands). When in xonsh, you'll always have the typed version. Here are a couple of PATH examples: .. code-block:: xonshcon >>> $PATH ['/home/snail/.local/bin', '/home/snail/sandbox/bin', '/home/snail/miniconda3/bin', '/usr/local/bin', '/usr/local/sbin', '/usr/bin', '/usr/sbin', '/bin', '/sbin', '.'] >>> $LD_LIBRARY_PATH ['/home/snail/.local/lib', ''] Also note that *any* Python object can go into the environment. It is sometimes useful to have more sophisticated types, like functions, in the environment. There are handful of environment variables that xonsh considers special. They can be seen on the `Environment Variables page `_. .. note:: In subprocess mode, referencing an undefined environment variable will produce an empty string. In Python mode, however, a ``KeyError`` will be raised if the variable does not exist in the environment. Running Commands ============================== As a shell, xonsh is meant to make running commands easy and fun. Running subprocess commands should work like any other in any other shell. .. code-block:: xonshcon >>> echo "Yoo hoo" Yoo hoo >>> cd xonsh >>> ls build docs README.rst setup.py xonsh __pycache__ dist license scripts tests xonsh.egg-info >>> dir scripts xonsh xonsh.bat >>> git status On branch master Your branch is up-to-date with 'origin/master'. Changes not staged for commit: (use "git add ..." to update what will be committed) (use "git checkout -- ..." to discard changes in working directory) modified: docs/tutorial.rst no changes added to commit (use "git add" and/or "git commit -a") >>> exit This should feel very natural. Python-mode vs Subprocess-mode ================================ It is sometimes helpful to make the distinction between lines that operate in pure Python mode and lines that use shell-specific syntax, edit the execution environment, and run commands. Unfortunately, it is not always clear from the syntax alone what mode is desired. This ambiguity stems from most command line utilities looking a lot like Python operators. Take the case of ``ls -l``. This is valid Python code, though it could have also been written as ``ls - l`` or ``ls-l``. So how does xonsh know that ``ls -l`` is meant to be run in subprocess-mode? For any given line that only contains an expression statement (expr-stmt, see the Python AST docs for more information), if all the names cannot be found as current variables xonsh will try to parse the line as a subprocess command instead. In the above, if ``ls`` and ``l`` are not variables, then subprocess mode will be attempted. If parsing in subprocess mode fails, then the line is left in Python-mode. In the following example, we will list the contents of the directory with ``ls -l``. Then we'll make new variable names ``ls`` and ``l`` and then subtract them. Finally, we will delete ``ls`` and ``l`` and be able to list the directories again. .. 
code-block:: xonshcon >>> # this will be in subproc-mode, because ls doesn't exist >>> ls -l total 0 -rw-rw-r-- 1 snail snail 0 Mar 8 15:46 xonsh >>> # set ls and l variables to force python-mode >>> ls = 44 >>> l = 2 >>> ls -l 42 >>> # deleting ls will return us to subproc-mode >>> del ls >>> ls -l total 0 -rw-rw-r-- 1 snail snail 0 Mar 8 15:46 xonsh The determination between Python- and subprocess-modes is always done in the safest possible way. If anything goes wrong, it will favor Python-mode. The determination between the two modes is done well ahead of any execution. You do not need to worry about partially executed commands - that is impossible. If you absolutely want to run a subprocess command, you can always force xonsh to do so with the syntax that we will see in the following sections. Quoting ======= Single or double quotes can be used to remove the special meaning of certain characters or words to xonsh. If a subprocess command contains characters that collide with xonsh syntax then quotes must be used to force xonsh to not interpret them. .. code-block:: xonshcon >>> echo ${ ... SyntaxError: :1:5: ('code: {',) echo ${ ^ >>> echo '${' ${ .. warning:: There is no notion of an escaping character in xonsh like the backslash (\\) in bash. Captured Subprocess with ``$()`` and ``!()`` ============================================ The ``$()`` operator in xonsh executes a subprocess command and *captures* some information about that command. The ``$()`` syntax captures and returns the standard output stream of the command as a Python string. This is similar to how ``$()`` performs in Bash. For example, .. code-block:: xonshcon >>> $(ls -l) 'total 0\n-rw-rw-r-- 1 snail snail 0 Mar 8 15:46 xonsh\n' The ``!()`` syntax captured more information about the command, as an instance of a class called ``CommandPipeline``. This object contains more information about the result of the given command, including the return code, the process id, the standard output and standard error streams, and information about how input and output were redirected. For example: .. code-block:: xonshcon >>> !(ls nonexistent_directory) CommandPipeline(stdin=<_io.BytesIO object at 0x7f1948182bf8>, stdout=<_io.BytesIO object at 0x7f1948182af0>, stderr=<_io.BytesIO object at 0x7f19483a6200>, pid=26968, returncode=2, args=['ls', 'nonexistent_directory'], alias=['ls', '--color=auto', '-v'], stdin_redirect=['', 'r'], stdout_redirect=[9, 'wb'], stderr_redirect=[11, 'w'], timestamps=[1485235484.5016758, None], executed_cmd=['ls', '--color=auto', '-v', 'nonexistent_directory'], input=None, output=, errors=None) This object will be "truthy" if its return code was 0, and it is equal (via ``==``) to its return code. It also hashes to its return code. This allows for some interesting new kinds of interactions with subprocess commands, for example: .. code-block:: xonshcon def check_file(file): if !(test -e @(file)): if !(test -f @(file)) or !(test -d @(file)): print("File is a regular file or directory") else: print("File is not a regular file or directory") else: print("File does not exist") def wait_until_google_responds(): while not !(ping -c 1 google.com): sleep 1 If you iterate over the ``CommandPipeline`` object, it will yield lines of its output. Using this, you can quickly and cleanly process output from commands. Additionally, these objects expose a method ``itercheck``, which behaves the same as the built-in iterator but raises ``XonshCalledProcessError`` if the process had a nonzero return code. .. 
code-block:: xonshcon def get_wireless_interface(): """Returns devicename of first connected wifi, None otherwise""" for line in !(nmcli device): dev, typ, state, conn_name = line.split(None, 3) if typ == 'wifi' and state == 'connected': return dev def grep_path(path, regexp): """Recursively greps `path` for perl `regexp` Returns a dict of 'matches' and 'failures'. Matches are files that contain the given regexp. Failures are files that couldn't be scanned. """ matches = [] failures = [] try: for match in !(grep -RPl @(regexp) @(str(path))).itercheck(): matches.append(match) except XonshCalledProcessError as error: for line in error.stderr.split('\n'): if not line.strip(): continue filename = line.split('grep: ', 1)[1].rsplit(':', 1)[0] failures.append(filename) return {'matches': matches, 'failures': failures} The ``$()`` and ``!()`` operators are expressions themselves. This means that we can assign the results to a variable or perform any other manipulations we want. .. code-block:: xonshcon >>> x = $(ls -l) >>> print(x.upper()) TOTAL 0 -RW-RW-R-- 1 SNAIL SNAIL 0 MAR 8 15:46 XONSH >>> y = !(ls -l) >>> print(y.returncode) 0 >>> print(y.rtn) # alias to returncode 0 .. warning:: Job control is not implemented for captured subprocesses. While in subprocess-mode or inside of a captured subprocess, we can always still query the environment with ``$NAME`` variables or the ``${}`` syntax, or inject Python values with the ``@()`` operator: .. code-block:: xonshcon >>> $(echo $HOME) '/home/snail\n' Uncaptured Subprocess with ``$[]`` and ``![]`` =============================================== Uncaptured subprocesses are denoted with the ``$[]`` and ``![]`` operators. They are the same as ``$()`` captured subprocesses in almost every way. The only difference is that the subprocess's stdout passes directly through xonsh and to the screen. The return value of ``$[]`` is always ``None``. In the following, we can see that the results of ``$[]`` are automatically printed, and the return value is not a string. .. code-block:: xonshcon >>> x = $[ls -l] total 0 -rw-rw-r-- 1 snail snail 0 Mar 8 15:46 xonsh >>> x is None True Previously when we automatically entered subprocess-mode, uncaptured subprocesses were used. Thus ``ls -l`` and ``$[ls -l]`` are usually equivalent. The ``![]`` operator is similar to the ``!()`` in that it returns an object containing information about the result of executing the given command. However, its standard output and standard error streams are directed to the terminal, and the resulting object is not displayed. For example .. code-block:: xonshcon >>> x = ![ls -l] and ![echo "hi"] total 0 -rw-rw-r-- 1 snail snail 0 Mar 8 15:46 xonsh hi Python Evaluation with ``@()`` =============================== The ``@()`` operator form works in subprocess mode, and will evaluate arbitrary Python code. The result is appended to the subprocess command list. If the result is a string, it is appended to the argument list. If the result is a list or other non-string sequence, the contents are converted to strings and appended to the argument list in order. If the result in the first position is a function, it is treated as an alias (see the section on `Aliases`_ below), even if it was not explicitly added to the ``aliases`` mapping. Otherwise, the result is automatically converted to a string. For example, .. 
code-block:: xonshcon >>> x = 'xonsh' >>> y = 'party' >>> echo @(x + ' ' + y) xonsh party >>> echo @(2+2) 4 >>> echo @([42, 'yo']) 42 yo >>> echo "hello" | @(lambda a, s=None: s.read().strip() + " world\n") hello world This syntax can be used inside of a captured or uncaptured subprocess, and can be used to generate any of the tokens in the subprocess command list. .. code-block:: xonshcon >>> out = $(echo @(x + ' ' + y)) >>> out 'xonsh party\n' >>> @("ech" + "o") "hey" hey Thus, ``@()`` allows us to create complex commands in Python-mode and then feed them to a subprocess as needed. For example: .. code-block:: xonshcon for i in range(20): $[touch @('file%02d' % i)] Command Substitution with ``@$()`` ================================== A common use of the ``@()`` and ``$()`` operators is allowing the output of a command to replace the command itself (command substitution): ``@([i.strip() for i in $(cmd).split()])``. Xonsh offers a short-hand syntax for this operation: ``@$(cmd)``. Consider the following example: .. code-block:: xonshcon >>> # this returns a string representing stdout >>> $(which ls) 'ls --color=auto\n' >>> # this attempts to run the command, but as one argument >>> # (looks for 'ls --color=auto\n' with spaces and newline) >>> @($(which ls).strip()) xonsh: subprocess mode: command not found: ls --color=auto >>> # this actually executes the intended command >>> @([i.strip() for i in $(which ls).split()]) some_file some_other_file >>> # this does the same thing, but is much more concise >>> @$(which ls) some_file some_other_file Nesting Subprocesses ===================================== Though I am begging you not to abuse this, it is possible to nest the subprocess operators that we have seen so far (``$()``, ``$[]``, ``${}``, ``@()``, ``@$()``). An instance of ``ls -l`` that is on the wrong side of the border of the absurd is shown below: .. code-block:: console >>> $[@$(which @($(echo ls).strip())) @('-' + $(printf 'l'))] total 0 -rw-rw-r-- 1 snail snail 0 Mar 8 15:46 xonsh With great power, and so forth... .. note:: Nesting these subprocess operators inside of ``$()`` and/or ``$[]`` works because the contents of those operators are executed in subprocess mode. Since ``@()`` and ``${}`` run their contents in Python mode, it is not possible to nest other subprocess operators inside of them. Pipes ==================== In subprocess-mode, xonsh allows you to use the ``|`` character to pipe together commands as you would in other shells. .. code-block:: xonshcon >>> env | uniq | sort | grep PATH DATAPATH=/usr/share/MCNPX/v260/Data/ DEFAULTS_PATH=/usr/share/gconf/awesome-gnome.default.path LD_LIBRARY_PATH=/home/snail/.local/lib: MANDATORY_PATH=/usr/share/gconf/awesome-gnome.mandatory.path PATH=/home/snail/.local/bin:/home/snail/sandbox/bin:/usr/local/bin XDG_SEAT_PATH=/org/freedesktop/DisplayManager/Seat0 XDG_SESSION_PATH=/org/freedesktop/DisplayManager/Session0 This is only available in subprocess-mode because ``|`` is otherwise a Python operator. If you are unsure of what pipes are, there are many great references out there. You should be able to find information on StackOverflow or Google. Logical Subprocess And ======================= Subprocess-mode also allows you to use the ``and`` operator to chain together subprocess commands. The truth value of a command is evaluated as whether its return code is zero (i.e. ``proc.returncode == 0``). Like in Python, if the command evaluates to ``False``, subsequent commands will not be executed. 
For example, suppose we want to list files that may or may not exist: .. code-block:: xonshcon >>> touch exists >>> ls exists and ls doesnt exists /bin/ls: cannot access doesnt: No such file or directory However, if you list the file that doesn't exist first, you would have only seen the error: .. code-block:: xonshcon >>> ls doesnt and ls exists /bin/ls: cannot access doesnt: No such file or directory Also, don't worry. Xonsh directly translates the ``&&`` operator into ``and`` for you. It is less Pythonic, of course, but it is your shell! Logical Subprocess Or ======================= Much like with ``and``, you can use the ``or`` operator to chain together subprocess commands. The difference, to be certain, is that subsequent commands will be executed only if the return code is non-zero (i.e. a failure). Using the file example from above: .. code-block:: xonshcon >>> ls exists or ls doesnt exists This doesn't even try to list a non-existent file! However, if you list the file that doesn't exist first, you will see the error and then the file that does exist: .. code-block:: xonshcon >>> ls doesnt or ls exists /bin/ls: cannot access doesnt: No such file or directory exists Never fear! Xonsh also directly translates the ``||`` operator into ``or``, too. Your muscle memory is safe now, here with us. Input/Output Redirection ==================================== xonsh also allows you to redirect ``stdin``, ``stdout``, and/or ``stderr``. This allows you to control where the output of a command is sent, and where it receives its input from. xonsh has its own syntax for these operations, but, for compatibility purposes, xonsh also supports Bash-like syntax. The basic operations are "write to" (``>``), "append to" (``>>``), and "read from" (``<``). The details of these are perhaps best explained through examples. .. note:: The target of the redirection should be separated by a space, otherwise xonsh will raise a SyntaxError. Redirecting ``stdout`` ---------------------- All of the following examples will execute ``COMMAND`` and write its regular output (stdout) to a file called ``output.txt``, creating it if it does not exist: .. code-block:: xonshcon >>> COMMAND > output.txt >>> COMMAND out> output.txt >>> COMMAND o> output.txt >>> COMMAND 1> output.txt # included for Bash compatibility These can be made to append to ``output.txt`` instead of overwriting its contents by replacing ``>`` with ``>>`` (note that ``>>`` will still create the file if it does not exist). Redirecting ``stderr`` ---------------------- All of the following examples will execute ``COMMAND`` and write its error output (stderr) to a file called ``errors.txt``, creating it if it does not exist: .. code-block:: xonshcon >>> COMMAND err> errors.txt >>> COMMAND e> errors.txt >>> COMMAND 2> errors.txt # included for Bash compatibility As above, replacing ``>`` with ``>>`` will cause the error output to be appended to ``errors.txt``, rather than replacing its contents. Combining Streams ---------------------- It is possible to send all of ``COMMAND``'s output (both regular output and error output) to the same location. All of the following examples accomplish that task: .. code-block:: xonshcon >>> COMMAND all> combined.txt >>> COMMAND a> combined.txt >>> COMMAND &> combined.txt # included for Bash compatibility It is also possible to explicitly merge stderr into stdout so that error messages are reported to the same location as regular output. You can do this with the following syntax: ..
code-block:: xonshcon >>> COMMAND err>out >>> COMMAND err>o >>> COMMAND e>out >>> COMMAND e>o >>> COMMAND 2>&1 # included for Bash compatibility This merge can be combined with other redirections, including pipes (see the section on `Pipes`_ above): .. code-block:: xonshcon >>> COMMAND err>out | COMMAND2 >>> COMMAND e>o > combined.txt It is worth noting that this last example is equivalent to: ``COMMAND a> combined.txt`` Similarly, you can also send stdout to stderr with the following syntax: .. code-block:: xonshcon >>> COMMAND out>err >>> COMMAND out>e >>> COMMAND o>err >>> COMMAND o>e >>> COMMAND 1>&2 # included for Bash compatibility Redirecting ``stdin`` --------------------- It is also possible to have a command read its input from a file, rather than from ``stdin``. The following examples demonstrate two ways to accomplish this: .. code-block:: xonshcon >>> COMMAND < input.txt >>> < input.txt COMMAND Combining I/O Redirects ------------------------ It is worth noting that all of these redirections can be combined. Below is one example of a complicated redirect. .. code-block:: xonshcon >>> COMMAND1 e>o < input.txt | COMMAND2 > output.txt e>> errors.txt This line will run ``COMMAND1`` with the contents of ``input.txt`` fed in on stdin, and will pipe all output (stdout and stderr) to ``COMMAND2``; the regular output of this command will be redirected to ``output.txt``, and the error output will be appended to ``errors.txt``. Background Jobs =============== Typically, when you start a program running in xonsh, xonsh itself will pause and wait for that program to terminate. Sometimes, though, you may want to continue giving commands to xonsh while that program is running. In subprocess mode, you can start a process "in the background" (i.e., in a way that allows continued use of the shell) by adding an ampersand (``&``) to the end of your command. Background jobs are very useful when running programs with graphical user interfaces. The following shows an example with ``emacs``. .. code-block:: xonshcon >>> emacs & >>> Note that the prompt is returned to you after emacs is started. Job Control =========== If you start a program in the foreground (with no ampersand), you can suspend that program's execution and return to the xonsh prompt by pressing Control-Z. This will give control of the terminal back to xonsh, and will keep the program paused in the background. .. note:: Suspending processes via Control-Z is not yet supported when running on Windows. To unpause the program and bring it back to the foreground, you can use the ``fg`` command. To unpause the program have it continue in the background (giving you continued access to the xonsh prompt), you can use the ``bg`` command. You can get a listing of all currently running jobs with the ``jobs`` command. Each job has a unique identifier (starting with 1 and counting upward). By default, the ``fg`` and ``bg`` commands operate on the job that was started most recently. You can bring older jobs to the foreground or background by specifying the appropriate ID; for example, ``fg 1`` brings the job with ID 1 to the foreground. Additionally, specify "+" for the most recent job and "-" for the second most recent job. String Literals in Subprocess-mode ==================================== Strings can be used to escape special characters in subprocess-mode. The contents of the string are passed directly to the subprocess command as a single argument. 
So whenever you are in doubt, or if there is a xonsh syntax error because of a filename, just wrap the offending portion in a string. A common use case for this is files with spaces in their names. This detestable practice refuses to die. "No problem!" says xonsh, "I have strings." Let's see it go! .. code-block:: xonshcon >>> touch "sp ace" >>> ls -l total 0 -rw-rw-r-- 1 snail snail 0 Mar 8 17:50 sp ace -rw-rw-r-- 1 snail snail 0 Mar 8 15:46 xonsh By default, the name of an environment variable inside a string will be replaced by the contents of that variable (in subprocess mode only). For example: .. code-block:: xonshcon >>> print("my home is $HOME") my home is $HOME >>> echo "my home is $HOME" my home is /home/snail You can avoid this expansion within a particular command by forcing the strings to be evaluated in Python mode using the ``@()`` syntax: .. code-block:: xonshcon >>> echo "my home is $HOME" my home is /home/snail >>> echo @("my home is $HOME") my home is $HOME You can also disable environment variable expansion completely by setting ``$EXPAND_ENV_VARS`` to ``False``. Filename Globbing with ``*`` =============================== Filename globbing with the ``*`` character is also allowed in subprocess-mode. This simply uses Python's glob module under-the-covers. See there for more details. As an example, start with a lovely bunch of xonshs: .. code-block:: xonshcon >>> touch xonsh conch konk quanxh >>> ls conch konk quanxh xonsh >>> ls *h conch quanxh xonsh >>> ls *o* conch konk xonsh This is not available in Python-mode because multiplication is pretty important. Advanced Path Search with Backticks =================================== xonsh offers additional ways to find path names beyond regular globbing, both in Python mode and in subprocess mode. Regular Expression Globbing --------------------------- If you have ever felt that normal globbing could use some more octane, then regex globbing is the tool for you! Any string that uses backticks (`````) instead of quotes (``'``, ``"``) is interpreted as a regular expression to match filenames against. Like with regular globbing, a list of successful matches is returned. In Python-mode, this is just a list of strings. In subprocess-mode, each filename becomes its own argument to the subprocess command. Let's see a demonstration with some simple filenames: .. code-block:: xonshcon >>> touch a aa aaa aba abba aab aabb abcba >>> ls `a(a+|b+)a` aaa aba abba >>> print(`a(a+|b+)a`) ['aaa', 'aba', 'abba'] >>> len(`a(a+|b+)a`) 3 This same kind of search is performed if the backticks are prefaced with ``r``. So the following expressions are equivalent: ```test``` and ``r`test```. Other than the regex matching, this functions in the same way as normal globbing. For more information, please see the documentation for the ``re`` module in the Python standard library. .. warning:: In Xonsh, the meaning of backticks is very different from their meaning in Bash. In Bash, backticks mean to run a captured subprocess (``$()`` in Xonsh). Normal Globbing --------------- In subprocess mode, normal globbing happens without any special syntax. However, the backtick syntax has an additional feature: it is available inside of Python mode as well as subprocess mode. Similarly to regex globbing, normal globbing can be performed (either in Python mode or subprocess mode) by using the ``g````: .. 
code-block:: xonshcon >>> touch a aa aaa aba abba aab aabb abcba >>> ls a*b* aab aabb aba abba abcba >>> ls g`a*b*` aab aabb aba abba abcba >>> print(g`a*b*`) ['aab', 'aabb', 'abba', 'abcba', 'aba'] >>> len(g`a*b*`) 5 Custom Path Searches -------------------- In addition, if normal globbing and regular expression globbing are not enough, xonsh allows you to specify your own search functions. A search function is defined as a function of a single argument (a string) that returns a list of possible matches to that string. Search functions can then be used with backticks with the following syntax: ``@`test``` The following example shows the form of these functions: .. code-block:: xonshcon >>> def foo(s): ... return [i for i in os.listdir('.') if i.startswith(s)] >>> @foo`aa` ['aa', 'aaa', 'aab', 'aabb'] Path Output ----------- Using the ``p`` modifier with either regex or glob backticks changes the return type from a list of strings to a list of :class:`pathlib.Path` objects: .. code-block:: xonshcon >>> p`.*` [Path('foo'), Path('bar')] >>> [x for x in pg`**` if x.is_symlink()] [Path('a_link')] Path Literals ------------- Path objects can be instantiated directly using *p-string* syntax. Path objects can be converted back to plain strings with `str()`, and this conversion is handled implicitly in subprocess mode. .. code-block:: xonshcon >>> mypath = p'/foo/bar' >>> mypath Path('/foo/bar') >>> mypath.stem 'bar' >>> echo @(mypath) /foo/bar Help & Superhelp with ``?`` & ``??`` ===================================================== From IPython, xonsh allows you to inspect objects with question marks. A single question mark (``?``) is used to display the normal level of help. Double question marks (``??``) are used to display a higher level of help, called superhelp. Superhelp usually includes source code if the object was written in pure Python. Let's start by looking at the help for the int type: .. code-block:: xonshcon >>> int? Type: type String form: Init definition: (self, *args, **kwargs) Docstring: int(x=0) -> integer int(x, base=10) -> integer Convert a number or string to an integer, or return 0 if no arguments are given. If x is a number, return x.__int__(). For floating point numbers, this truncates towards zero. If x is not a number or if base is given, then x must be a string, bytes, or bytearray instance representing an integer literal in the given base. The literal can be preceded by '+' or '-' and be surrounded by whitespace. The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to interpret the base from the string as an integer literal. >>> int('0b100', base=0) 4 Now, let's look at the superhelp for the xonsh built-in that enables regex globbing: .. code-block:: xonshcon >>> __xonsh_regexsearch__?? Type: function String form: File: /usr/local/lib/python3.5/dist-packages/xonsh/built_ins.py Definition: (s) Source: def regexsearch(s): s = expand_path(s) return reglob(s) Note that both help and superhelp return the object that they are inspecting. This allows you to chain together help inside of other operations and ask for help several times in an object hierarchy. For instance, let's get help for both the dict type and its key() method simultaneously: .. code-block:: xonshcon >>> dict?.keys?? 
Type: type String form: Init definition: (self, *args, **kwargs) Docstring: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: dict(one=1, two=2) Type: method_descriptor String form: Docstring: D.keys() -> a set-like object providing a view on D's keys Of course, for subprocess commands, you still want to use the ``man`` command. Compile, Evaluate, & Execute ================================ Like Python and Bash, xonsh provides built-in hooks to compile, evaluate, and execute strings of xonsh code. To prevent this functionality from having serious name collisions with the Python built-in ``compile()``, ``eval()``, and ``exec()`` functions, the xonsh equivalents all append an 'x'. So for xonsh code you want to use the ``compilex()``, ``evalx()``, and ``execx()`` functions. If you don't know what these do, you probably don't need them. Aliases ============================== Another important xonsh built-in is the ``aliases`` mapping. This is like a dictionary that affects how subprocess commands are run. If you are familiar with the Bash ``alias`` built-in, this is similar. Alias command matching only occurs for the first element of a subprocess command. The keys of ``aliases`` are strings that act as commands in subprocess-mode. The values are lists of strings, where the first element is the command, and the rest are the arguments. You can also set the value to a string, in which case it will be converted to a list automatically with ``shlex.split``. For example, the following creates several aliases for the ``git`` version control software. Both styles (list of strings and single string) are shown: .. code-block:: xonshcon >>> aliases['g'] = 'git status -sb' >>> aliases['gco'] = 'git checkout' >>> aliases['gp'] = ['git', 'pull'] If you were to run ``gco feature-fabulous`` with the above aliases in effect, the command would reduce to ``['git', 'checkout', 'feature-fabulous']`` before being executed. Callable Aliases ---------------- Lastly, if an alias value is a function (or other callable), then this function is called *instead* of going to a subprocess command. Such functions may have one of the following signatures: .. code-block:: python def mycmd0(): """This form takes no arguments but may return output or a return code. """ return "some output." def mycmd1(args): """This form takes a single argument, args. This is a list of strings representing the arguments to this command. Feel free to parse them however you wish! """ # perform some action. return 0 def mycmd2(args, stdin=None): """This form takes two arguments. The args list like above, as a well as standard input. stdin will be a file like object that the command can read from, if the user piped input to this command. If no input was provided this will be None. """ # do whatever you want! Anything you print to stdout or stderr # will be captured for you automatically. This allows callable # aliases to support piping. print('I go to stdout and will be printed or piped') # Note: that you have access to the xonsh # built-ins if you 'import builtins'. 
For example, if you need the # environment, you could do to following: import builtins env = builtins.__xonsh_env__ # The return value of the function can either be None, return # a single string representing stdout return 'I am out of here' # or you can build up strings for stdout and stderr and then # return a (stdout, stderr) tuple. Both of these may be # either a str or None. Any results returned like this will be # concatenated with the strings printed elsewhere in the function. stdout = 'I commanded' stderr = None return stdout, stderr # Lastly, a 3-tuple return value can be used to include an integer # return code indicating failure (> 0 return code). In the previous # examples the return code would be 0/success. return (None, "I failed", 2) def mycmd3(args, stdin=None, stdout=None): """This form has three parameters. The first two are the same as above. The last argument represents the standard output. This is a file-like object that the command may write too. """ # you can either use stdout stdout.write("Hello, ") # or print()! print("Mom!") return def mycmd4(args, stdin=None, stdout=None, stderr=None): """The next form of subprocess callables takes all of the arguments shown above as well as the standard error stream. As with stdout, this is a write-only file-like object. """ # This form allows "streaming" data to stdout and stderr import time for i in range(5): time.sleep(i) print(i, file=stdout) # In this form, the return value should be a single integer # representing the "return code" of the alias (zero if successful, # non-zero otherwise) return 0 def mycmd5(args, stdin=None, stdout=None, stderr=None, spec=None): """Lastly, the final form of subprocess callables takes all of the arguments shown above as well as a subprocess specification SubprocSpec object. This holds many attributes that dictate how the command is being run. For instance this can be useful for knowing if the process is captured by $() or !(). """ import xonsh.proc if spec.captured in xonsh.proc.STDOUT_CAPTURE_KINDS: end = '' else: end = '\n' # Now we'll get a newline if the user is at the terminal, and no # newline if we are captured print('Hi Mom!', end=end) return 0 Adding, Modifying, and Removing Aliases --------------------------------------- We can dynamically alter the aliases present simply by modifying the built-in mapping. Here is an example using a function value: .. code-block:: xonshcon >>> def _banana(args, stdin=None): ... return ('My spoon is tooo big!', None) >>> aliases['banana'] = _banana >>> banana 'My spoon is tooo big!' To redefine an alias, simply assign a new function, here using a python lambda with keyword arguments: .. code-block:: xonshcon >>> aliases['banana'] = lambda: "Banana for scale.\n" >>> banana Banana for scale. Removing an alias is as easy as deleting the key from the alias dictionary: .. code-block:: xonshcon >>> del aliases['banana'] .. note:: Alias functions should generally be defined with a leading underscore. Otherwise, they may shadow the alias itself, as Python variables take precedence over aliases when xonsh executes commands. Anonymous Aliases ----------------- As mentioned above, it is also possible to treat functions outside this mapping as aliases, by wrapping them in ``@()``. For example: .. code-block:: xonshcon >>> @(_banana) 'My spoon is tooo big!' 
>>> echo "hello" | @(lambda args, stdin=None: stdin.read().strip() + ' ' + args[0] + '\n') world hello world Unthreadable Aliases ----------------------- Usually, callable alias commands will be run in a separate thread so that they may be run in the background. However, some aliases may need to be executed on the thread that they were called from. This is mostly useful for debuggers and profilers. To make an alias run in the foreground, decorate its function with the ``xonsh.tools.unthreadable`` decorator. .. code-block:: python from xonsh.tools import unthreadable @unthreadable def _mycmd(args, stdin=None): return 'In your face!' aliases['mycmd'] = _mycmd Uncapturable Aliases ----------------------- Also, callable aliases by default will be executed such that their output is captured (like most commands in xonsh that don't enter alternate mode). However, some aliases may want to run alternate-mode commands themselves. Thus the callable alias can't be captured without dire consequences (tm). To prevent this, you can declare a callable alias uncapturable. This is mostly useful for aliases that then open up text editors, pagers, or the like. To make an alias uncapturable, decorate its function with the ``xonsh.tools.uncapturable`` decorator. This is probably best used in conjunction with the ``unthreadable`` decorator. For example: .. code-block:: python from xonsh.tools import unthreadable, uncapturable @uncapturable @unthreadable def _myvi(): vi my.txt aliases['myvi'] = _myvi ------------- Aliasing is a powerful way that xonsh allows you to seamlessly interact to with Python and subprocess. .. warning:: If ``FOREIGN_ALIASES_OVERRIDE`` environment variable is False (the default) then foreign shell aliases that try to override xonsh aliases will be ignored. Setting of this environment variable must happen in the static configuration file ``$XONSH_CONFIG_DIR/config.json`` in the 'env' section. Up, Down, Tab ============== The up and down keys search history matching from the start of the line, much like they do in the IPython shell. Tab completion is present as well. By default, in Python-mode you are able to complete based on the variable names in the current builtins, globals, and locals, as well as xonsh languages keywords & operator, files & directories, and environment variable names. In subprocess-mode, you additionally complete on the names of executable files on your ``$PATH``, alias keys, and full Bash completion for the commands themselves. xonsh also provides a means of modifying the behavior of the tab completer. More detail is available on the `Tab Completion page `_. .. _customprompt: Customizing the Prompt ====================== Customizing the prompt by modifying ``$PROMPT`` is probably the most common reason for altering an environment variable. .. note:: Note that the ``$PROMPT`` variable will never be inherited from a parent process (regardless of whether that parent is a foreign shell or an instance of xonsh). The ``$PROMPT`` variable can be a string, or it can be a function (of no arguments) that returns a string. The result can contain keyword arguments, which will be replaced automatically: .. code-block:: xonshcon >>> $PROMPT = '{user}@{hostname}:{cwd} > ' snail@home:~ > # it works! snail@home:~ > $PROMPT = lambda: '{user}@{hostname}:{cwd} >> ' snail@home:~ >> # so does that! 
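The same idea works with a named function, which is handy once the prompt logic grows beyond a one-liner. Here is a small sketch (the function name is arbitrary; it uses only the standard fields and colors listed below) that could live in your ``~/.xonshrc``:

.. code-block:: xonsh

    def my_prompt():
        # fields in curly braces are filled in each time the prompt is drawn
        return '{BOLD_GREEN}{user}{NO_COLOR}@{hostname}:{cwd} $ '

    $PROMPT = my_prompt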
By default, the following variables are available for use: * ``user``: The username of the current user * ``hostname``: The name of the host computer * ``cwd``: The current working directory, you may use ``$DYNAMIC_CWD_WIDTH`` to set a maximum width for this variable and ``$DYNAMIC_CWD_ELISION_CHAR`` to set the character used in shortened path. * ``short_cwd``: A shortened form of the current working directory; e.g., ``/path/to/xonsh`` becomes ``/p/t/xonsh`` * ``cwd_dir``: The dirname of the current working directory, e.g. ``/path/to`` in ``/path/to/xonsh``. * ``cwd_base``: The basename of the current working directory, e.g. ``xonsh`` in ``/path/to/xonsh``. * ``env_name``: The name of active virtual environment, if any. * ``curr_branch``: The name of the current git branch, if any. * ``branch_color``: ``{BOLD_GREEN}`` if the current git branch is clean, otherwise ``{BOLD_RED}``. This is yellow if the branch color could not be determined. * ``branch_bg_color``: Like, ``{branch_color}``, but sets a background color instead. * ``prompt_end``: ``#`` if the user has root/admin permissions ``$`` otherwise * ``current_job``: The name of the command currently running in the foreground, if any. * ``vte_new_tab_cwd``: Issues VTE escape sequence for opening new tabs in the current working directory on some linux terminals. This is not usually needed. * ``gitstatus``: Informative git status, like ``[master|MERGING|+1…2]``, you may use `$XONSH_GITSTATUS_* `_ to customize the styling. You can also color your prompt easily by inserting keywords such as ``{GREEN}`` or ``{BOLD_BLUE}``. Colors have the form shown below: * ``NO_COLOR``: Resets any previously used color codes * ``COLORNAME``: Inserts a color code for the following basic colors, which come in regular (dark) and intense (light) forms: - ``BLACK`` or ``INTENSE_BLACK`` - ``RED`` or ``INTENSE_RED`` - ``GREEN`` or ``INTENSE_GREEN`` - ``YELLOW`` or ``INTENSE_YELLOW`` - ``BLUE`` or ``INTENSE_BLUE`` - ``PURPLE`` or ``INTENSE_PURPLE`` - ``CYAN`` or ``INTENSE_CYAN`` - ``WHITE`` or ``INTENSE_WHITE`` * ``#HEX``: A ``#`` before a len-3 or len-6 hex code will use that hex color, or the nearest approximation that that is supported by the shell and terminal. For example, ``#fff`` and ``#fafad2`` are both valid. * ``BACKGROUND_`` may be added to the beginning of a color name or hex color to set a background color. For example, ``BACKGROUND_INTENSE_RED`` and ``BACKGROUND_#123456`` can both be used. * ``bg#HEX`` or ``BG#HEX`` are shortcuts for setting a background hex color. Thus you can set ``bg#0012ab`` or the uppercase version. * ``BOLD_`` is a prefix qualifier that may be used with any foreground color. For example, ``BOLD_RED`` and ``BOLD_#112233`` are OK! * ``UNDERLINE_`` is a prefix qualifier that also may be used with any foreground color. For example, ``UNDERLINE_GREEN``. * Or any other combination of qualifiers, such as ``BOLD_UNDERLINE_INTENSE_BLACK``, which is the most metal color you can use! You can make use of additional variables beyond these by adding them to the ``PROMPT_FIELDS`` environment variable. The values in this dictionary should be strings (which will be inserted into the prompt verbatim), or functions of no arguments (which will be called each time the prompt is generated, and the results of those calls will be inserted into the prompt). For example: .. 
code-block:: console snail@home ~ $ $PROMPT_FIELDS['test'] = "hey" snail@home ~ $ $PROMPT = "{test} {cwd} $ " hey ~ $ hey ~ $ import random hey ~ $ $PROMPT_FIELDS['test'] = lambda: random.randint(1,9) 3 ~ $ 5 ~ $ 2 ~ $ 8 ~ $ Environment variables and functions are also available with the ``$`` prefix. For example: .. code-block:: console snail@home ~ $ $PROMPT = "{$LANG} >" en_US.utf8 > Note that some entries of the ``$PROMPT_FIELDS`` are not always applicable, for example, ``curr_branch`` returns ``None`` if the current directory is not in a repository. The ``None`` will be interpreted as an empty string. But let's consider a problem: .. code-block:: console snail@home ~/xonsh $ $PROMPT = "{cwd_base} [{curr_branch}] $ " xonsh [master] $ cd .. ~ [] $ We want the branch to be displayed in square brackets, but we also don't want the brackets (and the extra space) to be displayed when there is no branch. The solution is to add a nested format string (separated with a colon) that will be invoked only if the value is not ``None``: .. code-block:: console snail@home ~/xonsh $ $PROMPT = "{cwd_base}{curr_branch: [{}]} $ " xonsh [master] $ cd .. ~ $ The curly brackets act as a placeholder, because the additional part is an ordinary format string. What we're doing here is equivalent to this expression: .. code-block:: python " [{}]".format(curr_branch()) if curr_branch() is not None else "" Executing Commands and Scripts ============================== When started with the ``-c`` flag and a command, xonsh will execute that command and exit, instead of entering the command loop. .. code-block:: console $ xonsh -c "echo @(7+3)" 10 Longer scripts can be run either by specifying a filename containing the script, or by feeding them to xonsh via stdin. For example, consider the following script, stored in ``test.xsh``: .. code-block:: xonshcon #!/usr/bin/env xonsh ls print('removing files') rm `file\d+.txt` ls print('adding files') # This is a comment for i, x in enumerate("xonsh"): echo @(x) > @("file{0}.txt".format(i)) print($(ls).replace('\n', ' ')) This script could be run by piping its contents to xonsh: .. code-block:: console $ cat test.xsh | xonsh file0.txt file1.txt file2.txt file3.txt file4.txt test_script.sh removing files test_script.sh adding files file0.txt file1.txt file2.txt file3.txt file4.txt test_script.sh or by invoking xonsh with its filename as an argument: .. code-block:: console $ xonsh test.xsh file0.txt file1.txt file2.txt file3.txt file4.txt test_script.sh removing files test_script.sh adding files file0.txt file1.txt file2.txt file3.txt file4.txt test_script.sh xonsh scripts can also accept command line arguments and parameters. These arguments are made available to the script in two different ways: #. In either mode, as individual variables ``$ARG`` (e.g., ``$ARG1``) #. In Python mode only, as a list ``$ARGS`` For example, consider a slight variation of the example script from above that operates on a given argument, rather than on the string ``'xonsh'`` (notice how ``$ARGS`` and ``$ARG1`` are used): .. code-block:: xonshcon #!/usr/bin/env xonsh print($ARGS) ls print('removing files') rm `file\d+.txt` ls print('adding files') # This is a comment for i, x in enumerate($ARG1): echo @(x) > @("file{0}.txt".format(i)) print($(ls).replace('\n', ' ')) print() .. 
code-block:: console $ xonsh test2.xsh snails ['test_script.sh', 'snails'] file0.txt file1.txt file2.txt file3.txt file4.txt file5.txt test_script.sh removing files test_script.sh adding files file0.txt file1.txt file2.txt file3.txt file4.txt file5.txt test_script.sh $ echo @(' '.join($(cat @('file%d.txt' % i)).strip() for i in range(6))) s n a i l s Additionally, if the script should exit if a command fails, set the environment variable ``$RAISE_SUBPROC_ERROR = True`` at the top of the file. Errors in Python mode will already raise exceptions and so this is roughly equivalent to Bash's ``set -e``. Furthermore, you can also toggle the ability to print source code lines with the ``trace on`` and ``trace off`` commands. This is roughly equivalent to Bash's ``set -x`` or Python's ``python -m trace``, but you know, better. Importing Xonsh (``*.xsh``) ============================== You can import xonsh source files with the ``*.xsh`` file extension using the normal Python syntax. Say you had a file called ``mine.xsh``, you could, therefore, perform a Bash-like source into your current shell with the following: .. code-block:: xonshcon from mine import * That's All, Folks ====================== To leave xonsh, hit ``Ctrl-D``, type ``EOF``, type ``quit``, or type ``exit``. On Windows, you can also type ``Ctrl-Z``. .. code-block:: xonshcon >>> exit Now it is your turn. xonsh-0.6.0/docs/tutorial_completers.rst000066400000000000000000000124201320541242300204310ustar00rootroot00000000000000.. _tutorial_completers: ************************************* Tutorial: Programmable Tab-Completion ************************************* Overview ================================ As with many other shells, xonsh ships with the ability to complete partially-specified arguments upon hitting the "tab" key. In Python-mode, pressing the "tab" key will complete based on the variable names in the current builtins, globals, and locals, as well as xonsh language keywords & operators, files & directories, and environment variable names. In subprocess-mode, xonsh additionally completes based on the names of any executable files on your $PATH, alias keys, and full Bash completion for the commands themselves. xonsh also provides a mechanism by which the results of a tab completion can be customized (i.e., new completions can be generated, or a subset of the built-in completions can be ignored). This page details the internal structure of xonsh's completion system and includes instructions for implementing new tab completion functions. Structure ========== xonsh's built-in completers live in the ``xonsh.completers`` package, and they are managed through an instance of ``OrderedDict`` (``__xonsh_completers__``) that maps unique identifiers to completion functions. When the "tab" key is pressed, xonsh loops over the completion functions in order, calling each one in turn until it reaches one that returns a non-empty set of completion for the current line. This set is then displayed to the user. Listing Active Completers ========================= A list of the active completers can be viewed by running the ``completer list`` command. This command will display names and descriptions of the currently-active completers, in the order in which they will be checked. 
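Before writing a completer of your own, it may help to picture the loop described above. Here is a rough sketch (not xonsh's actual implementation; the function and argument names are illustrative) of what happens when the "tab" key is pressed:

.. code-block:: python

    def first_nonempty_completion(prefix, line, begidx, endidx, ctx, completers):
        """Walk the ordered mapping of completers; the first one that
        returns a non-empty set of completions wins."""
        for name, complete_func in completers.items():
            out = complete_func(prefix, line, begidx, endidx, ctx)
            if out:
                return out
        return set()

For simplicity, this sketch ignores the tuple return form described in the next section.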
Writing a New Completer ======================= Completers are implemented as Python functions that take five arguments: * ``prefix``: the string to be matched (the last whitespace-separated token in the current line) * ``line``: a string representing the entire current line * ``begidx``: the index at which ``prefix`` starts in ``line`` * ``endidx``: the index at which ``prefix`` ends in ``line`` * ``ctx``: the current Python environment, as a dictionary mapping names to values This function should return a Python set of possible completions for ``prefix`` in the current context. If the completer should not be used in this case, it should return ``None`` or an empty set, which will cause xonsh to move on and try to use the next completer. Occasionally, completers will need to return a match that does not actually start with ``prefix``. In this case, a completer should instead return a tuple ``(completions, prefixlength)``, where ``completions`` is the set of appropriate completions, and ``prefixlength`` is the number of characters in ``line`` that should be treated as part of the completion. The docstring of a completer should contain a brief description of its functionality, which will be displayed by ``completer list``. Three examples follow. For more examples, see the source code of the completers xonsh actually uses, in the ``xonsh.completers`` module. .. code-block:: python def dummy_completer(prefix, line, begidx, endidx, ctx): ''' Completes everything with options "lou" and "carcolh", regardless of the value of prefix. ''' return {"lou", "carcolh"} def python_context_completer(prefix, line, begidx, endidx, ctx): ''' Completes based on the names in the current Python environment ''' return {i for i in ctx if i.startswith(prefix)} def unbeliever_completer(prefix, line, begidx, endidx, ctx): ''' Replaces "lou carcolh" with "snail" if tab is pressed after typing "lou" and when typing "carcolh" ''' if 'carcolh'.startswith(prefix) and line[:begidx].split()[-1] == 'lou': return ({'snail'}, len('lou ') + len(prefix)) Registering a Completer ======================= Once you have created a completion function, you can add it to the list of active completers via the ``completer add`` command:: Usage: completer add NAME FUNC [POS] ``NAME`` is a unique name to use in the listing ``FUNC`` is the name of a completer function to use. ``POS`` (optional) is a position into the list of completers at which the new completer should be added. It can be one of the following values: * ``"start"`` indicates that the completer should be added to the start of the list of completers (it should be run before all others) * ``"end"`` indicates that the completer should be added to the end of the list of completers (it should be run after all others) * ``">KEY"``, where ``KEY`` is a pre-existing name, indicates that this should be added after the completer named ``KEY`` * ``"`_ for the details. Note that the event system is keyword only. Event handlers must match argument names and must have a ``**kw`` as protection against future changes. Can I use this, too? ==================== Yes! It's even easy! In your xontrib, you just have to do something like:: events.doc('myxontrib_on_spam', """ myxontrib_on_spam(can: Spam) -> bool? Fired in case of spam. Return ``True`` if it's been eaten. """) This will enable users to call ``help(events.myxontrib_on_spam)`` and get useful output. Further Reading =============== For a complete list of available events, see `the events reference `_. 
If you want to know more about the gory details of what makes events tick, see `Advanced Events `_. xonsh-0.6.0/docs/tutorial_hist.rst000066400000000000000000000506551320541242300172370ustar00rootroot00000000000000.. _tutorial_hist: ************************************ Tutorial: History ************************************ Import your best Leonard Nimoy documentary voice and get ready for the xonsh tutorial on ``history``. How is xonsh history different? ================================ Most shells - bash foremost among them - think of history as a linear sequence of past commands that have been entered into *the* terminal. This is saved when *the* shell exits, and loaded when *the* new shell starts. But this is no longer how the world works. The world is a messy, asynchronous place. We usually have at least as many terminals (and shells) open at a time as we can practically handle - and probably even more! In xonsh, history acknowledges that this is the case. Instead of a single history file of inputs, xonsh implements a collection of JSON-formatted history files that can be thought of as having the following structure: .. code-block:: python {'env': {...}, # Environment that xonsh was started with 'sessionid': str, # UUID4 for the session 'ts': [start, stop], # start and stop timestamps for session [s since epoch] 'locked': True, # boolean for whether the file is in use or not 'cmds': [ # array of commands {'inp': str, # input command 'ts': [start, stop], # timestamps for the command 'rtn': int, # command return code 'out' str, # stdout and stderr of command, for subproc commands # this is only available on Linux. Off by default. }, ... ], } This rich set of data allows xonsh to do much more advanced inspection and manipulation. The sessionid, locking, and one-file-per-shell ideas allow for there to be multiple instances of xonsh running at the same time without competing and overwriting history constantly. Of course, an external process deleting a history file can still cause problems. But hey, the world and the file system are messy places to be! Why have rich history? ======================= Often by the time you know that you need a historical artifact, it is already too late. You can't remember: * the input exactly, * you think that you remember the output but when you rerun the command what you get now seems somehow different, * who knows what the return code was, * and whatever command you ran right before is now lost in the mists of time! So the reasons for having rich history are debugging and reproducibility. Xonsh takes the guess-work out of the past. There is even the ability to store all of stdout, though this is turned off by default. If history was just a static file, it would be more like a server log than a traditional history file. However, xonsh also has the ability to ``replay`` a history file. Replaying history allows previous sessions to act as scripts in a new or the same environment. Replaying will create a new, separate history session and file. The two histories - even though they contain the same inputs - are then able to be diff'ed. Diff'ing can be done through xonsh custom history diff'ing tool, which can help pinpoint differences stemming from the environment as well as the input/output. This cycle of do-replay-diff is more meaningful than a traditional, "What did I/it/the Universe just do?!" approach. 
Of course, nothing has ever stopped anyone from pulling Unix tools like ``env``, ``script``, ``diff``, and others together to deliver the same kind of capability. However, in practice, no one does this. With xonsh, rich and useful history comes batteries included. ``history`` command ==================== All xonsh history inspection and manipulation goes through the top-level ``history`` alias or command. If you run this without an ``action`` argument, it will default to the ``show`` action, see below. .. code-block:: xonshcon >>> history Also note that the history object itself can be accessed through the xonsh built-in variable ``__xonsh_history__``. ``show`` action ================ The ``show`` action for the history command mimics what the ``history`` command does in other shells. Namely, it displays the past inputs along with the index of these inputs. This operates on the current session by default and is the default action for the ``history`` command. For example, .. code-block:: xonshcon >>> 1 + 1 2 >>> history show 0 1 + 1 >>> history 0 1 + 1 1 history show .. note:: History is zero-indexed; this is still Python. The show command can also optionally take as an argument any integer (to just display that history index) or a slice (to display a range of history indices). To display only the even indices from above, you could write: .. code-block:: xonshcon >>> history show ::2 0 1 + 1 2 history One can also use many slice/integer arguments to get different portions of history. After ``show``, an option that indicates which history should be returned can be used: ``xonsh`` displays the past inputs from all valid json files found in ``XONSH_DATA_DIR``. As such, this operates on all past and present xonsh sessions. ``all`` is an alias for ``xonsh``. ``zsh`` will display all history from the history file specified by the ``HISTFILE`` environmental variable in zsh. By default this is ``~/.zsh_history``. However, they can also be respectively specified in both ``~/.zshrc`` and ``~/.zprofile``. Xonsh will parse these files (rc file first) to check if ``HISTFILE`` has been set. The ``bash`` action will display all history from the history file specified by the ``HISTFILE`` environmental variable in bash. By default this is ``~/.bash_history``. However, they can also be respectively specified in both ``~/.bashrc`` and ``~/.bash_profile``. Xonsh will parse these files (rc file first) to check if ``HISTFILE`` has been set. ``show`` also accepts other options for more control over history output: the ``-n`` option enumerates the commands, the ``-t`` option shows the timestamps, and more; try out ``history show --help`` for a full list of options. ``id`` action ================ Each xonsh history has its own universally unique ``sessionid``. The ``id`` action is how you display this identifier. For instance, .. code-block:: xonshcon >>> history id ace97177-f8dd-4a8d-8a91-a98ffd0b3d17 ``file`` action ================ Similarly, each xonsh history has its own file associated with it. The ``file`` action is how you display the path to this file. For example, .. code-block:: xonshcon >>> history file /home/me/.local/share/xonsh/xonsh-ace97177-f8dd-4a8d-8a91-a98ffd0b3d17.json Note that these files are stored in the directory given by the ``$XONSH_DATA_DIR`` environment variable. This is, by default, set to the ``xonsh`` dir inside of the free desktop standards ``$XDG_DATA_HOME`` environment variable. See `this page `_ for more details.
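Since every session gets its own file, a quick way to see all of them is to list that directory (shown here without output, since the filenames will differ on your machine):

.. code-block:: xonshcon

    >>> ls $XONSH_DATA_DIR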
``info`` action =============== The info action combines the ``id`` and ``file`` actions as well as adds some additional information about the current state of the history. By default, this prints a key-value series of lines. However, it can also return a JSON formatted string. .. code-block:: xonshcon >>> history info sessionid: ace97177-f8dd-4a8d-8a91-a98ffd0b3d17 filename: /home/scopatz/.local/share/xonsh/xonsh-ace97177-f8dd-4a8d-8a91-a98ffd0b3d17.json length: 6 buffersize: 100 bufferlength: 6 .. code-block:: xonshcon >>> history info --json {"sessionid": "ace97177-f8dd-4a8d-8a91-a98ffd0b3d17", "filename": "/home/scopatz/.local/share/xonsh/xonsh-ace97177-f8dd-4a8d-8a91-a98ffd0b3d17.json", "length": 7, "buffersize": 100, "bufferlength": 7} ``replay`` action ================== The ``replay`` action allows for history files to be rerun, as scripts or in an existing xonsh session. First, the original ``'replay'`` environment is loaded and will be merged with the current ``'native'`` environment. How the environments are merged or not merged can be set at replay time. The default is for the current native environment to take precedence. Next, each input in the environment is executed in order. Lastly, the information of the replayed history file is printed. Let's walk through an example. To begin with, open up xonsh and run some simple commands, as follows. Call this the ``orig`` session. **orig history** .. code-block:: xonshcon >>> mkdir -p temp/ >>> cd temp >>> import random >>> touch @(random.randint(0, 18)) >>> ls 2 >>> history file /home/scopatz/.local/share/xonsh/xonsh-4bc4ecd6-3eba-4f3a-b396-a229ba2b4810.json >>> exit We can now replay this by passing the filename into the replay command or the replay action of the history command. This action has a few different options, but one of them is that we can select a different target output file with the ``-o`` or ``--target`` option. For example, in a new session, we could run: **new history** .. code-block:: xonshcon >>> history replay -o ~/new.json ~/.local/share/xonsh/xonsh-4bc4ecd6-3eba-4f3a-b396-a229ba2b4810.json 2 10 /home/scopatz/new.json ---------------------------------------------------------------- Just replayed history, new history the has following information ---------------------------------------------------------------- sessionid: 35712b6f-4b15-4ef9-8ce3-b4c781601bc2 filename: /home/scopatz/new.json length: 7 buffersize: 100 bufferlength: 0 As you can see, a new history was created and another random file was added to the file system. If we want instead to replay history in its own session, we can always use the ``-c`` option on xonsh itself to execute the replay command. **next history** .. code-block:: xonshcon >>> xonsh -c "replay -o ~/next.json ~/new.json" 2 7 10 /home/scopatz/next.json ---------------------------------------------------------------- Just replayed history, new history has the following information ---------------------------------------------------------------- sessionid: 70d7186e-3eb9-4b1c-8f82-45bb8a1b7967 filename: /home/scopatz/next.json length: 7 buffersize: 100 bufferlength: 0 Currently history does not handle alias storage and reloading, but such a feature may be coming in the future. ``diff`` action =============== Between any two history files, we can run the ``diff`` action. This does more that a simple line diff that you might generate with the unix ``diff`` command. (If you want a line diff, just use the unix command!) 
Instead this takes advantage of the fact that we know we have xonsh history files to do a more sophisticated diff on the environment, input, output (if available), and return values. Of course, the histories inputs should be 'sufficiently similar' if the diff is to be meaningful. However, they don't need to be exactly the same. The diff action has one major option, ``-v`` or ``--verbose``. This basically says whether the diff should go into as much detail as possible or only pick out the relevant pieces. Diffing the new and next examples from the replay action, we see the diff looks like: .. code-block:: xonshcon >>> history diff ~/new.json ~/next.json --- /home/scopatz/new.json (35712b6f-4b15-4ef9-8ce3-b4c781601bc2) [unlocked] started: 2015-08-27 15:13:44.873869 stopped: 2015-08-27 15:13:44.918903 runtime: 0:00:00.045034 +++ /home/scopatz/next.json (70d7186e-3eb9-4b1c-8f82-45bb8a1b7967) [unlocked] started: 2015-08-27 15:15:09.423932 stopped: 2015-08-27 15:15:09.619098 runtime: 0:00:00.195166 Environment ----------- 'PATH' is in both, but differs - /home/scopatz/.local/bin:/home/scopatz/sandbox/bin:/home/scopatz/miniconda3/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/home/scopatz/origen22/code/ + /home/scopatz/.local/bin:/home/scopatz/sandbox/bin:/home/scopatz/miniconda3/bin:/home/scopatz/.local/bin:/home/scopatz/sandbox/bin:/home/scopatz/miniconda3/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/home/scopatz/origen22/code/:/home/scopatz/origen22/code/ 'SHLVL' is in both, but differs - 2 + 3 'XONSH_INTERACTIVE' is in both, but differs - True + False These vars are only in 70d7186e-3eb9-4b1c-8f82-45bb8a1b7967: {'OLDPWD'} Commands -------- cmd #4 in 35712b6f-4b15-4ef9-8ce3-b4c781601bc2 input is the same as cmd #4 in 70d7186e-3eb9-4b1c-8f82-45bb8a1b7967, but output differs: Outputs differ - 2 10 + 2 7 10 cmd #5 in 35712b6f-4b15-4ef9-8ce3-b4c781601bc2 input is the same as cmd #5 in 70d7186e-3eb9-4b1c-8f82-45bb8a1b7967, but output differs: Outputs differ - /home/scopatz/new.json + /home/scopatz/next.json As can be seen, the diff has three sections. 1. **The header** describes the meta-information about the histories, such as their file names, sessionids, and time stamps. 2. **The environment** section describes the differences in the environment when the histories were started or replayed. 3. **The commands** list this differences in the command themselves. For the commands, the input sequences are diff'd first, prior to the outputs being compared. In a terminal, this will appear in color, with the first history in red and the second one in green. ``gc`` action =============== Last, but certainly not least, the ``gc`` action is a manual hook into executing history garbage control. Since history has the potential for a lot of information to be stored, it is necessary to be able to clean out the cache every once in a while. Garbage control is launched automatically for every xonsh thread, but runs in the a background thread. The garbage collector only operates on unlocked history files. The action here allows you to manually start a new garbage collector, possibly with different criteria. Normally, the garbage collector uses the environment variable ``$XONSH_HISTORY_SIZE`` to determine the size and units of what should be allowed to remain on disk. By default, this is ``(8128, 'commands')``. This variable is usually a tuple or list of a number and a string, as seen here. 
However, you can also use a string with the same information, e.g. ``'8128 commands'``. On the command line, though, you just pass in two arguments to the ``--size`` option, a la ``--size 8128 commands``. The garbage collector accepts four canonical units: 1. ``'commands'`` is for limiting the number of past commands executed in the history files, 2. ``'files'`` is for specifying the total number of history files to keep, 3. ``'s'`` is for the number of seconds in the past that are allowed - which is effectively a timeout of the history files, and 4. ``'b'`` is for the number of bytes that are allowed on the file system for all history files to collectively consume. However, other units, aliases, and appropriate conversion functions have been implemented. This makes it easier to garbage collect based on human-friendly values. **GC Aliases:** .. code-block:: python {'commands': ['', 'c', 'cmd', 'cmds', 'command'], 'files': ['f'], 's': ['sec', 'second', 'seconds', 'm', 'min', 'mins', 'h', 'hr', 'hour', 'hours', 'd', 'day', 'days', 'mon', 'month', 'months', 'y', 'yr', 'yrs', 'year', 'years'], 'b': ['byte', 'bytes', 'kb', 'kilobyte', 'kilobytes', 'mb', 'meg', 'megs', 'megabyte', 'megabytes', 'gb', 'gig', 'gigs', 'gigabyte', 'gigabytes', 'tb', 'terabyte', 'terabytes'] } So all said and done, if you wanted to remove all history files older than a month, you could run the following command: .. code-block:: xonshcon >>> history gc --size 1 month History Indexing ======================= History object (``__xonsh_history__``) acts like a sequence that can be indexed in a special way that adds extra functionality. At the moment only history from the current session can be retrieved. Note that the most recent command is the last item in history. The index acts as a filter with two parts, command and argument, separated by comma. Based on the type of each part different filtering can be achieved, for the command part: - an int returns the command in that position. - a slice returns a list of commands. - a string returns the most recent command containing the string. for the argument part: - an int returns the argument of the command in that position. - a slice returns a part of the command based on the argument position. The argument part of the filter can be omitted but the command part is required. Command arguments are separated by white space. If the filtering produces only one result it is returned as a string else a list of strings is returned. examples: .. code-block:: xonshcon >>> echo mkdir with/a/huge/name/ mkdir with/a/huge/name >>> __xonsh_history__[-1, -1] 'with/a/huge/name/' >>> __xonsh_history__['mkdir'] 'echo mkdir with/a/huge/name' >>> __xonsh_history__[0, 1:] 'mkdir with/a/huge/name' Exciting Technical Detail: Lazy JSON ===================================== So now you know how to inspect, run, and remove history. But what *is* a history file exactly? While xonsh history files are JSON formatted, and they do have the structure indicated at the top of the page, that isn't their top-level structure. If you open one up, you'll see a bunch of hocus pocus before you get to anything real. Xonsh has implemented a generic indexing system (sizes, offsets, etc)for JSON files that lives inside of the file that it indexes. This is known as ``LazyJSON`` because it allows us to only read in the parts of a file that we need. For example, for replaying we only need to grab the input fields and so that helps us on I/O. 
For garbage collecting based on the number of commands, we can get this information from the index and don't need to read in any of the original data. The best part about this is that it is totally generic. Feel free to use ``xonsh.lazyjson`` yourself for things other than xonsh history! Of course, if you want to read in xonsh history, you should probably use the module. Exciting Technical Detail: Teeing and Pseudo Terminals ======================================================== Xonsh is able to capture all stdout and stderr transparently and responsively. For aliases, Python code, or xonsh code, this isn't a big deal. It is easy to redirect information flowing through ``sys.stdout`` and ``sys.stderr``. For subprocess commands, this is considerably harder. Storing stdout is disabled by default, but can be enabled by setting: ``$XONSH_STORE_STDOUT=True`` in your ``~/.xonshrc`` file. To be able to tee stdout and stderr and still have the terminal responsive, xonsh implements its own teeing pseudo-terminal on top of the Python standard library ``pty`` module. You can find this class in the ``xonsh.teepty`` module. Like with lazy JSON, this is independent from other parts of xonsh and can be used on its own. If you find this useful in other areas, please let us know! Sqlite History Backend ====================== Xonsh has a second built-in history backend powered by sqlite (other than the JSON version mentioned all above in this tutorial). It shares the same functionality as the JSON version in most ways, except it currently doesn't support ``history diff`` and ``history replay`` actions. The Sqlite history backend can provide a speed advantage in loading history into a just-started xonsh session. The JSON history backend may need to read potentially thousands of json files and the sqlite backend only reads one. Note that this does not affect startup time, but the amount of time before all history is available for searching. To use sqlite history backend, set ``$XONSH_HISTORY_BACKEND = 'sqlite'`` in your ``~/.xonshrc`` file. To switch back to JSON version, remove this line, or set it to ``'json'``. .. note:: SQLite history backend currently only supports ``commands`` as the unit in ``$XONSH_HISTORY_SIZE`` in its garbage collection. .. tip:: If you have `sqlite-web `_ installed, you can read the history easily with command: ``sqlite_web @$(history file)``. Fun ideas for history data ========================== Now that we have all of this history data, it seems like what we have here is just the tip of the iceberg! Here are some hopefully fun ideas that I think would be great to see implemented: * Basic statistic reports about command usage, timing, etc., * Global statistics by collecting anonymized histories from many people, * MCMC-based tab-completer for inputs, * and many more! Let us know if you'd be interested in working on any of these, inside or outside of xonsh. xonsh-0.6.0/docs/tutorial_history_backend.rst000066400000000000000000000230771320541242300214360ustar00rootroot00000000000000.. _tutorial_history_backend: **************************************** Tutorial: Write Your Own History Backend **************************************** One of the great things about xonsh is how easy it is to customize. In this tutorial, let's write our own history backend based on CouchDB. Start with a Minimal History Template ===================================== Here is a minimal history backend to start with: .. 
code-block:: python import collections from xonsh.history.base import History class CouchDBHistory(History): def append(self, cmd): pass def items(self): yield {'inp': 'couchdb in action', 'ts': 1464652800, 'ind': 0} def all_items(self): return self.items() def info(self): data = collections.OrderedDict() data['backend'] = 'couchdb' data['sessionid'] = str(self.sessionid) return data Go ahead and create the file ``~/.xonsh/history_couchdb.py`` and put the content above into it. Now we need to tell xonsh to use it as the history backend. To do this we need xonsh to be able to find our file and this ``CouchDBHistory`` class. Putting the following code into ``~/.xonshrc`` file can achieve this. .. code-block:: none import os.path import sys xonsh_ext_dir = os.path.expanduser('~/.xonsh') if os.path.isdir(xonsh_ext_dir): sys.path.append(xonsh_ext_dir) from history_couchdb import CouchDBHistory $XONSH_HISTORY_BACKEND = CouchDBHistory After starting a new xonsh session, try the following commands: .. code-block:: none $ history info backend: couchdb sessionid: 4198d678-1f0a-4ce3-aeb3-6d5517d7fc61 $ history -n 0: couchdb in action Woohoo! We just wrote a working history backend! Setup CouchDB ============= For this to work, we need CouchDB up and running. Go to `CouchDB website `_ and spend some time to install it. we will wait for you. Take your time. After installing, check that it's configured correctly with ``curl``: .. code-block:: none $ curl -i 'http://127.0.0.1:5984/' HTTP/1.1 200 OK Cache-Control: must-revalidate Content-Length: 91 Content-Type: application/json Date: Wed, 01 Feb 2017 13:54:14 GMT Server: CouchDB/2.0.0 (Erlang OTP/19) X-Couch-Request-ID: 025a195bcb X-CouchDB-Body-Time: 0 { "couchdb": "Welcome", "version": "2.0.0", "vendor": { "name": "The Apache Software Foundation" } } Okay, CouchDB is working. Now open ``_ with your browser, and create a new database called ``xonsh-history``. Initialize History Backend ========================== .. code-block:: python def __init__(self, **kwargs): super().__init__(**kwargs) self.gc = None self.sessionid = self._build_session_id() self.inps = [] self.rtns = [] self.outs = [] self.tss = [] def _build_session_id(self): ts = int(time.time() * 1000) return '{}-{}'.format(ts, str(uuid.uuid4())[:18]) In the ``__init__()`` method, let's initialize `Some Public Attributes `_ which xonsh uses in various places. Note that we use Unix timestamp and some random char to make ``self.sessionid`` unique and to keep the entries ordered in time. We will cover it with a bit more detail in the next section. Save History to CouchDB ======================= First, we need some helper functions to write docs to CouchDB. .. 
code-block:: python def _save_to_db(self, cmd): data = cmd.copy() data['inp'] = cmd['inp'].rstrip() if 'out' in data: data.pop('out') data['_id'] = self._build_doc_id() try: self._request_db_data('/xonsh-history', data=data) except Exception as e: msg = 'failed to save history: {}: {}'.format(e.__class__.__name__, e) print(msg, file=sys.stderr) def _build_doc_id(self): ts = int(time.time() * 1000) return '{}-{}-{}'.format(self.sessionid, ts, str(uuid.uuid4())[:18]) def _request_db_data(self, path, data=None): url = 'http://127.0.0.1:5984' + path headers = {'Content-Type': 'application/json'} if data is not None: resp = requests.post(url, json.dumps(data), headers=headers) else: headers = {'Content-Type': 'text/plain'} resp = requests.get(url, headers=headers) return resp ``_save_to_db()`` takes a dict as the input, which contains the information about a command that user input, and saves it into CouchDB. Instead of letting CouchDB provide us a random Document ID (i.e. the ``data['_id']`` in our code), we build it for ourselves. We use the Unix timestamp and UUID string for a second time. Prefixing this with ``self.sessionid``, we make history entries in order inside a single xonsh session too. So that we don't need any extra CouchDB's `Design Documents and Views `_ feature. Just with a bare ``_all_docs`` API, we can fetch history items back in order. Now that we have helper functions, let's update our ``append()`` method to do the real job - save history into DB. .. code-block:: python def append(self, cmd): self.inps.append(cmd['inp']) self.rtns.append(cmd['rtn']) self.outs.append(None) self.tss.append(cmd.get('ts', (None, None))) self._save_to_db(cmd) This method will be called by xonsh every time it runs a new command from user. Retrieve History Items ====================== .. code-block:: python def items(self): yield from self._get_db_items(self.sessionid) def all_items(self): yield from self._get_db_items() These two methods are responsible for getting history items for the current xonsh session and all historical sessions respectively. And here is our helper method to get docs from DB: .. code-block:: python def _get_db_items(self, sessionid=None): path = '/xonsh-history/_all_docs?include_docs=true' if sessionid is not None: path += '&start_key="{0}"&end_key="{0}-z"'.format(sessionid) try: r = self._request_db_data(path) except Exception as e: msg = 'error when query db: {}: {}'.format(e.__class__.__name__, e) print(msg, file=sys.stderr) return data = json.loads(r.text) for item in data['rows']: cmd = item['doc'].copy() cmd['ts'] = cmd['ts'][0] yield cmd The `try-except` is here so that we're safe when something bad happens, like CouchDB is not running properly, etc. Try Out Our New History Backend =============================== That's it. We've finished our new history backend. The ``import`` part is skipped, but I think you can figure it out though. Note that in our code an extra Python library is used: ``requests``. You could easily install it with ``pip`` or other library managers. You can find the full code here: ``_ Let's start a new xonsh session: .. code-block:: none $ history info backend: couchdb sessionid: 1486035364166-3bb78606-dd59-4679 $ ls Applications Desktop Documents Downloads $ echo hi hi Start a second xonsh session: .. 
code-block:: none $ history info backend: couchdb sessionid: 1486035430658-6f81cd5d-b6d4-4f6a $ echo new new $ history show all -nt 0:(2017-02-02 19:36) history info 1:(2017-02-02 19:36) ls 2:(2017-02-02 19:37) echo hi 3:(2017-02-02 19:37) history info 4:(2017-02-02 19:37) echo new $ history -nt 0:(2017-02-02 19:37) history info 1:(2017-02-02 19:37) echo new 2:(2017-02-02 19:37) history show all -nt We're not missing any history, so it looks like we're good to go! History Garbage Collection ========================== For the built-in history backends ``json`` and ``sqlite``, garbage collection is triggered when xonsh is started or when the user runs ``history gc``. History items outside of the range defined by `$XONSH_HISTORY_SIZE `_ are deleted. .. code-block:: python class History: def run_gc(self, size=None, blocking=True): """Run the garbage collector. Parameters ---------- size: None or tuple of a int and a string Determines the size and units of what would be allowed to remain. blocking: bool If set blocking, then wait until gc action finished. """ pass The History public method ``run_gc()`` is for this purpose. Our ``CouchDBHistory`` doesn't define this method, thus it inherits from its parent ``History``, which does nothing. We will leave the GC implementation as an exercise. Other History Options ===================== There are some environment variables that can change the behavior of the history backend, such as `$HISTCONTROL `_, `$XONSH_HISTORY_SIZE `_, `$XONSH_STORE_STDOUT `_, etc. We should implement these ENVs in our CouchDB backend. Luckily, it's not a hard thing. We'll leave the implementation of those features to you, but you can see how it's handled for `the sqlite backend <_modules/xonsh/history/sqlite.html#SqliteHistory>`_. Wrap Up ======= This is a barebones implementation but hopefully it will give you a sense of how you can customize xonsh's history backend for your own needs! xonsh-0.6.0/docs/tutorial_macros.rst000066400000000000000000000422731320541242300175510ustar00rootroot00000000000000.. _tutorial_macros: ************************************ Tutorial: Macros ************************************ Bust out your DSLRs, people. It is time to closely examine macros! What are macro instructions? ============================ In generic terms, a programming macro is a special kind of syntax that replaces a smaller amount of code with a larger expression, syntax tree, code object, etc. after the macro has been evaluated. In practice, macros pause the normal parsing and evaluation of the code that they contain. This is so that they can perform their expansion with their complete inputs. Roughly, the algorithm for executing a macro is: 1. Macro start, pause or skip normal parsing 2. Gather macro inputs as strings 3. Evaluate macro with inputs 4. Resume normal parsing and execution. Is this meta-programming? You betcha! When and where are macros used? =============================== Macros are a practicality-beats-purity feature of many programming languages. Because they allow you to break out of the normal parsing cycle, depending on the language, you can do some truly wild things with them. However, macros are really there to reduce the amount of boilerplate code that users and developers have to write. In C and C++ (and Fortran), the C Preprocessor ``cpp`` is a macro evaluation engine. For example, every time you see an ``#include`` or ``#ifdef``, this is the ``cpp`` macro system in action.
In these languages, the macros are technically outside of the definition of the language at hand. Furthermore, because ``cpp`` must function with only a single pass through the code, the sorts of macros that can be written with ``cpp`` are relatively simple. Rust, on the other hand, has a first-class notion of macros that look and feel a lot like normal functions. Macros in Rust are capable of pulling off type information from their arguments and preventing their return values from being consumed. Other languages like Lisp, Forth, and Julia also provide their macro systems. Even restructured text (rST) directives could be considered macros. Haskell and other more purely functional languages do not need macros (since evaluation is lazy anyway), and so do not have them. If these seem unfamiliar to the Python world, note that Jupyter and IPython magics ``%`` and ``%%`` are macros! Function Macros =============== Xonsh supports Rust-like macros that are based on normal Python callables. Macros do not require a special definition in xonsh. However, like in Rust, they must be called with an exclamation point ``!`` between the callable and the opening parentheses ``(``. Macro arguments are split on the top-level commas ``,``, like normal Python functions. For example, say we have the functions ``f`` and ``g``. We could perform a macro call on these functions with the following: .. code-block:: xonsh # No macro args f!() # Single arg f!(x) g!([y, 43, 44]) # Two args f!(x, x + 42) g!([y, 43, 44], f!(z)) Not so bad, right? So what actually happens to the arguments when used in a macro call? Well, that depends on the definition of the function. In particular, each argument in the macro call is matched up with the corresponding parameter annotation in the callable's signature. For example, say we have an ``identity()`` function that annotates its sole argument as a string: .. code-block:: xonsh def identity(x : str): return x If we call this normally, we'll just get whatever object we put in back out, even if that object is not a string: .. code-block:: xonshcon >>> identity('me') 'me' >>> identity(42) 42 >>> identity(identity) However, if we perform macro calls instead we are now guaranteed to get the string of the source code that is in the macro call: .. code-block:: xonshcon >>> identity!('me') "'me'" >>> identity!(42) '42' >>> identity!(identity) 'identity' Also note that each macro argument is stripped prior to passing it to the macro itself. This is done for consistency. .. code-block:: xonshcon >>> identity!(42) '42' >>> identity!( 42 ) '42' Importantly, because we are capturing and not evaluating the source code, a macro call can contain input that is beyond the usual syntax. In fact, that is sort of the whole point. Here are some cases to start your gears turning: .. code-block:: xonshcon >>> identity!(import os) 'import os' >>> identity!(if True: >>> pass) 'if True:\n pass' >>> identity!(std::vector x = {"yoo", "hoo"}) 'std::vector x = {"yoo", "hoo"}' You do you, ``identity()``. Calling Function Macros ======================= There are a couple of points to consider when calling macros. The first is that passing in arguments by name will not behave as expected. This is because the ``=`` is captured by the macro itself. Using the ``identity()`` function from above: .. code-block:: xonshcon >>> identity!(x=42) 'x=42' Performing a macro call uses only argument order to pass in values. Additionally, macro calls split arguments only on the top-level commas. 
The top-level commas are not included in any argument. This behaves analogously to normal Python function calls. For instance, say we have the following ``g()`` function that accepts two arguments: .. code-block:: xonsh def g(x : str, y : str): print('x = ' + repr(x)) print('y = ' + repr(y)) Then you can see the splitting and stripping behavior on each macro argument: .. code-block:: xonshcon >>> g!(42, 65) x = '42' y = '65' >>> g!(42, 65,) x = '42' y = '65' >>> g!( 42, 65, ) x = '42' y = '65' >>> g!(['x', 'y'], {1: 1, 2: 3}) x = "['x', 'y']" y = '{1: 1, 2: 3}' Sometimes you may only want to pass in the first few arguments as macro arguments and you want the rest to be treated as normal Python arguments. By convention, xonsh's macro caller will look for a lone ``*`` argument in order to split the macro arguments and the regular arguments. So for example: .. code-block:: xonshcon >>> g!(42, *, 65) x = '42' y = 65 >>> g!(42, *, y=65) x = '42' y = 65 In the above, note that ``x`` is still captured as a macro argument. However, everything after the ``*``, namely ``y``, is evaluated as if it were passed in to a normal function call. This can be useful for large interfaces where only a handful of args are expected as macro arguments. Hopefully, now you see the big picture. Writing Function Macros ======================= Though any function (or callable) can be used as a macro, this functionality is probably most useful if the function was *designed* as a macro. There are two main aspects of macro design to consider: argument annotations and call site execution context. Macro Annotations ----------------------------------- There are six kinds of annotations that macros are able to interpret: .. list-table:: Kinds of Annotation :header-rows: 1 * - Category - Object - Flags - Modes - Returns * - String - ``str`` - ``'s'``, ``'str'``, or ``'string'`` - - Source code of argument as string, *default*. * - AST - ``ast.AST`` - ``'a'`` or ``'ast'`` - ``'eval'`` (default), ``'exec'``, or ``'single'`` - Abstract syntax tree of argument. * - Code - ``types.CodeType`` or ``compile`` - ``'c'``, ``'code'``, or ``'compile'`` - ``'eval'`` (default), ``'exec'``, or ``'single'`` - Compiled code object of argument. * - Eval - ``eval`` or ``None`` - ``'v'`` or ``'eval'`` - - Evaluation of the argument. * - Exec - ``exec`` - ``'x'`` or ``'exec'`` - ``'exec'`` (default) or ``'single'`` - Execs the argument and returns None. * - Type - ``type`` - ``'t'`` or ``'type'`` - - The type of the argument after it has been evaluated. These annotations allow you to hook into whichever stage of the compilation you desire. It is important to note that the string form of the arguments is split and stripped (as described above) prior to conversion to the annotation type. Each argument may be annotated with its own individual type. Annotations may be provided as either objects or as the string flags seen in the above table. String flags are case-insensitive. If an argument does not have an annotation, ``str`` is selected. This makes the macro function call behave like the subprocess macros and context manager macros below. For example, .. code-block:: xonsh def func(a, b : 'AST', c : compile): pass In a macro call of ``func!()``, * ``a`` will be evaluated with ``str`` since no annotation was provided, * ``b`` will be parsed into a syntax tree node, and * ``c`` will be compiled into a code object since the builtin ``compile()`` function was used as the annotation.
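To make the annotation table concrete, here is a minimal sketch of our own (the ``inspect_args`` function below is purely illustrative and is not part of xonsh) that checks what each annotated parameter receives, based only on the behavior described above:

.. code-block:: xonsh

    import ast
    import types

    # Each parameter's annotation selects the form its macro argument
    # arrives in: plain source string, parsed AST node, or code object.
    def inspect_args(a, b : 'ast', c : compile):
        print('a is the source string:', isinstance(a, str))
        print('b is an AST node:      ', isinstance(b, ast.AST))
        print('c is a code object:    ', isinstance(c, types.CodeType))

If the annotations behave as described in the table, a macro call such as ``inspect_args!(6 * 7, 6 * 7, 6 * 7)`` should print ``True`` for all three checks.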
Additionally, certain kinds of annotations have different modes that affect the parsing, compilation, and execution of their argument. While a sensible default is provided, you may also supply your own. This is done by annotating with a (kind, mode) tuple. The first element can be any valid object or flag. The second element must be a corresponding mode as a string. For instance, .. code-block:: xonsh def gunc(d : (exec, 'single'), e : ('c', 'exec')): pass Thus in a macro call of ``gunc!()``, * ``d`` will be exec'd in single-mode (rather than exec-mode), and * ``e`` will be compiled in exec-mode (rather than eval-mode). For more information on the differences between the exec, eval, and single modes, please see the Python documentation. Macro Function Execution Context -------------------------------- Equally important as having the macro arguments is knowing the execution context of the macro call itself. Rather than mucking around with frames, macros provide both the globals and locals of the call site. These are accessible as the ``macro_globals`` and ``macro_locals`` attributes of the macro function itself while the macro is being executed. For example, consider a macro which replaces all literal ``1`` digits with the literal ``2``, evaluates the modification, and returns the results. To eval, the macro will need to pull off its globals and locals: .. code-block:: xonsh def one_to_two(x : str): s = x.replace('1', '2') glbs = one_to_two.macro_globals locs = one_to_two.macro_locals return eval(s, glbs, locs) Running this with a few different inputs, we see: .. code-block:: xonshcon >>> one_to_two!(1 + 1) 4 >>> one_to_two!(11) 22 >>> x = 1 >>> one_to_two!(x + 1) 3 Of course, many other more sophisticated options are available depending on the use case. Subprocess Macros ================= Like with function macros above, subprocess macros allow you to pause the parser until you are ready to exit subprocess mode. Unlike function macros, there is only a single macro argument and its macro type is always a string. This is because it (usually) doesn't make sense to pass non-string arguments to a command. And when it does, there is the ``@()`` syntax! In the simplest case, subprocess macros look like the equivalent of their function macro counterparts: .. code-block:: xonshcon >>> echo! I'm Mr. Meeseeks. I'm Mr. Meeseeks. Again, note that everything to the right of the ``!`` is passed down to the ``echo`` command as the final, single argument. This is space preserving, like wrapping with quotes: .. code-block:: xonshcon # normally, xonsh will split on whitespace, # so each argument is passed in separately >>> echo x y z x y z # usually space can be preserved with quotes >>> echo "x y z" x y z # however, subprocess macros will pause and then strip # all input after the exclamation point >>> echo! x y z x y z However, the macro will pause everything, including path and environment variable expansion, that might be present even with quotes. For example: .. code-block:: xonshcon # without macros, environment variables are expanded >>> echo $USER lou # inside of a macro, all additional munging is turned off. >>> echo! $USER $USER Everything to the right of the exclamation point, except the leading and trailing whitespace, is passed into the command directly as written. This allows certain commands to function in cases where quoting or piping might be more burdensome. The ``timeit`` command is a great example where simple syntax will often fail, but will be easily executable as a macro: ..
code-block:: xonshcon # fails normally >>> timeit "hello mom " + "and dad" xonsh: subprocess mode: command not found: hello # macro success! >>> timeit! "hello mom " + "and dad" 100000000 loops, best of 3: 8.24 ns per loop All expressions to the left of the exclamation point are passed in normally and are not treated as the special macro argument. This allows the mixing of simple and complex command line arguments. For example, sometimes you might really want to write some code in another language: .. code-block:: xonshcon # don't worry, it is temporary! >>> bash -c ! export var=42; echo $var 42 # that's better! >>> python -c ! import os; print(os.path.abspath("/")) / Compared to function macros, subprocess macros are relatively simple. However, they can still be very expressive! Context Manager Macros ====================== Now that we have seen what life can be like with macro expressions, it is time to introduce the macro statement: ``with!``. With-bang provides macros on top of existing Python context managers. This provides both anonymous and onymous blocks in xonsh. The syntax for context manager macros is the same as the usual with-statement in Python, but with an additional exclamation point between the ``with`` word and the first context manager expression. As a simple example, .. code-block:: xonsh with! x: y = 10 print(y) In the above, everything to the left of the colon (``x``) will be evaluated normally. However, the body will not be executed and ``y`` will not be defined or printed. In this case, the body will be attached to x as a string, along with globals and locals, prior to the body even being entered. The body is then replaced with a ``pass`` statement. You can think of the above as being transformed into the following: .. code-block:: xonsh x.macro_block = 'y = 10\nprint(y)\n' x.macro_globals = globals() x.macro_locals = locals() with! x: pass There are a few important things about this to notice: 1. The ``macro_block`` string is dedented, 2. The ``macro_*`` attributes are set *before* the context manager is entered so the ``__enter__()`` method may use them, and 3. The ``macro_*`` attributes are not cleaned up automatically so that the context manager may use them even after the object is exited. The ``__exit__()`` method may clean up these attributes, if desired. By default, macro blocks are returned as a string. However, like with function macro arguments, the kind of ``macro_block`` is determined by a special annotation. This annotation is given via the ``__xonsh_block__`` attribute on the context manager itself. This allows the block to be interpreted as an AST, byte compiled, etc. The convenient part about this syntax is that the macro block is only exited once it sees a dedent back to the level of the ``with!``. All other code is indiscriminately skipped! This allows you to write blocks of code in languages other than xonsh without pause. For example, consider a simple XML macro context manager. This will return the parsed XML tree from a macro block. The context manager itself can be written as: .. code-block:: python import xml.etree.ElementTree as ET class XmlBlock: # make sure the macro_block comes back as a string __xonsh_block__ = str def __enter__(self): # parse and return the block on entry root = ET.fromstring(self.macro_block) return root def __exit__(self, *exc): # no reason to keep these attributes around. del self.macro_block, self.macro_globals, self.macro_locals The above class may then be used in a with-bang as follows: .. 
code-block:: xonsh with! XmlBlock() as tree: <note> <to>You</to> <from>Xonsh</from> <heading>Don't You Want Me, Baby</heading> <body> You know I don't believe you when you say that you don't need me. </body> </note> And if you run this, you'll see that the ``tree`` object really is a parsed XML object. .. code-block:: xonshcon >>> print(tree.tag) note So in roughly eight lines of xonsh code, you can seamlessly interface with another, vastly different language. The possibilities for this are not limited to just markup languages or other party tricks. You could build a remote execution interface via SSH, RPC, dask / distributed, etc. The real benefit of context manager macros is that they allow you to select when, where, and what code is executed as a part of the xonsh language itself. The power is there; use it without reservation! Take Away ========= Hopefully, at this point, you see that a few well-placed macros can be extremely convenient and valuable to any project. xonsh-0.6.0/docs/tutorial_ptk.rst000066400000000000000000000114151320541242300170550ustar00rootroot00000000000000.. _tutorial_ptk: *********************************************** Tutorial: ``prompt_toolkit`` custom keybindings *********************************************** Are you really jonesing for some special keybindings? We can help you out with that. The first time is free and so is every other time! .. warning:: This tutorial will let you hook directly into the ``prompt_toolkit`` keybinding manager. It will not stop you from rendering your prompt completely unusable, so tread lightly. Overview ======== The ``prompt_toolkit`` shell has a registry for handling custom keybindings. You may not like the default keybindings in xonsh, or you may want to add a new key binding. We'll walk you through how to do this using ``prompt_toolkit`` tools to define keybindings and warn you about potential pitfalls. All of the code below can be entered into your `xonshrc `_ Control characters ================== We can't and won't stop you from doing what you want, but in the interest of a functioning shell, you probably shouldn't mess with the following keystrokes. Some of them are `ASCII control characters `_ and *really* shouldn't be used. The others are used by xonsh and will result in some loss of functionality (unless you take the time to rebind them elsewhere). .. list-table:: :widths: 2 2 2 :header-rows: 1 * - Keystroke - ASCII control representation - Default command * - ``Control J`` - ```` - Run command * - ``Control I`` - ```` - Indent, autocomplete * - ``Control R`` - - Backwards history search * - ``Control Z`` - - SIGSTOP current job * - ``Control C`` - - SIGINT current job Useful imports ============== There are a few useful ``prompt_toolkit`` tools that will help us create better bindings:: from prompt_toolkit.keys import Keys from prompt_toolkit.filters import Condition, EmacsInsertMode, ViInsertMode Custom keyload function ======================= We need our additional keybindings to load after the shell is initialized, so we define a function that contains all of the custom keybindings and decorate it with the appropriate event, in this case ``on_ptk_create``. We'll start with a toy example that just inserts the text "hi" into the current line of the prompt:: @events.on_ptk_create def custom_keybindings(bindings, **kw): handler = bindings.registry.add_binding @handler(Keys.ControlW) def say_hi(event): event.current_buffer.insert_text('hi') Put that in your `xonshrc `_, restart xonsh and then see if pressing ``Ctrl-w`` does anything (it should!) What commands can keybindings run?
================================== Pretty much anything! Since we're defining these commands after xonsh has started up, we can create keybinding events that run subprocess commands with hardly any effort at all. If we wanted to, say, have a command that runs ``ls -l`` in the current directory:: @handler(Keys.ControlP) def run_ls(event): ls -l event.cli.renderer.erase() .. note:: The ``event.cli.renderer.erase()`` is required to redraw the prompt after asking for a separate command to send information to ``STDOUT`` Restrict actions with filters ============================= Often we want a key command to only work if certain conditions are met. For instance, the ```` key in xonsh brings up the completions menu, but then it also cycles through the available completions. We use filters to create this behavior. A few helpful filters are included with ``prompt_toolkit``, like ``ViInsertMode`` and ``EmacsInsertMode``, which return ``True`` when the respective insert mode is active. But it's also easy to create our own filters that take advantage of xonsh's beautiful strangeness. Suppose we want a filter to restrict a given command to run only when there are fewer than ten files in a given directory. We just need a function that returns a Bool that matches that requirement and then we decorate it! And remember, those functions can be in xonsh-language, not just pure Python:: @Condition def lt_ten_files(cli): return len(g`*`) < 10 .. note:: See `the tutorial section on globbing `_ for more globbing options. Now that the condition is defined, we can pass it as a ``filter`` keyword to a keybinding definition:: @handler(Keys.ControlL, filter=lt_ten_files) def ls_if_lt_ten(event): ls -l event.cli.renderer.erase() With both of those in your ``.xonshrc``, pressing ``Control L`` will list the contents of your current directory if there are fewer than 10 items in it. Useful? Debatable. Powerful? Yes. xonsh-0.6.0/docs/tutorial_xontrib.rst000066400000000000000000000202151320541242300177420ustar00rootroot00000000000000.. _tutorial_xontrib: ************************************ Tutorial: Extensions (Xontribs) ************************************ Take a deep breath and prepare for some serious Show & Tell; it's time to learn about xonsh extensions! Overview ================================ Xontributions, or ``xontribs``, are a set of tools and conventions for extending the functionality of xonsh beyond what is provided by default. This allows 3rd party developers and users to improve their xonsh experience without having to go through the xonsh development and release cycle. Many tools and libraries have extension capabilities. Here are some that we took inspiration from for xonsh: * `Sphinx `_: Extensions are just Python modules, bundles some extensions with the main package, interface is a list of string names. * `Oh My Zsh `_: Centralized registry, autoloading, and for a shell. * `ESLint `_: Ability to use language package manager to install/remove extensions. Structure ========== Xontribs are modules written in either xonsh (``*.xsh``) or Python (``*.py``). Normally, these are stored and found in an `implicit namespace package `_ called ``xontrib``. However, xontribs may be placed in any package or directory that is on the ``$PYTHONPATH``. If a module is in the ``xontrib`` namespace package, it can be referred to just by its module name. If a module is in any other package, then it must be referred to by its full package path, separated by ``.`` like you would in an import statement. 
Of course, a module in ``xontrib`` may be referred to with the full ``xontrib.myext``. But just calling it ``myext`` is a lot shorter and one of the main advantages of placing an extension in the ``xontrib`` namespace package. Here is a sample file system layout and what the xontrib names would be:: |- xontrib/ |- javert.xsh # "javert", because in xontrib |- your.py # "your", |- eyes/ |- __init__.py |- scream.xsh # "eyes.scream", because eyes is in xontrib |- mypkg/ |- __init__.py # a regular package with an init file |- other.py # not a xontrib |- show.py # "mypkg.show", full module name |- tell.xsh # "mypkg.tell", full module name |- subpkg/ |- __init__.py |- done.py # "mypkg.subpkg.done", full module name You can also use `cookiecutter `_ with the `xontrib template `_ to easily create the layout for your xontrib package. Loading Xontribs ================ Xontribs may be loaded in a few different ways: from the config file, dynamically at runtime with the ``xontrib`` command, or by importing the module normally. Since these extensions are just Python modules, by default, they cannot be unloaded (easily). .. note:: When a xontrib is loaded from a config file or via the xontrib command, its public variables are placed in the current execution context unless ``__all__`` is defined, just like in regular Python modules. Loading xontribs in the config file is as simple as adding a list of string xontrib names to the top-level ``"xontribs"`` key. For example, the following would load the ``"mpl"`` and ``"example"`` xontribs. .. code:: json {"xontribs": ["mpl", "example"]} Extensions may also be loaded via the ``xontrib`` command, which is a xonsh default alias. This command may be run from anywhere in a xonshrc file or at any point after xonsh has started up. Loading is the default action of the ``xontrib`` command. Thus the following methods for loading via this command are equivalent: .. code-block:: xonsh xontrib myext mpl mypkg.show xontrib load myext mpl mypkg.show Loading the same xontrib multiple times does not have any effect after the first. Xontribs are simply Python modules, and therefore follow the same caching rules. So by the same token, you can also import them normally. Of course, you have to use the full module name to import a xontrib: .. code-block:: python import xontrib.mpl from xontrib import myext from mypkg.show import * Listing Known Xontribs ====================== In addition to loading extensions, the ``xontrib`` command also allows you to list the known xontribs. This command will report whether known xontribs are installed and if they are loaded in the current session. To display this information, pass the ``list`` action to the ``xontrib`` command: .. code-block:: xonshcon >>> xontrib list mpl installed not-loaded myext not-installed not-loaded By default, this will display information for all known xontribs. However, you can restrict this to a set of names passed in on the command line. .. code-block:: xonshcon >>> xontrib list mpl mpl installed not-loaded For programmatic access, you may also have this command print a JSON formatted string: .. code-block:: xonshcon >>> xontrib list --json mpl {"mpl": {"loaded": false, "installed": true}} Authoring Xontribs ========================= Writing a xontrib is as easy as writing a xonsh or Python file and sticking it in a directory named ``xontrib/``. However, please do not place an ``__init__.py`` in the ``xontrib/`` directory. It is an *implicit namespace package* and should not have one. 
See `PEP 420 `_ for more details. .. warning:: Do not place an ``__init__.py`` in the ``xontrib/`` directory! If you plan on using ``*.xsh`` files in your xontrib, then you'll have to add some hooks to distutils, setuptools, pip, etc. to install these files. Try adding entries like the following to your ``setup()`` call in your ``setup.py``: .. code-block:: python try: from setuptools import setup except ImportError: from distutils.core import setup setup(..., packages=[..., 'xontrib'], package_dir={..., 'xontrib': 'xontrib'}, package_data={..., 'xontrib': ['*.xsh']}, ...) Something similar can be done for any non-xontrib package or sub-package that needs to distribute ``*.xsh`` files. Tell Us About Your Xontrib! =========================== We request that you register your xontrib with us. We think that this is a good idea, in general, because then: * Your xontrib will show up as an extension on the xonsh website, * It will appear in the ``xontrib list`` command, and * It will show up in ``xonfig wizard``. All of this lets users know that your xontrib is out there, ready to be used. Of course, you're under no obligation to register your xontrib. Users will still be able to load your xontrib, as long as they have it installed. To register a xontrib, add an entry to `the xontribs.json file `_ in the main xonsh repository. A pull request is probably best, but if you are having trouble figuring it out please contact one of the xonsh devs with the relevant information. This is a JSON file with two top-level keys: ``"xontribs"`` and ``"packages"``. The ``"xontribs"`` key is a list of dictionaries that describe the xontrib modules themselves. Such entries have the following structure: .. code-block:: json {"xontribs": [ {"name": "xontrib-name", "package": "package-name", "url": "http://example.com/api/xontrib", "description": ["Textual description as string or list of strings ", "enabling long content to be split over many lines."] } ] } The ``"packages"`` key, on the other hand, is a dict mapping package names (associated with the xontrib entries) to metadata about the package. Package entries have the following structure: .. code-block:: json {"packages": { "package-name": { "license": "WTFPL v1.1", "url": "http://example", "install": { "conda": "conda install package-name", "pip": "xpip install package-name"} } } } Note that you can have as many entries in the ``"install"`` dict as you want. Also, the keys are arbitrary labels, so feel free to pick whatever you want. Go forth! xonsh-0.6.0/docs/windows.rst000066400000000000000000000131551320541242300160310ustar00rootroot00000000000000========================== Windows Guide ========================== Installation ================ The easy way ---------------- The easiest way to install xonsh on Windows is through the Anaconda Python Distribution and the conda package manager. .. note:: Be sure to install a version with Python 3.4 or later. Xonsh is not yet supported on legacy Python (2.7). Install xonsh with the following command: .. code-block:: bat > conda config --add channels conda-forge > conda install xonsh This will install xonsh and all the recommended dependencies. Next, run xonsh: .. code-block:: bat > xonsh snail@home ~ $ Install from source ------------------- To install xonsh from source on Windows, first install `Python v3.4+`_ from http://python.org. Remember to select "Add python to PATH" during installation. Next, install the prompt_toolkit dependency via ``pip``: ..
code-block:: bat > pip install prompt-toolkit While prompt-toolkit is considered an optional dependency, it is the recommended alternative to pyreadline for Windows users. For Windows, it is recommended to use a replacement console emulator. Good choices are `cmder`_ or `conemu`_. Download the latest `xonsh-master.zip`_ from GitHub and unzip it to ``xonsh-master``. Now install xonsh: .. code-block:: bat > cd xonsh-master > python setup.py install Next, run xonsh: .. code-block:: bat > xonsh snail@home ~ $ .. _Python v3.4+: https://www.python.org/downloads/windows/ .. _xonsh-master.zip: https://github.com/xonsh/xonsh/archive/master.zip .. _cmder: http://cmder.net/ .. _conemu: https://conemu.github.io/ Usage ================ Color style -------------------------------- The dark red and blue colors are completely unreadable in Windows' default terminal. .. image:: _static/intensify-colors-on-win-false.png :width: 396 px :alt: intensify-colors-win-false :align: center To give new users the best experience, Xonsh automatically replaces some of the dark colors with more readable alternatives (e.g. blue becomes cyan). The behavior is controlled with the ``$INTENSIFY_COLORS_ON_WIN`` environment variable. .. image:: _static/intensify-colors-on-win-true.png :width: 399 px :alt: intensify-colors-win-true :align: center It is possible to configure the Windows console with readable default colors, but it is tedious to do manually. Here is a registry file which will do it automatically and set some good default colors. Simply download and run the registry file. * `console_colors.reg`_ .. note:: This may not always work right away, especially if you have already fiddled around with the color settings. Try to delete any subfolders under ``HKCU/console/`` in the registry. Also, shortcut files store their own color schemes and ignore registry settings. With better colors configured, ``$INTENSIFY_COLORS_ON_WIN`` should be set to ``False``, and the default prompt can be changed to match how it looks on POSIX and Mac. You can do this by adding the following to the xonsh run control file ``.xonshrc``: .. code-block:: xonshcon $INTENSIFY_COLORS_ON_WIN = False $PROMPT = $PROMPT.replace('INTENSE_','').replace('CYAN','BLUE') With everything set up, the console will look like this: .. image:: _static/better_colors_windows.png :width: 366 px :alt: better_colors_windows :align: center .. _console_colors.reg: http://xon.sh/_static/console_colors.reg Avoid locking the working directory ----------------------------------- Python (like other processes on Windows) locks the current working directory so it can't be deleted or renamed. ``cmd.exe`` has this behaviour as well, but it is quite annoying for a shell. The ``free_cwd`` xontrib (add-on) for xonsh solves some of this problem. It works by hooking the prompt to reset the current working directory to the root drive folder whenever the shell is idle. It only works with the prompt-toolkit back-end. To enable that behaviour run the following: .. code-block:: xonshcon >>> xontrib load free_cwd Add this line to your ``~/.xonshrc`` file to have it always enabled. Name space conflicts -------------------- Due to ambiguity with the Python ``dir`` builtin, to list the current directory via the ``cmd.exe`` builtin you must explicitly request the ``.``, like this: .. code-block:: xonshcon >>> dir . Volume in drive C is Windows Volume Serial Number is 30E8-8B86 Directory of C:\Users\snail\xonsh 2015-05-12 03:04 . 2015-05-12 03:04 ..
2015-05-01 01:31 xonsh 0 File(s) 0 bytes 3 Dir(s) 11,008,000,000 bytes free Many people create a ``d`` alias for the ``dir`` command to save typing and avoid the ambiguity altogether: .. code-block:: xonshcon >>> aliases['d'] = ['cmd', '/c', 'dir'] You can add aliases to your ``~/.xonshrc`` to have them always available when xonsh starts. Unicode support for Windows ---------------------------- Python's UTF-8 unicode is not compatible with the default shell 'cmd.exe' on Windows. The package ``win_unicode_console`` fixes this. Xonsh will use ``win_unicode_console`` if it is installed. This can be disabled/enabled with the ``$WIN_UNICODE_CONSOLE`` environment variable. .. note:: Even with unicode support enabled, the symbols available will depend on the font used in cmd.exe. The package ``win_unicode_console`` can be installed along with xonsh by using the package name ``xonsh[win]`` or separately using pip or conda. .. code-block:: bat > pip install win_unicode_console .. code-block:: bat > conda install --channel xonsh win_unicode_console xonsh-0.6.0/docs/xonshconfig.json000066400000000000000000000003231320541242300170160ustar00rootroot00000000000000{"env": { "EDITOR": "xo", "PAGER": "more" }, "xontribs": ["mpl"], "foreign_shells": [ {"shell": "bash", "login": true, "extra_args": ["--rcfile", "/path/to/rcfile"] }, {"shell": "zsh"} ] }xonsh-0.6.0/docs/xonshconfig.rst000066400000000000000000000112041320541242300166550ustar00rootroot00000000000000Static Configuration File ========================= In addition to the run control file, xonsh allows you to have a static config file. This JSON-formatted file lives at ``$XONSH_CONFIG_DIR/config.json``, which is normally ``~/.config/xonsh/config.json``. The purpose of this file is to allow users to set runtime parameters *before* anything else happens. This includes loading data from various foreign shells or setting critical environment variables. This is a dictionary or JSON object at its top-level. It has the following top-level keys. All top-level keys are optional. ``env`` -------- This is a simple string-keyed dictionary that lets you set environment variables. For example, .. code:: json {"env": { "EDITOR": "xo", "PAGER": "more" } } ``xontribs`` ------------ This is a list (JSON array) of xontrib names (strings) to load prior to loading any run control files. For example, .. code:: json {"xontribs": ["mpl", "example"]} ``foreign_shells`` -------------------- This is a list (JSON Array) of dicts (JSON objects) that represent the foreign shells to inspect for extra start up information, such as environment variables, aliases, and foreign shell functions. The suite of data gathered may be expanded in the future. Each shell dictionary is unpacked and passed into the ``xonsh.foreign_shells.foreign_shell_data()`` function. Thus, these dictionaries have the following structure: :shell: *str, required* - The name or path of the shell, such as "bash" or "/bin/sh". :interactive: *bool, optional* - Whether the shell should be run in interactive mode. ``default=true`` :login: *bool, optional* - Whether the shell should be a login shell. ``default=false`` :envcmd: *str, optional* - The command to generate environment output with. ``default="env"`` :aliascmd: *str, optional* - The command to generate alias output with. ``default="alias"`` :extra_args: *list of str, optional* - Additional command line options to pass into the shell. ``default=[]`` :currenv: *dict or null, optional* - Manual override for the current environment.
``default=null`` :safe: *bool, optional* - Flag for whether or not to safely handle exceptions and other errors. ``default=true`` :prevcmd: *str, optional* - An additional command or script to run before anything else, useful for sourcing and other commands that may require environment recovery. ``default=''`` :postcmd: *str, optional* - A command to run after everything else, useful for cleaning up any damage that the ``prevcmd`` may have caused. ``default=''`` :funcscmd: *str or None, optional* - This is a command or script that can be used to determine the names and locations of any functions that are native to the foreign shell. This command should print *only* a whitespace-separated sequence of pairs of function names & the filenames where those functions are defined. If this is None (null), then a default script will be looked up based on the shell name. Callable wrappers for these functions will be returned in the aliases dictionary. ``default=null`` :sourcer: *str or None, optional* - How to source a foreign shell file for purposes of calling functions in that shell. If this is None, a default value will be looked up based on the shell name. ``default=null`` :runcmd: *str or None, optional* - Command line switches to use when running the script, such as ``-c`` for Bash and ``/C`` for cmd.exe. ``default=null`` :seterrprevcmd: *str or None, optional* - Command that enables exit-on-error for the shell before all other commands. For example, this is "set -e" in Bash. To disable this exit-on-error behavior, simply pass in an empty string. ``default=null`` :seterrpostcmd: *str or None, optional* - Command that enables exit-on-error for the shell after all other commands. For example, this is "if errorlevel 1 exit 1" in cmd.exe. To disable this exit-on-error behavior, simply pass in an empty string. ``default=null`` Some examples can be seen below: .. code:: json # load bash then zsh {"foreign_shells": [ {"shell": "/bin/bash"}, {"shell": "zsh"} ] } # load bash as a login shell with custom rcfile {"foreign_shells": [ {"shell": "bash", "login": true, "extra_args": ["--rcfile", "/path/to/rcfile"] } ] } # disable all foreign shell loading via an empty list {"foreign_shells": []} Putting it all together ----------------------- The following example shows a fully fleshed out config file. :download:`Download config.json ` .. include:: xonshconfig.json :code: json xonsh-0.6.0/docs/xonshrc.rst000066400000000000000000000046451320541242300160270ustar00rootroot00000000000000Run Control File ========================= Xonsh allows you to have run control files to customize your shell behavior. These are called ``xonshrc`` files. The system-wide ``xonshrc`` file controls options that are applied to all users of Xonsh on a given system. You can create this file in ``/etc/xonshrc`` for Linux and OSX and in ``%ALLUSERSPROFILE%\xonsh\xonshrc`` on Windows. Xonsh also allows you to have a run control file in your home directory called ``~/.xonshrc``. The options set in the local ``xonshrc`` only apply to the current user and will override any conflicting settings set in the system-wide control file. These files are written in the xonsh language, of course. They are executed exactly once at startup. The following is a real-world example of such a file. :download:`Download xonshrc ` .. include:: xonshrc.xsh :code: xonsh Snippets for xonshrc ========================= The following are useful snippets and bits of code that tweak and adjust xonsh in various ways.
If you have any useful tricks, feel free to share them. Adjust how git branch label behaves ------------------------------------------- Xonsh adds a colored branch name to the prompt when working with git or hg repositories. This behavior can be controlled with the ``$PROMPT`` environment variable. See how to `customize the prompt`_ . The branch name changes color if the work dir is dirty or not. This is controlled by the ``{branch_color}`` formatter string. The following snippet reimplements the formatter also to include untracked files when considering if a git directory is dirty. .. code-block:: xonshcon >>> from xonsh.prompt.vc_branch import git_dirty_working_directory >>> $PROMPT_FIELDS['branch_color'] = lambda: ('{BOLD_INTENSE_RED}' if git_dirty_working_directory(include_untracked=True) else '{BOLD_INTENSE_GREEN}') .. _customize the prompt: http://xon.sh/tutorial.html#customizing-the-prompt Get better colors from the ``ls`` command ---------------------------------------------- The colors of the ``ls`` command may be hard to read in a dark terminal. If so, this is an excellent addition to the xonshrc file. .. code-block:: xonshcon >>> $LS_COLORS='rs=0:di=01;36:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:' xonsh-0.6.0/docs/xonshrc.xsh000066400000000000000000000010511320541242300160050ustar00rootroot00000000000000# adjust some paths $PATH.append('/home/scopatz/sandbox/bin') $LD_LIBRARY_PATH = ['/home/scopatz/.local/lib', '/home/scopatz/miniconda3/lib', ''] # alias to quit AwesomeWM from the terminal def _quit_awesome(args, stdin=None): lines = $(ps ux | grep "gnome-session --session=awesome").splitlines() pids = [l.split()[1] for l in lines] for pid in pids: kill @(pid) aliases['qa'] = _quit_awesome # some customization options $MULTILINE_PROMPT = '`·.,¸,.·*¯`·.,¸,.·*¯' $XONSH_SHOW_TRACEBACK = True $XONSH_STORE_STDOUT = True xonsh-0.6.0/docs/xontribs.rst000066400000000000000000000007441320541242300162070ustar00rootroot00000000000000Xontribs ======== The following lists known xonsh contributions (xontribs), a description of the xontrib, and how to get your hands on it. Once installed, these xontribs can be loaded into your session by adding them to the ``xontribs`` list in the config file, or dynamically in your xonshrc file or on the command line using the ``xontrib`` command: .. code-block:: xonshcon >>> xontrib mpl xo ... .. See the xontrib tutorial for more information. .. 
include:: xontribsbody xonsh-0.6.0/empty000066400000000000000000000000001320541242300137170ustar00rootroot00000000000000xonsh-0.6.0/github_deploy_key.enc000066400000000000000000000104701320541242300170470ustar00rootroot00000000000000gAAAAABYCVeRMufeZzuB75i64JSZgVejwpGQJ6pEO4DwLwZidzRA2we3Xu02minO3Rt7mmQl5pKJOJJYQZ0zP6uCORSNTY27t8OX5NsZEueWgSwsBPg4tq8lzEcHyKqJjWKpThky5mm1La_Qjw4kBO407UwrGX7DuUIF9jbOphWRF9SbbG44mobT3LAttUE1t9jVRQ320fVE2hZvs-vONYFOu2HsMk29dJ08pIOxnqoTHuwylZn3YL-vEGi7XJFaXIDWCFbjvXzcdLbcUmhcYPkISKn9BZGzoID7uIEi4dJGq_ZoIdNVRp-XgyUxHjjYEA8EojvBOKJ4unMn5xt3IXajrlslGseQHnxO1hswIXNSMqr8IH_dzv1pUu4C7ssm8xGuyxWh9JEyTfpHkNGMau8urgVrdpyHypqENFQVDG4hU6f6CQeyoiOM1_hYBA7byN1FiP8ZhefHo5nIjcSF3rnKIOn7y_Wt2ye3soQXBP258yzB9WJvvpya-tW3QB48tXJHHg5US-F9hDHchJyzZS2nArBSYeKpB6yMWyC6vVQBY1BFqh4wrjTkEHrSx3qi5B-631418yI8rb2PalbAizWE2MGUgdo8nePMUa93RpSPELL_FkQsK9BRR8c8EpTvRcQJsmg7aKSzwkpNrTrO99v30DU94xIwNe4rFsq5ogZQiu4Rw-wFu7K6YBlNCfIbTSEgCAlcULAwxakvo1tNGTKbEpKZLLZ7jeeETyyKLb8YOEYxiMuID015iV9Ecbuk3AWHhsiFWU_qEltOzN6xkkX9CyvLeXVliFsGF6sEZOyYQUyXAoGwd_xkUMZafHy5L63WfnwX8FEl7SI4D7LlaRrKLh8du50HqR3s00_Zl7KzfIXBtu-BBgT1t8c3UEZpbhxPA8v6XaUlLSGyrQUhSe_GqVI9BVJ6toVRjNqT14j4Q_HHOElkPbzloh2Cas-mp9as3SyH15dTmLTvAPw4nuPv5g3vL04NyDBQQtMPhhcGS8pwQ-odVhfB5k7TJOzXmjBg8LOlC3znD8Grgen6wOJmHpxAfdwVwOLB6S-8d3a2gwsP34xbUS_5kPUTIOWg92AngaIFlShyk331wkXi9EdP8fp9rPlbdSrPa2oNrSvYfoHoL9yhpnBjKKg8TEKJ49gM9UrU4JCmoJOPJDIigI2LjvJMEXVFSvXBr8Lb7l3nidJdZO4wxhi32Rp87AGznsOZpLGAYbrGKCjEowrPIOCgJ-B5SMo0cYVcfS1v0uL3Qvea5nfATffwmdMr1nOtid4dQX5eqz4OL3QMVU9zOeNSRDvbqK1McBlaWaoMA4Cy4Q5lnm98d6pMdNhCD-5dd9_C5q-_rBa0SGrdm6frYvgSpitTVWmV2XSa-QjdzjMS-k_VDsAdl_hKYWGUdSaP5b24cY_la0GjRNHYbcwO5KrO1fOVR68nB4Y4UGwq4eMk6hxd-Nmpqyd9ebmpuI0l6zGhapiSBkNWDlVskM-9Qm_YgEqTNvXn5paGh-KwLAQOQVBjNPhbDmIpAU3m5jVRJpXoVBnf8u2-N6qMFmW4C7za5NbUJLF2nJWHGAjclxSJBKRap8yPb3ry3mMZADkDmDVM6gWDzRr5ZgX82RuLMSppe5EHGIcvnSjs6980S7IxqOGxWFWNJU30oL5QQterTurBEXv0U4bdjTAHa42slrtp8pUT-LsDY8pwKBtHlJ7tXj5y5gzQEqhfL7yceyugjkNlmqhamjENhcnBJlGOXamip6JYE6wDlf7ogIVs4vtwO_4vjHAxOtgyYZJhdyC0lklecXYyC_xl_lzVgZ7lJ22SdwIbFk3Lh4wMFxXIvzKWaHu1KPlt2MtlPfj5aLZTu4DIOy4tkwXUrpxhsOzD2ZY-Oz_ZTD7yXktTsreQm8mTldlVncO4fdeMHbGcXeFxGBX7pJsglT2jKYgTXPbhSO0FbsuHft2qRHgceIV8CnyoPiN5JayT0-tS7g4Pw_reBUN4xKUgpHlZOti_WMDtGPCkS38psKkg4d6bqtuKTN7ebtYJYabMCb45nGMLUA1oR0sLsSkL24-r215TAfV-SQK5JrSlg9EibLPcaXgUMxh6z0NH86g_hrQ9WBDaaqmPT2L1_jPFjudbmHYIL7DI9hJMmQ_BiQgSF-69iUOgMEt31jRSXzxAQVFaEaS2rjEAvxDJ_tOt3lJiIlRZBTEtt_E9zUBXJA8GXAdXckFlQpfXJbp8IHa69HI-lfKjTg4IDD9z03XQrd-yITSwAc5zhz4CRZ6atM9t23FMWQyqPBbue4Zk2ZouZIy98Ce6UVYySZr0zsdelmttN6wJZm8txncDmUMn8VhdEgxd26dNOJnzBRtjFcQQsg2bKCCXSM4TSt-fL8YYa1aRRo2qsIhwaceRvF_q1XgJVWKV4KXFZbn76klvaZIQadOJUNDk8CUX13DtNtRVfxzyHcTUEXHkQFqKI0HaflGVv5nqSvXOqP6bjQvJ2Rxp2ZGd_MqsLtMmQSLJeHNuNWCryu0tP9roZThPZKYSeiBTk6ZF07QD677Aqp1CIuZa0-BuQfVf2z_wejJfmM9fnMI5yParH7cVf23SwcItM5X_Thi6PRqzMivewrnGdNQtbwBEAeuVtSxs-V4RdoArOCoOu7LFpCdyE8pMD22JBJ9ojn6TPlfteCJSjJDcoUzcSVM_vo1tfvMGHMhhAJNr4aD_5DwSrBWczxtMXZ8aMZx3FO6GntQHjCXlciu3RwFFrABbLM5_TFntZb-REvNFXFgCieCYampKj6kImT7nUfQiGBrlIIj0h_kLsa_3ZC4avNpXEvRCOzL2vqrghWQxChDNoopb4SJCRBbQD8jpsNdiIwv3mweAcpRCB_hWhiX4MPgHLF7CAIvewLLTNidyjiimiGE27IdKieI4GC3COEwbWANuT1UK32FZc7KYrfrF8wfCXc5geutE6mIQHZn0ORiOUGyZUkV7Auecm9FMMvEQHOxUKVqA-JDhw9QlDe1hKtXmVNsJqyljhVMtDVgJyNs4UmmafqdAc2suehQTCCuw3rVexyf4PUZoa9d4cdBfhTHsrRx4Rf6HHs682S3F-tTI_FkQv9_VEqx6n8wJrwMjamobqp88-AMKjrnBl0DnQlPUABSBROdhiQnLGLLeNB6hP7v_it9v-eCPuUNVKcnDUKXBdJJPQKXVwOM9jC3tnQMX9FSflXhSCHzJvFsyRsGuy6a9i09ZjvzBYAPMQIu0IiQl9iWgSvcFdiF6xAih70vvvndyAy88GTXjH99UVUZcyevXX241xJMsMAbGYwdTyRdSx1WqcRMhvgNeB-Pk0Nhptr
KlbVELr3LTBVELCSvoJm3Teb_g9jBX0F89jNK0CQPmM0iMvGPiQf3fqZSqfCZ7ErD6RkDRFEYhMZMLSM3zdoOXXcj2dt53D1Of0UH96dnmbfEic0M2S5sEr3p9juYuYQuCt14310pNmyusd-EqU0k7uJTJWu0HKIH6TQaQth1rBmW1m_DvlzbMCB1NccKXeaZ-8-d7ubzaQdiXIWVlnsxTrgcyaFU2JFNDHdtfyVyyhzXhN-zVzBB1KQU-NmLsxSXCXS7xwsNxo_cdjn0UgcpJGhpxf_vNGz6dzk52ITXwImLIOjHLQ6eKSwgQn_86cVKshteWqkaVV3fV-ITtALWxVPSHdB4CHgsHgZAuFbGPWs_djdjtZthcko9nAdLyCa5vW-jntq7y3J75QKeC7fj1b0Au8E7JasWGfHwv7sYqVf1k8zqmSV_D_slea0-FG3TdglB297HqbHW7Ytjx205MMOS8rjSSpH-vw8cf2wdyL9r8DBQwaII_JtUbZLQE3HBlBMVjqUl9iTVoj30MkdEFXTHDiFUpRvb-Skk3gw15H_Dq8QGaYygtb20q9RYAqTG1IrL3WheCcoBGoZ4yfrYGGlREOG7zxEBntivLg12UUBVAazFXLbNj0P1CKMsUbHB6m9dHSljM31MGQKzDMcSpUSkvH2s1ekgIpN7kae3Dk3pyFEn-I0VkN-7jG5nrKSCwQGc9mM8PAXeCTt1FTjcmBiT3s8-FYj13jXixf4-zMIVRIN7aPSsLzyVad3qmdLb4lIGwqW_II9TmEgp6NnurS1nApKaDrUlQiTFXkzfTaQoI0mFlI61xT8uNaspDQssQdv71l-bbmflPp06R5xmYR59vz3RDfkYKZU5uwxNPByuFf7xsRuLybN64Kg2l-0kv3WoPBpXCctCIsJ0DM2MeZUJ5xS8deRvbl7co7YhgNEkrZPHils7zwBZ5GzhyW2EGGZ3x-yHstqhJmKi-gthACbdoUMrCVO1G1iWtkA5TLLCnMdqiJRetlCD53DkBy9ld3Ez5vUZ2Mi4Ppx5ha66NPPZa__MQEASCy06bHABJq86NMHRzG6Y2ENvij8k=xonsh-0.6.0/license000066400000000000000000000030521320541242300142150ustar00rootroot00000000000000Copyright 2015-2016, the xonsh developers. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE XONSH DEVELOPERS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE XONSH DEVELOPERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the stakeholders of the xonsh project or the employers of xonsh developers. 
xonsh-0.6.0/logo.txt000066400000000000000000000060521320541242300143540ustar00rootroot00000000000000 ╓██▄ ╙██▀██╕ ▐██4Φ█▀█▌ ²██▄███▀██^██ -███╩▀ " ╒▄█████▀█ ║██▀▀W╤▄▀ ▐║█╘ ╝█ ▄m▀%Φ▀▀ ╝*" ,α█████▓▄,▄▀Γ"▀╕ "▀██¼" ▄═╦█╟║█▀ ╓ `^` ,▄ ╢╕ ,▀╫M█▐j╓╟▀ ╔▓▄█▀ '║ ╔ ╣║▌ ▀▄ ▄m▀▀███╬█╝▀ █▀^ "ÜM j▐╟╫╨▒ ╙▀≡═╤═m▀╗ █æsæ╓ ╕, ,▄Ä ▐'╕H LU ║║╠╫Å^2=⌐ █ ▄æ%Å███╠█ª╙▄█▀ $1╙ ║║╟╫╩*T▄ ▌ ╙╗%▄,╦██▌█▌█╢M ╕ M║║║║█═⌐ⁿ"^ ╫ ╙╣▀████@█░█ ▌╕╕ ` ▌║▐▐║█D═≈⌐¬ⁿ s ║⌐ ╙╬███▓║█` ▌╚ ╕ ╕▌║▐▐╣▌⌐*▒▒Dù` ▐▌ ╙╬██╨U█ ╟ $ ▌ ▌▌▐▐▐M█▄═≤⌐% ╓⌐ ▌ ║║█▄▌║ ╟ ▌ ▌M▐▐▐M█▀▒▒▒22, ▐▌ ███╙^▌ ║ ▌ ⌐M▐▐▐M█≤⌐⌐¬── ▐M ║██ ▌╙ ╓ H║ ▌╒ M║▐▐M█"^^^^^"ⁿ ║ ██╕╙@▓ ╕ ▌║ H' ║▐▐▐█══=.,,, █ ╙█▓╔╚╚█ ╠ ▌└╒ ▌▐ ╚║║║▀****ⁿ - ╓▌ ╙█▌¼V╚▌ ▌ ╕ ▌ ║╒ ║ ▌▒╠█▀≤≤≤≤≤⌐ █ ╙█▌╔█╚▌ ┘ M ▌║ ╫ UUM██J^^" ▐▌ ╙██╙█╙▌ ╕$j ▐⌐▌ ▌║╝╟█Å%%%≈═ █ ╙╣█╣█^▌ ╠║▐ ║ ▌▐.DU██^[""ⁿ -╒▌ ▀█▄█`▌ ░M▀ ▌▐ Å£╝╝█╜%≈═╓""w ⁿ⌐ █ `▀▄▀`▌ ▌█▐⌐║▐UW╖██%≤═░*─ =z ▄Γ ╙██╙▄▌█ ▌Å╛╣██╨%╤ƒⁿ= -` ▄┘ █▌╢▓▌▌ W £6█╤,"ⁿ ` ▄≡▀▀▀ █"█▌▌╟Å╓█╓█▀%` ▄▀ ╙▌██`▒U▓U█%╗* █ ▌╫║ ▌ÅÅ║▀╛¬` `"█ ▌╫ ╫╟ █▄ ~╦%▒╥4^ ▌▌ "M█ `▀╕ X╕"╗▄▀^ █▌ ╓M ╙▀e▀▀^ ╙██▄▄▀ ^^ xonsh-0.6.0/news/000077500000000000000000000000001320541242300136245ustar00rootroot00000000000000xonsh-0.6.0/news/TEMPLATE.rst000066400000000000000000000001611320541242300155670ustar00rootroot00000000000000**Added:** None **Changed:** None **Deprecated:** None **Removed:** None **Fixed:** None **Security:** None xonsh-0.6.0/release.xsh000077500000000000000000000243141320541242300150230ustar00rootroot00000000000000#!/usr/bin/env xonsh """Release helper script for xonsh.""" import os import re import sys import socket from getpass import getuser, getpass from argparse import ArgumentParser, Action try: import github3 except ImportError: github3 = None # Configuration! PROJECT = 'xonsh' PROJECT_URL = 'http://xon.sh' # further possible customizations USER = getuser() ORG = PROJECT BRANCH = 'master' UPSTREAM_ORG = PROJECT UPSTREAM_REPO = PROJECT FEEDSTOCK_REPO = PROJECT + '-feedstock' WILL_DO = { 'do_version_bump': True, 'do_git': True, 'do_pip': True, 'do_conda': True, 'do_docs': True, } # Allow alternative SHA patterns for feedstock, uncomment the one you need # Option 0 TAR_SHA_RE = '\s+sha256:.*' TAR_SHA_SUBS = ' sha256: {0}' # Option 1 #TAR_SHA_RE = '{% set sha256 = ".*" %}' #TAR_SHA_SUBS = '{{% set sha256 = "{0}" %}}' def ver_news(ver): news = ('.. current developments\n\n' 'v{0}\n' '====================\n\n') news = news.format(ver) news += merge_news() return news VERSION_UPDATE_PATTERNS = [ ('__version__\s*=.*', (lambda ver: "__version__ = '{0}'".format(ver)), [PROJECT, '__init__.py']), ('version:\s*', (lambda ver: 'version: {0}.{{build}}'.format(ver)), ['.appveyor.yml']), ('.. current developments', ver_news, ['CHANGELOG.rst']), ] # # Implementation below! 
# def replace_in_file(pattern, new, fname): """Replaces a given pattern in a file""" with open(fname, 'r') as f: raw = f.read() lines = raw.splitlines() ptn = re.compile(pattern) for i, line in enumerate(lines): if ptn.match(line): lines[i] = new upd = '\n'.join(lines) + '\n' with open(fname, 'w') as f: f.write(upd) if os.path.isdir('news'): NEWS = [os.path.join('news', f) for f in os.listdir('news') if f != 'TEMPLATE.rst'] else: NEWS = [] NEWS_CATEGORIES = ['Added', 'Changed', 'Deprecated', 'Removed', 'Fixed', 'Security'] NEWS_RE = re.compile('\*\*({0}):\*\*'.format('|'.join(NEWS_CATEGORIES)), flags=re.DOTALL) def merge_news(): """Reads news files and merges them.""" cats = {c: '' for c in NEWS_CATEGORIES} for news in NEWS: with open(news) as f: raw = f.read() raw = raw.strip() parts = NEWS_RE.split(raw) while len(parts) > 0 and parts[0] not in NEWS_CATEGORIES: parts = parts[1:] for key, val in zip(parts[::2], parts[1::2]): val = val.strip() if val == 'None': continue cats[key] += val + '\n' for news in NEWS: os.remove(news) s = '' for c in NEWS_CATEGORIES: val = cats[c] if len(val) == 0: continue s += '**' + c + ':**\n\n' + val + '\n\n' return s def version_update(ver): """Updates version strings in relevant files.""" for p, n, f in VERSION_UPDATE_PATTERNS: if callable(n): n = n(ver) replace_in_file(p, n, os.path.join(*f)) def just_do_git(ns): """Commits and updates tags. Creates github release and adds merged news as release notes""" git status git commit -am @("version bump to " + ns.ver) git push @(ns.upstream) @(ns.branch) git tag @(ns.ver) git push --tags @(ns.upstream) if github3 is not None: do_github_release(ns.ver, ns.ghuser, 'xonsh', 'xonsh') def pipify(): """Make and upload pip package.""" ./setup.py sdist upload def shatar(org, repo, target): """Returns the SHA-256 sum of the {ver}.tar.gz archive from github.""" oldpwd = $PWD cd /tmp url = "https://github.com/{0}/{1}/archive/{2}.tar.gz" url = url.format(org, repo, target) curl -L -O @(url) sha, _ = $(sha256sum @('{}.tar.gz'.format(target))).split() cd @(oldpwd) return sha def feedstock_repos(ghuser): """Returns the origin and upstream repo URLs for the feedstock.""" origin = 'git@github.com:{ghuser}/{feedstock}.git' origin = origin.format(ghuser=ghuser, feedstock=FEEDSTOCK_REPO) upstream = 'git@github.com:conda-forge/{feedstock}.git' upstream = upstream.format(feedstock=FEEDSTOCK_REPO) return origin, upstream def condaify(ver, ghuser): """Make and upload conda packages.""" origin, upstream = feedstock_repos(ghuser) if not os.path.isdir('feedstock'): git clone @(origin) feedstock # make sure master feedstock is up to date cd feedstock git checkout master git pull @(upstream) master # make and modify version branch with ${...}.swap(RAISE_SUBPROC_ERROR=False): git checkout -b @(ver) master or git checkout @(ver) cd recipe set_ver = '{% set version = "' + ver + '" %}' set_sha = TAR_SHA_SUBS.format(shatar(UPSTREAM_ORG, UPSTREAM_REPO, ver)) replace_in_file('{% set version = ".*" %}', set_ver, 'meta.yaml') replace_in_file(TAR_SHA_RE, set_sha, 'meta.yaml') cd .. with ${...}.swap(RAISE_SUBPROC_ERROR=False): git commit -am @("updated v" + ver) git push --set-upstream @(origin) @(ver) cd .. if github3 is not None: open_feedstock_pr(ver, ghuser) def create_ghuser_token(ghuser, credfile): """Acquires a github token, writes a credentials file, and returns the token. 
""" password = '' while not password: password = getpass('GitHub Password for {0}: '.format(ghuser)) note = 'github3.py release.xsh ' + PROJECT + ' ' + socket.gethostname() note_url = PROJECT_URL scopes = ['user', 'repo'] try: auth = github3.authorize(ghuser, password, scopes, note, note_url, two_factor_callback=two_factor) except github3.exceptions.UnprocessableEntity: msg = ('Could not create GitHub authentication token, probably because' 'it already exists. Try deleting the token titled:\n\n ') msg += note msg += ('\n\nfrom https://github.com/settings/tokens') raise RuntimeError(msg) with open(credfile, 'w') as f: f.write(str(auth.token) + '\n') f.write(str(auth.id)) return auth.token def two_factor(): """2 Factor Authentication callback function, called by `github3.authorize` as needed. """ code = '' while not code: code = input('Enter 2FA code: ') return code def read_ghuser_token(credfile): """Reads in a github user token from the credentials file.""" with open(credfile, 'r') as f: token = f.readline().strip() ghid = f.readline().strip() return token def ghlogin(ghuser): """Returns a github object that is logged in.""" credfile = ghuser + '.cred' if os.path.exists(credfile): token = read_ghuser_token(credfile) else: token = create_ghuser_token(ghuser, credfile) gh = github3.login(ghuser, token=token) return gh def do_github_release(ver, ghuser, org, repo): """Performs a github release""" login = ghlogin(ghuser) repo = login.repository(org, repo) news = read_changelog_recent() repo.create_release(ver, target_commitish='master', name=ver, body=news, draft=False, prerelease=False) def read_changelog_recent(): with open('CHANGELOG.rst', 'r') as f: line = '' while not line.startswith('v'): line = f.readline() news = '' while True: line = f.readline() if line.startswith('v'): break news += line return news def open_feedstock_pr(ver, ghuser): """Opens a feedstock PR.""" origin, upstream = feedstock_repos(ghuser) gh = ghlogin(ghuser) repo = gh.repository('conda-forge', FEEDSTOCK_REPO) print('Creating conda-forge feedstock pull request...') title = PROJECT + ' v' + ver head = ghuser + ':' + ver body = 'Merge only after success.' pr = repo.create_pull(title, 'master', head, body=body) if pr is None: print('!!!Failed to create pull request!!!') else: print('Pull request created at ' + pr.html_url) def docser(): """Create docs""" # FIXME this should be made more general ./setup.py install --user cd docs make clean html push-root cd .. 
DOERS = ('do_version_bump', 'do_git', 'do_pip', 'do_conda', 'do_docs') class OnlyAction(Action): def __init__(self, option_strings, dest, **kwargs): super().__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): for doer in DOERS: if doer == self.dest: setattr(namespace, doer, True) else: setattr(namespace, doer, False) def main(args=None): default_upstream = 'git@github.com:{org}/{repo}.git' default_upstream = default_upstream.format(org=UPSTREAM_ORG, repo=UPSTREAM_REPO) # make parser parser = ArgumentParser('release') parser.add_argument('--upstream', default=default_upstream, help='upstream repo') parser.add_argument('-b', '--branch', default=BRANCH, help='branch to commit / push to.') parser.add_argument('--github-user', default=USER, dest='ghuser', help='GitHub username.') for doer in DOERS: base = doer[3:].replace('_', '-') wd = WILL_DO.get(doer, True) parser.add_argument('--do-' + base, dest=doer, default=wd, action='store_true', help='runs {}, default: {}'.format(base, wd)) parser.add_argument('--no-' + base, dest=doer, action='store_false', help='does not run ' + base) parser.add_argument('--only-' + base, dest=doer, action=OnlyAction, help='only runs ' + base, nargs=0) parser.add_argument('ver', help='target version string') ns = parser.parse_args(args or $ARGS[1:]) # enable debugging $RAISE_SUBPROC_ERROR = True #trace on # run commands if ns.do_version_bump: version_update(ns.ver) if ns.do_git: just_do_git(ns) if ns.do_pip: pipify() if ns.do_conda: condaify(ns.ver, ns.ghuser) if ns.do_docs: docser() # disable debugging #trace off if __name__ == '__main__': main() xonsh-0.6.0/requirements-docs.txt000066400000000000000000000000711320541242300170600ustar00rootroot00000000000000cloud_sptheme numpydoc==0.5 Sphinx==1.5.6 prompt_toolkit xonsh-0.6.0/requirements-tests.txt000066400000000000000000000002761320541242300173010ustar00rootroot00000000000000ply==3.8 py==1.4.31 pyflakes==1.2.3 pytest==2.9.2 pytest-flake8==0.5 pytest-cov==2.3.0 pytest-timeout==1.0.0 prompt-toolkit==1.0.3 pygments==2.1.3 codecov==2.0.5 flake8==2.6.2 coverage==4.2 xonsh-0.6.0/rever.xsh000066400000000000000000000024361320541242300145240ustar00rootroot00000000000000$PROJECT = $GITHUB_ORG = $GITHUB_REPO = 'xonsh' $WEBSITE_URL = 'http://xon.sh' $ACTIVITIES = ['version_bump', 'changelog', 'pytest', 'sphinx', #'tag', 'pypi', 'conda_forge', 'ghpages', 'ghrelease' ] $VERSION_BUMP_PATTERNS = [ ('.appveyor.yml', 'version:.*', 'version: $VERSION.{build}'), ('xonsh/__init__.py', '__version__\s*=.*', "__version__ = '$VERSION'"), ] $CHANGELOG_FILENAME = 'CHANGELOG.rst' $CHANGELOG_TEMPLATE = 'TEMPLATE.rst' #$PYTEST_COMMAND = "rm -r .cache/ __pycache__/ */__pycache__ */*/__pycache__ && pytest" $TAG_REMOTE = 'git@github.com:xonsh/xonsh.git' $TAG_TARGET = 'master' $GHPAGES_REPO = 'git@github.com:scopatz/xonsh-docs.git' $DOCKER_APT_DEPS = ['man'] with open('requirements-tests.txt') as f: conda_deps = f.read().split() with open('requirements-docs.txt') as f: conda_deps += f.read().split() conda_deps = {d.lower().split('=')[0] for d in set(conda_deps)} conda_deps.discard('prompt-toolkit') conda_deps |= {'prompt_toolkit', 'pip', 'psutil', 'numpy', 'matplotlib'} $DOCKER_CONDA_DEPS = sorted(conda_deps) $DOCKER_INSTALL_COMMAND = ('rm -rf .cache/ __pycache__/ */__pycache__ */*/__pycache__ build/ && ' './setup.py install') $DOCKER_GIT_NAME = 'xonsh' $DOCKER_GIT_EMAIL = 'xonsh@googlegroups.com' 
xonsh-0.6.0/scripts/000077500000000000000000000000001320541242300143375ustar00rootroot00000000000000xonsh-0.6.0/scripts/xon.sh000077500000000000000000000004051320541242300155010ustar00rootroot00000000000000#!/bin/sh # set locale if it is totally undefined if [ -z "${LC_ALL+x}" ] && [ -z "${LC_CTYPE+x}" ] && \ [ -z "${LANG+x}" ] && [ -z "${LANGUAGE+x}" ]; then export LANG=C.UTF-8 fi # run python exec /usr/bin/env PYTHONUNBUFFERED=1 python3 -u -m xonsh "$@" xonsh-0.6.0/scripts/xonsh000077500000000000000000000000751320541242300154260ustar00rootroot00000000000000#!/usr/bin/env python3 -u from xonsh.main import main main() xonsh-0.6.0/scripts/xonsh.bat000066400000000000000000000002711320541242300161660ustar00rootroot00000000000000@echo off call :s_which py.exe if not "%_path%" == "" ( py -3 -m xonsh %* ) else ( python -m xonsh %* ) goto :eof :s_which setlocal endlocal & set _path=%~$PATH:1 goto :eof xonsh-0.6.0/setup.cfg000066400000000000000000000015031320541242300144700ustar00rootroot00000000000000[pytest] flake8-max-line-length = 180 flake8-ignore = *.py E122 *.py E402 tests/tools.py E128 xonsh/ast.py F401 xonsh/built_ins.py F821 E721 xonsh/commands_cache.py F841 xonsh/history.py F821 xonsh/pyghooks.py F821 xonsh/style_tools.py F821 xonsh/readline_shell.py F401 xonsh/timings.py F401 xonsh/tokenize.py F821 F841 xonsh/tools.py E731 xonsh/xonfig.py E731 xonsh/proc.py E261 E265 xonsh/ptk/key_bindings.py F841 xonsh/ptk/shell.py E731 xontrib/vox.py F821 __amalgam__.py ALL # test files should be PEP8 but a ton of errors for now test_*.py ALL # we don't care about sphinx autogenerated files docs/*.py ALL # we don't care about ply files? ply/*.py ALL # these run VERY slowly and give tons of errors parser*_table.py ALL xonsh-0.6.0/setup.py000077500000000000000000000301671320541242300143740ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: ascii -*- """The xonsh installer.""" # Note: Do not embed any non-ASCII characters in this file until pip has been # fixed. See https://github.com/xonsh/xonsh/issues/487. 
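# Overview of the custom distutils/setuptools commands defined below
# (illustrative notes, not an exhaustive list of supported invocations):
#
#   python3 setup.py install   -> ``xinstall``: rebuilds the lexer/parser
#                                 tables, amalgamates the source, and attempts
#                                 to register the Jupyter kernel spec.
#   python3 setup.py sdist     -> ``xsdist``: performs the same table and
#                                 amalgamation regeneration before building
#                                 the release tree.
#   python3 setup.py develop   -> ``xdevelop`` (only when setuptools is
#                                 available): rebuilds the tables for an
#                                 editable install.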
from __future__ import print_function, unicode_literals import os import sys import json import subprocess try: from tempfile import TemporaryDirectory except ImportError: pass try: from setuptools import setup from setuptools.command.sdist import sdist from setuptools.command.install import install from setuptools.command.develop import develop from setuptools.command.install_scripts import install_scripts HAVE_SETUPTOOLS = True except ImportError: from distutils.core import setup from distutils.command.sdist import sdist as sdist from distutils.command.install import install as install from distutils.command.install_scripts import install_scripts HAVE_SETUPTOOLS = False try: from jupyter_client.kernelspec import KernelSpecManager HAVE_JUPYTER = True except ImportError: HAVE_JUPYTER = False TABLES = ['xonsh/lexer_table.py', 'xonsh/parser_table.py', 'xonsh/__amalgam__.py', 'xonsh/completers/__amalgam__.py', 'xonsh/history/__amalgam__.py', 'xonsh/prompt/__amalgam__.py'] def clean_tables(): """Remove the lexer/parser modules that are dynamically created.""" for f in TABLES: if os.path.isfile(f): os.remove(f) print('Removed ' + f) os.environ['XONSH_DEBUG'] = '1' from xonsh import __version__ as XONSH_VERSION def amalgamate_source(): """Amalgamates source files.""" sys.path.insert(0, os.path.dirname(__file__)) try: import amalgamate except ImportError: print('Could not import amalgamate, skipping.', file=sys.stderr) return amalgamate.main(['amalgamate', '--debug=XONSH_DEBUG', 'xonsh', 'xonsh.completers', 'xonsh.history', 'xonsh.prompt']) sys.path.pop(0) def build_tables(): """Build the lexer/parser modules.""" print('Building lexer and parser tables.') sys.path.insert(0, os.path.dirname(__file__)) from xonsh.parser import Parser Parser(lexer_table='lexer_table', yacc_table='parser_table', outputdir='xonsh', yacc_debug=True) sys.path.pop(0) def install_jupyter_hook(prefix=None, root=None): """Make xonsh available as a Jupyter kernel.""" if not HAVE_JUPYTER: print('Could not install Jupyter kernel spec, please install ' 'Jupyter/IPython.') return spec = {"argv": [sys.executable, "-m", "xonsh.jupyter_kernel", "-f", "{connection_file}"], "display_name": "Xonsh", "language": "xonsh", "codemirror_mode": "shell", } with TemporaryDirectory() as d: os.chmod(d, 0o755) # Starts off as 700, not user readable if sys.platform == 'win32': # Ensure that conda-build detects the hard coded prefix spec['argv'][0] = spec['argv'][0].replace(os.sep, os.altsep) with open(os.path.join(d, 'kernel.json'), 'w') as f: json.dump(spec, f, sort_keys=True) if 'CONDA_BUILD' in os.environ: prefix = sys.prefix if sys.platform == 'win32': prefix = prefix.replace(os.sep, os.altsep) user = ('--user' in sys.argv) print('Installing Jupyter kernel spec:') print(' root: {0!r}'.format(root)) print(' prefix: {0!r}'.format(prefix)) print(' as user: {0}'.format(user)) if root and prefix: # os.path.join isn't used since prefix is probably absolute prefix = root + prefix print(' combined prefix {0!r}'.format(prefix)) KernelSpecManager().install_kernel_spec( d, 'xonsh', user=user, replace=True, prefix=prefix) def dirty_version(): """ If install/sdist is run from a git directory (not a conda install), add a devN suffix to reported version number and write a gitignored file that holds the git hash of the current state of the repo to be queried by ``xonfig`` """ try: _version = subprocess.check_output(['git', 'describe', '--tags']) except Exception: print('failed to find git tags', file=sys.stderr) return False _version = 
_version.decode('ascii') try: _, N, sha = _version.strip().split('-') except ValueError: # tag name may contain "-" print('failed to parse git version', file=sys.stderr) return False sha = sha.strip('g') replace_version(N) _cmd = ['git', 'show', '-s', '--format=%cd', '--date=local', sha] try: _date = subprocess.check_output(_cmd) _date = _date.decode('ascii') # remove weekday name for a shorter string _date = ' '.join(_date.split()[1:]) except: _date = '' print('failed to get commit date', file=sys.stderr) with open('xonsh/dev.githash', 'w') as f: f.write('{}|{}'.format(sha, _date)) print('wrote git version: ' + sha, file=sys.stderr) return True ORIGINAL_VERSION_LINE = None def replace_version(N): """Replace version in `__init__.py` with devN suffix""" global ORIGINAL_VERSION_LINE with open('xonsh/__init__.py', 'r') as f: raw = f.read() lines = raw.splitlines() msg_assert = '__version__ must be the first line of the __init__.py' assert '__version__' in lines[0], msg_assert ORIGINAL_VERSION_LINE = lines[0] lines[0] = lines[0].rstrip(" '") + ".dev{}'".format(N) upd = '\n'.join(lines) + '\n' with open('xonsh/__init__.py', 'w') as f: f.write(upd) def restore_version(): """If we touch the version in __init__.py discard changes after install.""" if ORIGINAL_VERSION_LINE is None: return with open('xonsh/__init__.py', 'r') as f: raw = f.read() lines = raw.splitlines() lines[0] = ORIGINAL_VERSION_LINE upd = '\n'.join(lines) + '\n' with open('xonsh/__init__.py', 'w') as f: f.write(upd) class xinstall(install): """Xonsh specialization of setuptools install class.""" def run(self): clean_tables() build_tables() amalgamate_source() # add dirty version number dirty = dirty_version() # install Jupyter hook root = self.root if self.root else None prefix = self.prefix if self.prefix else None try: install_jupyter_hook(prefix=prefix, root=root) except Exception: import traceback traceback.print_exc() print('Installing Jupyter hook failed.') super().run() if dirty: restore_version() class xsdist(sdist): """Xonsh specialization of setuptools sdist class.""" def make_release_tree(self, basedir, files): clean_tables() build_tables() amalgamate_source() dirty = dirty_version() super().make_release_tree(basedir, files) if dirty: restore_version() # Hack to overcome pip/setuptools problem on Win 10. 
See: # https://github.com/tomduck/pandoc-eqnos/issues/6 # https://github.com/pypa/pip/issues/2783 # Custom install_scripts command class for setup() class install_scripts_quoted_shebang(install_scripts): """Ensure there are quotes around shebang paths with spaces.""" def write_script(self, script_name, contents, mode="t", *ignored): shebang = str(contents.splitlines()[0]) if shebang.startswith('#!') and ' ' in shebang[2:].strip() \ and '"' not in shebang: quoted_shebang = '#!"%s"' % shebang[2:].strip() contents = contents.replace(shebang, quoted_shebang) super().write_script(script_name, contents, mode, *ignored) class install_scripts_rewrite(install_scripts): """Change default python3 to the concrete python binary used to install/develop inside xon.sh script""" def run(self): super().run() if not self.dry_run: for file in self.get_outputs(): if file.endswith('xon.sh'): # this is the value distutils use on its shebang translation bs_cmd = self.get_finalized_command('build_scripts') exec_param = getattr(bs_cmd, 'executable', None) with open(file, 'r') as f: content = f.read() processed = content.replace(' python3 ', ' "{}" '.format(exec_param)) with open(file, 'w') as f: f.write(processed) # The custom install needs to be used on Windows machines if os.name == 'nt': cmdclass = {'install': xinstall, 'sdist': xsdist, 'install_scripts': install_scripts_quoted_shebang} else: cmdclass = {'install': xinstall, 'sdist': xsdist, 'install_scripts': install_scripts_rewrite} if HAVE_SETUPTOOLS: class xdevelop(develop): """Xonsh specialization of setuptools develop class.""" def run(self): clean_tables() build_tables() dirty = dirty_version() develop.run(self) if dirty: restore_version() def install_script(self, dist, script_name, script_text, dev_path=None): if script_name == 'xon.sh': # change default python3 to the concrete python binary used to install/develop inside xon.sh script script_text = script_text.replace(' python3 ', ' "{}" '.format(sys.executable)) super().install_script(dist, script_name, script_text, dev_path) def main(): """The main entry point.""" if sys.version_info[:2] < (3, 4): sys.exit('xonsh currently requires Python 3.4+') try: if '--name' not in sys.argv: logo_fname = os.path.join(os.path.dirname(__file__), 'logo.txt') with open(logo_fname, 'rb') as f: logo = f.read().decode('utf-8') print(logo) except UnicodeEncodeError: pass with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'r') as f: readme = f.read() scripts = ['scripts/xon.sh'] if sys.platform == 'win32': scripts.append('scripts/xonsh.bat') else: scripts.append('scripts/xonsh') skw = dict( name='xonsh', description='Python-powered, cross-platform, Unix-gazing shell', long_description=readme, license='BSD', version=XONSH_VERSION, author='Anthony Scopatz', maintainer='Anthony Scopatz', author_email='scopatz@gmail.com', url='https://github.com/xonsh/xonsh', platforms='Cross Platform', classifiers=['Programming Language :: Python :: 3'], packages=['xonsh', 'xonsh.ply.ply', 'xonsh.ptk', 'xonsh.parsers', 'xonsh.xoreutils', 'xontrib', 'xonsh.completers', 'xonsh.history', 'xonsh.prompt'], package_dir={'xonsh': 'xonsh', 'xontrib': 'xontrib'}, package_data={'xonsh': ['*.json', '*.githash'], 'xontrib': ['*.xsh']}, cmdclass=cmdclass, scripts=scripts, ) if HAVE_SETUPTOOLS: # WARNING!!! Do not use setuptools 'console_scripts' # It validates the dependencies (of which we have none) every time the # 'xonsh' command is run. This validation adds ~0.2 sec. to the startup # time of xonsh - for every single xonsh run. 
This prevents us from # reaching the goal of a startup time of < 0.1 sec. So never ever write # the following: # # 'console_scripts': ['xonsh = xonsh.main:main'], # # END WARNING skw['entry_points'] = { 'pygments.lexers': ['xonsh = xonsh.pyghooks:XonshLexer', 'xonshcon = xonsh.pyghooks:XonshConsoleLexer'], 'pytest11': ['xonsh = xonsh.pytest_plugin'] } skw['cmdclass']['develop'] = xdevelop skw['extras_require'] = { 'ptk': ['prompt-toolkit'], 'pygments': ['pygments'], 'win': ['win_unicode_console'], 'mac': ['gnureadline'], 'linux': ['distro'], 'proctitle': ['setproctitle'], } setup(**skw) if __name__ == '__main__': main() xonsh-0.6.0/tests/000077500000000000000000000000001320541242300140125ustar00rootroot00000000000000xonsh-0.6.0/tests/aliases/000077500000000000000000000000001320541242300154335ustar00rootroot00000000000000xonsh-0.6.0/tests/aliases/__init__.py000066400000000000000000000000001320541242300175320ustar00rootroot00000000000000xonsh-0.6.0/tests/aliases/test_source.py000066400000000000000000000021571320541242300203510ustar00rootroot00000000000000import os.path import pytest from contextlib import contextmanager from unittest.mock import MagicMock from xonsh.aliases import source_alias, builtins @pytest.fixture def mockopen(xonsh_builtins, monkeypatch): @contextmanager def mocked_open(fpath, *args, **kwargs): yield MagicMock(read=lambda: fpath) monkeypatch.setattr(builtins, 'open', mocked_open) def test_source_current_dir(mockopen, monkeypatch): checker = [] def mocked_execx(src, *args, **kwargs): checker.append(src.strip()) monkeypatch.setattr(builtins, 'execx', mocked_execx) monkeypatch.setattr(os.path, 'isfile', lambda x: True) source_alias(['foo', 'bar']) assert checker == ['foo', 'bar'] def test_source_path(mockopen, monkeypatch): checker = [] def mocked_execx(src, *args, **kwargs): checker.append(src.strip()) monkeypatch.setattr(builtins, 'execx', mocked_execx) source_alias(['foo', 'bar']) path_foo = os.path.join('tests', 'bin', 'foo') path_bar = os.path.join('tests', 'bin', 'bar') assert checker[0].endswith(path_foo) assert checker[1].endswith(path_bar) xonsh-0.6.0/tests/bashrc.sh000066400000000000000000000001441320541242300156070ustar00rootroot00000000000000export EMERALD="SWORD" alias ll='ls -a -lF' alias la='ls -A' export MIGHTY=WARRIOR alias l='ls -CF' xonsh-0.6.0/tests/batch.bat000066400000000000000000000000761320541242300155660ustar00rootroot00000000000000echo on set ENV_TO_BE_ADDED=Hallo world set ENV_TO_BE_REMOVED=xonsh-0.6.0/tests/bin/000077500000000000000000000000001320541242300145625ustar00rootroot00000000000000xonsh-0.6.0/tests/bin/cat000077500000000000000000000001571320541242300152620ustar00rootroot00000000000000#!/usr/bin/env python3 import sys with open(sys.argv[-1]) as f: for line in f: print(line, end='') xonsh-0.6.0/tests/bin/cat.bat000066400000000000000000000003501320541242300160170ustar00rootroot00000000000000@echo on call :s_which py.exe rem note that %~dp0 is dir of this batch script if not "%_path%" == "" ( py -3 %~dp0cat %* ) else ( python %~dp0cat %* ) goto :eof :s_which setlocal endlocal & set _path=%~$PATH:1 goto :eof xonsh-0.6.0/tests/bin/printfile.xsh000077500000000000000000000001011320541242300172750ustar00rootroot00000000000000#!/usr/bin/env xonsh import os print(os.path.basename(__file__)) xonsh-0.6.0/tests/bin/printname.xsh000077500000000000000000000000451320541242300173050ustar00rootroot00000000000000#!/usr/bin/env xonsh print(__name__) 
xonsh-0.6.0/tests/bin/pwd000077500000000000000000000000721320541242300153010ustar00rootroot00000000000000#!/usr/bin/env python3 import os x = os.getcwd() print(x) xonsh-0.6.0/tests/bin/pwd.bat000066400000000000000000000003471320541242300160500ustar00rootroot00000000000000@echo on call :s_which py.exe rem note that %~dp0 is dir of this batch script if not "%_path%" == "" ( py -3 %~dp0pwd %* ) else ( python %~dp0pwd %* ) goto :eof :s_which setlocal endlocal & set _path=%~$PATH:1 goto :eofxonsh-0.6.0/tests/bin/sourcefile.xsh000077500000000000000000000001711320541242300174500ustar00rootroot00000000000000#!/usr/bin/env xonsh import os x = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'printfile.xsh') source @(x) xonsh-0.6.0/tests/bin/wc000077500000000000000000000004221320541242300151170ustar00rootroot00000000000000#!/usr/bin/env python3 import sys if len(sys.argv) == 1: f = sys.stdin.buffer else: f = open(sys.argv[1], 'rb') doc = f.read() lines = len(doc.splitlines()) words = len(doc.split()) bytes = len(doc) print(' {0} {1:>2} {2} {3}'.format(lines, words, bytes, f.name))xonsh-0.6.0/tests/bin/wc.bat000066400000000000000000000003451320541242300156650ustar00rootroot00000000000000@echo on call :s_which py.exe rem note that %~dp0 is dir of this batch script if not "%_path%" == "" ( py -3 %~dp0wc %* ) else ( python %~dp0wc %* ) goto :eof :s_which setlocal endlocal & set _path=%~$PATH:1 goto :eofxonsh-0.6.0/tests/conftest.py000066400000000000000000000060071320541242300162140ustar00rootroot00000000000000import builtins import glob import os import pytest from xonsh.built_ins import ensure_list_of_strs, enter_macro from xonsh.execer import Execer from xonsh.jobs import tasks from xonsh.events import events from xonsh.platform import ON_WINDOWS from tools import DummyShell, sp, DummyCommandsCache, DummyEnv, DummyHistory @pytest.fixture def source_path(): """Get the xonsh source path.""" pwd = os.path.dirname(__file__) return os.path.dirname(pwd) @pytest.fixture def xonsh_execer(monkeypatch): """Initiate the Execer with a mocked nop `load_builtins`""" monkeypatch.setattr('xonsh.built_ins.load_builtins.__code__', (lambda *args, **kwargs: None).__code__) execer = Execer(unload=False) builtins.__xonsh_execer__ = execer return execer @pytest.yield_fixture def xonsh_events(): yield events for name, oldevent in vars(events).items(): # Heavily based on transmogrification species = oldevent.species newevent = events._mkevent(name, species, species.__doc__) setattr(events, name, newevent) @pytest.yield_fixture def xonsh_builtins(xonsh_events): """Mock out most of the builtins xonsh attributes.""" old_builtins = set(dir(builtins)) builtins.__xonsh_env__ = DummyEnv() if ON_WINDOWS: builtins.__xonsh_env__['PATHEXT'] = ['.EXE', '.BAT', '.CMD'] builtins.__xonsh_ctx__ = {} builtins.__xonsh_shell__ = DummyShell() builtins.__xonsh_help__ = lambda x: x builtins.__xonsh_glob__ = glob.glob builtins.__xonsh_exit__ = False builtins.__xonsh_superhelp__ = lambda x: x builtins.__xonsh_regexpath__ = lambda x: [] builtins.__xonsh_expand_path__ = lambda x: x builtins.__xonsh_subproc_captured__ = sp builtins.__xonsh_subproc_uncaptured__ = sp builtins.__xonsh_stdout_uncaptured__ = None builtins.__xonsh_stderr_uncaptured__ = None builtins.__xonsh_ensure_list_of_strs__ = ensure_list_of_strs builtins.__xonsh_commands_cache__ = DummyCommandsCache() builtins.__xonsh_all_jobs__ = {} builtins.__xonsh_history__ = DummyHistory() builtins.__xonsh_subproc_captured_hiddenobject__ = sp builtins.__xonsh_enter_macro__ = enter_macro 
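    # The execution hooks are kept minimal here: ``evalx`` falls back to plain
    # ``eval`` while ``execx``/``compilex`` are left as ``None``; tests that
    # need to observe executed source (e.g. tests/aliases/test_source.py)
    # monkeypatch ``execx`` with their own recording callable.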
builtins.evalx = eval builtins.execx = None builtins.compilex = None builtins.aliases = {} # Unlike all the other stuff, this has to refer to the "real" one because all modules that would # be firing events on the global instance. builtins.events = xonsh_events yield builtins for attr in set(dir(builtins)) - old_builtins: delattr(builtins, attr) tasks.clear() # must to this to enable resetting all_jobs if ON_WINDOWS: try: import win_unicode_console except ImportError: pass else: @pytest.fixture(autouse=True) def disable_win_unicode_console(monkeypatch): """ Disable win_unicode_console if it is present since it collides with pytests ouptput capture""" monkeypatch.setattr(win_unicode_console, 'enable', lambda: None) xonsh-0.6.0/tests/histories/000077500000000000000000000000001320541242300160235ustar00rootroot00000000000000xonsh-0.6.0/tests/histories/echo.json000066400000000000000000000160711320541242300176410ustar00rootroot00000000000000{"locs": [ 69, 3707, 3787, 3435], "index": {"offsets": {"__total__": 0, "cmds": [{"__total__": 10, "inp": 18, "out": 52, "rtn": 83, "ts": [93, 113, 92]}, {"__total__": 137, "inp": 145, "rtn": 161, "ts": [171, 191, 170]}, 9], "env": {"BASH_COMPLETIONS": 245, "CASE_SENSITIVE_COMPLETIONS": 342, "CLUTTER_IM_MODULE": 368, "COLORTERM": 388, "CROSS_SECTIONS": 424, "DATAPATH": 489, "DBUS_SESSION_BUS_ADDRESS": 548, "DEFAULTS_PATH": 641, "DESKTOP_SESSION": 707, "DISPLAY": 735, "EDITOR": 751, "GDMSESSION": 771, "GDM_LANG": 800, "GIO_LAUNCHED_DESKTOP_FILE": 838, "GIO_LAUNCHED_DESKTOP_FILE_PID": 914, "GNOME_DESKTOP_SESSION_ID": 950, "GTEST_COLOR": 976, "GTK_IM_MODULE": 1000, "GTK_MODULES": 1023, "HOME": 1052, "INDENT": 1079, "LANG": 1095, "LANGUAGE": 1122, "LC_COLLATE": 1145, "LC_CTYPE": 1162, "LC_MESSAGES": 1192, "LC_MONETARY": 1212, "LC_NUMERIC": 1231, "LC_TIME": 1247, "LD_LIBRARY_PATH": 1271, "LESS": 1308, "LESSOPEN": 1328, "LOGNAME": 1398, "LSCUTOFF": 1421, "MANDATORY_PATH": 1446, "MULTILINE_PROMPT": 1515, "ORBIT_SOCKETDIR": 1539, "PATH": 1569, "PROMPT": 1751, "PWD": 1859, "QT4_IM_MODULE": 1893, "QT_IM_MODULE": 1916, "QT_QPA_PLATFORMTHEME": 1948, "SESSION_MANAGER": 1982, "SHELL": 2064, "SHELL_TYPE": 2091, "SHLVL": 2112, "SSH_AGENT_PID": 2134, "SSH_AUTH_SOCK": 2159, "TERM": 2203, "TERMINATOR_UUID": 2231, "TITLE": 2289, "USER": 2333, "WINDOWID": 2356, "XAUTHORITY": 2382, "XDG_CONFIG_DIRS": 2430, "XDG_CURRENT_DESKTOP": 2492, "XDG_DATA_DIRS": 2526, "XDG_GREETER_DATA_DIR": 2625, "XDG_MENU_PREFIX": 2677, "XDG_RUNTIME_DIR": 2706, "XDG_SEAT": 2736, "XDG_SEAT_PATH": 2762, "XDG_SESSION_DESKTOP": 2826, "XDG_SESSION_ID": 2861, "XDG_SESSION_PATH": 2887, "XDG_SESSION_TYPE": 2951, "XDG_VTNR": 2970, "XMODIFIERS": 2989, "XONSHRC": 3012, "XONSH_CONFIG_DIR": 3058, "XONSH_DATA_DIR": 3107, "XONSH_HISTORY_FILE": 3165, "XONSH_HISTORY_SIZE": 3224, "XONSH_STORE_STDOUT": 3263, "XONSH_VERSION": 3284, "_": 3298, "__total__": 224}, "locked": 3326, "sessionid": 3346, "ts": [3393, 3413, 3392]}, "sizes": {"__total__": 3435, "cmds": [{"__total__": 125, "inp": 25, "out": 22, "rtn": 1, "ts": [18, 18, 41]}, {"__total__": 76, "inp": 7, "rtn": 1, "ts": [18, 18, 41]}, 206], "env": {"BASH_COMPLETIONS": 65, "CASE_SENSITIVE_COMPLETIONS": 3, "CLUTTER_IM_MODULE": 5, "COLORTERM": 16, "CROSS_SECTIONS": 51, "DATAPATH": 29, "DBUS_SESSION_BUS_ADDRESS": 74, "DEFAULTS_PATH": 45, "DESKTOP_SESSION": 15, "DISPLAY": 4, "EDITOR": 4, "GDMSESSION": 15, "GDM_LANG": 7, "GIO_LAUNCHED_DESKTOP_FILE": 41, "GIO_LAUNCHED_DESKTOP_FILE_PID": 6, "GNOME_DESKTOP_SESSION_ID": 9, "GTEST_COLOR": 5, "GTK_IM_MODULE": 6, "GTK_MODULES": 19, 
"HOME": 15, "INDENT": 6, "LANG": 13, "LANGUAGE": 7, "LC_COLLATE": 3, "LC_CTYPE": 13, "LC_MESSAGES": 3, "LC_MONETARY": 3, "LC_NUMERIC": 3, "LC_TIME": 3, "LD_LIBRARY_PATH": 27, "LESS": 6, "LESSOPEN": 57, "LOGNAME": 9, "LSCUTOFF": 5, "MANDATORY_PATH": 47, "MULTILINE_PROMPT": 3, "ORBIT_SOCKETDIR": 20, "PATH": 170, "PROMPT": 99, "PWD": 15, "QT4_IM_MODULE": 5, "QT_IM_MODULE": 6, "QT_QPA_PLATFORMTHEME": 13, "SESSION_MANAGER": 71, "SHELL": 11, "SHELL_TYPE": 10, "SHLVL": 3, "SSH_AGENT_PID": 6, "SSH_AUTH_SOCK": 34, "TERM": 7, "TERMINATOR_UUID": 47, "TITLE": 34, "USER": 9, "WINDOWID": 10, "XAUTHORITY": 27, "XDG_CONFIG_DIRS": 37, "XDG_CURRENT_DESKTOP": 15, "XDG_DATA_DIRS": 73, "XDG_GREETER_DATA_DIR": 31, "XDG_MENU_PREFIX": 8, "XDG_RUNTIME_DIR": 16, "XDG_SEAT": 7, "XDG_SEAT_PATH": 39, "XDG_SESSION_DESKTOP": 15, "XDG_SESSION_ID": 4, "XDG_SESSION_PATH": 42, "XDG_SESSION_TYPE": 5, "XDG_VTNR": 3, "XMODIFIERS": 10, "XONSHRC": 24, "XONSH_CONFIG_DIR": 29, "XONSH_DATA_DIR": 34, "XONSH_HISTORY_FILE": 35, "XONSH_HISTORY_SIZE": 15, "XONSH_STORE_STDOUT": 2, "XONSH_VERSION": 7, "_": 14, "__total__": 3090}, "locked": 5, "sessionid": 38, "ts": [18, 18, 41]}}, "data": {"cmds": [{"inp": "echo fan the bunnymen\n", "out": "fan the bunnymen\r\n", "rtn": 0, "ts": [1440360782.5314634, 1440360782.5452716] } , {"inp": "EOF\n", "rtn": 0, "ts": [1440360785.7070692, 1440360785.7086098] } ] , "env": {"BASH_COMPLETIONS": "/etc/bash_completion:/usr/share/bash-completion/completions/git", "CASE_SENSITIVE_COMPLETIONS": "1", "CLUTTER_IM_MODULE": "xim", "COLORTERM": "gnome-terminal", "CROSS_SECTIONS": "/home/scopatz/openmc/data/nndc/cross_sections.xml", "DATAPATH": "/usr/share/MCNPX/v260/Data/", "DBUS_SESSION_BUS_ADDRESS": "unix:abstract=/tmp/dbus-mN1JNeR4P5,guid=4222f88e296fedba6528ff3055d9d809", "DEFAULTS_PATH": "/usr/share/gconf/awesome-gnome.default.path", "DESKTOP_SESSION": "awesome-gnome", "DISPLAY": ":0", "EDITOR": "xo", "GDMSESSION": "awesome-gnome", "GDM_LANG": "en_US", "GIO_LAUNCHED_DESKTOP_FILE": "/usr/share/applications/awesome.desktop", "GIO_LAUNCHED_DESKTOP_FILE_PID": "1894", "GNOME_DESKTOP_SESSION_ID": "Default", "GTEST_COLOR": "yes", "GTK_IM_MODULE": "ibus", "GTK_MODULES": "overlay-scrollbar", "HOME": "/home/scopatz", "INDENT": " ", "LANG": "en_US.UTF-8", "LANGUAGE": "en_US", "LC_COLLATE": "C", "LC_CTYPE": "en_US.UTF-8", "LC_MESSAGES": "C", "LC_MONETARY": "C", "LC_NUMERIC": "C", "LC_TIME": "C", "LD_LIBRARY_PATH": "/home/scopatz/.local/lib:", "LESS": " -R ", "LESSOPEN": "| /usr/share/source-highlight/src-hilite-lesspipe.sh %s", "LOGNAME": "scopatz", "LSCUTOFF": "100", "MANDATORY_PATH": "/usr/share/gconf/awesome-gnome.mandatory.path", "MULTILINE_PROMPT": ".", "ORBIT_SOCKETDIR": "/tmp/orbit-scopatz", "PATH": "/home/scopatz/.local/bin:/home/scopatz/sandbox/bin:/home/scopatz/miniconda3/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games", "PROMPT": "{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}{branch_color}{curr_branch} {BOLD_BLUE}${NO_COLOR} ", "PWD": "/home/scopatz", "QT4_IM_MODULE": "xim", "QT_IM_MODULE": "ibus", "QT_QPA_PLATFORMTHEME": "appmenu-qt5", "SESSION_MANAGER": "local/athenaie:@/tmp/.ICE-unix/1729,unix/athenaie:/tmp/.ICE-unix/1729", "SHELL": "/bin/bash", "SHELL_TYPE": "readline", "SHLVL": "2", "SSH_AGENT_PID": "1788", "SSH_AUTH_SOCK": "/tmp/ssh-zI4rxe7ejkJG/agent.1729", "TERM": "xterm", "TERMINATOR_UUID": "urn:uuid:648fe1b7-d380-439b-bb8a-d79850a650bc", "TITLE": "{user}@{hostname}: {cwd} | xonsh", "USER": "scopatz", "WINDOWID": "67108868", "XAUTHORITY": 
"/home/scopatz/.Xauthority", "XDG_CONFIG_DIRS": "/etc/xdg/xdg-awesome-gnome:/etc/xdg", "XDG_CURRENT_DESKTOP": "Awesome GNOME", "XDG_DATA_DIRS": "/usr/share/awesome-gnome:/usr/share/gnome:/usr/local/share/:/usr/share/", "XDG_GREETER_DATA_DIR": "/var/lib/lightdm-data/scopatz", "XDG_MENU_PREFIX": "gnome-", "XDG_RUNTIME_DIR": "/run/user/1000", "XDG_SEAT": "seat0", "XDG_SEAT_PATH": "/org/freedesktop/DisplayManager/Seat0", "XDG_SESSION_DESKTOP": "awesome-gnome", "XDG_SESSION_ID": "c3", "XDG_SESSION_PATH": "/org/freedesktop/DisplayManager/Session0", "XDG_SESSION_TYPE": "x11", "XDG_VTNR": "7", "XMODIFIERS": "@im=ibus", "XONSHRC": "/home/scopatz/.xonshrc", "XONSH_CONFIG_DIR": "/home/scopatz/.config/xonsh", "XONSH_DATA_DIR": "/home/scopatz/.local/share/xonsh", "XONSH_HISTORY_FILE": "/home/scopatz/.xonsh_history.json", "XONSH_HISTORY_SIZE": "8128 commands", "XONSH_STORE_STDOUT": "", "XONSH_VERSION": "0.1.6", "_": "/usr/bin/env"} , "locked": false, "sessionid": "67a24a96-b389-4aab-b2b6-6a0606d2ac65", "ts": [1440360773.6575115, 1440360785.7093797] } } xonsh-0.6.0/tests/histories/simple-python.json000066400000000000000000000167341320541242300215410ustar00rootroot00000000000000{"locs": [ 69, 3964, 4044, 3597], "index": {"offsets": {"__total__": 0, "cmds": [{"__total__": 10, "inp": 18, "out": 51, "rtn": 75, "ts": [85, 105, 84]}, {"__total__": 128, "inp": 136, "rtn": 169, "ts": [179, 199, 178]}, {"__total__": 223, "inp": 231, "rtn": 247, "ts": [257, 277, 256]}, {"__total__": 301, "inp": 309, "rtn": 325, "ts": [335, 355, 334]}, 9], "env": {"BASH_COMPLETIONS": 408, "CASE_SENSITIVE_COMPLETIONS": 505, "CLUTTER_IM_MODULE": 531, "COLORTERM": 551, "CROSS_SECTIONS": 587, "DATAPATH": 652, "DBUS_SESSION_BUS_ADDRESS": 711, "DEFAULTS_PATH": 804, "DESKTOP_SESSION": 870, "DISPLAY": 898, "EDITOR": 914, "GDMSESSION": 934, "GDM_LANG": 963, "GIO_LAUNCHED_DESKTOP_FILE": 1001, "GIO_LAUNCHED_DESKTOP_FILE_PID": 1077, "GNOME_DESKTOP_SESSION_ID": 1113, "GTEST_COLOR": 1139, "GTK_IM_MODULE": 1163, "GTK_MODULES": 1186, "HOME": 1215, "INDENT": 1242, "LANG": 1258, "LANGUAGE": 1285, "LC_COLLATE": 1308, "LC_CTYPE": 1325, "LC_MESSAGES": 1355, "LC_MONETARY": 1375, "LC_NUMERIC": 1394, "LC_TIME": 1410, "LD_LIBRARY_PATH": 1434, "LESS": 1471, "LESSOPEN": 1491, "LOGNAME": 1561, "LSCUTOFF": 1584, "MANDATORY_PATH": 1609, "MULTILINE_PROMPT": 1678, "ORBIT_SOCKETDIR": 1702, "PATH": 1732, "PROMPT": 1914, "PWD": 2022, "QT4_IM_MODULE": 2056, "QT_IM_MODULE": 2079, "QT_QPA_PLATFORMTHEME": 2111, "SESSION_MANAGER": 2145, "SHELL": 2227, "SHELL_TYPE": 2254, "SHLVL": 2275, "SSH_AGENT_PID": 2297, "SSH_AUTH_SOCK": 2322, "TERM": 2366, "TERMINATOR_UUID": 2394, "TITLE": 2452, "USER": 2496, "WINDOWID": 2519, "XAUTHORITY": 2545, "XDG_CONFIG_DIRS": 2593, "XDG_CURRENT_DESKTOP": 2655, "XDG_DATA_DIRS": 2689, "XDG_GREETER_DATA_DIR": 2788, "XDG_MENU_PREFIX": 2840, "XDG_RUNTIME_DIR": 2869, "XDG_SEAT": 2899, "XDG_SEAT_PATH": 2925, "XDG_SESSION_DESKTOP": 2989, "XDG_SESSION_ID": 3024, "XDG_SESSION_PATH": 3050, "XDG_SESSION_TYPE": 3114, "XDG_VTNR": 3133, "XMODIFIERS": 3152, "XONSHRC": 3175, "XONSH_CONFIG_DIR": 3221, "XONSH_DATA_DIR": 3270, "XONSH_HISTORY_FILE": 3328, "XONSH_HISTORY_SIZE": 3387, "XONSH_STORE_STDOUT": 3426, "XONSH_VERSION": 3447, "_": 3461, "__total__": 387}, "locked": 3489, "sessionid": 3509, "ts": [3556, 3576, 3555]}, "sizes": {"__total__": 3597, "cmds": [{"__total__": 116, "inp": 24, "out": 15, "rtn": 1, "ts": [18, 17, 40]}, {"__total__": 93, "inp": 24, "rtn": 1, "ts": [18, 18, 41]}, {"__total__": 76, "inp": 7, "rtn": 1, "ts": [18, 18, 41]}, {"__total__": 
75, "inp": 7, "rtn": 1, "ts": [18, 17, 40]}, 369], "env": {"BASH_COMPLETIONS": 65, "CASE_SENSITIVE_COMPLETIONS": 3, "CLUTTER_IM_MODULE": 5, "COLORTERM": 16, "CROSS_SECTIONS": 51, "DATAPATH": 29, "DBUS_SESSION_BUS_ADDRESS": 74, "DEFAULTS_PATH": 45, "DESKTOP_SESSION": 15, "DISPLAY": 4, "EDITOR": 4, "GDMSESSION": 15, "GDM_LANG": 7, "GIO_LAUNCHED_DESKTOP_FILE": 41, "GIO_LAUNCHED_DESKTOP_FILE_PID": 6, "GNOME_DESKTOP_SESSION_ID": 9, "GTEST_COLOR": 5, "GTK_IM_MODULE": 6, "GTK_MODULES": 19, "HOME": 15, "INDENT": 6, "LANG": 13, "LANGUAGE": 7, "LC_COLLATE": 3, "LC_CTYPE": 13, "LC_MESSAGES": 3, "LC_MONETARY": 3, "LC_NUMERIC": 3, "LC_TIME": 3, "LD_LIBRARY_PATH": 27, "LESS": 6, "LESSOPEN": 57, "LOGNAME": 9, "LSCUTOFF": 5, "MANDATORY_PATH": 47, "MULTILINE_PROMPT": 3, "ORBIT_SOCKETDIR": 20, "PATH": 170, "PROMPT": 99, "PWD": 15, "QT4_IM_MODULE": 5, "QT_IM_MODULE": 6, "QT_QPA_PLATFORMTHEME": 13, "SESSION_MANAGER": 71, "SHELL": 11, "SHELL_TYPE": 10, "SHLVL": 3, "SSH_AGENT_PID": 6, "SSH_AUTH_SOCK": 34, "TERM": 7, "TERMINATOR_UUID": 47, "TITLE": 34, "USER": 9, "WINDOWID": 10, "XAUTHORITY": 27, "XDG_CONFIG_DIRS": 37, "XDG_CURRENT_DESKTOP": 15, "XDG_DATA_DIRS": 73, "XDG_GREETER_DATA_DIR": 31, "XDG_MENU_PREFIX": 8, "XDG_RUNTIME_DIR": 16, "XDG_SEAT": 7, "XDG_SEAT_PATH": 39, "XDG_SESSION_DESKTOP": 15, "XDG_SESSION_ID": 4, "XDG_SESSION_PATH": 42, "XDG_SESSION_TYPE": 5, "XDG_VTNR": 3, "XMODIFIERS": 10, "XONSHRC": 24, "XONSH_CONFIG_DIR": 29, "XONSH_DATA_DIR": 34, "XONSH_HISTORY_FILE": 35, "XONSH_HISTORY_SIZE": 15, "XONSH_STORE_STDOUT": 2, "XONSH_VERSION": 7, "_": 14, "__total__": 3090}, "locked": 5, "sessionid": 38, "ts": [18, 17, 40]}}, "data": {"cmds": [{"inp": "print('The Turtles')\n", "out": "The Turtles\n", "rtn": 0, "ts": [1440371854.0424275, 1440371854.042462] } , {"inp": "def f():\n pass\n\n", "rtn": 0, "ts": [1440371868.8591268, 1440371868.8591323] } , {"inp": "f()\n", "rtn": 0, "ts": [1440371873.6370964, 1440371873.6371114] } , {"inp": "EOF\n", "rtn": 0, "ts": [1440371879.4418402, 1440371879.442938] } ] , "env": {"BASH_COMPLETIONS": "/etc/bash_completion:/usr/share/bash-completion/completions/git", "CASE_SENSITIVE_COMPLETIONS": "1", "CLUTTER_IM_MODULE": "xim", "COLORTERM": "gnome-terminal", "CROSS_SECTIONS": "/home/scopatz/openmc/data/nndc/cross_sections.xml", "DATAPATH": "/usr/share/MCNPX/v260/Data/", "DBUS_SESSION_BUS_ADDRESS": "unix:abstract=/tmp/dbus-mN1JNeR4P5,guid=4222f88e296fedba6528ff3055d9d809", "DEFAULTS_PATH": "/usr/share/gconf/awesome-gnome.default.path", "DESKTOP_SESSION": "awesome-gnome", "DISPLAY": ":0", "EDITOR": "xo", "GDMSESSION": "awesome-gnome", "GDM_LANG": "en_US", "GIO_LAUNCHED_DESKTOP_FILE": "/usr/share/applications/awesome.desktop", "GIO_LAUNCHED_DESKTOP_FILE_PID": "1894", "GNOME_DESKTOP_SESSION_ID": "Default", "GTEST_COLOR": "yes", "GTK_IM_MODULE": "ibus", "GTK_MODULES": "overlay-scrollbar", "HOME": "/home/scopatz", "INDENT": " ", "LANG": "en_US.UTF-8", "LANGUAGE": "en_US", "LC_COLLATE": "C", "LC_CTYPE": "en_US.UTF-8", "LC_MESSAGES": "C", "LC_MONETARY": "C", "LC_NUMERIC": "C", "LC_TIME": "C", "LD_LIBRARY_PATH": "/home/scopatz/.local/lib:", "LESS": " -R ", "LESSOPEN": "| /usr/share/source-highlight/src-hilite-lesspipe.sh %s", "LOGNAME": "scopatz", "LSCUTOFF": "100", "MANDATORY_PATH": "/usr/share/gconf/awesome-gnome.mandatory.path", "MULTILINE_PROMPT": ".", "ORBIT_SOCKETDIR": "/tmp/orbit-scopatz", "PATH": "/home/scopatz/.local/bin:/home/scopatz/sandbox/bin:/home/scopatz/miniconda3/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games", "PROMPT": 
"{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}{branch_color}{curr_branch} {BOLD_BLUE}${NO_COLOR} ", "PWD": "/home/scopatz", "QT4_IM_MODULE": "xim", "QT_IM_MODULE": "ibus", "QT_QPA_PLATFORMTHEME": "appmenu-qt5", "SESSION_MANAGER": "local/athenaie:@/tmp/.ICE-unix/1729,unix/athenaie:/tmp/.ICE-unix/1729", "SHELL": "/bin/bash", "SHELL_TYPE": "readline", "SHLVL": "2", "SSH_AGENT_PID": "1788", "SSH_AUTH_SOCK": "/tmp/ssh-zI4rxe7ejkJG/agent.1729", "TERM": "xterm", "TERMINATOR_UUID": "urn:uuid:efa23cb0-4eed-4de0-9dbc-c9b9f2f78263", "TITLE": "{user}@{hostname}: {cwd} | xonsh", "USER": "scopatz", "WINDOWID": "67108868", "XAUTHORITY": "/home/scopatz/.Xauthority", "XDG_CONFIG_DIRS": "/etc/xdg/xdg-awesome-gnome:/etc/xdg", "XDG_CURRENT_DESKTOP": "Awesome GNOME", "XDG_DATA_DIRS": "/usr/share/awesome-gnome:/usr/share/gnome:/usr/local/share/:/usr/share/", "XDG_GREETER_DATA_DIR": "/var/lib/lightdm-data/scopatz", "XDG_MENU_PREFIX": "gnome-", "XDG_RUNTIME_DIR": "/run/user/1000", "XDG_SEAT": "seat0", "XDG_SEAT_PATH": "/org/freedesktop/DisplayManager/Seat0", "XDG_SESSION_DESKTOP": "awesome-gnome", "XDG_SESSION_ID": "c3", "XDG_SESSION_PATH": "/org/freedesktop/DisplayManager/Session0", "XDG_SESSION_TYPE": "x11", "XDG_VTNR": "7", "XMODIFIERS": "@im=ibus", "XONSHRC": "/home/scopatz/.xonshrc", "XONSH_CONFIG_DIR": "/home/scopatz/.config/xonsh", "XONSH_DATA_DIR": "/home/scopatz/.local/share/xonsh", "XONSH_HISTORY_FILE": "/home/scopatz/.xonsh_history.json", "XONSH_HISTORY_SIZE": "8128 commands", "XONSH_STORE_STDOUT": "", "XONSH_VERSION": "0.1.6", "_": "/usr/bin/env"} , "locked": false, "sessionid": "c461668b-f650-4e17-8774-2b58384a1b9c", "ts": [1440371810.6295521, 1440371879.443467] } } xonsh-0.6.0/tests/run_pwd.xsh000066400000000000000000000000041320541242300162060ustar00rootroot00000000000000pwd xonsh-0.6.0/tests/sample.xsh000066400000000000000000000002101320541242300160100ustar00rootroot00000000000000# I am a test module. 
aliases['echo'] = lambda args, stdin=None: print(' '.join(args)) $WAKKA = "jawaka" x = $(echo "hello mom" $WAKKA)xonsh-0.6.0/tests/scripts/000077500000000000000000000000001320541242300155015ustar00rootroot00000000000000xonsh-0.6.0/tests/scripts/raise.xsh000066400000000000000000000000311320541242300173220ustar00rootroot00000000000000raise Exception('oh no') xonsh-0.6.0/tests/test_aliases.py000066400000000000000000000024121320541242300170430ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Testing built_ins.Aliases""" from __future__ import unicode_literals, print_function import os import pytest import xonsh.built_ins as built_ins from xonsh.aliases import Aliases from xonsh.environ import Env from tools import skip_if_on_windows def cd(args, stdin=None): return args ALIASES = Aliases({'o': ['omg', 'lala']}, color_ls=['ls', '--color=true'], ls="ls '- -'", cd=cd, indirect_cd='cd ..') RAW = ALIASES._raw def test_imports(): expected = { 'o': ['omg', 'lala'], 'ls': ['ls', '- -'], 'color_ls': ['ls', '--color=true'], 'cd': cd, 'indirect_cd': ['cd', '..'] } assert RAW == expected def test_eval_normal(xonsh_builtins): assert ALIASES.get('o') == ['omg', 'lala'] def test_eval_self_reference(xonsh_builtins): assert ALIASES.get('ls') == ['ls', '- -'] def test_eval_recursive(xonsh_builtins): assert ALIASES.get('color_ls') == ['ls', '- -', '--color=true'] @skip_if_on_windows def test_eval_recursive_callable_partial(xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env(HOME=os.path.expanduser('~')) assert ALIASES.get('indirect_cd')(['arg2', 'arg3']) == ['..', 'arg2', 'arg3'] xonsh-0.6.0/tests/test_ast.py000066400000000000000000000046771320541242300162300ustar00rootroot00000000000000"""Xonsh AST tests.""" import ast as pyast from xonsh import ast from xonsh.ast import Tuple, Name, Store, min_line, Call, BinOp, pdump import pytest from tools import check_parse, nodes_equal @pytest.fixture(autouse=True) def xonsh_execer_autouse(xonsh_execer): return xonsh_execer def test_gather_names_name(): node = Name(id='y', ctx=Store()) exp = {'y'} obs = ast.gather_names(node) assert exp == obs def test_gather_names_tuple(): node = Tuple(elts=[Name(id='y', ctx=Store()), Name(id='z', ctx=Store())]) exp = {'y', 'z'} obs = ast.gather_names(node) assert exp == obs def test_gather_load_store_names_tuple(): node = Tuple(elts=[Name(id='y', ctx=Store()), Name(id='z', ctx=Store())]) lexp = set() sexp = {'y', 'z'} lobs, sobs = ast.gather_load_store_names(node) assert lexp == lobs assert sexp == sobs @pytest.mark.parametrize('line1', [ # this second line wil be transformed into a subprocess call 'x = 1', # this second line wil be transformed into a subprocess call even though # ls is defined. 'ls = 1', # the second line wil be transformed still even though l exists. 
'l = 1', ]) def test_multilline_num(xonsh_builtins, line1): code = line1 + '\nls -l\n' tree = check_parse(code) lsnode = tree.body[1] assert 2 == min_line(lsnode) assert isinstance(lsnode.value, Call) def test_multilline_no_transform(): # no subprocess transformations happen here since all variables are known code = 'ls = 1\nl = 1\nls -l\n' tree = check_parse(code) lsnode = tree.body[2] assert 3 == min_line(lsnode) assert isinstance(lsnode.value, BinOp) @pytest.mark.parametrize('inp', [ """def f(): if True: pass """, """def f(x): if x: pass """, """def f(*args): if not args: pass """, """def f(*, y): if y: pass """, """def f(**kwargs): if not kwargs: pass """, """def f(k=42): if not k: pass """, """def f(k=10, *, a, b=1, **kw): if not kw and b: pass """, """import os path = '/path/to/wakka' paths = [] for root, dirs, files in os.walk(path): paths.extend(os.path.join(root, d) for d in dirs) paths.extend(os.path.join(root, f) for f in files) """, ]) def test_unmodified(inp): # Context sensitive parsing should not modify AST exp = pyast.parse(inp) obs = check_parse(inp) assert nodes_equal(exp, obs) xonsh-0.6.0/tests/test_base_shell.py000066400000000000000000000025121320541242300175240ustar00rootroot00000000000000# -*- coding: utf-8 -*- """(A down payment on) Testing for ``xonsh.base_shell.BaseShell`` and associated classes""" import os from xonsh.environ import Env from xonsh.base_shell import BaseShell from xonsh.shell import transform_command def test_pwd_tracks_cwd(xonsh_builtins, xonsh_execer, tmpdir_factory, monkeypatch ): asubdir = str(tmpdir_factory.mktemp("asubdir")) cur_wd = os.getcwd() xonsh_builtins.__xonsh_env__ = Env(PWD=cur_wd, XONSH_CACHE_SCRIPTS=False, XONSH_CACHE_EVERYTHING=False) monkeypatch.setattr(xonsh_execer, "cacheall", False, raising=False) bc = BaseShell(xonsh_execer, None) assert os.getcwd() == cur_wd bc.default('os.chdir(r"' + asubdir + '")') assert os.path.abspath(os.getcwd()) == os.path.abspath(asubdir) assert os.path.abspath(os.getcwd()) == os.path.abspath(xonsh_builtins.__xonsh_env__['PWD']) assert 'OLDPWD' in xonsh_builtins.__xonsh_env__ assert os.path.abspath(cur_wd) == os.path.abspath(xonsh_builtins.__xonsh_env__['OLDPWD']) def test_transform(xonsh_builtins): @xonsh_builtins.events.on_transform_command def spam2egg(cmd, **_): if cmd == 'spam': return 'egg' else: return cmd assert transform_command('spam') == 'egg' assert transform_command('egg') == 'egg' assert transform_command('foo') == 'foo' xonsh-0.6.0/tests/test_bashisms.py000066400000000000000000000005551320541242300172410ustar00rootroot00000000000000"""Tests bashisms xontrib.""" import pytest @pytest.mark.parametrize('inp, exp', [ ('x = 42', 'x = 42'), ('!!', 'ls'), ]) def test_preproc(inp, exp, xonsh_builtins): """Test the bash preprocessor.""" from xontrib.bashisms import bash_preproc xonsh_builtins.__xonsh_history__.inps = ['ls\n'] obs = bash_preproc(inp) assert exp == obsxonsh-0.6.0/tests/test_builtins.py000066400000000000000000000163751320541242300172700ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the xonsh builtins.""" from __future__ import unicode_literals, print_function import os import re import builtins import types from ast import AST import pytest from xonsh import built_ins from xonsh.built_ins import reglob, pathsearch, helper, superhelper, \ ensure_list_of_strs, list_of_strs_or_callables, regexsearch, \ globsearch, expand_path, convert_macro_arg, in_macro_call, call_macro, \ enter_macro from xonsh.environ import Env from tools import skip_if_on_windows HOME_PATH = 
os.path.expanduser('~') @pytest.fixture(autouse=True) def xonsh_execer_autouse(xonsh_execer): return xonsh_execer @pytest.mark.parametrize('testfile', reglob('test_.*')) def test_reglob_tests(testfile): assert (testfile.startswith('test_')) @pytest.fixture def home_env(xonsh_builtins): """Set `__xonsh_env__ ` to a new Env instance on `xonsh_builtins`""" xonsh_builtins.__xonsh_env__ = Env(HOME=HOME_PATH) return xonsh_builtins @skip_if_on_windows def test_repath_backslash(home_env): exp = os.listdir(HOME_PATH) exp = {p for p in exp if re.match(r'\w\w.*', p)} exp = {os.path.join(HOME_PATH, p) for p in exp} obs = set(pathsearch(regexsearch, r'~/\w\w.*')) assert exp == obs @skip_if_on_windows def test_repath_HOME_PATH_itself(home_env): exp = HOME_PATH obs = pathsearch(regexsearch, '~') assert 1 == len(obs) assert exp == obs[0] @skip_if_on_windows def test_repath_HOME_PATH_contents(home_env): exp = os.listdir(HOME_PATH) exp = {os.path.join(HOME_PATH, p) for p in exp} obs = set(pathsearch(regexsearch, '~/.*')) assert exp == obs @skip_if_on_windows def test_repath_HOME_PATH_var(home_env): exp = HOME_PATH obs = pathsearch(regexsearch, '$HOME') assert 1 == len(obs) assert exp == obs[0] @skip_if_on_windows def test_repath_HOME_PATH_var_brace(home_env): exp = HOME_PATH obs = pathsearch(regexsearch, '${"HOME"}') assert 1 == len(obs) assert exp == obs[0] def test_helper_int(home_env): helper(int, 'int') def test_helper_helper(home_env): helper(helper, 'helper') def test_helper_env(home_env): helper(Env, 'Env') def test_superhelper_int(home_env): superhelper(int, 'int') def test_superhelper_helper(home_env): superhelper(helper, 'helper') def test_superhelper_env(home_env): superhelper(Env, 'Env') @pytest.mark.parametrize('exp, inp', [ (['yo'], 'yo'), (['yo'], ['yo']), (['42'], 42), (['42'], [42]) ]) def test_ensure_list_of_strs(exp, inp): obs = ensure_list_of_strs(inp) assert exp == obs f = lambda x: 20 @pytest.mark.parametrize('exp, inp', [ (['yo'], 'yo'), (['yo'], ['yo']), (['42'], 42), (['42'], [42]), ([f], f), ([f], [f]) ]) def test_list_of_strs_or_callables(exp, inp): obs = list_of_strs_or_callables(inp) assert exp == obs @pytest.mark.parametrize('s', [ '~', '~/', 'x=~/place', 'x=one:~/place', 'x=one:~/place:~/yo', 'x=~/one:~/place:~/yo', ]) def test_expand_path(s, home_env): if os.sep != '/': s = s.replace('/', os.sep) if os.pathsep != ':': s = s.replace(':', os.pathsep) assert expand_path(s) == s.replace('~', HOME_PATH) @pytest.mark.parametrize('kind', [str, 's', 'S', 'str', 'string']) def test_convert_macro_arg_str(kind): raw_arg = 'value' arg = convert_macro_arg(raw_arg, kind, None, None) assert arg is raw_arg @pytest.mark.parametrize('kind', [AST, 'a', 'Ast']) def test_convert_macro_arg_ast(kind): raw_arg = '42' arg = convert_macro_arg(raw_arg, kind, {}, None) assert isinstance(arg, AST) @pytest.mark.parametrize('kind', [types.CodeType, compile, 'c', 'code', 'compile']) def test_convert_macro_arg_code(kind): raw_arg = '42' arg = convert_macro_arg(raw_arg, kind, {}, None) assert isinstance(arg, types.CodeType) @pytest.mark.parametrize('kind', [eval, None, 'v', 'eval']) def test_convert_macro_arg_eval(kind): # literals raw_arg = '42' arg = convert_macro_arg(raw_arg, kind, {}, None) assert arg == 42 # exprs raw_arg = 'x + 41' arg = convert_macro_arg(raw_arg, kind, {}, {'x': 1}) assert arg == 42 @pytest.mark.parametrize('kind', [exec, 'x', 'exec']) def test_convert_macro_arg_exec(kind): # at global scope raw_arg = 'def f(x, y):\n return x + y' glbs = {} arg = convert_macro_arg(raw_arg, kind, glbs, 
None) assert arg is None assert 'f' in glbs assert glbs['f'](1, 41) == 42 # at local scope raw_arg = 'def g(z):\n return x + z\ny += 42' glbs = {'x': 40} locs = {'y': 1} arg = convert_macro_arg(raw_arg, kind, glbs, locs) assert arg is None assert 'g' in locs assert locs['g'](1) == 41 assert 'y' in locs assert locs['y'] == 43 @pytest.mark.parametrize('kind', [type, 't', 'type']) def test_convert_macro_arg_eval(kind): # literals raw_arg = '42' arg = convert_macro_arg(raw_arg, kind, {}, None) assert arg is int # exprs raw_arg = 'x + 41' arg = convert_macro_arg(raw_arg, kind, {}, {'x': 1}) assert arg is int def test_in_macro_call(): def f(): pass with in_macro_call(f, True, True): assert f.macro_globals assert f.macro_locals assert not hasattr(f, 'macro_globals') assert not hasattr(f, 'macro_locals') @pytest.mark.parametrize('arg', ['x', '42', 'x + y']) def test_call_macro_str(arg): def f(x : str): return x rtn = call_macro(f, [arg], None, None) assert rtn is arg @pytest.mark.parametrize('arg', ['x', '42', 'x + y']) def test_call_macro_ast(arg): def f(x : AST): return x rtn = call_macro(f, [arg], {}, None) assert isinstance(rtn, AST) @pytest.mark.parametrize('arg', ['x', '42', 'x + y']) def test_call_macro_code(arg): def f(x : compile): return x rtn = call_macro(f, [arg], {}, None) assert isinstance(rtn, types.CodeType) @pytest.mark.parametrize('arg', ['x', '42', 'x + y']) def test_call_macro_eval(arg): def f(x : eval): return x rtn = call_macro(f, [arg], {'x': 42, 'y': 0}, None) assert rtn == 42 @pytest.mark.parametrize('arg', ['if y:\n pass', 'if 42:\n pass', 'if x + y:\n pass']) def test_call_macro_exec(arg): def f(x : exec): return x rtn = call_macro(f, [arg], {'x': 42, 'y': 0}, None) assert rtn is None @pytest.mark.parametrize('arg', ['x', '42', 'x + y']) def test_call_macro_raw_arg(arg): def f(x : str): return x rtn = call_macro(f, ['*', arg], {'x': 42, 'y': 0}, None) assert rtn == 42 @pytest.mark.parametrize('arg', ['x', '42', 'x + y']) def test_call_macro_raw_kwarg(arg): def f(x : str): return x rtn = call_macro(f, ['*', 'x=' + arg], {'x': 42, 'y': 0}, None) assert rtn == 42 @pytest.mark.parametrize('arg', ['x', '42', 'x + y']) def test_call_macro_raw_kwargs(arg): def f(x : str): return x rtn = call_macro(f, ['*', '**{"x" :' + arg + '}'], {'x': 42, 'y': 0}, None) assert rtn == 42 def test_enter_macro(): obj = lambda: None rtn = enter_macro(obj, 'wakka', True, True) assert obj is rtn assert obj.macro_block == 'wakka' assert obj.macro_globals assert obj.macro_locals xonsh-0.6.0/tests/test_commands_cache.py000066400000000000000000000043401320541242300203500ustar00rootroot00000000000000import pytest import os from xonsh.commands_cache import (CommandsCache, predict_shell, SHELL_PREDICTOR_PARSER, predict_true, predict_false) from tools import skip_if_on_windows def test_commands_cache_lazy(xonsh_builtins): cc = CommandsCache() assert not cc.lazyin('xonsh') assert 0 == len(list(cc.lazyiter())) assert 0 == cc.lazylen() TRUE_SHELL_ARGS = [ ['-c', 'yo'], ['-c=yo'], ['file'], ['-i', '-l', 'file'], ['-i', '-c', 'yo'], ['-i', 'file'], ['-i', '-c', 'yo', 'file'], ] @pytest.mark.parametrize('args', TRUE_SHELL_ARGS) def test_predict_shell_parser(args): ns, unknown = SHELL_PREDICTOR_PARSER.parse_known_args(args) if ns.filename is not None: assert not ns.filename.startswith('-') @pytest.mark.parametrize('args', TRUE_SHELL_ARGS) def test_predict_shell_true(args): assert predict_shell(args) FALSE_SHELL_ARGS = [ [], ['-c'], ['-i'], ['-i', '-l'], ] @pytest.mark.parametrize('args', FALSE_SHELL_ARGS) def 
test_predict_shell_false(args): assert not predict_shell(args) PATTERN_BIN_USING_TTY_OR_NOT = [ (False, {10: b'isnotatty'}), (False, {12: b'isatty'}), (False, {151: b'gpm'}), (False, {10: b'isatty', 100: b'tcgetattr', }), (False, {10: b'isatty', 100: b'tcsetattr'}), (True, {10: b'isatty', 100: b'tcsetattr', 1000: b'tcgetattr'}), (True, {1000: b'libncurses'}), (True, {4094: b'libgpm'}), (True, {2045: b'tcgetattr', 4095: b'tcgetattr', 6140: b'tcsetattr', 8190: b'isatty'}), ] @pytest.mark.parametrize('args', PATTERN_BIN_USING_TTY_OR_NOT) @skip_if_on_windows def test_commands_cache_predictor_default(args): cc = CommandsCache() use_tty, patterns = args f = open('testfile', 'wb') where = list(patterns.keys()) where.sort() pos = 0 for w in where: f.write(b'\x20' * (w - pos)) f.write(patterns[w]) pos = w + len(patterns[w]) f.write(b'\x20' * (pos // 2)) f.close() result = cc.default_predictor_readbin('', os.getcwd() + os.sep + 'testfile', timeout=1, failure=None) expected = predict_false if use_tty else predict_true assert result == expected xonsh-0.6.0/tests/test_contexts.py000066400000000000000000000222001320541242300172660ustar00rootroot00000000000000"""Tests xonsh contexts.""" from textwrap import dedent from tools import check_exec from xonsh.contexts import Block, Functor import pytest @pytest.fixture(autouse=True) def xonsh_execer_autouse(xonsh_builtins, xonsh_execer): return xonsh_execer # # helpers # X1_WITH = ('x = 1\n' 'with! Block() as b:\n') SIMPLE_WITH = 'with! Block() as b:\n' FUNC_WITH = ('x = 1\n' 'def func():\n' ' y = 1\n' ' with! Block() as b:\n' '{body}' ' y += 1\n' ' return b\n' 'x += 1\n' 'rtn = func()\n' 'x += 1\n') FUNC_OBSG = {'x': 3} FUNC_OBSL = {'y': 1} def norm_body(body): if not isinstance(body, str): body = '\n'.join(body) body = dedent(body) body = body.splitlines() return body def block_checks_glb(name, glbs, body, obs=None): block = glbs[name] obs = obs or {} for k, v in obs.items(): assert v == glbs[k] body = norm_body(body) assert body == block.lines assert glbs is block.glbs assert block.locs is None def block_checks_func(name, glbs, body, obsg=None, obsl=None): block = glbs[name] obsg = obsg or {} for k, v in obsg.items(): assert v == glbs[k] body = norm_body(body) assert body == block.lines assert glbs is block.glbs # local context tests locs = block.locs assert locs is not None obsl = obsl or {} for k, v in obsl.items(): assert v == locs[k] # # Block tests # def test_block_noexec(): s = ('x = 1\n' 'with! 
Block():\n' ' x += 42\n') glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) assert 1 == glbs['x'] def test_block_oneline(): body = ' x += 42\n' s = X1_WITH + body glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_glb('b', glbs, body, {'x': 1}) def test_block_manylines(): body = (' ![echo wow mom]\n' '# bad place for a comment\n' ' x += 42') s = X1_WITH + body glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_glb('b', glbs, body, {'x': 1}) def test_block_leading_comment(): # leading comments do not show up in block lines body = (' # I am a leading comment\n' ' x += 42\n') s = X1_WITH + body glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_glb('b', glbs, [' x += 42'], {'x': 1}) def test_block_trailing_comment(): # trailing comments show up in block lines body = (' x += 42\n' ' # I am a trailing comment\n') s = X1_WITH + body glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_glb('b', glbs, body, {'x': 1}) def test_block_trailing_line_continuation(): body = (' x += \\\n' ' 42\n') s = X1_WITH + body glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_glb('b', glbs, body, {'x': 1}) def test_block_trailing_close_paren(): body = (' x += int("42"\n' ' )\n') s = X1_WITH + body glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_glb('b', glbs, body, {'x': 1}) def test_block_trailing_close_many(): body = (' x = {None: [int("42"\n' ' )\n' ' ]\n' ' }\n') s = SIMPLE_WITH + body glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_glb('b', glbs, body) def test_block_trailing_triple_string(): body = (' x = """This\n' 'is\n' '"probably"\n' '\'not\' what I meant.\n' '"""\n') s = SIMPLE_WITH + body glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_glb('b', glbs, body) def test_block_func_oneline(): body = ' x += 42\n' s = FUNC_WITH.format(body=body) glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_func('rtn', glbs, body, FUNC_OBSG, FUNC_OBSL) def test_block_func_manylines(): body = (' ![echo wow mom]\n' '# bad place for a comment\n' ' x += 42\n') s = FUNC_WITH.format(body=body) glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_func('rtn', glbs, body, FUNC_OBSG, FUNC_OBSL) def test_block_func_leading_comment(): # leading comments do not show up in block lines body = (' # I am a leading comment\n' ' x += 42\n') s = FUNC_WITH.format(body=body) glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_func('rtn', glbs, ' x += 42\n', FUNC_OBSG, FUNC_OBSL) def test_block_func_trailing_comment(): # trailing comments show up in block lines body = (' x += 42\n' ' # I am a trailing comment\n') s = FUNC_WITH.format(body=body) glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_func('rtn', glbs, body, FUNC_OBSG, FUNC_OBSL) def test_blockfunc__trailing_line_continuation(): body = (' x += \\\n' ' 42\n') s = FUNC_WITH.format(body=body) glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_func('rtn', glbs, body, FUNC_OBSG, FUNC_OBSL) def test_block_func_trailing_close_paren(): body = (' x += int("42"\n' ' )\n') s = FUNC_WITH.format(body=body) glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_func('rtn', glbs, body, FUNC_OBSG, FUNC_OBSL) def test_block_func_trailing_close_many(): body = (' x = {None: [int("42"\n' ' )\n' ' ]\n' ' }\n') s = FUNC_WITH.format(body=body) glbs = {'Block': Block} check_exec(s, glbs=glbs, 
locs=None) block_checks_func('rtn', glbs, body, FUNC_OBSG, FUNC_OBSL) def test_block_func_trailing_triple_string(): body = (' x = """This\n' 'is\n' '"probably"\n' '\'not\' what I meant.\n' '"""\n') s = FUNC_WITH.format(body=body) glbs = {'Block': Block} check_exec(s, glbs=glbs, locs=None) block_checks_func('rtn', glbs, body, FUNC_OBSG, FUNC_OBSL) # # Functor tests # X2_WITH = ('{var} = 1\n' 'with! Functor() as f:\n' '{body}' '{var} += 1\n' '{calls}\n' ) def test_functor_oneline_onecall_class(): body = (' global y\n' ' y += 42\n') calls = 'f()' s = X2_WITH.format(body=body, calls=calls, var='y') glbs = {'Functor': Functor} check_exec(s, glbs=glbs, locs=None) block_checks_glb('f', glbs, body, {'y': 44}) def test_functor_oneline_onecall_func(): body = (' global z\n' ' z += 42\n') calls = 'f.func()' s = X2_WITH.format(body=body, calls=calls, var='z') glbs = {'Functor': Functor} check_exec(s, glbs=glbs, locs=None) block_checks_glb('f', glbs, body, {'z': 44}) def test_functor_oneline_onecall_both(): body = (' global x\n' ' x += 42\n') calls = 'f()\nf.func()' s = X2_WITH.format(body=body, calls=calls, var='x') glbs = {'Functor': Functor} check_exec(s, glbs=glbs, locs=None) block_checks_glb('f', glbs, body, {'x': 86}) XA_WITH = ('x = [1]\n' 'with! Functor() as f:\n' '{body}' 'x.append(2)\n' '{calls}\n' ) def test_functor_oneline_append(): body = ' x.append(3)\n' calls = 'f()\n' s = XA_WITH.format(body=body, calls=calls) glbs = {'Functor': Functor} check_exec(s, glbs=glbs, locs=None) block_checks_glb('f', glbs, body, {'x': [1, 2, 3]}) def test_functor_return(): body = ' x = 42' t = ('res = 0\n' 'with! Functor(rtn="x") as f:\n' '{body}\n' 'res = f()\n') s = t.format(body=body) glbs = {'Functor': Functor} check_exec(s, glbs=glbs, locs=None) block_checks_glb('f', glbs, body, {'res': 42}) def test_functor_args(): body = ' x = 42 + a' t = ('res = 0\n' 'with! Functor(args=("a",), rtn="x") as f:\n' '{body}\n' 'res = f(2)\n') s = t.format(body=body) glbs = {'Functor': Functor} check_exec(s, glbs=glbs, locs=None) block_checks_glb('f', glbs, body, {'res': 44}) def test_functor_kwargs(): body = ' x = 42 + a + b' t = ('res = 0\n' 'with! Functor(kwargs={{"a": 1, "b": 12}}, rtn="x") as f:\n' '{body}\n' 'res = f(b=6)\n') s = t.format(body=body) glbs = {'Functor': Functor} check_exec(s, glbs=glbs, locs=None) block_checks_glb('f', glbs, body, {'res': 49}) def test_functor_fullsig(): body = ' x = 42 + a + b + c' t = ('res = 0\n' 'with! 
Functor(args=("c",), kwargs={{"a": 1, "b": 12}}, rtn="x") as f:\n' '{body}\n' 'res = f(55)\n') s = t.format(body=body) glbs = {'Functor': Functor} check_exec(s, glbs=glbs, locs=None) block_checks_glb('f', glbs, body, {'res': 110}) xonsh-0.6.0/tests/test_dirstack.py000066400000000000000000000057421320541242300172370ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Testing dirstack""" from __future__ import unicode_literals, print_function from contextlib import contextmanager from functools import wraps import os import builtins import pytest from xonsh import dirstack from xonsh.environ import Env from xonsh.built_ins import load_builtins HERE = os.path.abspath(os.path.dirname(__file__)) PARENT = os.path.dirname(HERE) @contextmanager def chdir(adir): old_dir = os.getcwd() os.chdir(adir) yield os.chdir(old_dir) def test_simple(xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=PARENT) with chdir(PARENT): assert os.getcwd() != HERE dirstack.cd(["tests"]) assert os.getcwd() == HERE def test_cdpath_simple(xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) with chdir(os.path.normpath("/")): assert os.getcwd() != HERE dirstack.cd(["tests"]) assert os.getcwd() == HERE def test_cdpath_collision(xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) sub_tests = os.path.join(HERE, "tests") if not os.path.exists(sub_tests): os.mkdir(sub_tests) with chdir(HERE): assert os.getcwd() == HERE dirstack.cd(["tests"]) assert os.getcwd() == os.path.join(HERE, "tests") def test_cdpath_expansion(xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env(HERE=HERE, CDPATH=("~", "$HERE")) test_dirs = ( os.path.join(HERE, "xonsh-test-cdpath-here"), os.path.expanduser("~/xonsh-test-cdpath-home") ) try: for d in test_dirs: if not os.path.exists(d): os.mkdir(d) assert os.path.exists(dirstack._try_cdpath(d)), "dirstack._try_cdpath: could not resolve {0}".format(d) finally: for d in test_dirs: if os.path.exists(d): os.rmdir(d) def test_cdpath_events(xonsh_builtins, tmpdir): xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=os.getcwd()) target = str(tmpdir) ev = None @xonsh_builtins.events.on_chdir def handler(olddir, newdir, **kw): nonlocal ev ev = olddir, newdir old_dir = os.getcwd() try: dirstack.cd([target]) except: raise else: assert (old_dir, target) == ev finally: # Use os.chdir() here so dirstack.cd() doesn't fire events (or fail again) os.chdir(old_dir) def test_cd_autopush(xonsh_builtins, tmpdir): xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=os.getcwd(), AUTO_PUSHD=True) target = str(tmpdir) old_dir = os.getcwd() old_ds_size = len(dirstack.DIRSTACK) assert target != old_dir try: dirstack.cd([target]) assert target == os.getcwd() assert old_ds_size + 1 == len(dirstack.DIRSTACK) dirstack.popd([]) except: raise finally: while len(dirstack.DIRSTACK) > old_ds_size: dirstack.popd([]) assert old_dir == os.getcwd() xonsh-0.6.0/tests/test_dirstack_unc.py000066400000000000000000000251261320541242300201020ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Testing dirstack""" #from __future__ import unicode_literals, print_function from contextlib import contextmanager from functools import wraps import os import os.path import subprocess import builtins import pytest from xonsh import dirstack from xonsh.environ import Env from xonsh.built_ins import load_builtins from xonsh.dirstack import DIRSTACK from xonsh.platform import ON_WINDOWS from xonsh.dirstack import _unc_tempDrives HERE = os.path.abspath(os.path.dirname(__file__)) PARENT = 
os.path.dirname(HERE) def drive_in_use(letter): return ON_WINDOWS and os.system('vol {}: 2>nul>nul'.format(letter)) == 0 MAX_TEMP_DRIVES = 4 TEMP_DRIVE = [] for d in 'zyxwvuts': if not drive_in_use(d): TEMP_DRIVE.append(d + ':') pytestmark = pytest.mark.skipif(len(TEMP_DRIVE) < MAX_TEMP_DRIVES, reason='Too many drive letters are already used by Windows to run the tests.') @pytest.yield_fixture(scope="module") def shares_setup(tmpdir_factory): """create some shares to play with on current machine. Yield (to test case) array of structs: [uncPath, driveLetter, equivLocalPath] Side effect: `os.chdir(TEST_WORK_DIR)` """ if not ON_WINDOWS: return [] shares = [[r'uncpushd_test_HERE', TEMP_DRIVE[1], HERE] , [r'uncpushd_test_PARENT', TEMP_DRIVE[3], PARENT]] for s, d, l in shares: # set up some shares on local machine. dirs already exist test case must invoke wd_setup. rtn = subprocess.call(['NET', 'SHARE', s, '/delete'], universal_newlines=True) # clean up from previous run after good, long wait. if rtn != 0: yield None return rtn = subprocess.call(['NET', 'SHARE', s + '=' + l], universal_newlines=True) if rtn != 0: yield None return rtn = subprocess.call(['NET', 'USE', d, r"\\localhost" + '\\' + s], universal_newlines=True) if rtn != 0: yield None return yield [[r"\\localhost" + '\\' + s[0], s[1], s[2]] for s in shares] # we want to delete the test shares we've created, but can't do that if unc shares in DIRSTACK # (left over from assert fail aborted test) os.chdir(HERE) for dl in _unc_tempDrives: rtn = subprocess.call(['net', 'use', dl, '/delete'], universal_newlines=True) for s, d, l in shares: rtn = subprocess.call(['net', 'use', d, '/delete'], universal_newlines=True) # subprocess.call(['net', 'share', s, '/delete'], universal_newlines=True) # fails with access denied, # unless I wait > 10 sec. see http://stackoverflow.com/questions/38448413/access-denied-in-net-share-delete def test_pushdpopd(xonsh_builtins): """Simple non-UNC push/pop to verify we didn't break nonUNC case. 
""" xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) dirstack.cd([PARENT]) owd = os.getcwd() assert owd.casefold() == xonsh_builtins.__xonsh_env__['PWD'].casefold() dirstack.pushd([HERE]) wd = os.getcwd() assert wd.casefold() == HERE.casefold() dirstack.popd([]) assert owd.casefold() == os.getcwd().casefold(), "popd returned cwd to expected dir" def test_cd_dot(xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env(PWD=os.getcwd()) owd = os.getcwd().casefold() dirstack.cd(['.']) assert owd == os.getcwd().casefold() @pytest.mark.skipif( not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_simple_push_pop(xonsh_builtins, shares_setup): if shares_setup is None: return xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) dirstack.cd([PARENT]) owd = os.getcwd() assert owd.casefold() == xonsh_builtins.__xonsh_env__['PWD'].casefold() dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) wd = os.getcwd() assert os.path.splitdrive(wd)[0].casefold() == TEMP_DRIVE[0] assert os.path.splitdrive(wd)[1].casefold() == '\\' dirstack.popd([]) assert owd.casefold() == os.getcwd().casefold(), "popd returned cwd to expected dir" assert len(_unc_tempDrives) == 0 @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_push_to_same_share(xonsh_builtins, shares_setup): if shares_setup is None: return xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) dirstack.cd([PARENT]) owd = os.getcwd() assert owd.casefold() == xonsh_builtins.__xonsh_env__['PWD'].casefold() dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) wd = os.getcwd() assert os.path.splitdrive(wd)[0].casefold() == TEMP_DRIVE[0] assert os.path.splitdrive(wd)[1].casefold() == '\\' assert len(_unc_tempDrives) == 1 assert len(DIRSTACK) == 1 dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) wd = os.getcwd() assert os.path.splitdrive(wd)[0].casefold() == TEMP_DRIVE[0] assert os.path.splitdrive(wd)[1].casefold() == '\\' assert len(_unc_tempDrives) == 1 assert len(DIRSTACK) == 2 dirstack.popd([]) assert os.path.isdir(TEMP_DRIVE[0] + '\\'), "Temp drive not unmapped till last reference removed" dirstack.popd([]) assert owd.casefold() == os.getcwd().casefold(), "popd returned cwd to expected dir" assert len(_unc_tempDrives) == 0 @pytest.mark.skipif( not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_push_other_push_same(xonsh_builtins, shares_setup): """push to a, then to b. verify drive letter is TEMP_DRIVE[2], skipping already used TEMP_DRIVE[1] Then push to a again. 
Pop (check b unmapped and a still mapped), pop, pop (check a is unmapped)""" if shares_setup is None: return xonsh_builtins.__xonsh_env__ = Env(CDPATH=PARENT, PWD=HERE) dirstack.cd([PARENT]) owd = os.getcwd() assert owd.casefold() == xonsh_builtins.__xonsh_env__['PWD'].casefold() dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) assert os.getcwd().casefold() == TEMP_DRIVE[0] + '\\' assert len(_unc_tempDrives) == 1 assert len(DIRSTACK) == 1 dirstack.pushd([r'\\localhost\uncpushd_test_PARENT']) wd = os.getcwd() assert os.getcwd().casefold() == TEMP_DRIVE[2] + '\\' assert len(_unc_tempDrives) == 2 assert len(DIRSTACK) == 2 dirstack.pushd([r'\\localhost\uncpushd_test_HERE']) assert os.getcwd().casefold() == TEMP_DRIVE[0] + '\\' assert len(_unc_tempDrives) == 2 assert len(DIRSTACK) == 3 dirstack.popd([]) assert os.getcwd().casefold() == TEMP_DRIVE[2] + '\\' assert len(_unc_tempDrives) == 2 assert len(DIRSTACK) == 2 assert os.path.isdir(TEMP_DRIVE[2] + '\\') assert os.path.isdir(TEMP_DRIVE[0] + '\\') dirstack.popd([]) assert os.getcwd().casefold() == TEMP_DRIVE[0] + '\\' assert len(_unc_tempDrives) == 1 assert len(DIRSTACK) == 1 assert not os.path.isdir(TEMP_DRIVE[2] + '\\') assert os.path.isdir(TEMP_DRIVE[0] + '\\') dirstack.popd([]) assert os.getcwd().casefold() == owd.casefold() assert len(_unc_tempDrives) == 0 assert len(DIRSTACK) == 0 assert not os.path.isdir(TEMP_DRIVE[2] + '\\') assert not os.path.isdir(TEMP_DRIVE[0] + '\\') @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_push_base_push_rempath(xonsh_builtins): """push to subdir under share, verify mapped path includes subdir""" pass #really? Need to cut-and-paste 2 flavors of this? yield_fixture requires yield in defined function body, not callee @pytest.yield_fixture() def with_unc_check_enabled(): if not ON_WINDOWS: return import winreg old_wval = 0 key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'software\microsoft\command processor', access=winreg.KEY_WRITE) try: wval, wtype = winreg.QueryValueEx(key, 'DisableUNCCheck') old_wval = wval # if values was defined at all except OSError as e: pass winreg.SetValueEx(key, 'DisableUNCCheck', None, winreg.REG_DWORD, 0) winreg.CloseKey(key) yield old_wval key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'software\microsoft\command processor', access=winreg.KEY_WRITE) winreg.SetValueEx(key, 'DisableUNCCheck', None, winreg.REG_DWORD, old_wval) winreg.CloseKey(key) @pytest.yield_fixture() def with_unc_check_disabled(): # just like the above, but value is 1 to *disable* unc check if not ON_WINDOWS: return import winreg old_wval = 0 key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'software\microsoft\command processor', access=winreg.KEY_WRITE) try: wval, wtype = winreg.QueryValueEx(key, 'DisableUNCCheck') old_wval = wval # if values was defined at all except OSError as e: pass winreg.SetValueEx(key, 'DisableUNCCheck', None, winreg.REG_DWORD, 1) winreg.CloseKey(key) yield old_wval key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'software\microsoft\command processor', access=winreg.KEY_WRITE) winreg.SetValueEx(key, 'DisableUNCCheck', None, winreg.REG_DWORD, old_wval) winreg.CloseKey(key) @pytest.fixture() def xonsh_builtins_cd(xonsh_builtins): xonsh_builtins.__xonsh_env__['CDPATH'] = PARENT xonsh_builtins.__xonsh_env__['PWD'] = os.getcwd() xonsh_builtins.__xonsh_env__['DIRSTACK_SIZE'] = 20 return xonsh_builtins @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_cd_unc_auto_pushd(xonsh_builtins_cd, 
with_unc_check_enabled): xonsh_builtins_cd.__xonsh_env__['AUTO_PUSHD'] = True so, se, rc = dirstack.cd([r'\\localhost\uncpushd_test_PARENT']) if rc != 0: return assert os.getcwd().casefold() == TEMP_DRIVE[0] + '\\' assert len(DIRSTACK) == 1 assert os.path.isdir(TEMP_DRIVE[0] + '\\') @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_cd_unc_nocheck(xonsh_builtins_cd, with_unc_check_disabled): if with_unc_check_disabled == 0: return dirstack.cd([r'\\localhost\uncpushd_test_HERE']) assert os.getcwd().casefold() == r'\\localhost\uncpushd_test_here' @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_cd_unc_no_auto_pushd(xonsh_builtins_cd, with_unc_check_enabled): if with_unc_check_enabled == 0: return so, se, rc = dirstack.cd([r'\\localhost\uncpushd_test_PARENT']) assert rc != 0 assert so is None or len(so) == 0 assert 'disableunccheck' in se.casefold() and 'auto_pushd' in se.casefold() @pytest.mark.skipif(not ON_WINDOWS, reason="Windows-only UNC functionality") def test_uncpushd_unc_check(): # emminently suited to mocking, but I don't know how # need to verify unc_check_enabled correct whether values set in HKCU or HKLM pass xonsh-0.6.0/tests/test_environ.py000066400000000000000000000155101320541242300171050ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the xonsh environment.""" from __future__ import unicode_literals, print_function import os import tempfile import builtins import itertools from tempfile import TemporaryDirectory from xonsh.tools import ON_WINDOWS import pytest from xonsh.commands_cache import CommandsCache from xonsh.environ import Env, load_static_config, locate_binary, DEFAULT_ENSURERS, DEFAULT_VALUES from tools import skip_if_on_unix def test_env_normal(): env = Env(VAR='wakka') assert 'wakka' == env['VAR'] def test_env_contains(): env = Env(VAR='wakka') assert 'VAR' in env @pytest.mark.parametrize('path', [['/home/wakka'], ['wakka']]) def test_env_path_list(path): env = Env(MYPATH=path) assert path == env['MYPATH'].paths @pytest.mark.parametrize('path', [ ['/home/wakka' + os.pathsep + '/home/jawaka'], ['wakka' + os.pathsep + 'jawaka'] ]) def test_env_path_str(path): env = Env(MYPATH=path) assert path == env['MYPATH'].paths def test_env_detype(): env = Env(MYPATH=['wakka', 'jawaka']) assert 'wakka' + os.pathsep + 'jawaka' == env.detype()['MYPATH'] @pytest.mark.parametrize('path1, path2',[ (['/home/wakka', '/home/jawaka'], '/home/woah'), (['wakka', 'jawaka'], 'woah') ]) def test_env_detype_mutable_access_clear(path1, path2): env = Env(MYPATH=path1) assert path1[0] + os.pathsep + path1[1] == env.detype()['MYPATH'] env['MYPATH'][0] = path2 assert env._detyped is None assert path2 + os.pathsep + path1[1] == env.detype()['MYPATH'] def test_env_detype_no_dict(): env = Env(YO={'hey': 42}) det = env.detype() assert 'YO' not in det def test_histcontrol_none(): env = Env(HISTCONTROL=None) assert isinstance(env['HISTCONTROL'], set) assert len(env['HISTCONTROL']) == 0 def test_HISTCONTROL_empty(): env = Env(HISTCONTROL='') assert isinstance(env['HISTCONTROL'], set) assert len(env['HISTCONTROL']) == 0 def test_histcontrol_ignoredups(): env = Env(HISTCONTROL='ignoredups') assert isinstance(env['HISTCONTROL'], set) assert len(env['HISTCONTROL']) == 1 assert ('ignoredups' in env['HISTCONTROL']) assert ('ignoreerr' not in env['HISTCONTROL']) def test_histcontrol_ignoreerr_ignoredups(): env = Env(HISTCONTROL='ignoreerr,ignoredups,ignoreerr') assert len(env['HISTCONTROL']) == 2 assert 
('ignoreerr' in env['HISTCONTROL']) assert ('ignoredups' in env['HISTCONTROL']) def test_swap(): env = Env(VAR='wakka') assert env['VAR'] == 'wakka' # positional arg with env.swap({'VAR': 'foo'}): assert env['VAR'] == 'foo' # make sure the environment goes back outside the context manager assert env['VAR'] == 'wakka' # kwargs only with env.swap(VAR1='foo', VAR2='bar'): assert env['VAR1'] == 'foo' assert env['VAR2'] == 'bar' # positional and kwargs with env.swap({'VAR3': 'baz'}, VAR1='foo', VAR2='bar'): assert env['VAR1'] == 'foo' assert env['VAR2'] == 'bar' assert env['VAR3'] == 'baz' # make sure the environment goes back outside the context manager assert env['VAR'] == 'wakka' assert 'VAR1' not in env assert 'VAR2' not in env assert 'VAR3' not in env @pytest.mark.parametrize('s, exp, loaded',[ (b'{"best": "awash"}', {'best': 'awash'}, True), # works (b'["best", "awash"]', {}, False), # fail (b'{"best": "awash"', {}, False) # json fail ]) def test_load_static_config(s, exp, loaded, tmpdir, xonsh_builtins): env = Env({'XONSH_SHOW_TRACEBACK': False}) xonsh_builtins.__xonsh_env__ = env f = tmpdir.join('test_static_config') f.write(s) conf = load_static_config(env, str(f)) assert exp == conf assert env['LOADED_CONFIG'] == loaded @skip_if_on_unix def test_locate_binary_on_windows(xonsh_builtins): files = ('file1.exe', 'FILE2.BAT', 'file3.txt') with TemporaryDirectory() as tmpdir: for fname in files: fpath = os.path.join(tmpdir, fname) with open(fpath, 'w') as f: f.write(fpath) xonsh_builtins.__xonsh_env__.update({ 'PATH': [tmpdir], 'PATHEXT': ['.COM', '.EXE', '.BAT'], }) xonsh_builtins.__xonsh_commands_cache__ = CommandsCache() assert locate_binary('file1') == os.path.join(tmpdir, 'file1.exe') assert locate_binary('file1.exe') == os.path.join(tmpdir, 'file1.exe') assert locate_binary('file2') == os.path.join(tmpdir, 'FILE2.BAT') assert locate_binary('file2.bat') == os.path.join(tmpdir, 'FILE2.BAT') assert locate_binary('file3') is None def test_event_on_envvar_change(xonsh_builtins): env = Env(TEST=0) xonsh_builtins.__xonsh_env__ = env share = [] # register @xonsh_builtins.events.on_envvar_change def handler(name, oldvalue, newvalue, **kwargs): share.extend((name, oldvalue, newvalue)) # trigger env['TEST'] = 1 assert share == ['TEST', 0, 1] def test_event_on_envvar_new(xonsh_builtins): env = Env() xonsh_builtins.__xonsh_env__ = env share = [] # register @xonsh_builtins.events.on_envvar_new def handler(name, value, **kwargs): share.extend((name, value)) # trigger env['TEST'] = 1 assert share == ['TEST', 1] def test_event_on_envvar_change_from_none_value(xonsh_builtins): env = Env(TEST=None) xonsh_builtins.__xonsh_env__ = env share = [] # register @xonsh_builtins.events.on_envvar_change def handler(name, oldvalue, newvalue, **kwargs): share.extend((name, oldvalue, newvalue)) # trigger env['TEST'] = 1 assert share == ['TEST', None, 1] @pytest.mark.parametrize('val', [1, None, True, 'ok']) def test_event_on_envvar_change_no_fire_when_value_is_same(val, xonsh_builtins): env = Env(TEST=val) xonsh_builtins.__xonsh_env__ = env share = [] # register @xonsh_builtins.events.on_envvar_change def handler(name, oldvalue, newvalue, **kwargs): share.extend((name, oldvalue, newvalue)) # trigger env['TEST'] = val assert share == [] def test_events_on_envvar_called_in_right_order(xonsh_builtins): env = Env() xonsh_builtins.__xonsh_env__ = env share = [] # register @xonsh_builtins.events.on_envvar_new def handler(name, value, **kwargs): share[:] = ['new'] @xonsh_builtins.events.on_envvar_change def handler(name, 
oldvalue, newvalue, **kwargs): share[:] = ['change'] # trigger new env['TEST'] = 1 assert share == ['new'] # trigger change env['TEST'] = 2 assert share == ['change'] def test_int_bool_envvars_have_ensurers(): bool_ints = [type(envvar) in [bool, int] for envvar in DEFAULT_VALUES.values()] key_mask = set(itertools.compress(DEFAULT_VALUES.keys(), bool_ints)) ensurer_keys = set(DEFAULT_ENSURERS.keys()) assert len(key_mask.intersection(ensurer_keys)) == len(key_mask) xonsh-0.6.0/tests/test_events.py000066400000000000000000000061141320541242300167310ustar00rootroot00000000000000"""Event tests""" import inspect import pytest from xonsh.events import EventManager, Event, LoadEvent @pytest.fixture def events(): return EventManager() def test_event_calling(events): called = False @events.on_test def _(spam, **_): nonlocal called called = spam events.on_test.fire(spam="eggs") assert called == "eggs" def test_event_returns(events): called = 0 @events.on_test def on_test(**_): nonlocal called called += 1 return 1 @events.on_test def second(**_): nonlocal called called += 1 return 2 vals = events.on_test.fire() assert called == 2 assert set(vals) == {1, 2} def test_validator(events): called = None @events.on_test def first(n, **_): nonlocal called called += 1 return False @first.validator def v(n): return n == 'spam' @events.on_test def second(n, **_): nonlocal called called += 1 return False called = 0 events.on_test.fire(n='egg') assert called == 1 called = 0 events.on_test.fire(n='spam') assert called == 2 def test_eventdoc(events): docstring = "Test event" events.doc('on_test', docstring) assert inspect.getdoc(events.on_test) == docstring def test_transmogrify(events): docstring = "Test event" events.doc('on_test', docstring) @events.on_test def func(**_): pass assert isinstance(events.on_test, Event) assert len(events.on_test) == 1 assert inspect.getdoc(events.on_test) == docstring events.transmogrify('on_test', LoadEvent) assert isinstance(events.on_test, LoadEvent) assert len(events.on_test) == 1 assert inspect.getdoc(events.on_test) == docstring def test_transmogrify_by_string(events): docstring = "Test event" events.doc('on_test', docstring) @events.on_test def func(**_): pass assert isinstance(events.on_test, Event) assert len(events.on_test) == 1 assert inspect.getdoc(events.on_test) == docstring events.transmogrify('on_test', 'LoadEvent') assert isinstance(events.on_test, LoadEvent) assert len(events.on_test) == 1 assert inspect.getdoc(events.on_test) == docstring def test_load(events): events.transmogrify('on_test', 'LoadEvent') called = 0 @events.on_test def on_test(**_): nonlocal called called += 1 assert called == 0 events.on_test.fire() assert called == 1 @events.on_test def second(**_): nonlocal called called += 1 assert called == 2 def test_load_2nd_call(events): events.transmogrify('on_test', 'LoadEvent') called = 0 @events.on_test def on_test(**_): nonlocal called called += 1 assert called == 0 events.on_test.fire() assert called == 1 events.on_test.fire() assert called == 1 def test_typos(xonsh_builtins): for name, ev in vars(xonsh_builtins.events).items(): if 'pytest' in name: continue assert inspect.getdoc(ev) xonsh-0.6.0/tests/test_execer.py000066400000000000000000000051551320541242300167040ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the xonsh lexer.""" import os from tools import (check_eval, check_parse, skip_if_on_unix, skip_if_on_windows) import pytest @pytest.fixture(autouse=True) def xonsh_execer_autouse(xonsh_builtins, xonsh_execer): return xonsh_execer 
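# Editor's sketch (hedged, not part of the original suite): check_parse()/check_eval()
# are imported above from tests/tools.py and are assumed to push a source string through
# the Execer provided by the autouse fixture, returning a truthy value when xonsh can
# compile (or evaluate) it.  A minimal extra case in that style, mirroring
# test_echo_hello below, would look like this:
def test_echo_hello_world_sketch():
    # hypothetical example; the command string is arbitrary
    assert check_parse('echo hello world')
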
@skip_if_on_unix
def test_win_ipconfig():
    assert check_eval(os.environ['SYSTEMROOT'] + '\\System32\\ipconfig.exe /all')


@skip_if_on_unix
def test_ipconfig():
    assert check_eval('ipconfig /all')


@skip_if_on_windows
def test_bin_ls():
    assert check_eval('/bin/ls -l')


def test_ls_dashl():
    assert check_parse('ls -l')


def test_which_ls():
    assert check_parse('which ls')


def test_echo_hello():
    assert check_parse('echo hello')


def test_echo_star_with_semi():
    assert check_parse('echo * spam ; ![echo eggs]\n')


def test_simple_func():
    code = ('def prompt():\n'
            "    return '{user}'.format(user='me')\n")
    assert check_parse(code)


def test_lookup_alias():
    code = (
        'def foo(a, s=None):\n'
        '    return "bar"\n'
        '@(foo)\n')
    assert check_parse(code)


def test_lookup_anon_alias():
    code = ('echo "hi" | @(lambda a, s=None: a[0]) foo bar baz\n')
    assert check_parse(code)


def test_simple_func_broken():
    code = ('def prompt():\n'
            "    return '{user}'.format(\n"
            "        user='me')\n")
    assert check_parse(code)


def test_bad_indent():
    code = ('if True:\n'
            'x = 1\n')
    with pytest.raises(SyntaxError):
        check_parse(code)


def test_good_rhs_subproc():
    # nonsense but parsable
    code = 'str().split() | ![grep exit]\n'
    assert check_parse(code)


def test_bad_rhs_subproc():
    # nonsense but unparsable
    code = 'str().split() | grep exit\n'
    with pytest.raises(SyntaxError):
        check_parse(code)


def test_indent_with_empty_line():
    code = ('if True:\n'
            '\n'
            '    some_command for_sub_process_mode\n')
    assert check_parse(code)


def test_command_in_func():
    code = ('def f():\n'
            '    echo hello\n')
    assert check_parse(code)


def test_command_in_func_with_comment():
    code = ('def f():\n'
            '    echo hello # comment\n')
    assert check_parse(code)


def test_pyeval_redirect():
    code = 'echo @("foo") > bar\n'
    assert check_parse(code)


def test_echo_comma():
    code = 'echo ,\n'
    assert check_parse(code)


def test_echo_comma_val():
    code = 'echo ,1\n'
    assert check_parse(code)


def test_echo_comma_2val():
    code = 'echo 1,2\n'
    assert check_parse(code)


def test_echo_line_cont():
    code = 'echo "1 \\\n2"\n'
    assert check_parse(code)
xonsh-0.6.0/tests/test_foreign_shells.py000066400000000000000000000057321320541242300204350ustar00rootroot00000000000000# -*- coding: utf-8 -*-
"""Tests foreign shells."""
from __future__ import unicode_literals, print_function
import os
import subprocess

import pytest

from tools import skip_if_on_windows, skip_if_on_unix
from xonsh.foreign_shells import foreign_shell_data, parse_env, parse_aliases


def test_parse_env():
    exp = {'X': 'YES', 'Y': 'NO'}
    s = ('some garbage\n'
         '__XONSH_ENV_BEG__\n'
         'Y=NO\n'
         'X=YES\n'
         '__XONSH_ENV_END__\n'
         'more filth')
    obs = parse_env(s)
    assert exp == obs


def test_parse_env_newline():
    exp = {'X': 'YES', 'Y': 'NO', 'PROMPT': 'why\nme '}
    s = ('some garbage\n'
         '__XONSH_ENV_BEG__\n'
         'Y=NO\n'
         'PROMPT=why\nme \n'
         'X=YES\n'
         '__XONSH_ENV_END__\n'
         'more filth')
    obs = parse_env(s)
    assert exp == obs


def test_parse_env_equals():
    exp = {'X': 'YES', 'Y': 'NO', 'LS_COLORS': '*.tar=5'}
    s = ('some garbage\n'
         '__XONSH_ENV_BEG__\n'
         'Y=NO\n'
         'LS_COLORS=*.tar=5\n'
         'X=YES\n'
         '__XONSH_ENV_END__\n'
         'more filth')
    obs = parse_env(s)
    assert exp == obs


def test_parse_aliases():
    exp = {'x': ['yes', '-1'], 'y': ['echo', 'no']}
    s = ('some garbage\n'
         '__XONSH_ALIAS_BEG__\n'
         "alias x='yes -1'\n"
         "alias y='echo no'\n"
         '__XONSH_ALIAS_END__\n'
         'more filth')
    obs = parse_aliases(s)
    assert exp == obs


@skip_if_on_windows
def test_foreign_bash_data():
    expenv = {"EMERALD": "SWORD", 'MIGHTY': 'WARRIOR'}
    expaliases = {
        'l': ['ls', '-CF'],
        'la': ['ls', '-A'],
        'll': ['ls', '-a', '-lF'],
    }
    rcfile = 
os.path.join(os.path.dirname(__file__), 'bashrc.sh') try: obsenv, obsaliases = foreign_shell_data('bash', currenv=(), extra_args=('--rcfile', rcfile), safe=False) except (subprocess.CalledProcessError, FileNotFoundError): return for key, expval in expenv.items(): assert expval == obsenv.get(key, False) for key, expval in expaliases.items(): assert expval == obsaliases.get(key, False) @skip_if_on_unix def test_foreign_cmd_data(): env = (('ENV_TO_BE_REMOVED','test'),) batchfile = os.path.join(os.path.dirname(__file__), 'batch.bat') source_cmd ='call "{}"\necho off'.format(batchfile) try: obsenv, _ = foreign_shell_data('cmd',prevcmd=source_cmd, currenv=env, interactive =False, sourcer='call',envcmd='set', use_tmpfile=True, safe=False) except (subprocess.CalledProcessError, FileNotFoundError): return assert 'ENV_TO_BE_ADDED' in obsenv assert obsenv['ENV_TO_BE_ADDED']=='Hallo world' assert 'ENV_TO_BE_REMOVED' not in obsenv xonsh-0.6.0/tests/test_history.py000066400000000000000000000231531320541242300171300ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the json history backend.""" # pylint: disable=protected-access import os import shlex import pytest from xonsh.lazyjson import LazyJSON from xonsh.history.dummy import DummyHistory from xonsh.history.json import JsonHistory from xonsh.history.main import history_main, _xh_parse_args, construct_history CMDS = ['ls', 'cat hello kitty', 'abc', 'def', 'touch me', 'grep from me'] @pytest.yield_fixture def hist(): h = JsonHistory(filename='xonsh-HISTORY-TEST.json', here='yup', sessionid='SESSIONID', gc=False) yield h os.remove(h.filename) def test_hist_init(hist): """Test initialization of the shell history.""" with LazyJSON(hist.filename) as lj: obs = lj['here'] assert 'yup' == obs def test_hist_append(hist, xonsh_builtins): """Verify appending to the history works.""" xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() hf = hist.append({'inp': 'still alive', 'rtn': 0}) assert hf is None assert 'still alive' == hist.buffer[0]['inp'] assert 0 == hist.buffer[0]['rtn'] assert 0 == hist.rtns[-1] hf = hist.append({'inp': 'dead now', 'rtn': 1}) assert 'dead now' == hist.buffer[1]['inp'] assert 1 == hist.buffer[1]['rtn'] assert 1 == hist.rtns[-1] hf = hist.append({'inp': 'reborn', 'rtn': 0}) assert 'reborn' == hist.buffer[2]['inp'] assert 0 == hist.buffer[2]['rtn'] assert 0 == hist.rtns[-1] def test_hist_flush(hist, xonsh_builtins): """Verify explicit flushing of the history works.""" hf = hist.flush() assert hf is None xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() hist.append({'inp': 'still alive?', 'rtn': 0, 'out': 'yes'}) hf = hist.flush() assert hf is not None while hf.is_alive(): pass with LazyJSON(hist.filename) as lj: assert len(lj['cmds']) == 1 cmd = lj['cmds'][0] assert cmd['inp'] == 'still alive?' assert not cmd.get('out', None) def test_hist_flush_with_store_stdout(hist, xonsh_builtins): """Verify explicit flushing of the history works.""" hf = hist.flush() assert hf is None xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() xonsh_builtins.__xonsh_env__['XONSH_STORE_STDOUT'] = True hist.append({'inp': 'still alive?', 'rtn': 0, 'out': 'yes'}) hf = hist.flush() assert hf is not None while hf.is_alive(): pass with LazyJSON(hist.filename) as lj: assert len(lj['cmds']) == 1 assert lj['cmds'][0]['inp'] == 'still alive?' 
assert lj['cmds'][0]['out'].strip() == 'yes' def test_hist_flush_with_hist_control(hist, xonsh_builtins): """Verify explicit flushing of the history works.""" hf = hist.flush() assert hf is None xonsh_builtins.__xonsh_env__['HISTCONTROL'] = 'ignoredups,ignoreerr' hist.append({'inp': 'ls foo1', 'rtn': 0}) hist.append({'inp': 'ls foo1', 'rtn': 1}) hist.append({'inp': 'ls foo1', 'rtn': 0}) hist.append({'inp': 'ls foo2', 'rtn': 2}) hist.append({'inp': 'ls foo3', 'rtn': 0}) hf = hist.flush() assert hf is not None while hf.is_alive(): pass assert len(hist.buffer) == 0 with LazyJSON(hist.filename) as lj: cmds = list(lj['cmds']) assert len(cmds) == 2 assert [x['inp'] for x in cmds] == ['ls foo1', 'ls foo3'] assert [x['rtn'] for x in cmds] == [0, 0] def test_cmd_field(hist, xonsh_builtins): # in-memory xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() hf = hist.append({'inp': 'ls foo', 'rtn': 1}) assert hf is None assert 1 == hist.rtns[0] assert 1 == hist.rtns[-1] assert None == hist.outs[-1] # slice assert [1] == hist.rtns[:] # on disk hf = hist.flush() assert hf is not None assert 1 == hist.rtns[0] assert 1 == hist.rtns[-1] assert None == hist.outs[-1] @pytest.mark.parametrize('inp, commands, offset', [ ('', CMDS, (0, 1)), ('-r', list(reversed(CMDS)), (len(CMDS)- 1, -1)), ('0', CMDS[0:1], (0, 1)), ('1', CMDS[1:2], (1, 1)), ('-2', CMDS[-2:-1], (len(CMDS) -2 , 1)), ('1:3', CMDS[1:3], (1, 1)), ('1::2', CMDS[1::2], (1, 2)), ('-4:-2', CMDS[-4:-2], (len(CMDS) - 4, 1)) ]) def test_show_cmd_numerate(inp, commands, offset, hist, xonsh_builtins, capsys): """Verify that CLI history commands work.""" base_idx, step = offset xonsh_builtins.__xonsh_history__ = hist xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() for ts, cmd in enumerate(CMDS): # populate the shell history hist.append({'inp': cmd, 'rtn': 0, 'ts':(ts + 1, ts + 1.5)}) exp = ('{}: {}'.format(base_idx + idx * step, cmd) for idx, cmd in enumerate(list(commands))) exp = '\n'.join(exp) history_main(['show', '-n'] + shlex.split(inp)) out, err = capsys.readouterr() assert out.rstrip() == exp def test_histcontrol(hist, xonsh_builtins): """Test HISTCONTROL=ignoredups,ignoreerr""" xonsh_builtins.__xonsh_env__['HISTCONTROL'] = 'ignoredups,ignoreerr' assert len(hist.buffer) == 0 # An error, buffer remains empty hist.append({'inp': 'ls foo', 'rtn': 2}) assert len(hist.buffer) == 1 assert hist.rtns[-1] == 2 assert hist.inps[-1] == 'ls foo' # Success hist.append({'inp': 'ls foobazz', 'rtn': 0}) assert len(hist.buffer) == 2 assert 'ls foobazz' == hist.buffer[-1]['inp'] assert 0 == hist.buffer[-1]['rtn'] assert hist.rtns[-1] == 0 assert hist.inps[-1] == 'ls foobazz' # Error hist.append({'inp': 'ls foo', 'rtn': 2}) assert len(hist.buffer) == 3 assert 'ls foo' == hist.buffer[-1]['inp'] assert 2 == hist.buffer[-1]['rtn'] assert hist.rtns[-1] == 2 assert hist.inps[-1] == 'ls foo' # File now exists, success hist.append({'inp': 'ls foo', 'rtn': 0}) assert len(hist.buffer) == 4 assert 'ls foo' == hist.buffer[-1]['inp'] assert 0 == hist.buffer[-1]['rtn'] assert hist.rtns[-1] == 0 assert hist.inps[-1] == 'ls foo' # Success hist.append({'inp': 'ls', 'rtn': 0}) assert len(hist.buffer) == 5 assert 'ls' == hist.buffer[-1]['inp'] assert 0 == hist.buffer[-1]['rtn'] assert hist.rtns[-1] == 0 assert hist.inps[-1] == 'ls' # Dup hist.append({'inp': 'ls', 'rtn': 0}) assert len(hist.buffer) == 6 assert hist.rtns[-1] == 0 assert hist.inps[-1] == 'ls' # Success hist.append({'inp': '/bin/ls', 'rtn': 0}) assert len(hist.buffer) == 7 assert '/bin/ls' == hist.buffer[-1]['inp'] assert 
0 == hist.buffer[-1]['rtn'] assert hist.rtns[-1] == 0 assert hist.inps[-1] == '/bin/ls' # Error hist.append({'inp': 'ls bazz', 'rtn': 1}) assert len(hist.buffer) == 8 assert 'ls bazz' == hist.buffer[-1]['inp'] assert 1 == hist.buffer[-1]['rtn'] assert hist.rtns[-1] == 1 assert hist.inps[-1] == 'ls bazz' # Error hist.append({'inp': 'ls bazz', 'rtn': -1}) assert len(hist.buffer) == 9 assert 'ls bazz' == hist.buffer[-1]['inp'] assert -1 == hist.buffer[-1]['rtn'] assert hist.rtns[-1] == -1 assert hist.inps[-1] == 'ls bazz' @pytest.mark.parametrize('args', [ '-h', '--help', 'show -h', 'show --help']) def test_parse_args_help(args, capsys): with pytest.raises(SystemExit): args = _xh_parse_args(shlex.split(args)) assert 'show this help message and exit' in capsys.readouterr()[0] @pytest.mark.parametrize('args, exp', [ ('', ('show', 'session', [], False, False)), ('1:5', ('show', 'session', ['1:5'], False, False)), ('show', ('show', 'session', [], False, False)), ('show 15', ('show', 'session', ['15'], False, False)), ('show bash 3:5 15:66', ('show', 'bash', ['3:5', '15:66'], False, False)), ('show -r', ('show', 'session', [], False, True)), ('show -rn bash', ('show', 'bash', [], True, True)), ('show -n -r -30:20', ('show', 'session', ['-30:20'], True, True)), ('show -n zsh 1:2:3', ('show', 'zsh', ['1:2:3'], True, False)) ]) def test_parser_show(args, exp): # use dict instead of argparse.Namespace for pretty pytest diff exp_ns = {'action': exp[0], 'session': exp[1], 'slices': exp[2], 'numerate': exp[3], 'reverse': exp[4], 'start_time': None, 'end_time': None, 'datetime_format': None, 'timestamp': False} ns = _xh_parse_args(shlex.split(args)) assert ns.__dict__ == exp_ns @pytest.mark.parametrize('index, exp', [ (-1, ('grep from me', 'out', 0, (5, 6))), (1, ('cat hello kitty', 'out', 0, (1, 2))), (slice(1, 3), [('cat hello kitty', 'out', 0, (1, 2)), ('abc', 'out', 0, (2, 3))]), ]) def test_history_getitem(index, exp, hist, xonsh_builtins): xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() attrs = ('inp', 'out', 'rtn', 'ts') for ts,cmd in enumerate(CMDS): # populate the shell history entry = {k: v for k, v in zip(attrs, [cmd, 'out', 0, (ts, ts+1)])} hist.append(entry) entry = hist[index] if isinstance(entry, list): assert [(e.cmd, e.out, e.rtn, e.ts) for e in entry] == exp else: assert (entry.cmd, entry.out, entry.rtn, entry.ts) == exp def test_construct_history_str(xonsh_builtins): xonsh_builtins.__xonsh_env__['XONSH_HISTORY_BACKEND'] = 'dummy' assert isinstance(construct_history(), DummyHistory) def test_construct_history_class(xonsh_builtins): xonsh_builtins.__xonsh_env__['XONSH_HISTORY_BACKEND'] = DummyHistory assert isinstance(construct_history(), DummyHistory) def test_construct_history_instance(xonsh_builtins): xonsh_builtins.__xonsh_env__['XONSH_HISTORY_BACKEND'] = DummyHistory() assert isinstance(construct_history(), DummyHistory) xonsh-0.6.0/tests/test_history_sqlite.py000066400000000000000000000131411320541242300205050ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the xonsh history.""" # pylint: disable=protected-access import os import shlex from xonsh.history.sqlite import SqliteHistory from xonsh.history.main import history_main import pytest @pytest.yield_fixture def hist(): h = SqliteHistory(filename='xonsh-HISTORY-TEST.sqlite', sessionid='SESSIONID', gc=False) yield h os.remove(h.filename) def test_hist_append(hist, xonsh_builtins): """Verify appending to the history works.""" xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() hf = hist.append({'inp': 'still alive', 
'rtn': 1}) assert hf is None items = list(hist.items()) assert len(items) == 1 assert 'still alive' == items[0]['inp'] assert 1 == items[0]['rtn'] hist.append({'inp': 'still alive', 'rtn': 0}) items = list(hist.items()) assert len(items) == 2 assert 'still alive' == items[1]['inp'] assert 0 == items[1]['rtn'] assert list(hist.all_items()) == items def test_hist_attrs(hist, xonsh_builtins): xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() hf = hist.append({'inp': 'ls foo', 'rtn': 1}) assert hf is None assert 'ls foo' == hist.inps[0] assert 'ls foo' == hist.inps[-1] assert 1 == hist.rtns[0] assert 1 == hist.rtns[-1] assert None is hist.outs[-1] assert [1] == hist.rtns[:] hist.append({'inp': 'ls bar', 'rtn': 0}) assert 'ls bar' == hist.inps[1] assert 'ls bar' == hist.inps[-1] assert 0 == hist.rtns[1] assert 0 == hist.rtns[-1] assert None is hist.outs[-1] assert [1, 0] == hist.rtns[:] assert len(hist.tss) == 2 assert len(hist.tss[0]) == 2 CMDS = ['ls', 'cat hello kitty', 'abc', 'def', 'touch me', 'grep from me'] @pytest.mark.parametrize('inp, commands, offset', [ ('', CMDS, (0, 1)), ('-r', list(reversed(CMDS)), (len(CMDS) - 1, -1)), ('0', CMDS[0:1], (0, 1)), ('1', CMDS[1:2], (1, 1)), ('-2', CMDS[-2:-1], (len(CMDS) - 2, 1)), ('1:3', CMDS[1:3], (1, 1)), ('1::2', CMDS[1::2], (1, 2)), ('-4:-2', CMDS[-4:-2], (len(CMDS) - 4, 1)) ]) def test_show_cmd_numerate(inp, commands, offset, hist, xonsh_builtins, capsys): """Verify that CLI history commands work.""" base_idx, step = offset xonsh_builtins.__xonsh_history__ = hist xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() for ts, cmd in enumerate(CMDS): # populate the shell history hist.append({'inp': cmd, 'rtn': 0, 'ts': (ts + 1, ts + 1.5)}) exp = ('{}: {}'.format(base_idx + idx * step, cmd) for idx, cmd in enumerate(list(commands))) exp = '\n'.join(exp) history_main(['show', '-n'] + shlex.split(inp)) out, err = capsys.readouterr() assert out.rstrip() == exp def test_histcontrol(hist, xonsh_builtins): """Test HISTCONTROL=ignoredups,ignoreerr""" xonsh_builtins.__xonsh_env__['HISTCONTROL'] = 'ignoredups,ignoreerr' assert len(hist) == 0 # An error, items() remains empty hist.append({'inp': 'ls foo', 'rtn': 2}) assert len(hist) == 0 assert len(hist.inps) == 1 assert len(hist.rtns) == 1 assert 2 == hist.rtns[-1] # Success hist.append({'inp': 'ls foobazz', 'rtn': 0}) assert len(hist) == 1 assert len(hist.inps) == 2 assert len(hist.rtns) == 2 items = list(hist.items()) assert 'ls foobazz' == items[-1]['inp'] assert 0 == items[-1]['rtn'] assert 0 == hist.rtns[-1] # Error hist.append({'inp': 'ls foo', 'rtn': 2}) assert len(hist) == 1 items = list(hist.items()) assert 'ls foobazz' == items[-1]['inp'] assert 0 == items[-1]['rtn'] assert 2 == hist.rtns[-1] # File now exists, success hist.append({'inp': 'ls foo', 'rtn': 0}) assert len(hist) == 2 items = list(hist.items()) assert 'ls foo' == items[-1]['inp'] assert 0 == items[-1]['rtn'] assert 0 == hist.rtns[-1] # Success hist.append({'inp': 'ls', 'rtn': 0}) assert len(hist) == 3 items = list(hist.items()) assert 'ls' == items[-1]['inp'] assert 0 == items[-1]['rtn'] assert 0 == hist.rtns[-1] # Dup hist.append({'inp': 'ls', 'rtn': 0}) assert len(hist) == 3 # Success hist.append({'inp': '/bin/ls', 'rtn': 0}) assert len(hist) == 4 items = list(hist.items()) assert '/bin/ls' == items[-1]['inp'] assert 0 == items[-1]['rtn'] assert 0 == hist.rtns[-1] # Error hist.append({'inp': 'ls bazz', 'rtn': 1}) assert len(hist) == 4 items = list(hist.items()) assert '/bin/ls' == items[-1]['inp'] assert 0 == items[-1]['rtn'] 
assert 'ls bazz' == hist.inps[-1] assert 1 == hist.rtns[-1] # Error hist.append({'inp': 'ls bazz', 'rtn': -1}) assert len(hist) == 4 items = list(hist.items()) assert '/bin/ls' == items[-1]['inp'] assert 0 == items[-1]['rtn'] assert -1 == hist.rtns[-1] @pytest.mark.parametrize('index, exp', [ (-1, ('grep from me', 'out', 0, (5, 6))), (1, ('cat hello kitty', 'out', 0, (1, 2))), (slice(1, 3), [('cat hello kitty', 'out', 0, (1, 2)), ('abc', 'out', 0, (2, 3))]), ]) def test_history_getitem(index, exp, hist, xonsh_builtins): xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set() xonsh_builtins.__xonsh_env__['XONSH_STORE_STDOUT'] = True attrs = ('inp', 'out', 'rtn', 'ts') for ts, cmd in enumerate(CMDS): # populate the shell history entry = {k: v for k, v in zip(attrs, [cmd, 'out', 0, (ts, ts + 1)])} hist.append(entry) entry = hist[index] if isinstance(entry, list): assert [(e.cmd, e.out, e.rtn, e.ts) for e in entry] == exp else: assert (entry.cmd, entry.out, entry.rtn, entry.ts) == exp xonsh-0.6.0/tests/test_imphooks.py000066400000000000000000000024371320541242300172620ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Testing xonsh import hooks""" import os import builtins import pytest from xonsh import imphooks from xonsh.execer import Execer from xonsh.environ import Env from xonsh.built_ins import unload_builtins imphooks.install_import_hooks() @pytest.yield_fixture(autouse=True) def imp_env(): execer = Execer(unload=False) builtins.__xonsh_env__ = Env({'PATH': [], 'PATHEXT': []}) yield unload_builtins() def test_import(): import sample assert ('hello mom jawaka\n' == sample.x) def test_absolute_import(): from xpack import sample assert ('hello mom jawaka\n' == sample.x) def test_relative_import(): from xpack import relimp assert ('hello mom jawaka\n' == relimp.sample.x) assert ('hello mom jawaka\ndark chest of wonders' == relimp.y) def test_sub_import(): from xpack.sub import sample assert ('hello mom jawaka\n' == sample.x) TEST_DIR = os.path.dirname(__file__) def test_module_dunder_file_attribute(): import sample exp = os.path.join(TEST_DIR, 'sample.xsh') assert os.path.abspath(sample.__file__) == exp def test_module_dunder_file_attribute_sub(): from xpack.sub import sample exp = os.path.join(TEST_DIR, 'xpack', 'sub', 'sample.xsh') assert os.path.abspath(sample.__file__) == exp xonsh-0.6.0/tests/test_inspectors.py000066400000000000000000000003641320541242300176170ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Testing inspectors""" import inspect from xonsh.inspectors import getouterframes def test_getouterframes(): """Just test that this works.""" curr = inspect.currentframe() getouterframes(curr, context=0) xonsh-0.6.0/tests/test_integrations.py000066400000000000000000000170441320541242300201370ustar00rootroot00000000000000import os import sys import shutil import subprocess as sp import pytest import xonsh from xonsh.platform import ON_WINDOWS from tools import (skip_if_on_windows, skip_if_on_darwin, skip_if_on_travis, ON_WINDOWS, ON_DARWIN, ON_TRAVIS) XONSH_PREFIX = xonsh.__file__ if 'site-packages' in XONSH_PREFIX: # must be installed version of xonsh num_up = 5 else: # must be in source dir num_up = 2 for i in range(num_up): XONSH_PREFIX = os.path.dirname(XONSH_PREFIX) PATH = os.path.join(os.path.dirname(__file__), 'bin') + os.pathsep + \ os.path.join(XONSH_PREFIX, 'bin') + os.pathsep + \ os.path.join(XONSH_PREFIX, 'Scripts') + os.pathsep + \ os.path.join(XONSH_PREFIX, 'scripts') + os.pathsep + \ os.path.dirname(sys.executable) + os.pathsep + \ os.environ['PATH'] 
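# Editor's sketch (hedged, not part of the original suite): the PATH assembled above is
# meant to let shutil.which() find the xonsh launch script whether xonsh is installed
# (site-packages layout) or run from a source checkout.  The helper below is illustrative
# only and mirrors the resolution run_xonsh() performs before spawning `xonsh --no-rc`.
def _resolve_xonsh_launcher_sketch():
    """Return the launcher path these integration tests would invoke, or None."""
    launcher = 'xonsh.bat' if ON_WINDOWS else 'xon.sh'
    return shutil.which(launcher, path=PATH)
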
skip_if_no_xonsh = pytest.mark.skipif(shutil.which('xonsh', path=PATH) is None, reason='xonsh not on path') def run_xonsh(cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.STDOUT): env = dict(os.environ) env['PATH'] = PATH env['XONSH_DEBUG'] = '1' env['XONSH_SHOW_TRACEBACK'] = '1' env['RAISE_SUBPROC_ERROR'] = '1' env['PROMPT'] = '' xonsh = 'xonsh.bat' if ON_WINDOWS else 'xon.sh' xonsh = shutil.which(xonsh, path=PATH) proc = sp.Popen([xonsh, '--no-rc'], env=env, stdin=stdin, stdout=stdout, stderr=stderr, universal_newlines=True, ) try: out, err = proc.communicate(input=cmd, timeout=10) except sp.TimeoutExpired: proc.kill() raise return out, err, proc.returncode def check_run_xonsh(cmd, fmt, exp): """The ``fmt`` parameter is a function that formats the output of cmd, can be None. """ out, err, rtn = run_xonsh(cmd, stderr=sp.DEVNULL) if callable(fmt): out = fmt(out) if callable(exp): exp = exp() assert out == exp assert rtn == 0 # # The following list contains a (stdin, stdout, returncode) tuples # ALL_PLATFORMS = [ # test calling a function alias (""" def _f(): print('hello') aliases['f'] = _f f """, "hello\n", 0), # test redirecting a function alias (""" def _f(): print('Wow Mom!') aliases['f'] = _f f > tttt with open('tttt') as tttt: s = tttt.read().strip() print('REDIRECTED OUTPUT: ' + s) """, "REDIRECTED OUTPUT: Wow Mom!\n", 0), # test system exit in function alias (""" import sys def _f(): sys.exit(42) aliases['f'] = _f print(![f].returncode) """, "42\n", 0), # test uncaptured streaming alias, # order actually printed in is non-deterministic (""" def _test_stream(args, stdin, stdout, stderr): print('hallo on stream', file=stderr) print('hallo on stream', file=stdout) return 1 aliases['test-stream'] = _test_stream x = ![test-stream] print(x.returncode) """, "hallo on stream\nhallo on stream\n1\n", 0), # test captured streaming alias (""" def _test_stream(args, stdin, stdout, stderr): print('hallo on err', file=stderr) print('hallo on out', file=stdout) return 1 aliases['test-stream'] = _test_stream x = !(test-stream) print(x.returncode) """, "hallo on err\n1\n", 0), # test piping aliases (""" def dummy(args, inn, out, err): out.write('hey!') return 0 def dummy2(args, inn, out, err): s = inn.read() out.write(s.upper()) return 0 aliases['d'] = dummy aliases['d2'] = dummy2 d | d2 """, "HEY!", 0), # test output larger than most pipe buffers (""" def _g(args, stdin=None): for i in range(1000): print('x' * 100) aliases['g'] = _g g """, (("x"*100) + '\n') * 1000, 0), # test piping 'real' command (""" with open('tttt', 'w') as fp: fp.write("Wow mom!\\n") ![cat tttt | wc] """, ' 1 2 10\n' if ON_WINDOWS else " 1 2 9 \n", 0), # test double piping 'real' command (""" with open('tttt', 'w') as fp: fp.write("Wow mom!\\n") ![cat tttt | wc | wc] """, ' 1 3 24\n' if ON_WINDOWS else " 1 4 16 \n", 0), # test unthreadable alias (which should trigger a ProcPoxy call) (""" from xonsh.tools import unthreadable @unthreadable def _f(): return 'hello\\n' aliases['f'] = _f f """, "hello\n", 0), # test ambiguous globs (""" import os def _echo(args): print(' '.join(args)) aliases['echo'] = _echo files = ['Actually_test.tst', 'Actually.tst', 'Complete_test.tst', 'Complete.tst'] # touch the file for f in files: with open(f, 'w'): pass # echo the files echo *.tst and echo *_test.tst echo *_test.tst echo *_test.tst and echo *.tst # remove the files for f in files: os.remove(f) """, 'Actually.tst Actually_test.tst Complete.tst Complete_test.tst\n' 'Actually_test.tst Complete_test.tst\n' 'Actually_test.tst 
Complete_test.tst\n' 'Actually_test.tst Complete_test.tst\n' 'Actually.tst Actually_test.tst Complete.tst Complete_test.tst\n', 0), # # test ambiguous line continuations # (""" def _echo(args): print(' '.join(args)) aliases['echo'] = _echo echo --option1 \ --option2 """, '--option1 --option2\n', 0), # # test @$() with aliases # (""" aliases['ls'] = 'spam spam sausage spam' echo @$(which ls) """, 'spam spam sausage spam\n', 0), ] @pytest.mark.parametrize('case', ALL_PLATFORMS) def test_script(case): script, exp_out, exp_rtn = case out, err, rtn = run_xonsh(script) assert exp_out == out assert exp_rtn == rtn ALL_PLATFORMS_STDERR = [ # test redirecting a function alias (""" def _f(args, stdout): print('Wow Mom!', file=stdout) aliases['f'] = _f f o>e """, "Wow Mom!\n", 0), ] @pytest.mark.parametrize('case', ALL_PLATFORMS_STDERR) def test_script_stderr(case): script, exp_err, exp_rtn = case out, err, rtn = run_xonsh(script, stderr=sp.PIPE) assert exp_err == err assert exp_rtn == rtn @skip_if_on_windows @pytest.mark.parametrize('cmd, fmt, exp', [ ('pwd', None, lambda: os.getcwd() + '\n'), ('echo WORKING', None, 'WORKING\n'), ('ls -f', lambda out: out.splitlines().sort(), os.listdir().sort()), ]) def test_single_command_no_windows(cmd, fmt, exp): check_run_xonsh(cmd, fmt, exp) def test_eof_syntax_error(): """Ensures syntax errors for EOF appear on last line.""" script = 'x = 1\na = (1, 0\n' out, err, rtn = run_xonsh(script, stderr=sp.PIPE) assert ':0:0: EOF in multi-line statement' not in err assert ':2:0: EOF in multi-line statement' in err _bad_case = pytest.mark.skipif(ON_DARWIN or ON_WINDOWS or ON_TRAVIS, reason="bad platforms") @_bad_case def test_printfile(): check_run_xonsh('printfile.xsh', None, 'printfile.xsh\n') @_bad_case def test_printname(): check_run_xonsh('printfile.xsh', None, 'printfile.xsh\n') @_bad_case def test_sourcefile(): check_run_xonsh('printfile.xsh', None, 'printfile.xsh\n') @_bad_case @pytest.mark.parametrize('cmd, fmt, exp', [ # test subshell wrapping (""" with open('tttt', 'w') as fp: fp.write("Wow mom!\\n") (wc) < tttt """, None, " 1 2 9 \n"), # test subshell statement wrapping (""" with open('tttt', 'w') as fp: fp.write("Wow mom!\\n") (wc;) < tttt """, None, " 1 2 9 \n"), ]) def test_subshells(cmd, fmt, exp): check_run_xonsh(cmd, fmt, exp) @skip_if_on_windows @pytest.mark.parametrize('cmd, exp', [ ('pwd', lambda: os.getcwd() + '\n'), ]) def test_redirect_out_to_file(cmd, exp, tmpdir): outfile = tmpdir.mkdir('xonsh_test_dir').join('xonsh_test_file') command = '{} > {}\n'.format(cmd, outfile) out, _, _ = run_xonsh(command) content = outfile.read() if callable(exp): exp = exp() assert content == exp xonsh-0.6.0/tests/test_jsonutils.py000066400000000000000000000007651320541242300174650ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Testing xonsh json hooks""" import json import pytest from xonsh.tools import EnvPath from xonsh.jsonutils import serialize_xonsh_json @pytest.mark.parametrize('inp', [ 42, "yo", ["hello"], {"x": 65}, EnvPath(["wakka", "jawaka"]), ["y", EnvPath(["wakka", "jawaka"])], {"z": EnvPath(["wakka", "jawaka"])}, ]) def test_serialize_xonsh_json_roundtrip(inp): s = json.dumps(inp, default=serialize_xonsh_json) obs = json.loads(s) assert inp == obsxonsh-0.6.0/tests/test_lazyasd.py000066400000000000000000000003261320541242300170730ustar00rootroot00000000000000"""Tests lazy and self destruictive objects.""" from xonsh.lazyasd import LazyObject # # LazyObject Tests # def test_lazyobject_getitem(): lo = LazyObject(lambda: {'x': 1}, {}, 'lo') assert 
1 == lo['x'] xonsh-0.6.0/tests/test_lazyjson.py000066400000000000000000000073771320541242300173120ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests lazy json functionality.""" from __future__ import unicode_literals, print_function from io import StringIO from xonsh.lazyjson import index, ljdump, LazyJSON, LJNode def test_index_int(): exp = {'offsets': 0, 'sizes': 2} s, obs = index(42) assert exp == obs def test_index_str(): exp = {'offsets': 0, 'sizes': 7} s, obs = index('wakka') assert exp == obs def test_index_list_ints(): exp = {'offsets': [1, 4, 0], 'sizes': [1, 2, 8]} s, obs = index([1, 42]) assert exp == obs def test_index_list_str(): exp = {'offsets': [1, 10, 0], 'sizes': [7, 8, 20]} s, obs = index(['wakka', 'jawaka']) assert exp == obs def test_index_list_str_int(): exp = {'offsets': [1, 10, 0], 'sizes': [7, 2, 14]} s, obs = index(['wakka', 42]) assert exp == obs def test_index_list_int_str(): exp = {'offsets': [1, 5, 14, 0], 'sizes': [2, 7, 8, 24]} s, obs = index([42, 'wakka', 'jawaka']) assert exp == obs def test_index_dict_int(): exp = {'offsets': {'wakka': 10, '__total__': 0}, 'sizes': {'wakka': 2, '__total__': 14}} s, obs = index({'wakka': 42}) assert exp == obs def test_index_dict_str(): exp = {'offsets': {'wakka': 10, '__total__': 0}, 'sizes': {'wakka': 8, '__total__': 20}} s, obs = index({'wakka': 'jawaka'}) assert exp == obs def test_index_dict_dict_int(): exp = {'offsets': {'wakka': {'jawaka': 21, '__total__': 10}, '__total__': 0, }, 'sizes': {'wakka': {'jawaka': 2, '__total__': 15}, '__total__': 27} } s, obs = index({'wakka': {'jawaka': 42}}) assert exp == obs def test_lazy_load_index(): f = StringIO() ljdump({'wakka': 42}, f) f.seek(0) lj = LazyJSON(f) assert {'wakka': 10, '__total__': 0} == lj.offsets assert {'wakka': 2, '__total__': 14} == lj.sizes def test_lazy_int(): f = StringIO() ljdump(42, f) f.seek(0) lj = LazyJSON(f) assert 42 == lj.load() def test_lazy_str(): f = StringIO() ljdump('wakka', f) f.seek(0) lj = LazyJSON(f) assert 'wakka' == lj.load() def test_lazy_list_empty(): x = [] f = StringIO() ljdump(x, f) f.seek(0) lj = LazyJSON(f) assert 0 == len(lj) assert x == lj.load() def test_lazy_list_ints(): x = [0, 1, 6, 28, 496, 8128] f = StringIO() ljdump(x, f) f.seek(0) lj = LazyJSON(f) assert 28 == lj[3] assert x[:2:-2] == lj[:2:-2] assert x == [_ for _ in lj] assert x == lj.load() def test_lazy_list_str(): x = ['I', 'have', 'seen', 'the', 'wind', 'blow'] f = StringIO() ljdump(x, f) f.seek(0) lj = LazyJSON(f) assert 'the' == lj[3] assert x[:2:-2] == lj[:2:-2] assert x == [_ for _ in lj] assert x == lj.load() def test_lazy_list_list_ints(): x = [[0, 1], [6, 28], [496, 8128]] f = StringIO() ljdump(x, f) f.seek(0) lj = LazyJSON(f) assert isinstance(lj[1], LJNode) assert 28 == lj[1][1] assert [6 == 28], lj[1].load() assert x == lj.load() def test_lazy_dict_empty(): x = {} f = StringIO() ljdump(x, f) f.seek(0) lj = LazyJSON(f) assert 0 == len(lj) assert x == lj.load() def test_lazy_dict(): f = StringIO() ljdump({'wakka': 42}, f) f.seek(0) lj = LazyJSON(f) assert ['wakka'] == list(lj.keys()) assert 42 == lj['wakka'] assert 1 == len(lj) assert {'wakka': 42} == lj.load() def test_lazy_dict_dict_int(): x = {'wakka': {'jawaka': 42}} f = StringIO() ljdump(x, f) f.seek(0) lj = LazyJSON(f) assert ['wakka'] == list(lj.keys()) assert isinstance(lj['wakka'], LJNode) assert 42 == lj['wakka']['jawaka'] assert 1 == len(lj) assert x == lj.load() xonsh-0.6.0/tests/test_lexer.py000066400000000000000000000257101320541242300165470ustar00rootroot00000000000000# -*- coding: 
utf-8 -*- """Tests the xonsh lexer.""" from __future__ import unicode_literals, print_function import os import sys from collections import Sequence sys.path.insert(0, os.path.abspath('..')) # FIXME from pprint import pformat import pytest try: from ply.lex import LexToken except ImportError: from xonsh.ply.ply.lex import LexToken from xonsh.lexer import Lexer LEXER_ARGS = {'lextab': 'lexer_test_table', 'debug': 0} def ensure_tuple(x): if isinstance(x, LexToken): # line numbers can no longer be solely determined from the lexer #x = (x.type, x.value, x.lineno, x.lexpos) x = (x.type, x.value, x.lexpos) elif isinstance(x, tuple): pass elif isinstance(x, Sequence): x = tuple(x) else: raise TypeError('{0} is not a sequence'.format(x)) return x def tokens_equal(x, y): """Tests whether two token are equal.""" xtup = ensure_tuple(x) ytup = ensure_tuple(y) return xtup == ytup def assert_token_equal(x, y): """Asserts that two tokens are equal.""" if not tokens_equal(x, y): msg = 'The tokens differ: {0!r} != {1!r}'.format(x, y) pytest.fail(msg) return True def assert_tokens_equal(x, y): """Asserts that two token sequences are equal.""" if len(x) != len(y): msg = 'The tokens sequences have different lengths: {0!r} != {1!r}\n' msg += '# x\n{2}\n\n# y\n{3}' pytest.fail(msg.format(len(x), len(y), pformat(x), pformat(y))) diffs = [(a, b) for a, b in zip(x, y) if not tokens_equal(a, b)] if len(diffs) > 0: msg = ['The token sequences differ: '] for a, b in diffs: msg += ['', '- ' + repr(a), '+ ' + repr(b)] msg = '\n'.join(msg) pytest.fail(msg) return True def check_token(inp, exp): l = Lexer() l.input(inp) obs = list(l) if len(obs) != 1: msg = 'The observed sequence does not have length-1: {0!r} != 1\n' msg += '# obs\n{1}' pytest.fail(msg.format(len(obs), pformat(obs))) return assert_token_equal(exp, obs[0]) def check_tokens(inp, exp): l = Lexer() l.input(inp) obs = list(l) return assert_tokens_equal(exp, obs) def check_tokens_subproc(inp, exp, stop=-1): l = Lexer() l.input('$[{}]'.format(inp)) obs = list(l)[1:stop] return assert_tokens_equal(exp, obs) def test_int_literal(): assert check_token('42', ['NUMBER', '42', 0]) assert check_token('4_2', ['NUMBER', '4_2', 0]) def test_hex_literal(): assert check_token('0x42', ['NUMBER', '0x42', 0]) assert check_token('0x4_2', ['NUMBER', '0x4_2', 0]) def test_oct_o_literal(): assert check_token('0o42', ['NUMBER', '0o42', 0]) assert check_token('0o4_2', ['NUMBER', '0o4_2', 0]) def test_bin_literal(): assert check_token('0b101010', ['NUMBER', '0b101010', 0]) assert check_token('0b10_10_10', ['NUMBER', '0b10_10_10', 0]) def test_indent(): exp = [('INDENT', ' \t ', 0), ('NUMBER', '42', 5), ('DEDENT', '', 0)] assert check_tokens(' \t 42', exp) def test_post_whitespace(): inp = '42 \t ' exp = [('NUMBER', '42', 0)] assert check_tokens(inp, exp) def test_internal_whitespace(): inp = '42 +\t65' exp = [('NUMBER', '42', 0), ('PLUS', '+', 4), ('NUMBER', '65', 6),] assert check_tokens(inp, exp) def test_indent_internal_whitespace(): inp = ' 42 +\t65' exp = [('INDENT', ' ', 0), ('NUMBER', '42', 1), ('PLUS', '+', 5), ('NUMBER', '65', 7), ('DEDENT', '', 0)] assert check_tokens(inp, exp) def test_assignment(): inp = 'x = 42' exp = [('NAME', 'x', 0), ('EQUALS', '=', 2), ('NUMBER', '42', 4),] assert check_tokens(inp, exp) def test_multiline(): inp = 'x\ny' exp = [('NAME', 'x', 0), ('NEWLINE', '\n', 1), ('NAME', 'y', 0),] assert check_tokens(inp, exp) def test_atdollar_expression(): inp = '@$(which python)' exp = [('ATDOLLAR_LPAREN', '@$(', 0), ('NAME', 'which', 3), ('WS', ' ', 8), 
('NAME', 'python', 9), ('RPAREN', ')', 15)] assert check_tokens(inp, exp) def test_and(): assert check_token('and', ['AND', 'and', 0]) def test_ampersand(): assert check_token('&', ['AMPERSAND', '&', 0]) def test_not_really_and_pre(): inp = "![foo-and]" exp = [ ('BANG_LBRACKET', '![', 0), ('NAME', 'foo', 2), ('MINUS', '-', 5), ('NAME', 'and', 6), ('RBRACKET', ']', 9), ] assert check_tokens(inp, exp) def test_not_really_and_post(): inp = "![and-bar]" exp = [ ('BANG_LBRACKET', '![', 0), ('NAME', 'and', 2), ('MINUS', '-', 5), ('NAME', 'bar', 6), ('RBRACKET', ']', 9), ] assert check_tokens(inp, exp) def test_not_really_and_pre_post(): inp = "![foo-and-bar]" exp = [ ('BANG_LBRACKET', '![', 0), ('NAME', 'foo', 2), ('MINUS', '-', 5), ('NAME', 'and', 6), ('MINUS', '-', 9), ('NAME', 'bar', 10), ('RBRACKET', ']', 13), ] assert check_tokens(inp, exp) def test_not_really_or_pre(): inp = "![foo-or]" exp = [ ('BANG_LBRACKET', '![', 0), ('NAME', 'foo', 2), ('MINUS', '-', 5), ('NAME', 'or', 6), ('RBRACKET', ']', 8), ] assert check_tokens(inp, exp) def test_not_really_or_post(): inp = "![or-bar]" exp = [ ('BANG_LBRACKET', '![', 0), ('NAME', 'or', 2), ('MINUS', '-', 4), ('NAME', 'bar', 5), ('RBRACKET', ']', 8), ] assert check_tokens(inp, exp) def test_not_really_or_pre_post(): inp = "![foo-or-bar]" exp = [ ('BANG_LBRACKET', '![', 0), ('NAME', 'foo', 2), ('MINUS', '-', 5), ('NAME', 'or', 6), ('MINUS', '-', 8), ('NAME', 'bar', 9), ('RBRACKET', ']', 12), ] assert check_tokens(inp, exp) def test_subproc_line_cont_space(): inp = ("![echo --option1 value1 \\\n" " --option2 value2 \\\n" " --optionZ valueZ]") exp = [ ('BANG_LBRACKET', '![', 0), ('NAME', 'echo', 2), ('WS', ' ', 6), ('MINUS', '-', 7), ('MINUS', '-', 8), ('NAME', 'option1', 9), ('WS', ' ', 16), ('NAME', 'value1', 17), ('WS', ' ', 23), ('MINUS', '-', 5), ('MINUS', '-', 6), ('NAME', 'option2', 7), ('WS', ' ', 14), ('NAME', 'value2', 15), ('WS', ' ', 21), ('MINUS', '-', 5), ('MINUS', '-', 6), ('NAME', 'optionZ', 7), ('WS', ' ', 14), ('NAME', 'valueZ', 15), ('RBRACKET',']', 21), ] assert check_tokens(inp, exp) def test_subproc_line_cont_nospace(): inp = ("![echo --option1 value1\\\n" " --option2 value2\\\n" " --optionZ valueZ]") exp = [ ('BANG_LBRACKET', '![', 0), ('NAME', 'echo', 2), ('WS', ' ', 6), ('MINUS', '-', 7), ('MINUS', '-', 8), ('NAME', 'option1', 9), ('WS', ' ', 16), ('NAME', 'value1', 17), ('WS', '\\', 23), ('MINUS', '-', 5), ('MINUS', '-', 6), ('NAME', 'option2', 7), ('WS', ' ', 14), ('NAME', 'value2', 15), ('WS', '\\', 21), ('MINUS', '-', 5), ('MINUS', '-', 6), ('NAME', 'optionZ', 7), ('WS', ' ', 14), ('NAME', 'valueZ', 15), ('RBRACKET',']', 21), ] assert check_tokens(inp, exp) def test_atdollar(): assert check_token('@$', ['ATDOLLAR', '@$', 0]) def test_doubleamp(): assert check_token('&&', ['AND', 'and', 0]) def test_pipe(): assert check_token('|', ['PIPE', '|', 0]) def test_doublepipe(): assert check_token('||', ['OR', 'or', 0]) def test_single_quote_literal(): assert check_token("'yo'", ['STRING', "'yo'", 0]) def test_double_quote_literal(): assert check_token('"yo"', ['STRING', '"yo"', 0]) def test_triple_single_quote_literal(): assert check_token("'''yo'''", ['STRING', "'''yo'''", 0]) def test_triple_double_quote_literal(): assert check_token('"""yo"""', ['STRING', '"""yo"""', 0]) def test_single_raw_string_literal(): assert check_token("r'yo'", ['STRING', "r'yo'", 0]) def test_double_raw_string_literal(): assert check_token('r"yo"', ['STRING', 'r"yo"', 0]) def test_single_f_string_literal(): assert check_token("f'{yo}'", ['STRING', 
"f'{yo}'", 0]) def test_double_f_string_literal(): assert check_token('f"{yo}"', ['STRING', 'f"{yo}"', 0]) def test_single_unicode_literal(): assert check_token("u'yo'", ['STRING', "u'yo'", 0]) def test_double_unicode_literal(): assert check_token('u"yo"', ['STRING', 'u"yo"', 0]) def test_single_bytes_literal(): assert check_token("b'yo'", ['STRING', "b'yo'", 0]) def test_path_string_literal(): assert check_token("p'/foo'", ['STRING', "p'/foo'", 0]) assert check_token('p"/foo"', ['STRING', 'p"/foo"', 0]) assert check_token("pr'/foo'", ['STRING', "pr'/foo'", 0]) assert check_token('pr"/foo"', ['STRING', 'pr"/foo"', 0]) assert check_token("rp'/foo'", ['STRING', "rp'/foo'", 0]) assert check_token('rp"/foo"', ['STRING', 'rp"/foo"', 0]) def test_regex_globs(): for i in ('.*', r'\d*', '.*#{1,2}'): for p in ('', 'r', 'g', '@somethingelse', 'p', 'pg'): c = '{}`{}`'.format(p,i) assert check_token(c, ['SEARCHPATH', c, 0]) @pytest.mark.parametrize('case', [ '0.0', '.0', '0.', '1e10', '1.e42', '0.1e42', '0.5e-42', '5E10', '5e+42', '1_0e1_0']) def test_float_literals(case): assert check_token(case, ['NUMBER', case, 0]) @pytest.mark.parametrize('case', [ '2>1', 'err>out', 'o>', 'all>', 'e>o', 'e>', 'out>', '2>&1' ]) def test_ioredir(case): assert check_tokens_subproc(case, [('IOREDIRECT', case, 2)], stop=-2) @pytest.mark.parametrize('case', [ '>', '>>', '<', 'e>', '> ', '>> ', '< ', 'e> ', ]) def test_redir_whitespace(case): inp = '![{}/path/to/file]'.format(case) l = Lexer() l.input(inp) obs = list(l) assert obs[2].type == 'WS' @pytest.mark.parametrize('s, exp', [ ('', []), (' \t \n \t ', []), ('echo hello', ['echo', 'hello']), ('echo "hello"', ['echo', '"hello"']), ('![echo "hello"]', ['![echo', '"hello"]']), ('/usr/bin/echo hello', ['/usr/bin/echo', 'hello']), ('$(/usr/bin/echo hello)', ['$(/usr/bin/echo', 'hello)']), ('C:\\Python\\python.exe -m xonsh', ['C:\\Python\\python.exe', '-m', 'xonsh']), ('print("""I am a triple string""")', ['print("""I am a triple string""")']), ('print("""I am a \ntriple string""")', ['print("""I am a \ntriple string""")']), ('echo $HOME', ['echo', '$HOME']), ('echo -n $HOME', ['echo', '-n', '$HOME']), ('echo --go=away', ['echo', '--go=away']), ('echo --go=$HOME', ['echo', '--go=$HOME']), ]) def test_lexer_split(s, exp): lexer = Lexer() obs = lexer.split(s) assert exp == obs xonsh-0.6.0/tests/test_main.py000066400000000000000000000124031320541242300163470ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the xonsh main function.""" from __future__ import unicode_literals, print_function from contextlib import contextmanager import builtins import os.path import sys import xonsh.main from xonsh.main import XonshMode from xonsh.environ import Env import pytest from tools import TEST_DIR def Shell(*args, **kwargs): pass @pytest.fixture def shell(xonsh_builtins, xonsh_execer, monkeypatch): """Xonsh Shell Mock""" monkeypatch.setattr(xonsh.main, 'Shell', Shell) def test_premain_no_arg(shell, monkeypatch): monkeypatch.setattr(sys.stdin, 'isatty', lambda: True) xonsh.main.premain([]) assert builtins.__xonsh_env__.get('XONSH_LOGIN') def test_premain_interactive(shell): xonsh.main.premain(['-i']) assert (builtins.__xonsh_env__.get('XONSH_INTERACTIVE')) def test_premain_login_command(shell): xonsh.main.premain(['-l', '-c', 'echo "hi"']) assert (builtins.__xonsh_env__.get('XONSH_LOGIN')) def test_premain_login(shell): xonsh.main.premain(['-l']) assert (builtins.__xonsh_env__.get('XONSH_LOGIN')) def test_premain_D(shell): xonsh.main.premain(['-DTEST1=1616', '-DTEST2=LOL']) 
assert (builtins.__xonsh_env__.get('TEST1') == '1616') assert (builtins.__xonsh_env__.get('TEST2') == 'LOL') def test_premain_custom_rc(shell, tmpdir, monkeypatch): monkeypatch.setattr(sys.stdin, 'isatty', lambda: True) builtins.__xonsh_env__ = Env(XONSH_CACHE_SCRIPTS=False) f = tmpdir.join('wakkawakka') f.write("print('hi')") args = xonsh.main.premain(['--rc', f.strpath]) assert args.mode == XonshMode.interactive assert f.strpath in builtins.__xonsh_env__.get('XONSHRC') def test_no_rc_with_script(shell, tmpdir): args = xonsh.main.premain(['tests/sample.xsh']) assert not (args.mode == XonshMode.interactive) def test_force_interactive_rc_with_script(shell, tmpdir): args = xonsh.main.premain(['-i', 'tests/sample.xsh']) assert builtins.__xonsh_env__.get('XONSH_INTERACTIVE') def test_force_interactive_custom_rc_with_script(shell, tmpdir): """Calling a custom RC file on a script-call with the interactive flag should run interactively """ builtins.__xonsh_env__ = Env(XONSH_CACHE_SCRIPTS=False) f = tmpdir.join('wakkawakka') f.write("print('hi')") args = xonsh.main.premain(['-i', '--rc', f.strpath, 'tests/sample.xsh']) assert args.mode == XonshMode.interactive assert f.strpath in builtins.__xonsh_env__.get('XONSHRC') def test_custom_rc_with_script(shell, tmpdir): """Calling a custom RC file on a script-call without the interactive flag should not run interactively """ f = tmpdir.join('wakkawakka') f.write("print('hi')") args = xonsh.main.premain(['--rc', f.strpath, 'tests/sample.xsh']) assert not (args.mode == XonshMode.interactive) def test_premain_no_rc(shell, tmpdir): xonsh.main.premain(['--no-rc']) assert not builtins.__xonsh_env__.get('XONSHRC') @pytest.mark.parametrize( 'arg', ['', '-i', '-vERSION', '-hAALP', 'TTTT', '-TT', '--TTT']) def test_premain_with_file_argument(arg, shell): xonsh.main.premain(['tests/sample.xsh', arg]) assert not (builtins.__xonsh_env__.get('XONSH_INTERACTIVE')) def test_premain_interactive__with_file_argument(shell): xonsh.main.premain(['-i', 'tests/sample.xsh']) assert (builtins.__xonsh_env__.get('XONSH_INTERACTIVE')) @pytest.mark.parametrize('case', ['----', '--hep', '-TT', '--TTTT']) def test_premain_invalid_arguments(shell, case, capsys): with pytest.raises(SystemExit): xonsh.main.premain([case]) assert 'unrecognized argument' in capsys.readouterr()[1] def test_xonsh_failback(shell, monkeypatch): failback_checker = [] monkeypatch.setattr(sys, 'stderr', open(os.devnull, 'w')) def mocked_main(*args): raise Exception('A fake failure') monkeypatch.setattr(xonsh.main, 'main_xonsh', mocked_main) def mocked_execlp(f, *args): failback_checker.append(f) failback_checker.append(args[0]) monkeypatch.setattr(os, 'execlp', mocked_execlp) monkeypatch.setattr(os.path, 'exists', lambda x: True) monkeypatch.setattr(sys, 'argv', ['xonsh', '-i']) @contextmanager def mocked_open(*args): yield ['/usr/bin/xonsh', '/usr/bin/screen', 'bash', '/bin/xshell'] monkeypatch.setattr(builtins, 'open', mocked_open) xonsh.main.main() assert failback_checker == ['/bin/xshell', '/bin/xshell'] def test_xonsh_failback_single(shell, monkeypatch): class FakeFailureError(Exception): pass def mocked_main(*args): raise FakeFailureError() monkeypatch.setattr(xonsh.main, 'main_xonsh', mocked_main) monkeypatch.setattr(sys, 'argv', ['xonsh', '-c', 'echo', 'foo']) monkeypatch.setattr(sys, 'stderr', open(os.devnull, 'w')) with pytest.raises(FakeFailureError): xonsh.main.main() def test_xonsh_failback_script_from_file(shell, monkeypatch): checker = [] def mocked_execlp(f, *args): checker.append(f) 
monkeypatch.setattr(os, 'execlp', mocked_execlp) script = os.path.join(TEST_DIR, 'scripts', 'raise.xsh') monkeypatch.setattr(sys, 'argv', ['xonsh', script]) monkeypatch.setattr(sys, 'stderr', open(os.devnull, 'w')) with pytest.raises(Exception): xonsh.main.main() assert len(checker) == 0 xonsh-0.6.0/tests/test_man.py000066400000000000000000000011411320541242300161730ustar00rootroot00000000000000# -*- coding: utf-8 -*- import os from xonsh.completers.man import complete_from_man from tools import skip_if_on_windows @skip_if_on_windows def test_man_completion(monkeypatch, tmpdir, xonsh_builtins): tempdir = tmpdir.mkdir('test_man') monkeypatch.setitem(os.environ, 'MANPATH', os.path.dirname(os.path.abspath(__file__))) xonsh_builtins.__xonsh_env__.update({'XONSH_DATA_DIR': str(tempdir)}) completions = complete_from_man('--', 'yes --', 4, 6, xonsh_builtins.__xonsh_env__) assert '--version' in completions assert '--help' in completions xonsh-0.6.0/tests/test_mpl.py000066400000000000000000000065401320541242300162200ustar00rootroot00000000000000import pytest # make sure to skip these tests entirely if numpy/matplotlib are not present np = pytest.importorskip("numpy") matplotlib = pytest.importorskip("matplotlib") plt = pytest.importorskip("matplotlib.pyplot") from xontrib import mplhooks skip_if_mpl2 = pytest.mark.skipif(matplotlib.__version__.startswith('2'), reason='Bug in matplotlib v2') # some default settings that are temporarily changed by mpl FONT_SIZE = 22 FACE_COLOR = (0.0, 1.0, 0.0, 1.0) DPI = 80 def create_figure(): """Simply create a figure with the default settings""" f, ax = plt.subplots() ax.plot(np.arange(20), np.arange(20)) # set the figure parameters such that mpl will require changes f.set_facecolor(FACE_COLOR) f.dpi = DPI matplotlib.rcParams.update({'font.size': FONT_SIZE}) return f @skip_if_mpl2 def test_mpl_preserve_font_size(): """Make sure that matplotlib preserves font size settings""" f = create_figure() width, height = f.canvas.get_width_height() print(width, height) s = mplhooks.figure_to_tight_array(f, 0.5*width, 0.5*height, True) exp = FONT_SIZE obs = matplotlib.rcParams['font.size'] plt.close(f) assert exp == obs @skip_if_mpl2 def test_mpl_preserve_face_color(): """Make sure that the figure preserves face color settings""" f = create_figure() width, height = f.canvas.get_width_height() s = mplhooks.figure_to_tight_array(f, 0.5*width, 0.5*height, True) exp = FACE_COLOR obs = f.get_facecolor() plt.close(f) assert exp == obs @skip_if_mpl2 def test_mpl_preserve_width(): """Make sure that the figure preserves width settings""" f = create_figure() width, height = f.canvas.get_width_height() s = mplhooks.figure_to_tight_array(f, 0.5*width, 0.5*height, True) exp = width newwidth, newheight = f.canvas.get_width_height() obs = newwidth plt.close(f) assert exp == obs @skip_if_mpl2 def test_mpl_preserve_height(): """Make sure that the figure preserves height settings""" f = create_figure() width, height = f.canvas.get_width_height() s = mplhooks.figure_to_tight_array(f, 0.5*width, 0.5*height, True) exp = height newwidth, newheight = f.canvas.get_width_height() obs = newheight plt.close(f) assert exp == obs def test_mpl_preserve_dpi(): """Make sure that the figure preserves height settings""" f = create_figure() width, height = f.canvas.get_width_height() s = mplhooks.figure_to_tight_array(f, 0.5*width, 0.5*height, False) exp = DPI obs = f.dpi plt.close(f) assert exp == obs @skip_if_mpl2 def test_mpl_preserve_image_tight(): """Make sure that the figure preserves height settings""" 
f = create_figure() exp = mplhooks.figure_to_rgb_array(f) width, height = f.canvas.get_width_height() s = mplhooks.figure_to_tight_array(f, 0.5*width, 0.5*height, True) obs = mplhooks.figure_to_rgb_array(f) plt.close(f) assert np.all(exp == obs) def test_mpl_preserve_standard(): """Make sure that the figure preserves height settings""" f = create_figure() exp = mplhooks.figure_to_rgb_array(f) width, height = f.canvas.get_width_height() s = mplhooks.figure_to_tight_array(f, 0.5*width, 0.5*height, False) obs = mplhooks.figure_to_rgb_array(f) plt.close(f) assert np.all(exp == obs) xonsh-0.6.0/tests/test_news.py000066400000000000000000000046521320541242300164060ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Testing that news entries are well formed.""" import os import re import pytest from xonsh.platform import scandir NEWSDIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'news') CATEGORIES = frozenset(['Added', 'Changed', 'Deprecated', 'Removed', 'Fixed', 'Security']) single_grave_reg = re.compile(r'[^`]`[^`]+`[^`_]') def check_news_file(fname): name = fname.name with open(fname.path) as f: lines = f.read().splitlines() nlines = len(lines) for i, line in enumerate(lines): if line.startswith('**'): cat, *_ = line[2:].rsplit(':') if cat not in CATEGORIES: pytest.fail('{}:{}: {!r} not a proper category ' 'must be one of {}' ''.format(name, i+1, cat, list(CATEGORIES)), pytrace=False) if i+1 == nlines: continue if not lines[i+1].strip() == '': pytest.fail('{}:{}: empty line required after category' ''.format(name, i+1), pytrace=False) if i > 0 and not lines[i-1].strip() == '': pytest.fail('{}:{}: empty line required before category' ''.format(name, i+1), pytrace=False) if line.endswith('None'): if not lines[i+2].startswith('**'): pytest.fail("{}:{}: can't have entries after None" ''.format(name, i+1), pytrace=False) else: if lines[i+2].startswith('**'): pytest.fail("{}:{}: must have entry if not None" ''.format(name, i+1), pytrace=False) else: if not (line.startswith('* ') or line.startswith(' ') or (line.strip() == '')): pytest.fail('{}:{}: invalid rst'.format(name, i+1), pytrace=False) if '`' in line: if single_grave_reg.search(line): pytest.fail("{}:{}: single grave accents" " are not valid rst".format(name, i+1), pytrace=False) @pytest.mark.parametrize('fname', list(scandir(NEWSDIR))) def test_news(fname): base, ext = os.path.splitext(fname.path) assert 'rst' in ext check_news_file(fname) xonsh-0.6.0/tests/test_parser.py000066400000000000000000001652561320541242300167360ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the xonsh parser.""" import ast import builtins import textwrap import itertools import pytest from xonsh.ast import AST, With, Pass, pdump from xonsh.parser import Parser from tools import VER_FULL, skip_if_py34, skip_if_lt_py36, nodes_equal # a lot of col_offset data changed from Py v3.5.0 -> v3.5.1 INC_ATTRS = (3, 5, 1) <= VER_FULL @pytest.fixture(autouse=True) def xonsh_builtins_autouse(xonsh_builtins): return xonsh_builtins PARSER = Parser(lexer_optimize=False, yacc_optimize=False, yacc_debug=True) def check_ast(inp, run=True, mode='eval', debug_level=0): __tracebackhide__ = True # expect a Python AST exp = ast.parse(inp, mode=mode) # observe something from xonsh obs = PARSER.parse(inp, debug_level=debug_level) # Check that they are equal assert nodes_equal(exp, obs) # round trip by running xonsh AST via Python if run: exec(compile(obs, '', mode)) def check_stmts(inp, run=True, mode='exec', debug_level=0): __tracebackhide__ = True if not 
inp.endswith('\n'): inp += '\n' check_ast(inp, run=run, mode=mode, debug_level=debug_level) def check_xonsh_ast(xenv, inp, run=True, mode='eval', debug_level=0, return_obs=False): __tracebackhide__ = True builtins.__xonsh_env__ = xenv obs = PARSER.parse(inp, debug_level=debug_level) if obs is None: return # comment only bytecode = compile(obs, '', mode) if run: exec(bytecode) return obs if return_obs else True def check_xonsh(xenv, inp, run=True, mode='exec'): __tracebackhide__ = True if not inp.endswith('\n'): inp += '\n' check_xonsh_ast(xenv, inp, run=run, mode=mode) # # Tests # # # expressions # def test_int_literal(): check_ast('42') @skip_if_lt_py36 def test_int_literal_underscore(): check_ast('4_2') def test_float_literal(): check_ast('42.0') @skip_if_lt_py36 def test_float_literal_underscore(): check_ast('4_2.4_2') def test_imag_literal(): check_ast('42j') def test_float_imag_literal(): check_ast('42.0j') def test_complex(): check_ast('42+84j') def test_str_literal(): check_ast('"hello"') def test_bytes_literal(): check_ast('b"hello"') check_ast('B"hello"') def test_raw_literal(): check_ast('r"hell\o"') check_ast('R"hell\o"') @skip_if_lt_py36 def test_f_literal(): check_ast('f"wakka{yo}yakka{42}"', run=False) check_ast('F"{yo}"', run=False) def test_raw_bytes_literal(): check_ast('br"hell\o"') check_ast('RB"hell\o"') check_ast('Br"hell\o"') check_ast('rB"hell\o"') def test_unary_plus(): check_ast('+1') def test_unary_minus(): check_ast('-1') def test_unary_invert(): check_ast('~1') def test_binop_plus(): check_ast('42 + 65') def test_binop_minus(): check_ast('42 - 65') def test_binop_times(): check_ast('42 * 65') @skip_if_py34 def test_binop_matmult(): check_ast('x @ y', False) def test_binop_div(): check_ast('42 / 65') def test_binop_mod(): check_ast('42 % 65') def test_binop_floordiv(): check_ast('42 // 65') def test_binop_pow(): check_ast('2 ** 2') def test_plus_pow(): check_ast('42 + 2 ** 2') def test_plus_plus(): check_ast('42 + 65 + 6') def test_plus_minus(): check_ast('42 + 65 - 6') def test_minus_plus(): check_ast('42 - 65 + 6') def test_minus_minus(): check_ast('42 - 65 - 6') def test_minus_plus_minus(): check_ast('42 - 65 + 6 - 28') def test_times_plus(): check_ast('42 * 65 + 6') def test_plus_times(): check_ast('42 + 65 * 6') def test_times_times(): check_ast('42 * 65 * 6') def test_times_div(): check_ast('42 * 65 / 6') def test_times_div_mod(): check_ast('42 * 65 / 6 % 28') def test_times_div_mod_floor(): check_ast('42 * 65 / 6 % 28 // 13') def test_str_str(): check_ast('"hello" \'mom\'') def test_str_str_str(): check_ast('"hello" \'mom\' "wow"') def test_str_plus_str(): check_ast('"hello" + \'mom\'') def test_str_times_int(): check_ast('"hello" * 20') def test_int_times_str(): check_ast('2*"hello"') def test_group_plus_times(): check_ast('(42 + 65) * 20') def test_plus_group_times(): check_ast('42 + (65 * 20)') def test_group(): check_ast('(42)') def test_lt(): check_ast('42 < 65') def test_gt(): check_ast('42 > 65') def test_eq(): check_ast('42 == 65') def test_le(): check_ast('42 <= 65') def test_ge(): check_ast('42 >= 65') def test_ne(): check_ast('42 != 65') def test_in(): check_ast('"4" in "65"') def test_is(): check_ast('42 is 65') def test_not_in(): check_ast('"4" not in "65"') def test_is_not(): check_ast('42 is not 65') def test_lt_lt(): check_ast('42 < 65 < 105') def test_lt_lt_lt(): check_ast('42 < 65 < 105 < 77') def test_not(): check_ast('not 0') def test_or(): check_ast('1 or 0') def test_or_or(): check_ast('1 or 0 or 42') def test_and(): check_ast('1 and 
0') def test_and_and(): check_ast('1 and 0 and 2') def test_and_or(): check_ast('1 and 0 or 2') def test_or_and(): check_ast('1 or 0 and 2') def test_group_and_and(): check_ast('(1 and 0) and 2') def test_group_and_or(): check_ast('(1 and 0) or 2') def test_if_else_expr(): check_ast('42 if True else 65') def test_if_else_expr_expr(): check_ast('42+5 if 1 == 2 else 65-5') def test_str_idx(): check_ast('"hello"[0]') def test_str_slice(): check_ast('"hello"[0:3]') def test_str_step(): check_ast('"hello"[0:3:1]') def test_str_slice_all(): check_ast('"hello"[:]') def test_str_slice_upper(): check_ast('"hello"[5:]') def test_str_slice_lower(): check_ast('"hello"[:3]') def test_str_slice_other(): check_ast('"hello"[::2]') def test_str_slice_lower_other(): check_ast('"hello"[:3:2]') def test_str_slice_upper_other(): check_ast('"hello"[3::2]') def test_str_2slice(): check_ast('"hello"[0:3,0:3]', False) def test_str_2step(): check_ast('"hello"[0:3:1,0:4:2]', False) def test_str_2slice_all(): check_ast('"hello"[:,:]', False) def test_str_2slice_upper(): check_ast('"hello"[5:,5:]', False) def test_str_2slice_lower(): check_ast('"hello"[:3,:3]', False) def test_str_2slice_lowerupper(): check_ast('"hello"[5:,:3]', False) def test_str_2slice_other(): check_ast('"hello"[::2,::2]', False) def test_str_2slice_lower_other(): check_ast('"hello"[:3:2,:3:2]', False) def test_str_2slice_upper_other(): check_ast('"hello"[3::2,3::2]', False) def test_str_3slice(): check_ast('"hello"[0:3,0:3,0:3]', False) def test_str_3step(): check_ast('"hello"[0:3:1,0:4:2,1:3:2]', False) def test_str_3slice_all(): check_ast('"hello"[:,:,:]', False) def test_str_3slice_upper(): check_ast('"hello"[5:,5:,5:]', False) def test_str_3slice_lower(): check_ast('"hello"[:3,:3,:3]', False) def test_str_3slice_lowerlowerupper(): check_ast('"hello"[:3,:3,:3]', False) def test_str_3slice_lowerupperlower(): check_ast('"hello"[:3,5:,:3]', False) def test_str_3slice_lowerupperupper(): check_ast('"hello"[:3,5:,5:]', False) def test_str_3slice_upperlowerlower(): check_ast('"hello"[5:,5:,:3]', False) def test_str_3slice_upperlowerupper(): check_ast('"hello"[5:,:3,5:]', False) def test_str_3slice_upperupperlower(): check_ast('"hello"[5:,5:,:3]', False) def test_str_3slice_other(): check_ast('"hello"[::2,::2,::2]', False) def test_str_3slice_lower_other(): check_ast('"hello"[:3:2,:3:2,:3:2]', False) def test_str_3slice_upper_other(): check_ast('"hello"[3::2,3::2,3::2]', False) def test_str_slice_true(): check_ast('"hello"[0:3,True]', False) def test_str_true_slice(): check_ast('"hello"[True,0:3]', False) def test_list_empty(): check_ast('[]') def test_list_one(): check_ast('[1]') def test_list_one_comma(): check_ast('[1,]') def test_list_two(): check_ast('[1, 42]') def test_list_three(): check_ast('[1, 42, 65]') def test_list_three_comma(): check_ast('[1, 42, 65,]') def test_list_one_nested(): check_ast('[[1]]') def test_list_list_four_nested(): check_ast('[[1], [2], [3], [4]]') def test_list_tuple_three_nested(): check_ast('[(1,), (2,), (3,)]') def test_list_set_tuple_three_nested(): check_ast('[{(1,)}, {(2,)}, {(3,)}]') def test_list_tuple_one_nested(): check_ast('[(1,)]') def test_tuple_tuple_one_nested(): check_ast('((1,),)') def test_dict_list_one_nested(): check_ast('{1: [2]}') def test_dict_list_one_nested_comma(): check_ast('{1: [2],}') def test_dict_tuple_one_nested(): check_ast('{1: (2,)}') def test_dict_tuple_one_nested_comma(): check_ast('{1: (2,),}') def test_dict_list_two_nested(): check_ast('{1: [2], 3: [4]}') def 
test_set_tuple_one_nested(): check_ast('{(1,)}') def test_set_tuple_two_nested(): check_ast('{(1,), (2,)}') def test_tuple_empty(): check_ast('()') def test_tuple_one_bare(): check_ast('1,') def test_tuple_two_bare(): check_ast('1, 42') def test_tuple_three_bare(): check_ast('1, 42, 65') def test_tuple_three_bare_comma(): check_ast('1, 42, 65,') def test_tuple_one_comma(): check_ast('(1,)') def test_tuple_two(): check_ast('(1, 42)') def test_tuple_three(): check_ast('(1, 42, 65)') def test_tuple_three_comma(): check_ast('(1, 42, 65,)') def test_bare_tuple_of_tuples(): check_ast('(),') check_ast('((),),(1,)') check_ast('(),(),') check_ast('[],') check_ast('[],[]') check_ast('[],()') check_ast('(),[],') check_ast('((),[()],)') def test_set_one(): check_ast('{42}') def test_set_one_comma(): check_ast('{42,}') def test_set_two(): check_ast('{42, 65}') def test_set_two_comma(): check_ast('{42, 65,}') def test_set_three(): check_ast('{42, 65, 45}') def test_dict_empty(): check_ast('{}') def test_dict_one(): check_ast('{42: 65}') def test_dict_one_comma(): check_ast('{42: 65,}') def test_dict_two(): check_ast('{42: 65, 6: 28}') def test_dict_two_comma(): check_ast('{42: 65, 6: 28,}') def test_dict_three(): check_ast('{42: 65, 6: 28, 1: 2}') @skip_if_py34 def test_dict_from_dict_two_xy(): check_ast('{"x": 1, **{"y": 2}}') @skip_if_py34 def test_dict_from_dict_two_x_first(): check_ast('{"x": 1, **{"x": 2}}') @skip_if_py34 def test_dict_from_dict_two_x_second(): check_ast('{**{"x": 2}, "x": 1}') @skip_if_py34 def test_unpack_range_tuple(): check_stmts('*range(4),') @skip_if_py34 def test_unpack_range_tuple_4(): check_stmts('*range(4), 4') @skip_if_py34 def test_unpack_range_tuple_parens(): check_ast('(*range(4),)') @skip_if_py34 def test_unpack_range_tuple_parens_4(): check_ast('(*range(4), 4)') @skip_if_py34 def test_unpack_range_list(): check_ast('[*range(4)]') @skip_if_py34 def test_unpack_range_list_4(): check_ast('[*range(4), 4]') @skip_if_py34 def test_unpack_range_set(): check_ast('{*range(4)}') @skip_if_py34 def test_unpack_range_set_4(): check_ast('{*range(4), 4}') def test_true(): check_ast('True') def test_false(): check_ast('False') def test_none(): check_ast('None') def test_elipssis(): check_ast('...') def test_not_implemented_name(): check_ast('NotImplemented') def test_genexpr(): check_ast('(x for x in "mom")') def test_genexpr_if(): check_ast('(x for x in "mom" if True)') def test_genexpr_if_and(): check_ast('(x for x in "mom" if True and x == "m")') def test_dbl_genexpr(): check_ast('(x+y for x in "mom" for y in "dad")') def test_genexpr_if_genexpr(): check_ast('(x+y for x in "mom" if True for y in "dad")') def test_genexpr_if_genexpr_if(): check_ast('(x+y for x in "mom" if True for y in "dad" if y == "d")') def test_listcomp(): check_ast('[x for x in "mom"]') def test_listcomp_if(): check_ast('[x for x in "mom" if True]') def test_listcomp_if_and(): check_ast('[x for x in "mom" if True and x == "m"]') def test_dbl_listcomp(): check_ast('[x+y for x in "mom" for y in "dad"]') def test_listcomp_if_listcomp(): check_ast('[x+y for x in "mom" if True for y in "dad"]') def test_listcomp_if_listcomp_if(): check_ast('[x+y for x in "mom" if True for y in "dad" if y == "d"]') def test_setcomp(): check_ast('{x for x in "mom"}') def test_setcomp_if(): check_ast('{x for x in "mom" if True}') def test_setcomp_if_and(): check_ast('{x for x in "mom" if True and x == "m"}') def test_dbl_setcomp(): check_ast('{x+y for x in "mom" for y in "dad"}') def test_setcomp_if_setcomp(): check_ast('{x+y for x 
in "mom" if True for y in "dad"}') def test_setcomp_if_setcomp_if(): check_ast('{x+y for x in "mom" if True for y in "dad" if y == "d"}') def test_dictcomp(): check_ast('{x: x for x in "mom"}') def test_dictcomp_unpack_parens(): check_ast('{k: v for (k, v) in {"x": 42}.items()}') def test_dictcomp_unpack_no_parens(): check_ast('{k: v for k, v in {"x": 42}.items()}') def test_dictcomp_if(): check_ast('{x: x for x in "mom" if True}') def test_dictcomp_if_and(): check_ast('{x: x for x in "mom" if True and x == "m"}') def test_dbl_dictcomp(): check_ast('{x: y for x in "mom" for y in "dad"}') def test_dictcomp_if_dictcomp(): check_ast('{x: y for x in "mom" if True for y in "dad"}') def test_dictcomp_if_dictcomp_if(): check_ast('{x: y for x in "mom" if True for y in "dad" if y == "d"}') def test_lambda(): check_ast('lambda: 42') def test_lambda_x(): check_ast('lambda x: x') def test_lambda_kwx(): check_ast('lambda x=42: x') def test_lambda_x_y(): check_ast('lambda x, y: x') def test_lambda_x_y_z(): check_ast('lambda x, y, z: x') def test_lambda_x_kwy(): check_ast('lambda x, y=42: x') def test_lambda_kwx_kwy(): check_ast('lambda x=65, y=42: x') def test_lambda_kwx_kwy_kwz(): check_ast('lambda x=65, y=42, z=1: x') def test_lambda_x_comma(): check_ast('lambda x,: x') def test_lambda_x_y_comma(): check_ast('lambda x, y,: x') def test_lambda_x_y_z_comma(): check_ast('lambda x, y, z,: x') def test_lambda_x_kwy_comma(): check_ast('lambda x, y=42,: x') def test_lambda_kwx_kwy_comma(): check_ast('lambda x=65, y=42,: x') def test_lambda_kwx_kwy_kwz_comma(): check_ast('lambda x=65, y=42, z=1,: x') def test_lambda_args(): check_ast('lambda *args: 42') def test_lambda_args_x(): check_ast('lambda *args, x: 42') def test_lambda_args_x_y(): check_ast('lambda *args, x, y: 42') def test_lambda_args_x_kwy(): check_ast('lambda *args, x, y=10: 42') def test_lambda_args_kwx_y(): check_ast('lambda *args, x=10, y: 42') def test_lambda_args_kwx_kwy(): check_ast('lambda *args, x=42, y=65: 42') def test_lambda_x_args(): check_ast('lambda x, *args: 42') def test_lambda_x_args_y(): check_ast('lambda x, *args, y: 42') def test_lambda_x_args_y_z(): check_ast('lambda x, *args, y, z: 42') def test_lambda_kwargs(): check_ast('lambda **kwargs: 42') def test_lambda_x_kwargs(): check_ast('lambda x, **kwargs: 42') def test_lambda_x_y_kwargs(): check_ast('lambda x, y, **kwargs: 42') def test_lambda_x_kwy_kwargs(): check_ast('lambda x, y=42, **kwargs: 42') def test_lambda_args_kwargs(): check_ast('lambda *args, **kwargs: 42') def test_lambda_x_args_kwargs(): check_ast('lambda x, *args, **kwargs: 42') def test_lambda_x_y_args_kwargs(): check_ast('lambda x, y, *args, **kwargs: 42') def test_lambda_kwx_args_kwargs(): check_ast('lambda x=10, *args, **kwargs: 42') def test_lambda_x_kwy_args_kwargs(): check_ast('lambda x, y=42, *args, **kwargs: 42') def test_lambda_x_args_y_kwargs(): check_ast('lambda x, *args, y, **kwargs: 42') def test_lambda_x_args_kwy_kwargs(): check_ast('lambda x, *args, y=42, **kwargs: 42') def test_lambda_args_y_kwargs(): check_ast('lambda *args, y, **kwargs: 42') def test_lambda_star_x(): check_ast('lambda *, x: 42') def test_lambda_star_x_y(): check_ast('lambda *, x, y: 42') def test_lambda_star_x_kwargs(): check_ast('lambda *, x, **kwargs: 42') def test_lambda_star_kwx_kwargs(): check_ast('lambda *, x=42, **kwargs: 42') def test_lambda_x_star_y(): check_ast('lambda x, *, y: 42') def test_lambda_x_y_star_z(): check_ast('lambda x, y, *, z: 42') def test_lambda_x_kwy_star_y(): check_ast('lambda x, y=42, *, z: 42') 
def test_lambda_x_kwy_star_kwy(): check_ast('lambda x, y=42, *, z=65: 42') def test_lambda_x_star_y_kwargs(): check_ast('lambda x, *, y, **kwargs: 42') def test_call_range(): check_ast('range(6)') def test_call_range_comma(): check_ast('range(6,)') def test_call_range_x_y(): check_ast('range(6, 10)') def test_call_range_x_y_comma(): check_ast('range(6, 10,)') def test_call_range_x_y_z(): check_ast('range(6, 10, 2)') def test_call_dict_kwx(): check_ast('dict(start=10)') def test_call_dict_kwx_comma(): check_ast('dict(start=10,)') def test_call_dict_kwx_kwy(): check_ast('dict(start=10, stop=42)') def test_call_tuple_gen(): check_ast('tuple(x for x in [1, 2, 3])') def test_call_tuple_genifs(): check_ast('tuple(x for x in [1, 2, 3] if x < 3)') def test_call_range_star(): check_ast('range(*[1, 2, 3])') def test_call_range_x_star(): check_ast('range(1, *[2, 3])') def test_call_int(): check_ast('int(*["42"], base=8)') def test_call_int_base_dict(): check_ast('int(*["42"], **{"base": 8})') def test_call_dict_kwargs(): check_ast('dict(**{"base": 8})') @skip_if_py34 def test_call_list_many_star_args(): check_ast('min(*[1, 2], 3, *[4, 5])') @skip_if_py34 def test_call_list_many_starstar_args(): check_ast('dict(**{"a": 2}, v=3, **{"c": 5})') @skip_if_py34 def test_call_list_many_star_and_starstar_args(): check_ast('x(*[("a", 2)], *[("v", 3)], **{"c": 5})', False) def test_call_alot(): check_ast('x(1, *args, **kwargs)', False) def test_call_alot_next(): check_ast('x(x=1, *args, **kwargs)', False) def test_call_alot_next_next(): check_ast('x(x=1, *args, y=42, **kwargs)', False) def test_getattr(): check_ast('list.append') def test_getattr_getattr(): check_ast('list.append.__str__') def test_dict_tuple_key(): check_ast('{(42, 1): 65}') def test_dict_tuple_key_get(): check_ast('{(42, 1): 65}[42, 1]') def test_dict_tuple_key_get_3(): check_ast('{(42, 1, 3): 65}[42, 1, 3]') def test_pipe_op(): check_ast('{42} | {65}') def test_pipe_op_two(): check_ast('{42} | {65} | {1}') def test_pipe_op_three(): check_ast('{42} | {65} | {1} | {7}') def test_xor_op(): check_ast('{42} ^ {65}') def test_xor_op_two(): check_ast('{42} ^ {65} ^ {1}') def test_xor_op_three(): check_ast('{42} ^ {65} ^ {1} ^ {7}') def test_xor_pipe(): check_ast('{42} ^ {65} | {1}') def test_amp_op(): check_ast('{42} & {65}') def test_amp_op_two(): check_ast('{42} & {65} & {1}') def test_amp_op_three(): check_ast('{42} & {65} & {1} & {7}') def test_lshift_op(): check_ast('42 << 65') def test_lshift_op_two(): check_ast('42 << 65 << 1') def test_lshift_op_three(): check_ast('42 << 65 << 1 << 7') def test_rshift_op(): check_ast('42 >> 65') def test_rshift_op_two(): check_ast('42 >> 65 >> 1') def test_rshift_op_three(): check_ast('42 >> 65 >> 1 >> 7') # # statements # def test_equals(): check_stmts('x = 42') def test_equals_semi(): check_stmts('x = 42;') def test_x_y_equals_semi(): check_stmts('x = y = 42') def test_equals_two(): check_stmts('x = 42; y = 65') def test_equals_two_semi(): check_stmts('x = 42; y = 65;') def test_equals_three(): check_stmts('x = 42; y = 65; z = 6') def test_equals_three_semi(): check_stmts('x = 42; y = 65; z = 6;') def test_plus_eq(): check_stmts('x = 42; x += 65') def test_sub_eq(): check_stmts('x = 42; x -= 2') def test_times_eq(): check_stmts('x = 42; x *= 2') @skip_if_py34 def test_matmult_eq(): check_stmts('x @= y', False) def test_div_eq(): check_stmts('x = 42; x /= 2') def test_floordiv_eq(): check_stmts('x = 42; x //= 2') def test_pow_eq(): check_stmts('x = 42; x **= 2') def test_mod_eq(): check_stmts('x = 42; x %= 
2') def test_xor_eq(): check_stmts('x = 42; x ^= 2') def test_ampersand_eq(): check_stmts('x = 42; x &= 2') def test_bitor_eq(): check_stmts('x = 42; x |= 2') def test_lshift_eq(): check_stmts('x = 42; x <<= 2') def test_rshift_eq(): check_stmts('x = 42; x >>= 2') def test_bare_unpack(): check_stmts('x, y = 42, 65') def test_lhand_group_unpack(): check_stmts('(x, y) = 42, 65') def test_rhand_group_unpack(): check_stmts('x, y = (42, 65)') def test_grouped_unpack(): check_stmts('(x, y) = (42, 65)') def test_double_grouped_unpack(): check_stmts('(x, y) = (z, a) = (7, 8)') def test_double_ungrouped_unpack(): check_stmts('x, y = z, a = 7, 8') def test_stary_eq(): check_stmts('*y, = [1, 2, 3]') def test_stary_x(): check_stmts('*y, x = [1, 2, 3]') def test_tuple_x_stary(): check_stmts('(x, *y) = [1, 2, 3]') def test_list_x_stary(): check_stmts('[x, *y] = [1, 2, 3]') def test_bare_x_stary(): check_stmts('x, *y = [1, 2, 3]') def test_bare_x_stary_z(): check_stmts('x, *y, z = [1, 2, 2, 3]') def test_equals_list(): check_stmts('x = [42]; x[0] = 65') def test_equals_dict(): check_stmts('x = {42: 65}; x[42] = 3') def test_equals_attr(): check_stmts('class X(object):\n pass\nx = X()\nx.a = 65') def test_dict_keys(): check_stmts('x = {"x": 1}\nx.keys()') def test_assert_msg(): check_stmts('assert True, "wow mom"') def test_assert(): check_stmts('assert True') def test_pass(): check_stmts('pass') def test_del(): check_stmts('x = 42; del x') def test_del_comma(): check_stmts('x = 42; del x,') def test_del_two(): check_stmts('x = 42; y = 65; del x, y') def test_del_two_comma(): check_stmts('x = 42; y = 65; del x, y,') def test_del_with_parens(): check_stmts('x = 42; y = 65; del (x, y)') def test_raise(): check_stmts('raise', False) def test_raise_x(): check_stmts('raise TypeError', False) def test_raise_x_from(): check_stmts('raise TypeError from x', False) def test_import_x(): check_stmts('import x', False) def test_import_xy(): check_stmts('import x.y', False) def test_import_xyz(): check_stmts('import x.y.z', False) def test_from_x_import_y(): check_stmts('from x import y', False) def test_from_dot_import_y(): check_stmts('from . import y', False) def test_from_dotx_import_y(): check_stmts('from .x import y', False) def test_from_dotdotx_import_y(): check_stmts('from ..x import y', False) def test_from_dotdotdotx_import_y(): check_stmts('from ...x import y', False) def test_from_dotdotdotdotx_import_y(): check_stmts('from ....x import y', False) def test_from_import_x_y(): check_stmts('import x, y', False) def test_from_import_x_y_z(): check_stmts('import x, y, z', False) def test_from_dot_import_x_y(): check_stmts('from . import x, y', False) def test_from_dot_import_x_y_z(): check_stmts('from . import x, y, z', False) def test_from_dot_import_group_x_y(): check_stmts('from . import (x, y)', False) def test_import_x_as_y(): check_stmts('import x as y', False) def test_import_xy_as_z(): check_stmts('import x.y as z', False) def test_import_x_y_as_z(): check_stmts('import x, y as z', False) def test_import_x_as_y_z(): check_stmts('import x as y, z', False) def test_import_x_as_y_z_as_a(): check_stmts('import x as y, z as a', False) def test_from_dot_import_x_as_y(): check_stmts('from . 
import x as y', False) def test_from_x_import_star(): check_stmts('from x import *', False) def test_from_x_import_y_as_z(): check_stmts('from x import y as z', False) def test_from_x_import_y_as_z_a_as_b(): check_stmts('from x import y as z, a as b', False) def test_from_dotx_import_y_as_z_a_as_b_c_as_d(): check_stmts('from .x import y as z, a as b, c as d', False) def test_continue(): check_stmts('continue', False) def test_break(): check_stmts('break', False) def test_global(): check_stmts('global x', False) def test_global_xy(): check_stmts('global x, y', False) def test_nonlocal_x(): check_stmts('nonlocal x', False) def test_nonlocal_xy(): check_stmts('nonlocal x, y', False) def test_yield(): check_stmts('yield', False) def test_yield_x(): check_stmts('yield x', False) def test_yield_x_comma(): check_stmts('yield x,', False) def test_yield_x_y(): check_stmts('yield x, y', False) def test_yield_from_x(): check_stmts('yield from x', False) def test_return(): check_stmts('return', False) def test_return_x(): check_stmts('return x', False) def test_return_x_comma(): check_stmts('return x,', False) def test_return_x_y(): check_stmts('return x, y', False) def test_if_true(): check_stmts('if True:\n pass') def test_if_true_twolines(): check_stmts('if True:\n pass\n pass') def test_if_true_twolines_deindent(): check_stmts('if True:\n pass\n pass\npass') def test_if_true_else(): check_stmts('if True:\n pass\nelse: \n pass') def test_if_true_x(): check_stmts('if True:\n x = 42') def test_if_switch(): check_stmts('x = 42\nif x == 1:\n pass') def test_if_switch_elif1_else(): check_stmts('x = 42\nif x == 1:\n pass\n' 'elif x == 2:\n pass\nelse:\n pass') def test_if_switch_elif2_else(): check_stmts('x = 42\nif x == 1:\n pass\n' 'elif x == 2:\n pass\n' 'elif x == 3:\n pass\n' 'elif x == 4:\n pass\n' 'else:\n pass') def test_if_nested(): check_stmts('x = 42\nif x == 1:\n pass\n if x == 4:\n pass') def test_while(): check_stmts('while False:\n pass') def test_while_else(): check_stmts('while False:\n pass\nelse:\n pass') def test_for(): check_stmts('for x in range(6):\n pass') def test_for_zip(): check_stmts('for x, y in zip(range(6), "123456"):\n pass') def test_for_idx(): check_stmts('x = [42]\nfor x[0] in range(3):\n pass') def test_for_zip_idx(): check_stmts('x = [42]\nfor x[0], y in zip(range(6), "123456"):\n' ' pass') def test_for_attr(): check_stmts('for x.a in range(3):\n pass', False) def test_for_zip_attr(): check_stmts('for x.a, y in zip(range(6), "123456"):\n pass', False) def test_for_else(): check_stmts('for x in range(6):\n pass\nelse: pass') @skip_if_py34 def test_async_for(): check_stmts("async def f():\n async for x in y:\n pass\n", False) def test_with(): check_stmts('with x:\n pass', False) def test_with_as(): check_stmts('with x as y:\n pass', False) def test_with_xy(): check_stmts('with x, y:\n pass', False) def test_with_x_as_y_z(): check_stmts('with x as y, z:\n pass', False) def test_with_x_as_y_a_as_b(): check_stmts('with x as y, a as b:\n pass', False) def test_with_in_func(): check_stmts("def f():\n with x:\n pass\n") @skip_if_py34 def test_async_with(): check_stmts("async def f():\n async with x as y:\n pass\n", False) def test_try(): check_stmts('try:\n pass\nexcept:\n pass', False) def test_try_except_t(): check_stmts('try:\n pass\nexcept TypeError:\n pass', False) def test_try_except_t_as_e(): check_stmts('try:\n pass\nexcept TypeError as e:\n pass', False) def test_try_except_t_u(): check_stmts('try:\n pass\nexcept (TypeError, SyntaxError):\n pass', False) def 
test_try_except_t_u_as_e(): check_stmts('try:\n pass\nexcept (TypeError, SyntaxError) as e:\n pass', False) def test_try_except_t_except_u(): check_stmts('try:\n pass\nexcept TypeError:\n pass\n' 'except SyntaxError as f:\n pass', False) def test_try_except_else(): check_stmts('try:\n pass\nexcept:\n pass\nelse: pass', False) def test_try_except_finally(): check_stmts('try:\n pass\nexcept:\n pass\nfinally: pass', False) def test_try_except_else_finally(): check_stmts('try:\n pass\nexcept:\n pass\nelse:\n pass' '\nfinally: pass', False) def test_try_finally(): check_stmts('try:\n pass\nfinally: pass', False) def test_func(): check_stmts('def f():\n pass') def test_func_ret(): check_stmts('def f():\n return') def test_func_ret_42(): check_stmts('def f():\n return 42') def test_func_ret_42_65(): check_stmts('def f():\n return 42, 65') def test_func_rarrow(): check_stmts('def f() -> int:\n pass') def test_func_x(): check_stmts('def f(x):\n return x') def test_func_kwx(): check_stmts('def f(x=42):\n return x') def test_func_x_y(): check_stmts('def f(x, y):\n return x') def test_func_x_y_z(): check_stmts('def f(x, y, z):\n return x') def test_func_x_kwy(): check_stmts('def f(x, y=42):\n return x') def test_func_kwx_kwy(): check_stmts('def f(x=65, y=42):\n return x') def test_func_kwx_kwy_kwz(): check_stmts('def f(x=65, y=42, z=1):\n return x') def test_func_x_comma(): check_stmts('def f(x,):\n return x') def test_func_x_y_comma(): check_stmts('def f(x, y,):\n return x') def test_func_x_y_z_comma(): check_stmts('def f(x, y, z,):\n return x') def test_func_x_kwy_comma(): check_stmts('def f(x, y=42,):\n return x') def test_func_kwx_kwy_comma(): check_stmts('def f(x=65, y=42,):\n return x') def test_func_kwx_kwy_kwz_comma(): check_stmts('def f(x=65, y=42, z=1,):\n return x') def test_func_args(): check_stmts('def f(*args):\n return 42') def test_func_args_x(): check_stmts('def f(*args, x):\n return 42') def test_func_args_x_y(): check_stmts('def f(*args, x, y):\n return 42') def test_func_args_x_kwy(): check_stmts('def f(*args, x, y=10):\n return 42') def test_func_args_kwx_y(): check_stmts('def f(*args, x=10, y):\n return 42') def test_func_args_kwx_kwy(): check_stmts('def f(*args, x=42, y=65):\n return 42') def test_func_x_args(): check_stmts('def f(x, *args):\n return 42') def test_func_x_args_y(): check_stmts('def f(x, *args, y):\n return 42') def test_func_x_args_y_z(): check_stmts('def f(x, *args, y, z):\n return 42') def test_func_kwargs(): check_stmts('def f(**kwargs):\n return 42') def test_func_x_kwargs(): check_stmts('def f(x, **kwargs):\n return 42') def test_func_x_y_kwargs(): check_stmts('def f(x, y, **kwargs):\n return 42') def test_func_x_kwy_kwargs(): check_stmts('def f(x, y=42, **kwargs):\n return 42') def test_func_args_kwargs(): check_stmts('def f(*args, **kwargs):\n return 42') def test_func_x_args_kwargs(): check_stmts('def f(x, *args, **kwargs):\n return 42') def test_func_x_y_args_kwargs(): check_stmts('def f(x, y, *args, **kwargs):\n return 42') def test_func_kwx_args_kwargs(): check_stmts('def f(x=10, *args, **kwargs):\n return 42') def test_func_x_kwy_args_kwargs(): check_stmts('def f(x, y=42, *args, **kwargs):\n return 42') def test_func_x_args_y_kwargs(): check_stmts('def f(x, *args, y, **kwargs):\n return 42') def test_func_x_args_kwy_kwargs(): check_stmts('def f(x, *args, y=42, **kwargs):\n return 42') def test_func_args_y_kwargs(): check_stmts('def f(*args, y, **kwargs):\n return 42') def test_func_star_x(): check_stmts('def f(*, x):\n return 42') def 
test_func_star_x_y(): check_stmts('def f(*, x, y):\n return 42') def test_func_star_x_kwargs(): check_stmts('def f(*, x, **kwargs):\n return 42') def test_func_star_kwx_kwargs(): check_stmts('def f(*, x=42, **kwargs):\n return 42') def test_func_x_star_y(): check_stmts('def f(x, *, y):\n return 42') def test_func_x_y_star_z(): check_stmts('def f(x, y, *, z):\n return 42') def test_func_x_kwy_star_y(): check_stmts('def f(x, y=42, *, z):\n return 42') def test_func_x_kwy_star_kwy(): check_stmts('def f(x, y=42, *, z=65):\n return 42') def test_func_x_star_y_kwargs(): check_stmts('def f(x, *, y, **kwargs):\n return 42') def test_func_tx(): check_stmts('def f(x:int):\n return x') def test_func_txy(): check_stmts('def f(x:int, y:float=10.0):\n return x') def test_class(): check_stmts('class X:\n pass') def test_class_obj(): check_stmts('class X(object):\n pass') def test_class_int_flt(): check_stmts('class X(int, object):\n pass') def test_class_obj_kw(): # technically valid syntax, though it will fail to compile check_stmts('class X(object=5):\n pass', False) def test_decorator(): check_stmts('@g\ndef f():\n pass', False) def test_decorator_2(): check_stmts('@h\n@g\ndef f():\n pass', False) def test_decorator_call(): check_stmts('@g()\ndef f():\n pass', False) def test_decorator_call_args(): check_stmts('@g(x, y=10)\ndef f():\n pass', False) def test_decorator_dot_call_args(): check_stmts('@h.g(x, y=10)\ndef f():\n pass', False) def test_decorator_dot_dot_call_args(): check_stmts('@i.h.g(x, y=10)\ndef f():\n pass', False) def test_broken_prompt_func(): code = ('def prompt():\n' " return '{user}'.format(\n" " user='me')\n") check_stmts(code, False) def test_class_with_methods(): code = ('class Test:\n' ' def __init__(self):\n' ' self.msg("hello world")\n' ' def msg(self, m):\n' ' print(m)\n') check_stmts(code, False) def test_nested_functions(): code = ('def test(x):\n' ' def test2(y):\n' ' return y+x\n' ' return test2\n') check_stmts(code, False) def test_function_blank_line(): code = ('def foo():\n' ' ascii_art = [\n' ' "(╯°□°)╯︵ ┻━┻",\n' ' "¯\\_(ツ)_/¯",\n' ' "┻━┻︵ \\(°□°)/ ︵ ┻━┻",\n' ' ]\n' '\n' ' import random\n' ' i = random.randint(0,len(ascii_art)) - 1\n' ' print(" Get to work!")\n' ' print(ascii_art[i])\n') check_stmts(code, False) @skip_if_py34 def test_async_func(): check_stmts('async def f():\n pass\n') @skip_if_py34 def test_async_decorator(): check_stmts('@g\nasync def f():\n pass', False) @skip_if_py34 def test_async_await(): check_stmts("async def f():\n await fut\n", False) # # Xonsh specific syntax # def test_path_literal(): check_xonsh_ast({}, 'p"/foo"', False) check_xonsh_ast({}, 'pr"/foo"', False) check_xonsh_ast({}, 'rp"/foo"', False) check_xonsh_ast({}, 'pR"/foo"', False) check_xonsh_ast({}, 'Rp"/foo"', False) def test_dollar_name(): check_xonsh_ast({'WAKKA': 42}, '$WAKKA') def test_dollar_py(): check_xonsh({'WAKKA': 42}, 'x = "WAKKA"; y = ${x}') def test_dollar_py_test(): check_xonsh_ast({'WAKKA': 42}, '${None or "WAKKA"}') def test_dollar_py_recursive_name(): check_xonsh_ast({'WAKKA': 42, 'JAWAKA': 'WAKKA'}, '${$JAWAKA}') def test_dollar_py_test_recursive_name(): check_xonsh_ast({'WAKKA': 42, 'JAWAKA': 'WAKKA'}, '${None or $JAWAKA}') def test_dollar_py_test_recursive_test(): check_xonsh_ast({'WAKKA': 42, 'JAWAKA': 'WAKKA'}, '${${"JAWA" + $JAWAKA[-2:]}}') def test_dollar_name_set(): check_xonsh({'WAKKA': 42}, '$WAKKA = 42') def test_dollar_py_set(): check_xonsh({'WAKKA': 42}, 'x = "WAKKA"; ${x} = 65') def test_dollar_sub(): check_xonsh_ast({}, '$(ls)', False) def 
test_dollar_sub_space(): check_xonsh_ast({}, '$(ls )', False) def test_ls_dot(): check_xonsh_ast({}, '$(ls .)', False) def test_lambda_in_atparens(): check_xonsh_ast({}, '$(echo hello | @(lambda a, s=None: "hey!") foo bar baz)', False) def test_generator_in_atparens(): check_xonsh_ast({}, '$(echo @(i**2 for i in range(20)))', False) def test_bare_tuple_in_atparens(): check_xonsh_ast({}, '$(echo @("a", 7))', False) def test_nested_madness(): check_xonsh_ast({}, '$(@$(which echo) ls | @(lambda a, s=None: $(@(s.strip()) @(a[1]))) foo -la baz)', False) def test_ls_dot_nesting(): check_xonsh_ast({}, '$(ls @(None or "."))', False) def test_ls_dot_nesting_var(): check_xonsh({}, 'x = "."; $(ls @(None or x))', False) def test_ls_dot_str(): check_xonsh_ast({}, '$(ls ".")', False) def test_ls_nest_ls(): check_xonsh_ast({}, '$(ls $(ls))', False) def test_ls_nest_ls_dashl(): check_xonsh_ast({}, '$(ls $(ls) -l)', False) def test_ls_envvar_strval(): check_xonsh_ast({'WAKKA': '.'}, '$(ls $WAKKA)', False) def test_ls_envvar_listval(): check_xonsh_ast({'WAKKA': ['.', '.']}, '$(ls $WAKKA)', False) def test_bang_sub(): check_xonsh_ast({}, '!(ls)', False) def test_bang_sub_space(): check_xonsh_ast({}, '!(ls )', False) def test_bang_ls_dot(): check_xonsh_ast({}, '!(ls .)', False) def test_bang_ls_dot_nesting(): check_xonsh_ast({}, '!(ls @(None or "."))', False) def test_bang_ls_dot_nesting_var(): check_xonsh({}, 'x = "."; !(ls @(None or x))', False) def test_bang_ls_dot_str(): check_xonsh_ast({}, '!(ls ".")', False) def test_bang_ls_nest_ls(): check_xonsh_ast({}, '!(ls $(ls))', False) def test_bang_ls_nest_ls_dashl(): check_xonsh_ast({}, '!(ls $(ls) -l)', False) def test_bang_ls_envvar_strval(): check_xonsh_ast({'WAKKA': '.'}, '!(ls $WAKKA)', False) def test_bang_ls_envvar_listval(): check_xonsh_ast({'WAKKA': ['.', '.']}, '!(ls $WAKKA)', False) def test_question(): check_xonsh_ast({}, 'range?') def test_dobquestion(): check_xonsh_ast({}, 'range??') def test_question_chain(): check_xonsh_ast({}, 'range?.index?') def test_ls_regex(): check_xonsh_ast({}, '$(ls `[Ff]+i*LE` -l)', False) def test_backtick(): check_xonsh_ast({}, 'print(`.*`)', False) def test_ls_regex_octothorpe(): check_xonsh_ast({}, '$(ls `#[Ff]+i*LE` -l)', False) def test_ls_explicitregex(): check_xonsh_ast({}, '$(ls r`[Ff]+i*LE` -l)', False) def test_rbacktick(): check_xonsh_ast({}, 'print(r`.*`)', False) def test_ls_explicitregex_octothorpe(): check_xonsh_ast({}, '$(ls r`#[Ff]+i*LE` -l)', False) def test_ls_glob(): check_xonsh_ast({}, '$(ls g`[Ff]+i*LE` -l)', False) def test_gbacktick(): check_xonsh_ast({}, 'print(g`.*`)', False) def test_pbacktrick(): check_xonsh_ast({}, 'print(p`.*`)', False) def test_pgbacktick(): check_xonsh_ast({}, 'print(pg`.*`)', False) def test_prbacktick(): check_xonsh_ast({}, 'print(pr`.*`)', False) def test_ls_glob_octothorpe(): check_xonsh_ast({}, '$(ls g`#[Ff]+i*LE` -l)', False) def test_ls_customsearch(): check_xonsh_ast({}, '$(ls @foo`[Ff]+i*LE` -l)', False) def test_custombacktick(): check_xonsh_ast({}, 'print(@foo`.*`)', False) def test_ls_customsearch_octothorpe(): check_xonsh_ast({}, '$(ls @foo`#[Ff]+i*LE` -l)', False) def test_injection(): check_xonsh_ast({}, '$[@$(which python)]', False) def test_rhs_nested_injection(): check_xonsh_ast({}, '$[ls @$(dirname @$(which python))]', False) def test_backtick_octothorpe(): check_xonsh_ast({}, 'print(`#.*`)', False) def test_uncaptured_sub(): check_xonsh_ast({}, '$[ls]', False) def test_hiddenobj_sub(): check_xonsh_ast({}, '![ls]', False) def test_slash_envarv_echo(): 
check_xonsh_ast({}, '![echo $HOME/place]', False) def test_echo_double_eq(): check_xonsh_ast({}, '![echo yo==yo]', False) def test_bang_two_cmds_one_pipe(): check_xonsh_ast({}, '!(ls | grep wakka)', False) def test_bang_three_cmds_two_pipes(): check_xonsh_ast({}, '!(ls | grep wakka | grep jawaka)', False) def test_bang_one_cmd_write(): check_xonsh_ast({}, '!(ls > x.py)', False) def test_bang_one_cmd_append(): check_xonsh_ast({}, '!(ls >> x.py)', False) def test_bang_two_cmds_write(): check_xonsh_ast({}, '!(ls | grep wakka > x.py)', False) def test_bang_two_cmds_append(): check_xonsh_ast({}, '!(ls | grep wakka >> x.py)', False) def test_bang_cmd_background(): check_xonsh_ast({}, '!(emacs ugggh &)', False) def test_bang_cmd_background_nospace(): check_xonsh_ast({}, '!(emacs ugggh&)', False) def test_bang_git_quotes_no_space(): check_xonsh_ast({}, '![git commit -am "wakka"]', False) def test_bang_git_quotes_space(): check_xonsh_ast({}, '![git commit -am "wakka jawaka"]', False) def test_bang_git_two_quotes_space(): check_xonsh({}, '![git commit -am "wakka jawaka"]\n' '![git commit -am "flock jawaka"]\n', False) def test_bang_git_two_quotes_space_space(): check_xonsh({}, '![git commit -am "wakka jawaka" ]\n' '![git commit -am "flock jawaka milwaka" ]\n', False) def test_bang_ls_quotes_3_space(): check_xonsh_ast({}, '![ls "wakka jawaka baraka"]', False) def test_two_cmds_one_pipe(): check_xonsh_ast({}, '$(ls | grep wakka)', False) def test_three_cmds_two_pipes(): check_xonsh_ast({}, '$(ls | grep wakka | grep jawaka)', False) def test_two_cmds_one_and_brackets(): check_xonsh_ast({}, '![ls me] and ![grep wakka]', False) def test_three_cmds_two_ands(): check_xonsh_ast({}, '![ls] and ![grep wakka] and ![grep jawaka]', False) def test_two_cmds_one_doubleamps(): check_xonsh_ast({}, '![ls] && ![grep wakka]', False) def test_three_cmds_two_doubleamps(): check_xonsh_ast({}, '![ls] && ![grep wakka] && ![grep jawaka]', False) def test_two_cmds_one_or(): check_xonsh_ast({}, '![ls] or ![grep wakka]', False) def test_three_cmds_two_ors(): check_xonsh_ast({}, '![ls] or ![grep wakka] or ![grep jawaka]', False) def test_two_cmds_one_doublepipe(): check_xonsh_ast({}, '![ls] || ![grep wakka]', False) def test_three_cmds_two_doublepipe(): check_xonsh_ast({}, '![ls] || ![grep wakka] || ![grep jawaka]', False) def test_one_cmd_write(): check_xonsh_ast({}, '$(ls > x.py)', False) def test_one_cmd_append(): check_xonsh_ast({}, '$(ls >> x.py)', False) def test_two_cmds_write(): check_xonsh_ast({}, '$(ls | grep wakka > x.py)', False) def test_two_cmds_append(): check_xonsh_ast({}, '$(ls | grep wakka >> x.py)', False) def test_cmd_background(): check_xonsh_ast({}, '$(emacs ugggh &)', False) def test_cmd_background_nospace(): check_xonsh_ast({}, '$(emacs ugggh&)', False) def test_git_quotes_no_space(): check_xonsh_ast({}, '$[git commit -am "wakka"]', False) def test_git_quotes_space(): check_xonsh_ast({}, '$[git commit -am "wakka jawaka"]', False) def test_git_two_quotes_space(): check_xonsh({}, '$[git commit -am "wakka jawaka"]\n' '$[git commit -am "flock jawaka"]\n', False) def test_git_two_quotes_space_space(): check_xonsh({}, '$[git commit -am "wakka jawaka" ]\n' '$[git commit -am "flock jawaka milwaka" ]\n', False) def test_ls_quotes_3_space(): check_xonsh_ast({}, '$[ls "wakka jawaka baraka"]', False) def test_echo_comma(): check_xonsh_ast({}, '![echo ,]', False) def test_echo_internal_comma(): check_xonsh_ast({}, '![echo 1,2]', False) def test_comment_only(): check_xonsh_ast({}, '# hello') def 
test_echo_slash_question(): check_xonsh_ast({}, '![echo /?]', False) def test_bad_quotes(): with pytest.raises(SyntaxError): check_xonsh_ast({}, '![echo """hello]', False) def test_redirect(): assert check_xonsh_ast({}, '$[cat < input.txt]', False) assert check_xonsh_ast({}, '$[< input.txt cat]', False) @pytest.mark.parametrize('case', [ '![(cat)]', '![(cat;)]', '![(cd path; ls; cd)]', '![(echo "abc"; sleep 1; echo "def")]', '![(echo "abc"; sleep 1; echo "def") | grep abc]', '![(if True:\n ls\nelse:\n echo not true)]', ]) def test_use_subshell(case): check_xonsh_ast({}, case, False, debug_level=0) @pytest.mark.parametrize('case', [ '$[cat < /path/to/input.txt]', '$[(cat) < /path/to/input.txt]', '$[< /path/to/input.txt cat]', '![< /path/to/input.txt]', '![< /path/to/input.txt > /path/to/output.txt]', ]) def test_redirect_abspath(case): assert check_xonsh_ast({}, case, False) @pytest.mark.parametrize('case', ['', 'o', 'out', '1']) def test_redirect_output(case): assert check_xonsh_ast({}, '$[echo "test" {}> test.txt]'.format(case), False) assert check_xonsh_ast({}, '$[< input.txt echo "test" {}> test.txt]'.format(case), False) assert check_xonsh_ast({}, '$[echo "test" {}> test.txt < input.txt]'.format(case), False) @pytest.mark.parametrize('case', ['e', 'err', '2']) def test_redirect_error(case): assert check_xonsh_ast({}, '$[echo "test" {}> test.txt]'.format(case), False) assert check_xonsh_ast({}, '$[< input.txt echo "test" {}> test.txt]'.format(case), False) assert check_xonsh_ast({}, '$[echo "test" {}> test.txt < input.txt]'.format(case), False) @pytest.mark.parametrize('case', ['a', 'all', '&']) def test_redirect_all(case): assert check_xonsh_ast({}, '$[echo "test" {}> test.txt]'.format(case), False) assert check_xonsh_ast({}, '$[< input.txt echo "test" {}> test.txt]'.format(case), False) assert check_xonsh_ast({}, '$[echo "test" {}> test.txt < input.txt]'.format(case), False) @pytest.mark.parametrize('r', ['e>o', 'e>out', 'err>o', '2>1', 'e>1', 'err>1', '2>out', '2>o', 'err>&1', 'e>&1', '2>&1' ]) @pytest.mark.parametrize('o', ['', 'o', 'out', '1']) def test_redirect_error_to_output(r, o): assert check_xonsh_ast({}, '$[echo "test" {} {}> test.txt]'.format(r, o), False) assert check_xonsh_ast({}, '$[< input.txt echo "test" {} {}> test.txt]'.format(r, o), False) assert check_xonsh_ast({}, '$[echo "test" {} {}> test.txt < input.txt]'.format(r, o), False) @pytest.mark.parametrize('r', ['o>e', 'o>err', 'out>e', '1>2', 'o>2', 'out>2', '1>err', '1>e', 'out>&2', 'o>&2', '1>&2' ]) @pytest.mark.parametrize('e', ['e', 'err', '2']) def test_redirect_output_to_error(r, e): assert check_xonsh_ast({}, '$[echo "test" {} {}> test.txt]'.format(r, e), False) assert check_xonsh_ast({}, '$[< input.txt echo "test" {} {}> test.txt]'.format(r, e), False) assert check_xonsh_ast({}, '$[echo "test" {} {}> test.txt < input.txt]'.format(r, e), False) def test_macro_call_empty(): assert check_xonsh_ast({}, 'f!()', False) MACRO_ARGS = [ 'x', 'True', 'None', 'import os', 'x=10', '"oh no, mom"', '...', ' ... 
', 'if True:\n pass', '{x: y}', '{x: y, 42: 5}', '{1, 2, 3,}', '(x,y)', '(x, y)', '((x, y), z)', 'g()', 'range(10)', 'range(1, 10, 2)', '()', '{}', '[]', '[1, 2]', '@(x)', '!(ls -l)', '![ls -l]', '$(ls -l)', '${x + y}', '$[ls -l]', '@$(which xonsh)', ] @pytest.mark.parametrize('s', MACRO_ARGS) def test_macro_call_one_arg(s): f = 'f!({})'.format(s) tree = check_xonsh_ast({}, f, False, return_obs=True) assert isinstance(tree, AST) args = tree.body.args[1].elts assert len(args) == 1 assert args[0].s == s.strip() @pytest.mark.parametrize('s,t', itertools.product(MACRO_ARGS[::2], MACRO_ARGS[1::2])) def test_macro_call_two_args(s, t): f = 'f!({}, {})'.format(s, t) tree = check_xonsh_ast({}, f, False, return_obs=True) assert isinstance(tree, AST) args = tree.body.args[1].elts assert len(args) == 2 assert args[0].s == s.strip() assert args[1].s == t.strip() @pytest.mark.parametrize('s,t,u', itertools.product(MACRO_ARGS[::3], MACRO_ARGS[1::3], MACRO_ARGS[2::3])) def test_macro_call_three_args(s, t, u): f = 'f!({}, {}, {})'.format(s, t, u) tree = check_xonsh_ast({}, f, False, return_obs=True) assert isinstance(tree, AST) args = tree.body.args[1].elts assert len(args) == 3 assert args[0].s == s.strip() assert args[1].s == t.strip() assert args[2].s == u.strip() @pytest.mark.parametrize('s', MACRO_ARGS) def test_macro_call_one_trailing(s): f = 'f!({0},)'.format(s) tree = check_xonsh_ast({}, f, False, return_obs=True) assert isinstance(tree, AST) args = tree.body.args[1].elts assert len(args) == 1 assert args[0].s == s.strip() @pytest.mark.parametrize('s', MACRO_ARGS) def test_macro_call_one_trailing_space(s): f = 'f!( {0}, )'.format(s) tree = check_xonsh_ast({}, f, False, return_obs=True) assert isinstance(tree, AST) args = tree.body.args[1].elts assert len(args) == 1 assert args[0].s == s.strip() SUBPROC_MACRO_OC = [ ('!(', ')'), ('$(', ')'), ('![', ']'), ('$[', ']'), ] @pytest.mark.parametrize('opener, closer', SUBPROC_MACRO_OC) @pytest.mark.parametrize('body', ['echo!', 'echo !', 'echo ! ']) def test_empty_subprocbang(opener, closer, body): tree = check_xonsh_ast({}, opener + body + closer, False, return_obs=True) assert isinstance(tree, AST) cmd = tree.body.args[0].elts assert len(cmd) == 2 assert cmd[1].s == '' @pytest.mark.parametrize('opener, closer', SUBPROC_MACRO_OC) @pytest.mark.parametrize('body', ['echo!x', 'echo !x', 'echo !x', 'echo ! x']) def test_single_subprocbang(opener, closer, body): tree = check_xonsh_ast({}, opener + body + closer, False, return_obs=True) assert isinstance(tree, AST) cmd = tree.body.args[0].elts assert len(cmd) == 2 assert cmd[1].s == 'x' @pytest.mark.parametrize('opener, closer', SUBPROC_MACRO_OC) @pytest.mark.parametrize('body', ['echo -n!x', 'echo -n!x', 'echo -n !x', 'echo -n ! x']) def test_arg_single_subprocbang(opener, closer, body): tree = check_xonsh_ast({}, opener + body + closer, False, return_obs=True) assert isinstance(tree, AST) cmd = tree.body.args[0].elts assert len(cmd) == 3 assert cmd[2].s == 'x' @pytest.mark.parametrize('opener, closer', SUBPROC_MACRO_OC) @pytest.mark.parametrize('ipener, iloser', [ ('$(', ')'), ('@$(', ')'), ('$[', ']'), ]) @pytest.mark.parametrize('body', ['echo -n!x', 'echo -n!x', 'echo -n !x', 'echo -n ! 
x']) def test_arg_single_subprocbang_nested(opener, closer, ipener, iloser, body): code = opener + 'echo ' + ipener + body + iloser + closer tree = check_xonsh_ast({}, opener + body + closer, False, return_obs=True) assert isinstance(tree, AST) cmd = tree.body.args[0].elts assert len(cmd) == 3 assert cmd[2].s == 'x' @pytest.mark.parametrize('opener, closer', SUBPROC_MACRO_OC) @pytest.mark.parametrize('body', [ 'echo!x + y', 'echo !x + y', 'echo !x + y', 'echo ! x + y', 'timeit! bang! and more', 'timeit! recurse() and more', 'timeit! recurse[] and more', 'timeit! recurse!() and more', 'timeit! recurse![] and more', 'timeit! recurse$() and more', 'timeit! recurse$[] and more', 'timeit! recurse!() and more', 'timeit!!!!', 'timeit! (!)', 'timeit! [!]', 'timeit!!(ls)', 'timeit!"!)"', ]) def test_many_subprocbang(opener, closer, body): tree = check_xonsh_ast({}, opener + body + closer, False, return_obs=True) assert isinstance(tree, AST) cmd = tree.body.args[0].elts assert len(cmd) == 2 assert cmd[1].s == body.partition('!')[-1].strip() WITH_BANG_RAWSUITES = [ 'pass\n', 'x = 42\ny = 12\n', 'export PATH="yo:momma"\necho $PATH\n', ('with q as t:\n' ' v = 10\n' '\n'), ('with q as t:\n' ' v = 10\n' '\n' 'for x in range(6):\n' ' if True:\n' ' pass\n' ' else:\n' ' ls -l\n' '\n' 'a = 42\n'), ] @pytest.mark.parametrize('body', WITH_BANG_RAWSUITES) def test_withbang_single_suite(body): code = 'with! x:\n{}'.format(textwrap.indent(body, ' ')) tree = check_xonsh_ast({}, code, False, return_obs=True, mode='exec') assert isinstance(tree, AST) wither = tree.body[0] assert isinstance(wither, With) assert len(wither.body) == 1 assert isinstance(wither.body[0], Pass) assert len(wither.items) == 1 item = wither.items[0] s = item.context_expr.args[1].s assert s == body @pytest.mark.parametrize('body', WITH_BANG_RAWSUITES) def test_withbang_as_single_suite(body): code = 'with! x as y:\n{}'.format(textwrap.indent(body, ' ')) tree = check_xonsh_ast({}, code, False, return_obs=True, mode='exec') assert isinstance(tree, AST) wither = tree.body[0] assert isinstance(wither, With) assert len(wither.body) == 1 assert isinstance(wither.body[0], Pass) assert len(wither.items) == 1 item = wither.items[0] assert item.optional_vars.id == 'y' s = item.context_expr.args[1].s assert s == body @pytest.mark.parametrize('body', WITH_BANG_RAWSUITES) def test_withbang_single_suite_trailing(body): code = 'with! x:\n{}\nprint(x)\n'.format(textwrap.indent(body, ' ')) tree = check_xonsh_ast({}, code, False, return_obs=True, mode='exec', #debug_level=100 ) assert isinstance(tree, AST) wither = tree.body[0] assert isinstance(wither, With) assert len(wither.body) == 1 assert isinstance(wither.body[0], Pass) assert len(wither.items) == 1 item = wither.items[0] s = item.context_expr.args[1].s assert s == body + '\n' WITH_BANG_RAWSIMPLE = [ 'pass', 'x = 42; y = 12', 'export PATH="yo:momma"; echo $PATH', '[1,\n 2,\n 3]' ] @pytest.mark.parametrize('body', WITH_BANG_RAWSIMPLE) def test_withbang_single_simple(body): code = 'with! x: {}\n'.format(body) tree = check_xonsh_ast({}, code, False, return_obs=True, mode='exec') assert isinstance(tree, AST) wither = tree.body[0] assert isinstance(wither, With) assert len(wither.body) == 1 assert isinstance(wither.body[0], Pass) assert len(wither.items) == 1 item = wither.items[0] s = item.context_expr.args[1].s assert s == body @pytest.mark.parametrize('body', WITH_BANG_RAWSIMPLE) def test_withbang_single_simple_opt(body): code = 'with! 
x as y: {}\n'.format(body) tree = check_xonsh_ast({}, code, False, return_obs=True, mode='exec') assert isinstance(tree, AST) wither = tree.body[0] assert isinstance(wither, With) assert len(wither.body) == 1 assert isinstance(wither.body[0], Pass) assert len(wither.items) == 1 item = wither.items[0] assert item.optional_vars.id == 'y' s = item.context_expr.args[1].s assert s == body @pytest.mark.parametrize('body', WITH_BANG_RAWSUITES) def test_withbang_as_many_suite(body): code = 'with! x as a, y as b, z as c:\n{}' code = code.format(textwrap.indent(body, ' ')) tree = check_xonsh_ast({}, code, False, return_obs=True, mode='exec') assert isinstance(tree, AST) wither = tree.body[0] assert isinstance(wither, With) assert len(wither.body) == 1 assert isinstance(wither.body[0], Pass) assert len(wither.items) == 3 for i, targ in enumerate('abc'): item = wither.items[i] assert item.optional_vars.id == targ s = item.context_expr.args[1].s assert s == body # test invalid expressions def test_syntax_error_del_literal(): with pytest.raises(SyntaxError): PARSER.parse('del 7') def test_syntax_error_del_constant(): with pytest.raises(SyntaxError): PARSER.parse('del True') def test_syntax_error_del_emptytuple(): with pytest.raises(SyntaxError): PARSER.parse('del ()') def test_syntax_error_del_call(): with pytest.raises(SyntaxError): PARSER.parse('del foo()') def test_syntax_error_del_lambda(): with pytest.raises(SyntaxError): PARSER.parse('del lambda x: "yay"') def test_syntax_error_del_ifexp(): with pytest.raises(SyntaxError): PARSER.parse('del x if y else z') @pytest.mark.parametrize('exp', ['[i for i in foo]', '{i for i in foo}', '(i for i in foo)', '{k:v for k,v in d.items()}']) def test_syntax_error_del_comps(exp): with pytest.raises(SyntaxError): PARSER.parse('del {}'.format(exp)) @pytest.mark.parametrize('exp', ['x + y', 'x and y', '-x']) def test_syntax_error_del_ops(exp): with pytest.raises(SyntaxError): PARSER.parse('del {}'.format(exp)) @pytest.mark.parametrize('exp', ['x > y', 'x > y == z']) def test_syntax_error_del_cmp(exp): with pytest.raises(SyntaxError): PARSER.parse('del {}'.format(exp)) def test_syntax_error_lonely_del(): with pytest.raises(SyntaxError): PARSER.parse('del') def test_syntax_error_assign_literal(): with pytest.raises(SyntaxError): PARSER.parse('7 = x') def test_syntax_error_assign_constant(): with pytest.raises(SyntaxError): PARSER.parse('True = 8') def test_syntax_error_assign_emptytuple(): with pytest.raises(SyntaxError): PARSER.parse('() = x') def test_syntax_error_assign_call(): with pytest.raises(SyntaxError): PARSER.parse('foo() = x') def test_syntax_error_assign_lambda(): with pytest.raises(SyntaxError): PARSER.parse('lambda x: "yay" = y') def test_syntax_error_assign_ifexp(): with pytest.raises(SyntaxError): PARSER.parse('x if y else z = 8') @pytest.mark.parametrize('exp', ['[i for i in foo]', '{i for i in foo}', '(i for i in foo)', '{k:v for k,v in d.items()}']) def test_syntax_error_assign_comps(exp): with pytest.raises(SyntaxError): PARSER.parse('{} = z'.format(exp)) @pytest.mark.parametrize('exp', ['x + y', 'x and y', '-x']) def test_syntax_error_assign_ops(exp): with pytest.raises(SyntaxError): PARSER.parse('{} = z'.format(exp)) @pytest.mark.parametrize('exp', ['x > y', 'x > y == z']) def test_syntax_error_assign_cmp(exp): with pytest.raises(SyntaxError): PARSER.parse('{} = a'.format(exp)) def test_syntax_error_augassign_literal(): with pytest.raises(SyntaxError): PARSER.parse('7 += x') def test_syntax_error_augassign_constant(): with 
pytest.raises(SyntaxError): PARSER.parse('True += 8') def test_syntax_error_augassign_emptytuple(): with pytest.raises(SyntaxError): PARSER.parse('() += x') def test_syntax_error_augassign_call(): with pytest.raises(SyntaxError): PARSER.parse('foo() += x') def test_syntax_error_augassign_lambda(): with pytest.raises(SyntaxError): PARSER.parse('lambda x: "yay" += y') def test_syntax_error_augassign_ifexp(): with pytest.raises(SyntaxError): PARSER.parse('x if y else z += 8') @pytest.mark.parametrize('exp', ['[i for i in foo]', '{i for i in foo}', '(i for i in foo)', '{k:v for k,v in d.items()}']) def test_syntax_error_augassign_comps(exp): with pytest.raises(SyntaxError): PARSER.parse('{} += z'.format(exp)) @pytest.mark.parametrize('exp', ['x + y', 'x and y', '-x']) def test_syntax_error_augassign_ops(exp): with pytest.raises(SyntaxError): PARSER.parse('{} += z'.format(exp)) @pytest.mark.parametrize('exp', ['x > y', 'x > y +=+= z']) def test_syntax_error_augassign_cmp(exp): with pytest.raises(SyntaxError): PARSER.parse('{} += a'.format(exp)) xonsh-0.6.0/tests/test_path_completers.py000066400000000000000000000012541320541242300206160ustar00rootroot00000000000000import pytest from xonsh.environ import Env import xonsh.completers.path as xcp def test_pattern_need_quotes(): # just make sure the regex compiles xcp.PATTERN_NEED_QUOTES.match('') def test_complete_path(xonsh_builtins): xonsh_builtins.__xonsh_env__ = {'CASE_SENSITIVE_COMPLETIONS': False, 'GLOB_SORTED': True, 'SUBSEQUENCE_PATH_COMPLETION': False, 'FUZZY_PATH_COMPLETION': False, 'SUGGEST_THRESHOLD': 3, 'CDPATH': set(), } xcp.complete_path('[1-0.1]', '[1-0.1]', 0, 7, dict()) xonsh-0.6.0/tests/test_platform.py000066400000000000000000000006101320541242300172440ustar00rootroot00000000000000from contextlib import contextmanager from unittest.mock import MagicMock import builtins import xonsh.platform as xp def test_githash_value_error(monkeypatch): @contextmanager def mocked_open(*args): yield MagicMock(read=lambda: 'abc123') monkeypatch.setattr(builtins, 'open', mocked_open) sha, date_ = xp.githash() assert date_ is None assert sha is None xonsh-0.6.0/tests/test_prompt.py000066400000000000000000000124431320541242300167500ustar00rootroot00000000000000import os import subprocess as sp import tempfile from unittest.mock import Mock import pytest from xonsh.environ import Env from xonsh.prompt.base import PromptFormatter from xonsh.prompt import vc from tools import skip_if_py34, DummyEnv @pytest.fixture def formatter(xonsh_builtins): return PromptFormatter() @pytest.mark.parametrize('fields', [{ 'a_string': 'cat', 'none': (lambda: None), 'f': (lambda: 'wakka'), }]) @pytest.mark.parametrize('inp, exp', [ ('my {a_string}', 'my cat'), ('my {none}{a_string}', 'my cat'), ('{f} jawaka', 'wakka jawaka'), ]) def test_format_prompt(inp, exp, fields, formatter): obs = formatter(template=inp, fields=fields) assert exp == obs @pytest.mark.parametrize('fields', [{ 'a_string': 'cats', 'a_number': 7, 'empty': '', 'current_job': (lambda: 'sleep'), 'none': (lambda: None), }]) @pytest.mark.parametrize('inp, exp', [ ('{a_number:{0:^3}}cats', ' 7 cats'), ('{current_job:{} | }xonsh', 'sleep | xonsh'), ('{none:{} | }{a_string}{empty:!}', 'cats!'), ('{none:{}}', ''), ('{{{a_string:{{{}}}}}}', '{{cats}}'), ('{{{none:{{{}}}}}}', '{}'), ]) def test_format_prompt_with_format_spec(inp, exp, fields, formatter): obs = formatter(template=inp, fields=fields) assert exp == obs def test_format_prompt_with_broken_template(formatter): for p in ('{user', '{user}{hostname'): 
assert formatter(p) == p # '{{user' will be parsed to '{user' for p in ('{{user}', '{{user'): assert 'user' in formatter(p) @pytest.mark.parametrize('inp', [ '{user', '{{user', '{{user}', '{user}{hostname', ]) def test_format_prompt_with_broken_template_in_func(inp, formatter): # '{{user' will be parsed to '{user' assert '{user' in formatter(lambda: inp) def test_format_prompt_with_invalid_func(formatter, xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env() def p(): foo = bar # raises exception # noqa return '{user}' assert isinstance(formatter(p), str) def test_format_prompt_with_func_that_raises(formatter, capsys, xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env() template = 'tt {zerodiv} tt' exp = 'tt (ERROR:zerodiv) tt' fields = {'zerodiv': lambda: 1/0} obs = formatter(template, fields) assert exp == obs out, err = capsys.readouterr() assert 'prompt: error' in err def test_promptformatter_cache(formatter): spam = Mock() template = '{spam} and {spam}' fields = {'spam': spam} formatter(template, fields) assert spam.call_count == 1 def test_promptformatter_clears_cache(formatter): spam = Mock() template = '{spam} and {spam}' fields = {'spam': spam} formatter(template, fields) formatter(template, fields) assert spam.call_count == 2 # Xonsh interaction with version control systems. VC_BRANCH = {'git': 'master', 'hg': 'default'} @pytest.fixture(scope='module', params=VC_BRANCH.keys()) def test_repo(request): """Return a dict with vc and a temporary dir that is a repository for testing. """ vc = request.param temp_dir = tempfile.mkdtemp() os.chdir(temp_dir) try: sp.call([vc, 'init']) except FileNotFoundError: pytest.skip('cannot find {} executable'.format(vc)) # git needs at least one commit if vc == 'git': with open('test-file', 'w'): pass sp.call(['git', 'add', 'test-file']) sp.call(['git', 'commit', '-m', 'test commit']) return {'name': vc, 'dir': temp_dir} def test_test_repo(test_repo): dotdir = os.path.isdir(os.path.join(test_repo['dir'], '.{}'.format(test_repo['name']))) assert dotdir if test_repo['name'] == 'git': assert os.path.isfile(os.path.join(test_repo['dir'], 'test-file')) def test_no_repo(xonsh_builtins): import queue temp_dir = tempfile.mkdtemp() xonsh_builtins.__xonsh_env__ = Env(VC_BRANCH_TIMEOUT=2, PWD=temp_dir) q = queue.Queue() try: vc._get_hg_root(q) except AttributeError: assert False def test_vc_get_branch(test_repo, xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env(VC_BRANCH_TIMEOUT=2) # get corresponding function from vc module fun = 'get_{}_branch'.format(test_repo['name']) obs = getattr(vc, fun)() if obs is not None: assert obs == VC_BRANCH[test_repo['name']] def test_current_branch_calls_locate_binary_for_empty_cmds_cache(xonsh_builtins): cache = xonsh_builtins.__xonsh_commands_cache__ xonsh_builtins.__xonsh_env__ = DummyEnv(VC_BRANCH_TIMEOUT=1) cache.is_empty = Mock(return_value=True) cache.locate_binary = Mock(return_value='') vc.current_branch() assert cache.locate_binary.called def test_current_branch_does_not_call_locate_binary_for_non_empty_cmds_cache(xonsh_builtins): cache = xonsh_builtins.__xonsh_commands_cache__ xonsh_builtins.__xonsh_env__ = DummyEnv(VC_BRANCH_TIMEOUT=1) cache.is_empty = Mock(return_value=False) cache.locate_binary = Mock(return_value='') # make lazy locate return nothing to avoid running vc binaries cache.lazy_locate_binary = Mock(return_value='') vc.current_branch() assert not cache.locate_binary.called xonsh-0.6.0/tests/test_ptk_highlight.py000066400000000000000000000116601320541242300202540ustar00rootroot00000000000000# -*- 
coding: utf-8 -*- """Test XonshLexer for pygments""" import os import builtins import pytest from pygments.token import (Keyword, Name, String, Error, Number, Operator, Punctuation, Text) from tools import skip_if_on_windows from xonsh.platform import ON_WINDOWS from xonsh.built_ins import load_builtins, unload_builtins from xonsh.pyghooks import XonshLexer @pytest.yield_fixture(autouse=True) def load_command_cache(): load_builtins() if ON_WINDOWS: for key in ('cd', 'bash'): builtins.aliases[key] = lambda *args, **kwargs: None yield unload_builtins() def check_token(code, tokens): """Make sure that all tokens appears in code in order""" lx = XonshLexer() tks = list(lx.get_tokens(code)) for tk in tokens: while tks: if tk == tks[0]: break tks = tks[1:] else: msg = "Token {!r} missing: {!r}".format(tk, list(lx.get_tokens(code))) pytest.fail(msg) break @skip_if_on_windows def test_ls(): check_token('ls -al', [(Name.Builtin, 'ls')]) @skip_if_on_windows def test_bin_ls(): check_token('/bin/ls -al', [(Name.Builtin, '/bin/ls')]) def test_py_print(): check_token('print("hello")', [(Keyword, 'print'), (String.Double, 'hello')]) def test_invalid_cmd(): check_token('non-existance-cmd -al', [(Name, 'non')]) # parse as python check_token('![non-existance-cmd -al]', [(Error, 'non-existance-cmd')]) # parse as error check_token('for i in range(10):', [(Keyword, 'for')]) # as py keyword check_token('(1, )', [(Punctuation, '('), (Number.Integer, '1')]) def test_multi_cmd(): check_token('cd && cd', [(Name.Builtin, 'cd'), (Operator, '&&'), (Name.Builtin, 'cd')]) check_token('cd || non-existance-cmd', [(Name.Builtin, 'cd'), (Operator, '||'), (Error, 'non-existance-cmd') ]) def test_nested(): check_token('echo @("hello")', [(Name.Builtin, 'echo'), (Keyword, '@'), (Punctuation, '('), (String.Double, 'hello'), (Punctuation, ')')]) check_token('print($(cd))', [(Keyword, 'print'), (Punctuation, '('), (Keyword, '$'), (Punctuation, '('), (Name.Builtin, 'cd'), (Punctuation, ')'), (Punctuation, ')')]) check_token(r'print(![echo "])\""])', [(Keyword, 'print'), (Keyword, '!'), (Punctuation, '['), (Name.Builtin, 'echo'), (String.Double, r'"])\""'), (Punctuation, ']')]) def test_path(tmpdir): test_dir = str(tmpdir.mkdir('xonsh-test-highlight-path')) check_token('cd {}'.format(test_dir), [(Name.Builtin, 'cd'), (Name.Constant, test_dir)]) check_token('cd {}-xxx'.format(test_dir), [(Name.Builtin, 'cd'), (Text, '{}-xxx'.format(test_dir)) ]) check_token('cd X={}'.format(test_dir), [(Name.Constant, test_dir)]) with builtins.__xonsh_env__.swap(AUTO_CD=True): check_token(test_dir, [(Name.Constant, test_dir)]) def test_subproc_args(): check_token('cd 192.168.0.1', [(Text, '192.168.0.1')]) def test_backtick(): check_token(r'echo g`.*\w+`', [(String.Affix, 'g'), (String.Backtick, '`'), (String.Regex, '.'), (String.Regex, '*'), (String.Escape, r'\w'), ]) def test_macro(): check_token(r'g!(42, *, 65)', [(Name, 'g'), (Keyword, '!'), (Punctuation, '('), (Number.Integer, '42')]) check_token(r'echo! hello world', [(Name.Builtin, 'echo'), (Keyword, '!'), (String, "hello world")]) check_token(r'bash -c ! 
export var=42; echo $var', [(Name.Builtin, 'bash'), (Text, '-c'), (Keyword, '!'), (String, 'export var=42; echo $var'), ]) xonsh-0.6.0/tests/test_ptk_history.py000066400000000000000000000010351320541242300200010ustar00rootroot00000000000000import pytest try: import prompt_toolkit # NOQA except ImportError: pytest.mark.skip(msg='prompt_toolkit is not available') from xonsh.ptk.history import PromptToolkitHistory @pytest.fixture def history_obj(): """Instantiate `PromptToolkitHistory` and append a line string""" hist = PromptToolkitHistory(load_prev=False) hist.append('line10') return hist def test_obj(history_obj): assert ['line10'] == history_obj.strings assert len(history_obj) == 1 assert ['line10'] == [x for x in history_obj] xonsh-0.6.0/tests/test_ptk_multiline.py000066400000000000000000000067631320541242300203170ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests sample inputs to PTK multiline and checks parser response""" import builtins from collections import namedtuple from unittest.mock import MagicMock, patch import pytest from prompt_toolkit.interface import CommandLineInterface from prompt_toolkit.document import Document from prompt_toolkit.buffer import Buffer, AcceptAction from xonsh.tools import ON_WINDOWS from tools import DummyEnv Context = namedtuple('Context', ['indent', 'buffer', 'accept', 'cli', 'cr']) @pytest.yield_fixture(scope='module') def ctx(): """Context in which the ptk multiline functionality will be tested.""" builtins.__xonsh_env__ = DummyEnv() builtins.__xonsh_env__['INDENT'] = ' ' from xonsh.ptk.key_bindings import carriage_return ptk_buffer = Buffer() ptk_buffer.accept_action = MagicMock(name='accept', spec=AcceptAction) cli = MagicMock(name='cli', spec=CommandLineInterface) yield Context(indent=' ', buffer=ptk_buffer, accept=ptk_buffer.accept_action, cli=cli, cr=carriage_return) del builtins.__xonsh_env__ def test_colon_indent(ctx): document = Document('for i in range(5):') ctx.buffer.set_document(document) ctx.cr(ctx.buffer, ctx.cli) assert ctx.buffer.document.current_line == ctx.indent def test_dedent(ctx): document = Document('\n'+ctx.indent+'pass') ctx.buffer.set_document(document) ctx.cr(ctx.buffer, ctx.cli) assert ctx.buffer.document.current_line == '' document = Document('\n'+2*ctx.indent+'continue') ctx.buffer.set_document(document) ctx.cr(ctx.buffer, ctx.cli) assert ctx.buffer.document.current_line == ctx.indent def test_nodedent(ctx): '''don't dedent if first line of ctx.buffer''' mock = MagicMock(return_value=True) with patch('xonsh.ptk.key_bindings.can_compile', mock): document = Document('pass') ctx.buffer.set_document(document) ctx.cr(ctx.buffer, ctx.cli) assert ctx.accept.mock_calls is not None mock = MagicMock(return_value=True) with patch('xonsh.ptk.key_bindings.can_compile', mock): document = Document(ctx.indent+'pass') ctx.buffer.set_document(document) ctx.cr(ctx.buffer, ctx.cli) assert ctx.accept.mock_calls is not None def test_continuation_line(ctx): document = Document('\nsecond line') ctx.buffer.set_document(document) ctx.cr(ctx.buffer, ctx.cli) assert ctx.buffer.document.current_line == '' def test_trailing_slash(ctx): mock = MagicMock(return_value=True) with patch('xonsh.ptk.key_bindings.can_compile', mock): document = Document('this line will \\') ctx.buffer.set_document(document) ctx.cr(ctx.buffer, ctx.cli) if not ON_WINDOWS: assert ctx.buffer.document.current_line == '' else: assert ctx.accept.mock_calls is not None def test_cant_compile_newline(ctx): mock = MagicMock(return_value=False) with 
patch('xonsh.ptk.key_bindings.can_compile', mock): document = Document('for i in (1, 2, ') ctx.buffer.set_document(document) ctx.cr(ctx.buffer, ctx.cli) assert ctx.buffer.document.current_line == '' def test_can_compile_and_executes(ctx): mock = MagicMock(return_value=True) with patch('xonsh.ptk.key_bindings.can_compile', mock): document = Document('ls') ctx.buffer.set_document(document) ctx.cr(ctx.buffer, ctx.cli) assert ctx.accept.mock_calls is not None xonsh-0.6.0/tests/test_python_completers.py000066400000000000000000000021121320541242300211750ustar00rootroot00000000000000import builtins import pytest from xonsh.completers.python import python_signature_complete @pytest.fixture(autouse=True) def xonsh_execer_autouse(xonsh_builtins, xonsh_execer): return xonsh_execer def foo(x, y, z): pass def bar(wakka='wow', jawaka='mom'): pass def baz(sonata, artica=True): pass def always_true(x, y): return True BASE_CTX = {'foo': foo, 'bar': bar, 'baz': baz} FOO_ARGS = {'x=', 'y=', 'z='} BAR_ARGS = {'wakka=', 'jawaka='} BAZ_ARGS = {'sonata=', 'artica='} @pytest.mark.parametrize('line, end, exp', [ ('foo(', 4, FOO_ARGS), # I have no idea why this one needs to be first ('foo()', 3, set()), ('foo()', 4, FOO_ARGS), ('foo()', 5, set()), ('foo(x, ', 6, FOO_ARGS), ('foo(x, )', 6, FOO_ARGS), ('bar()', 4, BAR_ARGS), ('baz()', 4, BAZ_ARGS), ('foo(bar(', 8, BAR_ARGS), ('foo(bar()', 9, FOO_ARGS), ('foo(bar())', 4, FOO_ARGS), ]) def test_complete_python_signatures(line, end, exp): ctx = dict(BASE_CTX) obs = python_signature_complete('', line, end, ctx, always_true) assert exp == obs xonsh-0.6.0/tests/test_replay.py000066400000000000000000000022511320541242300167170ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the xonsh replay functionality.""" from __future__ import unicode_literals, print_function import os import builtins import pytest from xonsh.shell import Shell from xonsh.execer import Execer from xonsh.replay import Replayer from tools import skip_if_on_darwin HISTDIR = os.path.join(os.path.dirname(__file__), 'histories') @pytest.yield_fixture(scope='module', autouse=True) def ctx(): """Create a global Shell instance to use in all the test.""" ctx = {'PATH': []} execer = Execer(xonsh_ctx=ctx) builtins.__xonsh_shell__ = Shell(execer=execer, ctx=ctx) yield del builtins.__xonsh_shell__ @skip_if_on_darwin def test_echo(): histfile = os.path.join(HISTDIR, 'echo.json') hist = Replayer(histfile).replay() assert len(hist) == 2 @skip_if_on_darwin def test_reecho(): histfile = os.path.join(HISTDIR, 'echo.json') hist = Replayer(histfile).replay() assert len(hist) == 2 @skip_if_on_darwin def test_simple_python(): histfile = os.path.join(HISTDIR, 'simple-python.json') hist = Replayer(histfile).replay() assert len(hist) == 4 assert hist.inps[0].strip() == "print('The Turtles')" xonsh-0.6.0/tests/test_tools.py000066400000000000000000001111041320541242300165610ustar00rootroot00000000000000 # -*- coding: utf-8 -*- """Tests xonsh tools.""" import datetime as dt import os import pathlib import stat from tempfile import TemporaryDirectory import warnings import pytest from xonsh import __version__ from xonsh.platform import ON_WINDOWS from xonsh.lexer import Lexer from xonsh.tools import ( EnvPath, always_false, always_true, argvquote, bool_or_int_to_str, bool_to_str, check_for_partial_string, dynamic_cwd_tuple_to_str, ensure_slice, ensure_string, env_path_to_str, escape_windows_cmd_string, executables_in, expand_case_matching, expand_path, find_next_break, is_bool, is_bool_or_int, is_callable, 
is_dynamic_cwd_width, is_env_path, is_float, is_int, is_logfile_opt, is_string_or_callable, logfile_opt_to_str, str_to_env_path, is_string, subexpr_from_unbalanced, subproc_toks, to_bool, to_bool_or_int, to_dynamic_cwd_tuple, to_logfile_opt, pathsep_to_set, set_to_pathsep, is_string_seq, pathsep_to_seq, seq_to_pathsep, is_nonstring_seq_of_strings, pathsep_to_upper_seq, seq_to_upper_pathsep, expandvars, is_int_as_str, is_slice_as_str, ensure_timestamp, get_portions, is_balanced, subexpr_before_unbalanced, swap_values, get_logical_line, replace_logical_line, check_quotes, deprecated, is_writable_file, balanced_parens) from xonsh.environ import Env from tools import skip_if_on_windows, skip_if_on_unix LEXER = Lexer() LEXER.build() INDENT = ' ' TOOLS_ENV = {'EXPAND_ENV_VARS': True, 'XONSH_ENCODING_ERRORS':'strict'} ENCODE_ENV_ONLY = {'XONSH_ENCODING_ERRORS': 'strict'} PATHEXT_ENV = {'PATHEXT': ['.COM', '.EXE', '.BAT']} def test_subproc_toks_x(): exp = '![x]' obs = subproc_toks('x', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_ls_l(): exp = '![ls -l]' obs = subproc_toks('ls -l', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_git(): s = 'git commit -am "hello doc"' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_git_semi(): s = 'git commit -am "hello doc"' exp = '![{0}];'.format(s) obs = subproc_toks(s + ';', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_git_nl(): s = 'git commit -am "hello doc"' exp = '![{0}]\n'.format(s) obs = subproc_toks(s + '\n', lexer=LEXER, returnline=True) assert exp == obs def test_bash_macro(): s = 'bash -c ! export var=42; echo $var' exp = '![{0}]\n'.format(s) obs = subproc_toks(s + '\n', lexer=LEXER, returnline=True) assert exp == obs def test_python_macro(): s = 'python -c ! 
import os; print(os.path.abspath("/"))' exp = '![{0}]\n'.format(s) obs = subproc_toks(s + '\n', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_indent_ls(): s = 'ls -l' exp = INDENT + '![{0}]'.format(s) obs = subproc_toks(INDENT + s, mincol=len(INDENT), lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_indent_ls_nl(): s = 'ls -l' exp = INDENT + '![{0}]\n'.format(s) obs = subproc_toks(INDENT + s + '\n', mincol=len(INDENT), lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_indent_ls_no_min(): s = 'ls -l' exp = INDENT + '![{0}]'.format(s) obs = subproc_toks(INDENT + s, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_indent_ls_no_min_nl(): s = 'ls -l' exp = INDENT + '![{0}]\n'.format(s) obs = subproc_toks(INDENT + s + '\n', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_indent_ls_no_min_semi(): s = 'ls' exp = INDENT + '![{0}];'.format(s) obs = subproc_toks(INDENT + s + ';', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_indent_ls_no_min_semi_nl(): s = 'ls' exp = INDENT + '![{0}];\n'.format(s) obs = subproc_toks(INDENT + s + ';\n', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_ls_comment(): s = 'ls -l' com = ' # lets list' exp = '![{0}]{1}'.format(s, com) obs = subproc_toks(s + com, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_ls_42_comment(): s = 'ls 42' com = ' # lets list' exp = '![{0}]{1}'.format(s, com) obs = subproc_toks(s + com, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_ls_str_comment(): s = 'ls "wakka"' com = ' # lets list' exp = '![{0}]{1}'.format(s, com) obs = subproc_toks(s + com, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_indent_ls_comment(): ind = ' ' s = 'ls -l' com = ' # lets list' exp = '{0}![{1}]{2}'.format(ind, s, com) obs = subproc_toks(ind + s + com, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_indent_ls_str(): ind = ' ' s = 'ls "wakka"' com = ' # lets list' exp = '{0}![{1}]{2}'.format(ind, s, com) obs = subproc_toks(ind + s + com, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_ls_l_semi_ls_first(): lsdl = 'ls -l' ls = 'ls' s = '{0}; {1}'.format(lsdl, ls) exp = '![{0}]; {1}'.format(lsdl, ls) obs = subproc_toks(s, lexer=LEXER, maxcol=6, returnline=True) assert exp == obs def test_subproc_toks_ls_l_semi_ls_second(): lsdl = 'ls -l' ls = 'ls' s = '{0}; {1}'.format(lsdl, ls) exp = '{0}; ![{1}]'.format(lsdl, ls) obs = subproc_toks(s, lexer=LEXER, mincol=7, returnline=True) assert exp == obs def test_subproc_toks_hello_mom_first(): fst = "echo 'hello'" sec = "echo 'mom'" s = '{0}; {1}'.format(fst, sec) exp = '![{0}]; {1}'.format(fst, sec) obs = subproc_toks(s, lexer=LEXER, maxcol=len(fst)+1, returnline=True) assert exp == obs def test_subproc_toks_hello_mom_second(): fst = "echo 'hello'" sec = "echo 'mom'" s = '{0}; {1}'.format(fst, sec) exp = '{0}; ![{1}]'.format(fst, sec) obs = subproc_toks(s, lexer=LEXER, mincol=len(fst), returnline=True) assert exp == obs def test_subproc_toks_hello_bad_leading_single_quotes(): obs = subproc_toks('echo "hello', lexer=LEXER, returnline=True) assert obs is None def test_subproc_toks_hello_bad_trailing_single_quotes(): obs = subproc_toks('echo hello"', lexer=LEXER, returnline=True) assert obs is None def test_subproc_toks_hello_bad_leading_triple_quotes(): obs = subproc_toks('echo """hello', lexer=LEXER, returnline=True) assert obs is None def 
test_subproc_toks_hello_bad_trailing_triple_quotes(): obs = subproc_toks('echo hello"""', lexer=LEXER, returnline=True) assert obs is None def test_subproc_toks_hello_mom_triple_quotes_nl(): s = 'echo """hello\nmom"""' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_comment(): exp = None obs = subproc_toks('# I am a comment', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_not(): exp = 'not ![echo mom]' obs = subproc_toks('not echo mom', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_paren(): exp = '(![echo mom])' obs = subproc_toks('(echo mom)', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_paren_ws(): exp = '(![echo mom]) ' obs = subproc_toks('(echo mom) ', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_not_paren(): exp = 'not (![echo mom])' obs = subproc_toks('not (echo mom)', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_and_paren(): exp = 'True and (![echo mom])' obs = subproc_toks('True and (echo mom)', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_paren_and_paren(): exp = '(![echo a]) and (echo b)' obs = subproc_toks('(echo a) and (echo b)', maxcol=9, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_semicolon_only(): exp = None obs = subproc_toks(';', lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_pyeval(): s = 'echo @(1+1)' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_twopyeval(): s = 'echo @(1+1) @(40 + 2)' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_pyeval_parens(): s = 'echo @(1+1)' inp = '({0})'.format(s) exp = '(![{0}])'.format(s) obs = subproc_toks(inp, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_twopyeval_parens(): s = 'echo @(1+1) @(40+2)' inp = '({0})'.format(s) exp = '(![{0}])'.format(s) obs = subproc_toks(inp, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_pyeval_nested(): s = 'echo @(min(1, 42))' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_pyeval_nested_parens(): s = 'echo @(min(1, 42))' inp = '({0})'.format(s) exp = '(![{0}])'.format(s) obs = subproc_toks(inp, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_capstdout(): s = 'echo $(echo bat)' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_capproc(): s = 'echo !(echo bat)' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_pyeval_redirect(): s = 'echo @("foo") > bar' inp = '{0}'.format(s) exp = '![{0}]'.format(s) obs = subproc_toks(inp, lexer=LEXER, returnline=True) assert exp == obs def test_subproc_toks_greedy_parens(): s = '(sort)' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True, greedy=True) assert exp == obs def test_subproc_toks_greedy_parens_inp(): s = '(sort) < input.txt' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True, greedy=True) assert exp == obs def test_subproc_toks_greedy_parens_statements(): s = '(echo "abc"; sleep 1; echo "def")' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True, greedy=True) assert exp == obs def 
test_subproc_toks_greedy_parens_statements_with_grep(): s = '(echo "abc"; sleep 1; echo "def") | grep' exp = '![{0}]'.format(s) obs = subproc_toks(s, lexer=LEXER, returnline=True, greedy=True) assert exp == obs LOGICAL_LINE_CASES = [ ("""x = 14 + 2""", 0, 'x = 14 + 2', 1), ("""x = \\ 14 \\ + 2 """, 0, 'x = 14 + 2', 3), ("""y = 16 14 \\ + 2 """, 1, '14 + 2', 2), ('''x = """wow mom""" ''', 0, 'x = """wow\nmom"""', 2), # test from start ("echo --option1 value1 \\\n" " --option2 value2 \\\n" " --optionZ valueZ", 0, "echo --option1 value1 --option2 value2 --optionZ valueZ", 3), # test from second line ("echo --option1 value1 \\\n" " --option2 value2 \\\n" " --optionZ valueZ", 1, "echo --option1 value1 --option2 value2 --optionZ valueZ", 3), ('"""\n', 0, '"""', 1), ] @pytest.mark.parametrize('src, idx, exp_line, exp_n', LOGICAL_LINE_CASES) def test_get_logical_line(src, idx, exp_line, exp_n): lines = src.splitlines() line, n, start = get_logical_line(lines, idx) assert exp_line == line assert exp_n == n @pytest.mark.parametrize('src, idx, exp_line, exp_n', LOGICAL_LINE_CASES) def test_replace_logical_line(src, idx, exp_line, exp_n): lines = src.splitlines() logical = exp_line while idx > 0 and lines[idx-1].endswith('\\'): idx -= 1 replace_logical_line(lines, logical, idx, exp_n) exp = src.replace('\\\n', '').strip() obs = '\n'.join(lines).replace('\\\n', '').strip() assert exp == obs @pytest.mark.parametrize('inp, exp', [ ('f(1,10),x.y', True), ('"x"', True), ("'y'", True), ('b"x"', True), ("r'y'", True), ("f'z'", True), ('"""hello\nmom"""', True), ]) def test_check_quotes(inp, exp): obs = check_quotes(inp) assert exp is obs @pytest.mark.parametrize('inp', [ 'f(1,10),x.y', ]) def test_is_balanced_parens(inp): obs = is_balanced(inp, '(', ')') assert obs @pytest.mark.parametrize('inp', [ 'f(x.', 'f(1,x.' 'f((1,10),x.y', ]) def test_is_not_balanced_parens(inp): obs = is_balanced(inp, '(', ')') assert not obs @pytest.mark.parametrize('inp, exp', [ ('f(x.', 'x.'), ('f(1,x.', 'x.'), ('f((1,10),x.y', 'x.y'), ]) def test_subexpr_from_unbalanced_parens(inp, exp): obs = subexpr_from_unbalanced(inp, '(', ')') assert exp == obs @pytest.mark.parametrize('inp, exp', [ ('f(x.', 'f'), ('f(1,x.', 'f'), ('f((1,10),x.y', 'f'), ('wakka().f((1,10),x.y', '.f'), ('wakka(f((1,10),x.y', 'f'), ('wakka(jawakka().f((1,10),x.y', '.f'), ('wakka(jawakka().f((1,10),x.y)', 'wakka'), ]) def test_subexpr_before_unbalanced_parens(inp, exp): obs = subexpr_before_unbalanced(inp, '(', ')') assert exp == obs @pytest.mark.parametrize('line, exp', [ ('', True), ('wakka jawaka', True), ('rm *; echo hello world', True), ('()', True), ('f()', True), ('echo * yo ; echo eggs', True), ('(', False), (')', False), ('(cmd;', False), ('cmd;)', False), ]) def test_balanced_parens(line, exp): obs = balanced_parens(line, lexer=LEXER) if exp: assert obs else: assert not obs @pytest.mark.parametrize('line, mincol, exp', [ ('ls && echo a', 0, 4), ('ls && echo a', 6, None), ('ls && echo a || echo b', 6, 14), ('(ls) && echo a', 1, 4), ('not ls && echo a', 0, 8), ('not (ls) && echo a', 0, 8), ('bash -c ! export var=42; echo $var', 0, 35), ('python -c ! 
import os; print(os.path.abspath("/"))', 0, 51), ('echo * yo ; echo eggs', 0, 11), ]) def test_find_next_break(line, mincol, exp): obs = find_next_break(line, mincol=mincol, lexer=LEXER) assert exp == obs @pytest.mark.parametrize('inp, exp', [ (42, True), (42.0, False), ('42', False), ('42.0', False), ([42], False), ([], False), (None, False), ('', False) ]) def test_is_int(inp, exp): obs = is_int(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ (42.0, True), (42.000101010010101010101001010101010001011100001101101011100, True), (42, False), ('42', False), ('42.0', False), ([42], False), ([], False), (None, False), ('', False), (False, False), (True, False), ]) def test_is_float(inp, exp): obs = is_float(inp) assert exp == obs def test_is_string_true(): assert is_string('42.0') def test_is_string_false(): assert not is_string(42.0) def test_is_callable_true(): assert is_callable(lambda: 42.0) def test_is_callable_false(): assert not is_callable(42.0) @pytest.mark.parametrize('inp', ['42.0', lambda: 42.0]) def test_is_string_or_callable_true(inp): assert is_string_or_callable(inp) def test_is_string_or_callable_false(): assert not is_string(42.0) @pytest.mark.parametrize('inp', [42, '42']) def test_always_true(inp): assert always_true(inp) @pytest.mark.parametrize('inp', [42, '42']) def test_always_false(inp): assert not always_false(inp) @pytest.mark.parametrize('inp, exp', [(42, '42'), ('42', '42'),]) def test_ensure_string(inp, exp): obs = ensure_string(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ('', set()), ('a', {'a'}), (os.pathsep.join(['a', 'b']), {'a', 'b'}), (os.pathsep.join(['a', 'b', 'c']), {'a', 'b', 'c'}), ]) def test_pathsep_to_set(inp, exp): obs = pathsep_to_set(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ (set(), ''), ({'a'}, 'a'), ({'a', 'b'}, os.pathsep.join(['a', 'b'])), ({'a', 'b', 'c'}, os.pathsep.join(['a', 'b', 'c'])), ]) def test_set_to_pathsep(inp, exp): obs = set_to_pathsep(inp, sort=(len(inp) > 1)) assert exp == obs @pytest.mark.parametrize('inp', ['42.0', ['42.0']]) def test_is_string_seq_true(inp): assert is_string_seq(inp) def test_is_string_seq_false(): assert not is_string_seq([42.0]) def test_is_nonstring_seq_of_strings_true(): assert is_nonstring_seq_of_strings(['42.0']) def test_is_nonstring_seq_of_strings_false(): assert not is_nonstring_seq_of_strings([42.0]) @pytest.mark.parametrize('inp, exp', [ ('', []), ('a', ['a']), (os.pathsep.join(['a', 'b']), ['a', 'b']), (os.pathsep.join(['a', 'b', 'c']), ['a', 'b', 'c']), ]) def test_pathsep_to_seq(inp, exp): obs = pathsep_to_seq(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ([], ''), (['a'], 'a'), (['a', 'b'], os.pathsep.join(['a', 'b'])), (['a', 'b', 'c'], os.pathsep.join(['a', 'b', 'c'])), ]) def test_seq_to_pathsep(inp, exp): obs = seq_to_pathsep(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ('', []), ('a', ['A']), (os.pathsep.join(['a', 'B']), ['A', 'B']), (os.pathsep.join(['A', 'b', 'c']), ['A', 'B', 'C']), ]) def test_pathsep_to_upper_seq(inp, exp): obs = pathsep_to_upper_seq(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ([], ''), (['a'], 'A'), (['a', 'b'], os.pathsep.join(['A', 'B'])), (['a', 'B', 'c'], os.pathsep.join(['A', 'B', 'C'])), ]) def test_seq_to_upper_pathsep(inp, exp): obs = seq_to_upper_pathsep(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ('/home/wakka', False), (['/home/jawaka'], False), (EnvPath(['/home/jawaka']), True), (EnvPath(['jawaka']), True), (EnvPath(b'jawaka:wakka'), 
True), ]) def test_is_env_path(inp, exp): obs = is_env_path(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ('/home/wakka', ['/home/wakka']), ('/home/wakka' + os.pathsep + '/home/jawaka', ['/home/wakka', '/home/jawaka']), (b'/home/wakka', ['/home/wakka']), ]) def test_str_to_env_path(inp, exp): obs = str_to_env_path(inp) assert exp == obs.paths @pytest.mark.parametrize('inp, exp', [ (['/home/wakka'], '/home/wakka'), (['/home/wakka', '/home/jawaka'], '/home/wakka' + os.pathsep + '/home/jawaka'), ]) def test_env_path_to_str(inp, exp): obs = env_path_to_str(inp) assert exp == obs @pytest.mark.parametrize('left, right, exp', [ (EnvPath(['/home/wakka']), ['/home/jawaka'], EnvPath(['/home/wakka', '/home/jawaka'])), (['a'], EnvPath(['b']), EnvPath(['a', 'b'])), (EnvPath(['c']), EnvPath(['d']), EnvPath(['c', 'd'])), ]) def test_env_path_add(left, right, exp): obs = left + right assert is_env_path(obs) assert exp == obs # helper def expand(path): return os.path.expanduser(os.path.expandvars(path)) @pytest.mark.parametrize('env', [TOOLS_ENV, ENCODE_ENV_ONLY]) @pytest.mark.parametrize('inp, exp', [ ('xonsh_dir', 'xonsh_dir'), ('.', '.'), ('../', '../'), ('~/', '~/'), (b'~/../', '~/../'), ]) def test_env_path_getitem(inp, exp, xonsh_builtins, env): xonsh_builtins.__xonsh_env__ = env obs = EnvPath(inp)[0] # call to __getitem__ if env.get('EXPAND_ENV_VARS'): assert expand(exp) == obs else: assert exp == obs @pytest.mark.parametrize('env', [TOOLS_ENV, ENCODE_ENV_ONLY]) @pytest.mark.parametrize('inp, exp', [ (os.pathsep.join(['xonsh_dir', '../', '.', '~/']), ['xonsh_dir', '../', '.', '~/']), ('/home/wakka' + os.pathsep + '/home/jakka' + os.pathsep + '~/', ['/home/wakka', '/home/jakka', '~/']) ]) def test_env_path_multipath(inp, exp, xonsh_builtins, env): # cases that involve path-separated strings xonsh_builtins.__xonsh_env__ = env if env == TOOLS_ENV: obs = [i for i in EnvPath(inp)] assert [expand(i) for i in exp] == obs else: obs = [i for i in EnvPath(inp)] assert [i for i in exp] == obs @pytest.mark.parametrize('inp, exp', [ (pathlib.Path('/home/wakka'), ['/home/wakka'.replace('/', os.sep)]), (pathlib.Path('~/'), ['~']), (pathlib.Path('.'), ['.']), (['/home/wakka', pathlib.Path('/home/jakka'), '~/'], ['/home/wakka', '/home/jakka'.replace('/', os.sep), '~/']), (['/home/wakka', pathlib.Path('../'), '../'], ['/home/wakka', '..', '../']), (['/home/wakka', pathlib.Path('~/'), '~/'], ['/home/wakka', '~', '~/']), ]) def test_env_path_with_pathlib_path_objects(inp, exp, xonsh_builtins): xonsh_builtins.__xonsh_env__ = TOOLS_ENV # iterate over EnvPath to acquire all expanded paths obs = [i for i in EnvPath(inp)] assert [expand(i) for i in exp] == obs @pytest.mark.parametrize('inp', ['42.0', [42.0]]) def test_is_nonstring_seq_of_strings_false(inp): assert not is_nonstring_seq_of_strings(inp) # helper def mkpath(*paths): """Build os-dependent paths properly.""" return os.sep + os.sep.join(paths) @pytest.mark.parametrize('inp, exp', [ ([mkpath('home', 'wakka'), mkpath('home', 'jakka'), mkpath('home', 'yakka')], [mkpath('home', 'wakka'), mkpath('home', 'jakka')]) ]) def test_env_path_slice_get_all_except_last_element(inp, exp): obs = EnvPath(inp)[:-1] assert exp == obs @pytest.mark.parametrize('inp, exp', [ ([mkpath('home', 'wakka'), mkpath('home', 'jakka'), mkpath('home', 'yakka')], [mkpath('home', 'jakka'), mkpath('home', 'yakka')]) ]) def test_env_path_slice_get_all_except_first_element(inp, exp): obs = EnvPath(inp)[1:] assert exp == obs @pytest.mark.parametrize('inp, exp_a, exp_b', [ 
([mkpath('home', 'wakka'), mkpath('home', 'jakka'), mkpath('home', 'yakka'), mkpath('home', 'takka')], [mkpath('home', 'wakka'), mkpath('home', 'yakka')], [mkpath('home', 'jakka'), mkpath('home', 'takka')]) ]) def test_env_path_slice_path_with_step(inp, exp_a, exp_b): obs_a = EnvPath(inp)[0::2] assert exp_a == obs_a obs_b = EnvPath(inp)[1::2] assert exp_b == obs_b @pytest.mark.parametrize('inp, exp', [ ([mkpath('home', 'wakka'), mkpath('home', 'xakka'), mkpath('other', 'zakka'), mkpath('another', 'akka'), mkpath('home', 'bakka')], [mkpath('other', 'zakka'), mkpath('another', 'akka')]) ]) def test_env_path_keep_only_non_home_paths(inp, exp): obs = EnvPath(inp)[2:4] assert exp == obs @pytest.mark.parametrize('inp', [True, False]) def test_is_bool_true(inp): assert True == is_bool(inp) @pytest.mark.parametrize('inp', [1, 'yooo hooo!']) def test_is_bool_false(inp): assert False == is_bool(inp) @pytest.mark.parametrize('inp, exp', [ (True, True), (False, False), (None, False), ('', False), ('0', False), ('False', False), ('NONE', False), ('TRUE', True), ('1', True), (0, False), (1, True), ]) def test_to_bool(inp, exp): obs = to_bool(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [(True, '1'), (False, '')]) def test_bool_to_str(inp, exp): assert bool_to_str(inp) == exp @pytest.mark.parametrize('inp, exp', [ (True, True), (False, True), (1, True), (0, True), ('Yolo', False), (1.0, False), ]) def test_is_bool_or_int(inp, exp): obs = is_bool_or_int(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ (True, True), (False, False), (1, 1), (0, 0), ('', False), (0.0, False), (1.0, True), ('T', True), ('f', False), ('0', 0), ('10', 10), ]) def test_to_bool_or_int(inp, exp): obs = to_bool_or_int(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ (True, '1'), (False, ''), (1, '1'), (0, '0'), ]) def test_bool_or_int_to_str(inp, exp): obs = bool_or_int_to_str(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ (42, slice(42, 43)), (0, slice(0, 1)), (None, slice(None, None, None)), (slice(1, 2), slice(1, 2)), ('-1', slice(-1, None, None)), ('42', slice(42, 43)), ('-42', slice(-42, -41)), ('1:2:3', slice(1, 2, 3)), ('1::3', slice(1, None, 3)), (':', slice(None, None, None)), ('1:', slice(1, None, None)), ('[1:2:3]', slice(1, 2, 3)), ('(1:2:3)', slice(1, 2, 3)), ((4, 8, 10), slice(4, 8, 10)), ([10, 20], slice(10, 20)) ]) def test_ensure_slice(inp, exp): obs = ensure_slice(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ((range(50), slice(25, 40)), list(i for i in range(25, 40))), (([1,2,3,4,5,6,7,8,9,10], [slice(1,4), slice(6, None)]), [2, 3, 4, 7, 8, 9, 10]), (([1,2,3,4,5], [slice(-2, None), slice(-5, -3)]), [4, 5, 1, 2]), ]) def test_get_portions(inp, exp): obs = get_portions(*inp) assert list(obs) == exp @pytest.mark.parametrize('inp', [ '42.3', '3:asd5:1', 'test' , '6.53:100:5', '4:-', '2:15-:3', '50:-:666', object(), [1,5,3,4], ('foo') ]) def test_ensure_slice_invalid(inp): with pytest.raises(ValueError): obs = ensure_slice(inp) @pytest.mark.parametrize('inp, exp', [ ('42', True), ('42.0', False), (42, False), ([42], False), ([], False), (None, False), ('', False), (False, False), (True, False), ]) def test_is_int_as_str(inp, exp): obs = is_int_as_str(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ('20', False), ('20%', False), ((20, 'c'), False), ((20.0, 'm'), False), ((20.0, 'c'), True), ((20.0, '%'), True), ]) def test_is_dynamic_cwd_width(inp, exp): obs = is_dynamic_cwd_width(inp) assert exp == obs @pytest.mark.parametrize('inp, 
exp', [ (42, False), (None, False), ('42', False), ('-42', False), (slice(1,2,3), False), ([], False), (False, False), (True, False), ('1:2:3', True), ('1::3', True), ('1:', True), (':', True), ('[1:2:3]', True), ('(1:2:3)', True), ('r', False), ('r:11', False), ]) def test_is_slice_as_str(inp, exp): obs = is_slice_as_str(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ('throwback.log', True), ('', True), (None, True), (True, False), (False, False), (42, False), ([1, 2, 3], False), ((1, 2), False), (("wrong", "parameter"), False), skip_if_on_windows(('/dev/null', True)) ]) def test_is_logfile_opt(inp, exp): obs = is_logfile_opt(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ (True, None), (False, None), (1, None), (None, None), ('throwback.log', 'throwback.log'), skip_if_on_windows(('/dev/null', '/dev/null')), skip_if_on_windows(('/dev/nonexistent_dev', '/dev/nonexistent_dev' if is_writable_file('/dev/nonexistent_dev') else None)) ]) def test_to_logfile_opt(inp, exp): obs = to_logfile_opt(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ (None, ''), ('', ''), ('throwback.log', 'throwback.log'), ('/dev/null', '/dev/null') ]) def test_logfile_opt_to_str(inp, exp): obs = logfile_opt_to_str(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ('20', (20.0, 'c')), ('20%', (20.0, '%')), ((20, 'c'), (20.0, 'c')), ((20, '%'), (20.0, '%')), ((20.0, 'c'), (20.0, 'c')), ((20.0, '%'), (20.0, '%')), ('inf', (float('inf'), 'c')), ]) def test_to_dynamic_cwd_tuple(inp, exp): obs = to_dynamic_cwd_tuple(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ((20.0, 'c'), '20.0'), ((20.0, '%'), '20.0%'), ((float('inf'), 'c'), 'inf'), ]) def test_dynamic_cwd_tuple_to_str(inp, exp): obs = dynamic_cwd_tuple_to_str(inp) assert exp == obs @pytest.mark.parametrize('st, esc', [ ('', ''), ('foo', 'foo'), ('foo&bar', 'foo^&bar'), ('foo$?-/_"\\', 'foo$?-/_^"\\'), ('^&<>|', '^^^&^<^>^|'), ('()<>','^(^)^<^>'), ]) def test_escape_windows_cmd_string(st, esc): obs = escape_windows_cmd_string(st) assert esc == obs @pytest.mark.parametrize('st, esc, forced', [ ('', '""', None), ('foo', 'foo', '"foo"'), (r'arg1 "hallo, "world"" "\some\path with\spaces")', r'"arg1 \"hallo, \"world\"\" \"\some\path with\spaces\")"', None), (r'"argument"2" argument3 argument4', r'"\"argument\"2\" argument3 argument4"', None), (r'"\foo\bar bar\foo\" arg', r'"\"\foo\bar bar\foo\\\" arg"', None), (r'\\machine\dir\file.bat', r'\\machine\dir\file.bat', r'"\\machine\dir\file.bat"'), (r'"\\machine\dir space\file.bat"', r'"\"\\machine\dir space\file.bat\""', None) ]) def test_argvquote(st, esc, forced): obs = argvquote(st) assert esc == obs if forced is None: forced = esc obs = argvquote(st, force=True) assert forced == obs @pytest.mark.parametrize('inp', ['no string here', '']) def test_partial_string_none(inp): assert check_for_partial_string(inp) == (None, None, None) @pytest.mark.parametrize('leaders', [ (('', 0), ('not empty', 9)), (('not empty', 9), ('', 0)) ]) @pytest.mark.parametrize('prefix', ['b', 'rb', 'r' ]) @pytest.mark.parametrize('quote', ['"', '"""']) def test_partial_string(leaders, prefix, quote): (l, l_len), (f, f_len) = leaders s = prefix + quote t = s + 'test string' + quote t_len = len(t) # single string test_string = l + t + f obs = check_for_partial_string(test_string) exp = l_len, l_len + t_len, s assert obs == exp # single partial test_string = l + f + s + 'test string' obs = check_for_partial_string(test_string) exp = l_len + f_len, None, s assert obs == exp # two strings 
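# (check_for_partial_string returns a (start_index, end_index, quote_prefix) tuple;
#  end_index is None while the trailing string literal is still unterminated)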
test_string = l + t + f + l + t + f obs = check_for_partial_string(test_string) exp = (l_len + t_len + f_len + l_len), (l_len + t_len + f_len + l_len + t_len), s assert obs == exp # one string, one partial test_string = l + t + f + l + s + 'test string' obs = check_for_partial_string(test_string) exp = l_len + t_len + f_len + l_len , None, s assert obs == exp def test_executables_in(xonsh_builtins): expected = set() types = ('file', 'directory', 'brokensymlink') if ON_WINDOWS: # Don't test symlinks on windows since it requires admin types = ('file', 'directory') executables = (True, False) with TemporaryDirectory() as test_path: for _type in types: for executable in executables: fname = '%s_%s' % (_type, executable) if _type == 'none': continue if _type == 'file' and executable: ext = '.exe' if ON_WINDOWS else '' expected.add(fname + ext) else: ext = '' path = os.path.join(test_path, fname + ext) if _type == 'file': with open(path, 'w') as f: f.write(fname) elif _type == 'directory': os.mkdir(path) elif _type == 'brokensymlink': tmp_path = os.path.join(test_path, 'i_wont_exist') with open(tmp_path, 'w') as f: f.write('deleteme') os.symlink(tmp_path, path) os.remove(tmp_path) if executable and not _type == 'brokensymlink': os.chmod(path, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR) if ON_WINDOWS: xonsh_builtins.__xonsh_env__ = PATHEXT_ENV result = set(executables_in(test_path)) else: result = set(executables_in(test_path)) assert expected == result @pytest.mark.parametrize('inp, exp', [ ('yo', '[Yy][Oo]'), ('[a-f]123e', '[a-f]123[Ee]'), ('${HOME}/yo', '${HOME}/[Yy][Oo]'), ('./yo/mom', './[Yy][Oo]/[Mm][Oo][Mm]'), ('Eßen', '[Ee][Ss]?[Ssß][Ee][Nn]'), ]) def test_expand_case_matching(inp, exp): obs = expand_case_matching(inp) assert exp == obs @pytest.mark.parametrize('inp, exp', [ ("foo", "foo"), ("$foo $bar", "bar $bar"), ("$foobar", "$foobar"), ("$foo $spam", "bar eggs"), ("$an_int$spam$a_bool", "42eggsTrue"), ("bar$foo$spam$foo $an_int $none", "barbareggsbar 42 None"), ("$foo/bar", "bar/bar"), ("${'foo'} $spam", "bar eggs"), ("${'foo'} ${'a_bool'}", "bar True"), ("${'foo'}bar", "barbar"), ("${'foo'}/bar", "bar/bar"), ("${\"foo\'}", "${\"foo\'}"), ("$?bar", "$?bar"), ("$foo}bar", "bar}bar"), ("${'foo", "${'foo"), (b"foo", "foo"), (b"$foo bar", "bar bar"), (b"${'foo'}bar", "barbar"), ]) def test_expandvars(inp, exp, xonsh_builtins): """Tweaked for xonsh cases from CPython `test_genericpath.py`""" env = Env({'foo':'bar', 'spam': 'eggs', 'a_bool': True, 'an_int': 42, 'none': None}) xonsh_builtins.__xonsh_env__ = env assert expandvars(inp) == exp @pytest.mark.parametrize('inp, fmt, exp',[ (572392800.0, None, 572392800.0), ('42.1459', None, 42.1459), (dt.datetime(2016, 8, 2, 13, 24), None, dt.datetime(2016, 8, 2, 13, 24).timestamp()), ('2016-8-10 16:14', None, dt.datetime(2016, 8, 10, 16, 14).timestamp()), ('2016/8/10 16:14:40', '%Y/%m/%d %H:%M:%S', dt.datetime(2016, 8, 10, 16, 14, 40).timestamp()), ]) def test_ensure_timestamp(inp, fmt, exp, xonsh_builtins): xonsh_builtins.__xonsh_env__['XONSH_DATETIME_FORMAT'] = '%Y-%m-%d %H:%M' obs = ensure_timestamp(inp, fmt) assert exp == obs @pytest.mark.parametrize('expand_user', [True, False]) @pytest.mark.parametrize('inp, expand_env_vars, exp_end', [ ('~/test.txt', True, '/test.txt'), ('~/$foo', True, '/bar'), ('~/test/$a_bool', True, '/test/True'), ('~/test/$an_int', True, '/test/42'), ('~/test/$none', True, '/test/None'), ('~/$foo', False, '/$foo') ]) def test_expand_path(expand_user, inp, expand_env_vars, exp_end, xonsh_builtins): if os.sep != '/': 
inp = inp.replace('/', os.sep) exp_end = exp_end.replace('/', os.sep) env = Env({'foo':'bar', 'a_bool': True, 'an_int': 42, 'none': None}) env['EXPAND_ENV_VARS'] = expand_env_vars xonsh_builtins.__xonsh_env__ = env path = expand_path(inp, expand_user=expand_user) if expand_user: home_path = os.path.expanduser('~') assert path == home_path + exp_end else: assert path == '~' + exp_end def test_swap_values(): orig = {'x': 1} updates = {'x': 42, 'y': 43} with swap_values(orig, updates): assert orig['x'] == 42 assert orig['y'] == 43 assert orig['x'] == 1 assert 'y' not in orig @pytest.mark.parametrize('arguments, expected_docstring', [ ({'deprecated_in': '0.5.10', 'removed_in': '0.6.0'}, 'my_function has been deprecated in version 0.5.10 and will be removed ' 'in version 0.6.0'), ({'deprecated_in': '0.5.10'}, 'my_function has been deprecated in version 0.5.10'), ({'removed_in': '0.6.0'}, 'my_function has been deprecated and will be removed in version 0.6.0'), ({}, 'my_function has been deprecated') ]) def test_deprecated_docstrings_with_empty_docstring( arguments, expected_docstring): @deprecated(**arguments) def my_function(): pass assert my_function.__doc__ == expected_docstring @pytest.mark.parametrize('arguments, expected_docstring', [ ({'deprecated_in': '0.5.10', 'removed_in': '0.6.0'}, 'Does nothing.\n\nmy_function has been deprecated in version 0.5.10 and ' 'will be removed in version 0.6.0'), ({'deprecated_in': '0.5.10'}, 'Does nothing.\n\nmy_function has been deprecated in version 0.5.10'), ({'removed_in': '0.6.0'}, 'Does nothing.\n\nmy_function has been deprecated and will be removed ' 'in version 0.6.0'), ({}, 'Does nothing.\n\nmy_function has been deprecated') ]) def test_deprecated_docstrings_with_nonempty_docstring( arguments, expected_docstring): @deprecated(**arguments) def my_function(): """Does nothing.""" pass assert my_function.__doc__ == expected_docstring def test_deprecated_warning_raised(): @deprecated() def my_function(): pass with warnings.catch_warnings(record=True) as warning: warnings.simplefilter('always') my_function() assert issubclass(warning.pop().category, DeprecationWarning) def test_deprecated_warning_contains_message(): @deprecated() def my_function(): pass with warnings.catch_warnings(record=True) as warning: warnings.simplefilter('always') my_function() assert str(warning.pop().message) == 'my_function has been deprecated' @pytest.mark.parametrize('expired_version', ['0.1.0', __version__]) def test_deprecated_past_expiry_raises_assertion_error(expired_version): @deprecated(removed_in=expired_version) def my_function(): pass with pytest.raises(AssertionError): my_function() xonsh-0.6.0/tests/test_vox.py000066400000000000000000000106011320541242300162350ustar00rootroot00000000000000"""Vox tests""" import builtins import stat import os import pytest from xontrib.voxapi import Vox from tools import skip_if_on_conda from xonsh.platform import ON_WINDOWS @skip_if_on_conda def test_crud(xonsh_builtins, tmpdir): """ Creates a virtual environment, gets it, enumerates it, and then deletes it. 
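Also verifies that the vox_on_create and vox_on_delete events fire with the environment name.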
""" xonsh_builtins.__xonsh_env__['VIRTUALENV_HOME'] = str(tmpdir) last_event = None @xonsh_builtins.events.vox_on_create def create(name, **_): nonlocal last_event last_event = 'create', name @xonsh_builtins.events.vox_on_delete def delete(name, **_): nonlocal last_event last_event = 'delete', name vox = Vox() vox.create('spam') assert stat.S_ISDIR(tmpdir.join('spam').stat().mode) assert last_event == ('create', 'spam') ve = vox['spam'] assert ve.env == str(tmpdir.join('spam')) assert os.path.isdir(ve.bin) assert 'spam' in vox assert 'spam' in list(vox) del vox['spam'] assert not tmpdir.join('spam').check() assert last_event == ('delete', 'spam') @skip_if_on_conda def test_activate(xonsh_builtins, tmpdir): """ Creates a virtual environment, gets it, enumerates it, and then deletes it. """ xonsh_builtins.__xonsh_env__['VIRTUALENV_HOME'] = str(tmpdir) # I consider the case that the user doesn't have a PATH set to be unreasonable xonsh_builtins.__xonsh_env__.setdefault('PATH', []) last_event = None @xonsh_builtins.events.vox_on_activate def activate(name, **_): nonlocal last_event last_event = 'activate', name @xonsh_builtins.events.vox_on_deactivate def deactivate(name, **_): nonlocal last_event last_event = 'deactivate', name vox = Vox() vox.create('spam') vox.activate('spam') assert xonsh_builtins.__xonsh_env__['VIRTUAL_ENV'] == vox['spam'].env assert last_event == ('activate', 'spam') vox.deactivate() assert 'VIRTUAL_ENV' not in xonsh_builtins.__xonsh_env__ assert last_event == ('deactivate', 'spam') @skip_if_on_conda def test_path(xonsh_builtins, tmpdir): """ Test to make sure Vox properly activates and deactivates by examining $PATH """ xonsh_builtins.__xonsh_env__['VIRTUALENV_HOME'] = str(tmpdir) # I consider the case that the user doesn't have a PATH set to be unreasonable xonsh_builtins.__xonsh_env__.setdefault('PATH', []) oldpath = list(xonsh_builtins.__xonsh_env__['PATH']) vox = Vox() vox.create('eggs') vox.activate('eggs') assert oldpath != xonsh_builtins.__xonsh_env__['PATH'] vox.deactivate() assert oldpath == xonsh_builtins.__xonsh_env__['PATH'] @skip_if_on_conda def test_crud_subdir(xonsh_builtins, tmpdir): """ Creates a virtual environment, gets it, enumerates it, and then deletes it. """ xonsh_builtins.__xonsh_env__['VIRTUALENV_HOME'] = str(tmpdir) vox = Vox() vox.create('spam/eggs') assert stat.S_ISDIR(tmpdir.join('spam', 'eggs').stat().mode) ve = vox['spam/eggs'] assert ve.env == str(tmpdir.join('spam', 'eggs')) assert os.path.isdir(ve.bin) assert 'spam/eggs' in vox assert 'spam' not in vox #assert 'spam/eggs' in list(vox) # This is NOT true on Windows assert 'spam' not in list(vox) del vox['spam/eggs'] assert not tmpdir.join('spam', 'eggs').check() try: import pathlib except ImportError: pass else: @skip_if_on_conda def test_crud_path(xonsh_builtins, tmpdir): """ Creates a virtual environment, gets it, enumerates it, and then deletes it. """ tmp = pathlib.Path(str(tmpdir)) vox = Vox() vox.create(tmp) assert stat.S_ISDIR(tmpdir.join('lib').stat().mode) ve = vox[tmp] assert ve.env == str(tmp) assert os.path.isdir(ve.bin) del vox[tmp] assert not tmpdir.check() @skip_if_on_conda def test_crud_subdir(xonsh_builtins, tmpdir): """ Creates a virtual environment, gets it, enumerates it, and then deletes it. 
""" xonsh_builtins.__xonsh_env__['VIRTUALENV_HOME'] = str(tmpdir) vox = Vox() with pytest.raises(ValueError): if ON_WINDOWS: vox.create('Scripts') else: vox.create('bin') with pytest.raises(ValueError): if ON_WINDOWS: vox.create('spameggs/Scripts') else: vox.create('spameggs/bin') xonsh-0.6.0/tests/test_wizard.py000066400000000000000000000023211320541242300167210ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the xonsh lexer.""" from __future__ import unicode_literals, print_function import os import pytest from xonsh.wizard import (Node, Wizard, Pass, PrettyFormatter, Message, Question, StateVisitor) TREE0 = Wizard(children=[Pass(), Message(message='yo')]) TREE1 = Question('wakka?', {'jawaka': Pass()}) def test_pretty_format_tree0(): exp = ('Wizard(children=[\n' ' Pass(),\n' " Message('yo')\n" '])') obs = PrettyFormatter(TREE0).visit() assert exp == obs assert exp == str(TREE0) assert exp.replace('\n', '') == repr(TREE0) def test_pretty_format_tree1(): exp = ('Question(\n' " question='wakka?',\n" ' responses={\n' " 'jawaka': Pass()\n" ' }\n' ')') obs = PrettyFormatter(TREE1).visit() assert exp == obs assert exp == str(TREE1) assert exp.replace('\n', '') == repr(TREE1) def test_state_visitor_store(): exp = {'rick': [{}, {}, {'and': 'morty'}]} sv = StateVisitor() sv.store('/rick/2/and', 'morty') obs = sv.state assert exp == obs exp['rick'][1]['mr'] = 'meeseeks' sv.store('/rick/-2/mr', 'meeseeks') assert exp == obs xonsh-0.6.0/tests/test_xonsh.xsh000066400000000000000000000010261320541242300167330ustar00rootroot00000000000000import builtins def test_simple(): assert 1 + 1 == 2 def test_envionment(): $USER = 'snail' x = 'USER' assert x in ${...} assert ${'U' + 'SER'} == 'snail' def test_xonsh_party(): orig = builtins.__xonsh_env__.get('XONSH_INTERACTIVE') builtins.__xonsh_env__['XONSH_INTERACTIVE'] = False try: x = 'xonsh' y = 'party' out = $(echo @(x + '-' + y)).strip() assert out == 'xonsh-party', 'Out really was <' + out + '>, sorry.' finally: builtins.__xonsh_env__['XONSH_INTERACTIVE'] = orig xonsh-0.6.0/tests/test_xontribs.py000066400000000000000000000031051320541242300172720ustar00rootroot00000000000000"""xontrib tests, such as they are""" import sys import pytest from xonsh.xontribs import xontrib_metadata, xontrib_context def test_load_xontrib_metadata(): # Simply tests that the xontribs JSON files isn't malformed. xontrib_metadata() @pytest.yield_fixture def tmpmod(tmpdir): """ Same as tmpdir but also adds/removes it to the front of sys.path. Also cleans out any modules loaded as part of the test. 
""" sys.path.insert(0, str(tmpdir)) loadedmods = set(sys.modules.keys()) try: yield tmpdir finally: del sys.path[0] newmods = set(sys.modules.keys()) - loadedmods for m in newmods: del sys.modules[m] def test_noall(tmpmod): """ Tests what get's exported from a module without __all__ """ with tmpmod.mkdir("xontrib").join("spameggs.py").open('w') as x: x.write(""" spam = 1 eggs = 2 _foobar = 3 """) ctx = xontrib_context('spameggs') assert ctx == {'spam': 1, 'eggs': 2} def test_withall(tmpmod): """ Tests what get's exported from a module with __all__ """ with tmpmod.mkdir("xontrib").join("spameggs.py").open('w') as x: x.write(""" __all__ = 'spam', '_foobar' spam = 1 eggs = 2 _foobar = 3 """) ctx = xontrib_context('spameggs') assert ctx == {'spam': 1, '_foobar': 3} def test_xshxontrib(tmpmod): """ Test that .xsh xontribs are loadable """ with tmpmod.mkdir("xontrib").join("script.xsh").open('w') as x: x.write(""" hello = 'world' """) ctx = xontrib_context('script') assert ctx == {'hello': 'world'} xonsh-0.6.0/tests/test_xoreutils.py000066400000000000000000000070101320541242300174570ustar00rootroot00000000000000import os import tempfile from xonsh.xoreutils import _which from xonsh.xoreutils import uptime from xonsh.tools import ON_WINDOWS class TestWhich: # Tests for the _whichgen function which is the only thing we # use from the _which.py module. def setup(self): # Setup two folders with some test files. self.testdirs = [tempfile.TemporaryDirectory(), tempfile.TemporaryDirectory()] if ON_WINDOWS: self.testapps = ['whichtestapp1.exe', 'whichtestapp2.wta'] self.exts = ['.EXE'] else: self.testapps = ['whichtestapp1'] self.exts = None for app in self.testapps: for d in self.testdirs: path = os.path.join(d.name, app) open(path, 'wb').write(b'') os.chmod(path, 0o755) def teardown_module(self): for d in self.testdirs: d.cleanup() def test_whichgen(self): testdir = self.testdirs[0].name arg = 'whichtestapp1' matches = list(_which.whichgen(arg, path=[testdir], exts=self.exts)) assert len(matches) == 1 assert self._file_match(matches[0][0], os.path.join(testdir, arg)) def test_whichgen_failure(self): testdir = self.testdirs[0].name arg = 'not_a_file' matches = list(_which.whichgen(arg, path=[testdir], exts=self.exts)) assert len(matches) == 0 def test_whichgen_verbose(self): testdir = self.testdirs[0].name arg = 'whichtestapp1' matches = list(_which.whichgen(arg, path=[testdir], exts=self.exts, verbose=True)) assert len(matches) == 1 match, from_where = matches[0] assert self._file_match(match, os.path.join(testdir, arg)) assert from_where == 'from given path element 0' def test_whichgen_multiple(self): testdir0 = self.testdirs[0].name testdir1 = self.testdirs[1].name arg = 'whichtestapp1' matches = list(_which.whichgen(arg, path=[testdir0, testdir1], exts=self.exts)) assert len(matches) == 2 assert self._file_match(matches[0][0], os.path.join(testdir0, arg)) assert self._file_match(matches[1][0], os.path.join(testdir1, arg)) if ON_WINDOWS: def test_whichgen_ext_failure(self): testdir = self.testdirs[0].name arg = 'whichtestapp2' matches = list(_which.whichgen(arg, path=[testdir], exts=self.exts)) assert len(matches) == 0 def test_whichgen_ext_success(self): testdir = self.testdirs[0].name arg = 'whichtestapp2' matches = list(_which.whichgen(arg, path=[testdir], exts=['.wta'])) assert len(matches) == 1 assert self._file_match(matches[0][0], os.path.join(testdir, arg)) def _file_match(self, path1, path2): if ON_WINDOWS: path1 = os.path.normpath(os.path.normcase(path1)) path2 = 
os.path.normpath(os.path.normcase(path2)) path1 = os.path.splitext(path1)[0] path2 = os.path.splitext(path2)[0] return path1 == path2 else: return os.path.samefile(path1, path2) def test_uptime(): up = uptime.uptime() assert up is not None assert up > 0.0 def test_boottime(): bt = uptime.boottime() assert bt is not None assert bt > 0.0 assert uptime._BOOTTIME is not None assert uptime._BOOTTIME > 0.0 xonsh-0.6.0/tests/tools.py000066400000000000000000000121541320541242300155270ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tests the xonsh lexer.""" from __future__ import unicode_literals, print_function import os import sys import ast import builtins import platform import subprocess import contextlib from collections import defaultdict from collections.abc import MutableMapping import pytest from xonsh.environ import Env from xonsh.base_shell import BaseShell VER_3_4 = (3, 4) VER_3_5 = (3, 5) VER_3_6 = (3, 6) VER_MAJOR_MINOR = sys.version_info[:2] VER_FULL = sys.version_info[:3] ON_DARWIN = (platform.system() == 'Darwin') ON_WINDOWS = (platform.system() == 'Windows') ON_CONDA = True in [conda in pytest.__file__.lower() for conda in ['conda', 'anaconda', 'miniconda']] ON_TRAVIS = 'TRAVIS' in os.environ and 'CI' in os.environ TEST_DIR = os.path.dirname(__file__) # pytest skip decorators skip_if_py34 = pytest.mark.skipif(VER_MAJOR_MINOR < VER_3_5, reason="Py3.5+ only test") skip_if_lt_py36 = pytest.mark.skipif(VER_MAJOR_MINOR < VER_3_6, reason="Py3.6+ only test") skip_if_on_conda = pytest.mark.skipif(ON_CONDA, reason="Conda and virtualenv _really_ hate each other") skip_if_on_windows = pytest.mark.skipif(ON_WINDOWS, reason='Unix stuff') skip_if_on_unix = pytest.mark.skipif(not ON_WINDOWS, reason='Windows stuff') skip_if_on_darwin = pytest.mark.skipif(ON_DARWIN, reason='not Mac friendly') skip_if_on_travis = pytest.mark.skipif(ON_TRAVIS, reason='not Travis CI friendly') def sp(cmd): return subprocess.check_output(cmd, universal_newlines=True) class DummyStyler(): styles = defaultdict(str) class DummyBaseShell(BaseShell): def __init__(self): self.styler = DummyStyler() class DummyShell: def settitle(self): pass _shell = None @property def shell(self): if self._shell is None: self._shell = DummyBaseShell() return self._shell class DummyCommandsCache: def locate_binary(self, name): return os.path.join(os.path.dirname(__file__), 'bin', name) def predict_threadable(self, cmd): return True class DummyHistory: last_cmd_rtn = 0 last_cmd_out = '' def append(self, x): pass def flush(self, *args, **kwargs): pass class DummyEnv(MutableMapping): DEFAULTS = { 'XONSH_DEBUG': 1, 'XONSH_COLOR_STYLE': 'default', } def __init__(self, *args, **kwargs): self._d = self.DEFAULTS.copy() self._d.update(dict(*args, **kwargs)) def detype(self): return {k: str(v) for k, v in self._d.items()} def __getitem__(self, k): return self._d[k] def __setitem__(self, k, v): self._d[k] = v def __delitem__(self, k): del self._d[k] def __len__(self): return len(self._d) def __iter__(self): yield from self._d @contextlib.contextmanager def swap(self, other=None, **kwargs): old = {} # single positional argument should be a dict-like object if other is not None: for k, v in other.items(): old[k] = self.get(k, NotImplemented) self[k] = v # kwargs could also have been sent in for k, v in kwargs.items(): old[k] = self.get(k, NotImplemented) self[k] = v yield self # restore the values for k, v in old.items(): if v is NotImplemented: del self[k] else: self[k] = v # # Execer tools # def check_exec(input, **kwargs): if not 
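# ------------------------------------------------------------------
# Illustrative sketch (relates to DummyEnv above): swap() overrides
# values only inside the with-block and restores -- or removes --
# them on exit, mirroring the Env.swap() contract that aliases.py
# relies on (e.g. ``with env.swap(ARGS=...)``).
env = DummyEnv()
with env.swap(XONSH_DEBUG=0, NEW_KEY='tmp'):
    assert env['XONSH_DEBUG'] == 0
    assert env['NEW_KEY'] == 'tmp'
assert env['XONSH_DEBUG'] == 1   # restored from DEFAULTS
assert 'NEW_KEY' not in env      # removed again on exit
# ------------------------------------------------------------------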
input.endswith('\n'): input += '\n' builtins.__xonsh_execer__.exec(input, **kwargs) return True def check_eval(input): builtins.__xonsh_env__ = Env({'AUTO_CD': False, 'XONSH_ENCODING': 'utf-8', 'XONSH_ENCODING_ERRORS': 'strict', 'PATH': []}) if ON_WINDOWS: builtins.__xonsh_env__['PATHEXT'] = ['.COM', '.EXE', '.BAT', '.CMD'] builtins.__xonsh_execer__.eval(input) return True def check_parse(input): tree = builtins.__xonsh_execer__.parse(input, ctx=None) return tree # # Parser tools # def nodes_equal(x, y): __tracebackhide__ = True assert type(x) == type(y), "Ast nodes do not have the same type: '%s' != '%s' " % (type(x), type(y)) if isinstance(x, (ast.Expr, ast.FunctionDef, ast.ClassDef)): assert x.lineno == y.lineno, "Ast nodes do not have the same line number : %s != %s" % (x.lineno, y.lineno) assert x.col_offset == y.col_offset, "Ast nodes do not have the same column offset number : %s != %s" % (x.col_offset, y.col_offset) for (xname, xval), (yname, yval) in zip(ast.iter_fields(x), ast.iter_fields(y)): assert xname == yname, "Ast nodes fields differ : %s (of type %s) != %s (of type %s)" % (xname, type(xval), yname, type(yval)) assert type(xval) == type(yval), "Ast nodes fields differ : %s (of type %s) != %s (of type %s)" % (xname, type(xval), yname, type(yval)) for xchild, ychild in zip(ast.iter_child_nodes(x), ast.iter_child_nodes(y)): assert nodes_equal(xchild, ychild), "Ast node children differs" return True xonsh-0.6.0/tests/xpack/000077500000000000000000000000001320541242300151205ustar00rootroot00000000000000xonsh-0.6.0/tests/xpack/__init__.py000066400000000000000000000000001320541242300172170ustar00rootroot00000000000000xonsh-0.6.0/tests/xpack/relimp.xsh000066400000000000000000000000751320541242300171360ustar00rootroot00000000000000from . import sample y = sample.x + 'dark chest of wonders' xonsh-0.6.0/tests/xpack/sample.xsh000066400000000000000000000002101320541242300171160ustar00rootroot00000000000000# I am a test module. aliases['echo'] = lambda args, stdin=None: print(' '.join(args)) $WAKKA = "jawaka" x = $(echo "hello mom" $WAKKA)xonsh-0.6.0/tests/xpack/sub/000077500000000000000000000000001320541242300157115ustar00rootroot00000000000000xonsh-0.6.0/tests/xpack/sub/__init__.py000066400000000000000000000000001320541242300200100ustar00rootroot00000000000000xonsh-0.6.0/tests/xpack/sub/sample.xsh000066400000000000000000000002101320541242300177070ustar00rootroot00000000000000# I am a test module. 
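# ------------------------------------------------------------------
# Illustrative sketch (relates to nodes_equal() in tests/tools.py above,
# not to this sample package): the helper compares two freshly parsed
# trees field by field and raises a descriptive AssertionError at the
# first mismatch, which keeps parser-test failures readable.  A minimal
# example, assuming it runs from the tests directory so the local
# ``tools`` module is importable:
import ast

from tools import nodes_equal

assert nodes_equal(ast.parse('x = 1'), ast.parse('x = 1'))
# ------------------------------------------------------------------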
aliases['echo'] = lambda args, stdin=None: print(' '.join(args)) $WAKKA = "jawaka" x = $(echo "hello mom" $WAKKA)xonsh-0.6.0/travis.yml000077700000000000000000000000001320541242300170072.travis.ymlustar00rootroot00000000000000xonsh-0.6.0/xonsh-in-docker.py000077500000000000000000000027521320541242300162430ustar00rootroot00000000000000#!/usr/bin/env python3 import subprocess import os import argparse program_description = """Build and run Xonsh in a fresh, controlled environment using docker """ parser = argparse.ArgumentParser(description=program_description) parser.add_argument('env', nargs='*', default=[], metavar='ENV=value') parser.add_argument('--python', '-p', default='3.4', metavar='python_version') parser.add_argument('--ptk', '-t', default='1.00', metavar='ptk_version') parser.add_argument('--keep', action='store_true') parser.add_argument('--build', action='store_true') parser.add_argument('--command', '-c', default='xonsh', metavar='command') args = parser.parse_args() docker_script = """ from python:{python_version} RUN pip install --upgrade pip && pip install \\ ply \\ prompt-toolkit=={ptk_version} \\ pygments RUN mkdir /xonsh WORKDIR /xonsh ADD ./ ./ RUN python setup.py install """.format( python_version=args.python, ptk_version=args.ptk) print('Building and running Xonsh') print('Using python ', args.python) print('Using prompt-toolkit ', args.ptk) with open('./Dockerfile', 'w+') as f: f.write(docker_script) env_string = ' '.join(args.env) subprocess.call(['docker', 'build', '-t', 'xonsh', '.']) os.remove('./Dockerfile') if not args.build: run_args = ['docker', 'run', '-ti'] for e in args.env: run_args += ['-e', e] if not args.keep: run_args.append('--rm') run_args += ['xonsh', args.command] subprocess.call(run_args) xonsh-0.6.0/xonsh/000077500000000000000000000000001320541242300140075ustar00rootroot00000000000000xonsh-0.6.0/xonsh/__init__.py000066400000000000000000000072401320541242300161230ustar00rootroot00000000000000__version__ = '0.6.0' # amalgamate exclude jupyter_kernel parser_table parser_test_table pyghooks # amalgamate exclude winutils wizard pytest_plugin fs macutils import os as _os if _os.getenv('XONSH_DEBUG', ''): pass else: import sys as _sys try: from xonsh import __amalgam__ completer = __amalgam__ _sys.modules['xonsh.completer'] = __amalgam__ contexts = __amalgam__ _sys.modules['xonsh.contexts'] = __amalgam__ lazyasd = __amalgam__ _sys.modules['xonsh.lazyasd'] = __amalgam__ lazyjson = __amalgam__ _sys.modules['xonsh.lazyjson'] = __amalgam__ platform = __amalgam__ _sys.modules['xonsh.platform'] = __amalgam__ pretty = __amalgam__ _sys.modules['xonsh.pretty'] = __amalgam__ codecache = __amalgam__ _sys.modules['xonsh.codecache'] = __amalgam__ lazyimps = __amalgam__ _sys.modules['xonsh.lazyimps'] = __amalgam__ parser = __amalgam__ _sys.modules['xonsh.parser'] = __amalgam__ tokenize = __amalgam__ _sys.modules['xonsh.tokenize'] = __amalgam__ tools = __amalgam__ _sys.modules['xonsh.tools'] = __amalgam__ ast = __amalgam__ _sys.modules['xonsh.ast'] = __amalgam__ color_tools = __amalgam__ _sys.modules['xonsh.color_tools'] = __amalgam__ commands_cache = __amalgam__ _sys.modules['xonsh.commands_cache'] = __amalgam__ diff_history = __amalgam__ _sys.modules['xonsh.diff_history'] = __amalgam__ events = __amalgam__ _sys.modules['xonsh.events'] = __amalgam__ foreign_shells = __amalgam__ _sys.modules['xonsh.foreign_shells'] = __amalgam__ jobs = __amalgam__ _sys.modules['xonsh.jobs'] = __amalgam__ jsonutils = __amalgam__ _sys.modules['xonsh.jsonutils'] = __amalgam__ lexer = 
__amalgam__ _sys.modules['xonsh.lexer'] = __amalgam__ openpy = __amalgam__ _sys.modules['xonsh.openpy'] = __amalgam__ xontribs = __amalgam__ _sys.modules['xonsh.xontribs'] = __amalgam__ ansi_colors = __amalgam__ _sys.modules['xonsh.ansi_colors'] = __amalgam__ dirstack = __amalgam__ _sys.modules['xonsh.dirstack'] = __amalgam__ proc = __amalgam__ _sys.modules['xonsh.proc'] = __amalgam__ shell = __amalgam__ _sys.modules['xonsh.shell'] = __amalgam__ style_tools = __amalgam__ _sys.modules['xonsh.style_tools'] = __amalgam__ timings = __amalgam__ _sys.modules['xonsh.timings'] = __amalgam__ xonfig = __amalgam__ _sys.modules['xonsh.xonfig'] = __amalgam__ base_shell = __amalgam__ _sys.modules['xonsh.base_shell'] = __amalgam__ environ = __amalgam__ _sys.modules['xonsh.environ'] = __amalgam__ inspectors = __amalgam__ _sys.modules['xonsh.inspectors'] = __amalgam__ readline_shell = __amalgam__ _sys.modules['xonsh.readline_shell'] = __amalgam__ replay = __amalgam__ _sys.modules['xonsh.replay'] = __amalgam__ tracer = __amalgam__ _sys.modules['xonsh.tracer'] = __amalgam__ aliases = __amalgam__ _sys.modules['xonsh.aliases'] = __amalgam__ built_ins = __amalgam__ _sys.modules['xonsh.built_ins'] = __amalgam__ execer = __amalgam__ _sys.modules['xonsh.execer'] = __amalgam__ imphooks = __amalgam__ _sys.modules['xonsh.imphooks'] = __amalgam__ main = __amalgam__ _sys.modules['xonsh.main'] = __amalgam__ del __amalgam__ except ImportError: pass del _sys del _os # amalgamate end xonsh-0.6.0/xonsh/__main__.py000066400000000000000000000000431320541242300160760ustar00rootroot00000000000000from xonsh.main import main main() xonsh-0.6.0/xonsh/aliases.py000066400000000000000000000523631320541242300160130ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Aliases for the xonsh shell.""" import os import sys import shlex import inspect import argparse import builtins import collections.abc as cabc from xonsh.lazyasd import lazyobject from xonsh.dirstack import cd, pushd, popd, dirs, _get_cwd from xonsh.environ import locate_binary from xonsh.foreign_shells import foreign_shell_data from xonsh.jobs import jobs, fg, bg, clean_jobs from xonsh.platform import (ON_ANACONDA, ON_DARWIN, ON_WINDOWS, ON_FREEBSD, ON_NETBSD) from xonsh.tools import unthreadable, print_color from xonsh.replay import replay_main from xonsh.timings import timeit_alias from xonsh.tools import argvquote, escape_windows_cmd_string, to_bool, swap_values from xonsh.xontribs import xontribs_main import xonsh.completers._aliases as xca import xonsh.history.main as xhm import xonsh.xoreutils.which as xxw class Aliases(cabc.MutableMapping): """Represents a location to hold and look up aliases.""" def __init__(self, *args, **kwargs): self._raw = {} self.update(*args, **kwargs) def get(self, key, default=None): """Returns the (possibly modified) value. If the key is not present, then `default` is returned. If the value is callable, it is returned without modification. If it is an iterable of strings it will be evaluated recursively to expand other aliases, resulting in a new list or a "partially applied" callable. """ val = self._raw.get(key) if val is None: return default elif isinstance(val, cabc.Iterable) or callable(val): return self.eval_alias(val, seen_tokens={key}) else: msg = 'alias of {!r} has an inappropriate type: {!r}' raise TypeError(msg.format(key, val)) def eval_alias(self, value, seen_tokens=frozenset(), acc_args=()): """ "Evaluates" the alias `value`, by recursively looking up the leftmost token and "expanding" if it's also an alias. 
A value like ["cmd", "arg"] might transform like this: > ["cmd", "arg"] -> ["ls", "-al", "arg"] -> callable() where `cmd=ls -al` and `ls` is an alias with its value being a callable. The resulting callable will be "partially applied" with ["-al", "arg"]. """ # Beware of mutability: default values for keyword args are evaluated # only once. if callable(value): if acc_args: # Partial application def _alias(args, stdin=None): args = list(acc_args) + args return value(args, stdin=stdin) return _alias else: return value else: expand_path = builtins.__xonsh_expand_path__ token, *rest = map(expand_path, value) if token in seen_tokens or token not in self._raw: # ^ Making sure things like `egrep=egrep --color=auto` works, # and that `l` evals to `ls --color=auto -CF` if `l=ls -CF` # and `ls=ls --color=auto` rtn = [token] rtn.extend(rest) rtn.extend(acc_args) return rtn else: seen_tokens = seen_tokens | {token} acc_args = rest + list(acc_args) return self.eval_alias(self._raw[token], seen_tokens, acc_args) def expand_alias(self, line): """Expands any aliases present in line if alias does not point to a builtin function and if alias is only a single command. """ word = line.split(' ', 1)[0] if word in builtins.aliases and isinstance(self.get(word), cabc.Sequence): word_idx = line.find(word) expansion = ' '.join(self.get(word)) line = line[:word_idx] + expansion + line[word_idx+len(word):] return line # # Mutable mapping interface # def __getitem__(self, key): return self._raw[key] def __setitem__(self, key, val): if isinstance(val, str): self._raw[key] = shlex.split(val) else: self._raw[key] = val def __delitem__(self, key): del self._raw[key] def update(self, *args, **kwargs): for key, val in dict(*args, **kwargs).items(): self[key] = val def __iter__(self): yield from self._raw def __len__(self): return len(self._raw) def __str__(self): return str(self._raw) def __repr__(self): return '{0}.{1}({2})'.format(self.__class__.__module__, self.__class__.__name__, self._raw) def _repr_pretty_(self, p, cycle): name = '{0}.{1}'.format(self.__class__.__module__, self.__class__.__name__) with p.group(0, name + '(', ')'): if cycle: p.text('...') elif len(self): p.break_() p.pretty(dict(self)) def xonsh_exit(args, stdin=None): """Sends signal to exit shell.""" if not clean_jobs(): # Do not exit if jobs not cleaned up return None, None builtins.__xonsh_exit__ = True print() # gimme a newline return None, None @lazyobject def _SOURCE_FOREIGN_PARSER(): desc = "Sources a file written in a foreign shell language." 
parser = argparse.ArgumentParser('source-foreign', description=desc) parser.add_argument('shell', help='Name or path to the foreign shell') parser.add_argument('files_or_code', nargs='+', help='file paths to source or code in the target ' 'language.') parser.add_argument('-i', '--interactive', type=to_bool, default=True, help='whether the sourced shell should be interactive', dest='interactive') parser.add_argument('-l', '--login', type=to_bool, default=False, help='whether the sourced shell should be login', dest='login') parser.add_argument('--envcmd', default=None, dest='envcmd', help='command to print environment') parser.add_argument('--aliascmd', default=None, dest='aliascmd', help='command to print aliases') parser.add_argument('--extra-args', default=(), dest='extra_args', type=(lambda s: tuple(s.split())), help='extra arguments needed to run the shell') parser.add_argument('-s', '--safe', type=to_bool, default=True, help='whether the source shell should be run safely, ' 'and not raise any errors, even if they occur.', dest='safe') parser.add_argument('-p', '--prevcmd', default=None, dest='prevcmd', help='command(s) to run before any other commands, ' 'replaces traditional source.') parser.add_argument('--postcmd', default='', dest='postcmd', help='command(s) to run after all other commands') parser.add_argument('--funcscmd', default=None, dest='funcscmd', help='code to find locations of all native functions ' 'in the shell language.') parser.add_argument('--sourcer', default=None, dest='sourcer', help='the source command in the target shell ' 'language, default: source.') parser.add_argument('--use-tmpfile', type=to_bool, default=False, help='whether the commands for source shell should be ' 'written to a temporary file.', dest='use_tmpfile') parser.add_argument('--seterrprevcmd', default=None, dest='seterrprevcmd', help='command(s) to set exit-on-error before any' 'other commands.') parser.add_argument('--seterrpostcmd', default=None, dest='seterrpostcmd', help='command(s) to set exit-on-error after all' 'other commands.') parser.add_argument('--overwrite-aliases', default=False, action='store_true', dest='overwrite_aliases', help='flag for whether or not sourced aliases should ' 'replace the current xonsh aliases.') parser.add_argument('--show', default=False, action='store_true', dest='show', help='Will show the script output.') parser.add_argument('-d', '--dry-run', default=False, action='store_true', dest='dryrun', help='Will not actually source the file.') return parser def source_foreign(args, stdin=None, stdout=None, stderr=None): """Sources a file written in a foreign shell language.""" ns = _SOURCE_FOREIGN_PARSER.parse_args(args) if ns.prevcmd is not None: pass # don't change prevcmd if given explicitly elif os.path.isfile(ns.files_or_code[0]): # we have filename to source ns.prevcmd = '{} "{}"'.format(ns.sourcer, '" "'.join(ns.files_or_code)) elif ns.prevcmd is None: ns.prevcmd = ' '.join(ns.files_or_code) # code to run, no files foreign_shell_data.cache_clear() # make sure that we don't get prev src fsenv, fsaliases = foreign_shell_data(shell=ns.shell, login=ns.login, interactive=ns.interactive, envcmd=ns.envcmd, aliascmd=ns.aliascmd, extra_args=ns.extra_args, safe=ns.safe, prevcmd=ns.prevcmd, postcmd=ns.postcmd, funcscmd=ns.funcscmd, sourcer=ns.sourcer, use_tmpfile=ns.use_tmpfile, seterrprevcmd=ns.seterrprevcmd, seterrpostcmd=ns.seterrpostcmd, show=ns.show, dryrun=ns.dryrun) if fsenv is None: if ns.dryrun: return else: msg = 'xonsh: error: Source failed: 
{}\n'.format(ns.prevcmd) return (None, msg, 1) # apply results env = builtins.__xonsh_env__ denv = env.detype() for k, v in fsenv.items(): if k in denv and v == denv[k]: continue # no change from original env[k] = v # Remove any env-vars that were unset by the script. for k in denv: if k not in fsenv: env.pop(k, None) # Update aliases baliases = builtins.aliases for k, v in fsaliases.items(): if k in baliases and v == baliases[k]: continue # no change from original elif ns.overwrite_aliases or k not in baliases: baliases[k] = v else: msg = ('Skipping application of {0!r} alias from {1!r} ' 'since it shares a name with an existing xonsh alias. ' 'Use "--overwrite-alias" option to apply it anyway.') print(msg.format(k, ns.shell), file=stderr) def source_alias(args, stdin=None): """Executes the contents of the provided files in the current context. If sourced file isn't found in cwd, search for file along $PATH to source instead. """ env = builtins.__xonsh_env__ encoding = env.get('XONSH_ENCODING') errors = env.get('XONSH_ENCODING_ERRORS') for i, fname in enumerate(args): fpath = fname if not os.path.isfile(fpath): fpath = locate_binary(fname) if fpath is None: if env.get('XONSH_DEBUG'): print('source: {}: No such file'.format(fname), file=sys.stderr) if i == 0: raise RuntimeError('must source at least one file, ' + fname + 'does not exist.') break _, fext = os.path.splitext(fpath) if fext and fext != '.xsh' and fext != '.py': raise RuntimeError('attempting to source non-xonsh file! If you are ' 'trying to source a file in another language, ' 'then please use the appropriate source command. ' 'For example, source-bash script.sh') with open(fpath, 'r', encoding=encoding, errors=errors) as fp: src = fp.read() if not src.endswith('\n'): src += '\n' ctx = builtins.__xonsh_ctx__ updates = {'__file__': fpath, '__name__': os.path.abspath(fpath)} with env.swap(ARGS=args[i+1:]), swap_values(ctx, updates): try: builtins.execx(src, 'exec', ctx, filename=fpath) except Exception: print_color('{RED}You may be attempting to source non-xonsh file! ' '{NO_COLOR}If you are trying to source a file in ' 'another language, then please use the appropriate ' 'source command. For example, {GREEN}source-bash ' 'script.sh{NO_COLOR}', file=sys.stderr) raise def source_cmd(args, stdin=None): """Simple cmd.exe-specific wrapper around source-foreign.""" args = list(args) fpath = locate_binary(args[0]) args[0] = fpath if fpath else args[0] if not os.path.isfile(args[0]): return (None, 'xonsh: error: File not found: {}\n'.format(args[0]), 1) prevcmd = 'call ' prevcmd += ' '.join([argvquote(arg, force=True) for arg in args]) prevcmd = escape_windows_cmd_string(prevcmd) args.append('--prevcmd={}'.format(prevcmd)) args.insert(0, 'cmd') args.append('--interactive=0') args.append('--sourcer=call') args.append('--envcmd=set') args.append('--seterrpostcmd=if errorlevel 1 exit 1') args.append('--use-tmpfile=1') with builtins.__xonsh_env__.swap(PROMPT='$P$G'): return source_foreign(args, stdin=stdin) def xexec(args, stdin=None): """exec [-h|--help] command [args...] exec (also aliased as xexec) uses the os.execvpe() function to replace the xonsh process with the specified program. This provides the functionality of the bash 'exec' builtin:: >>> exec bash -l -i bash $ The '-h' and '--help' options print this message and exit. Notes ----- This command **is not** the same as the Python builtin function exec(). That function is for running Python code. 
This command, which shares the same name as the sh-lang statement, is for launching a command directly in the same process. In the event of a name conflict, please use the xexec command directly or dive into subprocess mode explicitly with ![exec command]. For more details, please see http://xon.sh/faq.html#exec. """ if len(args) == 0: return (None, 'xonsh: exec: no args specified\n', 1) elif args[0] == '-h' or args[0] == '--help': return inspect.getdoc(xexec) else: denv = builtins.__xonsh_env__.detype() try: os.execvpe(args[0], args, denv) except FileNotFoundError as e: return (None, 'xonsh: exec: file not found: {}: {}' '\n'.format(e.args[1], args[0]), 1) class AWitchAWitch(argparse.Action): SUPPRESS = '==SUPPRESS==' def __init__(self, option_strings, version=None, dest=SUPPRESS, default=SUPPRESS, **kwargs): super().__init__(option_strings=option_strings, dest=dest, default=default, nargs=0, **kwargs) def __call__(self, parser, namespace, values, option_string=None): import webbrowser webbrowser.open('https://github.com/xonsh/xonsh/commit/f49b400') parser.exit() def xonfig(args, stdin=None): """Runs the xonsh configuration utility.""" from xonsh.xonfig import xonfig_main # lazy import return xonfig_main(args) @unthreadable def trace(args, stdin=None, stdout=None, stderr=None, spec=None): """Runs the xonsh tracer utility.""" from xonsh.tracer import tracermain # lazy import try: return tracermain(args, stdin=stdin, stdout=stdout, stderr=stderr, spec=spec) except SystemExit: pass def showcmd(args, stdin=None): """usage: showcmd [-h|--help|cmd args] Displays the command and arguments as a list of strings that xonsh would run in subprocess mode. This is useful for determining how xonsh evaluates your commands and arguments prior to running these commands. optional arguments: -h, --help show this help message and exit example: >>> showcmd echo $USER can't hear "the sea" ['echo', 'I', "can't", 'hear', 'the sea'] """ if len(args) == 0 or (len(args) == 1 and args[0] in {'-h', '--help'}): print(showcmd.__doc__.rstrip().replace('\n ', '\n')) else: sys.displayhook(args) def detect_xpip_alias(): """ Determines the correct invocation to get xonsh's pip """ if not getattr(sys, 'executable', None): return lambda args, stdin=None: ("", "Sorry, unable to run pip on your system (missing sys.executable)", 1) basecmd = [sys.executable, '-m', 'pip'] try: if ON_WINDOWS: # XXX: Does windows have an installation mode that requires UAC? 
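# Illustrative note (not part of this function): on a typical user-owned
# install the helper resolves to roughly
#
#     [sys.executable, '-m', 'pip']   # e.g. ['/usr/bin/python3', '-m', 'pip']
#
# and only gains a 'sudo' prefix when the interpreter's directory is not
# writable (the os.access() check just below).  make_default_aliases()
# registers the result as ``xpip``, keeping "pip for the interpreter that
# runs xonsh" distinct from whatever pip happens to be first on $PATH.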
return basecmd elif not os.access(os.path.dirname(sys.executable), os.W_OK): return ['sudo'] + basecmd else: return basecmd except Exception: # Something freaky happened, return something that'll probably work return basecmd def make_default_aliases(): """Creates a new default aliases dictionary.""" default_aliases = { 'cd': cd, 'pushd': pushd, 'popd': popd, 'dirs': dirs, 'jobs': jobs, 'fg': fg, 'bg': bg, 'EOF': xonsh_exit, 'exit': xonsh_exit, 'quit': xonsh_exit, 'exec': xexec, 'xexec': xexec, 'source': source_alias, 'source-zsh': ['source-foreign', 'zsh', '--sourcer=source'], 'source-bash': ['source-foreign', 'bash', '--sourcer=source'], 'source-cmd': source_cmd, 'source-foreign': source_foreign, 'history': xhm.history_main, 'replay': replay_main, 'trace': trace, 'timeit': timeit_alias, 'xonfig': xonfig, 'scp-resume': ['rsync', '--partial', '-h', '--progress', '--rsh=ssh'], 'showcmd': showcmd, 'ipynb': ['jupyter', 'notebook', '--no-browser'], 'which': xxw.which, 'xontrib': xontribs_main, 'completer': xca.completer_alias, 'xpip': detect_xpip_alias(), } if ON_WINDOWS: # Borrow builtin commands from cmd.exe. windows_cmd_aliases = { 'cls', 'copy', 'del', 'dir', 'echo', 'erase', 'md', 'mkdir', 'mklink', 'move', 'rd', 'ren', 'rename', 'rmdir', 'time', 'type', 'vol' } for alias in windows_cmd_aliases: default_aliases[alias] = ['cmd', '/c', alias] default_aliases['call'] = ['source-cmd'] default_aliases['source-bat'] = ['source-cmd'] default_aliases['clear'] = 'cls' if ON_ANACONDA: # Add aliases specific to the Anaconda python distribution. default_aliases['activate'] = ['source-cmd', 'activate.bat'] default_aliases['deactivate'] = ['source-cmd', 'deactivate.bat'] if not locate_binary('sudo'): import xonsh.winutils as winutils def sudo(args): if len(args) < 1: print('You need to provide an executable to run as ' 'Administrator.') return cmd = args[0] if locate_binary(cmd): return winutils.sudo(cmd, args[1:]) elif cmd.lower() in windows_cmd_aliases: args = ['/D', '/C', 'CD', _get_cwd(), '&&'] + args return winutils.sudo('cmd', args) else: msg = 'Cannot find the path for executable "{0}".' print(msg.format(cmd)) default_aliases['sudo'] = sudo elif ON_DARWIN: default_aliases['ls'] = ['ls', '-G'] elif ON_FREEBSD: default_aliases['grep'] = ['grep', '--color=auto'] default_aliases['egrep'] = ['egrep', '--color=auto'] default_aliases['fgrep'] = ['fgrep', '--color=auto'] default_aliases['ls'] = ['ls', '-G'] elif ON_NETBSD: default_aliases['grep'] = ['grep', '--color=auto'] default_aliases['egrep'] = ['egrep', '--color=auto'] default_aliases['fgrep'] = ['fgrep', '--color=auto'] else: default_aliases['grep'] = ['grep', '--color=auto'] default_aliases['egrep'] = ['egrep', '--color=auto'] default_aliases['fgrep'] = ['fgrep', '--color=auto'] default_aliases['ls'] = ['ls', '--color=auto', '-v'] return default_aliases xonsh-0.6.0/xonsh/ansi_colors.py000066400000000000000000000637201320541242300167040ustar00rootroot00000000000000"""Tools for helping with ANSI color codes.""" import sys import string import warnings import builtins from xonsh.platform import HAS_PYGMENTS from xonsh.lazyasd import LazyDict from xonsh.color_tools import (RE_BACKGROUND, BASE_XONSH_COLORS, make_palette, find_closest_color, rgb2short, rgb_to_256) def ansi_partial_color_format(template, style='default', cmap=None, hide=False): """Formats a template string but only with respect to the colors. Another template string is returned, with the color values filled in. 
Parameters ---------- template : str The template string, potentially with color names. style : str, optional Style name to look up color map from. cmap : dict, optional A color map to use, this will prevent the color map from being looked up via the style name. hide : bool, optional Whether to wrap the color codes in the \\001 and \\002 escape codes, so that the color codes are not counted against line length. Returns ------- A template string with the color values filled in. """ try: return _ansi_partial_color_format_main(template, style=style, cmap=cmap, hide=hide) except Exception: return template def _ansi_partial_color_format_main(template, style='default', cmap=None, hide=False): if cmap is not None: pass elif style in ANSI_STYLES: cmap = ANSI_STYLES[style] else: try: # dynamically loading the style cmap = ansi_style_by_name(style) except Exception: msg = 'Could not find color style {0!r}, using default.' print(msg.format(style), file=sys.stderr) builtins.__xonsh_env__['XONSH_COLOR_STYLE'] = 'default' cmap = ANSI_STYLES['default'] formatter = string.Formatter() esc = ('\001' if hide else '') + '\033[' m = 'm' + ('\002' if hide else '') bopen = '{' bclose = '}' colon = ':' expl = '!' toks = [] for literal, field, spec, conv in formatter.parse(template): toks.append(literal) if field is None: pass elif field in cmap: toks.extend([esc, cmap[field], m]) elif '#' in field: field = field.lower() pre, _, post = field.partition('#') f_or_b = '38' if RE_BACKGROUND.search(pre) is None else '48' rgb, _, post = post.partition('_') c256, _ = rgb_to_256(rgb) color = f_or_b + ';5;' + c256 mods = pre + '_' + post if 'underline' in mods: color = '4;' + color if 'bold' in mods: color = '1;' + color toks.extend([esc, color, m]) elif field is not None: toks.append(bopen) toks.append(field) if conv is not None and len(conv) > 0: toks.append(expl) toks.append(conv) if spec is not None and len(spec) > 0: toks.append(colon) toks.append(spec) toks.append(bclose) return ''.join(toks) def ansi_color_style_names(): """Returns an iterable of all ANSI color style names.""" return ANSI_STYLES.keys() def ansi_color_style(style='default'): """Returns the current color map.""" if style in ANSI_STYLES: cmap = ANSI_STYLES[style] else: msg = 'Could not find color style {0!r}, using default.'.format(style) warnings.warn(msg, RuntimeWarning) cmap = ANSI_STYLES['default'] return cmap def _ansi_expand_style(cmap): """Expands a style in order to more quickly make color map changes.""" for key, val in list(cmap.items()): if key == 'NO_COLOR': continue elif len(val) == 0: cmap['BOLD_'+key] = '1' cmap['UNDERLINE_'+key] = '4' cmap['BOLD_UNDERLINE_'+key] = '1;4' cmap['BACKGROUND_'+key] = val else: cmap['BOLD_'+key] = '1;' + val cmap['UNDERLINE_'+key] = '4;' + val cmap['BOLD_UNDERLINE_'+key] = '1;4;' + val cmap['BACKGROUND_'+key] = val.replace('38', '48', 1) def _bw_style(): style = { 'BLACK': '', 'BLUE': '', 'CYAN': '', 'GREEN': '', 'INTENSE_BLACK': '', 'INTENSE_BLUE': '', 'INTENSE_CYAN': '', 'INTENSE_GREEN': '', 'INTENSE_PURPLE': '', 'INTENSE_RED': '', 'INTENSE_WHITE': '', 'INTENSE_YELLOW': '', 'NO_COLOR': '0', 'PURPLE': '', 'RED': '', 'WHITE': '', 'YELLOW': '', } _ansi_expand_style(style) return style def _default_style(): style = { # Reset 'NO_COLOR': '0', # Text Reset # Regular Colors 'BLACK': '0;30', # BLACK 'RED': '0;31', # RED 'GREEN': '0;32', # GREEN 'YELLOW': '0;33', # YELLOW 'BLUE': '0;34', # BLUE 'PURPLE': '0;35', # PURPLE 'CYAN': '0;36', # CYAN 'WHITE': '0;37', # WHITE # Bold 'BOLD_BLACK': '1;30', # BLACK 'BOLD_RED': 
'1;31', # RED 'BOLD_GREEN': '1;32', # GREEN 'BOLD_YELLOW': '1;33', # YELLOW 'BOLD_BLUE': '1;34', # BLUE 'BOLD_PURPLE': '1;35', # PURPLE 'BOLD_CYAN': '1;36', # CYAN 'BOLD_WHITE': '1;37', # WHITE # Underline 'UNDERLINE_BLACK': '4;30', # BLACK 'UNDERLINE_RED': '4;31', # RED 'UNDERLINE_GREEN': '4;32', # GREEN 'UNDERLINE_YELLOW': '4;33', # YELLOW 'UNDERLINE_BLUE': '4;34', # BLUE 'UNDERLINE_PURPLE': '4;35', # PURPLE 'UNDERLINE_CYAN': '4;36', # CYAN 'UNDERLINE_WHITE': '4;37', # WHITE # Bold, Underline 'BOLD_UNDERLINE_BLACK': '1;4;30', # BLACK 'BOLD_UNDERLINE_RED': '1;4;31', # RED 'BOLD_UNDERLINE_GREEN': '1;4;32', # GREEN 'BOLD_UNDERLINE_YELLOW': '1;4;33', # YELLOW 'BOLD_UNDERLINE_BLUE': '1;4;34', # BLUE 'BOLD_UNDERLINE_PURPLE': '1;4;35', # PURPLE 'BOLD_UNDERLINE_CYAN': '1;4;36', # CYAN 'BOLD_UNDERLINE_WHITE': '1;4;37', # WHITE # Background 'BACKGROUND_BLACK': '40', # BLACK 'BACKGROUND_RED': '41', # RED 'BACKGROUND_GREEN': '42', # GREEN 'BACKGROUND_YELLOW': '43', # YELLOW 'BACKGROUND_BLUE': '44', # BLUE 'BACKGROUND_PURPLE': '45', # PURPLE 'BACKGROUND_CYAN': '46', # CYAN 'BACKGROUND_WHITE': '47', # WHITE # High Intensity 'INTENSE_BLACK': '0;90', # BLACK 'INTENSE_RED': '0;91', # RED 'INTENSE_GREEN': '0;92', # GREEN 'INTENSE_YELLOW': '0;93', # YELLOW 'INTENSE_BLUE': '0;94', # BLUE 'INTENSE_PURPLE': '0;95', # PURPLE 'INTENSE_CYAN': '0;96', # CYAN 'INTENSE_WHITE': '0;97', # WHITE # Bold High Intensity 'BOLD_INTENSE_BLACK': '1;90', # BLACK 'BOLD_INTENSE_RED': '1;91', # RED 'BOLD_INTENSE_GREEN': '1;92', # GREEN 'BOLD_INTENSE_YELLOW': '1;93', # YELLOW 'BOLD_INTENSE_BLUE': '1;94', # BLUE 'BOLD_INTENSE_PURPLE': '1;95', # PURPLE 'BOLD_INTENSE_CYAN': '1;96', # CYAN 'BOLD_INTENSE_WHITE': '1;97', # WHITE # Underline High Intensity 'UNDERLINE_INTENSE_BLACK': '4;90', # BLACK 'UNDERLINE_INTENSE_RED': '4;91', # RED 'UNDERLINE_INTENSE_GREEN': '4;92', # GREEN 'UNDERLINE_INTENSE_YELLOW': '4;93', # YELLOW 'UNDERLINE_INTENSE_BLUE': '4;94', # BLUE 'UNDERLINE_INTENSE_PURPLE': '4;95', # PURPLE 'UNDERLINE_INTENSE_CYAN': '4;96', # CYAN 'UNDERLINE_INTENSE_WHITE': '4;97', # WHITE # Bold Underline High Intensity 'BOLD_UNDERLINE_INTENSE_BLACK': '1;4;90', # BLACK 'BOLD_UNDERLINE_INTENSE_RED': '1;4;91', # RED 'BOLD_UNDERLINE_INTENSE_GREEN': '1;4;92', # GREEN 'BOLD_UNDERLINE_INTENSE_YELLOW': '1;4;93', # YELLOW 'BOLD_UNDERLINE_INTENSE_BLUE': '1;4;94', # BLUE 'BOLD_UNDERLINE_INTENSE_PURPLE': '1;4;95', # PURPLE 'BOLD_UNDERLINE_INTENSE_CYAN': '1;4;96', # CYAN 'BOLD_UNDERLINE_INTENSE_WHITE': '1;4;97', # WHITE # High Intensity backgrounds 'BACKGROUND_INTENSE_BLACK': '0;100', # BLACK 'BACKGROUND_INTENSE_RED': '0;101', # RED 'BACKGROUND_INTENSE_GREEN': '0;102', # GREEN 'BACKGROUND_INTENSE_YELLOW': '0;103', # YELLOW 'BACKGROUND_INTENSE_BLUE': '0;104', # BLUE 'BACKGROUND_INTENSE_PURPLE': '0;105', # PURPLE 'BACKGROUND_INTENSE_CYAN': '0;106', # CYAN 'BACKGROUND_INTENSE_WHITE': '0;107', # WHITE } return style def _monokai_style(): style = { 'NO_COLOR': '0', 'BLACK': '38;5;16', 'BLUE': '38;5;63', 'CYAN': '38;5;81', 'GREEN': '38;5;40', 'PURPLE': '38;5;89', 'RED': '38;5;124', 'WHITE': '38;5;188', 'YELLOW': '38;5;184', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;20', 'INTENSE_CYAN': '38;5;44', 'INTENSE_GREEN': '38;5;148', 'INTENSE_PURPLE': '38;5;141', 'INTENSE_RED': '38;5;197', 'INTENSE_WHITE': '38;5;15', 'INTENSE_YELLOW': '38;5;186', } _ansi_expand_style(style) return style #################################### # Auto-generated below this line # #################################### def _algol_style(): style = { 'BLACK': '38;5;59', 'BLUE': 
'38;5;59', 'CYAN': '38;5;59', 'GREEN': '38;5;59', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;102', 'INTENSE_CYAN': '38;5;102', 'INTENSE_GREEN': '38;5;102', 'INTENSE_PURPLE': '38;5;102', 'INTENSE_RED': '38;5;09', 'INTENSE_WHITE': '38;5;102', 'INTENSE_YELLOW': '38;5;102', 'NO_COLOR': '0', 'PURPLE': '38;5;59', 'RED': '38;5;09', 'WHITE': '38;5;102', 'YELLOW': '38;5;09', } _ansi_expand_style(style) return style def _algol_nu_style(): style = { 'BLACK': '38;5;59', 'BLUE': '38;5;59', 'CYAN': '38;5;59', 'GREEN': '38;5;59', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;102', 'INTENSE_CYAN': '38;5;102', 'INTENSE_GREEN': '38;5;102', 'INTENSE_PURPLE': '38;5;102', 'INTENSE_RED': '38;5;09', 'INTENSE_WHITE': '38;5;102', 'INTENSE_YELLOW': '38;5;102', 'NO_COLOR': '0', 'PURPLE': '38;5;59', 'RED': '38;5;09', 'WHITE': '38;5;102', 'YELLOW': '38;5;09', } _ansi_expand_style(style) return style def _autumn_style(): style = { 'BLACK': '38;5;18', 'BLUE': '38;5;19', 'CYAN': '38;5;37', 'GREEN': '38;5;34', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;33', 'INTENSE_CYAN': '38;5;33', 'INTENSE_GREEN': '38;5;64', 'INTENSE_PURPLE': '38;5;217', 'INTENSE_RED': '38;5;130', 'INTENSE_WHITE': '38;5;145', 'INTENSE_YELLOW': '38;5;217', 'NO_COLOR': '0', 'PURPLE': '38;5;90', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;130', } _ansi_expand_style(style) return style def _borland_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;18', 'CYAN': '38;5;30', 'GREEN': '38;5;28', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;21', 'INTENSE_CYAN': '38;5;194', 'INTENSE_GREEN': '38;5;102', 'INTENSE_PURPLE': '38;5;188', 'INTENSE_RED': '38;5;09', 'INTENSE_WHITE': '38;5;224', 'INTENSE_YELLOW': '38;5;188', 'NO_COLOR': '0', 'PURPLE': '38;5;90', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;124', } _ansi_expand_style(style) return style def _colorful_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;20', 'CYAN': '38;5;31', 'GREEN': '38;5;34', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;61', 'INTENSE_CYAN': '38;5;145', 'INTENSE_GREEN': '38;5;102', 'INTENSE_PURPLE': '38;5;217', 'INTENSE_RED': '38;5;166', 'INTENSE_WHITE': '38;5;15', 'INTENSE_YELLOW': '38;5;217', 'NO_COLOR': '0', 'PURPLE': '38;5;90', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;130', } _ansi_expand_style(style) return style def _emacs_style(): style = { 'BLACK': '38;5;28', 'BLUE': '38;5;18', 'CYAN': '38;5;26', 'GREEN': '38;5;34', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;26', 'INTENSE_CYAN': '38;5;145', 'INTENSE_GREEN': '38;5;34', 'INTENSE_PURPLE': '38;5;129', 'INTENSE_RED': '38;5;167', 'INTENSE_WHITE': '38;5;145', 'INTENSE_YELLOW': '38;5;145', 'NO_COLOR': '0', 'PURPLE': '38;5;90', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;130', } _ansi_expand_style(style) return style def _friendly_style(): style = { 'BLACK': '38;5;22', 'BLUE': '38;5;18', 'CYAN': '38;5;31', 'GREEN': '38;5;34', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;74', 'INTENSE_CYAN': '38;5;74', 'INTENSE_GREEN': '38;5;71', 'INTENSE_PURPLE': '38;5;134', 'INTENSE_RED': '38;5;167', 'INTENSE_WHITE': '38;5;15', 'INTENSE_YELLOW': '38;5;145', 'NO_COLOR': '0', 'PURPLE': '38;5;90', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;166', } _ansi_expand_style(style) return style def _fruity_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;32', 'CYAN': '38;5;32', 'GREEN': '38;5;28', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;33', 'INTENSE_CYAN': '38;5;33', 'INTENSE_GREEN': '38;5;102', 'INTENSE_PURPLE': '38;5;198', 'INTENSE_RED': 
'38;5;202', 'INTENSE_WHITE': '38;5;15', 'INTENSE_YELLOW': '38;5;187', 'NO_COLOR': '0', 'PURPLE': '38;5;198', 'RED': '38;5;09', 'WHITE': '38;5;187', 'YELLOW': '38;5;202', } _ansi_expand_style(style) return style def _igor_style(): style = { 'BLACK': '38;5;34', 'BLUE': '38;5;21', 'CYAN': '38;5;30', 'GREEN': '38;5;34', 'INTENSE_BLACK': '38;5;30', 'INTENSE_BLUE': '38;5;21', 'INTENSE_CYAN': '38;5;30', 'INTENSE_GREEN': '38;5;34', 'INTENSE_PURPLE': '38;5;163', 'INTENSE_RED': '38;5;166', 'INTENSE_WHITE': '38;5;163', 'INTENSE_YELLOW': '38;5;166', 'NO_COLOR': '0', 'PURPLE': '38;5;163', 'RED': '38;5;166', 'WHITE': '38;5;163', 'YELLOW': '38;5;166', } _ansi_expand_style(style) return style def _lovelace_style(): style = { 'BLACK': '38;5;59', 'BLUE': '38;5;25', 'CYAN': '38;5;29', 'GREEN': '38;5;65', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;25', 'INTENSE_CYAN': '38;5;102', 'INTENSE_GREEN': '38;5;29', 'INTENSE_PURPLE': '38;5;133', 'INTENSE_RED': '38;5;131', 'INTENSE_WHITE': '38;5;102', 'INTENSE_YELLOW': '38;5;136', 'NO_COLOR': '0', 'PURPLE': '38;5;133', 'RED': '38;5;124', 'WHITE': '38;5;102', 'YELLOW': '38;5;130', } _ansi_expand_style(style) return style def _manni_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;18', 'CYAN': '38;5;30', 'GREEN': '38;5;40', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;105', 'INTENSE_CYAN': '38;5;45', 'INTENSE_GREEN': '38;5;113', 'INTENSE_PURPLE': '38;5;165', 'INTENSE_RED': '38;5;202', 'INTENSE_WHITE': '38;5;224', 'INTENSE_YELLOW': '38;5;221', 'NO_COLOR': '0', 'PURPLE': '38;5;165', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;166', } _ansi_expand_style(style) return style def _murphy_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;18', 'CYAN': '38;5;31', 'GREEN': '38;5;34', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;63', 'INTENSE_CYAN': '38;5;86', 'INTENSE_GREEN': '38;5;86', 'INTENSE_PURPLE': '38;5;213', 'INTENSE_RED': '38;5;209', 'INTENSE_WHITE': '38;5;15', 'INTENSE_YELLOW': '38;5;222', 'NO_COLOR': '0', 'PURPLE': '38;5;90', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;166', } _ansi_expand_style(style) return style def _native_style(): style = { 'BLACK': '38;5;52', 'BLUE': '38;5;67', 'CYAN': '38;5;31', 'GREEN': '38;5;64', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;68', 'INTENSE_CYAN': '38;5;87', 'INTENSE_GREEN': '38;5;70', 'INTENSE_PURPLE': '38;5;188', 'INTENSE_RED': '38;5;160', 'INTENSE_WHITE': '38;5;15', 'INTENSE_YELLOW': '38;5;214', 'NO_COLOR': '0', 'PURPLE': '38;5;59', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;124', } _ansi_expand_style(style) return style def _paraiso_dark_style(): style = { 'BLACK': '38;5;95', 'BLUE': '38;5;97', 'CYAN': '38;5;39', 'GREEN': '38;5;72', 'INTENSE_BLACK': '38;5;95', 'INTENSE_BLUE': '38;5;97', 'INTENSE_CYAN': '38;5;79', 'INTENSE_GREEN': '38;5;72', 'INTENSE_PURPLE': '38;5;188', 'INTENSE_RED': '38;5;203', 'INTENSE_WHITE': '38;5;188', 'INTENSE_YELLOW': '38;5;220', 'NO_COLOR': '0', 'PURPLE': '38;5;97', 'RED': '38;5;203', 'WHITE': '38;5;79', 'YELLOW': '38;5;214', } _ansi_expand_style(style) return style def _paraiso_light_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;16', 'CYAN': '38;5;39', 'GREEN': '38;5;72', 'INTENSE_BLACK': '38;5;16', 'INTENSE_BLUE': '38;5;97', 'INTENSE_CYAN': '38;5;79', 'INTENSE_GREEN': '38;5;72', 'INTENSE_PURPLE': '38;5;97', 'INTENSE_RED': '38;5;203', 'INTENSE_WHITE': '38;5;79', 'INTENSE_YELLOW': '38;5;220', 'NO_COLOR': '0', 'PURPLE': '38;5;97', 'RED': '38;5;16', 'WHITE': '38;5;102', 'YELLOW': '38;5;214', } _ansi_expand_style(style) return style 
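# ------------------------------------------------------------------
# Illustrative sketch (not one of the generated styles): each helper
# above only spells out the base colors and then calls
# _ansi_expand_style(), which derives the remaining variants, e.g.:
style = {'BLACK': '38;5;16', 'NO_COLOR': '0'}
_ansi_expand_style(style)
assert style['BOLD_BLACK'] == '1;38;5;16'
assert style['UNDERLINE_BLACK'] == '4;38;5;16'
assert style['BOLD_UNDERLINE_BLACK'] == '1;4;38;5;16'
assert style['BACKGROUND_BLACK'] == '48;5;16'
# ------------------------------------------------------------------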
def _pastie_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;20', 'CYAN': '38;5;25', 'GREEN': '38;5;28', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;61', 'INTENSE_CYAN': '38;5;194', 'INTENSE_GREEN': '38;5;34', 'INTENSE_PURPLE': '38;5;188', 'INTENSE_RED': '38;5;172', 'INTENSE_WHITE': '38;5;15', 'INTENSE_YELLOW': '38;5;188', 'NO_COLOR': '0', 'PURPLE': '38;5;125', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;130', } _ansi_expand_style(style) return style def _perldoc_style(): style = { 'BLACK': '38;5;18', 'BLUE': '38;5;18', 'CYAN': '38;5;31', 'GREEN': '38;5;34', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;134', 'INTENSE_CYAN': '38;5;145', 'INTENSE_GREEN': '38;5;28', 'INTENSE_PURPLE': '38;5;134', 'INTENSE_RED': '38;5;167', 'INTENSE_WHITE': '38;5;188', 'INTENSE_YELLOW': '38;5;188', 'NO_COLOR': '0', 'PURPLE': '38;5;90', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;166', } _ansi_expand_style(style) return style def _rrt_style(): style = { 'BLACK': '38;5;09', 'BLUE': '38;5;117', 'CYAN': '38;5;117', 'GREEN': '38;5;46', 'INTENSE_BLACK': '38;5;117', 'INTENSE_BLUE': '38;5;117', 'INTENSE_CYAN': '38;5;122', 'INTENSE_GREEN': '38;5;46', 'INTENSE_PURPLE': '38;5;213', 'INTENSE_RED': '38;5;09', 'INTENSE_WHITE': '38;5;188', 'INTENSE_YELLOW': '38;5;222', 'NO_COLOR': '0', 'PURPLE': '38;5;213', 'RED': '38;5;09', 'WHITE': '38;5;117', 'YELLOW': '38;5;09', } _ansi_expand_style(style) return style def _tango_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;20', 'CYAN': '38;5;61', 'GREEN': '38;5;34', 'INTENSE_BLACK': '38;5;24', 'INTENSE_BLUE': '38;5;62', 'INTENSE_CYAN': '38;5;15', 'INTENSE_GREEN': '38;5;64', 'INTENSE_PURPLE': '38;5;15', 'INTENSE_RED': '38;5;09', 'INTENSE_WHITE': '38;5;15', 'INTENSE_YELLOW': '38;5;178', 'NO_COLOR': '0', 'PURPLE': '38;5;90', 'RED': '38;5;124', 'WHITE': '38;5;15', 'YELLOW': '38;5;94', } _ansi_expand_style(style) return style def _trac_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;18', 'CYAN': '38;5;30', 'GREEN': '38;5;100', 'INTENSE_BLACK': '38;5;59', 'INTENSE_BLUE': '38;5;60', 'INTENSE_CYAN': '38;5;194', 'INTENSE_GREEN': '38;5;102', 'INTENSE_PURPLE': '38;5;188', 'INTENSE_RED': '38;5;137', 'INTENSE_WHITE': '38;5;224', 'INTENSE_YELLOW': '38;5;188', 'NO_COLOR': '0', 'PURPLE': '38;5;90', 'RED': '38;5;124', 'WHITE': '38;5;145', 'YELLOW': '38;5;100', } _ansi_expand_style(style) return style def _vim_style(): style = { 'BLACK': '38;5;18', 'BLUE': '38;5;18', 'CYAN': '38;5;44', 'GREEN': '38;5;40', 'INTENSE_BLACK': '38;5;60', 'INTENSE_BLUE': '38;5;68', 'INTENSE_CYAN': '38;5;44', 'INTENSE_GREEN': '38;5;40', 'INTENSE_PURPLE': '38;5;164', 'INTENSE_RED': '38;5;09', 'INTENSE_WHITE': '38;5;188', 'INTENSE_YELLOW': '38;5;184', 'NO_COLOR': '0', 'PURPLE': '38;5;164', 'RED': '38;5;160', 'WHITE': '38;5;188', 'YELLOW': '38;5;160', } _ansi_expand_style(style) return style def _vs_style(): style = { 'BLACK': '38;5;28', 'BLUE': '38;5;21', 'CYAN': '38;5;31', 'GREEN': '38;5;28', 'INTENSE_BLACK': '38;5;31', 'INTENSE_BLUE': '38;5;31', 'INTENSE_CYAN': '38;5;31', 'INTENSE_GREEN': '38;5;31', 'INTENSE_PURPLE': '38;5;31', 'INTENSE_RED': '38;5;09', 'INTENSE_WHITE': '38;5;31', 'INTENSE_YELLOW': '38;5;31', 'NO_COLOR': '0', 'PURPLE': '38;5;124', 'RED': '38;5;124', 'WHITE': '38;5;31', 'YELLOW': '38;5;124', } _ansi_expand_style(style) return style def _xcode_style(): style = { 'BLACK': '38;5;16', 'BLUE': '38;5;20', 'CYAN': '38;5;60', 'GREEN': '38;5;28', 'INTENSE_BLACK': '38;5;60', 'INTENSE_BLUE': '38;5;20', 'INTENSE_CYAN': '38;5;60', 'INTENSE_GREEN': '38;5;60', 
'INTENSE_PURPLE': '38;5;126', 'INTENSE_RED': '38;5;160', 'INTENSE_WHITE': '38;5;60', 'INTENSE_YELLOW': '38;5;94', 'NO_COLOR': '0', 'PURPLE': '38;5;126', 'RED': '38;5;160', 'WHITE': '38;5;60', 'YELLOW': '38;5;94', } _ansi_expand_style(style) return style ANSI_STYLES = LazyDict({ 'algol': _algol_style, 'algol_nu': _algol_nu_style, 'autumn': _autumn_style, 'borland': _borland_style, 'bw': _bw_style, 'colorful': _colorful_style, 'default': _default_style, 'emacs': _emacs_style, 'friendly': _friendly_style, 'fruity': _fruity_style, 'igor': _igor_style, 'lovelace': _lovelace_style, 'manni': _manni_style, 'monokai': _monokai_style, 'murphy': _murphy_style, 'native': _native_style, 'paraiso-dark': _paraiso_dark_style, 'paraiso-light': _paraiso_light_style, 'pastie': _pastie_style, 'perldoc': _perldoc_style, 'rrt': _rrt_style, 'tango': _tango_style, 'trac': _trac_style, 'vim': _vim_style, 'vs': _vs_style, 'xcode': _xcode_style, }, globals(), 'ANSI_STYLES') del (_algol_style, _algol_nu_style, _autumn_style, _borland_style, _bw_style, _colorful_style, _default_style, _emacs_style, _friendly_style, _fruity_style, _igor_style, _lovelace_style, _manni_style, _monokai_style, _murphy_style, _native_style, _paraiso_dark_style, _paraiso_light_style, _pastie_style, _perldoc_style, _rrt_style, _tango_style, _trac_style, _vim_style, _vs_style, _xcode_style) # # Dynamically generated styles # def make_ansi_style(palette): """Makes an ANSI color style from a color palette""" style = {'NO_COLOR': '0'} for name, t in BASE_XONSH_COLORS.items(): closest = find_closest_color(t, palette) if len(closest) == 3: closest = ''.join([a*2 for a in closest]) short = rgb2short(closest)[0] style[name] = '38;5;' + short style['BOLD_'+name] = '1;38;5;' + short style['UNDERLINE_'+name] = '4;38;5;' + short style['BOLD_UNDERLINE_'+name] = '1;4;38;5;' + short style['BACKGROUND_'+name] = '48;5;' + short return style def ansi_style_by_name(name): """Gets or makes an ANSI color style by name. If the styles does not exist, it will look for a style using the pygments name. """ if name in ANSI_STYLES: return ANSI_STYLES[name] elif not HAS_PYGMENTS: raise KeyError('could not find style {0!r}'.format(name)) from pygments.styles import get_style_by_name pstyle = get_style_by_name(name) palette = make_palette(pstyle.styles.values()) astyle = make_ansi_style(palette) ANSI_STYLES[name] = astyle return astyle xonsh-0.6.0/xonsh/ast.py000066400000000000000000000415411320541242300151550ustar00rootroot00000000000000# -*- coding: utf-8 -*- """The xonsh abstract syntax tree node.""" # These are imported into our module namespace for the benefit of parser.py. 
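# ------------------------------------------------------------------
# Illustrative sketch (relates to ansi_colors.py just above, not to this
# module): a named style is just a dict of SGR fragments, and
# ansi_partial_color_format() splices those fragments into a template
# while passing unknown fields through untouched:
#
#     from xonsh.ansi_colors import ansi_style_by_name, ansi_partial_color_format
#
#     monokai = ansi_style_by_name('monokai')
#     monokai['RED']        # '38;5;124'
#     monokai['NO_COLOR']   # '0'
#
#     ansi_partial_color_format('{GREEN}ok{NO_COLOR} in {cwd}')
#     # -> '\x1b[0;32mok\x1b[0m in {cwd}'   ({cwd} is left for later formatting)
# ------------------------------------------------------------------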
# pylint: disable=unused-import import sys from ast import ( Module, Num, Expr, Str, Bytes, UnaryOp, UAdd, USub, Invert, BinOp, Add, Sub, Mult, Div, FloorDiv, Mod, Pow, Compare, Lt, Gt, LtE, GtE, Eq, NotEq, In, NotIn, Is, IsNot, Not, BoolOp, Or, And, Subscript, Load, Slice, ExtSlice, List, Tuple, Set, Dict, AST, NameConstant, Name, GeneratorExp, Store, comprehension, ListComp, SetComp, DictComp, Assign, AugAssign, BitXor, BitAnd, BitOr, LShift, RShift, Assert, Delete, Del, Pass, Raise, Import, alias, ImportFrom, Continue, Break, Yield, YieldFrom, Return, IfExp, Lambda, arguments, arg, Call, keyword, Attribute, Global, Nonlocal, If, While, For, withitem, With, Try, ExceptHandler, FunctionDef, ClassDef, Starred, NodeTransformer, Interactive, Expression, Index, literal_eval, dump, walk, increment_lineno ) from ast import Ellipsis as EllipsisNode # pylint: enable=unused-import import textwrap import itertools from xonsh.tools import subproc_toks, find_next_break, get_logical_line from xonsh.platform import PYTHON_VERSION_INFO if PYTHON_VERSION_INFO >= (3, 5, 0): # pylint: disable=unused-import # pylint: disable=no-name-in-module from ast import MatMult, AsyncFunctionDef, AsyncWith, AsyncFor, Await else: MatMult = AsyncFunctionDef = AsyncWith = AsyncFor = Await = None if PYTHON_VERSION_INFO >= (3, 6, 0): # pylint: disable=unused-import # pylint: disable=no-name-in-module from ast import JoinedStr, FormattedValue else: JoinedStr = FormattedValue = None STATEMENTS = (FunctionDef, ClassDef, Return, Delete, Assign, AugAssign, For, While, If, With, Raise, Try, Assert, Import, ImportFrom, Global, Nonlocal, Expr, Pass, Break, Continue) def leftmostname(node): """Attempts to find the first name in the tree.""" if isinstance(node, Name): rtn = node.id elif isinstance(node, (BinOp, Compare)): rtn = leftmostname(node.left) elif isinstance(node, (Attribute, Subscript, Starred, Expr)): rtn = leftmostname(node.value) elif isinstance(node, Call): rtn = leftmostname(node.func) elif isinstance(node, UnaryOp): rtn = leftmostname(node.operand) elif isinstance(node, BoolOp): rtn = leftmostname(node.values[0]) elif isinstance(node, Assign): rtn = leftmostname(node.targets[0]) elif isinstance(node, (Str, Bytes, JoinedStr)): # handles case of "./my executable" rtn = leftmostname(node.s) elif isinstance(node, Tuple) and len(node.elts) > 0: # handles case of echo ,1,2,3 rtn = leftmostname(node.elts[0]) else: rtn = None return rtn def get_lineno(node, default=0): """Gets the lineno of a node or returns the default.""" return getattr(node, 'lineno', default) def min_line(node): """Computes the minimum lineno.""" node_line = get_lineno(node) return min(map(get_lineno, walk(node), itertools.repeat(node_line))) def max_line(node): """Computes the maximum lineno.""" return max(map(get_lineno, walk(node))) def get_col(node, default=-1): """Gets the col_offset of a node, or returns the default""" return getattr(node, 'col_offset', default) def min_col(node): """Computes the minimum col_offset.""" return min(map(get_col, walk(node), itertools.repeat(node.col_offset))) def max_col(node): """Returns the maximum col_offset of the node and all sub-nodes.""" col = getattr(node, 'max_col', None) if col is not None: return col highest = max(walk(node), key=get_col) col = highest.col_offset + node_len(highest) return col def node_len(node): """The length of a node as a string""" val = 0 for n in walk(node): if isinstance(n, Name): val += len(n.id) elif isinstance(n, Attribute): val += 1 + (len(n.attr) if isinstance(n.attr, str) else 0) 
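        # node_len() only approximates the printed width of a node by summing
        # the lengths of the Name identifiers and Attribute names it contains;
        # max_col() above relies on this estimate to locate the right-hand
        # edge of a node when the AST does not record it directly.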
# this may need to be added to for more nodes as more cases are found return val def get_id(node, default=None): """Gets the id attribute of a node, or returns a default.""" return getattr(node, 'id', default) def gather_names(node): """Returns the set of all names present in the node's tree.""" rtn = set(map(get_id, walk(node))) rtn.discard(None) return rtn def get_id_ctx(node): """Gets the id and attribute of a node, or returns a default.""" nid = getattr(node, 'id', None) if nid is None: return (None, None) return (nid, node.ctx) def gather_load_store_names(node): """Returns the names present in the node's tree in a set of load nodes and a set of store nodes. """ load = set() store = set() for nid, ctx in map(get_id_ctx, walk(node)): if nid is None: continue elif isinstance(ctx, Load): load.add(nid) else: store.add(nid) return (load, store) def has_elts(x): """Tests if x is an AST node with elements.""" return isinstance(x, AST) and hasattr(x, 'elts') def xonsh_call(name, args, lineno=None, col=None): """Creates the AST node for calling a function of a given name.""" return Call(func=Name(id=name, ctx=Load(), lineno=lineno, col_offset=col), args=args, keywords=[], starargs=None, kwargs=None, lineno=lineno, col_offset=col) def isdescendable(node): """Determines whether or not a node is worth visiting. Currently only UnaryOp and BoolOp nodes are visited. """ return isinstance(node, (UnaryOp, BoolOp)) class CtxAwareTransformer(NodeTransformer): """Transforms a xonsh AST based to use subprocess calls when the first name in an expression statement is not known in the context. This assumes that the expression statement is instead parseable as a subprocess. """ def __init__(self, parser): """Parameters ---------- parser : xonsh.Parser A parse instance to try to parse subprocess statements with. """ super(CtxAwareTransformer, self).__init__() self.parser = parser self.input = None self.contexts = [] self.lines = None self.mode = None self._nwith = 0 self.filename = '' self.debug_level = 0 def ctxvisit(self, node, inp, ctx, mode='exec', filename=None, debug_level=0): """Transforms the node in a context-dependent way. Parameters ---------- node : ast.AST A syntax tree to transform. input : str The input code in string format. ctx : dict The root context to use. filename : str, optional File we are to transform. debug_level : int, optional Debugging level to use in lexing and parsing. Returns ------- node : ast.AST The transformed node. 
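        Notes
        -----
        Expression statements whose leftmost name cannot be found in any of
        the collected contexts are re-parsed in subprocess mode via
        ``try_subproc_toks``; see ``visit_Expr`` and ``visit_Expression``
        below.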
""" self.filename = self.filename if filename is None else filename self.debug_level = debug_level self.lines = inp.splitlines() self.contexts = [ctx, set()] self.mode = mode self._nwith = 0 node = self.visit(node) del self.lines, self.contexts, self.mode self._nwith = 0 return node def ctxupdate(self, iterable): """Updated the most recent context.""" self.contexts[-1].update(iterable) def ctxadd(self, value): """Adds a value the most recent context.""" self.contexts[-1].add(value) def ctxremove(self, value): """Removes a value the most recent context.""" for ctx in reversed(self.contexts): if value in ctx: ctx.remove(value) break def try_subproc_toks(self, node, strip_expr=False): """Tries to parse the line of the node as a subprocess.""" line, nlogical, idx = get_logical_line(self.lines, node.lineno - 1) if self.mode == 'eval': mincol = len(line) - len(line.lstrip()) maxcol = None else: mincol = max(min_col(node) - 1, 0) maxcol = max_col(node) if mincol == maxcol: maxcol = find_next_break(line, mincol=mincol, lexer=self.parser.lexer) elif nlogical > 1: maxcol = None elif maxcol < len(line) and line[maxcol] == ';': pass else: maxcol += 1 spline = subproc_toks(line, mincol=mincol, maxcol=maxcol, returnline=False, lexer=self.parser.lexer) if spline is None or len(spline) < len(line[mincol:maxcol]) + 2: # failed to get something consistent, try greedy wrap # The +2 comes from "![]" being length 3, minus 1 since maxcol # is one beyond the total length for slicing spline = subproc_toks(line, mincol=mincol, maxcol=maxcol, returnline=False, lexer=self.parser.lexer, greedy=True) if spline is None: return node try: newnode = self.parser.parse(spline, mode=self.mode, filename=self.filename, debug_level=(self.debug_level > 2)) newnode = newnode.body if not isinstance(newnode, AST): # take the first (and only) Expr newnode = newnode[0] increment_lineno(newnode, n=node.lineno - 1) newnode.col_offset = node.col_offset if self.debug_level > 1: msg = ('{0}:{1}:{2}{3} - {4}\n' '{0}:{1}:{2}{3} + {5}') mstr = '' if maxcol is None else ':' + str(maxcol) msg = msg.format(self.filename, node.lineno, mincol, mstr, line, spline) print(msg, file=sys.stderr) except SyntaxError: newnode = node if strip_expr and isinstance(newnode, Expr): newnode = newnode.value return newnode def is_in_scope(self, node): """Determines whether or not the current node is in scope.""" names, store = gather_load_store_names(node) names -= store if not names: return True inscope = False for ctx in reversed(self.contexts): names -= ctx if not names: inscope = True break return inscope # # Replacement visitors # def visit_Expression(self, node): """Handle visiting an expression body.""" if isdescendable(node.body): node.body = self.visit(node.body) body = node.body inscope = self.is_in_scope(body) if not inscope: node.body = self.try_subproc_toks(body) return node def visit_Expr(self, node): """Handle visiting an expression.""" if isdescendable(node.value): node.value = self.visit(node.value) # this allows diving into BoolOps if self.is_in_scope(node): return node else: newnode = self.try_subproc_toks(node) if not isinstance(newnode, Expr): newnode = Expr(value=newnode, lineno=node.lineno, col_offset=node.col_offset) if hasattr(node, 'max_lineno'): newnode.max_lineno = node.max_lineno newnode.max_col = node.max_col return newnode def visit_UnaryOp(self, node): """Handle visiting an unary operands, like not.""" if isdescendable(node.operand): node.operand = self.visit(node.operand) operand = node.operand inscope = self.is_in_scope(operand) 
if not inscope: node.operand = self.try_subproc_toks(operand, strip_expr=True) return node def visit_BoolOp(self, node): """Handle visiting an boolean operands, like and/or.""" for i in range(len(node.values)): val = node.values[i] if isdescendable(val): val = node.values[i] = self.visit(val) inscope = self.is_in_scope(val) if not inscope: node.values[i] = self.try_subproc_toks(val, strip_expr=True) return node # # Context aggregator visitors # def visit_Assign(self, node): """Handle visiting an assignment statement.""" ups = set() for targ in node.targets: if isinstance(targ, (Tuple, List)): ups.update(leftmostname(elt) for elt in targ.elts) elif isinstance(targ, BinOp): newnode = self.try_subproc_toks(node) if newnode is node: ups.add(leftmostname(targ)) else: return newnode else: ups.add(leftmostname(targ)) self.ctxupdate(ups) return node def visit_Import(self, node): """Handle visiting a import statement.""" for name in node.names: if name.asname is None: self.ctxadd(name.name) else: self.ctxadd(name.asname) return node def visit_ImportFrom(self, node): """Handle visiting a "from ... import ..." statement.""" for name in node.names: if name.asname is None: self.ctxadd(name.name) else: self.ctxadd(name.asname) return node def visit_With(self, node): """Handle visiting a with statement.""" for item in node.items: if item.optional_vars is not None: self.ctxupdate(gather_names(item.optional_vars)) self._nwith += 1 self.generic_visit(node) self._nwith -= 1 return node def visit_For(self, node): """Handle visiting a for statement.""" targ = node.target self.ctxupdate(gather_names(targ)) self.generic_visit(node) return node def visit_FunctionDef(self, node): """Handle visiting a function definition.""" self.ctxadd(node.name) self.contexts.append(set()) args = node.args argchain = [args.args, args.kwonlyargs] if args.vararg is not None: argchain.append((args.vararg,)) if args.kwarg is not None: argchain.append((args.kwarg,)) self.ctxupdate(a.arg for a in itertools.chain.from_iterable(argchain)) self.generic_visit(node) self.contexts.pop() return node def visit_ClassDef(self, node): """Handle visiting a class definition.""" self.ctxadd(node.name) self.contexts.append(set()) self.generic_visit(node) self.contexts.pop() return node def visit_Delete(self, node): """Handle visiting a del statement.""" for targ in node.targets: if isinstance(targ, Name): self.ctxremove(targ.id) self.generic_visit(node) return node def visit_Try(self, node): """Handle visiting a try statement.""" for handler in node.handlers: if handler.name is not None: self.ctxadd(handler.name) self.generic_visit(node) return node def visit_Global(self, node): """Handle visiting a global statement.""" self.contexts[1].update(node.names) # contexts[1] is the global ctx self.generic_visit(node) return node def pdump(s, **kwargs): """performs a pretty dump of an AST node.""" if isinstance(s, AST): s = dump(s, **kwargs).replace(',', ',\n') openers = '([{' closers = ')]}' lens = len(s) + 1 if lens == 1: return s i = min([s.find(o) % lens for o in openers]) if i == lens - 1: return s closer = closers[openers.find(s[i])] j = s.rfind(closer) if j == -1 or j <= i: return s[:i+1] + '\n' + textwrap.indent(pdump(s[i+1:]), ' ') pre = s[:i+1] + '\n' mid = s[i+1:j] post = '\n' + s[j:] mid = textwrap.indent(pdump(mid), ' ') if '(' in post or '[' in post or '{' in post: post = pdump(post) return pre + mid + post def pprint_ast(s, *, sep=None, end=None, file=None, flush=False, **kwargs): """Performs a pretty print of the AST nodes.""" print(pdump(s, 
**kwargs), sep=sep, end=end, file=file, flush=flush) # # Private helpers # def _getblockattr(name, lineno, col): """calls getattr(name, '__xonsh_block__', False).""" return xonsh_call('getattr', args=[ Name(id=name, ctx=Load(), lineno=lineno, col_offset=col), Str(s='__xonsh_block__', lineno=lineno, col_offset=col), NameConstant(value=False, lineno=lineno, col_offset=col)], lineno=lineno, col=col) xonsh-0.6.0/xonsh/base_shell.py000066400000000000000000000470421320541242300164710ustar00rootroot00000000000000# -*- coding: utf-8 -*- """The base class for xonsh shell""" import io import os import sys import time import builtins from xonsh.tools import (XonshError, print_exception, DefaultNotGiven, check_for_partial_string, format_std_prepost, get_line_continuation) from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS from xonsh.codecache import (should_use_cache, code_cache_name, code_cache_check, get_cache_filename, update_cache, run_compiled_code) from xonsh.completer import Completer from xonsh.prompt.base import multiline_prompt, PromptFormatter from xonsh.events import events from xonsh.shell import transform_command from xonsh.lazyimps import pygments, pyghooks from xonsh.ansi_colors import ansi_partial_color_format if ON_WINDOWS: import ctypes kernel32 = ctypes.windll.kernel32 kernel32.SetConsoleTitleW.argtypes = [ctypes.c_wchar_p] class _TeeStdBuf(io.RawIOBase): """A dispatcher for bytes to two buffers, as std stream buffer and an in memory buffer. """ def __init__(self, stdbuf, membuf, encoding=None, errors=None, prestd=b'', poststd=b''): """ Parameters ---------- stdbuf : BytesIO-like or StringIO-like The std stream buffer. membuf : BytesIO-like The in memory stream buffer. encoding : str or None, optional The encoding of the stream. Only used if stdbuf is a text stream, rather than a binary one. errors : str or None, optional The error form for the encoding of the stream. Only used if stdbuf is a text stream, rather than a binary one. prestd : bytes, optional The prefix to prepend to the standard buffer. poststd : bytes, optional The postfix to append to the standard buffer. """ self.stdbuf = stdbuf self.membuf = membuf self.encoding = encoding self.errors = errors self.prestd = prestd self.poststd = poststd self._std_is_binary = not hasattr(stdbuf, 'encoding') def fileno(self): """Returns the file descriptor of the std buffer.""" return self.stdbuf.fileno() def seek(self, offset, whence=io.SEEK_SET): """Sets the location in both the stdbuf and the membuf.""" self.stdbuf.seek(offset, whence) self.membuf.seek(offset, whence) def truncate(self, size=None): """Truncate both buffers.""" self.stdbuf.truncate(size) self.membuf.truncate(size) def readinto(self, b): """Read bytes into buffer from both streams.""" if self._std_is_binary: self.stdbuf.readinto(b) return self.membuf.readinto(b) def write(self, b): """Write bytes into both buffers.""" std_b = b if self.prestd: std_b = self.prestd + b if self.poststd: std_b += self.poststd # write to stdbuf if self._std_is_binary: self.stdbuf.write(std_b) else: self.stdbuf.write(std_b.decode(encoding=self.encoding, errors=self.errors)) return self.membuf.write(b) class _TeeStd(io.TextIOBase): """Tees a std stream into an in-memory container and the original stream.""" def __init__(self, name, mem, prestd='', poststd=''): """ Parameters ---------- name : str The name of the buffer in the sys module, e.g. 'stdout'. mem : io.TextIOBase-like The in-memory text-based representation. prestd : str, optional The prefix to prepend to the standard stream. 
        poststd : str, optional
            The postfix to append to the standard stream.
        """
        self._name = name
        self.std = std = getattr(sys, name)
        self.mem = mem
        self.prestd = prestd
        self.poststd = poststd
        preb = prestd.encode(encoding=mem.encoding, errors=mem.errors)
        postb = poststd.encode(encoding=mem.encoding, errors=mem.errors)
        if hasattr(std, 'buffer'):
            buffer = _TeeStdBuf(std.buffer, mem.buffer,
                                prestd=preb, poststd=postb)
        else:
            # TextIO does not have buffer as part of the API, so std streams
            # may not either.
            buffer = _TeeStdBuf(std, mem.buffer, encoding=mem.encoding,
                                errors=mem.errors, prestd=preb, poststd=postb)
        self.buffer = buffer
        setattr(sys, name, self)

    @property
    def encoding(self):
        """The encoding of the in-memory buffer."""
        return self.mem.encoding

    @property
    def errors(self):
        """The errors of the in-memory buffer."""
        return self.mem.errors

    @property
    def newlines(self):
        """The newlines of the in-memory buffer."""
        return self.mem.newlines

    def _replace_std(self):
        std = self.std
        if std is None:
            return
        setattr(sys, self._name, std)
        self.std = self._name = None

    def __del__(self):
        self._replace_std()

    def close(self):
        """Restores the original std stream."""
        self._replace_std()

    def write(self, s):
        """Writes data to the original std stream and the in-memory object."""
        self.mem.write(s)
        if self.std is None:
            return
        std_s = s
        if self.prestd:
            std_s = self.prestd + std_s
        if self.poststd:
            std_s += self.poststd
        self.std.write(std_s)

    def flush(self):
        """Flushes both the original stdout and the buffer."""
        self.std.flush()
        self.mem.flush()

    def fileno(self):
        """Tunnel fileno() calls to the std stream."""
        return self.std.fileno()

    def seek(self, offset, whence=io.SEEK_SET):
        """Seek to a location in both streams."""
        self.std.seek(offset, whence)
        self.mem.seek(offset, whence)

    def truncate(self, size=None):
        """Truncate both streams."""
        self.std.truncate(size)
        self.mem.truncate(size)

    def detach(self):
        """This operation is not supported."""
        raise io.UnsupportedOperation

    def read(self, size=None):
        """Read from the in-memory stream and seek to a new location in the
        std stream.
        """
        s = self.mem.read(size)
        loc = self.std.tell()
        self.std.seek(loc + len(s))
        return s

    def readline(self, size=-1):
        """Read a line from the in-memory stream and seek to a new location
        in the std stream.
        """
        s = self.mem.readline(size)
        loc = self.std.tell()
        self.std.seek(loc + len(s))
        return s


class Tee:
    """Class that merges tee'd stdout and stderr into a single stream.

    This represents what a user would actually see on the command line.
    This class has the same interface as io.TextIOWrapper, except that
    the buffer is optional.
    """
    # pylint is stupid about counting public methods when using inheritance.
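    # Tee mirrors everything written to sys.stdout / sys.stderr into a single
    # in-memory TextIOWrapper while still forwarding it to the real streams.
    # Rough usage sketch (illustrative only; it assumes a running xonsh
    # session, since the constructor consults __xonsh_env__ for the stderr
    # prefix/postfix):
    #
    #     tee = Tee(encoding='utf-8', errors='backslashreplace')
    #     print('hello')             # reaches the terminal *and* the buffer
    #     captured = tee.getvalue()  # -> 'hello\n'
    #     tee.close()                # restores sys.stdout / sys.stderr
    #
    # BaseShell.default() below wraps command execution in exactly this
    # pattern to record what the user saw.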
# pylint: disable=too-few-public-methods def __init__(self, buffer=None, encoding=None, errors=None, newline=None, line_buffering=False, write_through=False): self.buffer = io.BytesIO() if buffer is None else buffer self.memory = io.TextIOWrapper(self.buffer, encoding=encoding, errors=errors, newline=newline, line_buffering=line_buffering, write_through=write_through) self.stdout = _TeeStd('stdout', self.memory) env = builtins.__xonsh_env__ prestderr = format_std_prepost(env.get('XONSH_STDERR_PREFIX')) poststderr = format_std_prepost(env.get('XONSH_STDERR_POSTFIX')) self.stderr = _TeeStd('stderr', self.memory, prestd=prestderr, poststd=poststderr) @property def line_buffering(self): return self.memory.line_buffering def __del__(self): del self.stdout, self.stderr self.stdout = self.stderr = None def close(self): """Closes the buffer as well as the stdout and stderr tees.""" self.stdout.close() self.stderr.close() self.memory.close() def getvalue(self): """Gets the current contents of the in-memory buffer.""" m = self.memory loc = m.tell() m.seek(0) s = m.read() m.seek(loc) return s class BaseShell(object): """The xonsh shell.""" def __init__(self, execer, ctx, **kwargs): super().__init__() self.execer = execer self.ctx = ctx self.completer = Completer() if kwargs.get('completer', True) else None self.buffer = [] self.need_more_lines = False self.mlprompt = None self._styler = DefaultNotGiven self.prompt_formatter = PromptFormatter() self.accumulated_inputs = '' @property def styler(self): if self._styler is DefaultNotGiven: if HAS_PYGMENTS: from xonsh.pyghooks import XonshStyle env = builtins.__xonsh_env__ self._styler = XonshStyle(env.get('XONSH_COLOR_STYLE')) else: self._styler = None return self._styler @styler.setter def styler(self, value): self._styler = value @styler.deleter def styler(self): self._styler = DefaultNotGiven def emptyline(self): """Called when an empty line has been entered.""" self.need_more_lines = False self.default('') def singleline(self, **kwargs): """Reads a single line of input from the shell.""" msg = '{0} has not implemented singleline().' 
raise RuntimeError(msg.format(self.__class__.__name__)) def precmd(self, line): """Called just before execution of line.""" return line if self.need_more_lines else line.lstrip() def default(self, line): """Implements code execution.""" line = line if line.endswith('\n') else line + '\n' src, code = self.push(line) if code is None: return events.on_precommand.fire(cmd=src) env = builtins.__xonsh_env__ hist = builtins.__xonsh_history__ # pylint: disable=no-member ts1 = None enc = env.get('XONSH_ENCODING') err = env.get('XONSH_ENCODING_ERRORS') tee = Tee(encoding=enc, errors=err) try: ts0 = time.time() run_compiled_code(code, self.ctx, None, 'single') ts1 = time.time() if hist is not None and hist.last_cmd_rtn is None: hist.last_cmd_rtn = 0 # returncode for success except XonshError as e: print(e.args[0], file=sys.stderr) if hist is not None and hist.last_cmd_rtn is None: hist.last_cmd_rtn = 1 # return code for failure except Exception: # pylint: disable=broad-except print_exception() if hist is not None and hist.last_cmd_rtn is None: hist.last_cmd_rtn = 1 # return code for failure finally: ts1 = ts1 or time.time() tee_out = tee.getvalue() self._append_history(inp=src, ts=[ts0, ts1], tee_out=tee_out) self.accumulated_inputs += src append_newline = env.get('XONSH_APPEND_NEWLINE') if append_newline and not tee_out.endswith(os.linesep): print(os.linesep, end='') tee.close() self._fix_cwd() if builtins.__xonsh_exit__: # pylint: disable=no-member return True def _append_history(self, tee_out=None, **info): """Append information about the command to the history. This also handles on_postcommand because this is the place where all the information is available. """ hist = builtins.__xonsh_history__ # pylint: disable=no-member info['rtn'] = hist.last_cmd_rtn if hist is not None else None tee_out = tee_out or None last_out = hist.last_cmd_out if hist is not None else None if last_out is None and tee_out is None: pass elif last_out is None and tee_out is not None: info['out'] = tee_out elif last_out is not None and tee_out is None: info['out'] = last_out else: info['out'] = tee_out + '\n' + last_out events.on_postcommand.fire( cmd=info['inp'], rtn=info['rtn'], out=info.get('out', None), ts=info['ts'] ) if hist is not None: hist.append(info) hist.last_cmd_rtn = hist.last_cmd_out = None def _fix_cwd(self): """Check if the cwd changed out from under us.""" env = builtins.__xonsh_env__ try: cwd = os.getcwd() except (FileNotFoundError, OSError): cwd = None if cwd is None: # directory has been deleted out from under us, most likely pwd = env.get('PWD', None) if pwd is None: # we have no idea where we are env['PWD'] = '' elif os.path.isdir(pwd): # unclear why os.getcwd() failed. do nothing. pass else: # OK PWD is really gone. msg = '{UNDERLINE_INTENSE_WHITE}{BACKGROUND_INTENSE_BLACK}' msg += "xonsh: working directory does not exist: " + pwd msg += '{NO_COLOR}' self.print_color(msg, file=sys.stderr) elif 'PWD' not in env: # $PWD is missing from env, recreate it env['PWD'] = cwd elif os.path.realpath(cwd) != os.path.realpath(env['PWD']): # The working directory has changed without updating $PWD, fix this old = env['PWD'] env['PWD'] = cwd env['OLDPWD'] = old events.on_chdir.fire(olddir=old, newdir=cwd) def push(self, line): """Pushes a line onto the buffer and compiles the code in a way that enables multiline input. 
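        For example, a trailing line continuation keeps the buffer open
        (illustrative; ``shell`` is any BaseShell instance)::

            src, code = shell.push('echo hi && \\\n')
            # compile() notices the trailing continuation, sets
            # need_more_lines, and returns (src, None); nothing runs yet.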
""" self.buffer.append(line) if self.need_more_lines: return None, None src = ''.join(self.buffer) src = transform_command(src) return self.compile(src) def compile(self, src): """Compiles source code and returns the (possibly modified) source and a valid code object. """ _cache = should_use_cache(self.execer, 'single') if _cache: codefname = code_cache_name(src) cachefname = get_cache_filename(codefname, code=True) usecache, code = code_cache_check(cachefname) if usecache: self.reset_buffer() return src, code lincont = get_line_continuation() if src.endswith(lincont+'\n'): self.need_more_lines = True return src, None try: code = self.execer.compile(src, mode='single', glbs=self.ctx, locs=None) if _cache: update_cache(code, cachefname) self.reset_buffer() except SyntaxError: partial_string_info = check_for_partial_string(src) in_partial_string = (partial_string_info[0] is not None and partial_string_info[1] is None) if (src == '\n' or src.endswith('\n\n')) and not in_partial_string: self.reset_buffer() print_exception() return src, None self.need_more_lines = True code = None except Exception: # pylint: disable=broad-except self.reset_buffer() print_exception() code = None return src, code def reset_buffer(self): """Resets the line buffer.""" self.buffer.clear() self.need_more_lines = False self.mlprompt = None def settitle(self): """Sets terminal title.""" env = builtins.__xonsh_env__ # pylint: disable=no-member term = env.get('TERM', None) # Shells running in emacs sets TERM to "dumb" or "eterm-color". # Do not set title for these to avoid garbled prompt. if (term is None and not ON_WINDOWS) or term in ['dumb', 'eterm-color', 'linux']: return t = env.get('TITLE') if t is None: return t = self.prompt_formatter(t) if ON_WINDOWS and 'ANSICON' not in env: kernel32.SetConsoleTitleW(t) else: with open(1, 'wb', closefd=False) as f: # prevent xonsh from answering interactive questions # on the next command by writing the title f.write("\x1b]0;{0}\x07".format(t).encode()) f.flush() @property def prompt(self): """Obtains the current prompt string.""" if self.need_more_lines: if self.mlprompt is None: try: self.mlprompt = multiline_prompt() except Exception: # pylint: disable=broad-except print_exception() self.mlprompt = ' ' return self.mlprompt env = builtins.__xonsh_env__ # pylint: disable=no-member p = env.get('PROMPT') try: p = self.prompt_formatter(p) except Exception: # pylint: disable=broad-except print_exception() self.settitle() return p def format_color(self, string, hide=False, force_string=False, **kwargs): """Formats the colors in a string. ``BaseShell``'s default implementation of this method uses colors based on ANSI color codes. """ style = builtins.__xonsh_env__.get('XONSH_COLOR_STYLE') return ansi_partial_color_format(string, hide=hide, style=style) def print_color(self, string, hide=False, **kwargs): """Prints a string in color. This base implementation's colors are based on ANSI color codes if a string was given as input. If a list of token pairs is given, it will color based on pygments, if available. If pygments is not available, it will print a colorless string. 
""" if isinstance(string, str): s = self.format_color(string, hide=hide) elif HAS_PYGMENTS: # assume this is a list of (Token, str) tuples and format it env = builtins.__xonsh_env__ self.styler.style_name = env.get('XONSH_COLOR_STYLE') style_proxy = pyghooks.xonsh_style_proxy(self.styler) formatter = pyghooks.XonshTerminal256Formatter(style=style_proxy) s = pygments.format(string, formatter).rstrip() else: # assume this is a list of (Token, str) tuples and remove color s = ''.join([x for _, x in string]) print(s, **kwargs) def color_style_names(self): """Returns an iterable of all available style names.""" return () def color_style(self): """Returns the current color map.""" return {} def restore_tty_sanity(self): """An interface for resetting the TTY stdin mode. This is highly dependent on the shell backend. Also it is mostly optional since it only affects ^Z backgrounding behaviour. """ pass xonsh-0.6.0/xonsh/built_ins.py000066400000000000000000001233551320541242300163620ustar00rootroot00000000000000# -*- coding: utf-8 -*- """The xonsh built-ins. Note that this module is named 'built_ins' so as not to be confused with the special Python builtins module. """ import io import os import re import sys import types import shlex import signal import atexit import pathlib import inspect import builtins import itertools import subprocess import contextlib import collections.abc as cabc from xonsh.ast import AST from xonsh.lazyasd import LazyObject, lazyobject from xonsh.inspectors import Inspector from xonsh.aliases import Aliases, make_default_aliases from xonsh.environ import Env, default_env, locate_binary from xonsh.jobs import add_job from xonsh.platform import ON_POSIX, ON_WINDOWS from xonsh.proc import ( PopenThread, ProcProxyThread, ProcProxy, ConsoleParallelReader, pause_call_resume, CommandPipeline, HiddenCommandPipeline, STDOUT_CAPTURE_KINDS) from xonsh.tools import ( suggest_commands, expand_path, globpath, XonshError, XonshCalledProcessError ) from xonsh.lazyimps import pty, termios from xonsh.commands_cache import CommandsCache from xonsh.events import events import xonsh.completers.init BUILTINS_LOADED = False INSPECTOR = LazyObject(Inspector, globals(), 'INSPECTOR') @lazyobject def AT_EXIT_SIGNALS(): sigs = (signal.SIGABRT, signal.SIGFPE, signal.SIGILL, signal.SIGSEGV, signal.SIGTERM) if ON_POSIX: sigs += (signal.SIGTSTP, signal.SIGQUIT, signal.SIGHUP) return sigs def resetting_signal_handle(sig, f): """Sets a new signal handle that will automatically restore the old value once the new handle is finished. """ oldh = signal.getsignal(sig) def newh(s=None, frame=None): f(s, frame) signal.signal(sig, oldh) if sig != 0: sys.exit(sig) signal.signal(sig, newh) def helper(x, name=''): """Prints help about, and then returns that variable.""" INSPECTOR.pinfo(x, oname=name, detail_level=0) return x def superhelper(x, name=''): """Prints help about, and then returns that variable.""" INSPECTOR.pinfo(x, oname=name, detail_level=1) return x def reglob(path, parts=None, i=None): """Regular expression-based globbing.""" if parts is None: path = os.path.normpath(path) drive, tail = os.path.splitdrive(path) parts = tail.split(os.sep) d = os.sep if os.path.isabs(path) else '.' d = os.path.join(drive, d) return reglob(d, parts, i=0) base = subdir = path if i == 0: if not os.path.isabs(base): base = '' elif len(parts) > 1: i += 1 regex = os.path.join(base, parts[i]) if ON_WINDOWS: # currently unable to access regex backslash sequences # on Windows due to paths using \. 
regex = regex.replace('\\', '\\\\') regex = re.compile(regex) files = os.listdir(subdir) files.sort() paths = [] i1 = i + 1 if i1 == len(parts): for f in files: p = os.path.join(base, f) if regex.fullmatch(p) is not None: paths.append(p) else: for f in files: p = os.path.join(base, f) if regex.fullmatch(p) is None or not os.path.isdir(p): continue paths += reglob(p, parts=parts, i=i1) return paths def path_literal(s): s = expand_path(s) return pathlib.Path(s) def regexsearch(s): s = expand_path(s) return reglob(s) def globsearch(s): csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS') glob_sorted = builtins.__xonsh_env__.get('GLOB_SORTED') return globpath(s, ignore_case=(not csc), return_empty=True, sort_result=glob_sorted) def pathsearch(func, s, pymode=False, pathobj=False): """ Takes a string and returns a list of file paths that match (regex, glob, or arbitrary search function). If pathobj=True, the return is a list of pathlib.Path objects instead of strings. """ if (not callable(func) or len(inspect.signature(func).parameters) != 1): error = "%r is not a known path search function" raise XonshError(error % func) o = func(s) if pathobj and pymode: o = list(map(pathlib.Path, o)) no_match = [] if pymode else [s] return o if len(o) != 0 else no_match RE_SHEBANG = LazyObject(lambda: re.compile(r'#![ \t]*(.+?)$'), globals(), 'RE_SHEBANG') def _is_binary(fname, limit=80): with open(fname, 'rb') as f: for i in range(limit): char = f.read(1) if char == b'\0': return True if char == b'\n': return False if char == b'': return False return False def _un_shebang(x): if x == '/usr/bin/env': return [] elif any(x.startswith(i) for i in ['/usr/bin', '/usr/local/bin', '/bin']): x = os.path.basename(x) elif x.endswith('python') or x.endswith('python.exe'): x = 'python' if x == 'xonsh': return ['python', '-m', 'xonsh.main'] return [x] def get_script_subproc_command(fname, args): """Given the name of a script outside the path, returns a list representing an appropriate subprocess command to execute the script. Raises PermissionError if the script is not executable. """ # make sure file is executable if not os.access(fname, os.X_OK): raise PermissionError if ON_POSIX and not os.access(fname, os.R_OK): # on some systems, some important programs (e.g. sudo) will have # execute permissions but not read/write permissions. This enables # things with the SUID set to be run. Needs to come before _is_binary() # is called, because that function tries to read the file. 
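        # In short: scripts that are executable but unreadable (setuid
        # helpers, for instance) and true binaries are handed to the OS
        # unchanged; on Windows anything whose extension is in $PATHEXT is
        # also run directly; otherwise the shebang line is parsed and, when
        # none is found, the file is assumed to be a xonsh script and run
        # with 'xonsh'.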
return [fname] + args elif _is_binary(fname): # if the file is a binary, we should call it directly return [fname] + args if ON_WINDOWS: # Windows can execute various filetypes directly # as given in PATHEXT _, ext = os.path.splitext(fname) if ext.upper() in builtins.__xonsh_env__.get('PATHEXT'): return [fname] + args # find interpreter with open(fname, 'rb') as f: first_line = f.readline().decode().strip() m = RE_SHEBANG.match(first_line) # xonsh is the default interpreter if m is None: interp = ['xonsh'] else: interp = m.group(1).strip() if len(interp) > 0: interp = shlex.split(interp) else: interp = ['xonsh'] if ON_WINDOWS: o = [] for i in interp: o.extend(_un_shebang(i)) interp = o return interp + [fname] + args @lazyobject def _REDIR_REGEX(): name = "(o(?:ut)?|e(?:rr)?|a(?:ll)?|&?\d?)" return re.compile("{r}(>?>|<){r}$".format(r=name)) _MODES = LazyObject(lambda: {'>>': 'a', '>': 'w', '<': 'r'}, globals(), '_MODES') _WRITE_MODES = LazyObject(lambda: frozenset({'w', 'a'}), globals(), '_WRITE_MODES') _REDIR_ALL = LazyObject(lambda: frozenset({'&', 'a', 'all'}), globals(), '_REDIR_ALL') _REDIR_ERR = LazyObject(lambda: frozenset({'2', 'e', 'err'}), globals(), '_REDIR_ERR') _REDIR_OUT = LazyObject(lambda: frozenset({'', '1', 'o', 'out'}), globals(), '_REDIR_OUT') _E2O_MAP = LazyObject(lambda: frozenset({'{}>{}'.format(e, o) for e in _REDIR_ERR for o in _REDIR_OUT if o != ''}), globals(), '_E2O_MAP') _O2E_MAP = LazyObject(lambda: frozenset({'{}>{}'.format(o, e) for e in _REDIR_ERR for o in _REDIR_OUT if o != ''}), globals(), '_O2E_MAP') def _is_redirect(x): return isinstance(x, str) and _REDIR_REGEX.match(x) def safe_open(fname, mode, buffering=-1): """Safely attempts to open a file in for xonsh subprocs.""" # file descriptors try: return io.open(fname, mode, buffering=buffering) except PermissionError: raise XonshError('xonsh: {0}: permission denied'.format(fname)) except FileNotFoundError: raise XonshError('xonsh: {0}: no such file or directory'.format(fname)) except Exception: raise XonshError('xonsh: {0}: unable to open file'.format(fname)) def safe_close(x): """Safely attempts to close an object.""" if not isinstance(x, io.IOBase): return if x.closed: return try: x.close() except Exception: pass def _parse_redirects(r, loc=None): """returns origin, mode, destination tuple""" orig, mode, dest = _REDIR_REGEX.match(r).groups() # redirect to fd if dest.startswith('&'): try: dest = int(dest[1:]) if loc is None: loc, dest = dest, '' # NOQA else: e = 'Unrecognized redirection command: {}'.format(r) raise XonshError(e) except (ValueError, XonshError): raise except Exception: pass mode = _MODES.get(mode, None) if mode == 'r' and (len(orig) > 0 or len(dest) > 0): raise XonshError('Unrecognized redirection command: {}'.format(r)) elif mode in _WRITE_MODES and len(dest) > 0: raise XonshError('Unrecognized redirection command: {}'.format(r)) return orig, mode, dest def _redirect_streams(r, loc=None): """Returns stdin, stdout, stderr tuple of redirections.""" stdin = stdout = stderr = None no_ampersand = r.replace('&', '') # special case of redirecting stderr to stdout if no_ampersand in _E2O_MAP: stderr = subprocess.STDOUT return stdin, stdout, stderr elif no_ampersand in _O2E_MAP: stdout = 2 # using 2 as a flag, rather than using a file object return stdin, stdout, stderr # get streams orig, mode, dest = _parse_redirects(r) if mode == 'r': stdin = safe_open(loc, mode) elif mode in _WRITE_MODES: if orig in _REDIR_ALL: stdout = stderr = safe_open(loc, mode) elif orig in _REDIR_OUT: stdout = 
safe_open(loc, mode) elif orig in _REDIR_ERR: stderr = safe_open(loc, mode) else: raise XonshError('Unrecognized redirection command: {}'.format(r)) else: raise XonshError('Unrecognized redirection command: {}'.format(r)) return stdin, stdout, stderr def default_signal_pauser(n, f): """Pauses a signal, as needed.""" signal.pause() def no_pg_xonsh_preexec_fn(): """Default subprocess preexec function for when there is no existing pipeline group. """ os.setpgrp() signal.signal(signal.SIGTSTP, default_signal_pauser) class SubprocSpec: """A container for specifying how a subprocess command should be executed. """ kwnames = ('stdin', 'stdout', 'stderr', 'universal_newlines') def __init__(self, cmd, *, cls=subprocess.Popen, stdin=None, stdout=None, stderr=None, universal_newlines=False, captured=False): """ Parameters ---------- cmd : list of str Command to be run. cls : Popen-like Class to run the subprocess with. stdin : file-like Popen file descriptor or flag for stdin. stdout : file-like Popen file descriptor or flag for stdout. stderr : file-like Popen file descriptor or flag for stderr. universal_newlines : bool Whether or not to use universal newlines. captured : bool or str, optional The flag for if the subprocess is captured, may be one of: False for $[], 'stdout' for $(), 'hiddenobject' for ![], or 'object' for !(). Attributes ---------- args : list of str Arguments as originally supplied. alias : list of str, callable, or None The alias that was resolved for this command, if any. binary_loc : str or None Path to binary to execute. is_proxy : bool Whether or not the subprocess is or should be run as a proxy. background : bool Whether or not the subprocess should be started in the background. threadable : bool Whether or not the subprocess is able to be run in a background thread, rather than the main thread. last_in_pipeline : bool Whether the subprocess is the last in the execution pipeline. 
captured_stdout : file-like Handle to captured stdin captured_stderr : file-like Handle to captured stderr """ self._stdin = self._stdout = self._stderr = None # args self.cmd = list(cmd) self.cls = cls self.stdin = stdin self.stdout = stdout self.stderr = stderr self.universal_newlines = universal_newlines self.captured = captured # pure attrs self.args = list(cmd) self.alias = None self.binary_loc = None self.is_proxy = False self.background = False self.threadable = True self.last_in_pipeline = False self.captured_stdout = None self.captured_stderr = None def __str__(self): s = self.__class__.__name__ + '(' + str(self.cmd) + ', ' s += self.cls.__name__ + ', ' kws = [n + '=' + str(getattr(self, n)) for n in self.kwnames] s += ', '.join(kws) + ')' return s def __repr__(self): s = self.__class__.__name__ + '(' + repr(self.cmd) + ', ' s += self.cls.__name__ + ', ' kws = [n + '=' + repr(getattr(self, n)) for n in self.kwnames] s += ', '.join(kws) + ')' return s # # Properties # @property def stdin(self): return self._stdin @stdin.setter def stdin(self, value): if self._stdin is None: self._stdin = value elif value is None: pass else: safe_close(value) msg = 'Multiple inputs for stdin for {0!r}' msg = msg.format(' '.join(self.args)) raise XonshError(msg) @property def stdout(self): return self._stdout @stdout.setter def stdout(self, value): if self._stdout is None: self._stdout = value elif value is None: pass else: safe_close(value) msg = 'Multiple redirections for stdout for {0!r}' msg = msg.format(' '.join(self.args)) raise XonshError(msg) @property def stderr(self): return self._stderr @stderr.setter def stderr(self, value): if self._stderr is None: self._stderr = value elif value is None: pass else: safe_close(value) msg = 'Multiple redirections for stderr for {0!r}' msg = msg.format(' '.join(self.args)) raise XonshError(msg) # # Execution methods # def run(self, *, pipeline_group=None): """Launches the subprocess and returns the object.""" kwargs = {n: getattr(self, n) for n in self.kwnames} self.prep_env(kwargs) self.prep_preexec_fn(kwargs, pipeline_group=pipeline_group) if callable(self.alias): if 'preexec_fn' in kwargs: kwargs.pop('preexec_fn') p = self.cls(self.alias, self.cmd, **kwargs) else: self._fix_null_cmd_bytes() p = self._run_binary(kwargs) p.spec = self p.last_in_pipeline = self.last_in_pipeline p.captured_stdout = self.captured_stdout p.captured_stderr = self.captured_stderr return p def _run_binary(self, kwargs): try: bufsize = 1 p = self.cls(self.cmd, bufsize=bufsize, **kwargs) except PermissionError: e = 'xonsh: subprocess mode: permission denied: {0}' raise XonshError(e.format(self.cmd[0])) except FileNotFoundError: cmd0 = self.cmd[0] e = 'xonsh: subprocess mode: command not found: {0}'.format(cmd0) env = builtins.__xonsh_env__ sug = suggest_commands(cmd0, env, builtins.aliases) if len(sug.strip()) > 0: e += '\n' + suggest_commands(cmd0, env, builtins.aliases) raise XonshError(e) return p def prep_env(self, kwargs): """Prepares the environment to use in the subprocess.""" denv = builtins.__xonsh_env__.detype() if ON_WINDOWS: # Over write prompt variable as xonsh's $PROMPT does # not make much sense for other subprocs denv['PROMPT'] = '$P$G' kwargs['env'] = denv def prep_preexec_fn(self, kwargs, pipeline_group=None): """Prepares the 'preexec_fn' keyword argument""" if not ON_POSIX: return if not builtins.__xonsh_env__.get('XONSH_INTERACTIVE'): return if pipeline_group is None: xonsh_preexec_fn = no_pg_xonsh_preexec_fn else: def xonsh_preexec_fn(): """Preexec function 
bound to a pipeline group.""" os.setpgid(0, pipeline_group) signal.signal(signal.SIGTSTP, default_signal_pauser) kwargs['preexec_fn'] = xonsh_preexec_fn def _fix_null_cmd_bytes(self): # Popen does not accept null bytes in its input commands. # That doesn't stop some subprocesses from using them. Here we # escape them just in case. cmd = self.cmd for i in range(len(cmd)): cmd[i] = cmd[i].replace('\0', '\\0') # # Building methods # @classmethod def build(kls, cmd, *, cls=subprocess.Popen, **kwargs): """Creates an instance of the subprocess command, with any modifications and adjustments based on the actual cmd that was received. """ # modifications that do not alter cmds may come before creating instance spec = kls(cmd, cls=cls, **kwargs) # modifications that alter cmds must come after creating instance # perform initial redirects spec.redirect_leading() spec.redirect_trailing() # apply aliases spec.resolve_alias() spec.resolve_binary_loc() spec.resolve_auto_cd() spec.resolve_executable_commands() spec.resolve_alias_cls() return spec def redirect_leading(self): """Manage leading redirects such as with '< input.txt COMMAND'. """ while len(self.cmd) >= 3 and self.cmd[0] == '<': self.stdin = safe_open(self.cmd[1], 'r') self.cmd = self.cmd[2:] def redirect_trailing(self): """Manages trailing redirects.""" while True: cmd = self.cmd if len(cmd) >= 3 and _is_redirect(cmd[-2]): streams = _redirect_streams(cmd[-2], cmd[-1]) self.stdin, self.stdout, self.stderr = streams self.cmd = cmd[:-2] elif len(cmd) >= 2 and _is_redirect(cmd[-1]): streams = _redirect_streams(cmd[-1]) self.stdin, self.stdout, self.stderr = streams self.cmd = cmd[:-1] else: break def resolve_alias(self): """Sets alias in command, if applicable.""" cmd0 = self.cmd[0] if callable(cmd0): alias = cmd0 else: alias = builtins.aliases.get(cmd0, None) self.alias = alias def resolve_binary_loc(self): """Sets the binary location""" alias = self.alias if alias is None: binary_loc = locate_binary(self.cmd[0]) elif callable(alias): binary_loc = None else: binary_loc = locate_binary(alias[0]) self.binary_loc = binary_loc def resolve_auto_cd(self): """Implements AUTO_CD functionality.""" if not (self.alias is None and self.binary_loc is None and len(self.cmd) == 1 and builtins.__xonsh_env__.get('AUTO_CD') and os.path.isdir(self.cmd[0])): return self.cmd.insert(0, 'cd') self.alias = builtins.aliases.get('cd', None) def resolve_executable_commands(self): """Resolve command executables, if applicable.""" alias = self.alias if alias is None: pass elif callable(alias): self.cmd.pop(0) return else: self.cmd = alias + self.cmd[1:] # resolve any redirects the aliases may have applied self.redirect_leading() self.redirect_trailing() if self.binary_loc is None: return try: self.cmd = get_script_subproc_command(self.binary_loc, self.cmd[1:]) except PermissionError: e = 'xonsh: subprocess mode: permission denied: {0}' raise XonshError(e.format(self.cmd[0])) def resolve_alias_cls(self): """Determine which proxy class to run an alias with.""" alias = self.alias if not callable(alias): return self.is_proxy = True thable = getattr(alias, '__xonsh_threadable__', True) cls = ProcProxyThread if thable else ProcProxy self.cls = cls self.threadable = thable # also check capturability, while we are here cpable = getattr(alias, '__xonsh_capturable__', self.captured) self.captured = cpable def _safe_pipe_properties(fd, use_tty=False): """Makes sure that a pipe file descriptor properties are sane.""" if not use_tty: return # due to some weird, long standing issue in 
Python, PTYs come out # replacing newline \n with \r\n. This causes issues for raw unix # protocols, like git and ssh, which expect unix line endings. # see https://mail.python.org/pipermail/python-list/2013-June/650460.html # for more details and the following solution. props = termios.tcgetattr(fd) props[1] = props[1] & (~termios.ONLCR) | termios.ONLRET termios.tcsetattr(fd, termios.TCSANOW, props) def _update_last_spec(last): captured = last.captured last.last_in_pipeline = True if not captured: return callable_alias = callable(last.alias) if callable_alias: pass else: cmds_cache = builtins.__xonsh_commands_cache__ thable = (cmds_cache.predict_threadable(last.args) and cmds_cache.predict_threadable(last.cmd)) if captured and thable: last.cls = PopenThread elif not thable: # foreground processes should use Popen last.threadable = False if captured == 'object' or captured == 'hiddenobject': # CommandPipeline objects should not pipe stdout, stderr return # cannot used PTY pipes for aliases, for some dark reason, # and must use normal pipes instead. use_tty = ON_POSIX and not callable_alias # Do not set standard in! Popen is not a fan of redirections here # set standard out if last.stdout is not None: last.universal_newlines = True elif captured in STDOUT_CAPTURE_KINDS: last.universal_newlines = False r, w = os.pipe() last.stdout = safe_open(w, 'wb') last.captured_stdout = safe_open(r, 'rb') elif builtins.__xonsh_stdout_uncaptured__ is not None: last.universal_newlines = True last.stdout = builtins.__xonsh_stdout_uncaptured__ last.captured_stdout = last.stdout elif ON_WINDOWS and not callable_alias: last.universal_newlines = True last.stdout = None # must truly stream on windows last.captured_stdout = ConsoleParallelReader(1) else: last.universal_newlines = True r, w = pty.openpty() if use_tty else os.pipe() _safe_pipe_properties(w, use_tty=use_tty) last.stdout = safe_open(w, 'w') _safe_pipe_properties(r, use_tty=use_tty) last.captured_stdout = safe_open(r, 'r') # set standard error if last.stderr is not None: pass elif captured == 'object': r, w = os.pipe() last.stderr = safe_open(w, 'w') last.captured_stderr = safe_open(r, 'r') elif builtins.__xonsh_stderr_uncaptured__ is not None: last.stderr = builtins.__xonsh_stderr_uncaptured__ last.captured_stderr = last.stderr elif ON_WINDOWS and not callable_alias: last.universal_newlines = True last.stderr = None # must truly stream on windows else: r, w = pty.openpty() if use_tty else os.pipe() _safe_pipe_properties(w, use_tty=use_tty) last.stderr = safe_open(w, 'w') _safe_pipe_properties(r, use_tty=use_tty) last.captured_stderr = safe_open(r, 'r') # redirect stdout to stderr, if we should if isinstance(last.stdout, int) and last.stdout == 2: # need to use private interface to avoid duplication. last._stdout = last.stderr def cmds_to_specs(cmds, captured=False): """Converts a list of cmds to a list of SubprocSpec objects that are ready to be executed. """ # first build the subprocs independently and separate from the redirects specs = [] redirects = [] for cmd in cmds: if isinstance(cmd, str): redirects.append(cmd) else: if cmd[-1] == '&': cmd = cmd[:-1] redirects.append('&') spec = SubprocSpec.build(cmd, captured=captured) specs.append(spec) # now modify the subprocs based on the redirects. for i, redirect in enumerate(redirects): if redirect == '|': # these should remain integer file descriptors, and not Python # file objects since they connect processes. 
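            # Illustrative data flow: for `ls | grep wakka`, cmds is
            # [['ls'], '|', ['grep', 'wakka']], so redirects becomes ['|']
            # and two SubprocSpec objects are built; the os.pipe() below
            # wires spec 0's stdout to spec 1's stdin as raw file
            # descriptors.  A trailing '&' in the last command instead marks
            # the final spec as a background job.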
            r, w = os.pipe()
            specs[i].stdout = w
            specs[i + 1].stdin = r
        elif redirect == '&' and i == len(redirects) - 1:
            specs[-1].background = True
        else:
            raise XonshError('unrecognized redirect {0!r}'.format(redirect))
    # Apply boundary conditions
    _update_last_spec(specs[-1])
    return specs


def _should_set_title(captured=False):
    env = builtins.__xonsh_env__
    return (env.get('XONSH_INTERACTIVE') and
            not env.get('XONSH_STORE_STDOUT') and
            captured not in STDOUT_CAPTURE_KINDS and
            hasattr(builtins, '__xonsh_shell__'))


def run_subproc(cmds, captured=False):
    """Runs a subprocess, in its many forms. This takes a list of 'commands,'
    which may be a list of command line arguments or a string, representing
    a special connecting character.  For example::

        $ ls | grep wakka

    is represented by the following cmds::

        [['ls'], '|', ['grep', 'wakka']]

    Lastly, the captured argument affects only the last real command.
    """
    specs = cmds_to_specs(cmds, captured=captured)
    captured = specs[-1].captured
    if captured == 'hiddenobject':
        command = HiddenCommandPipeline(specs)
    else:
        command = CommandPipeline(specs)
    proc = command.proc
    background = command.spec.background
    if not all(x.is_proxy for x in specs):
        add_job({
            'cmds': cmds,
            'pids': [i.pid for i in command.procs],
            'obj': proc,
            'bg': background,
            'pipeline': command,
            'pgrp': command.term_pgid,
        })
    if _should_set_title(captured=captured):
        # set title here to get currently executing command
        pause_call_resume(proc, builtins.__xonsh_shell__.settitle)
    # create command or return if backgrounding.
    if background:
        return
    # now figure out what we should return.
    if captured == 'stdout':
        command.end()
        return command.output
    elif captured == 'object':
        return command
    elif captured == 'hiddenobject':
        command.end()
        return command
    else:
        command.end()
        return


def subproc_captured_stdout(*cmds):
    """Runs a subprocess, capturing the output.

    Returns the stdout that was produced as a str.
    """
    return run_subproc(cmds, captured='stdout')


def subproc_captured_inject(*cmds):
    """Runs a subprocess, capturing the output. Returns a list of
    whitespace-separated strings of the stdout that was produced.
    The string is split using xonsh's lexer, rather than Python's
    str.split() or shlex.split().
    """
    s = run_subproc(cmds, captured='stdout')
    toks = builtins.__xonsh_execer__.parser.lexer.split(s.strip())
    return toks


def subproc_captured_object(*cmds):
    """
    Runs a subprocess, capturing the output.

    Returns an instance of CommandPipeline representing the completed command.
    """
    return run_subproc(cmds, captured='object')


def subproc_captured_hiddenobject(*cmds):
    """Runs a subprocess, capturing the output. Returns an instance of
    HiddenCommandPipeline representing the completed command.
    """
    return run_subproc(cmds, captured='hiddenobject')


def subproc_uncaptured(*cmds):
    """Runs a subprocess, without capturing the output. The output streams
    directly to the terminal and nothing is returned.
""" return run_subproc(cmds, captured=False) def ensure_list_of_strs(x): """Ensures that x is a list of strings.""" if isinstance(x, str): rtn = [x] elif isinstance(x, cabc.Sequence): rtn = [i if isinstance(i, str) else str(i) for i in x] else: rtn = [str(x)] return rtn def list_of_strs_or_callables(x): """Ensures that x is a list of strings or functions""" if isinstance(x, str) or callable(x): rtn = [x] elif isinstance(x, cabc.Iterable): rtn = [i if isinstance(i, str) or callable(i) else str(i) for i in x] else: rtn = [str(x)] return rtn @lazyobject def MACRO_FLAG_KINDS(): return { 's': str, 'str': str, 'string': str, 'a': AST, 'ast': AST, 'c': types.CodeType, 'code': types.CodeType, 'compile': types.CodeType, 'v': eval, 'eval': eval, 'x': exec, 'exec': exec, 't': type, 'type': type, } def _convert_kind_flag(x): """Puts a kind flag (string) a canonical form.""" x = x.lower() kind = MACRO_FLAG_KINDS.get(x, None) if kind is None: raise TypeError('{0!r} not a recognized macro type.'.format(x)) return kind def convert_macro_arg(raw_arg, kind, glbs, locs, *, name='', macroname=''): """Converts a string macro argument based on the requested kind. Parameters ---------- raw_arg : str The str representation of the macro argument. kind : object A flag or type representing how to convert the argument. glbs : Mapping The globals from the call site. locs : Mapping or None The locals from the call site. name : str, optional The macro argument name. macroname : str, optional The name of the macro itself. Returns ------- The converted argument. """ # munge kind and mode to start mode = None if isinstance(kind, cabc.Sequence) and not isinstance(kind, str): # have (kind, mode) tuple kind, mode = kind if isinstance(kind, str): kind = _convert_kind_flag(kind) if kind is str or kind is None: return raw_arg # short circuit since there is nothing else to do # select from kind and convert execer = builtins.__xonsh_execer__ filename = macroname + '(' + name + ')' if kind is AST: ctx = set(dir(builtins)) | set(glbs.keys()) if locs is not None: ctx |= set(locs.keys()) mode = mode or 'eval' arg = execer.parse(raw_arg, ctx, mode=mode, filename=filename) elif kind is types.CodeType or kind is compile: # NOQA mode = mode or 'eval' arg = execer.compile(raw_arg, mode=mode, glbs=glbs, locs=locs, filename=filename) elif kind is eval: arg = execer.eval(raw_arg, glbs=glbs, locs=locs, filename=filename) elif kind is exec: mode = mode or 'exec' if not raw_arg.endswith('\n'): raw_arg += '\n' arg = execer.exec(raw_arg, mode=mode, glbs=glbs, locs=locs, filename=filename) elif kind is type: arg = type(execer.eval(raw_arg, glbs=glbs, locs=locs, filename=filename)) else: msg = ('kind={0!r} and mode={1!r} was not recognized for macro ' 'argument {2!r}') raise TypeError(msg.format(kind, mode, name)) return arg @contextlib.contextmanager def in_macro_call(f, glbs, locs): """Attaches macro globals and locals temporarily to function as a context manager. Parameters ---------- f : callable object The function that is called as ``f(*args)``. glbs : Mapping The globals from the call site. locs : Mapping or None The locals from the call site. """ prev_glbs = getattr(f, 'macro_globals', None) prev_locs = getattr(f, 'macro_locals', None) f.macro_globals = glbs f.macro_locals = locs yield if prev_glbs is None: del f.macro_globals else: f.macro_globals = prev_glbs if prev_locs is None: del f.macro_locals else: f.macro_locals = prev_locs def call_macro(f, raw_args, glbs, locs): """Calls a function as a macro, returning its result. 
Parameters ---------- f : callable object The function that is called as ``f(*args)``. raw_args : tuple of str The str representation of arguments of that were passed into the macro. These strings will be parsed, compiled, evaled, or left as a string depending on the annotations of f. glbs : Mapping The globals from the call site. locs : Mapping or None The locals from the call site. """ sig = inspect.signature(f) empty = inspect.Parameter.empty macroname = f.__name__ i = 0 args = [] for (key, param), raw_arg in zip(sig.parameters.items(), raw_args): i += 1 if raw_arg == '*': break kind = param.annotation if kind is empty or kind is None: kind = str arg = convert_macro_arg(raw_arg, kind, glbs, locs, name=key, macroname=macroname) args.append(arg) reg_args, kwargs = _eval_regular_args(raw_args[i:], glbs, locs) args += reg_args with in_macro_call(f, glbs, locs): rtn = f(*args, **kwargs) return rtn @lazyobject def KWARG_RE(): return re.compile('([A-Za-z_]\w*=|\*\*)') def _starts_as_arg(s): """Tests if a string starts as a non-kwarg string would.""" return KWARG_RE.match(s) is None def _eval_regular_args(raw_args, glbs, locs): if not raw_args: return [], {} arglist = list(itertools.takewhile(_starts_as_arg, raw_args)) kwarglist = raw_args[len(arglist):] execer = builtins.__xonsh_execer__ if not arglist: args = arglist kwargstr = 'dict({})'.format(', '.join(kwarglist)) kwargs = execer.eval(kwargstr, glbs=glbs, locs=locs) elif not kwarglist: argstr = '({},)'.format(', '.join(arglist)) args = execer.eval(argstr, glbs=glbs, locs=locs) kwargs = {} else: argstr = '({},)'.format(', '.join(arglist)) kwargstr = 'dict({})'.format(', '.join(kwarglist)) both = '({}, {})'.format(argstr, kwargstr) args, kwargs = execer.eval(both, glbs=glbs, locs=locs) return args, kwargs def enter_macro(obj, raw_block, glbs, locs): """Prepares to enter a context manager macro by attaching the contents of the macro block, globals, and locals to the object. These modifications are made in-place and the original object is returned. Parameters ---------- obj : context manager The object that is about to be entered via a with-statement. raw_block : str The str of the block that is the context body. This string will be parsed, compiled, evaled, or left as a string depending on the return annotation of obj.__enter__. glbs : Mapping The globals from the context site. locs : Mapping or None The locals from the context site. Returns ------- obj : context manager The same context manager but with the new macro information applied. """ # recurse down sequences if isinstance(obj, cabc.Sequence): for x in obj: enter_macro(x, raw_block, glbs, locs) return obj # convert block as needed kind = getattr(obj, '__xonsh_block__', str) macroname = getattr(obj, '__name__', '') block = convert_macro_arg(raw_block, kind, glbs, locs, name='', macroname=macroname) # attach attrs obj.macro_globals = glbs obj.macro_locals = locs obj.macro_block = block return obj def load_builtins(execer=None, ctx=None): """Loads the xonsh builtins into the Python builtins. Sets the BUILTINS_LOADED variable to True. 
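    Among other things this installs the ``__xonsh_env__`` environment, the
    ``__xonsh_subproc_*__`` helpers used by compiled subprocess expressions,
    the default ``aliases`` mapping, and the ``events`` framework into the
    ``builtins`` module; see the assignments below.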
""" global BUILTINS_LOADED # private built-ins builtins.__xonsh_config__ = {} builtins.__xonsh_env__ = Env(default_env()) builtins.__xonsh_help__ = helper builtins.__xonsh_superhelp__ = superhelper builtins.__xonsh_pathsearch__ = pathsearch builtins.__xonsh_globsearch__ = globsearch builtins.__xonsh_regexsearch__ = regexsearch builtins.__xonsh_glob__ = globpath builtins.__xonsh_expand_path__ = expand_path builtins.__xonsh_exit__ = False builtins.__xonsh_stdout_uncaptured__ = None builtins.__xonsh_stderr_uncaptured__ = None if hasattr(builtins, 'exit'): builtins.__xonsh_pyexit__ = builtins.exit del builtins.exit if hasattr(builtins, 'quit'): builtins.__xonsh_pyquit__ = builtins.quit del builtins.quit builtins.__xonsh_subproc_captured_stdout__ = subproc_captured_stdout builtins.__xonsh_subproc_captured_inject__ = subproc_captured_inject builtins.__xonsh_subproc_captured_object__ = subproc_captured_object builtins.__xonsh_subproc_captured_hiddenobject__ = subproc_captured_hiddenobject builtins.__xonsh_subproc_uncaptured__ = subproc_uncaptured builtins.__xonsh_execer__ = execer builtins.__xonsh_commands_cache__ = CommandsCache() builtins.__xonsh_all_jobs__ = {} builtins.__xonsh_ensure_list_of_strs__ = ensure_list_of_strs builtins.__xonsh_list_of_strs_or_callables__ = list_of_strs_or_callables builtins.__xonsh_completers__ = xonsh.completers.init.default_completers() builtins.__xonsh_call_macro__ = call_macro builtins.__xonsh_enter_macro__ = enter_macro builtins.__xonsh_path_literal__ = path_literal # public built-ins builtins.XonshError = XonshError builtins.XonshCalledProcessError = XonshCalledProcessError builtins.evalx = None if execer is None else execer.eval builtins.execx = None if execer is None else execer.exec builtins.compilex = None if execer is None else execer.compile builtins.events = events # sneak the path search functions into the aliases # Need this inline/lazy import here since we use locate_binary that # relies on __xonsh_env__ in default aliases builtins.default_aliases = builtins.aliases = Aliases(make_default_aliases()) builtins.__xonsh_history__ = None atexit.register(_lastflush) for sig in AT_EXIT_SIGNALS: resetting_signal_handle(sig, _lastflush) BUILTINS_LOADED = True def _lastflush(s=None, f=None): if hasattr(builtins, '__xonsh_history__'): if builtins.__xonsh_history__ is not None: builtins.__xonsh_history__.flush(at_exit=True) def unload_builtins(): """Removes the xonsh builtins from the Python builtins, if the BUILTINS_LOADED is True, sets BUILTINS_LOADED to False, and returns. 
""" global BUILTINS_LOADED env = getattr(builtins, '__xonsh_env__', None) if isinstance(env, Env): env.undo_replace_env() if hasattr(builtins, '__xonsh_pyexit__'): builtins.exit = builtins.__xonsh_pyexit__ if hasattr(builtins, '__xonsh_pyquit__'): builtins.quit = builtins.__xonsh_pyquit__ if not BUILTINS_LOADED: return names = ['__xonsh_config__', '__xonsh_env__', '__xonsh_ctx__', '__xonsh_help__', '__xonsh_superhelp__', '__xonsh_pathsearch__', '__xonsh_globsearch__', '__xonsh_regexsearch__', '__xonsh_glob__', '__xonsh_expand_path__', '__xonsh_exit__', '__xonsh_stdout_uncaptured__', '__xonsh_stderr_uncaptured__', '__xonsh_pyexit__', '__xonsh_pyquit__', '__xonsh_subproc_captured_stdout__', '__xonsh_subproc_captured_inject__', '__xonsh_subproc_captured_object__', '__xonsh_subproc_captured_hiddenobject__', '__xonsh_subproc_uncaptured__', '__xonsh_execer__', '__xonsh_commands_cache__', '__xonsh_completers__', '__xonsh_call_macro__', '__xonsh_enter_macro__', '__xonsh_path_literal__', 'XonshError', 'XonshCalledProcessError', 'evalx', 'execx', 'compilex', 'default_aliases', '__xonsh_all_jobs__', '__xonsh_ensure_list_of_strs__', '__xonsh_list_of_strs_or_callables__', '__xonsh_history__', ] for name in names: if hasattr(builtins, name): delattr(builtins, name) BUILTINS_LOADED = False @contextlib.contextmanager def xonsh_builtins(execer=None): """A context manager for using the xonsh builtins only in a limited scope. Likely useful in testing. """ load_builtins(execer=execer) yield unload_builtins() xonsh-0.6.0/xonsh/codecache.py000066400000000000000000000143761320541242300162720ustar00rootroot00000000000000"""Tools for caching xonsh code.""" import os import sys import hashlib import marshal import builtins from xonsh import __version__ as XONSH_VERSION from xonsh.lazyasd import lazyobject from xonsh.platform import PYTHON_VERSION_INFO_BYTES def _splitpath(path, sofar=[]): folder, path = os.path.split(path) if path == "": return sofar[::-1] elif folder == "": return (sofar + [path])[::-1] else: return _splitpath(folder, sofar + [path]) @lazyobject def _CHARACTER_MAP(): cmap = {chr(o): '_%s' % chr(o+32) for o in range(65, 91)} cmap.update({'.': '_.', '_': '__'}) return cmap def _cache_renamer(path, code=False): if not code: path = os.path.abspath(path) o = [''.join(_CHARACTER_MAP.get(i, i) for i in w) for w in _splitpath(path)] o[-1] = "{}.{}".format(o[-1], sys.implementation.cache_tag) return o def _make_if_not_exists(dirname): if not os.path.isdir(dirname): os.makedirs(dirname) def should_use_cache(execer, mode): """ Return ``True`` if caching has been enabled for this mode (through command line flags or environment variables) """ if mode == 'exec': return ((execer.scriptcache or execer.cacheall) and (builtins.__xonsh_env__['XONSH_CACHE_SCRIPTS'] or builtins.__xonsh_env__['XONSH_CACHE_EVERYTHING'])) else: return (execer.cacheall or builtins.__xonsh_env__['XONSH_CACHE_EVERYTHING']) def run_compiled_code(code, glb, loc, mode): """ Helper to run code in a given mode and context """ if code is None: return if mode in {'exec', 'single'}: func = exec else: func = eval func(code, glb, loc) def get_cache_filename(fname, code=True): """ Return the filename of the cache for the given filename. Cache filenames are similar to those used by the Mercurial DVCS for its internal store. The ``code`` switch should be true if we should use the code store rather than the script store. 
""" datadir = builtins.__xonsh_env__['XONSH_DATA_DIR'] cachedir = os.path.join(datadir, 'xonsh_code_cache' if code else 'xonsh_script_cache') cachefname = os.path.join(cachedir, *_cache_renamer(fname, code=code)) return cachefname def update_cache(ccode, cache_file_name): """ Update the cache at ``cache_file_name`` to contain the compiled code represented by ``ccode``. """ if cache_file_name is not None: _make_if_not_exists(os.path.dirname(cache_file_name)) with open(cache_file_name, 'wb') as cfile: cfile.write(XONSH_VERSION.encode() + b'\n') cfile.write(bytes(PYTHON_VERSION_INFO_BYTES) + b'\n') marshal.dump(ccode, cfile) def _check_cache_versions(cfile): # version data should be < 1 kb ver = cfile.readline(1024).strip() if ver != XONSH_VERSION.encode(): return False ver = cfile.readline(1024).strip() return ver == PYTHON_VERSION_INFO_BYTES def compile_code(filename, code, execer, glb, loc, mode): """ Wrapper for ``execer.compile`` to compile the given code """ try: if not code.endswith('\n'): code += '\n' old_filename = execer.filename execer.filename = filename ccode = execer.compile(code, glbs=glb, locs=loc, mode=mode, filename=filename) except Exception: raise finally: execer.filename = old_filename return ccode def script_cache_check(filename, cachefname): """ Check whether the script cache for a particular file is valid. Returns a tuple containing: a boolean representing whether the cached code should be used, and the cached code (or ``None`` if the cache should not be used). """ ccode = None run_cached = False if os.path.isfile(cachefname): if os.stat(cachefname).st_mtime >= os.stat(filename).st_mtime: with open(cachefname, 'rb') as cfile: if not _check_cache_versions(cfile): return False, None ccode = marshal.load(cfile) run_cached = True return run_cached, ccode def run_script_with_cache(filename, execer, glb=None, loc=None, mode='exec'): """ Run a script, using a cached version if it exists (and the source has not changed), and updating the cache as necessary. """ run_cached = False use_cache = should_use_cache(execer, mode) cachefname = get_cache_filename(filename, code=False) if use_cache: run_cached, ccode = script_cache_check(filename, cachefname) if not run_cached: with open(filename, 'r') as f: code = f.read() ccode = compile_code(filename, code, execer, glb, loc, mode) update_cache(ccode, cachefname) run_compiled_code(ccode, glb, loc, mode) def code_cache_name(code): """ Return an appropriate spoofed filename for the given code. """ if isinstance(code, str): _code = code.encode() else: _code = code return hashlib.md5(_code).hexdigest() def code_cache_check(cachefname): """ Check whether the code cache for a particular piece of code is valid. Returns a tuple containing: a boolean representing whether the cached code should be used, and the cached code (or ``None`` if the cache should not be used). """ ccode = None run_cached = False if os.path.isfile(cachefname): with open(cachefname, 'rb') as cfile: if not _check_cache_versions(cfile): return False, None ccode = marshal.load(cfile) run_cached = True return run_cached, ccode def run_code_with_cache(code, execer, glb=None, loc=None, mode='exec'): """ Run a piece of code, using a cached version if it exists, and updating the cache as necessary. 
""" use_cache = should_use_cache(execer, mode) filename = code_cache_name(code) cachefname = get_cache_filename(filename, code=True) run_cached = False if use_cache: run_cached, ccode = code_cache_check(cachefname) if not run_cached: ccode = compile_code(filename, code, execer, glb, loc, mode) update_cache(ccode, cachefname) run_compiled_code(ccode, glb, loc, mode) xonsh-0.6.0/xonsh/color_tools.py000066400000000000000000000251031320541242300167200ustar00rootroot00000000000000"""Tools for color handling in xonsh. This includes Convert values between RGB hex codes and xterm-256 color codes. Parts of this file were originally forked from Micah Elliott http://MicahElliott.com Copyright (C) 2011 Micah Elliott. All rights reserved. WTFPL http://sam.zoy.org/wtfpl/ """ import re import math from xonsh.lazyasd import lazyobject, LazyObject from xonsh.tools import deprecated RE_BACKGROUND = LazyObject(lambda: re.compile('(BG#|BGHEX|BACKGROUND)'), globals(), 'RE_BACKGROUND') @lazyobject def BASE_XONSH_COLORS(): return { 'BLACK': (0, 0, 0), 'RED': (170, 0, 0), 'GREEN': (0, 170, 0), 'YELLOW': (170, 85, 0), 'BLUE': (0, 0, 170), 'PURPLE': (170, 0, 170), 'CYAN': (0, 170, 170), 'WHITE': (170, 170, 170), 'INTENSE_BLACK': (85, 85, 85), 'INTENSE_RED': (255, 85, 85), 'INTENSE_GREEN': (85, 255, 85), 'INTENSE_YELLOW': (255, 255, 85), 'INTENSE_BLUE': (85, 85, 255), 'INTENSE_PURPLE': (255, 85, 255), 'INTENSE_CYAN': (85, 255, 255), 'INTENSE_WHITE': (255, 255, 255), } @lazyobject def CLUT(): """color look-up table""" return [ # 8-bit, RGB hex # Primary 3-bit (8 colors). Unique representation! ('00', '000000'), ('01', '800000'), ('02', '008000'), ('03', '808000'), ('04', '000080'), ('05', '800080'), ('06', '008080'), ('07', 'c0c0c0'), # Equivalent "bright" versions of original 8 colors. ('08', '808080'), ('09', 'ff0000'), ('10', '00ff00'), ('11', 'ffff00'), ('12', '0000ff'), ('13', 'ff00ff'), ('14', '00ffff'), ('15', 'ffffff'), # Strictly ascending. 
('16', '000000'), ('17', '00005f'), ('18', '000087'), ('19', '0000af'), ('20', '0000d7'), ('21', '0000ff'), ('22', '005f00'), ('23', '005f5f'), ('24', '005f87'), ('25', '005faf'), ('26', '005fd7'), ('27', '005fff'), ('28', '008700'), ('29', '00875f'), ('30', '008787'), ('31', '0087af'), ('32', '0087d7'), ('33', '0087ff'), ('34', '00af00'), ('35', '00af5f'), ('36', '00af87'), ('37', '00afaf'), ('38', '00afd7'), ('39', '00afff'), ('40', '00d700'), ('41', '00d75f'), ('42', '00d787'), ('43', '00d7af'), ('44', '00d7d7'), ('45', '00d7ff'), ('46', '00ff00'), ('47', '00ff5f'), ('48', '00ff87'), ('49', '00ffaf'), ('50', '00ffd7'), ('51', '00ffff'), ('52', '5f0000'), ('53', '5f005f'), ('54', '5f0087'), ('55', '5f00af'), ('56', '5f00d7'), ('57', '5f00ff'), ('58', '5f5f00'), ('59', '5f5f5f'), ('60', '5f5f87'), ('61', '5f5faf'), ('62', '5f5fd7'), ('63', '5f5fff'), ('64', '5f8700'), ('65', '5f875f'), ('66', '5f8787'), ('67', '5f87af'), ('68', '5f87d7'), ('69', '5f87ff'), ('70', '5faf00'), ('71', '5faf5f'), ('72', '5faf87'), ('73', '5fafaf'), ('74', '5fafd7'), ('75', '5fafff'), ('76', '5fd700'), ('77', '5fd75f'), ('78', '5fd787'), ('79', '5fd7af'), ('80', '5fd7d7'), ('81', '5fd7ff'), ('82', '5fff00'), ('83', '5fff5f'), ('84', '5fff87'), ('85', '5fffaf'), ('86', '5fffd7'), ('87', '5fffff'), ('88', '870000'), ('89', '87005f'), ('90', '870087'), ('91', '8700af'), ('92', '8700d7'), ('93', '8700ff'), ('94', '875f00'), ('95', '875f5f'), ('96', '875f87'), ('97', '875faf'), ('98', '875fd7'), ('99', '875fff'), ('100', '878700'), ('101', '87875f'), ('102', '878787'), ('103', '8787af'), ('104', '8787d7'), ('105', '8787ff'), ('106', '87af00'), ('107', '87af5f'), ('108', '87af87'), ('109', '87afaf'), ('110', '87afd7'), ('111', '87afff'), ('112', '87d700'), ('113', '87d75f'), ('114', '87d787'), ('115', '87d7af'), ('116', '87d7d7'), ('117', '87d7ff'), ('118', '87ff00'), ('119', '87ff5f'), ('120', '87ff87'), ('121', '87ffaf'), ('122', '87ffd7'), ('123', '87ffff'), ('124', 'af0000'), ('125', 'af005f'), ('126', 'af0087'), ('127', 'af00af'), ('128', 'af00d7'), ('129', 'af00ff'), ('130', 'af5f00'), ('131', 'af5f5f'), ('132', 'af5f87'), ('133', 'af5faf'), ('134', 'af5fd7'), ('135', 'af5fff'), ('136', 'af8700'), ('137', 'af875f'), ('138', 'af8787'), ('139', 'af87af'), ('140', 'af87d7'), ('141', 'af87ff'), ('142', 'afaf00'), ('143', 'afaf5f'), ('144', 'afaf87'), ('145', 'afafaf'), ('146', 'afafd7'), ('147', 'afafff'), ('148', 'afd700'), ('149', 'afd75f'), ('150', 'afd787'), ('151', 'afd7af'), ('152', 'afd7d7'), ('153', 'afd7ff'), ('154', 'afff00'), ('155', 'afff5f'), ('156', 'afff87'), ('157', 'afffaf'), ('158', 'afffd7'), ('159', 'afffff'), ('160', 'd70000'), ('161', 'd7005f'), ('162', 'd70087'), ('163', 'd700af'), ('164', 'd700d7'), ('165', 'd700ff'), ('166', 'd75f00'), ('167', 'd75f5f'), ('168', 'd75f87'), ('169', 'd75faf'), ('170', 'd75fd7'), ('171', 'd75fff'), ('172', 'd78700'), ('173', 'd7875f'), ('174', 'd78787'), ('175', 'd787af'), ('176', 'd787d7'), ('177', 'd787ff'), ('178', 'd7af00'), ('179', 'd7af5f'), ('180', 'd7af87'), ('181', 'd7afaf'), ('182', 'd7afd7'), ('183', 'd7afff'), ('184', 'd7d700'), ('185', 'd7d75f'), ('186', 'd7d787'), ('187', 'd7d7af'), ('188', 'd7d7d7'), ('189', 'd7d7ff'), ('190', 'd7ff00'), ('191', 'd7ff5f'), ('192', 'd7ff87'), ('193', 'd7ffaf'), ('194', 'd7ffd7'), ('195', 'd7ffff'), ('196', 'ff0000'), ('197', 'ff005f'), ('198', 'ff0087'), ('199', 'ff00af'), ('200', 'ff00d7'), ('201', 'ff00ff'), ('202', 'ff5f00'), ('203', 'ff5f5f'), ('204', 'ff5f87'), ('205', 'ff5faf'), ('206', 'ff5fd7'), ('207', 
'ff5fff'), ('208', 'ff8700'), ('209', 'ff875f'), ('210', 'ff8787'), ('211', 'ff87af'), ('212', 'ff87d7'), ('213', 'ff87ff'), ('214', 'ffaf00'), ('215', 'ffaf5f'), ('216', 'ffaf87'), ('217', 'ffafaf'), ('218', 'ffafd7'), ('219', 'ffafff'), ('220', 'ffd700'), ('221', 'ffd75f'), ('222', 'ffd787'), ('223', 'ffd7af'), ('224', 'ffd7d7'), ('225', 'ffd7ff'), ('226', 'ffff00'), ('227', 'ffff5f'), ('228', 'ffff87'), ('229', 'ffffaf'), ('230', 'ffffd7'), ('231', 'ffffff'), # Gray-scale range. ('232', '080808'), ('233', '121212'), ('234', '1c1c1c'), ('235', '262626'), ('236', '303030'), ('237', '3a3a3a'), ('238', '444444'), ('239', '4e4e4e'), ('240', '585858'), ('241', '626262'), ('242', '6c6c6c'), ('243', '767676'), ('244', '808080'), ('245', '8a8a8a'), ('246', '949494'), ('247', '9e9e9e'), ('248', 'a8a8a8'), ('249', 'b2b2b2'), ('250', 'bcbcbc'), ('251', 'c6c6c6'), ('252', 'd0d0d0'), ('253', 'dadada'), ('254', 'e4e4e4'), ('255', 'eeeeee'), ] def _str2hex(hexstr): return int(hexstr, 16) def _strip_hash(rgb): # Strip leading `#` if exists. if rgb.startswith('#'): rgb = rgb.lstrip('#') return rgb @lazyobject def SHORT_TO_RGB(): return dict(CLUT) @lazyobject def RGB_TO_SHORT(): return {v: k for k, v in SHORT_TO_RGB.items()} def short2rgb(short): return SHORT_TO_RGB[short] def rgb_to_256(rgb): """Find the closest ANSI 256 approximation to the given RGB value. >>> rgb2short('123456') ('23', '005f5f') >>> rgb2short('ffffff') ('231', 'ffffff') >>> rgb2short('0DADD6') # vimeo logo ('38', '00afd7') Parameters ---------- rgb : Hex code representing an RGB value, eg, 'abcdef' Returns ------- String between 0 and 255, compatible with xterm. """ rgb = rgb.lstrip('#') if len(rgb) == 0: return '0', '000000' incs = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff) # Break 6-char RGB code into 3 integer vals. parts = rgb_to_ints(rgb) res = [] for part in parts: i = 0 while i < len(incs)-1: s, b = incs[i], incs[i+1] # smaller, bigger if s <= part <= b: s1 = abs(s - part) b1 = abs(b - part) if s1 < b1: closest = s else: closest = b res.append(closest) break i += 1 res = ''.join([('%02.x' % i) for i in res]) equiv = RGB_TO_SHORT[res] return equiv, res rgb2short = rgb_to_256 @lazyobject def RE_RGB3(): return re.compile(r'(.)(.)(.)') @lazyobject def RE_RGB6(): return re.compile(r'(..)(..)(..)') def rgb_to_ints(rgb): if len(rgb) == 6: return tuple([int(h, 16) for h in RE_RGB6.split(rgb)[1:4]]) else: return tuple([int(h*2, 16) for h in RE_RGB3.split(rgb)[1:4]]) def color_dist(x, y): return math.sqrt((x[0]-y[0])**2 + (x[1]-y[1])**2 + (x[2]-y[2])**2) def find_closest_color(x, palette): return min(sorted(palette.keys())[::-1], key=lambda k: color_dist(x, palette[k])) def make_palette(strings): """Makes a color palette from a collection of strings.""" palette = {} for s in strings: while '#' in s: _, t = s.split('#', 1) t, _, s = t.partition(' ') palette[t] = rgb_to_ints(t) return palette @deprecated(deprecated_in='0.5.10', removed_in='0.6.0') def make_pallete(*args, **kwargs): make_palette(*args, **kwargs) xonsh-0.6.0/xonsh/commands_cache.py000066400000000000000000000325441320541242300173150ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Module for caching command & alias names as well as for predicting whether a command will be able to be run in the background. A background predictor is a function that accepts a single argument list and returns whether or not the process can be run in the background (returns True) or must be run the foreground (returns False). 
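A predictor can be as small as the sketch below (hypothetical command
name; the shipped defaults live in ``default_threadable_predictors``):

    import builtins

    def predict_mytool(args):
        # threadable unless an interactive UI was requested
        return '--interactive' not in args

    builtins.__xonsh_commands_cache__.threadable_predictors['mytool'] = predict_mytool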
""" import os import time import builtins import argparse import collections.abc as cabc from xonsh.platform import ON_WINDOWS, ON_POSIX, pathbasename from xonsh.tools import executables_in from xonsh.lazyasd import lazyobject class CommandsCache(cabc.Mapping): """A lazy cache representing the commands available on the file system. The keys are the command names and the values a tuple of (loc, has_alias) where loc is either a str pointing to the executable on the file system or None (if no executable exists) and has_alias is a boolean flag for whether the command has an alias. """ def __init__(self): self._cmds_cache = {} self._path_checksum = None self._alias_checksum = None self._path_mtime = -1 self.threadable_predictors = default_threadable_predictors() def __contains__(self, key): _ = self.all_commands return self.lazyin(key) def __iter__(self): for cmd, (path, is_alias) in self.all_commands.items(): if ON_WINDOWS and path is not None: # All command keys are stored in uppercase on Windows. # This ensures the original command name is returned. cmd = pathbasename(path) yield cmd def __len__(self): return len(self.all_commands) def __getitem__(self, key): _ = self.all_commands return self.lazyget(key) def is_empty(self): """Returns whether the cache is populated or not.""" return len(self._cmds_cache) == 0 @staticmethod def get_possible_names(name): """Generates the possible `PATHEXT` extension variants of a given executable name on Windows as a list, conserving the ordering in `PATHEXT`. Returns a list as `name` being the only item in it on other platforms.""" if ON_WINDOWS: pathext = builtins.__xonsh_env__.get('PATHEXT') name = name.upper() return [ name + ext for ext in ([''] + pathext) ] else: return [name] @property def all_commands(self): paths = builtins.__xonsh_env__.get('PATH', []) pathset = frozenset(x for x in paths if os.path.isdir(x)) # did PATH change? path_hash = hash(pathset) cache_valid = path_hash == self._path_checksum self._path_checksum = path_hash # did aliases change? alss = getattr(builtins, 'aliases', dict()) al_hash = hash(frozenset(alss)) cache_valid = cache_valid and al_hash == self._alias_checksum self._alias_checksum = al_hash # did the contents of any directory in PATH change? max_mtime = 0 for path in pathset: mtime = os.stat(path).st_mtime if mtime > max_mtime: max_mtime = mtime cache_valid = cache_valid and (max_mtime <= self._path_mtime) self._path_mtime = max_mtime if cache_valid: return self._cmds_cache allcmds = {} for path in reversed(paths): # iterate backwards so that entries at the front of PATH overwrite # entries at the back. for cmd in executables_in(path): key = cmd.upper() if ON_WINDOWS else cmd allcmds[key] = (os.path.join(path, cmd), alss.get(key, None)) for cmd in alss: if cmd not in allcmds: key = cmd.upper() if ON_WINDOWS else cmd allcmds[key] = (cmd, True) self._cmds_cache = allcmds return allcmds def cached_name(self, name): """Returns the name that would appear in the cache, if it exists.""" if name is None: return None cached = pathbasename(name) if ON_WINDOWS: keys = self.get_possible_names(cached) cached = next((k for k in keys if k in self._cmds_cache), None) return cached def lazyin(self, key): """Checks if the value is in the current cache without the potential to update the cache. It just says whether the value is known *now*. This may not reflect precisely what is on the $PATH. 
""" return self.cached_name(key) in self._cmds_cache def lazyiter(self): """Returns an iterator over the current cache contents without the potential to update the cache. This may not reflect what is on the $PATH. """ return iter(self._cmds_cache) def lazylen(self): """Returns the length of the current cache contents without the potential to update the cache. This may not reflect precisely what is on the $PATH. """ return len(self._cmds_cache) def lazyget(self, key, default=None): """A lazy value getter.""" return self._cmds_cache.get(self.cached_name(key), default) def locate_binary(self, name, ignore_alias=False): """Locates an executable on the file system using the cache. Arguments --------- name : str name of binary to search for ignore_alias : bool, optional Force return of binary path even if alias of ``name`` exists (default ``False``) """ # make sure the cache is up to date by accessing the property _ = self.all_commands return self.lazy_locate_binary(name, ignore_alias) def lazy_locate_binary(self, name, ignore_alias=False): """Locates an executable in the cache, without checking its validity. Arguments --------- name : str name of binary to search for ignore_alias : bool, optional Force return of binary path even if alias of ``name`` exists (default ``False``) """ possibilities = self.get_possible_names(name) if ON_WINDOWS: # Windows users expect to be able to execute files in the same # directory without `./` local_bin = next((fn for fn in possibilities if os.path.isfile(fn)), None) if local_bin: return os.path.abspath(local_bin) cached = next((cmd for cmd in possibilities if cmd in self._cmds_cache), None) if cached: (path, alias) = self._cmds_cache[cached] if not alias or ignore_alias: return path else: return None elif os.path.isfile(name) and name != pathbasename(name): return name def predict_threadable(self, cmd): """Predicts whether a command list is able to be run on a background thread, rather than the main thread. """ name = self.cached_name(cmd[0]) predictors = self.threadable_predictors if ON_WINDOWS: # On all names (keys) are stored in upper case so instead # we get the original cmd or alias name path, _ = self.lazyget(name, (None, None)) if path is None: return True else: name = pathbasename(path) if name not in predictors: pre, ext = os.path.splitext(name) if pre in predictors: predictors[name] = predictors[pre] if name not in predictors: predictors[name] = self.default_predictor(name, cmd[0]) predictor = predictors[name] return predictor(cmd[1:]) # # Background Predictors (as methods) # def default_predictor(self, name, cmd0): if ON_POSIX: return self.default_predictor_readbin(name, cmd0, timeout=0.1, failure=predict_true) else: return predict_true def default_predictor_readbin(self, name, cmd0, timeout, failure): """Make a default predictor by analyzing the content of the binary. Should only works on POSIX. Return failure if the analysis fails. 
""" fname = cmd0 if os.path.isabs(cmd0) else None fname = cmd0 if fname is None and os.sep in cmd0 else fname fname = self.lazy_locate_binary(name) if fname is None else fname if fname is None: return failure if not os.path.isfile(fname): return failure try: fd = os.open(fname, os.O_RDONLY | os.O_NONBLOCK) except Exception: return failure # opening error search_for = { (b'ncurses',): [False, ], (b'libgpm',): [False, ], (b'isatty', b'tcgetattr', b'tcsetattr'): [False, False, False], } tstart = time.time() block = b'' while time.time() < tstart + timeout: previous_block = block try: block = os.read(fd, 2048) except Exception: # should not occur, except e.g. if a file is deleted a a dir is # created with the same name between os.path.isfile and os.open os.close(fd) return failure if len(block) == 0: os.close(fd) return predict_true # no keys of search_for found analyzed_block = previous_block + block for k, v in search_for.items(): for i in range(len(k)): if v[i]: continue if k[i] in analyzed_block: v[i] = True if all(v): os.close(fd) return predict_false # use one key of search_for os.close(fd) return failure # timeout # # Background Predictors # def predict_true(args): """Always say the process is threadable.""" return True def predict_false(args): """Never say the process is threadable.""" return False @lazyobject def SHELL_PREDICTOR_PARSER(): p = argparse.ArgumentParser('shell', add_help=False) p.add_argument('-c', nargs='?', default=None) p.add_argument('filename', nargs='?', default=None) return p def predict_shell(args): """Predict the backgroundability of the normal shell interface, which comes down to whether it is being run in subproc mode. """ ns, _ = SHELL_PREDICTOR_PARSER.parse_known_args(args) if ns.c is None and ns.filename is None: pred = False else: pred = True return pred @lazyobject def HELP_VER_PREDICTOR_PARSER(): p = argparse.ArgumentParser('cmd', add_help=False) p.add_argument('-h', '--help', dest='help', action='store_true', default=None) p.add_argument('-v', '-V', '--version', dest='version', action='store_true', default=None) return p def predict_help_ver(args): """Predict the backgroundability of commands that have help & version switches: -h, --help, -v, -V, --version. If either of these options is present, the command is assumed to print to stdout normally and is therefore threadable. Otherwise, the command is assumed to not be threadable. This is useful for commands, like top, that normally enter alternate mode but may not in certain circumstances. """ ns, _ = HELP_VER_PREDICTOR_PARSER.parse_known_args(args) pred = ns.help is not None or ns.version is not None return pred def default_threadable_predictors(): """Generates a new defaultdict for known threadable predictors. The default is to predict true. """ # alphabetical, for what it is worth. 
predictors = { 'bash': predict_shell, 'csh': predict_shell, 'clear': predict_false, 'cls': predict_false, 'cmd': predict_shell, 'ex': predict_false, 'emacsclient': predict_false, 'fish': predict_shell, 'gvim': predict_help_ver, 'htop': predict_help_ver, 'ipython': predict_shell, 'ksh': predict_shell, 'less': predict_help_ver, 'man': predict_help_ver, 'more': predict_help_ver, 'mvim': predict_help_ver, 'mutt': predict_help_ver, 'nano': predict_help_ver, 'nvim': predict_false, 'ponysay': predict_help_ver, 'psql': predict_false, 'python': predict_shell, 'python2': predict_shell, 'python3': predict_shell, 'repo': predict_help_ver, 'ranger': predict_help_ver, 'rview': predict_false, 'rvim': predict_false, 'scp': predict_false, 'sh': predict_shell, 'ssh': predict_false, 'startx': predict_false, 'sudo': predict_help_ver, 'tcsh': predict_shell, 'telnet': predict_false, 'top': predict_help_ver, 'vi': predict_false, 'view': predict_false, 'vim': predict_false, 'vimpager': predict_help_ver, 'weechat': predict_help_ver, 'xclip': predict_help_ver, 'xo': predict_help_ver, 'xonsh': predict_shell, 'xon.sh': predict_shell, 'zsh': predict_shell, } return predictors xonsh-0.6.0/xonsh/completer.py000066400000000000000000000030731320541242300163560ustar00rootroot00000000000000# -*- coding: utf-8 -*- """A (tab-)completer for xonsh.""" import builtins import collections.abc as cabc class Completer(object): """This provides a list of optional completions for the xonsh shell.""" def complete(self, prefix, line, begidx, endidx, ctx=None): """Complete the string, given a possible execution context. Parameters ---------- prefix : str The string to match line : str The line that prefix appears on. begidx : int The index in line that prefix starts on. endidx : int The index in line that prefix ends on. ctx : Iterable of str (ie dict, set, etc), optional Names in the current execution context. Returns ------- rtn : list of str Possible completions of prefix, sorted alphabetically. lprefix : int Length of the prefix to be replaced in the completion. 
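        Illustrative example (assumes the xonsh builtins, and therefore
        ``builtins.__xonsh_completers__``, are loaded; output depends on the
        current directory):

            >>> c = Completer()
            >>> c.complete('fo', 'ls fo', 3, 5, ctx={})  # doctest: +SKIP
            (('foo.txt ', 'foo/'), 2)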
""" ctx = ctx or {} for func in builtins.__xonsh_completers__.values(): try: out = func(prefix, line, begidx, endidx, ctx) except StopIteration: return set(), len(prefix) if isinstance(out, cabc.Sequence): res, lprefix = out else: res = out lprefix = len(prefix) if res is not None and len(res) != 0: def sortkey(s): return s.lstrip(''''"''').lower() return tuple(sorted(res, key=sortkey)), lprefix return set(), lprefix xonsh-0.6.0/xonsh/completers/000077500000000000000000000000001320541242300161645ustar00rootroot00000000000000xonsh-0.6.0/xonsh/completers/__init__.py000066400000000000000000000030151320541242300202740ustar00rootroot00000000000000# amalgamate exclude import os as _os if _os.getenv('XONSH_DEBUG', ''): pass else: import sys as _sys try: from xonsh.completers import __amalgam__ bash_completion = __amalgam__ _sys.modules['xonsh.completers.bash_completion'] = __amalgam__ completer = __amalgam__ _sys.modules['xonsh.completers.completer'] = __amalgam__ pip = __amalgam__ _sys.modules['xonsh.completers.pip'] = __amalgam__ tools = __amalgam__ _sys.modules['xonsh.completers.tools'] = __amalgam__ xompletions = __amalgam__ _sys.modules['xonsh.completers.xompletions'] = __amalgam__ _aliases = __amalgam__ _sys.modules['xonsh.completers._aliases'] = __amalgam__ commands = __amalgam__ _sys.modules['xonsh.completers.commands'] = __amalgam__ man = __amalgam__ _sys.modules['xonsh.completers.man'] = __amalgam__ path = __amalgam__ _sys.modules['xonsh.completers.path'] = __amalgam__ python = __amalgam__ _sys.modules['xonsh.completers.python'] = __amalgam__ base = __amalgam__ _sys.modules['xonsh.completers.base'] = __amalgam__ bash = __amalgam__ _sys.modules['xonsh.completers.bash'] = __amalgam__ dirs = __amalgam__ _sys.modules['xonsh.completers.dirs'] = __amalgam__ init = __amalgam__ _sys.modules['xonsh.completers.init'] = __amalgam__ del __amalgam__ except ImportError: pass del _sys del _os # amalgamate end xonsh-0.6.0/xonsh/completers/_aliases.py000066400000000000000000000147271320541242300203310ustar00rootroot00000000000000import inspect import builtins import collections import xonsh.lazyasd as xl from xonsh.completers.tools import justify VALID_ACTIONS = xl.LazyObject(lambda: frozenset({'add', 'remove', 'list'}), globals(), 'VALID_ACTIONS') def _add_one_completer(name, func, loc='end'): new = collections.OrderedDict() if loc == 'start': new[name] = func for (k, v) in builtins.__xonsh_completers__.items(): new[k] = v elif loc == 'end': for (k, v) in builtins.__xonsh_completers__.items(): new[k] = v new[name] = func else: direction, rel = loc[0], loc[1:] found = False for (k, v) in builtins.__xonsh_completers__.items(): if rel == k and direction == '<': new[name] = func found = True new[k] = v if rel == k and direction == '>': new[name] = func found = True if not found: new[name] = func builtins.__xonsh_completers__.clear() builtins.__xonsh_completers__.update(new) def _list_completers(args, stdin=None): o = "Registered Completer Functions: \n" _comp = builtins.__xonsh_completers__ ml = max((len(i) for i in _comp), default=0) _strs = [] for c in _comp: if _comp[c].__doc__ is None: doc = 'No description provided' else: doc = ' '.join(_comp[c].__doc__.split()) doc = justify(doc, 80, ml + 3) _strs.append('{: >{}} : {}'.format(c, ml, doc)) return o + '\n'.join(_strs) + '\n' def _remove_completer(args, stdin=None): err = None if len(args) != 1: err = "completer remove takes exactly 1 argument." 
else: name = args[0] if name not in builtins.__xonsh_completers__: err = ("The name %s is not a registered " "completer function.") % name if err is None: del builtins.__xonsh_completers__[name] return else: return None, err + '\n', 1 def _register_completer(args, stdin=None): err = None if len(args) not in {2, 3}: err = ("completer add takes either 2 or 3 arguments.\n" "For help, run: completer help add") else: name = args[0] func_name = args[1] if name in builtins.__xonsh_completers__: err = ("The name %s is already a registered " "completer function.") % name else: if func_name in builtins.__xonsh_ctx__: func = builtins.__xonsh_ctx__[func_name] if not callable(func): err = "%s is not callable" % func_name else: print(inspect.stack(context=0)) for frame_info in inspect.stack(context=0): frame = frame_info[0] if func_name in frame.f_locals: func = frame.f_locals[func_name] break elif func_name in frame.f_globals: func = frame.f_globals[func_name] break else: err = "No such function: %s" % func_name if err is None: position = "start" if len(args) == 2 else args[2] _add_one_completer(name, func, position) else: return None, err + '\n', 1 def completer_alias(args, stdin=None): err = None if len(args) == 0 or args[0] not in (VALID_ACTIONS | {'help'}): err = ('Please specify an action. Valid actions are: ' '"add", "remove", "list", or "help".') elif args[0] == 'help': if len(args) == 1 or args[1] not in VALID_ACTIONS: return ('Valid actions are: add, remove, list. For help with a ' 'specific action, run: completer help ACTION\n') elif args[1] == 'add': return COMPLETER_ADD_HELP_STR elif args[1] == 'remove': return COMPLETER_REMOVE_HELP_STR elif args[1] == 'list': return COMPLETER_LIST_HELP_STR if err is not None: return None, err + '\n', 1 if args[0] == 'add': func = _register_completer elif args[0] == 'remove': func = _remove_completer elif args[0] == 'list': func = _list_completers return func(args[1:], stdin=stdin) COMPLETER_LIST_HELP_STR = """completer list: ordered list the active completers Usage: completer remove """ COMPLETER_REMOVE_HELP_STR = """completer remove: removes a completer from xonsh Usage: completer remove NAME NAME is a unique name of a completer (run "completer list" to see the current completers in order) """ COMPLETER_ADD_HELP_STR = """completer add: adds a new completer to xonsh Usage: completer add NAME FUNC [POS] NAME is a unique name to use in the listing (run "completer list" to see the current completers in order) FUNC is the name of a completer function to use. This should be a function of the following arguments, and should return a set of valid completions for the given prefix. If this completer should not be used in a given context, it should return an empty set or None. Arguments to FUNC: * prefix: the string to be matched * line: a string representing the whole current line, for context * begidx: the index at which prefix starts in line * endidx: the index at which prefix ends in line * ctx: the current Python environment If the completer expands the prefix in any way, it should return a tuple of two elements: the first should be the set of completions, and the second should be the length of the modified prefix (for an example, see xonsh.completers.path.complete_path). POS (optional) is a position into the list of completers at which the new completer should be added. 
It can be one of the following values: * "start" indicates that the completer should be added to the start of the list of completers (it should be run before all others) * "end" indicates that the completer should be added to the end of the list of completers (it should be run after all others) * ">KEY", where KEY is a pre-existing name, indicates that this should be added after the completer named KEY * " /dev/null || echo "-F _minimal" }} _complete_stmt=$(_get_complete_statement) if echo "$_complete_stmt" | grep --quiet -e "_minimal" then declare -f _completion_loader > /dev/null && _completion_loader {cmd} _complete_stmt=$(_get_complete_statement) fi _func=$(echo "$_complete_stmt" | grep -o -e '-F \w\+' | cut -d ' ' -f 2) declare -f "$_func" > /dev/null || exit 1 echo "$_complete_stmt" COMP_WORDS=({line}) COMP_LINE={comp_line} COMP_POINT=${{#COMP_LINE}} COMP_COUNT={end} COMP_CWORD={n} $_func {cmd} {prefix} {prev} for ((i=0;i<${{#COMPREPLY[*]}};i++)) do echo ${{COMPREPLY[i]}}; done """ def bash_completions(prefix, line, begidx, endidx, env=None, paths=None, command=None, quote_paths=_bash_quote_paths, **kwargs): """Completes based on results from BASH completion. Parameters ---------- prefix : str The string to match line : str The line that prefix appears on. begidx : int The index in line that prefix starts on. endidx : int The index in line that prefix ends on. env : Mapping, optional The environment dict to execute the Bash subprocess in. paths : list or tuple of str or None, optional This is a list (or tuple) of strings that specifies where the ``bash_completion`` script may be found. The first valid path will be used. For better performance, bash-completion v2.x is recommended since it lazy-loads individual completion scripts. For both bash-completion v1.x and v2.x, paths of individual completion scripts (like ``.../completes/ssh``) do not need to be included here. The default values are platform dependent, but sane. command : str or None, optional The /path/to/bash to use. If None, it will be selected based on the from the environment and platform. quote_paths : callable, optional A functions that quotes file system paths. You shouldn't normally need this as the default is acceptable 99+% of the time. Returns ------- rtn : list of str Possible completions of prefix, sorted alphabetically. lprefix : int Length of the prefix to be replaced in the completion. 
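    Illustrative example; the actual results depend on the local Bash and
    bash-completion installation:

        >>> bash_completions('sta', 'git sta', 4, 7)  # doctest: +SKIP
        ({'stash ', 'status '}, 3)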
""" source = _get_bash_completions_source(paths) or set() if prefix.startswith('$'): # do not complete env variables return set(), 0 splt = line.split() cmd = splt[0] idx = n = 0 prev = '' for n, tok in enumerate(splt): if tok == prefix: idx = line.find(prefix, idx) if idx >= begidx: break prev = tok if len(prefix) == 0: prefix_quoted = '""' n += 1 else: prefix_quoted = shlex.quote(prefix) script = BASH_COMPLETE_SCRIPT.format( source=source, line=' '.join(shlex.quote(p) for p in splt), comp_line=shlex.quote(line), n=n, cmd=shlex.quote(cmd), end=endidx + 1, prefix=prefix_quoted, prev=shlex.quote(prev), ) if command is None: command = _bash_command(env=env) try: out = subprocess.check_output( [command, '-c', script], universal_newlines=True, stderr=subprocess.PIPE, env=env) if not out: raise ValueError except (subprocess.CalledProcessError, FileNotFoundError, UnicodeDecodeError, ValueError): return set(), 0 out = out.splitlines() complete_stmt = out[0] out = set(out[1:]) # From GNU Bash document: The results of the expansion are prefix-matched # against the word being completed # Ensure input to `commonprefix` is a list (now required by Python 3.6) commprefix = os.path.commonprefix(list(out)) strip_len = 0 while strip_len < len(prefix): if commprefix.startswith(prefix[strip_len:]): break strip_len += 1 if '-o noquote' not in complete_stmt: out = quote_paths(out, '', '') if '-o nospace' in complete_stmt: out = set([x.rstrip() for x in out]) return out, len(prefix) - strip_len xonsh-0.6.0/xonsh/completers/commands.py000066400000000000000000000033241320541242300203410ustar00rootroot00000000000000import os import builtins import xonsh.tools as xt import xonsh.platform as xp from xonsh.completers.tools import get_filter_function SKIP_TOKENS = {'sudo', 'time', 'timeit', 'which', 'showcmd', 'man'} END_PROC_TOKENS = {'|', '||', '&&', 'and', 'or'} def complete_command(cmd, line, start, end, ctx): """ Returns a list of valid commands starting with the first argument """ space = ' ' out = {s + space for s in builtins.__xonsh_commands_cache__ if get_filter_function()(s, cmd)} if xp.ON_WINDOWS: out |= {i for i in xt.executables_in('.') if i.startswith(cmd)} base = os.path.basename(cmd) if os.path.isdir(base): out |= {os.path.join(base, i) for i in xt.executables_in(base) if i.startswith(cmd)} return out def complete_skipper(cmd, line, start, end, ctx): """ Skip over several tokens (e.g., sudo) and complete based on the rest of the line. 
""" parts = line.split(' ') skip_part_num = 0 for i, s in enumerate(parts): if s in END_PROC_TOKENS: skip_part_num = i + 1 while len(parts) > skip_part_num: if parts[skip_part_num] not in SKIP_TOKENS: break skip_part_num += 1 if skip_part_num == 0: return set() if len(parts) == skip_part_num + 1: comp_func = complete_command else: comp = builtins.__xonsh_shell__.shell.completer comp_func = comp.complete skip_len = len(' '.join(line[:skip_part_num])) + 1 return comp_func(cmd, ' '.join(parts[skip_part_num:]), start - skip_len, end - skip_len, ctx) xonsh-0.6.0/xonsh/completers/completer.py000066400000000000000000000021111320541242300205230ustar00rootroot00000000000000import builtins def complete_completer(prefix, line, start, end, ctx): """ Completion for "completer" """ args = line.split(' ') if len(args) == 0 or args[0] != 'completer': return None curix = args.index(prefix) compnames = set(builtins.__xonsh_completers__.keys()) if curix == 1: possible = {'list', 'help', 'add', 'remove'} elif curix == 2: if args[1] == 'help': possible = {'list', 'add', 'remove'} elif args[1] == 'remove': possible = compnames else: raise StopIteration else: if args[1] != 'add': raise StopIteration if curix == 3: possible = {i for i, j in builtins.__xonsh_ctx__.items() if callable(j)} elif curix == 4: possible = ({'start', 'end'} | {'>' + n for n in compnames} | {'<' + n for n in compnames}) else: raise StopIteration return {i for i in possible if i.startswith(prefix)} xonsh-0.6.0/xonsh/completers/dirs.py000066400000000000000000000014111320541242300174740ustar00rootroot00000000000000from xonsh.completers.man import complete_from_man from xonsh.completers.path import complete_dir def complete_cd(prefix, line, start, end, ctx): """ Completion for "cd", includes only valid directory names. """ if start != 0 and line.split(' ')[0] == 'cd': return complete_dir(prefix, line, start, end, ctx, True) return set() def complete_rmdir(prefix, line, start, end, ctx): """ Completion for "rmdir", includes only valid directory names. 
""" if start != 0 and line.split(' ')[0] == 'rmdir': opts = {i for i in complete_from_man('-', 'rmdir -', 6, 7, ctx) if i.startswith(prefix)} comps, lp = complete_dir(prefix, line, start, end, ctx, True) return comps | opts, lp return set() xonsh-0.6.0/xonsh/completers/init.py000066400000000000000000000025241320541242300175040ustar00rootroot00000000000000"""Constructor for xonsh completer objects.""" import collections from xonsh.completers.pip import complete_pip from xonsh.completers.man import complete_from_man from xonsh.completers.bash import complete_from_bash from xonsh.completers.base import complete_base from xonsh.completers.path import complete_path from xonsh.completers.dirs import complete_cd, complete_rmdir from xonsh.completers.python import (complete_python, complete_import, complete_python_mode) from xonsh.completers.commands import complete_skipper from xonsh.completers.completer import complete_completer from xonsh.completers.xompletions import complete_xonfig, complete_xontrib def default_completers(): """Creates a copy of the default completers.""" return collections.OrderedDict([ ('python_mode', complete_python_mode), ('base', complete_base), ('completer', complete_completer), ('skip', complete_skipper), ('pip', complete_pip), ('xpip', complete_pip), ('cd', complete_cd), ('rmdir', complete_rmdir), ('xonfig', complete_xonfig), ('xontrib', complete_xontrib), ('bash', complete_from_bash), ('man', complete_from_man), ('import', complete_import), ('python', complete_python), ('path', complete_path), ]) xonsh-0.6.0/xonsh/completers/man.py000066400000000000000000000032541320541242300173150ustar00rootroot00000000000000import os import re import pickle import builtins import subprocess import xonsh.lazyasd as xl from xonsh.completers.tools import get_filter_function OPTIONS = None OPTIONS_PATH = None @xl.lazyobject def SCRAPE_RE(): return re.compile(r'^(?:\s*(?:-\w|--[a-z0-9-]+)[\s,])+', re.M) @xl.lazyobject def INNER_OPTIONS_RE(): return re.compile(r'-\w|--[a-z0-9-]+') def complete_from_man(prefix, line, start, end, ctx): """ Completes an option name, based on the contents of the associated man page. 
""" global OPTIONS, OPTIONS_PATH if OPTIONS is None: datadir = builtins.__xonsh_env__['XONSH_DATA_DIR'] OPTIONS_PATH = os.path.join(datadir, 'man_completions_cache') try: with open(OPTIONS_PATH, 'rb') as f: OPTIONS = pickle.load(f) except Exception: OPTIONS = {} if not prefix.startswith('-'): return set() cmd = line.split()[0] if cmd not in OPTIONS: try: manpage = subprocess.Popen( ["man", cmd], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) # This is a trick to get rid of reverse line feeds text = subprocess.check_output( ["col", "-b"], stdin=manpage.stdout) text = text.decode('utf-8') scraped_text = ' '.join(SCRAPE_RE.findall(text)) matches = INNER_OPTIONS_RE.findall(scraped_text) OPTIONS[cmd] = matches with open(OPTIONS_PATH, 'wb') as f: pickle.dump(OPTIONS, f) except Exception: return set() return {s for s in OPTIONS[cmd] if get_filter_function()(s, prefix)} xonsh-0.6.0/xonsh/completers/path.py000066400000000000000000000230461320541242300174770ustar00rootroot00000000000000import os import re import ast import glob import builtins import xonsh.tools as xt import xonsh.platform as xp import xonsh.lazyasd as xl from xonsh.completers.tools import get_filter_function @xl.lazyobject def PATTERN_NEED_QUOTES(): pattern = r'\s`\$\{\}\,\*\(\)"\'\?&' if xp.ON_WINDOWS: pattern += '%' pattern = '[' + pattern + ']' + r'|\band\b|\bor\b' return re.compile(pattern) def _path_from_partial_string(inp, pos=None): if pos is None: pos = len(inp) partial = inp[:pos] startix, endix, quote = xt.check_for_partial_string(partial) _post = "" if startix is None: return None elif endix is None: string = partial[startix:] else: if endix != pos: _test = partial[endix:pos] if not any(i == ' ' for i in _test): _post = _test else: return None string = partial[startix:endix] end = xt.RE_STRING_START.sub('', quote) _string = string if not _string.endswith(end): _string = _string + end try: val = ast.literal_eval(_string) except SyntaxError: return None if isinstance(val, bytes): env = builtins.__xonsh_env__ val = val.decode(encoding=env.get('XONSH_ENCODING'), errors=env.get('XONSH_ENCODING_ERRORS')) return string + _post, val + _post, quote, end def _normpath(p): """ Wraps os.normpath() to avoid removing './' at the beginning and '/' at the end. 
On windows it does the same with backslashes """ initial_dotslash = p.startswith(os.curdir + os.sep) initial_dotslash |= (xp.ON_WINDOWS and p.startswith(os.curdir + os.altsep)) p = p.rstrip() trailing_slash = p.endswith(os.sep) trailing_slash |= (xp.ON_WINDOWS and p.endswith(os.altsep)) p = os.path.normpath(p) if initial_dotslash and p != '.': p = os.path.join(os.curdir, p) if trailing_slash: p = os.path.join(p, '') if xp.ON_WINDOWS and builtins.__xonsh_env__.get('FORCE_POSIX_PATHS'): p = p.replace(os.sep, os.altsep) return p def _startswithlow(x, start, startlow=None): if startlow is None: startlow = start.lower() return x.startswith(start) or x.lower().startswith(startlow) def _startswithnorm(x, start, startlow=None): return x.startswith(start) def _env(prefix): if prefix.startswith('$'): key = prefix[1:] return {'$' + k for k in builtins.__xonsh_env__ if get_filter_function()(k, key)} return () def _dots(prefix): slash = xt.get_sep() if slash == '\\': slash = '' if prefix in {'', '.'}: return ('.'+slash, '..'+slash) elif prefix == '..': return ('..'+slash,) else: return () def _add_cdpaths(paths, prefix): """Completes current prefix using CDPATH""" env = builtins.__xonsh_env__ csc = env.get('CASE_SENSITIVE_COMPLETIONS') glob_sorted = env.get('GLOB_SORTED') for cdp in env.get('CDPATH'): test_glob = os.path.join(cdp, prefix) + '*' for s in xt.iglobpath(test_glob, ignore_case=(not csc), sort_result=glob_sorted): if os.path.isdir(s): paths.add(os.path.basename(s)) def _quote_to_use(x): single = "'" double = '"' if single in x and double not in x: return double else: return single def _quote_paths(paths, start, end, append_end=True): expand_path = builtins.__xonsh_expand_path__ out = set() space = ' ' backslash = '\\' double_backslash = '\\\\' slash = xt.get_sep() orig_start = start orig_end = end # quote on all or none, to make readline completes to max prefix need_quotes = any( re.search(PATTERN_NEED_QUOTES, x) or (backslash in x and slash != backslash) for x in paths) for s in paths: start = orig_start end = orig_end if start == '' and need_quotes: start = end = _quote_to_use(s) if os.path.isdir(expand_path(s)): _tail = slash elif end == '': _tail = space else: _tail = '' if start != '' and 'r' not in start and backslash in s: start = 'r%s' % start s = s + _tail if end != '': if "r" not in start.lower(): s = s.replace(backslash, double_backslash) if s.endswith(backslash) and not s.endswith(double_backslash): s += backslash if end in s: s = s.replace(end, ''.join('\\%s' % i for i in end)) s = start + s + end if append_end else start + s out.add(s) return out def _joinpath(path): # convert our tuple representation back into a string representing a path if path is None: return '' elif len(path) == 0: return '' elif path == ('',): return xt.get_sep() elif path[0] == '': return xt.get_sep() + _normpath(os.path.join(*path)) else: return _normpath(os.path.join(*path)) def _splitpath(path): # convert a path into an intermediate tuple representation # if this tuple starts with '', it means that the path was an absolute path path = _normpath(path) if path.startswith(xt.get_sep()): pre = ('', ) else: pre = () return pre + _splitpath_helper(path, ()) def _splitpath_helper(path, sofar=()): folder, path = os.path.split(path) if path: sofar = sofar + (path, ) if not folder or folder == xt.get_sep(): return sofar[::-1] elif xp.ON_WINDOWS and not path: return os.path.splitdrive(folder)[:1] + sofar[::-1] elif xp.ON_WINDOWS and os.path.splitdrive(path)[0]: return sofar[::-1] return _splitpath_helper(folder, 
sofar) def subsequence_match(ref, typed, csc): """ Detects whether typed is a subsequence of ref. Returns ``True`` if the characters in ``typed`` appear (in order) in ``ref``, regardless of exactly where in ``ref`` they occur. If ``csc`` is ``False``, ignore the case of ``ref`` and ``typed``. Used in "subsequence" path completion (e.g., ``~/u/ro`` expands to ``~/lou/carcohl``) """ if csc: return _subsequence_match_iter(ref, typed) else: return _subsequence_match_iter(ref.lower(), typed.lower()) def _subsequence_match_iter(ref, typed): if len(typed) == 0: return True elif len(ref) == 0: return False elif ref[0] == typed[0]: return _subsequence_match_iter(ref[1:], typed[1:]) else: return _subsequence_match_iter(ref[1:], typed) def _expand_one(sofar, nextone, csc): out = set() glob_sorted = builtins.__xonsh_env__.get('GLOB_SORTED') for i in sofar: _glob = os.path.join(_joinpath(i), '*') if i is not None else '*' for j in xt.iglobpath(_glob, sort_result=glob_sorted): j = os.path.basename(j) if subsequence_match(j, nextone, csc): out.add((i or ()) + (j, )) return out def complete_path(prefix, line, start, end, ctx, cdpath=True, filtfunc=None): """Completes based on a path name.""" # string stuff for automatic quoting path_str_start = '' path_str_end = '' append_end = True p = _path_from_partial_string(line, end) lprefix = len(prefix) if p is not None: lprefix = len(p[0]) prefix = p[1] path_str_start = p[2] path_str_end = p[3] if len(line) >= end + 1 and line[end] == path_str_end: append_end = False tilde = '~' paths = set() env = builtins.__xonsh_env__ csc = env.get('CASE_SENSITIVE_COMPLETIONS') glob_sorted = env.get('GLOB_SORTED') prefix = glob.escape(prefix) for s in xt.iglobpath(prefix + '*', ignore_case=(not csc), sort_result=glob_sorted): paths.add(s) if len(paths) == 0 and env.get('SUBSEQUENCE_PATH_COMPLETION'): # this block implements 'subsequence' matching, similar to fish and zsh. # matches are based on subsequences, not substrings. # e.g., ~/u/ro completes to ~/lou/carcolh # see above functions for details. 
p = _splitpath(os.path.expanduser(prefix)) if len(p) != 0: if p[0] == '': basedir = ('', ) p = p[1:] else: basedir = None matches_so_far = {basedir} for i in p: matches_so_far = _expand_one(matches_so_far, i, csc) paths |= {_joinpath(i) for i in matches_so_far} if len(paths) == 0 and env.get('FUZZY_PATH_COMPLETION'): threshold = env.get('SUGGEST_THRESHOLD') for s in xt.iglobpath(os.path.dirname(prefix) + '*', ignore_case=(not csc), sort_result=glob_sorted): if xt.levenshtein(prefix, s, threshold) < threshold: paths.add(s) if tilde in prefix: home = os.path.expanduser(tilde) paths = {s.replace(home, tilde) for s in paths} if cdpath: _add_cdpaths(paths, prefix) paths = set(filter(filtfunc, paths)) paths = _quote_paths({_normpath(s) for s in paths}, path_str_start, path_str_end, append_end) paths.update(filter(filtfunc, _dots(prefix))) paths.update(filter(filtfunc, _env(prefix))) return paths, lprefix def complete_dir(prefix, line, start, end, ctx, cdpath=False): return complete_path(prefix, line, start, end, cdpath, filtfunc=os.path.isdir) xonsh-0.6.0/xonsh/completers/pip.py000066400000000000000000000033101320541242300173230ustar00rootroot00000000000000"""Completers for pip.""" # pylint: disable=invalid-name, missing-docstring, unsupported-membership-test # pylint: disable=unused-argument, not-an-iterable import re import subprocess import xonsh.lazyasd as xl @xl.lazyobject def PIP_RE(): return re.compile(r"pip(?:\d|\.)*") @xl.lazyobject def PIP_LIST_RE(): return re.compile(r"pip(?:\d|\.)* (?:uninstall|show)") @xl.lazyobject def ALL_COMMANDS(): try: help_text = str(subprocess.check_output(['pip', '--help'], stderr=subprocess.DEVNULL)) except FileNotFoundError: return [] commands = re.findall(r" (\w+) ", help_text) return [c for c in commands if c not in ['completion', 'help']] def complete_pip(prefix, line, begidx, endidx, ctx): """Completes python's package manager pip""" line_len = len(line.split()) if (line_len > 3) or (line_len > 2 and line.endswith(' ')) or \ (not PIP_RE.search(line)): return if PIP_LIST_RE.search(line): try: items = subprocess.check_output(['pip', 'list'], stderr=subprocess.DEVNULL) except FileNotFoundError: return set() items = items.decode('utf-8').splitlines() return set(i.split()[0] for i in items if i.split()[0].startswith(prefix)) if (line_len > 1 and line.endswith(' ')) or line_len > 2: # "pip show " -> no complete (note space) return if prefix not in ALL_COMMANDS: suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)] if suggestions: return suggestions, len(prefix) return ALL_COMMANDS, len(prefix) xonsh-0.6.0/xonsh/completers/python.py000066400000000000000000000152531320541242300200650ustar00rootroot00000000000000"""Completers for Python code""" import re import sys import inspect import builtins import importlib import collections.abc as cabc import xonsh.tools as xt import xonsh.lazyasd as xl from xonsh.completers.tools import get_filter_function @xl.lazyobject def RE_ATTR(): return re.compile(r'([^\s\(\)]+(\.[^\s\(\)]+)*)\.(\w*)$') @xl.lazyobject def XONSH_EXPR_TOKENS(): return { 'and ', 'else', 'for ', 'if ', 'in ', 'is ', 'lambda ', 'not ', 'or ', '+', '-', '/', '//', '%', '**', '|', '&', '~', '^', '>>', '<<', '<', '<=', '>', '>=', '==', '!=', ',', '?', '??', '$(', '${', '$[', '...', '![', '!(', '@(', '@$(', '@', } @xl.lazyobject def XONSH_STMT_TOKENS(): return { 'as ', 'assert ', 'break', 'class ', 'continue', 'def ', 'del ', 'elif ', 'except ', 'finally:', 'from ', 'global ', 'import ', 'nonlocal ', 'pass', 'raise ', 'return ', 'try:', 'while ', 
'with ', 'yield ', '-', '/', '//', '%', '**', '|', '&', '~', '^', '>>', '<<', '<', '<=', '->', '=', '+=', '-=', '*=', '/=', '%=', '**=', '>>=', '<<=', '&=', '^=', '|=', '//=', ';', ':', '..', } @xl.lazyobject def XONSH_TOKENS(): return set(XONSH_EXPR_TOKENS) | set(XONSH_STMT_TOKENS) def complete_python(prefix, line, start, end, ctx): """ Completes based on the contents of the current Python environment, the Python built-ins, and xonsh operators. If there are no matches, split on common delimiters and try again. """ rtn = _complete_python(prefix, line, start, end, ctx) if not rtn: prefix = (re.split(r'\(|=|{|\[|,', prefix)[-1] if not prefix.startswith(',') else prefix) start = line.find(prefix) rtn = _complete_python(prefix, line, start, end, ctx) return rtn, len(prefix) return rtn def _complete_python(prefix, line, start, end, ctx): """ Completes based on the contents of the current Python environment, the Python built-ins, and xonsh operators. """ if line != '': first = line.split()[0] if first in builtins.__xonsh_commands_cache__ and first not in ctx: return set() filt = get_filter_function() rtn = set() if ctx is not None: if '.' in prefix: rtn |= attr_complete(prefix, ctx, filt) args = python_signature_complete(prefix, line, end, ctx, filt) rtn |= args rtn |= {s for s in ctx if filt(s, prefix)} else: args = () if len(args) == 0: # not in a function call, so we can add non-expression tokens rtn |= {s for s in XONSH_TOKENS if filt(s, prefix)} else: rtn |= {s for s in XONSH_EXPR_TOKENS if filt(s, prefix)} rtn |= {s for s in dir(builtins) if filt(s, prefix)} return rtn def complete_python_mode(prefix, line, start, end, ctx): """ Python-mode completions for @( and ${ """ if not (prefix.startswith('@(') or prefix.startswith('${')): return set() prefix_start = prefix[:2] python_matches = complete_python(prefix[2:], line, start-2, end-2, ctx) if isinstance(python_matches, cabc.Sequence): python_matches = python_matches[0] return set(prefix_start + i for i in python_matches) def _safe_eval(expr, ctx): """Safely tries to evaluate an expression. If this fails, it will return a (None, None) tuple. """ _ctx = None xonsh_safe_eval = builtins.__xonsh_execer__.eval try: val = xonsh_safe_eval(expr, ctx, ctx, transform=False) _ctx = ctx except: # pylint:disable=bare-except try: val = xonsh_safe_eval(expr, builtins.__dict__, transform=False) _ctx = builtins.__dict__ except: # pylint:disable=bare-except val = _ctx = None return val, _ctx def attr_complete(prefix, ctx, filter_func): """Complete attributes of an object.""" attrs = set() m = RE_ATTR.match(prefix) if m is None: return attrs expr, attr = m.group(1, 3) expr = xt.subexpr_from_unbalanced(expr, '(', ')') expr = xt.subexpr_from_unbalanced(expr, '[', ']') expr = xt.subexpr_from_unbalanced(expr, '{', '}') val, _ctx = _safe_eval(expr, ctx) if val is None and _ctx is None: return attrs if len(attr) == 0: opts = [o for o in dir(val) if not o.startswith('_')] else: opts = [o for o in dir(val) if filter_func(o, attr)] prelen = len(prefix) for opt in opts: # check whether these options actually work (e.g., disallow 7.imag) _expr = '{0}.{1}'.format(expr, opt) _val_, _ctx_ = _safe_eval(_expr, _ctx) if _val_ is None and _ctx_ is None: continue a = getattr(val, opt) if builtins.__xonsh_env__['COMPLETIONS_BRACKETS']: if callable(a): rpl = opt + '(' elif isinstance(a, (cabc.Sequence, cabc.Mapping)): rpl = opt + '[' else: rpl = opt else: rpl = opt # note that prefix[:prelen-len(attr)] != prefix[:-len(attr)] # when len(attr) == 0. 
comp = prefix[:prelen - len(attr)] + rpl attrs.add(comp) return attrs def python_signature_complete(prefix, line, end, ctx, filter_func): """Completes a python function (or other callable) call by completing argument and keyword argument names. """ front = line[:end] if xt.is_balanced(front, '(', ')'): return set() funcname = xt.subexpr_before_unbalanced(front, '(', ')') val, _ctx = _safe_eval(funcname, ctx) if val is None: return set() try: sig = inspect.signature(val) except ValueError: return set() args = {p + '=' for p in sig.parameters if filter_func(p, prefix)} return args def complete_import(prefix, line, start, end, ctx): """ Completes module names and contents for "import ..." and "from ... import ..." """ ltoks = line.split() ntoks = len(ltoks) if ntoks == 2 and ltoks[0] == 'from': # completing module to import return {'{} '.format(i) for i in complete_module(prefix)} if ntoks > 1 and ltoks[0] == 'import' and start == len('import '): # completing module to import return complete_module(prefix) if ntoks > 2 and ltoks[0] == 'from' and ltoks[2] == 'import': # complete thing inside a module try: mod = importlib.import_module(ltoks[1]) except ImportError: return set() out = {i[0] for i in inspect.getmembers(mod) if i[0].startswith(prefix)} return out return set() def complete_module(prefix): return {s for s in sys.modules if get_filter_function()(s, prefix)} xonsh-0.6.0/xonsh/completers/tools.py000066400000000000000000000014741320541242300177040ustar00rootroot00000000000000"""Xonsh completer tools.""" import builtins import textwrap def _filter_normal(s, x): return s.startswith(x) def _filter_ignorecase(s, x): return s.lower().startswith(x.lower()) def get_filter_function(): """ Return an appropriate filtering function for completions, given the valid of $CASE_SENSITIVE_COMPLETIONS """ csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS') if csc: return _filter_normal else: return _filter_ignorecase def justify(s, max_length, left_pad=0): """ Re-wrap the string s so that each line is no more than max_length characters long, padding all lines but the first on the left with the string left_pad. 
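    For example (an illustrative sketch of the wrapping behavior),
    ``justify('the quick brown fox', 10, left_pad=4)`` produces the lines
    ``'the quick'``, ``'    brown'`` and ``'    fox'`` joined by newlines.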
""" txt = textwrap.wrap(s, width=max_length, subsequent_indent=' '*left_pad) return '\n'.join(txt) xonsh-0.6.0/xonsh/completers/xompletions.py000066400000000000000000000025071320541242300211230ustar00rootroot00000000000000"""Provides completions for xonsh internal utilities""" import xonsh.xontribs as xx import xonsh.tools as xt def complete_xonfig(prefix, line, start, end, ctx): """Completion for ``xonfig``""" args = line.split(' ') if len(args) == 0 or args[0] != 'xonfig': return None curix = args.index(prefix) if curix == 1: possible = {'info', 'wizard', 'styles', 'colors', '-h'} elif curix == 2 and args[1] == 'colors': possible = set(xt.color_style_names()) else: raise StopIteration return {i for i in possible if i.startswith(prefix)} def _list_installed_xontribs(): meta = xx.xontrib_metadata() installed = [] for md in meta['xontribs']: name = md['name'] spec = xx.find_xontrib(name) if spec is not None: installed.append(spec.name.rsplit('.')[-1]) return installed def complete_xontrib(prefix, line, start, end, ctx): """Completion for ``xontrib``""" args = line.split(' ') if len(args) == 0 or args[0] != 'xontrib': return None curix = args.index(prefix) if curix == 1: possible = {'list', 'load'} elif curix == 2: if args[1] == 'load': possible = _list_installed_xontribs() else: raise StopIteration return {i for i in possible if i.startswith(prefix)} xonsh-0.6.0/xonsh/contexts.py000066400000000000000000000076551320541242300162450ustar00rootroot00000000000000"""Context management tools for xonsh.""" import sys import textwrap import builtins from collections.abc import Mapping class Block(object): """This is a context manager for obtaining a block of lines without actually executing the block. The lines are accessible as the 'lines' attribute. This must be used as a macro. """ __xonsh_block__ = str def __init__(self): """ Attributes ---------- lines : list of str or None Block lines as if split by str.splitlines(), if available. glbs : Mapping or None Global execution context, ie globals(). locs : Mapping or None Local execution context, ie locals(). """ self.lines = self.glbs = self.locs = None def __enter__(self): if not hasattr(self, 'macro_block'): raise XonshError(self.__class__.__name__ + ' must be entered as a macro!') self.lines = self.macro_block.splitlines() self.glbs = self.macro_globals if self.macro_locals is not self.macro_globals: # leave locals as None when it is the same as globals self.locs = self.macro_locals return self def __exit__(self, exc_type, exc_value, traceback): pass class Functor(Block): """This is a context manager that turns the block into a callable object, bound to the execution context it was created in. """ def __init__(self, args=(), kwargs=None, rtn=''): """ Parameters ---------- args : Sequence of str, optional A tuple of argument names for the functor. kwargs : Mapping of str to values or list of item tuples, optional Keyword argument names and values, if available. rtn : str, optional Name of object to return, if available. Attributes ---------- func : function The underlying function object. This defaults to none and is set after the the block is exited. 
""" super().__init__() self.func = None self.args = args if kwargs is None: self.kwargs = [] elif isinstance(kwargs, Mapping): self.kwargs = sorted(kwargs.items()) else: self.kwargs = kwargs self.rtn = rtn def __enter__(self): super().__enter__() body = textwrap.indent(self.macro_block, ' ') uid = hash(body) + sys.maxsize # should always be a positive int name = '__xonsh_functor_{uid}__'.format(uid=uid) # construct signature string sig = rtn = '' sig = ', '.join(self.args) kwstr = ', '.join([k + '=None' for k, _ in self.kwargs]) if len(kwstr) > 0: sig = kwstr if len(sig) == 0 else sig + ', ' + kwstr # construct return string rtn = str(self.rtn) if len(rtn) > 0: rtn = ' return ' + rtn + '\n' # construct function string fstr = 'def {name}({sig}):\n{body}\n{rtn}' fstr = fstr.format(name=name, sig=sig, body=body, rtn=rtn) glbs = self.glbs locs = self.locs execer = builtins.__xonsh_execer__ execer.exec(fstr, glbs=glbs, locs=locs) if locs is not None and name in locs: func = locs[name] elif name in glbs: func = glbs[name] else: raise ValueError('Functor block could not be found in context.') if len(self.kwargs) > 0: func.__defaults__ = tuple(v for _, v in self.kwargs) self.func = func return self def __exit__(self, exc_type, exc_value, traceback): pass def __call__(self, *args, **kwargs): """Dispatches to func.""" if self.func is None: msg = "{} block with 'None' func not callable" raise AttributeError(msg.formst(self.__class__.__name__)) return self.func(*args, **kwargs) xonsh-0.6.0/xonsh/diff_history.py000066400000000000000000000265301320541242300170600ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tools for diff'ing two xonsh history files in a meaningful fashion.""" import difflib import datetime import itertools import argparse from xonsh.lazyjson import LazyJSON from xonsh.tools import print_color NO_COLOR_S = '{NO_COLOR}' RED_S = '{RED}' GREEN_S = '{GREEN}' BOLD_RED_S = '{BOLD_RED}' BOLD_GREEN_S = '{BOLD_GREEN}' # intern some strings REPLACE_S = 'replace' DELETE_S = 'delete' INSERT_S = 'insert' EQUAL_S = 'equal' def bold_str_diff(a, b, sm=None): if sm is None: sm = difflib.SequenceMatcher() aline = RED_S + '- ' bline = GREEN_S + '+ ' sm.set_seqs(a, b) for tag, i1, i2, j1, j2 in sm.get_opcodes(): if tag == REPLACE_S: aline += BOLD_RED_S + a[i1:i2] + RED_S bline += BOLD_GREEN_S + b[j1:j2] + GREEN_S elif tag == DELETE_S: aline += BOLD_RED_S + a[i1:i2] + RED_S elif tag == INSERT_S: bline += BOLD_GREEN_S + b[j1:j2] + GREEN_S elif tag == EQUAL_S: aline += a[i1:i2] bline += b[j1:j2] else: raise RuntimeError('tag not understood') return aline + NO_COLOR_S + '\n' + bline + NO_COLOR_S + '\n' def redline(line): return '{red}- {line}{no_color}\n'.format(red=RED_S, line=line, no_color=NO_COLOR_S) def greenline(line): return '{green}+ {line}{no_color}\n'.format(green=GREEN_S, line=line, no_color=NO_COLOR_S) def highlighted_ndiff(a, b): """Returns a highlighted string, with bold characters where different.""" s = '' sm = difflib.SequenceMatcher() sm.set_seqs(a, b) linesm = difflib.SequenceMatcher() for tag, i1, i2, j1, j2 in sm.get_opcodes(): if tag == REPLACE_S: for aline, bline in itertools.zip_longest(a[i1:i2], b[j1:j2]): if bline is None: s += redline(aline) elif aline is None: s += greenline(bline) else: s += bold_str_diff(aline, bline, sm=linesm) elif tag == DELETE_S: for aline in a[i1:i2]: s += redline(aline) elif tag == INSERT_S: for bline in b[j1:j2]: s += greenline(bline) elif tag == EQUAL_S: for aline in a[i1:i2]: s += ' ' + aline + '\n' else: raise RuntimeError('tag not understood') 
return s class HistoryDiffer(object): """This class helps diff two xonsh history files.""" def __init__(self, afile, bfile, reopen=False, verbose=False): """ Parameters ---------- afile : file handle or str The first file to diff bfile : file handle or str The second file to diff reopen : bool, optional Whether or not to reopen the file handles each time. The default here is opposite from the LazyJSON default because we know that we will be doing a lot of reading so it is best to keep the handles open. verbose : bool, optional Whether to print a verbose amount of information. """ self.a = LazyJSON(afile, reopen=reopen) self.b = LazyJSON(bfile, reopen=reopen) self.verbose = verbose self.sm = difflib.SequenceMatcher(autojunk=False) def __del__(self): self.a.close() self.b.close() def __str__(self): return self.format() def _header_line(self, lj): s = lj._f.name if hasattr(lj._f, 'name') else '' s += ' (' + lj['sessionid'] + ')' s += ' [locked]' if lj['locked'] else ' [unlocked]' ts = lj['ts'].load() ts0 = datetime.datetime.fromtimestamp(ts[0]) s += ' started: ' + ts0.isoformat(' ') if ts[1] is not None: ts1 = datetime.datetime.fromtimestamp(ts[1]) s += ' stopped: ' + ts1.isoformat(' ') + ' runtime: ' + str(ts1 - ts0) return s def header(self): """Computes a header string difference.""" s = ('{red}--- {aline}{no_color}\n' '{green}+++ {bline}{no_color}') s = s.format(aline=self._header_line(self.a), bline=self._header_line(self.b), red=RED_S, green=GREEN_S, no_color=NO_COLOR_S) return s def _env_both_diff(self, in_both, aenv, benv): sm = self.sm s = '' for key in sorted(in_both): aval = aenv[key] bval = benv[key] if aval == bval: continue s += '{0!r} is in both, but differs\n'.format(key) s += bold_str_diff(aval, bval, sm=sm) + '\n' return s def _env_in_one_diff(self, x, y, color, xid, xenv): only_x = sorted(x - y) if len(only_x) == 0: return '' if self.verbose: xstr = ',\n'.join([' {0!r}: {1!r}'.format(key, xenv[key]) for key in only_x]) xstr = '\n' + xstr else: xstr = ', '.join(['{0!r}'.format(key) for key in only_x]) in_x = 'These vars are only in {color}{xid}{no_color}: {{{xstr}}}\n\n' return in_x.format(xid=xid, color=color, no_color=NO_COLOR_S, xstr=xstr) def envdiff(self): """Computes the difference between the environments.""" aenv = self.a['env'].load() benv = self.b['env'].load() akeys = frozenset(aenv) bkeys = frozenset(benv) in_both = akeys & bkeys if len(in_both) == len(akeys) == len(bkeys): keydiff = self._env_both_diff(in_both, aenv, benv) if len(keydiff) == 0: return '' in_a = in_b = '' else: keydiff = self._env_both_diff(in_both, aenv, benv) in_a = self._env_in_one_diff(akeys, bkeys, RED_S, self.a['sessionid'], aenv) in_b = self._env_in_one_diff(bkeys, akeys, GREEN_S, self.b['sessionid'], benv) s = 'Environment\n-----------\n' + in_a + keydiff + in_b return s def _cmd_in_one_diff(self, inp, i, xlj, xid, color): s = 'cmd #{i} only in {color}{xid}{no_color}:\n' s = s.format(i=i, color=color, xid=xid, no_color=NO_COLOR_S) lines = inp.splitlines() lt = '{color}{pre}{no_color} {line}\n' s += lt.format(color=color, no_color=NO_COLOR_S, line=lines[0], pre='>>>') for line in lines[1:]: s += lt.format(color=color, no_color=NO_COLOR_S, line=line, pre='...') if not self.verbose: return s + '\n' out = xlj['cmds'][0].get('out', 'Note: no output stored') s += out.rstrip() + '\n\n' return s def _cmd_out_and_rtn_diff(self, i, j): s = '' aout = self.a['cmds'][i].get('out', None) bout = self.b['cmds'][j].get('out', None) if aout is None and bout is None: # s += 'Note: neither output stored\n' 
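            # (editor's note) neither session stored this command's output,
            # so there is nothing to compare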
pass elif bout is None: aid = self.a['sessionid'] s += 'Note: only {red}{aid}{no_color} output stored\n'.format( red=RED_S, aid=aid, no_color=NO_COLOR_S) elif aout is None: bid = self.b['sessionid'] s += 'Note: only {green}{bid}{no_color} output stored\n'.format( green=GREEN_S, bid=bid, no_color=NO_COLOR_S) elif aout != bout: s += 'Outputs differ\n' s += highlighted_ndiff(aout.splitlines(), bout.splitlines()) else: pass artn = self.a['cmds'][i]['rtn'] brtn = self.b['cmds'][j]['rtn'] if artn != brtn: s += ('Return vals {red}{artn}{no_color} & {green}{brtn}{no_color} differ\n' ).format(red=RED_S, green=GREEN_S, no_color=NO_COLOR_S, artn=artn, brtn=brtn) return s def _cmd_replace_diff(self, i, ainp, aid, j, binp, bid): s = ('cmd #{i} in {red}{aid}{no_color} is replaced by \n' 'cmd #{j} in {green}{bid}{no_color}:\n') s = s.format(i=i, aid=aid, j=j, bid=bid, red=RED_S, green=GREEN_S, no_color=NO_COLOR_S) s += highlighted_ndiff(ainp.splitlines(), binp.splitlines()) if not self.verbose: return s + '\n' s += self._cmd_out_and_rtn_diff(i, j) return s + '\n' def cmdsdiff(self): """Computes the difference of the commands themselves.""" aid = self.a['sessionid'] bid = self.b['sessionid'] ainps = [c['inp'] for c in self.a['cmds']] binps = [c['inp'] for c in self.b['cmds']] sm = self.sm sm.set_seqs(ainps, binps) s = '' for tag, i1, i2, j1, j2 in sm.get_opcodes(): if tag == REPLACE_S: zipper = itertools.zip_longest for i, ainp, j, binp in zipper(range(i1, i2), ainps[i1:i2], range(j1, j2), binps[j1:j2]): if j is None: s += self._cmd_in_one_diff(ainp, i, self.a, aid, RED_S) elif i is None: s += self._cmd_in_one_diff(binp, j, self.b, bid, GREEN_S) else: self._cmd_replace_diff(i, ainp, aid, j, binp, bid) elif tag == DELETE_S: for i, inp in enumerate(ainps[i1:i2], i1): s += self._cmd_in_one_diff(inp, i, self.a, aid, RED_S) elif tag == INSERT_S: for j, inp in enumerate(binps[j1:j2], j1): s += self._cmd_in_one_diff(inp, j, self.b, bid, GREEN_S) elif tag == EQUAL_S: for i, j, in zip(range(i1, i2), range(j1, j2)): odiff = self._cmd_out_and_rtn_diff(i, j) if len(odiff) > 0: h = ('cmd #{i} in {red}{aid}{no_color} input is the same as \n' 'cmd #{j} in {green}{bid}{no_color}, but output differs:\n') s += h.format(i=i, aid=aid, j=j, bid=bid, red=RED_S, green=GREEN_S, no_color=NO_COLOR_S) s += odiff + '\n' else: raise RuntimeError('tag not understood') if len(s) == 0: return s return 'Commands\n--------\n' + s def format(self): """Formats the difference between the two history files.""" s = self.header() ed = self.envdiff() if len(ed) > 0: s += '\n\n' + ed cd = self.cmdsdiff() if len(cd) > 0: s += '\n\n' + cd return s.rstrip() _HD_PARSER = None def dh_create_parser(p=None): global _HD_PARSER p_was_none = (p is None) if _HD_PARSER is not None and p_was_none: return _HD_PARSER if p_was_none: p = argparse.ArgumentParser('diff-history', description='diffs two xonsh history files') p.add_argument('--reopen', dest='reopen', default=False, action='store_true', help='make lazy file loading reopen files each time') p.add_argument('-v', '--verbose', dest='verbose', default=False, action='store_true', help='whether to print even more information') p.add_argument('a', help='first file in diff') p.add_argument('b', help='second file in diff') if p_was_none: _HD_PARSER = p return p def dh_main_action(ns, hist=None, stdout=None, stderr=None): hd = HistoryDiffer(ns.a, ns.b, reopen=ns.reopen, verbose=ns.verbose) print_color(hd.format(), file=stdout) 
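# Editor's sketch (not part of xonsh): a minimal, standalone illustration of
# how difflib.SequenceMatcher opcodes drive highlighted_ndiff() and cmdsdiff()
# above. difflib is already imported at the top of this module; the command
# strings below are made-up examples.
if __name__ == '__main__':
    _a = ['ls -l', 'cd /tmp', 'echo hi']
    _b = ['ls -la', 'echo hi', 'pwd']
    _sm = difflib.SequenceMatcher(autojunk=False)
    _sm.set_seqs(_a, _b)
    # Each opcode is (tag, i1, i2, j1, j2) with tag being one of 'replace',
    # 'delete', 'insert', or 'equal' -- the same tags handled by the
    # REPLACE_S/DELETE_S/INSERT_S/EQUAL_S branches in the functions above.
    for _tag, _i1, _i2, _j1, _j2 in _sm.get_opcodes():
        print(_tag, _a[_i1:_i2], _b[_j1:_j2])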
xonsh-0.6.0/xonsh/dirstack.py000066400000000000000000000413631320541242300161740ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Directory stack and associated utilities for the xonsh shell.""" import os import glob import argparse import builtins import subprocess from xonsh.lazyasd import lazyobject from xonsh.tools import get_sep from xonsh.events import events from xonsh.platform import ON_WINDOWS DIRSTACK = [] """A list containing the currently remembered directories.""" _unc_tempDrives = {} """ drive: sharePath for temp drive letters we create for UNC mapping""" def _unc_check_enabled()->bool: """Check whether CMD.EXE is enforcing no-UNC-as-working-directory check. Check can be disabled by setting {HKCU, HKLM}/SOFTWARE\Microsoft\Command Processor\DisableUNCCheck:REG_DWORD=1 Returns: True if `CMD.EXE` is enforcing the check (default Windows situation) False if check is explicitly disabled. """ if not ON_WINDOWS: return import winreg wval = None try: key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'software\microsoft\command processor') wval, wtype = winreg.QueryValueEx(key, 'DisableUNCCheck') winreg.CloseKey(key) except OSError as e: pass if wval is None: try: key2 = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'software\microsoft\command processor') wval, wtype = winreg.QueryValueEx(key2, 'DisableUNCCheck') winreg.CloseKey(key2) except OSError as e: # NOQA pass return False if wval else True def _is_unc_path(some_path)->bool: """True if path starts with 2 backward (or forward, due to python path hacking) slashes.""" return len(some_path) > 1 and some_path[0] == some_path[1] and some_path[0] in (os.sep, os.altsep) def _unc_map_temp_drive(unc_path)->str: """Map a new temporary drive letter for each distinct share, unless `CMD.EXE` is not insisting on non-UNC working directory. Emulating behavior of `CMD.EXE` `pushd`, create a new mapped drive (starting from Z: towards A:, skipping existing drive letters) for each new UNC path user selects. Args: unc_path: the path specified by user. Assumed to be a UNC path of form \\\share... Returns: a replacement for `unc_path` to be used as the actual new working directory. Note that the drive letter may be a the same as one already mapped if the server and share portion of `unc_path` is the same as one still active on the stack. """ global _unc_tempDrives assert unc_path[1] in (os.sep, os.altsep), "unc_path is UNC form of path" if not _unc_check_enabled(): return unc_path else: unc_share, rem_path = os.path.splitdrive(unc_path) unc_share = unc_share.casefold() for d in _unc_tempDrives: if _unc_tempDrives[d] == unc_share: return os.path.join(d, rem_path) for dord in range(ord('z'), ord('a'), -1): d = chr(dord) + ':' if not os.path.isdir(d): # find unused drive letter starting from z: subprocess.check_output(['NET', 'USE', d, unc_share], universal_newlines=True) _unc_tempDrives[d] = unc_share return os.path.join(d, rem_path) def _unc_unmap_temp_drive(left_drive, cwd): """Unmap a temporary drive letter if it is no longer needed. Called after popping `DIRSTACK` and changing to new working directory, so we need stack *and* new current working directory to be sure drive letter no longer needed. Args: left_drive: driveletter (and colon) of working directory we just left cwd: full path of new current working directory """ global _unc_tempDrives if left_drive not in _unc_tempDrives: # if not one we've mapped, don't unmap it return for p in DIRSTACK + [cwd]: # if still in use , don't unmap it. 
if p.casefold().startswith(left_drive): return _unc_tempDrives.pop(left_drive) subprocess.check_output(['NET', 'USE', left_drive, '/delete'], universal_newlines=True) events.doc('on_chdir', """ on_chdir(olddir: str, newdir: str) -> None Fires when the current directory is changed for any reason. """) def _get_cwd(): try: return os.getcwd() except (OSError, FileNotFoundError): return None def _change_working_directory(newdir, follow_symlinks=False): env = builtins.__xonsh_env__ old = env['PWD'] new = os.path.join(old, newdir) absnew = os.path.abspath(new) if follow_symlinks: absnew = os.path.realpath(absnew) try: os.chdir(absnew) except (OSError, FileNotFoundError): if new.endswith(get_sep()): new = new[:-1] if os.path.basename(new) == '..': env['PWD'] = new else: if old is not None: env['OLDPWD'] = old if new is not None: env['PWD'] = absnew # Fire event if the path actually changed if old != env['PWD']: events.on_chdir.fire(olddir=old, newdir=env['PWD']) def _try_cdpath(apath): # NOTE: this CDPATH implementation differs from the bash one. # In bash if a CDPATH is set, an unqualified local folder # is considered after all CDPATHs, example: # CDPATH=$HOME/src (with src/xonsh/ inside) # $ cd xonsh -> src/xonsh (with xonsh/xonsh) # a second $ cd xonsh has no effects, to move in the nested xonsh # in bash a full $ cd ./xonsh is needed. # In xonsh a relative folder is always preferred. env = builtins.__xonsh_env__ cdpaths = env.get('CDPATH') for cdp in cdpaths: globber = builtins.__xonsh_expand_path__(os.path.join(cdp, apath)) for cdpath_prefixed_path in glob.iglob(globber): return cdpath_prefixed_path return apath def cd(args, stdin=None): """Changes the directory. If no directory is specified (i.e. if `args` is None) then this changes to the current user's home directory. """ env = builtins.__xonsh_env__ oldpwd = env.get('OLDPWD', None) cwd = env['PWD'] follow_symlinks = False if len(args) > 0 and args[0] == '-P': follow_symlinks = True del args[0] if len(args) == 0: d = os.path.expanduser('~') elif len(args) == 1: d = os.path.expanduser(args[0]) if not os.path.isdir(d): if d == '-': if oldpwd is not None: d = oldpwd else: return '', 'cd: no previous directory stored\n', 1 elif d.startswith('-'): try: num = int(d[1:]) except ValueError: return '', 'cd: Invalid destination: {0}\n'.format(d), 1 if num == 0: return None, None, 0 elif num < 0: return '', 'cd: Invalid destination: {0}\n'.format(d), 1 elif num > len(DIRSTACK): e = 'cd: Too few elements in dirstack ({0} elements)\n' return '', e.format(len(DIRSTACK)), 1 else: d = DIRSTACK[num - 1] else: d = _try_cdpath(d) else: return '', ('cd takes 0 or 1 arguments, not {0}. An additional `-P` ' 'flag can be passed in first position to follow symlinks.' 
'\n'.format(len(args))), 1 if not os.path.exists(d): return '', 'cd: no such file or directory: {0}\n'.format(d), 1 if not os.path.isdir(d): return '', 'cd: {0} is not a directory\n'.format(d), 1 if not os.access(d, os.X_OK): return '', 'cd: permission denied: {0}\n'.format(d), 1 if ON_WINDOWS and _is_unc_path(d) and _unc_check_enabled() and (not env.get('AUTO_PUSHD')): return '', "cd: can't cd to UNC path on Windows, unless $AUTO_PUSHD set or reg entry " \ + r'HKCU\SOFTWARE\MICROSOFT\Command Processor\DisableUNCCheck:DWORD = 1' + '\n', 1 # now, push the directory onto the dirstack if AUTO_PUSHD is set if cwd is not None and env.get('AUTO_PUSHD'): pushd(['-n', '-q', cwd]) if ON_WINDOWS and _is_unc_path(d): d = _unc_map_temp_drive(d) _change_working_directory(d, follow_symlinks) return None, None, 0 @lazyobject def pushd_parser(): parser = argparse.ArgumentParser(prog="pushd") parser.add_argument('dir', nargs='?') parser.add_argument('-n', dest='cd', help='Suppresses the normal change of directory when' ' adding directories to the stack, so that only the' ' stack is manipulated.', action='store_false') parser.add_argument('-q', dest='quiet', help='Do not call dirs, regardless of $PUSHD_SILENT', action='store_true') return parser def pushd(args, stdin=None): """xonsh command: pushd Adds a directory to the top of the directory stack, or rotates the stack, making the new top of the stack the current working directory. On Windows, if the path is a UNC path (begins with `\\\`) and if the `DisableUNCCheck` registry value is not enabled, creates a temporary mapped drive letter and sets the working directory there, emulating behavior of `PUSHD` in `CMD.EXE` """ global DIRSTACK try: args = pushd_parser.parse_args(args) except SystemExit: return None, None, 1 env = builtins.__xonsh_env__ pwd = env['PWD'] if env.get('PUSHD_MINUS', False): BACKWARD = '-' FORWARD = '+' else: BACKWARD = '+' FORWARD = '-' if args.dir is None: try: new_pwd = DIRSTACK.pop(0) except IndexError: e = 'pushd: Directory stack is empty\n' return None, e, 1 elif os.path.isdir(args.dir): new_pwd = args.dir else: try: num = int(args.dir[1:]) except ValueError: e = 'Invalid argument to pushd: {0}\n' return None, e.format(args.dir), 1 if num < 0: e = 'Invalid argument to pushd: {0}\n' return None, e.format(args.dir), 1 if num > len(DIRSTACK): e = 'Too few elements in dirstack ({0} elements)\n' return None, e.format(len(DIRSTACK)), 1 elif args.dir.startswith(FORWARD): if num == len(DIRSTACK): new_pwd = None else: new_pwd = DIRSTACK.pop(len(DIRSTACK) - 1 - num) elif args.dir.startswith(BACKWARD): if num == 0: new_pwd = None else: new_pwd = DIRSTACK.pop(num - 1) else: e = 'Invalid argument to pushd: {0}\n' return None, e.format(args.dir), 1 if new_pwd is not None: if ON_WINDOWS and _is_unc_path(new_pwd): new_pwd = _unc_map_temp_drive(new_pwd) if args.cd: DIRSTACK.insert(0, os.path.expanduser(pwd)) _change_working_directory(new_pwd) else: DIRSTACK.insert(0, os.path.expanduser(new_pwd)) maxsize = env.get('DIRSTACK_SIZE') if len(DIRSTACK) > maxsize: DIRSTACK = DIRSTACK[:maxsize] if not args.quiet and not env.get('PUSHD_SILENT'): return dirs([], None) return None, None, 0 @lazyobject def popd_parser(): parser = argparse.ArgumentParser(prog="popd") parser.add_argument('dir', nargs='?') parser.add_argument('-n', dest='cd', help='Suppresses the normal change of directory when' ' adding directories to the stack, so that only the' ' stack is manipulated.', action='store_false') parser.add_argument('-q', dest='quiet', help='Do not call dirs, 
regardless of $PUSHD_SILENT', action='store_true') return parser def popd(args, stdin=None): """ xonsh command: popd Removes entries from the directory stack. """ global DIRSTACK try: args = pushd_parser.parse_args(args) except SystemExit: return None, None, 1 env = builtins.__xonsh_env__ if env.get('PUSHD_MINUS'): BACKWARD = '-' FORWARD = '+' else: BACKWARD = '-' FORWARD = '+' if args.dir is None: try: new_pwd = DIRSTACK.pop(0) except IndexError: e = 'popd: Directory stack is empty\n' return None, e, 1 else: try: num = int(args.dir[1:]) except ValueError: e = 'Invalid argument to popd: {0}\n' return None, e.format(args.dir), 1 if num < 0: e = 'Invalid argument to popd: {0}\n' return None, e.format(args.dir), 1 if num > len(DIRSTACK): e = 'Too few elements in dirstack ({0} elements)\n' return None, e.format(len(DIRSTACK)), 1 elif args.dir.startswith(FORWARD): if num == len(DIRSTACK): new_pwd = DIRSTACK.pop(0) else: new_pwd = None DIRSTACK.pop(len(DIRSTACK) - 1 - num) elif args.dir.startswith(BACKWARD): if num == 0: new_pwd = DIRSTACK.pop(0) else: new_pwd = None DIRSTACK.pop(num - 1) else: e = 'Invalid argument to popd: {0}\n' return None, e.format(args.dir), 1 if new_pwd is not None: e = None if args.cd: env = builtins.__xonsh_env__ pwd = env['PWD'] _change_working_directory(new_pwd) if ON_WINDOWS: drive, rem_path = os.path.splitdrive(pwd) _unc_unmap_temp_drive(drive.casefold(), new_pwd) if not args.quiet and not env.get('PUSHD_SILENT'): return dirs([], None) return None, None, 0 @lazyobject def dirs_parser(): parser = argparse.ArgumentParser(prog="dirs") parser.add_argument('N', nargs='?') parser.add_argument('-c', dest='clear', help='Clears the directory stack by deleting all of' ' the entries.', action='store_true') parser.add_argument('-p', dest='print_long', help='Print the directory stack with one entry per' ' line.', action='store_true') parser.add_argument('-v', dest='verbose', help='Print the directory stack with one entry per' ' line, prefixing each entry with its index in the' ' stack.', action='store_true') parser.add_argument('-l', dest='long', help='Produces a longer listing; the default listing' ' format uses a tilde to denote the home directory.', action='store_true') return parser def dirs(args, stdin=None): """xonsh command: dirs Displays the list of currently remembered directories. Can also be used to clear the directory stack. 
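    For example (illustrative): ``dirs -v`` prints one entry per line,
    prefixed with its index in the stack, while ``dirs -c`` deletes all
    entries from the stack.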
""" global DIRSTACK try: args = dirs_parser.parse_args(args) except SystemExit: return None, None env = builtins.__xonsh_env__ dirstack = [os.path.expanduser(env['PWD'])] + DIRSTACK if env.get('PUSHD_MINUS'): BACKWARD = '-' FORWARD = '+' else: BACKWARD = '-' FORWARD = '+' if args.clear: DIRSTACK = [] return None, None, 0 if args.long: o = dirstack else: d = os.path.expanduser('~') o = [i.replace(d, '~') for i in dirstack] if args.verbose: out = '' pad = len(str(len(o) - 1)) for (ix, e) in enumerate(o): blanks = ' ' * (pad - len(str(ix))) out += '\n{0}{1} {2}'.format(blanks, ix, e) out = out[1:] elif args.print_long: out = '\n'.join(o) else: out = ' '.join(o) N = args.N if N is not None: try: num = int(N[1:]) except ValueError: e = 'Invalid argument to dirs: {0}\n' return None, e.format(N), 1 if num < 0: e = 'Invalid argument to dirs: {0}\n' return None, e.format(len(o)), 1 if num >= len(o): e = 'Too few elements in dirstack ({0} elements)\n' return None, e.format(len(o)), 1 if N.startswith(BACKWARD): idx = num elif N.startswith(FORWARD): idx = len(o) - 1 - num else: e = 'Invalid argument to dirs: {0}\n' return None, e.format(N), 1 out = o[idx] return out + '\n', None, 0 xonsh-0.6.0/xonsh/environ.py000066400000000000000000001505331320541242300160500ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Environment for the xonsh shell.""" import os import re import sys import json import pprint import textwrap import locale import builtins import warnings import contextlib import collections import collections.abc as cabc from xonsh import __version__ as XONSH_VERSION from xonsh.lazyasd import LazyObject, lazyobject from xonsh.codecache import run_script_with_cache from xonsh.dirstack import _get_cwd from xonsh.events import events from xonsh.foreign_shells import load_foreign_envs, load_foreign_aliases from xonsh.xontribs import update_context, prompt_xontrib_install from xonsh.platform import ( BASH_COMPLETIONS_DEFAULT, DEFAULT_ENCODING, PATH_DEFAULT, ON_WINDOWS, ON_LINUX, os_environ ) from xonsh.tools import ( always_true, always_false, ensure_string, is_env_path, str_to_env_path, env_path_to_str, is_bool, to_bool, bool_to_str, is_history_tuple, to_history_tuple, history_tuple_to_str, is_float, is_string, is_string_or_callable, is_completions_display_value, to_completions_display_value, is_string_set, csv_to_set, set_to_csv, is_int, is_bool_seq, to_bool_or_int, bool_or_int_to_str, csv_to_bool_seq, bool_seq_to_csv, DefaultNotGiven, print_exception, setup_win_unicode_console, intensify_colors_on_win_setter, is_dynamic_cwd_width, to_dynamic_cwd_tuple, dynamic_cwd_tuple_to_str, is_logfile_opt, to_logfile_opt, logfile_opt_to_str, executables_in, is_nonstring_seq_of_strings, pathsep_to_upper_seq, seq_to_upper_pathsep, print_color, is_history_backend, to_itself, swap_values, ) import xonsh.prompt.base as prompt events.doc('on_envvar_new', """ on_envvar_new(name: str, value: Any) -> None Fires after a new environment variable is created. Note: Setting envvars inside the handler might cause a recursion until the limit. """) events.doc('on_envvar_change', """ on_envvar_change(name: str, oldvalue: Any, newvalue: Any) -> None Fires after an environment variable is changed. Note: Setting envvars inside the handler might cause a recursion until the limit. 
""") @lazyobject def HELP_TEMPLATE(): return ('{{INTENSE_RED}}{envvar}{{NO_COLOR}}:\n\n' '{{INTENSE_YELLOW}}{docstr}{{NO_COLOR}}\n\n' 'default: {{CYAN}}{default}{{NO_COLOR}}\n' 'configurable: {{CYAN}}{configurable}{{NO_COLOR}}') @lazyobject def LOCALE_CATS(): lc = {'LC_CTYPE': locale.LC_CTYPE, 'LC_COLLATE': locale.LC_COLLATE, 'LC_NUMERIC': locale.LC_NUMERIC, 'LC_MONETARY': locale.LC_MONETARY, 'LC_TIME': locale.LC_TIME, } if hasattr(locale, 'LC_MESSAGES'): lc['LC_MESSAGES'] = locale.LC_MESSAGES return lc def locale_convert(key): """Creates a converter for a locale key.""" def lc_converter(val): try: locale.setlocale(LOCALE_CATS[key], val) val = locale.setlocale(LOCALE_CATS[key]) except (locale.Error, KeyError): msg = 'Failed to set locale {0!r} to {1!r}'.format(key, val) warnings.warn(msg, RuntimeWarning) return val return lc_converter def to_debug(x): """Converts value using to_bool_or_int() and sets this value on as the execer's debug level. """ val = to_bool_or_int(x) if hasattr(builtins, '__xonsh_execer__'): builtins.__xonsh_execer__.debug_level = val return val Ensurer = collections.namedtuple('Ensurer', ['validate', 'convert', 'detype']) Ensurer.__doc__ = """Named tuples whose elements are functions that represent environment variable validation, conversion, detyping. """ @lazyobject def DEFAULT_ENSURERS(): return { 'AUTO_CD': (is_bool, to_bool, bool_to_str), 'AUTO_PUSHD': (is_bool, to_bool, bool_to_str), 'AUTO_SUGGEST': (is_bool, to_bool, bool_to_str), 'AUTO_SUGGEST_IN_COMPLETIONS': (is_bool, to_bool, bool_to_str), 'BASH_COMPLETIONS': (is_env_path, str_to_env_path, env_path_to_str), 'CASE_SENSITIVE_COMPLETIONS': (is_bool, to_bool, bool_to_str), re.compile('\w*DIRS$'): (is_env_path, str_to_env_path, env_path_to_str), 'COLOR_INPUT': (is_bool, to_bool, bool_to_str), 'COLOR_RESULTS': (is_bool, to_bool, bool_to_str), 'COMPLETIONS_BRACKETS': (is_bool, to_bool, bool_to_str), 'COMPLETIONS_CONFIRM': (is_bool, to_bool, bool_to_str), 'COMPLETIONS_DISPLAY': (is_completions_display_value, to_completions_display_value, str), 'COMPLETIONS_MENU_ROWS': (is_int, int, str), 'COMPLETION_QUERY_LIMIT': (is_int, int, str), 'DIRSTACK_SIZE': (is_int, int, str), 'DYNAMIC_CWD_WIDTH': (is_dynamic_cwd_width, to_dynamic_cwd_tuple, dynamic_cwd_tuple_to_str), 'DYNAMIC_CWD_ELISION_CHAR': (is_string, ensure_string, ensure_string), 'EXPAND_ENV_VARS': (is_bool, to_bool, bool_to_str), 'FORCE_POSIX_PATHS': (is_bool, to_bool, bool_to_str), 'FOREIGN_ALIASES_OVERRIDE': (is_bool, to_bool, bool_to_str), 'FUZZY_PATH_COMPLETION': (is_bool, to_bool, bool_to_str), 'GLOB_SORTED': (is_bool, to_bool, bool_to_str), 'HISTCONTROL': (is_string_set, csv_to_set, set_to_csv), 'IGNOREEOF': (is_bool, to_bool, bool_to_str), 'INTENSIFY_COLORS_ON_WIN': (always_false, intensify_colors_on_win_setter, bool_to_str), 'LANG': (is_string, ensure_string, ensure_string), 'LC_COLLATE': (always_false, locale_convert('LC_COLLATE'), ensure_string), 'LC_CTYPE': (always_false, locale_convert('LC_CTYPE'), ensure_string), 'LC_MESSAGES': (always_false, locale_convert('LC_MESSAGES'), ensure_string), 'LC_MONETARY': (always_false, locale_convert('LC_MONETARY'), ensure_string), 'LC_NUMERIC': (always_false, locale_convert('LC_NUMERIC'), ensure_string), 'LC_TIME': (always_false, locale_convert('LC_TIME'), ensure_string), 'LOADED_CONFIG': (is_bool, to_bool, bool_to_str), 'LOADED_RC_FILES': (is_bool_seq, csv_to_bool_seq, bool_seq_to_csv), 'MOUSE_SUPPORT': (is_bool, to_bool, bool_to_str), 'MULTILINE_PROMPT': (is_string_or_callable, ensure_string, ensure_string), 
re.compile('\w*PATH$'): (is_env_path, str_to_env_path, env_path_to_str), 'PATHEXT': (is_nonstring_seq_of_strings, pathsep_to_upper_seq, seq_to_upper_pathsep), 'PRETTY_PRINT_RESULTS': (is_bool, to_bool, bool_to_str), 'PROMPT': (is_string_or_callable, ensure_string, ensure_string), 'PUSHD_MINUS': (is_bool, to_bool, bool_to_str), 'PUSHD_SILENT': (is_bool, to_bool, bool_to_str), 'RAISE_SUBPROC_ERROR': (is_bool, to_bool, bool_to_str), 'RIGHT_PROMPT': (is_string_or_callable, ensure_string, ensure_string), 'BOTTOM_TOOLBAR': (is_string_or_callable, ensure_string, ensure_string), 'SUBSEQUENCE_PATH_COMPLETION': (is_bool, to_bool, bool_to_str), 'SUGGEST_COMMANDS': (is_bool, to_bool, bool_to_str), 'SUGGEST_MAX_NUM': (is_int, int, str), 'SUGGEST_THRESHOLD': (is_int, int, str), 'SUPPRESS_BRANCH_TIMEOUT_MESSAGE': (is_bool, to_bool, bool_to_str), 'UPDATE_COMPLETIONS_ON_KEYPRESS': (is_bool, to_bool, bool_to_str), 'UPDATE_OS_ENVIRON': (is_bool, to_bool, bool_to_str), 'UPDATE_PROMPT_ON_KEYPRESS': (is_bool, to_bool, bool_to_str), 'VC_BRANCH_TIMEOUT': (is_float, float, str), 'VC_HG_SHOW_BRANCH': (is_bool, to_bool, bool_to_str), 'VI_MODE': (is_bool, to_bool, bool_to_str), 'VIRTUAL_ENV': (is_string, ensure_string, ensure_string), 'WIN_UNICODE_CONSOLE': (always_false, setup_win_unicode_console, bool_to_str), 'XONSHRC': (is_env_path, str_to_env_path, env_path_to_str), 'XONSH_APPEND_NEWLINE': (is_bool, to_bool, bool_to_str), 'XONSH_AUTOPAIR': (is_bool, to_bool, bool_to_str), 'XONSH_CACHE_SCRIPTS': (is_bool, to_bool, bool_to_str), 'XONSH_CACHE_EVERYTHING': (is_bool, to_bool, bool_to_str), 'XONSH_COLOR_STYLE': (is_string, ensure_string, ensure_string), 'XONSH_DEBUG': (always_false, to_debug, bool_or_int_to_str), 'XONSH_ENCODING': (is_string, ensure_string, ensure_string), 'XONSH_ENCODING_ERRORS': (is_string, ensure_string, ensure_string), 'XONSH_HISTORY_BACKEND': (is_history_backend, to_itself, ensure_string), 'XONSH_HISTORY_FILE': (is_string, ensure_string, ensure_string), 'XONSH_HISTORY_SIZE': (is_history_tuple, to_history_tuple, history_tuple_to_str), 'XONSH_LOGIN': (is_bool, to_bool, bool_to_str), 'XONSH_PROC_FREQUENCY': (is_float, float, str), 'XONSH_SHOW_TRACEBACK': (is_bool, to_bool, bool_to_str), 'XONSH_STDERR_PREFIX': (is_string, ensure_string, ensure_string), 'XONSH_STDERR_POSTFIX': (is_string, ensure_string, ensure_string), 'XONSH_STORE_STDOUT': (is_bool, to_bool, bool_to_str), 'XONSH_STORE_STDIN': (is_bool, to_bool, bool_to_str), 'XONSH_TRACEBACK_LOGFILE': (is_logfile_opt, to_logfile_opt, logfile_opt_to_str), 'XONSH_DATETIME_FORMAT': (is_string, ensure_string, ensure_string), } # # Defaults # def default_value(f): """Decorator for making callable default values.""" f._xonsh_callable_default = True return f def is_callable_default(x): """Checks if a value is a callable default.""" return callable(x) and getattr(x, '_xonsh_callable_default', False) DEFAULT_TITLE = '{current_job:{} | }{user}@{hostname}: {cwd} | xonsh' @default_value def xonsh_data_dir(env): """Ensures and returns the $XONSH_DATA_DIR""" xdd = os.path.expanduser(os.path.join(env.get('XDG_DATA_HOME'), 'xonsh')) os.makedirs(xdd, exist_ok=True) return xdd @default_value def xonsh_config_dir(env): """Ensures and returns the $XONSH_CONFIG_DIR""" xcd = os.path.expanduser(os.path.join(env.get('XDG_CONFIG_HOME'), 'xonsh')) os.makedirs(xcd, exist_ok=True) return xcd @default_value def xonshconfig(env): """Ensures and returns the $XONSHCONFIG""" xcd = env.get('XONSH_CONFIG_DIR') xc = os.path.join(xcd, 'config.json') return xc @default_value def 
default_xonshrc(env): """Creates a new instance of the default xonshrc tuple.""" if ON_WINDOWS: dxrc = (xonshconfig(env), os.path.join(os_environ['ALLUSERSPROFILE'], 'xonsh', 'xonshrc'), os.path.expanduser('~/.xonshrc')) else: dxrc = (xonshconfig(env), '/etc/xonshrc', os.path.expanduser('~/.xonshrc')) return dxrc # Default values should generally be immutable, that way if a user wants # to set them they have to do a copy and write them to the environment. # try to keep this sorted. @lazyobject def DEFAULT_VALUES(): dv = { 'AUTO_CD': False, 'AUTO_PUSHD': False, 'AUTO_SUGGEST': True, 'AUTO_SUGGEST_IN_COMPLETIONS': True, 'BASH_COMPLETIONS': BASH_COMPLETIONS_DEFAULT, 'CASE_SENSITIVE_COMPLETIONS': ON_LINUX, 'CDPATH': (), 'COLOR_INPUT': True, 'COLOR_RESULTS': True, 'COMPLETIONS_BRACKETS': True, 'COMPLETIONS_CONFIRM': False, 'COMPLETIONS_DISPLAY': 'multi', 'COMPLETIONS_MENU_ROWS': 5, 'COMPLETION_QUERY_LIMIT': 100, 'DIRSTACK_SIZE': 20, 'DYNAMIC_CWD_WIDTH': (float('inf'), 'c'), 'DYNAMIC_CWD_ELISION_CHAR': '', 'EXPAND_ENV_VARS': True, 'FORCE_POSIX_PATHS': False, 'FOREIGN_ALIASES_OVERRIDE': False, 'PROMPT_FIELDS': dict(prompt.PROMPT_FIELDS), 'FUZZY_PATH_COMPLETION': True, 'GLOB_SORTED': True, 'HISTCONTROL': set(), 'IGNOREEOF': False, 'INDENT': ' ', 'INTENSIFY_COLORS_ON_WIN': True, 'LANG': 'C.UTF-8', 'LC_CTYPE': locale.setlocale(locale.LC_CTYPE), 'LC_COLLATE': locale.setlocale(locale.LC_COLLATE), 'LC_TIME': locale.setlocale(locale.LC_TIME), 'LC_MONETARY': locale.setlocale(locale.LC_MONETARY), 'LC_NUMERIC': locale.setlocale(locale.LC_NUMERIC), 'LOADED_CONFIG': False, 'LOADED_RC_FILES': (), 'MOUSE_SUPPORT': False, 'MULTILINE_PROMPT': '.', 'PATH': PATH_DEFAULT, 'PATHEXT': ['.COM', '.EXE', '.BAT', '.CMD'] if ON_WINDOWS else [], 'PRETTY_PRINT_RESULTS': True, 'PROMPT': prompt.default_prompt(), 'PUSHD_MINUS': False, 'PUSHD_SILENT': False, 'RAISE_SUBPROC_ERROR': False, 'RIGHT_PROMPT': '', 'BOTTOM_TOOLBAR': '', 'SHELL_TYPE': 'best', 'SUBSEQUENCE_PATH_COMPLETION': True, 'SUPPRESS_BRANCH_TIMEOUT_MESSAGE': False, 'SUGGEST_COMMANDS': True, 'SUGGEST_MAX_NUM': 5, 'SUGGEST_THRESHOLD': 3, 'TITLE': DEFAULT_TITLE, 'UPDATE_COMPLETIONS_ON_KEYPRESS': False, 'UPDATE_OS_ENVIRON': False, 'UPDATE_PROMPT_ON_KEYPRESS': False, 'VC_BRANCH_TIMEOUT': 0.2 if ON_WINDOWS else 0.1, 'VC_HG_SHOW_BRANCH': True, 'VI_MODE': False, 'WIN_UNICODE_CONSOLE': True, 'XDG_CONFIG_HOME': os.path.expanduser(os.path.join('~', '.config')), 'XDG_DATA_HOME': os.path.expanduser(os.path.join('~', '.local', 'share')), 'XONSHCONFIG': xonshconfig, 'XONSHRC': default_xonshrc, 'XONSH_APPEND_NEWLINE': False, 'XONSH_AUTOPAIR': False, 'XONSH_CACHE_SCRIPTS': True, 'XONSH_CACHE_EVERYTHING': False, 'XONSH_COLOR_STYLE': 'default', 'XONSH_CONFIG_DIR': xonsh_config_dir, 'XONSH_DATA_DIR': xonsh_data_dir, 'XONSH_DEBUG': 0, 'XONSH_ENCODING': DEFAULT_ENCODING, 'XONSH_ENCODING_ERRORS': 'surrogateescape', 'XONSH_HISTORY_BACKEND': 'json', 'XONSH_HISTORY_FILE': os.path.expanduser('~/.xonsh_history.json'), 'XONSH_HISTORY_SIZE': (8128, 'commands'), 'XONSH_LOGIN': False, 'XONSH_PROC_FREQUENCY': 1e-4, 'XONSH_SHOW_TRACEBACK': False, 'XONSH_STDERR_PREFIX': '', 'XONSH_STDERR_POSTFIX': '', 'XONSH_STORE_STDIN': False, 'XONSH_STORE_STDOUT': False, 'XONSH_TRACEBACK_LOGFILE': None, 'XONSH_DATETIME_FORMAT': '%Y-%m-%d %H:%M', } if hasattr(locale, 'LC_MESSAGES'): dv['LC_MESSAGES'] = locale.setlocale(locale.LC_MESSAGES) return dv VarDocs = collections.namedtuple('VarDocs', ['docstr', 'configurable', 'default', 'store_as_str']) VarDocs.__doc__ = """Named tuple for environment variable 
documentation Parameters ---------- docstr : str The environment variable docstring. configurable : bool, optional Flag for whether the environment variable is configurable or not. default : str, optional Custom docstring for the default value for complex defaults. Is this is DefaultNotGiven, then the default will be looked up from DEFAULT_VALUES and converted to a str. store_as_str : bool, optional Flag for whether the environment variable should be stored as a string. This is used when persisting a variable that is not JSON serializable to the config file. For example, sets, frozensets, and potentially other non-trivial data types. default, False. """ # iterates from back VarDocs.__new__.__defaults__ = (True, DefaultNotGiven, False) # Please keep the following in alphabetic order - scopatz @lazyobject def DEFAULT_DOCS(): return { 'ANSICON': VarDocs('This is used on Windows to set the title, ' 'if available.', configurable=False), 'AUTO_CD': VarDocs( 'Flag to enable changing to a directory by entering the dirname or ' 'full path only (without the cd command).'), 'AUTO_PUSHD': VarDocs( 'Flag for automatically pushing directories onto the directory stack.' ), 'AUTO_SUGGEST': VarDocs( 'Enable automatic command suggestions based on history, like in the fish ' 'shell.\n\nPressing the right arrow key inserts the currently ' 'displayed suggestion. Only usable with ``$SHELL_TYPE=prompt_toolkit.``'), 'AUTO_SUGGEST_IN_COMPLETIONS': VarDocs( 'Places the auto-suggest result as the first option in the completions. ' 'This enables you to tab complete the auto-suggestion.' ), 'BASH_COMPLETIONS': VarDocs( 'This is a list (or tuple) of strings that specifies where the ' '``bash_completion`` script may be found. ' 'The first valid path will be used. For better performance, ' 'bash-completion v2.x is recommended since it lazy-loads individual ' 'completion scripts. ' 'For both bash-completion v1.x and v2.x, paths of individual completion ' 'scripts (like ``.../completes/ssh``) do not need to be included here. ' 'The default values are platform ' 'dependent, but sane. 
To specify an alternate list, do so in the run ' 'control file.', default=( "Normally this is:\n\n" " ``('/usr/share/bash-completion/bash_completion', )``\n\n" "But, on Mac it is:\n\n" " ``('/usr/local/share/bash-completion/bash_completion', " "'/usr/local/etc/bash_completion')``\n\n" "Other OS-specific defaults may be added in the future.")), 'CASE_SENSITIVE_COMPLETIONS': VarDocs( 'Sets whether completions should be case sensitive or case ' 'insensitive.', default='True on Linux, False otherwise.'), 'CDPATH': VarDocs( 'A list of paths to be used as roots for a cd, breaking compatibility ' 'with Bash, xonsh always prefer an existing relative path.'), 'COLOR_INPUT': VarDocs('Flag for syntax highlighting interactive input.'), 'COLOR_RESULTS': VarDocs('Flag for syntax highlighting return values.'), 'COMPLETIONS_BRACKETS': VarDocs( 'Flag to enable/disable inclusion of square brackets and parentheses ' 'in Python attribute completions.', default='True'), 'COMPLETIONS_DISPLAY': VarDocs( 'Configure if and how Python completions are displayed by the ' '``prompt_toolkit`` shell.\n\nThis option does not affect Bash ' 'completions, auto-suggestions, etc.\n\nChanging it at runtime will ' 'take immediate effect, so you can quickly disable and enable ' 'completions during shell sessions.\n\n' "- If ``$COMPLETIONS_DISPLAY`` is ``none`` or ``false``, do not display\n" " those completions.\n" "- If ``$COMPLETIONS_DISPLAY`` is ``single``, display completions in a\n" ' single column while typing.\n' "- If ``$COMPLETIONS_DISPLAY`` is ``multi`` or ``true``, display completions\n" " in multiple columns while typing.\n\n" 'These option values are not case- or type-sensitive, so e.g.' "writing ``$COMPLETIONS_DISPLAY = None`` " "and ``$COMPLETIONS_DISPLAY = 'none'`` are equivalent. Only usable with " "``$SHELL_TYPE=prompt_toolkit``"), 'COMPLETIONS_CONFIRM': VarDocs( 'While tab-completions menu is displayed, press to confirm ' 'completion instead of running command. This only affects the ' 'prompt-toolkit shell.'), 'COMPLETIONS_MENU_ROWS': VarDocs( 'Number of rows to reserve for tab-completions menu if ' "``$COMPLETIONS_DISPLAY`` is ``single`` or ``multi``. This only affects the " 'prompt-toolkit shell.'), 'COMPLETION_QUERY_LIMIT': VarDocs( 'The number of completions to display before the user is asked ' 'for confirmation.'), 'DIRSTACK_SIZE': VarDocs('Maximum size of the directory stack.'), 'DYNAMIC_CWD_WIDTH': VarDocs( 'Maximum length in number of characters ' 'or as a percentage for the ``cwd`` prompt variable. For example, ' '"20" is a twenty character width and "10%" is ten percent of the ' 'number of columns available.'), 'DYNAMIC_CWD_ELISION_CHAR': VarDocs( 'The string used to show a shortened directory in a shortened cwd, ' 'e.g. ``\'…\'``.'), 'EXPAND_ENV_VARS': VarDocs( 'Toggles whether environment variables are expanded inside of strings ' 'in subprocess mode.'), 'FORCE_POSIX_PATHS': VarDocs( "Forces forward slashes (``/``) on Windows systems when using auto " 'completion if set to anything truthy.', configurable=ON_WINDOWS), 'FOREIGN_ALIASES_OVERRIDE': VarDocs( 'Whether or not foreign aliases should override xonsh aliases ' 'with the same name. Note that setting of this must happen in the ' 'static configuration file ' "``$XONSH_CONFIG_DIR/config.json`` in the 'env' section and not in " '``.xonshrc`` as loading of foreign aliases happens before' '``.xonshrc`` is parsed', configurable=True), 'PROMPT_FIELDS': VarDocs( 'Dictionary containing variables to be used when formatting $PROMPT ' "and $TITLE. 
See 'Customizing the Prompt' " 'http://xon.sh/tutorial.html#customizing-the-prompt', configurable=False, default='``xonsh.prompt.PROMPT_FIELDS``'), 'FUZZY_PATH_COMPLETION': VarDocs( "Toggles 'fuzzy' matching of paths for tab completion, which is only " "used as a fallback if no other completions succeed but can be used " "as a way to adjust for typographical errors. If ``True``, then, e.g.," " ``xonhs`` will match ``xonsh``."), 'GLOB_SORTED': VarDocs( "Toggles whether globbing results are manually sorted. If ``False``, " "the results are returned in arbitrary order."), 'HISTCONTROL': VarDocs( 'A set of strings (comma-separated list in string form) of options ' 'that determine what commands are saved to the history list. By ' "default all commands are saved. The option ``ignoredups`` will not " "save the command if it matches the previous command. The option " "'ignoreerr' will cause any commands that fail (i.e. return non-zero " "exit status) to not be added to the history list.", store_as_str=True), 'IGNOREEOF': VarDocs('Prevents Ctrl-D from exiting the shell.'), 'INDENT': VarDocs('Indentation string for multiline input'), 'INTENSIFY_COLORS_ON_WIN': VarDocs( 'Enhance style colors for readability ' 'when using the default terminal (``cmd.exe``) on Windows. Blue colors, ' 'which are hard to read, are replaced with cyan. Other colors are ' 'generally replaced by their bright counter parts.', configurable=ON_WINDOWS), 'LANG': VarDocs('Fallback locale setting for systems where it matters'), 'LOADED_CONFIG': VarDocs( 'Whether or not the xonsh config file was loaded', configurable=False), 'LOADED_RC_FILES': VarDocs( 'Whether or not any of the xonsh run control files were loaded at ' 'startup. This is a sequence of bools in Python that is converted ' "to a CSV list in string form, ie ``[True, False]`` becomes " "``'True,False'``.", configurable=False), 'MOUSE_SUPPORT': VarDocs( 'Enable mouse support in the ``prompt_toolkit`` shell. This allows ' 'clicking for positioning the cursor or selecting a completion. In ' 'some terminals however, this disables the ability to scroll back ' 'through the history of the terminal. Only usable with ' '``$SHELL_TYPE=prompt_toolkit``'), 'MULTILINE_PROMPT': VarDocs( 'Prompt text for 2nd+ lines of input, may be str or function which ' 'returns a str.'), 'OLDPWD': VarDocs('Used to represent a previous present working directory.', configurable=False), 'PATH': VarDocs( 'List of strings representing where to look for executables.'), 'PATHEXT': VarDocs('Sequence of extension strings (eg, ``.EXE``) for ' 'filtering valid executables by. Each element must be ' 'uppercase.'), 'PRETTY_PRINT_RESULTS': VarDocs( 'Flag for "pretty printing" return values.'), 'PROMPT': VarDocs( 'The prompt text. May contain keyword arguments which are ' "auto-formatted, see 'Customizing the Prompt' at " 'http://xon.sh/tutorial.html#customizing-the-prompt. ' 'This value is never inherited from parent processes.', default='``xonsh.environ.DEFAULT_PROMPT``'), 'PUSHD_MINUS': VarDocs( 'Flag for directory pushing functionality. False is the normal ' 'behavior.'), 'PUSHD_SILENT': VarDocs( 'Whether or not to suppress directory stack manipulation output.'), 'RAISE_SUBPROC_ERROR': VarDocs( 'Whether or not to raise an error if a subprocess (captured or ' 'uncaptured) returns a non-zero exit status, which indicates failure. ' 'This is most useful in xonsh scripts or modules where failures ' 'should cause an end to execution. This is less useful at a terminal. 
' 'The error that is raised is a ``subprocess.CalledProcessError``.'), 'RIGHT_PROMPT': VarDocs( 'Template string for right-aligned text ' 'at the prompt. This may be parametrized in the same way as ' 'the ``$PROMPT`` variable. Currently, this is only available in the ' 'prompt-toolkit shell.'), 'BOTTOM_TOOLBAR': VarDocs( 'Template string for the bottom toolbar. ' 'This may be parametrized in the same way as ' 'the ``$PROMPT`` variable. Currently, this is only available in the ' 'prompt-toolkit shell.'), 'SHELL_TYPE': VarDocs( 'Which shell is used. Currently two base shell types are supported:\n\n' " - ``readline`` that is backed by Python's readline module\n" " - ``prompt_toolkit`` that uses external library of the same name\n" " - ``random`` selects a random shell from the above on startup\n" " - ``best`` selects the most feature-rich shell available on the\n" " user's system\n\n" 'To use the ``prompt_toolkit`` shell you need to have the ' '`prompt_toolkit `_' ' library installed. To specify which shell should be used, do so in ' 'the run control file.', default='``best``'), 'SUBSEQUENCE_PATH_COMPLETION': VarDocs( "Toggles subsequence matching of paths for tab completion. " "If ``True``, then, e.g., ``~/u/ro`` can match ``~/lou/carcolh``."), 'SUGGEST_COMMANDS': VarDocs( 'When a user types an invalid command, xonsh will try to offer ' 'suggestions of similar valid commands if this is True.'), 'SUGGEST_MAX_NUM': VarDocs( 'xonsh will show at most this many suggestions in response to an ' 'invalid command. If negative, there is no limit to how many ' 'suggestions are shown.'), 'SUGGEST_THRESHOLD': VarDocs( 'An error threshold. If the Levenshtein distance between the entered ' 'command and a valid command is less than this value, the valid ' 'command will be offered as a suggestion. Also used for "fuzzy" ' 'tab completion of paths.'), 'SUPPRESS_BRANCH_TIMEOUT_MESSAGE': VarDocs( 'Whether or not to suppress branch timeout warning messages.'), 'TERM': VarDocs( 'TERM is sometimes set by the terminal emulator. This is used (when ' "valid) to determine whether or not to set the title. Users shouldn't " "need to set this themselves. Note that this variable should be set as " "early as possible in order to ensure it is effective. Here are a few " "options:\n\n" "* Set this from the program that launches xonsh. On POSIX systems, \n" " this can be performed by using env, e.g. \n" " ``/usr/bin/env TERM=xterm-color xonsh`` or similar.\n" "* From the xonsh command line, namely ``xonsh -DTERM=xterm-color``.\n" "* In the config file with ``{\"env\": {\"TERM\": \"xterm-color\"}}``.\n" "* Lastly, in xonshrc with ``$TERM``\n\n" "Ideally, your terminal emulator will set this correctly but that does " "not always happen.", configurable=False), 'TITLE': VarDocs( 'The title text for the window in which xonsh is running. Formatted ' "in the same manner as ``$PROMPT``, see 'Customizing the Prompt' " 'http://xon.sh/tutorial.html#customizing-the-prompt.', default='``xonsh.environ.DEFAULT_TITLE``'), 'UPDATE_COMPLETIONS_ON_KEYPRESS': VarDocs( 'Completions display is evaluated and presented whenever a key is ' 'pressed. This avoids the need to press TAB, except to cycle through ' 'the possibilities. This currently only affects the prompt-toolkit shell.' ), 'UPDATE_OS_ENVIRON': VarDocs( "If True ``os_environ`` will always be updated " "when the xonsh environment changes. 
The environment can be reset to " "the default value by calling ``__xonsh_env__.undo_replace_env()``"), 'UPDATE_PROMPT_ON_KEYPRESS': VarDocs( 'Disables caching the prompt between commands, ' 'so that it would be reevaluated on each keypress. ' 'Disabled by default because of the incurred performance penalty.'), 'VC_BRANCH_TIMEOUT': VarDocs( 'The timeout (in seconds) for version control ' 'branch computations. This is a timeout per subprocess call, so the ' 'total time to compute will be larger than this in many cases.'), 'VC_HG_SHOW_BRANCH': VarDocs( 'Whether or not to show the Mercurial branch in the prompt.'), 'VI_MODE': VarDocs( "Flag to enable ``vi_mode`` in the ``prompt_toolkit`` shell."), 'VIRTUAL_ENV': VarDocs( 'Path to the currently active Python environment.', configurable=False), 'WIN_UNICODE_CONSOLE': VarDocs( "Enables unicode support in windows terminals. Requires the external " "library ``win_unicode_console``.", configurable=ON_WINDOWS), 'XDG_CONFIG_HOME': VarDocs( 'Open desktop standard configuration home dir. This is the same ' 'default as used in the standard.', configurable=False, default="``~/.config``"), 'XDG_DATA_HOME': VarDocs( 'Open desktop standard data home dir. This is the same default as ' 'used in the standard.', default="``~/.local/share``"), 'XONSHCONFIG': VarDocs( 'The location of the static xonsh configuration file, if it exists. ' 'This is in JSON format.', configurable=False, default="``$XONSH_CONFIG_DIR/config.json``"), 'XONSHRC': VarDocs( 'A list of the locations of run control files, if they exist. User ' 'defined run control file will supersede values set in system-wide ' 'control file if there is a naming collision.', default=( "On Linux & Mac OSX: ``['/etc/xonshrc', '~/.xonshrc']``\n" "\nOn Windows: " "``['%ALLUSERSPROFILE%\\\\xonsh\\\\xonshrc', '~/.xonshrc']``")), 'XONSH_APPEND_NEWLINE': VarDocs( 'Append new line when a partial line is preserved in output.' ), 'XONSH_AUTOPAIR': VarDocs( 'Whether Xonsh will auto-insert matching parentheses, brackets, and ' 'quotes. Only available under the prompt-toolkit shell.' ), 'XONSH_CACHE_SCRIPTS': VarDocs( 'Controls whether the code for scripts run from xonsh will be cached' ' (``True``) or re-compiled each time (``False``).'), 'XONSH_CACHE_EVERYTHING': VarDocs( 'Controls whether all code (including code entered at the interactive' ' prompt) will be cached.'), 'XONSH_COLOR_STYLE': VarDocs( 'Sets the color style for xonsh colors. This is a style name, not ' 'a color map. Run ``xonfig styles`` to see the available styles.'), 'XONSH_CONFIG_DIR': VarDocs( 'This is the location where xonsh configuration information is stored.', configurable=False, default="``$XDG_CONFIG_HOME/xonsh``"), 'XONSH_DEBUG': VarDocs( 'Sets the xonsh debugging level. This may be an integer or a boolean. ' 'Setting this variable prior to stating xonsh to ``1`` or ``True`` ' 'will suppress amalgamated imports. Setting it to ``2`` will get some ' 'basic information like input transformation, command replacement. ' 'With ``3`` or a higher number will make more debugging information ' 'presented, like PLY parsing messages.', configurable=False), 'XONSH_DATA_DIR': VarDocs( 'This is the location where xonsh data files are stored, such as ' 'history.', default="``$XDG_DATA_HOME/xonsh``"), 'XONSH_ENCODING': VarDocs( 'This is the encoding that xonsh should use for subprocess operations.', default='``sys.getdefaultencoding()``'), 'XONSH_ENCODING_ERRORS': VarDocs( 'The flag for how to handle encoding errors should they happen. 
' 'Any string flag that has been previously registered with Python ' "is allowed. See the 'Python codecs documentation' " "(https://docs.python.org/3/library/codecs.html#error-handlers) " 'for more information and available options.', default="``surrogateescape``"), 'XONSH_GITSTATUS_*': VarDocs( 'Symbols for gitstatus prompt. Default values are: \n\n' '* ``XONSH_GITSTATUS_HASH``: ``:``\n' '* ``XONSH_GITSTATUS_BRANCH``: ``{CYAN}``\n' '* ``XONSH_GITSTATUS_OPERATION``: ``{CYAN}``\n' '* ``XONSH_GITSTATUS_STAGED``: ``{RED}●``\n' '* ``XONSH_GITSTATUS_CONFLICTS``: ``{RED}×``\n' '* ``XONSH_GITSTATUS_CHANGED``: ``{BLUE}+``\n' '* ``XONSH_GITSTATUS_UNTRACKED``: ``…``\n' '* ``XONSH_GITSTATUS_STASHED``: ``⚑``\n' '* ``XONSH_GITSTATUS_CLEAN``: ``{BOLD_GREEN}✓``\n' '* ``XONSH_GITSTATUS_AHEAD``: ``↑·``\n' '* ``XONSH_GITSTATUS_BEHIND``: ``↓·``\n' ), 'XONSH_HISTORY_BACKEND': VarDocs( "Set which history backend to use. Options are: 'json', " "'sqlite', and 'dummy'. The default is 'json'. " '``XONSH_HISTORY_BACKEND`` also accepts a class type that inherits ' 'from ``xonsh.history.base.History``, or its instance.'), 'XONSH_HISTORY_FILE': VarDocs( 'Location of history file (deprecated).', configurable=False, default="``~/.xonsh_history``"), 'XONSH_HISTORY_SIZE': VarDocs( 'Value and units tuple that sets the size of history after garbage ' 'collection. Canonical units are:\n\n' "- ``commands`` for the number of past commands executed,\n" "- ``files`` for the number of history files to keep,\n" "- ``s`` for the number of seconds in the past that are allowed, and\n" "- ``b`` for the number of bytes that history may consume.\n\n" "Common abbreviations, such as '6 months' or '1 GB' are also allowed.", default="``(8128, 'commands')`` or ``'8128 commands'``"), 'XONSH_INTERACTIVE': VarDocs( '``True`` if xonsh is running interactively, and ``False`` otherwise.', configurable=False), 'XONSH_LOGIN': VarDocs( '``True`` if xonsh is running as a login shell, and ``False`` otherwise.', configurable=False), 'XONSH_PROC_FREQUENCY': VarDocs( 'The process frequency is the time that ' 'xonsh process threads sleep for while running command pipelines. ' 'The value has units of seconds [s].'), 'XONSH_SHOW_TRACEBACK': VarDocs( 'Controls if a traceback is shown if exceptions occur in the shell. ' 'Set to ``True`` to always show traceback or ``False`` to always hide. ' 'If undefined then the traceback is hidden but a notice is shown on how ' 'to enable the full traceback.'), 'XONSH_SOURCE': VarDocs( "When running a xonsh script, this variable contains the absolute path " "to the currently executing script's file.", configurable=False), 'XONSH_STDERR_PREFIX': VarDocs( 'A format string, using the same keys and colors as ``$PROMPT``, that ' 'is prepended whenever stderr is displayed. This may be used in ' 'conjunction with ``$XONSH_STDERR_POSTFIX`` to close out the block.' 'For example, to have stderr appear on a red background, the ' 'prefix & postfix pair would be "{BACKGROUND_RED}" & "{NO_COLOR}".'), 'XONSH_STDERR_POSTFIX': VarDocs( 'A format string, using the same keys and colors as ``$PROMPT``, that ' 'is appended whenever stderr is displayed. This may be used in ' 'conjunction with ``$XONSH_STDERR_PREFIX`` to start the block.' 
'For example, to have stderr appear on a red background, the ' 'prefix & postfix pair would be "{BACKGROUND_RED}" & "{NO_COLOR}".'), 'XONSH_STORE_STDIN': VarDocs( 'Whether or not to store the stdin that is supplied to the ' '``!()`` and ``![]`` operators.'), 'XONSH_STORE_STDOUT': VarDocs( 'Whether or not to store the ``stdout`` and ``stderr`` streams in the ' 'history files.'), 'XONSH_TRACEBACK_LOGFILE': VarDocs( 'Specifies a file to store the traceback log to, regardless of whether ' '``XONSH_SHOW_TRACEBACK`` has been set. Its value must be a writable file ' 'or None / the empty string if traceback logging is not desired. ' 'Logging to a file is not enabled by default.'), 'XONSH_DATETIME_FORMAT': VarDocs( 'The format that is used for ``datetime.strptime()`` in various places' 'i.e the history timestamp option'), } # # actual environment # class Env(cabc.MutableMapping): """A xonsh environment, whose variables have limited typing (unlike BASH). Most variables are, by default, strings (like BASH). However, the following rules also apply based on variable-name: * PATH: any variable whose name ends in PATH is a list of strings. * XONSH_HISTORY_SIZE: this variable is an (int | float, str) tuple. * LC_* (locale categories): locale category names get/set the Python locale via locale.getlocale() and locale.setlocale() functions. An Env instance may be converted to an untyped version suitable for use in a subprocess. """ _arg_regex = None def __init__(self, *args, **kwargs): """If no initial environment is given, os_environ is used.""" self._d = {} # sentinel value for non existing envvars self._no_value = object() self._orig_env = None self._ensurers = {k: Ensurer(*v) for k, v in DEFAULT_ENSURERS.items()} self._defaults = DEFAULT_VALUES self._docs = DEFAULT_DOCS if len(args) == 0 and len(kwargs) == 0: args = (os_environ,) for key, val in dict(*args, **kwargs).items(): self[key] = val if ON_WINDOWS: path_key = next((k for k in self._d if k.upper() == 'PATH'), None) if path_key: self['PATH'] = self._d.pop(path_key) if 'PATH' not in self._d: # this is here so the PATH is accessible to subprocs and so that # it can be modified in-place in the xonshrc file self._d['PATH'] = list(PATH_DEFAULT) self._detyped = None @property def arg_regex(self): if self._arg_regex is None: self._arg_regex = re.compile(r'ARG(\d+)') return self._arg_regex @staticmethod def detypeable(val): return not (callable(val) or isinstance(val, cabc.MutableMapping)) def detype(self): if self._detyped is not None: return self._detyped ctx = {} for key, val in self._d.items(): if not self.detypeable(val): continue if not isinstance(key, str): key = str(key) ensurer = self.get_ensurer(key) val = ensurer.detype(val) ctx[key] = val self._detyped = ctx return ctx def replace_env(self): """Replaces the contents of os_environ with a detyped version of the xonsh environment. """ if self._orig_env is None: self._orig_env = dict(os_environ) os_environ.clear() os_environ.update(self.detype()) def undo_replace_env(self): """Replaces the contents of os_environ with a detyped version of the xonsh environment. 
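More precisely, this restores the contents of ``os_environ`` that were
saved by a previous call to ``replace_env()``; if ``replace_env()`` has
not been called, this is a no-op. Illustrative sketch (added commentary,
not part of the original docs), assuming an ``Env`` instance ``env``::

    env.replace_env()       # os_environ now mirrors the detyped xonsh env
    # ... run code that reads os.environ directly ...
    env.undo_replace_env()  # the saved os_environ contents are restored

For scoped, temporary overrides of individual variables, see the
``swap()`` context manager defined later in this class.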
""" if self._orig_env is not None: os_environ.clear() os_environ.update(self._orig_env) self._orig_env = None def get_ensurer(self, key, default=Ensurer(always_true, None, ensure_string)): """Gets an ensurer for the given key.""" if key in self._ensurers: return self._ensurers[key] for k, ensurer in self._ensurers.items(): if isinstance(k, str): continue if k.match(key) is not None: break else: ensurer = default self._ensurers[key] = ensurer return ensurer def get_docs(self, key, default=VarDocs('')): """Gets the documentation for the environment variable.""" vd = self._docs.get(key, None) if vd is None: return default if vd.default is DefaultNotGiven: dval = pprint.pformat(self._defaults.get(key, '')) vd = vd._replace(default=dval) self._docs[key] = vd return vd def help(self, key): """Get information about a specific environment variable.""" vardocs = self.get_docs(key) width = min(79, os.get_terminal_size()[0]) docstr = '\n'.join(textwrap.wrap(vardocs.docstr, width=width)) template = HELP_TEMPLATE.format(envvar=key, docstr=docstr, default=vardocs.default, configurable=vardocs.configurable) print_color(template) def is_manually_set(self, varname): """ Checks if an environment variable has been manually set. """ return varname in self._d @contextlib.contextmanager def swap(self, other=None, **kwargs): """Provides a context manager for temporarily swapping out certain environment variables with other values. On exit from the context manager, the original values are restored. """ old = {} # single positional argument should be a dict-like object if other is not None: for k, v in other.items(): old[k] = self.get(k, NotImplemented) self[k] = v # kwargs could also have been sent in for k, v in kwargs.items(): old[k] = self.get(k, NotImplemented) self[k] = v yield self # restore the values for k, v in old.items(): if v is NotImplemented: del self[k] else: self[k] = v # # Mutable mapping interface # def __getitem__(self, key): # remove this block on next release if key == 'FORMATTER_DICT': print('PendingDeprecationWarning: FORMATTER_DICT is an alias of ' 'PROMPT_FIELDS and will be removed in the next release', file=sys.stderr) return self['PROMPT_FIELDS'] if key is Ellipsis: return self m = self.arg_regex.match(key) if (m is not None) and (key not in self._d) and ('ARGS' in self._d): args = self._d['ARGS'] ix = int(m.group(1)) if ix >= len(args): e = "Not enough arguments given to access ARG{0}." 
raise KeyError(e.format(ix)) val = self._d['ARGS'][ix] elif key in self._d: val = self._d[key] elif key in self._defaults: val = self._defaults[key] if is_callable_default(val): val = val(self) else: e = "Unknown environment variable: ${}" raise KeyError(e.format(key)) if isinstance(val, (cabc.MutableSet, cabc.MutableSequence, cabc.MutableMapping)): self._detyped = None return val def __setitem__(self, key, val): ensurer = self.get_ensurer(key) if not ensurer.validate(val): val = ensurer.convert(val) # existing envvars can have any value including None old_value = self._d[key] if key in self._d else self._no_value self._d[key] = val if self.detypeable(val): self._detyped = None if self.get('UPDATE_OS_ENVIRON'): if self._orig_env is None: self.replace_env() else: os_environ[key] = ensurer.detype(val) if old_value is self._no_value: events.on_envvar_new.fire(name=key, value=val) elif old_value != val: events.on_envvar_change.fire(name=key, oldvalue=old_value, newvalue=val) def __delitem__(self, key): val = self._d.pop(key) if self.detypeable(val): self._detyped = None if self.get('UPDATE_OS_ENVIRON') and key in os_environ: del os_environ[key] def get(self, key, default=None): """The environment will look up default values from its own defaults if a default is not given here. """ try: return self[key] except KeyError: return default def __iter__(self): yield from (set(self._d) | set(self._defaults)) def __contains__(self, item): return item in self._d or item in self._defaults def __len__(self): return len(self._d) def __str__(self): return str(self._d) def __repr__(self): return '{0}.{1}(...)'.format(self.__class__.__module__, self.__class__.__name__, self._d) def _repr_pretty_(self, p, cycle): name = '{0}.{1}'.format(self.__class__.__module__, self.__class__.__name__) with p.group(0, name + '(', ')'): if cycle: p.text('...') elif len(self): p.break_() p.pretty(dict(self)) def _yield_executables(directory, name): if ON_WINDOWS: base_name, ext = os.path.splitext(name.lower()) for fname in executables_in(directory): fbase, fext = os.path.splitext(fname.lower()) if base_name == fbase and (len(ext) == 0 or ext == fext): yield os.path.join(directory, fname) else: for x in executables_in(directory): if x == name: yield os.path.join(directory, name) return def locate_binary(name): """Locates an executable on the file system.""" return builtins.__xonsh_commands_cache__.locate_binary(name) BASE_ENV = LazyObject(lambda: { 'BASH_COMPLETIONS': list(DEFAULT_VALUES['BASH_COMPLETIONS']), 'PROMPT_FIELDS': dict(DEFAULT_VALUES['PROMPT_FIELDS']), 'XONSH_VERSION': XONSH_VERSION, }, globals(), 'BASE_ENV') def load_static_config(ctx, config=None): """Loads a static configuration file from a given context, rather than the current environment. Optionally may pass in configuration file name. 
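Illustrative sketch (added commentary, not from the original docs)::

    conf = load_static_config(ctx)
    # On success, ctx['LOADED_CONFIG'] is True and conf is the parsed
    # JSON mapping, typically with keys such as 'env', 'foreign_shells'
    # and 'xontribs', e.g.:
    # {'env': {'EDITOR': 'vi'},
    #  'foreign_shells': [{'shell': 'bash', 'login': True}],
    #  'xontribs': [...]}
    # On failure, conf is {} and ctx['LOADED_CONFIG'] is False.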
""" env = {} env['XDG_CONFIG_HOME'] = ctx.get('XDG_CONFIG_HOME', DEFAULT_VALUES['XDG_CONFIG_HOME']) env['XONSH_CONFIG_DIR'] = ctx['XONSH_CONFIG_DIR'] if 'XONSH_CONFIG_DIR' in ctx \ else xonsh_config_dir(env) if config is not None: env['XONSHCONFIG'] = ctx['XONSHCONFIG'] = config elif 'XONSHCONFIG' in ctx: config = env['XONSHCONFIG'] = ctx['XONSHCONFIG'] else: # don't set in ctx in order to maintain default config = env['XONSHCONFIG'] = xonshconfig(env) if os.path.isfile(config): # Note that an Env instance at __xonsh_env__ has not been started yet, # per se, so we have to use os_environ encoding = os_environ.get('XONSH_ENCODING', DEFAULT_VALUES.get('XONSH_ENCODING', 'utf8')) errors = os_environ.get('XONSH_ENCODING_ERRORS', DEFAULT_VALUES.get('XONSH_ENCODING_ERRORS', 'surrogateescape')) with open(config, 'r', encoding=encoding, errors=errors) as f: try: conf = json.load(f) assert isinstance(conf, cabc.Mapping) ctx['LOADED_CONFIG'] = True except Exception as e: conf = {} ctx['LOADED_CONFIG'] = False print_exception() # JSONDecodeError was added in Python v3.5 jerr = json.JSONDecodeError \ if hasattr(json, 'JSONDecodeError') else ValueError if isinstance(e, jerr): msg = 'Xonsh config file is not valid JSON.' else: msg = 'Could not load xonsh config.' print(msg, file=sys.stderr) else: conf = {} ctx['LOADED_CONFIG'] = False builtins.__xonsh_config__ = conf return conf def xonshrc_context(rcfiles=None, execer=None, ctx=None, env=None, login=True): """Attempts to read in all xonshrc files and return the context.""" loaded = env['LOADED_RC_FILES'] = [] ctx = {} if ctx is None else ctx if rcfiles is None: return env env['XONSHRC'] = tuple(rcfiles) for rcfile in rcfiles: if not os.path.isfile(rcfile): loaded.append(False) continue _, ext = os.path.splitext(rcfile) if ext == '.json': status = static_config_run_control(rcfile, ctx, env, execer=execer, login=login) else: status = xonsh_script_run_control(rcfile, ctx, env, execer=execer, login=login) loaded.append(status) return ctx def windows_foreign_env_fixes(ctx): """Environment fixes for Windows. Operates in-place.""" # remove these bash variables which only cause problems. for ev in ['HOME', 'OLDPWD']: if ev in ctx: del ctx[ev] # Override path-related bash variables; on Windows bash uses # /c/Windows/System32 syntax instead of C:\\Windows\\System32 # which messes up these environment variables for xonsh. 
for ev in ['PATH', 'TEMP', 'TMP']: if ev in os_environ: ctx[ev] = os_environ[ev] elif ev in ctx: del ctx[ev] ctx['PWD'] = _get_cwd() or '' def foreign_env_fixes(ctx): """Environment fixes for all operating systems""" if 'PROMPT' in ctx: del ctx['PROMPT'] def static_config_run_control(filename, ctx, env, execer=None, login=True): """Loads a static config file and applies it as a run control.""" if not login: return conf = load_static_config(env, config=filename) # load foreign shells foreign_env = load_foreign_envs(shells=conf.get('foreign_shells', ()), issue_warning=False) if ON_WINDOWS: windows_foreign_env_fixes(foreign_env) foreign_env_fixes(foreign_env) env.update(foreign_env) aliases = builtins.aliases foreign_aliases = load_foreign_aliases(config=filename, issue_warning=True) for k, v in foreign_aliases.items(): if k in aliases: msg = ('Skipping application of {0!r} alias from foreign shell ' '(loaded from {1!r}) since it shares a name with an ' 'existing xonsh alias.') print(msg.format(k, filename)) else: aliases[k] = v # load xontribs names = conf.get('xontribs', ()) for name in names: update_context(name, ctx=ctx) if getattr(update_context, 'bad_imports', None): prompt_xontrib_install(update_context.bad_imports) del update_context.bad_imports # Do static config environment last, to allow user to override any of # our environment choices env.update(conf.get('env', ())) return True def xonsh_script_run_control(filename, ctx, env, execer=None, login=True): """Loads a xonsh file and applies it as a run control.""" if execer is None: return False updates = {'__file__': filename, '__name__': os.path.abspath(filename)} try: with swap_values(ctx, updates): run_script_with_cache(filename, execer, ctx) loaded = True except SyntaxError as err: msg = 'syntax error in xonsh run control file {0!r}: {1!s}' print_exception(msg.format(filename, err)) loaded = False except Exception as err: msg = 'error running xonsh run control file {0!r}: {1!s}' print_exception(msg.format(filename, err)) loaded = False return loaded def default_env(env=None): """Constructs a default xonsh environment.""" # in order of increasing precedence ctx = dict(BASE_ENV) ctx.update(os_environ) ctx['PWD'] = _get_cwd() or '' # other shells' PROMPT definitions generally don't work in XONSH: try: del ctx['PROMPT'] except KeyError: pass # finalize env if env is not None: ctx.update(env) return ctx xonsh-0.6.0/xonsh/events.py000066400000000000000000000213331320541242300156670ustar00rootroot00000000000000""" Events for xonsh. In all likelihood, you want builtins.events The best way to "declare" an event is something like:: events.doc('on_spam', "Comes with eggs") """ import abc import builtins import collections.abc import inspect from xonsh.tools import print_exception def has_kwargs(func): return any(p.kind == p.VAR_KEYWORD for p in inspect.signature(func).parameters.values()) def debug_level(): if hasattr(builtins, '__xonsh_env__'): return builtins.__xonsh_env__.get('XONSH_DEBUG') # FIXME: Under py.test, return 1(?) else: return 0 # Optimize for speed, not guaranteed correctness class AbstractEvent(collections.abc.MutableSet, abc.ABC): """ A given event that handlers can register against. Acts as a ``MutableSet`` for registered handlers. Note that ordering is never guaranteed. """ @property def species(self): """ The species (basically, class) of the event """ return type(self).__bases__[0] # events.on_chdir -> -> def __call__(self, handler): """ Registers a handler. It's suggested to use this as a decorator. 
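Illustrative sketch (added commentary, not from the original docs),
assuming the global ``events`` manager (normally exposed as
``builtins.events``) and a hypothetical ``on_spam`` event::

    @events.on_spam
    def handler(**kwargs):        # handlers should accept **kwargs
        print('spam!', kwargs)

    events.on_spam.fire(eggs=2)   # calls handler(eggs=2)

A validator may additionally be attached to the registered handler, as
described next.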
A decorator method is added to the handler, validator(). If a validator function is added, it can filter if the handler will be considered. The validator takes the same arguments as the handler. If it returns False, the handler will not called or considered, as if it was not registered at all. Parameters ---------- handler : callable The handler to register Returns ------- rtn : callable The handler """ # Using Python's "private" munging to minimize hypothetical collisions handler.__validator = None if debug_level(): if not has_kwargs(handler): raise ValueError("Event handlers need a **kwargs for future proofing") self.add(handler) def validator(vfunc): """ Adds a validator function to a handler to limit when it is considered. """ if debug_level(): if not has_kwargs(handler): raise ValueError("Event validators need a **kwargs for future proofing") handler.__validator = vfunc handler.validator = validator return handler def _filterhandlers(self, handlers, **kwargs): """ Helper method for implementing classes. Generates the handlers that pass validation. """ for handler in handlers: if handler.__validator is not None and not handler.__validator(**kwargs): continue yield handler @abc.abstractmethod def fire(self, **kwargs): """ Fires an event, calling registered handlers with the given arguments. Parameters ---------- **kwargs : Keyword arguments to pass to each handler """ class Event(AbstractEvent): """ An event species for notify and scatter-gather events. """ # Wish I could just pull from set... def __init__(self): self._handlers = set() def __len__(self): return len(self._handlers) def __contains__(self, item): return item in self._handlers def __iter__(self): yield from self._handlers def add(self, item): """ Add an element to a set. This has no effect if the element is already present. """ self._handlers.add(item) def discard(self, item): """ Remove an element from a set if it is a member. If the element is not a member, do nothing. """ self._handlers.discard(item) def fire(self, **kwargs): """ Fires an event, calling registered handlers with the given arguments. A non-unique iterable of the results is returned. Each handler is called immediately. Exceptions are turned in to warnings. Parameters ---------- **kwargs : Keyword arguments to pass to each handler Returns ------- vals : iterable Return values of each handler. If multiple handlers return the same value, it will appear multiple times. """ vals = [] for handler in self._filterhandlers(self._handlers, **kwargs): try: rv = handler(**kwargs) except Exception: print_exception("Exception raised in event handler; ignored.") else: vals.append(rv) return vals class LoadEvent(AbstractEvent): """ An event species where each handler is called exactly once, shortly after either the event is fired or the handler is registered (whichever is later). Additional firings are ignored. Note: Does not support scatter/gather, due to never knowing when we have all the handlers. Note: Maintains a strong reference to pargs/kwargs in case of the addition of future handlers. Note: This is currently NOT thread safe. """ def __init__(self): self._fired = set() self._unfired = set() self._hasfired = False def __len__(self): return len(self._fired) + len(self._unfired) def __contains__(self, item): return item in self._fired or item in self._unfired def __iter__(self): yield from self._fired yield from self._unfired def add(self, item): """ Add an element to a set. This has no effect if the element is already present. 
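If this event has already fired, the newly added handler is called
immediately with the keyword arguments saved from that firing (added
commentary; see ``_call()`` below); otherwise the handler is queued
until ``fire()`` runs.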
""" if self._hasfired: self._call(item) self._fired.add(item) else: self._unfired.add(item) def discard(self, item): """ Remove an element from a set if it is a member. If the element is not a member, do nothing. """ self._fired.discard(item) self._unfired.discard(item) def _call(self, handler): try: handler(**self._kwargs) except Exception: print_exception("Exception raised in event handler; ignored.") def fire(self, **kwargs): if self._hasfired: return self._kwargs = kwargs while self._unfired: handler = self._unfired.pop() self._call(handler) self._hasfired = True return () # Entirely for API compatibility class EventManager: """ Container for all events in a system. Meant to be a singleton, but doesn't enforce that itself. Each event is just an attribute. They're created dynamically on first use. """ def doc(self, name, docstring): """ Applies a docstring to an event. Parameters ---------- name : str The name of the event, eg "on_precommand" docstring : str The docstring to apply to the event """ type(getattr(self, name)).__doc__ = docstring @staticmethod def _mkevent(name, species=Event, doc=None): # NOTE: Also used in `xonsh_events` test fixture # (A little bit of magic to enable docstrings to work right) return type(name, (species,), {'__doc__': doc, '__module__': 'xonsh.events', '__qualname__': 'events.'+name})() def transmogrify(self, name, species): """ Converts an event from one species to another, preserving handlers and docstring. Please note: Some species maintain specialized state. This is lost on transmogrification. Parameters ---------- name : str The name of the event, eg "on_precommand" species : subclass of AbstractEvent The type to turn the event in to. """ if isinstance(species, str): species = globals()[species] if not issubclass(species, AbstractEvent): raise ValueError("Invalid event class; must be a subclass of AbstractEvent") oldevent = getattr(self, name) newevent = self._mkevent(name, species, type(oldevent).__doc__) setattr(self, name, newevent) for handler in oldevent: newevent.add(handler) def __getattr__(self, name): """Get an event, if it doesn't already exist.""" if name.startswith('_'): raise AttributeError # This is only called if the attribute doesn't exist, so create the Event... e = self._mkevent(name) # ... and save it. setattr(self, name, e) # Now it exists, and we won't be called again. return e # Not lazy because: # 1. Initialization of EventManager can't be much cheaper # 2. It's expected to be used at load time, negating any benefits of using lazy object events = EventManager() xonsh-0.6.0/xonsh/execer.py000066400000000000000000000267551320541242300156530ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements the xonsh executer.""" import sys import types import inspect import builtins import collections.abc as cabc from xonsh.ast import CtxAwareTransformer from xonsh.parser import Parser from xonsh.tools import (subproc_toks, find_next_break, get_logical_line, replace_logical_line, balanced_parens) from xonsh.built_ins import load_builtins, unload_builtins class Execer(object): """Executes xonsh code in a context.""" def __init__(self, filename='', debug_level=0, parser_args=None, unload=True, xonsh_ctx=None, scriptcache=True, cacheall=False): """Parameters ---------- filename : str, optional File we are to execute. debug_level : int, optional Debugging level to use in lexing and parsing. parser_args : dict, optional Arguments to pass down to the parser. unload : bool, optional Whether or not to unload xonsh builtins upon deletion. 
xonsh_ctx : dict or None, optional Xonsh xontext to load as builtins.__xonsh_ctx__ scriptcache : bool, optional Whether or not to use a precompiled bytecode cache when execing code, default: True. cacheall : bool, optional Whether or not to cache all xonsh code, and not just files. If this is set to true, it will cache command line input too, default: False. """ parser_args = parser_args or {} self.parser = Parser(**parser_args) self.filename = filename self.debug_level = debug_level self.unload = unload self.scriptcache = scriptcache self.cacheall = cacheall self.ctxtransformer = CtxAwareTransformer(self.parser) load_builtins(execer=self, ctx=xonsh_ctx) def __del__(self): if self.unload: unload_builtins() def parse(self, input, ctx, mode='exec', filename=None, transform=True): """Parses xonsh code in a context-aware fashion. For context-free parsing, please use the Parser class directly or pass in transform=False. """ if filename is None: filename = self.filename if not transform: return self.parser.parse(input, filename=filename, mode=mode, debug_level=(self.debug_level > 2)) # Parsing actually happens in a couple of phases. The first is a # shortcut for a context-free parser. Normally, all subprocess # lines should be wrapped in $(), to indicate that they are a # subproc. But that would be super annoying. Unfortunately, Python # mode - after indentation - is whitespace agnostic while, using # the Python token, subproc mode is whitespace aware. That is to say, # in Python mode "ls -l", "ls-l", and "ls - l" all parse to the # same AST because whitespace doesn't matter to the minus binary op. # However, these phases all have very different meaning in subproc # mode. The 'right' way to deal with this is to make the entire # grammar whitespace aware, and then ignore all of the whitespace # tokens for all of the Python rules. The lazy way implemented here # is to parse a line a second time with a $() wrapper if it fails # the first time. This is a context-free phase. tree, input = self._parse_ctx_free(input, mode=mode, filename=filename) if tree is None: return None # Now we need to perform context-aware AST transformation. This is # because the "ls -l" is valid Python. The only way that we know # it is not actually Python is by checking to see if the first token # (ls) is part of the execution context. If it isn't, then we will # assume that this line is supposed to be a subprocess line, assuming # it also is valid as a subprocess line. if ctx is None: ctx = set() elif isinstance(ctx, cabc.Mapping): ctx = set(ctx.keys()) tree = self.ctxtransformer.ctxvisit(tree, input, ctx, mode=mode, debug_level=self.debug_level) return tree def compile(self, input, mode='exec', glbs=None, locs=None, stacklevel=2, filename=None, transform=True): """Compiles xonsh code into a Python code object, which may then be execed or evaled. 
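Illustrative sketch (added commentary, not from the original docs),
assuming an ``Execer`` instance ``execer`` (constructing one loads the
xonsh builtins)::

    execer.eval('2 + 2')                      # -> 4
    code = execer.compile('ls -l', glbs={}, locs={})
    # Here 'ls' is not a name in the supplied context, so the line is
    # treated as a subprocess command by the context-aware
    # transformation described in parse() above before being compiled.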
""" if filename is None: filename = self.filename if glbs is None or locs is None: frame = inspect.stack()[stacklevel][0] glbs = frame.f_globals if glbs is None else glbs locs = frame.f_locals if locs is None else locs ctx = set(dir(builtins)) | set(glbs.keys()) | set(locs.keys()) tree = self.parse(input, ctx, mode=mode, filename=filename, transform=transform) if tree is None: return None # handles comment only input code = compile(tree, filename, mode) return code def eval(self, input, glbs=None, locs=None, stacklevel=2, filename=None, transform=True): """Evaluates (and returns) xonsh code.""" if isinstance(input, types.CodeType): code = input else: if filename is None: filename = self.filename code = self.compile(input=input, glbs=glbs, locs=locs, mode='eval', stacklevel=stacklevel, filename=filename, transform=transform) if code is None: return None # handles comment only input return eval(code, glbs, locs) def exec(self, input, mode='exec', glbs=None, locs=None, stacklevel=2, filename=None, transform=True): """Execute xonsh code.""" if isinstance(input, types.CodeType): code = input else: if filename is None: filename = self.filename code = self.compile(input=input, glbs=glbs, locs=locs, mode=mode, stacklevel=stacklevel, filename=filename, transform=transform) if code is None: return None # handles comment only input return exec(code, glbs, locs) def _parse_ctx_free(self, input, mode='exec', filename=None): last_error_line = last_error_col = -1 parsed = False original_error = None greedy = False if filename is None: filename = self.filename while not parsed: try: tree = self.parser.parse(input, filename=filename, mode=mode, debug_level=(self.debug_level > 2)) parsed = True except IndentationError as e: if original_error is None: raise e else: raise original_error except SyntaxError as e: if original_error is None: original_error = e if (e.loc is None) or (last_error_line == e.loc.lineno and last_error_col in (e.loc.column + 1, e.loc.column)): raise original_error from None last_error_col = e.loc.column last_error_line = e.loc.lineno idx = last_error_line - 1 lines = input.splitlines() line, nlogical, idx = get_logical_line(lines, idx) if input.endswith('\n'): lines.append('') if len(line.strip()) == 0: # whitespace only lines are not valid syntax in Python's # interactive mode='single', who knew?! Just ignore them. # this might cause actual syntax errors to have bad line # numbers reported, but should only affect interactive mode del lines[idx] last_error_line = last_error_col = -1 input = '\n'.join(lines) continue if last_error_line > 1 and lines[idx-1].rstrip()[-1:] == ':': # catch non-indented blocks and raise error. prev_indent = len(lines[idx-1]) - len(lines[idx-1].lstrip()) curr_indent = len(lines[idx]) - len(lines[idx].lstrip()) if prev_indent == curr_indent: raise original_error lexer = self.parser.lexer maxcol = None if greedy else find_next_break(line, mincol=last_error_col, lexer=lexer) if not greedy and maxcol in (e.loc.column + 1, e.loc.column): # go greedy the first time if the syntax error was because # we hit an end token out of place. This usually indicates # a subshell or maybe a macro. if not balanced_parens(line, maxcol=maxcol): greedy = True maxcol = None sbpline = subproc_toks(line, returnline=True, greedy=greedy, maxcol=maxcol, lexer=lexer) if sbpline is None: # subprocess line had no valid tokens, if len(line.partition('#')[0].strip()) == 0: # likely because it only contained a comment. 
del lines[idx] last_error_line = last_error_col = -1 input = '\n'.join(lines) continue elif not greedy: greedy = True continue else: # or for some other syntax error raise original_error elif sbpline[last_error_col:].startswith('![![') or \ sbpline.lstrip().startswith('![!['): # if we have already wrapped this in subproc tokens # and it still doesn't work, adding more won't help # anything if not greedy: greedy = True continue else: raise original_error else: # print some debugging info if self.debug_level > 1: msg = ('{0}:{1}:{2}{3} - {4}\n' '{0}:{1}:{2}{3} + {5}') mstr = '' if maxcol is None else ':' + str(maxcol) msg = msg.format(self.filename, last_error_line, last_error_col, mstr, line, sbpline) print(msg, file=sys.stderr) # replace the line replace_logical_line(lines, sbpline, idx, nlogical) last_error_col += 3 input = '\n'.join(lines) return tree, input xonsh-0.6.0/xonsh/foreign_shells.py000066400000000000000000000514241320541242300173720ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tools to help interface with foreign shells, such as Bash.""" import os import re import json import shlex import sys import tempfile import builtins import subprocess import warnings import functools import collections.abc as cabc from xonsh.lazyasd import lazyobject from xonsh.tools import to_bool, ensure_string from xonsh.platform import ON_WINDOWS, ON_CYGWIN, os_environ COMMAND = """{seterrprevcmd} {prevcmd} echo __XONSH_ENV_BEG__ {envcmd} echo __XONSH_ENV_END__ echo __XONSH_ALIAS_BEG__ {aliascmd} echo __XONSH_ALIAS_END__ echo __XONSH_FUNCS_BEG__ {funcscmd} echo __XONSH_FUNCS_END__ {postcmd} {seterrpostcmd}""" DEFAULT_BASH_FUNCSCMD = r"""# get function names from declare declstr=$(declare -F) read -r -a decls <<< $declstr funcnames="" for((n=0;n<${#decls[@]};n++)); do if (( $(($n % 3 )) == 2 )); then # get every 3rd entry funcnames="$funcnames ${decls[$n]}" fi done # get functions locations: funcname lineno filename shopt -s extdebug namelocfilestr=$(declare -F $funcnames) shopt -u extdebug # print just names and files as JSON object read -r -a namelocfile <<< $namelocfilestr sep=" " namefile="{" while IFS='' read -r line || [[ -n "$line" ]]; do name=${line%%"$sep"*} locfile=${line#*"$sep"} loc=${locfile%%"$sep"*} file=${locfile#*"$sep"} namefile="${namefile}\"${name}\":\"${file//\\/\\\\}\"," done <<< "$namelocfilestr" if [[ "{" == "${namefile}" ]]; then namefile="${namefile}}" else namefile="${namefile%?}}" fi echo $namefile""" DEFAULT_ZSH_FUNCSCMD = """# get function names autoload -U is-at-least # We'll need to version check zsh namefile="{" for name in ${(ok)functions}; do # force zsh to load the func in order to get the filename, # but use +X so that it isn't executed. autoload +X $name || continue loc=$(whence -v $name) loc=${(z)loc} if is-at-least 5.2; then file=${loc[-1]} else file=${loc[7,-1]} fi namefile="${namefile}\\"${name}\\":\\"${(Q)file:A}\\"," done if [[ "{" == "${namefile}" ]]; then namefile="${namefile}}" else namefile="${namefile%?}}" fi echo ${namefile}""" # mapping of shell name aliases to keys in other lookup dictionaries. 
@lazyobject def CANON_SHELL_NAMES(): return { 'bash': 'bash', '/bin/bash': 'bash', 'zsh': 'zsh', '/bin/zsh': 'zsh', '/usr/bin/zsh': 'zsh', 'cmd': 'cmd', 'cmd.exe': 'cmd', } @lazyobject def DEFAULT_ENVCMDS(): return { 'bash': 'env', 'zsh': 'env', 'cmd': 'set', } @lazyobject def DEFAULT_ALIASCMDS(): return { 'bash': 'alias', 'zsh': 'alias -L', 'cmd': '', } @lazyobject def DEFAULT_FUNCSCMDS(): return { 'bash': DEFAULT_BASH_FUNCSCMD, 'zsh': DEFAULT_ZSH_FUNCSCMD, 'cmd': '', } @lazyobject def DEFAULT_SOURCERS(): return { 'bash': 'source', 'zsh': 'source', 'cmd': 'call', } @lazyobject def DEFAULT_TMPFILE_EXT(): return { 'bash': '.sh', 'zsh': '.zsh', 'cmd': '.bat', } @lazyobject def DEFAULT_RUNCMD(): return { 'bash': '-c', 'zsh': '-c', 'cmd': '/C', } @lazyobject def DEFAULT_SETERRPREVCMD(): return { 'bash': 'set -e', 'zsh': 'set -e', 'cmd': '@echo off', } @lazyobject def DEFAULT_SETERRPOSTCMD(): return { 'bash': '', 'zsh': '', 'cmd': 'if errorlevel 1 exit 1', } @functools.lru_cache() def foreign_shell_data(shell, interactive=True, login=False, envcmd=None, aliascmd=None, extra_args=(), currenv=None, safe=True, prevcmd='', postcmd='', funcscmd=None, sourcer=None, use_tmpfile=False, tmpfile_ext=None, runcmd=None, seterrprevcmd=None, seterrpostcmd=None, show=False, dryrun=False): """Extracts data from a foreign (non-xonsh) shells. Currently this gets the environment, aliases, and functions but may be extended in the future. Parameters ---------- shell : str The name of the shell, such as 'bash' or '/bin/sh'. interactive : bool, optional Whether the shell should be run in interactive mode. login : bool, optional Whether the shell should be a login shell. envcmd : str or None, optional The command to generate environment output with. aliascmd : str or None, optional The command to generate alias output with. extra_args : tuple of str, optional Additional command line options to pass into the shell. currenv : tuple of items or None, optional Manual override for the current environment. safe : bool, optional Flag for whether or not to safely handle exceptions and other errors. prevcmd : str, optional A command to run in the shell before anything else, useful for sourcing and other commands that may require environment recovery. postcmd : str, optional A command to run after everything else, useful for cleaning up any damage that the prevcmd may have caused. funcscmd : str or None, optional This is a command or script that can be used to determine the names and locations of any functions that are native to the foreign shell. This command should print *only* a JSON object that maps function names to the filenames where the functions are defined. If this is None, then a default script will attempted to be looked up based on the shell name. Callable wrappers for these functions will be returned in the aliases dictionary. sourcer : str or None, optional How to source a foreign shell file for purposes of calling functions in that shell. If this is None, a default value will attempt to be looked up based on the shell name. use_tmpfile : bool, optional This specifies if the commands are written to a tmp file or just parsed directly to the shell tmpfile_ext : str or None, optional If tmpfile is True this sets specifies the extension used. runcmd : str or None, optional Command line switches to use when running the script, such as -c for Bash and /C for cmd.exe. seterrprevcmd : str or None, optional Command that enables exit-on-error for the shell that is run at the start of the script. 
For example, this is "set -e" in Bash. To disable exit-on-error behavior, simply pass in an empty string. seterrpostcmd : str or None, optional Command that enables exit-on-error for the shell that is run at the end of the script. For example, this is "if errorlevel 1 exit 1" in cmd.exe. To disable exit-on-error behavior, simply pass in an empty string. show : bool, optional Whether or not to display the script that will be run. dryrun : bool, optional Whether or not to actually run and process the command. Returns ------- env : dict Dictionary of shell's environment. (None if the subproc command fails) aliases : dict Dictionary of shell's aliases, this includes foreign function wrappers.(None if the subproc command fails) """ cmd = [shell] cmd.extend(extra_args) # needs to come here for GNU long options if interactive: cmd.append('-i') if login: cmd.append('-l') shkey = CANON_SHELL_NAMES[shell] envcmd = DEFAULT_ENVCMDS.get(shkey, 'env') if envcmd is None else envcmd aliascmd = DEFAULT_ALIASCMDS.get(shkey, 'alias') if aliascmd is None else aliascmd funcscmd = DEFAULT_FUNCSCMDS.get(shkey, 'echo {}') if funcscmd is None else funcscmd tmpfile_ext = DEFAULT_TMPFILE_EXT.get(shkey, 'sh') if tmpfile_ext is None else tmpfile_ext runcmd = DEFAULT_RUNCMD.get(shkey, '-c') if runcmd is None else runcmd seterrprevcmd = DEFAULT_SETERRPREVCMD.get(shkey, '') \ if seterrprevcmd is None else seterrprevcmd seterrpostcmd = DEFAULT_SETERRPOSTCMD.get(shkey, '') \ if seterrpostcmd is None else seterrpostcmd command = COMMAND.format(envcmd=envcmd, aliascmd=aliascmd, prevcmd=prevcmd, postcmd=postcmd, funcscmd=funcscmd, seterrprevcmd=seterrprevcmd, seterrpostcmd=seterrpostcmd).strip() if show: print(command) if dryrun: return None, None cmd.append(runcmd) if not use_tmpfile: cmd.append(command) else: tmpfile = tempfile.NamedTemporaryFile(suffix=tmpfile_ext, delete=False) tmpfile.write(command.encode('utf8')) tmpfile.close() cmd.append(tmpfile.name) if currenv is None and hasattr(builtins, '__xonsh_env__'): currenv = builtins.__xonsh_env__.detype() elif currenv is not None: currenv = dict(currenv) try: s = subprocess.check_output(cmd, stderr=subprocess.PIPE, env=currenv, # start new session to avoid hangs # (doesn't work on Cygwin though) start_new_session=(not ON_CYGWIN), universal_newlines=True) except (subprocess.CalledProcessError, FileNotFoundError): if not safe: raise return None, None finally: if use_tmpfile: os.remove(tmpfile.name) env = parse_env(s) aliases = parse_aliases(s) funcs = parse_funcs(s, shell=shell, sourcer=sourcer, extra_args=extra_args) aliases.update(funcs) return env, aliases @lazyobject def ENV_RE(): return re.compile('__XONSH_ENV_BEG__\n(.*)' '__XONSH_ENV_END__', flags=re.DOTALL) @lazyobject def ENV_SPLIT_RE(): return re.compile('^([^=]+)=([^=]*|[^\n]*)$', flags=re.DOTALL | re.MULTILINE) def parse_env(s): """Parses the environment portion of string into a dict.""" m = ENV_RE.search(s) if m is None: return {} g1 = m.group(1) g1 = g1[:-1] if g1.endswith('\n') else g1 env = dict(ENV_SPLIT_RE.findall(g1)) return env @lazyobject def ALIAS_RE(): return re.compile('__XONSH_ALIAS_BEG__\n(.*)' '__XONSH_ALIAS_END__', flags=re.DOTALL) def parse_aliases(s): """Parses the aliases portion of string into a dict.""" m = ALIAS_RE.search(s) if m is None: return {} g1 = m.group(1) items = [line.split('=', 1) for line in g1.splitlines() if line.startswith('alias ') and '=' in line] aliases = {} for key, value in items: try: key = key[6:] # lstrip 'alias ' # undo bash's weird quoting of single quotes 
(sh_single_quote) value = value.replace('\'\\\'\'', '\'') # strip one single quote at the start and end of value if value[0] == '\'' and value[-1] == '\'': value = value[1:-1] value = shlex.split(value) except ValueError as exc: warnings.warn('could not parse alias "{0}": {1!r}'.format(key, exc), RuntimeWarning) continue aliases[key] = value return aliases @lazyobject def FUNCS_RE(): return re.compile('__XONSH_FUNCS_BEG__\n(.+)\n' '__XONSH_FUNCS_END__', flags=re.DOTALL) def parse_funcs(s, shell, sourcer=None, extra_args=()): """Parses the funcs portion of a string into a dict of callable foreign function wrappers. """ m = FUNCS_RE.search(s) if m is None: return {} g1 = m.group(1) if ON_WINDOWS: g1 = g1.replace(os.sep, os.altsep) try: namefiles = json.loads(g1.strip()) except json.decoder.JSONDecodeError as exc: msg = ('{0!r}\n\ncould not parse {1} functions:\n' ' s = {2!r}\n' ' g1 = {3!r}\n\n' 'Note: you may be seeing this error if you use zsh with ' 'prezto. Prezto overwrites GNU coreutils functions (like echo) ' 'with its own zsh functions. Please try disabling prezto.') warnings.warn(msg.format(exc, shell, s, g1), RuntimeWarning) return {} sourcer = DEFAULT_SOURCERS.get(shell, 'source') if sourcer is None \ else sourcer funcs = {} for funcname, filename in namefiles.items(): if funcname.startswith('_') or not filename: continue # skip private functions and invalid files if not os.path.isabs(filename): filename = os.path.abspath(filename) wrapper = ForeignShellFunctionAlias(name=funcname, shell=shell, sourcer=sourcer, filename=filename, extra_args=extra_args) funcs[funcname] = wrapper return funcs class ForeignShellFunctionAlias(object): """This class is responsible for calling foreign shell functions as if they were aliases. This does not currently support taking stdin. """ INPUT = ('{sourcer} "{filename}"\n' '{funcname} {args}\n') def __init__(self, name, shell, filename, sourcer=None, extra_args=()): """ Parameters ---------- name : str function name shell : str Name or path to shell filename : str Where the function is defined, path to source. sourcer : str or None, optional Command to source foreign files with. extra_args : tuple of str, optional Additional command line options to pass into the shell. 
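Illustrative sketch (added commentary): these wrappers are normally
produced by ``parse_funcs()`` above and merged into the aliases returned
by ``foreign_shell_data()``, rather than constructed by hand. Assuming
an initialized xonsh environment and a hypothetical bash function
``spam`` defined in ``/home/user/.bashrc``::

    spam = ForeignShellFunctionAlias(name='spam', shell='bash',
                                     filename='/home/user/.bashrc')
    spam(['--eggs'])
    # roughly equivalent to:
    #   bash -c 'source "/home/user/.bashrc"
    #            spam --eggs'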
""" sourcer = DEFAULT_SOURCERS.get(shell, 'source') if sourcer is None \ else sourcer self.name = name self.shell = shell self.filename = filename self.sourcer = sourcer self.extra_args = extra_args def __eq__(self, other): if not hasattr(other, 'name') or not hasattr(other, 'shell') or \ not hasattr(other, 'filename') or not hasattr(other, 'sourcer') \ or not hasattr(other, 'exta_args'): return NotImplemented return (self.name == other.name) and (self.shell == other.shell) and \ (self.filename == other.filename) and \ (self.sourcer == other.sourcer) and \ (self.extra_args == other.extra_args) def __call__(self, args, stdin=None): args, streaming = self._is_streaming(args) input = self.INPUT.format(sourcer=self.sourcer, filename=self.filename, funcname=self.name, args=' '.join(args)) cmd = [self.shell] + list(self.extra_args) + ['-c', input] env = builtins.__xonsh_env__ denv = env.detype() if streaming: subprocess.check_call(cmd, env=denv) out = None else: out = subprocess.check_output(cmd, env=denv, stderr=subprocess.STDOUT) out = out.decode(encoding=env.get('XONSH_ENCODING'), errors=env.get('XONSH_ENCODING_ERRORS')) out = out.replace('\r\n', '\n') return out def _is_streaming(self, args): """Test and modify args if --xonsh-stream is present.""" if '--xonsh-stream' not in args: return args, False args = list(args) args.remove('--xonsh-stream') return args, True @lazyobject def VALID_SHELL_PARAMS(): return frozenset([ 'shell', 'interactive', 'login', 'envcmd', 'aliascmd', 'extra_args', 'currenv', 'safe', 'prevcmd', 'postcmd', 'funcscmd', 'sourcer', ]) def ensure_shell(shell): """Ensures that a mapping follows the shell specification.""" if not isinstance(shell, cabc.MutableMapping): shell = dict(shell) shell_keys = set(shell.keys()) if not (shell_keys <= VALID_SHELL_PARAMS): msg = 'unknown shell keys: {0}' raise KeyError(msg.format(shell_keys - VALID_SHELL_PARAMS)) shell['shell'] = ensure_string(shell['shell']).lower() if 'interactive' in shell_keys: shell['interactive'] = to_bool(shell['interactive']) if 'login' in shell_keys: shell['login'] = to_bool(shell['login']) if 'envcmd' in shell_keys: shell['envcmd'] = None if shell['envcmd'] is None \ else ensure_string(shell['envcmd']) if 'aliascmd' in shell_keys: shell['aliascmd'] = None if shell['aliascmd'] is None \ else ensure_string(shell['aliascmd']) if 'extra_args' in shell_keys and not isinstance(shell['extra_args'], tuple): shell['extra_args'] = tuple(map(ensure_string, shell['extra_args'])) if 'currenv' in shell_keys and not isinstance(shell['currenv'], tuple): ce = shell['currenv'] if isinstance(ce, cabc.Mapping): ce = tuple([(ensure_string(k), v) for k, v in ce.items()]) elif isinstance(ce, cabc.Sequence): ce = tuple([(ensure_string(k), v) for k, v in ce]) else: raise RuntimeError('unrecognized type for currenv') shell['currenv'] = ce if 'safe' in shell_keys: shell['safe'] = to_bool(shell['safe']) if 'prevcmd' in shell_keys: shell['prevcmd'] = ensure_string(shell['prevcmd']) if 'postcmd' in shell_keys: shell['postcmd'] = ensure_string(shell['postcmd']) if 'funcscmd' in shell_keys: shell['funcscmd'] = None if shell['funcscmd'] is None \ else ensure_string(shell['funcscmd']) if 'sourcer' in shell_keys: shell['sourcer'] = None if shell['sourcer'] is None \ else ensure_string(shell['sourcer']) if 'seterrprevcmd' in shell_keys: shell['seterrprevcmd'] = None if shell['seterrprevcmd'] is None \ else ensure_string(shell['seterrprevcmd']) if 'seterrpostcmd' in shell_keys: shell['seterrpostcmd'] = None if shell['seterrpostcmd'] is None \ else 
ensure_string(shell['seterrpostcmd']) return shell def _get_shells(shells=None, config=None, issue_warning=True): if shells is not None and config is not None: raise RuntimeError('Only one of shells and config may be non-None.') elif shells is not None: pass else: env = getattr(builtins, '__xonsh_env__', os_environ) if env.get('LOADED_CONFIG', False): conf = builtins.__xonsh_config__ else: from xonsh.environ import load_static_config conf = load_static_config(env, config) shells = conf.get('foreign_shells', ()) return shells def load_foreign_envs(shells=None, config=None, issue_warning=True): """Loads environments from foreign shells. Parameters ---------- shells : sequence of dicts, optional An iterable of dicts that can be passed into foreign_shell_data() as keyword arguments. Not compatible with config not being None. config : str of None, optional Path to the static config file. Not compatible with shell not being None. If both shell and config is None, then it will be read from the $XONSHCONFIG environment variable. issue_warning : bool, optional Issues warnings if config file cannot be found. Returns ------- env : dict A dictionary of the merged environments. """ shells = _get_shells(shells=shells, config=config, issue_warning=issue_warning) env = {} for shell in shells: shell = ensure_shell(shell) shenv, _ = foreign_shell_data(**shell) if shenv: env.update(shenv) return env def load_foreign_aliases(shells=None, config=None, issue_warning=True): """Loads aliases from foreign shells. Parameters ---------- shells : sequence of dicts, optional An iterable of dicts that can be passed into foreign_shell_data() as keyword arguments. Not compatible with config not being None. config : str of None, optional Path to the static config file. Not compatible with shell not being None. If both shell and config is None, then it will be read from the $XONSHCONFIG environment variable. issue_warning : bool, optional Issues warnings if config file cannot be found. Returns ------- aliases : dict A dictionary of the merged aliases. """ shells = _get_shells(shells=shells, config=config, issue_warning=issue_warning) aliases = {} xonsh_aliases = builtins.aliases for shell in shells: shell = ensure_shell(shell) _, shaliases = foreign_shell_data(**shell) if not builtins.__xonsh_env__.get('FOREIGN_ALIASES_OVERRIDE'): shaliases = {} if shaliases is None else shaliases for alias in set(shaliases) & set(xonsh_aliases): del shaliases[alias] if builtins.__xonsh_env__.get('XONSH_DEBUG') > 1: print('aliases: ignoring alias {!r} of shell {!r} ' 'which tries to override xonsh alias.' ''.format(alias, shell['shell']), file=sys.stderr) aliases.update(shaliases) return aliases xonsh-0.6.0/xonsh/fs.py000066400000000000000000000063341320541242300147770ustar00rootroot00000000000000""" Backported functions to implement the PEP 519 (Adding a file system path protocol) API. """ import abc import sys import io import pathlib try: from os import PathLike, fspath, fsencode, fsdecode except ImportError: class PathLike(abc.ABC): """Abstract base class for implementing the file system path protocol.""" @abc.abstractmethod def __fspath__(self): """Return the file system path representation of the object.""" raise NotImplementedError PathLike.register(pathlib.Path) def fspath(path): """Return the string representation of the path. If str or bytes is passed in, it is returned unchanged. If __fspath__() returns something other than str or bytes then TypeError is raised. 
If this function is given something that is not str, bytes, or os.PathLike then TypeError is raised. """ if isinstance(path, (str, bytes)): return path if isinstance(path, pathlib.Path): return str(path) # Work from the object's type to match method resolution of other magic # methods. path_type = type(path) try: path = path_type.__fspath__(path) except AttributeError: if hasattr(path_type, '__fspath__'): raise else: if isinstance(path, (str, bytes)): return path else: raise TypeError("expected __fspath__() to return str or bytes, " "not " + type(path).__name__) raise TypeError("expected str, bytes or os.PathLike object, not " + path_type.__name__) def _fscodec(): encoding = sys.getfilesystemencoding() if encoding == 'mbcs': errors = 'strict' else: errors = 'surrogateescape' def fsencode(filename): """Encode filename (an os.PathLike, bytes, or str) to the filesystem encoding with 'surrogateescape' error handler, return bytes unchanged. On Windows, use 'strict' error handler if the file system encoding is 'mbcs' (which is the default encoding). """ filename = fspath(filename) # Does type-checking of `filename`. if isinstance(filename, str): return filename.encode(encoding, errors) else: return filename def fsdecode(filename): """Decode filename (an os.PathLike, bytes, or str) from the filesystem encoding with 'surrogateescape' error handler, return str unchanged. On Windows, use 'strict' error handler if the file system encoding is 'mbcs' (which is the default encoding). """ filename = fspath(filename) # Does type-checking of `filename`. if isinstance(filename, bytes): return filename.decode(encoding, errors) else: return filename return fsencode, fsdecode fsencode, fsdecode = _fscodec() del _fscodec def open(file, *pargs, **kwargs): if isinstance(file, PathLike): file = fspath(file) return io.open(file, *pargs, **kwargs) xonsh-0.6.0/xonsh/history/000077500000000000000000000000001320541242300155105ustar00rootroot00000000000000xonsh-0.6.0/xonsh/history/__init__.py000066400000000000000000000012621320541242300176220ustar00rootroot00000000000000# amalgamate exclude import os as _os if _os.getenv('XONSH_DEBUG', ''): pass else: import sys as _sys try: from xonsh.history import __amalgam__ base = __amalgam__ _sys.modules['xonsh.history.base'] = __amalgam__ dummy = __amalgam__ _sys.modules['xonsh.history.dummy'] = __amalgam__ json = __amalgam__ _sys.modules['xonsh.history.json'] = __amalgam__ sqlite = __amalgam__ _sys.modules['xonsh.history.sqlite'] = __amalgam__ main = __amalgam__ _sys.modules['xonsh.history.main'] = __amalgam__ del __amalgam__ except ImportError: pass del _sys del _os # amalgamate end xonsh-0.6.0/xonsh/history/base.py000066400000000000000000000107641320541242300170040ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Base class of Xonsh History backends.""" import types import uuid class HistoryEntry(types.SimpleNamespace): """Represent a command in history. Attributes ---------- cmd: str The command as typed by the user, including newlines out: str The output of the command, if xonsh is configured to save it rtn: int The return of the command (ie, 0 on success) ts: two-tuple of floats The timestamps of when the command started and finished, including fractions. """ class History: """Xonsh history backend base class. History objects should be created via a subclass of History. Indexing -------- History acts like a sequence that can be indexed to return ``HistoryEntry`` objects. Note that the most recent command is the last item in history. 
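For example (added commentary, not from the original docs), with
``history`` being an instance of a ``History`` subclass::

    history[-1].cmd        # input text of the most recent command
    history[-1].rtn        # its return code
    history[-3:]           # a list of HistoryEntry objects
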
Attributes ---------- rtns : sequence of ints The return of the command (ie, 0 on success) inps : sequence of strings The command as typed by the user, including newlines tss : sequence of two-tuples of floats The timestamps of when the command started and finished, including fractions outs : sequence of strings The output of the command, if xonsh is configured to save it gc : A garbage collector or None The garbage collector In all of these sequences, index 0 is the oldest and -1 (the last item) is the newest. """ def __init__(self, sessionid=None, **kwargs): """Represents a xonsh session's history. Parameters ---------- sessionid : int, uuid, str, optional Current session identifier, will generate a new sessionid if not set. """ self.sessionid = uuid.uuid4() if sessionid is None else sessionid self.gc = None self.buffer = None self.filename = None self.inps = None self.rtns = None self.tss = None self.outs = None self.last_cmd_rtn = None self.last_cmd_out = None def __len__(self): """Return the number of items in current session.""" return len(list(self.items())) def __getitem__(self, item): """Retrieve history entries, see ``History`` docs for more info.""" if isinstance(item, int): if item >= len(self): raise IndexError('history index out of range') return HistoryEntry(cmd=self.inps[item], out=self.outs[item], rtn=self.rtns[item], ts=self.tss[item]) elif isinstance(item, slice): cmds = self.inps[item] outs = self.outs[item] rtns = self.rtns[item] tss = self.tss[item] return [HistoryEntry(cmd=c, out=o, rtn=r, ts=t) for c, o, r, t in zip(cmds, outs, rtns, tss)] else: raise TypeError('history indices must be integers ' 'or slices, not {}'.format(type(item))) def __setitem__(self, *args): raise PermissionError('You cannot change history! ' 'you can create new though.') def append(self, cmd): """Append a command item into history. Parameters ---------- cmd: dict This dict contains information about the command that is to be added to the history list. It should contain the keys ``inp``, ``rtn`` and ``ts``. These key names mirror the same names defined as instance variables in the ``HistoryEntry`` class. """ pass def flush(self, **kwargs): """Flush the history items to disk from a buffer.""" pass def items(self): """Get history items of current session.""" raise NotImplementedError def all_items(self): """Get all history items.""" raise NotImplementedError def info(self): """A collection of information about the shell history. Returns ------- dict or collections.OrderedDict Contains history information as str key pairs. """ raise NotImplementedError def run_gc(self, size=None, blocking=True): """Run the garbage collector. Parameters ---------- size: None or tuple of a int and a string Determines the size and units of what would be allowed to remain. blocking: bool If set blocking, then wait until gc action finished. 
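Illustrative sketch (added commentary)::

    history.run_gc(size=(100, 'commands'), blocking=True)

asks the backend to keep roughly the 100 most recent commands and to
wait for the collection to finish; this base implementation is a no-op,
so backends without garbage collection simply ignore the call.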
""" pass xonsh-0.6.0/xonsh/history/dummy.py000066400000000000000000000010521320541242300172130ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements the xonsh history backend.""" import collections from xonsh.history.base import History class DummyHistory(History): """A dummy implement of history backend.""" def append(self, cmd): pass def items(self): yield {'inp': 'dummy in action', 'ts': 1464652800, 'ind': 0} def all_items(self): return self.items() def info(self): data = collections.OrderedDict() data['backend'] = 'dummy' data['sessionid'] = str(self.sessionid) return data xonsh-0.6.0/xonsh/history/json.py000066400000000000000000000355001320541242300170360ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements JSON version of xonsh history backend.""" import os import sys import time import json import builtins import collections import threading import collections.abc as cabc from xonsh.history.base import History import xonsh.tools as xt import xonsh.lazyjson as xlj import xonsh.xoreutils.uptime as uptime def _xhj_gc_commands_to_rmfiles(hsize, files): """Return the history files to remove to get under the command limit.""" rmfiles = [] n = 0 ncmds = 0 for ts, fcmds, f in files[::-1]: if fcmds == 0: # we need to make sure that 'empty' history files don't hang around rmfiles.append((ts, fcmds, f)) if ncmds + fcmds > hsize: break ncmds += fcmds n += 1 rmfiles += files[:-n] return rmfiles def _xhj_gc_files_to_rmfiles(hsize, files): """Return the history files to remove to get under the file limit.""" rmfiles = files[:-hsize] if len(files) > hsize else [] return rmfiles def _xhj_gc_seconds_to_rmfiles(hsize, files): """Return the history files to remove to get under the age limit.""" rmfiles = [] now = time.time() for ts, _, f in files: if (now - ts) < hsize: break rmfiles.append((None, None, f)) return rmfiles def _xhj_gc_bytes_to_rmfiles(hsize, files): """Return the history files to remove to get under the byte limit.""" rmfiles = [] n = 0 nbytes = 0 for _, _, f in files[::-1]: fsize = os.stat(f).st_size if nbytes + fsize > hsize: break nbytes += fsize n += 1 rmfiles = files[:-n] return rmfiles def _xhj_get_history_files(sort=True, reverse=False): """Find and return the history files. Optionally sort files by modify time. """ data_dir = builtins.__xonsh_env__.get('XONSH_DATA_DIR') data_dir = xt.expanduser_abs_path(data_dir) try: files = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.startswith('xonsh-') and f.endswith('.json')] except OSError: files = [] if builtins.__xonsh_env__.get('XONSH_DEBUG'): xt.print_exception("Could not collect xonsh history files.") if sort: files.sort(key=lambda x: os.path.getmtime(x), reverse=reverse) return files class JsonHistoryGC(threading.Thread): """Shell history garbage collection.""" def __init__(self, wait_for_shell=True, size=None, *args, **kwargs): """Thread responsible for garbage collecting old history. May wait for shell (and for xonshrc to have been loaded) to start work. 
""" super().__init__(*args, **kwargs) self.daemon = True self.size = size self.wait_for_shell = wait_for_shell self.start() self.gc_units_to_rmfiles = {'commands': _xhj_gc_commands_to_rmfiles, 'files': _xhj_gc_files_to_rmfiles, 's': _xhj_gc_seconds_to_rmfiles, 'b': _xhj_gc_bytes_to_rmfiles} def run(self): while self.wait_for_shell: time.sleep(0.01) env = builtins.__xonsh_env__ # pylint: disable=no-member if self.size is None: hsize, units = env.get('XONSH_HISTORY_SIZE') else: hsize, units = xt.to_history_tuple(self.size) files = self.files(only_unlocked=True) rmfiles_fn = self.gc_units_to_rmfiles.get(units) if rmfiles_fn is None: raise ValueError('Units type {0!r} not understood'.format(units)) for _, _, f in rmfiles_fn(hsize, files): try: os.remove(f) except OSError: pass def files(self, only_unlocked=False): """Find and return the history files. Optionally locked files may be excluded. This is sorted by the last closed time. Returns a list of (timestamp, number of cmds, file name) tuples. """ # pylint: disable=no-member env = getattr(builtins, '__xonsh_env__', None) if env is None: return [] boot = uptime.boottime() fs = _xhj_get_history_files(sort=False) files = [] for f in fs: try: if os.path.getsize(f) == 0: # collect empty files (for gc) files.append((time.time(), 0, f)) continue lj = xlj.LazyJSON(f, reopen=False) if lj['locked'] and lj['ts'][0] < boot: # computer was rebooted between when this history was created # and now and so this history should be unlocked. hist = lj.load() lj.close() hist['locked'] = False with open(f, 'w', newline='\n') as fp: xlj.ljdump(hist, fp, sort_keys=True) lj = xlj.LazyJSON(f, reopen=False) if only_unlocked and lj['locked']: continue # info: closing timestamp, number of commands, filename files.append((lj['ts'][1] or lj['ts'][0], len(lj.sizes['cmds']) - 1, f)) lj.close() except (IOError, OSError, ValueError): continue files.sort() return files class JsonHistoryFlusher(threading.Thread): """Flush shell history to disk periodically.""" def __init__(self, filename, buffer, queue, cond, at_exit=False, *args, **kwargs): """Thread for flushing history.""" super(JsonHistoryFlusher, self).__init__(*args, **kwargs) self.filename = filename self.buffer = buffer self.queue = queue queue.append(self) self.cond = cond self.at_exit = at_exit if at_exit: self.dump() queue.popleft() else: self.start() def run(self): with self.cond: self.cond.wait_for(self.i_am_at_the_front) self.dump() self.queue.popleft() def i_am_at_the_front(self): """Tests if the flusher is at the front of the queue.""" return self is self.queue[0] def dump(self): """Write the cached history to external storage.""" opts = builtins.__xonsh_env__.get('HISTCONTROL') last_inp = None cmds = [] for cmd in self.buffer: if 'ignoredups' in opts and cmd['inp'] == last_inp: # Skipping dup cmd continue if 'ignoreerr' in opts and cmd['rtn'] != 0: # Skipping failed cmd continue cmds.append(cmd) last_inp = cmd['inp'] with open(self.filename, 'r', newline='\n') as f: hist = xlj.LazyJSON(f).load() load_hist_len = len(hist['cmds']) hist['cmds'].extend(cmds) if self.at_exit: hist['ts'][1] = time.time() # apply end time hist['locked'] = False if not builtins.__xonsh_env__.get('XONSH_STORE_STDOUT', False): [cmd.pop('out') for cmd in hist['cmds'][load_hist_len:] if 'out' in cmd] with open(self.filename, 'w', newline='\n') as f: xlj.ljdump(hist, f, sort_keys=True) class JsonCommandField(cabc.Sequence): """A field in the 'cmds' portion of history.""" def __init__(self, field, hist, default=None): """Represents a field in 
the 'cmds' portion of history. Will query the buffer for the relevant data, if possible. Otherwise it will lazily acquire data from the file. Parameters ---------- field : str The name of the field to query. hist : History object The history object to query. default : optional The default value to return if key is not present. """ self.field = field self.hist = hist self.default = default def __len__(self): return len(self.hist) def __getitem__(self, key): size = len(self) if isinstance(key, slice): return [self[i] for i in range(*key.indices(size))] elif not isinstance(key, int): raise IndexError( 'JsonCommandField may only be indexed by int or slice.') elif size == 0: raise IndexError('JsonCommandField is empty.') # now we know we have an int key = size + key if key < 0 else key # ensure key is non-negative bufsize = len(self.hist.buffer) if size - bufsize <= key: # key is in buffer return self.hist.buffer[key + bufsize - size].get( self.field, self.default) # now we know we have to go into the file queue = self.hist._queue queue.append(self) with self.hist._cond: self.hist._cond.wait_for(self.i_am_at_the_front) with open(self.hist.filename, 'r', newline='\n') as f: lj = xlj.LazyJSON(f, reopen=False) rtn = lj['cmds'][key].get(self.field, self.default) if isinstance(rtn, xlj.LJNode): rtn = rtn.load() queue.popleft() return rtn def i_am_at_the_front(self): """Tests if the command field is at the front of the queue.""" return self is self.hist._queue[0] class JsonHistory(History): """Xonsh history backend implemented with JSON files. JsonHistory implements two extra actions: ``diff``, and ``replay``. """ def __init__(self, filename=None, sessionid=None, buffersize=100, gc=True, **meta): """Represents a xonsh session's history as an in-memory buffer that is periodically flushed to disk. Parameters ---------- filename : str, optional Location of history file, defaults to ``$XONSH_DATA_DIR/xonsh-{sessionid}.json``. sessionid : int, uuid, str, optional Current session identifier, will generate a new sessionid if not set. buffersize : int, optional Maximum buffersize in memory. meta : optional Top-level metadata to store along with the history. The kwargs 'cmds' and 'sessionid' are not allowed and will be overwritten. gc : bool, optional Run garbage collector flag. """ super().__init__(sessionid=sessionid, **meta) if filename is None: # pylint: disable=no-member data_dir = builtins.__xonsh_env__.get('XONSH_DATA_DIR') data_dir = os.path.expanduser(data_dir) self.filename = os.path.join( data_dir, 'xonsh-{0}.json'.format(self.sessionid)) else: self.filename = filename self.buffer = [] self.buffersize = buffersize self._queue = collections.deque() self._cond = threading.Condition() self._len = 0 self.last_cmd_out = None self.last_cmd_rtn = None meta['cmds'] = [] meta['sessionid'] = str(self.sessionid) with open(self.filename, 'w', newline='\n') as f: xlj.ljdump(meta, f, sort_keys=True) self.gc = JsonHistoryGC() if gc else None # command fields that are known self.tss = JsonCommandField('ts', self) self.inps = JsonCommandField('inp', self) self.outs = JsonCommandField('out', self) self.rtns = JsonCommandField('rtn', self) def __len__(self): return self._len def append(self, cmd): """Appends command to history. Will periodically flush the history to file. Parameters ---------- cmd : dict This dict contains information about the command that is to be added to the history list. It should contain the keys ``inp``, ``rtn`` and ``ts``. 
These key names mirror the same names defined as instance variables in the ``HistoryEntry`` class. Returns ------- hf : JsonHistoryFlusher or None The thread that was spawned to flush history """ self.buffer.append(cmd) self._len += 1 # must come before flushing if len(self.buffer) >= self.buffersize: hf = self.flush() else: hf = None return hf def flush(self, at_exit=False): """Flushes the current command buffer to disk. Parameters ---------- at_exit : bool, optional Whether the JsonHistoryFlusher should act as a thread in the background, or execute immediately and block. Returns ------- hf : JsonHistoryFlusher or None The thread that was spawned to flush history """ if len(self.buffer) == 0: return hf = JsonHistoryFlusher(self.filename, tuple(self.buffer), self._queue, self._cond, at_exit=at_exit) self.buffer.clear() return hf def items(self): """Display history items of current session.""" for item, tss in zip(self.inps, self.tss): yield {'inp': item.rstrip(), 'ts': tss[0]} def all_items(self, **kwargs): """ Returns all history as found in XONSH_DATA_DIR. yield format: {'inp': cmd, 'rtn': 0, ...} """ while self.gc and self.gc.is_alive(): time.sleep(0.011) # gc sleeps for 0.01 secs, sleep a beat longer for f in _xhj_get_history_files(): try: json_file = xlj.LazyJSON(f, reopen=False) except ValueError: # Invalid json file continue try: commands = json_file.load()['cmds'] except json.decoder.JSONDecodeError: # file is corrupted somehow if builtins.__xonsh_env__.get('XONSH_DEBUG') > 0: msg = 'xonsh history file {0!r} is not valid JSON' print(msg.format(f), file=sys.stderr) continue for c in commands: yield {'inp': c['inp'].rstrip(), 'ts': c['ts'][0]} # all items should also include session items yield from self.items() def info(self): data = collections.OrderedDict() data['backend'] = 'json' data['sessionid'] = str(self.sessionid) data['filename'] = self.filename data['length'] = len(self) data['buffersize'] = self.buffersize data['bufferlength'] = len(self.buffer) envs = builtins.__xonsh_env__ data['gc options'] = envs.get('XONSH_HISTORY_SIZE') return data def run_gc(self, size=None, blocking=True): self.gc = JsonHistoryGC(wait_for_shell=False, size=size) if blocking: while self.gc.is_alive(): continue xonsh-0.6.0/xonsh/history/main.py000066400000000000000000000307141320541242300170130ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Main entry points of the xonsh history.""" import argparse import builtins import datetime import functools import json import os import sys from xonsh.history.base import History from xonsh.history.dummy import DummyHistory from xonsh.history.json import JsonHistory from xonsh.history.sqlite import SqliteHistory import xonsh.diff_history as xdh import xonsh.lazyasd as xla import xonsh.tools as xt HISTORY_BACKENDS = { 'dummy': DummyHistory, 'json': JsonHistory, 'sqlite': SqliteHistory, } def construct_history(**kwargs): """Construct the history backend object.""" env = builtins.__xonsh_env__ backend = env.get('XONSH_HISTORY_BACKEND') if isinstance(backend, str) and backend in HISTORY_BACKENDS: kls_history = HISTORY_BACKENDS[backend] elif xt.is_class(backend): kls_history = backend elif isinstance(backend, History): return backend else: print('Unknown history backend: {}. 
Using JSON version'.format( backend), file=sys.stderr) kls_history = JsonHistory return kls_history(**kwargs) def _xh_session_parser(hist=None, **kwargs): """Returns history items of current session.""" if hist is None: hist = builtins.__xonsh_history__ return hist.items() def _xh_all_parser(hist=None, **kwargs): """Returns all history items.""" if hist is None: hist = builtins.__xonsh_history__ return hist.all_items() def _xh_find_histfile_var(file_list, default=None): """Return the path of the history file from the value of the envvar HISTFILE. """ for f in file_list: f = xt.expanduser_abs_path(f) if not os.path.isfile(f): continue with open(f, 'r') as rc_file: for line in rc_file: if line.startswith('HISTFILE='): hist_file = line.split('=', 1)[1].strip('\'"\n') hist_file = xt.expanduser_abs_path(hist_file) if os.path.isfile(hist_file): return hist_file else: if default: default = xt.expanduser_abs_path(default) if os.path.isfile(default): return default def _xh_bash_hist_parser(location=None, **kwargs): """Yield commands from bash history file""" if location is None: location = _xh_find_histfile_var([os.path.join('~', '.bashrc'), os.path.join('~', '.bash_profile')], os.path.join('~', '.bash_history')) if location: with open(location, 'r', errors='backslashreplace') as bash_hist: for ind, line in enumerate(bash_hist): yield {'inp': line.rstrip(), 'ts': 0.0, 'ind': ind} else: print("No bash history file", file=sys.stderr) def _xh_zsh_hist_parser(location=None, **kwargs): """Yield commands from zsh history file""" if location is None: location = _xh_find_histfile_var([os.path.join('~', '.zshrc'), os.path.join('~', '.zprofile')], os.path.join('~', '.zsh_history')) if location: with open(location, 'r', errors='backslashreplace') as zsh_hist: for ind, line in enumerate(zsh_hist): if line.startswith(':'): try: start_time, command = line.split(';', 1) except ValueError: # Invalid history entry continue try: start_time = float(start_time.split(':')[1]) except ValueError: start_time = 0.0 yield {'inp': command.rstrip(), 'ts': start_time, 'ind': ind} else: yield {'inp': line.rstrip(), 'ts': 0.0, 'ind': ind} else: print("No zsh history file found", file=sys.stderr) def _xh_filter_ts(commands, start_time, end_time): """Yield only the commands between start and end time.""" for cmd in commands: if start_time <= cmd['ts'] < end_time: yield cmd def _xh_get_history(session='session', *, slices=None, datetime_format=None, start_time=None, end_time=None, location=None): """Get the requested portion of shell history. Parameters ---------- session: {'session', 'all', 'xonsh', 'bash', 'zsh'} The history session to get. slices : list of slice-like objects, optional Get only portions of history. start_time, end_time: float, optional Filter commands by timestamp. 
location: string, optional The history file location (bash or zsh) Returns ------- generator A filtered list of commands """ cmds = [] for i, item in enumerate(_XH_HISTORY_SESSIONS[session](location=location)): item['ind'] = i cmds.append(item) if slices: # transform/check all slices slices = [xt.ensure_slice(s) for s in slices] cmds = xt.get_portions(cmds, slices) if start_time or end_time: if start_time is None: start_time = 0.0 else: start_time = xt.ensure_timestamp(start_time, datetime_format) if end_time is None: end_time = float('inf') else: end_time = xt.ensure_timestamp(end_time, datetime_format) cmds = _xh_filter_ts(cmds, start_time, end_time) return cmds def _xh_show_history(hist, ns, stdout=None, stderr=None): """Show the requested portion of shell history. Accepts same parameters with `_xh_get_history`. """ try: commands = _xh_get_history(ns.session, slices=ns.slices, start_time=ns.start_time, end_time=ns.end_time, datetime_format=ns.datetime_format) except ValueError as err: print("history: error: {}".format(err), file=stderr) return if ns.reverse: commands = reversed(list(commands)) if ns.numerate and ns.timestamp: for c in commands: dt = datetime.datetime.fromtimestamp(c['ts']) print('{}:({}) {}'.format(c['ind'], xt.format_datetime(dt), c['inp']), file=stdout) elif ns.numerate: for c in commands: print('{}: {}'.format(c['ind'], c['inp']), file=stdout) elif ns.timestamp: for c in commands: dt = datetime.datetime.fromtimestamp(c['ts']) print('({}) {}'.format(xt.format_datetime(dt), c['inp']), file=stdout) else: for c in commands: print(c['inp'], file=stdout) @xla.lazyobject def _XH_HISTORY_SESSIONS(): return {'session': _xh_session_parser, 'xonsh': _xh_all_parser, 'all': _xh_all_parser, 'zsh': _xh_zsh_hist_parser, 'bash': _xh_bash_hist_parser} _XH_MAIN_ACTIONS = {'show', 'id', 'file', 'info', 'diff', 'gc'} @functools.lru_cache() def _xh_create_parser(): """Create a parser for the "history" command.""" p = argparse.ArgumentParser(prog='history', description="try 'history --help' " 'for more info') subp = p.add_subparsers(title='commands', dest='action') # session action show = subp.add_parser('show', prefix_chars='-+', help='display history of a session, default command') show.add_argument('-r', dest='reverse', default=False, action='store_true', help='reverses the direction') show.add_argument('-n', dest='numerate', default=False, action='store_true', help='numerate each command') show.add_argument('-t', dest='timestamp', default=False, action='store_true', help='show command timestamps') show.add_argument('-T', dest='end_time', default=None, help='show only commands before timestamp') show.add_argument('+T', dest='start_time', default=None, help='show only commands after timestamp') show.add_argument('-f', dest='datetime_format', default=None, help='the datetime format to be used for' 'filtering and printing') show.add_argument('session', nargs='?', choices=_XH_HISTORY_SESSIONS.keys(), default='session', metavar='session', help='{} (default: current session, all is an alias for xonsh)' ''.format(', '.join(map(repr, _XH_HISTORY_SESSIONS.keys())))) show.add_argument('slices', nargs='*', default=None, metavar='slice', help='integer or slice notation') # 'id' subcommand subp.add_parser('id', help='display the current session id') # 'file' subcommand subp.add_parser('file', help='display the current history filename') # 'info' subcommand info = subp.add_parser('info', help=('display information about the ' 'current history')) info.add_argument('--json', dest='json', default=False, 
action='store_true', help='print in JSON format') # gc gcp = subp.add_parser( 'gc', help='launches a new history garbage collector') gcp.add_argument('--size', nargs=2, dest='size', default=None, help=('next two arguments represent the history size and ' 'units; e.g. "--size 8128 commands"')) bgcp = gcp.add_mutually_exclusive_group() bgcp.add_argument('--blocking', dest='blocking', default=True, action='store_true', help=('ensures that the gc blocks the main thread, ' 'default True')) bgcp.add_argument('--non-blocking', dest='blocking', action='store_false', help='makes the gc non-blocking, and thus return sooner') hist = builtins.__xonsh_history__ if isinstance(hist, JsonHistory): # add actions belong only to JsonHistory diff = subp.add_parser('diff', help='diff two xonsh history files') xdh.dh_create_parser(p=diff) import xonsh.replay as xrp replay = subp.add_parser('replay', help='replay a xonsh history file') xrp.replay_create_parser(p=replay) _XH_MAIN_ACTIONS.add('replay') return p def _xh_parse_args(args): """Prepare and parse arguments for the history command. Add default action for ``history`` and default session for ``history show``. """ parser = _xh_create_parser() if not args: args = ['show', 'session'] elif args[0] not in _XH_MAIN_ACTIONS and args[0] not in ('-h', '--help'): args = ['show', 'session'] + args if args[0] == 'show': if not any(a in _XH_HISTORY_SESSIONS for a in args): args.insert(1, 'session') ns, slices = parser.parse_known_args(args) if slices: if not ns.slices: ns.slices = slices else: ns.slices.extend(slices) else: ns = parser.parse_args(args) return ns def history_main(args=None, stdin=None, stdout=None, stderr=None): """This is the history command entry point.""" hist = builtins.__xonsh_history__ ns = _xh_parse_args(args) if not ns or not ns.action: return if ns.action == 'show': _xh_show_history(hist, ns, stdout=stdout, stderr=stderr) elif ns.action == 'info': data = hist.info() if ns.json: s = json.dumps(data) print(s, file=stdout) else: lines = ['{0}: {1}'.format(k, v) for k, v in data.items()] print('\n'.join(lines), file=stdout) elif ns.action == 'id': if not hist.sessionid: return print(str(hist.sessionid), file=stdout) elif ns.action == 'file': if not hist.filename: return print(str(hist.filename), file=stdout) elif ns.action == 'gc': hist.run_gc(size=ns.size, blocking=ns.blocking) elif ns.action == 'diff': if isinstance(hist, JsonHistory): xdh.dh_main_action(ns) elif ns.action == 'replay': if isinstance(hist, JsonHistory): import xonsh.replay as xrp xrp.replay_main_action(hist, ns, stdout=stdout, stderr=stderr) else: print('Unknown history action {}'.format(ns.action), file=sys.stderr) xonsh-0.6.0/xonsh/history/sqlite.py000066400000000000000000000165041320541242300173710ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements the xonsh history backend via sqlite3.""" import builtins import collections import json import os import sqlite3 import sys import threading import time from xonsh.history.base import History import xonsh.tools as xt def _xh_sqlite_get_file_name(): envs = builtins.__xonsh_env__ file_name = envs.get('XONSH_HISTORY_SQLITE_FILE') if not file_name: data_dir = envs.get('XONSH_DATA_DIR') file_name = os.path.join(data_dir, 'xonsh-history.sqlite') return xt.expanduser_abs_path(file_name) def _xh_sqlite_get_conn(filename=None): if filename is None: filename = _xh_sqlite_get_file_name() return sqlite3.connect(filename) def _xh_sqlite_create_history_table(cursor): """Create Table for history items. 
Columns: info - JSON formatted, reserved for future extension. """ cursor.execute(""" CREATE TABLE IF NOT EXISTS xonsh_history (inp TEXT, rtn INTEGER, tsb REAL, tse REAL, sessionid TEXT, out TEXT, info TEXT ) """) def _xh_sqlite_insert_command(cursor, cmd, sessionid, store_stdout): sql = 'INSERT INTO xonsh_history (inp, rtn, tsb, tse, sessionid' tss = cmd.get('ts', [None, None]) params = [ cmd['inp'].rstrip(), cmd['rtn'], tss[0], tss[1], sessionid, ] if store_stdout and 'out' in cmd: sql += ', out' params.append(cmd['out']) if 'info' in cmd: sql += ', info' info = json.dumps(cmd['info']) params.append(info) sql += ') VALUES (' + ('?, ' * len(params)).rstrip(', ') + ')' cursor.execute(sql, tuple(params)) def _xh_sqlite_get_count(cursor, sessionid=None): sql = 'SELECT count(*) FROM xonsh_history ' params = [] if sessionid is not None: sql += 'WHERE sessionid = ? ' params.append(str(sessionid)) cursor.execute(sql, tuple(params)) return cursor.fetchone()[0] def _xh_sqlite_get_records(cursor, sessionid=None, limit=None, reverse=False): sql = 'SELECT inp, tsb, rtn FROM xonsh_history ' params = [] if sessionid is not None: sql += 'WHERE sessionid = ? ' params.append(sessionid) sql += 'ORDER BY tsb ' if reverse: sql += 'DESC ' if limit is not None: sql += 'LIMIT %d ' % limit cursor.execute(sql, tuple(params)) return cursor.fetchall() def _xh_sqlite_delete_records(cursor, size_to_keep): sql = 'SELECT min(tsb) FROM (' sql += 'SELECT tsb FROM xonsh_history ORDER BY tsb DESC ' sql += 'LIMIT %d)' % size_to_keep cursor.execute(sql) result = cursor.fetchone() if not result: return max_tsb = result[0] sql = 'DELETE FROM xonsh_history WHERE tsb < ?' result = cursor.execute(sql, (max_tsb,)) return result.rowcount def xh_sqlite_append_history(cmd, sessionid, store_stdout, filename=None): with _xh_sqlite_get_conn(filename=filename) as conn: c = conn.cursor() _xh_sqlite_create_history_table(c) _xh_sqlite_insert_command(c, cmd, sessionid, store_stdout) conn.commit() def xh_sqlite_get_count(sessionid=None, filename=None): with _xh_sqlite_get_conn(filename=filename) as conn: c = conn.cursor() return _xh_sqlite_get_count(c, sessionid=sessionid) def xh_sqlite_items(sessionid=None, filename=None): with _xh_sqlite_get_conn(filename=filename) as conn: c = conn.cursor() _xh_sqlite_create_history_table(c) return _xh_sqlite_get_records(c, sessionid=sessionid) def xh_sqlite_delete_items(size_to_keep, filename=None): with _xh_sqlite_get_conn(filename=filename) as conn: c = conn.cursor() _xh_sqlite_create_history_table(c) return _xh_sqlite_delete_records(c, size_to_keep) class SqliteHistoryGC(threading.Thread): """Shell history garbage collection.""" def __init__(self, wait_for_shell=True, size=None, filename=None, *args, **kwargs): """Thread responsible for garbage collecting old history. May wait for shell (and for xonshrc to have been loaded) to start work. 
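        Note that ``run()`` below only honors the 'commands' unit; any other
        unit makes this collector print a warning and return without
        deleting anything. Illustrative call (a sketch)::

            SqliteHistoryGC(wait_for_shell=False, size=(500, 'commands'))  # sketch; keeps the newest 500 commands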
""" super().__init__(*args, **kwargs) self.daemon = True self.filename = filename self.size = size self.wait_for_shell = wait_for_shell self.start() def run(self): while self.wait_for_shell: time.sleep(0.01) if self.size is not None: hsize, units = xt.to_history_tuple(self.size) else: envs = builtins.__xonsh_env__ hsize, units = envs.get('XONSH_HISTORY_SIZE') if units != 'commands': print('sqlite backed history gc currently only supports ' '"commands" as units', file=sys.stderr) return if hsize < 0: return xh_sqlite_delete_items(hsize, filename=self.filename) class SqliteHistory(History): """Xonsh history backend implemented with sqlite3.""" def __init__(self, gc=True, filename=None, **kwargs): super().__init__(**kwargs) if filename is None: filename = _xh_sqlite_get_file_name() self.filename = filename self.gc = SqliteHistoryGC() if gc else None self._last_hist_inp = None self.inps = [] self.rtns = [] self.outs = [] self.tss = [] def append(self, cmd): envs = builtins.__xonsh_env__ opts = envs.get('HISTCONTROL') inp = cmd['inp'].rstrip() self.inps.append(inp) store_stdout = envs.get('XONSH_STORE_STDOUT', False) if store_stdout: self.outs.append(cmd.get('out')) else: self.outs.append(None) self.rtns.append(cmd['rtn']) self.tss.append(cmd.get('ts', (None, None))) opts = envs.get('HISTCONTROL') if 'ignoredups' in opts and inp == self._last_hist_inp: # Skipping dup cmd return if 'ignoreerr' in opts and cmd['rtn'] != 0: # Skipping failed cmd return self._last_hist_inp = inp xh_sqlite_append_history( cmd, str(self.sessionid), store_stdout, filename=self.filename) def all_items(self): """Display all history items.""" for item in xh_sqlite_items(filename=self.filename): yield {'inp': item[0], 'ts': item[1], 'rtn': item[2]} def items(self): """Display history items of current session.""" for item in xh_sqlite_items( sessionid=str(self.sessionid), filename=self.filename): yield {'inp': item[0], 'ts': item[1], 'rtn': item[2]} def info(self): data = collections.OrderedDict() data['backend'] = 'sqlite' data['sessionid'] = str(self.sessionid) data['filename'] = self.filename data['session items'] = xh_sqlite_get_count( sessionid=self.sessionid, filename=self.filename) data['all items'] = xh_sqlite_get_count(filename=self.filename) envs = builtins.__xonsh_env__ data['gc options'] = envs.get('XONSH_HISTORY_SIZE') return data def run_gc(self, size=None, blocking=True): self.gc = SqliteHistoryGC(wait_for_shell=False, size=size) if blocking: while self.gc.is_alive(): continue xonsh-0.6.0/xonsh/imphooks.py000066400000000000000000000235271320541242300162230ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Import hooks for importing xonsh source files. This module registers the hooks it defines when it is imported. """ import os import re import sys import types import builtins import contextlib import importlib from importlib.machinery import ModuleSpec from importlib.abc import MetaPathFinder, SourceLoader, Loader from xonsh.events import events from xonsh.execer import Execer from xonsh.platform import scandir from xonsh.lazyasd import lazyobject @lazyobject def ENCODING_LINE(): # this regex comes from PEP 263 # https://www.python.org/dev/peps/pep-0263/#defining-the-encoding return re.compile(b"^[ tv]*#.*?coding[:=][ t]*([-_.a-zA-Z0-9]+)") def find_source_encoding(src): """Finds the source encoding given bytes representing a file. 
If no encoding is found, UTF-8 will be returned as per the docs https://docs.python.org/3/howto/unicode.html#unicode-literals-in-python-source-code """ utf8 = 'UTF-8' first, _, rest = src.partition(b'\n') m = ENCODING_LINE.match(first) if m is not None: return m.group(1).decode(utf8) second, _, _ = rest.partition(b'\n') m = ENCODING_LINE.match(second) if m is not None: return m.group(1).decode(utf8) return utf8 class XonshImportHook(MetaPathFinder, SourceLoader): """Implements the import hook for xonsh source files.""" def __init__(self, *args, **kwargs): super(XonshImportHook, self).__init__(*args, **kwargs) self._filenames = {} self._execer = None @property def execer(self): if hasattr(builtins, '__xonsh_execer__'): execer = builtins.__xonsh_execer__ if self._execer is not None: self._execer = None elif self._execer is None: self._execer = execer = Execer(unload=False) else: execer = self._execer return execer # # MetaPathFinder methods # def find_spec(self, fullname, path, target=None): """Finds the spec for a xonsh module if it exists.""" dot = '.' spec = None path = sys.path if path is None else path if dot not in fullname and dot not in path: path = [dot] + path name = fullname.rsplit(dot, 1)[-1] fname = name + '.xsh' for p in path: if not isinstance(p, str): continue if not os.path.isdir(p) or not os.access(p, os.R_OK): continue if fname not in (x.name for x in scandir(p)): continue spec = ModuleSpec(fullname, self) self._filenames[fullname] = os.path.join(p, fname) break return spec # # SourceLoader methods # def create_module(self, spec): """Create a xonsh module with the appropriate attributes.""" mod = types.ModuleType(spec.name) mod.__file__ = self.get_filename(spec.name) mod.__loader__ = self mod.__package__ = spec.parent or '' return mod def get_filename(self, fullname): """Returns the filename for a module's fullname.""" return self._filenames[fullname] def get_data(self, path): """Gets the bytes for a path.""" raise NotImplementedError def get_code(self, fullname): """Gets the code object for a xonsh file.""" filename = self.get_filename(fullname) if filename is None: msg = "xonsh file {0!r} could not be found".format(fullname) raise ImportError(msg) with open(filename, 'rb') as f: src = f.read() enc = find_source_encoding(src) src = src.decode(encoding=enc) src = src if src.endswith('\n') else src + '\n' execer = self.execer execer.filename = filename ctx = {} # dummy for modules code = execer.compile(src, glbs=ctx, locs=ctx) return code # # Import events # events.doc('on_import_pre_find_spec', """ on_import_pre_find_spec(fullname: str, path: str, target: module or None) -> None Fires before any import find_spec() calls have been executed. The parameters here are the same as importlib.abc.MetaPathFinder.find_spec(). Namely, :``fullname``: The full name of the module to import. :``path``: None if a top-level import, otherwise the ``__path__`` of the parent package. :``target``: Target module used to make a better guess about the package spec. """) events.doc('on_import_post_find_spec', """ on_import_post_find_spec(spec, fullname, path, target) -> None Fires after all import find_spec() calls have been executed. The parameters here the spec and the arguments importlib.abc.MetaPathFinder.find_spec(). Namely, :``spec``: A ModuleSpec object if the spec was found, or None if it was not. :``fullname``: The full name of the module to import. :``path``: None if a top-level import, otherwise the ``__path__`` of the parent package. 
:``target``: Target module used to make a better guess about the package spec. """) events.doc('on_import_pre_create_module', """ on_import_pre_create_module(spec: ModuleSpec) -> None Fires right before a module is created by its loader. The only parameter is the spec object. See importlib for more details. """) events.doc('on_import_post_create_module', """ on_import_post_create_module(module: Module, spec: ModuleSpec) -> None Fires after a module is created by its loader but before the loader returns it. The parameters here are the module object itself and the spec object. See importlib for more details. """) events.doc('on_import_pre_exec_module', """ on_import_pre_exec_module(module: Module) -> None Fires right before a module is executed by its loader. The only parameter is the module itself. See importlib for more details. """) events.doc('on_import_post_exec_module', """ on_import_post_create_module(module: Module) -> None Fires after a module is executed by its loader but before the loader returns it. The only parameter is the module itself. See importlib for more details. """) def _should_dispatch_xonsh_import_event_loader(): """Figures out if we should dispatch to a load event""" return (len(events.on_import_pre_create_module) > 0 or len(events.on_import_post_create_module) > 0 or len(events.on_import_pre_exec_module) > 0 or len(events.on_import_post_exec_module) > 0) class XonshImportEventHook(MetaPathFinder): """Implements the import hook for firing xonsh events on import.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._fullname_stack = [] @contextlib.contextmanager def append_stack(self, fullname): """A context manager for appending and then removing a name from the fullname stack. """ self._fullname_stack.append(fullname) yield del self._fullname_stack[-1] # # MetaPathFinder methods # def find_spec(self, fullname, path, target=None): """Finds the spec for a xonsh module if it exists.""" if fullname in reversed(self._fullname_stack): # don't execute if we are already in the stack. return None npre = len(events.on_import_pre_find_spec) npost = len(events.on_import_post_find_spec) dispatch_load = _should_dispatch_xonsh_import_event_loader() if npre > 0: events.on_import_pre_find_spec.fire(fullname=fullname, path=path, target=target) elif npost == 0 and not dispatch_load: # no events to fire, proceed normally and prevent recursion return None # now find the spec with self.append_stack(fullname): spec = importlib.util.find_spec(fullname) # fire post event if npost > 0: events.on_import_post_find_spec.fire(spec=spec, fullname=fullname, path=path, target=target) if dispatch_load and spec is not None and hasattr(spec.loader, 'create_module'): spec.loader = XonshImportEventLoader(spec.loader) return spec class XonshImportEventLoader(Loader): """A class that dispatches loader calls to another loader and fires relevant xonsh events. 
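    A sketch of hooking the events this loader fires (the handler name and
    body here are made up; registration follows the usual ``xonsh.events``
    decorator pattern)::

        from xonsh.events import events

        @events.on_import_post_exec_module
        def _log_import(module=None, **_):
            # fired after the wrapped loader has executed the module
            print('imported', module.__name__)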
""" def __init__(self, loader): self.loader = loader # # Loader methods # def create_module(self, spec): """Creates and returns the module object.""" events.on_import_pre_create_module.fire(spec=spec) mod = self.loader.create_module(spec) events.on_import_post_create_module.fire(module=mod, spec=spec) return mod def exec_module(self, module): """Executes the module in its own namespace.""" events.on_import_pre_exec_module.fire(module=module) rtn = self.loader.exec_module(module) events.on_import_post_exec_module.fire(module=module) return rtn def load_module(self, fullname): """Legacy module loading, provided for backwards compatibility.""" return self.loader.load_module(fullname) def module_repr(self, module): """Legacy module repr, provided for backwards compatibility.""" return self.loader.module_repr(module) def install_import_hooks(): """ Install Xonsh import hooks in ``sys.meta_path`` in order for ``.xsh`` files to be importable and import events to be fired. Can safely be called many times, will be no-op if xonsh import hooks are already present. """ found_imp = found_event = False for hook in sys.meta_path: if isinstance(hook, XonshImportHook): found_imp = True elif isinstance(hook, XonshImportEventHook): found_event = True if not found_imp: sys.meta_path.append(XonshImportHook()) if not found_event: sys.meta_path.insert(0, XonshImportEventHook()) # alias to deprecated name install_hook = install_import_hooks xonsh-0.6.0/xonsh/inspectors.py000066400000000000000000000664351320541242300165700ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tools for inspecting Python objects. This file was forked from the IPython project: * Copyright (c) 2008-2014, IPython Development Team * Copyright (C) 2001-2007 Fernando Perez * Copyright (c) 2001, Janko Hauser * Copyright (c) 2001, Nathaniel Gray """ import os import io import sys import types import inspect import itertools import linecache import collections from xonsh.lazyasd import LazyObject from xonsh.tokenize import detect_encoding from xonsh.openpy import read_py_file from xonsh.tools import (cast_unicode, safe_hasattr, indent, print_color, format_color) from xonsh.platform import HAS_PYGMENTS, PYTHON_VERSION_INFO from xonsh.lazyimps import pygments, pyghooks from xonsh.style_tools import partial_color_tokenize # builtin docstrings to ignore _func_call_docstring = LazyObject(lambda: types.FunctionType.__call__.__doc__, globals(), '_func_call_docstring') _object_init_docstring = LazyObject(lambda: object.__init__.__doc__, globals(), '_object_init_docstring') _builtin_type_docstrings = LazyObject(lambda: { t.__doc__ for t in (types.ModuleType, types.MethodType, types.FunctionType) }, globals(), '_builtin_type_docstrings') _builtin_func_type = LazyObject(lambda: type(all), globals(), '_builtin_func_type') # Bound methods have the same type as builtin functions _builtin_meth_type = LazyObject(lambda: type(str.upper), globals(), '_builtin_meth_type') info_fields = LazyObject(lambda: [ 'type_name', 'base_class', 'string_form', 'namespace', 'length', 'file', 'definition', 'docstring', 'source', 'init_definition', 'class_docstring', 'init_docstring', 'call_def', 'call_docstring', # These won't be printed but will be used to determine how to # format the object 'ismagic', 'isalias', 'isclass', 'argspec', 'found', 'name' ], globals(), 'info_fields') def object_info(**kw): """Make an object info dict with all fields present.""" infodict = dict(itertools.zip_longest(info_fields, [None])) infodict.update(kw) return infodict def 
get_encoding(obj): """Get encoding for python source file defining obj Returns None if obj is not defined in a sourcefile. """ ofile = find_file(obj) # run contents of file through pager starting at line where the object # is defined, as long as the file isn't binary and is actually on the # filesystem. if ofile is None: return None elif ofile.endswith(('.so', '.dll', '.pyd')): return None elif not os.path.isfile(ofile): return None else: # Print only text files, not extension binaries. Note that # getsourcelines returns lineno with 1-offset and page() uses # 0-offset, so we must adjust. with io.open(ofile, 'rb') as buf: # Tweaked to use io.open for Python 2 encoding, _ = detect_encoding(buf.readline) return encoding def getdoc(obj): """Stable wrapper around inspect.getdoc. This can't crash because of attribute problems. It also attempts to call a getdoc() method on the given object. This allows objects which provide their docstrings via non-standard mechanisms (like Pyro proxies) to still be inspected by ipython's ? system.""" # Allow objects to offer customized documentation via a getdoc method: try: ds = obj.getdoc() except Exception: # pylint:disable=broad-except pass else: # if we get extra info, we add it to the normal docstring. if isinstance(ds, str): return inspect.cleandoc(ds) try: docstr = inspect.getdoc(obj) encoding = get_encoding(obj) return cast_unicode(docstr, encoding=encoding) except Exception: # pylint:disable=broad-except # Harden against an inspect failure, which can occur with # SWIG-wrapped extensions. raise def getsource(obj, is_binary=False): """Wrapper around inspect.getsource. This can be modified by other projects to provide customized source extraction. Inputs: - obj: an object whose source code we will attempt to extract. Optional inputs: - is_binary: whether the object is known to come from a binary source. This implementation will skip returning any output for binary objects, but custom extractors may know how to meaningfully process them.""" if is_binary: return None else: # get source if obj was decorated with @decorator if hasattr(obj, "__wrapped__"): obj = obj.__wrapped__ try: src = inspect.getsource(obj) except TypeError: if hasattr(obj, '__class__'): src = inspect.getsource(obj.__class__) encoding = get_encoding(obj) return cast_unicode(src, encoding=encoding) def is_simple_callable(obj): """True if obj is a function ()""" return (inspect.isfunction(obj) or inspect.ismethod(obj) or isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type)) def getargspec(obj): """Wrapper around :func:`inspect.getfullargspec` on Python 3, and :func:inspect.getargspec` on Python 2. In addition to functions and methods, this can also handle objects with a ``__call__`` attribute. """ if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): obj = obj.__call__ return inspect.getfullargspec(obj) def format_argspec(argspec): """Format argspect, convenience wrapper around inspect's. This takes a dict instead of ordered arguments and calls inspect.format_argspec with the arguments in the necessary order. """ return inspect.formatargspec(argspec['args'], argspec['varargs'], argspec['varkw'], argspec['defaults']) def call_tip(oinfo, format_call=True): """Extract call tip data from an oinfo dict. Parameters ---------- oinfo : dict format_call : bool, optional If True, the call line is formatted and returned as a string. If not, a tuple of (name, argspec) is returned. Returns ------- call_info : None, str or (str, dict) tuple. 
When format_call is True, the whole call information is formatted as a single string. Otherwise, the object's name and its argspec dict are returned. If no call information is available, None is returned. docstring : str or None The most relevant docstring for calling purposes is returned, if available. The priority is: call docstring for callable instances, then constructor docstring for classes, then main object's docstring otherwise (regular functions). """ # Get call definition argspec = oinfo.get('argspec') if argspec is None: call_line = None else: # Callable objects will have 'self' as their first argument, prune # it out if it's there for clarity (since users do *not* pass an # extra first argument explicitly). try: has_self = argspec['args'][0] == 'self' except (KeyError, IndexError): pass else: if has_self: argspec['args'] = argspec['args'][1:] call_line = oinfo['name'] + format_argspec(argspec) # Now get docstring. # The priority is: call docstring, constructor docstring, main one. doc = oinfo.get('call_docstring') if doc is None: doc = oinfo.get('init_docstring') if doc is None: doc = oinfo.get('docstring', '') return call_line, doc def find_file(obj): """Find the absolute path to the file where an object was defined. This is essentially a robust wrapper around `inspect.getabsfile`. Returns None if no file can be found. Parameters ---------- obj : any Python object Returns ------- fname : str The absolute path to the file where the object was defined. """ # get source if obj was decorated with @decorator if safe_hasattr(obj, '__wrapped__'): obj = obj.__wrapped__ fname = None try: fname = inspect.getabsfile(obj) except TypeError: # For an instance, the file that matters is where its class was # declared. if hasattr(obj, '__class__'): try: fname = inspect.getabsfile(obj.__class__) except TypeError: # Can happen for builtins pass except: # pylint:disable=bare-except pass return cast_unicode(fname) def find_source_lines(obj): """Find the line number in a file where an object was defined. This is essentially a robust wrapper around `inspect.getsourcelines`. Returns None if no file can be found. Parameters ---------- obj : any Python object Returns ------- lineno : int The line number where the object definition starts. """ # get source if obj was decorated with @decorator if safe_hasattr(obj, '__wrapped__'): obj = obj.__wrapped__ try: try: lineno = inspect.getsourcelines(obj)[1] except TypeError: # For instances, try the class object like getsource() does if hasattr(obj, '__class__'): lineno = inspect.getsourcelines(obj.__class__)[1] else: lineno = None except: # pylint:disable=bare-except return None return lineno if PYTHON_VERSION_INFO < (3, 5, 0): FrameInfo = collections.namedtuple('FrameInfo', ['frame', 'filename', 'lineno', 'function', 'code_context', 'index']) def getouterframes(frame, context=1): """Wrapper for getouterframes so that it acts like the Python v3.5 version.""" return [FrameInfo(*f) for f in inspect.getouterframes(frame, context=context)] else: getouterframes = inspect.getouterframes class Inspector(object): """Inspects objects.""" def __init__(self, str_detail_level=0): self.str_detail_level = str_detail_level def _getdef(self, obj, oname=''): """Return the call signature for any callable object. If any exception is generated, None is returned instead and the exception is suppressed. 
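        A rough illustration (``f`` is a made-up function, not part of this
        module)::

            def f(a, b=1):
                pass

            Inspector()._getdef(f, 'f')  # -> 'f(a, b=1)', roughly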
""" try: hdef = oname + inspect.formatargspec(*getargspec(obj)) return cast_unicode(hdef) except: # pylint:disable=bare-except return None def noinfo(self, msg, oname): """Generic message when no information is found.""" print('No %s found' % msg, end=' ') if oname: print('for %s' % oname) else: print() def pdef(self, obj, oname=''): """Print the call signature for any callable object. If the object is a class, print the constructor information. """ if not callable(obj): print('Object is not callable.') return header = '' if inspect.isclass(obj): header = self.__head('Class constructor information:\n') obj = obj.__init__ output = self._getdef(obj, oname) if output is None: self.noinfo('definition header', oname) else: print(header, output, end=' ', file=sys.stdout) def pdoc(self, obj, oname=''): """Print the docstring for any object. Optional -formatter: a function to run the docstring through for specially formatted docstrings. """ head = self.__head # For convenience lines = [] ds = getdoc(obj) if ds: lines.append(head("Class docstring:")) lines.append(indent(ds)) if inspect.isclass(obj) and hasattr(obj, '__init__'): init_ds = getdoc(obj.__init__) if init_ds is not None: lines.append(head("Init docstring:")) lines.append(indent(init_ds)) elif hasattr(obj, '__call__'): call_ds = getdoc(obj.__call__) if call_ds: lines.append(head("Call docstring:")) lines.append(indent(call_ds)) if not lines: self.noinfo('documentation', oname) else: print('\n'.join(lines)) def psource(self, obj, oname=''): """Print the source code for an object.""" # Flush the source cache because inspect can return out-of-date source linecache.checkcache() try: src = getsource(obj) except: # pylint:disable=bare-except self.noinfo('source', oname) else: print(src) def pfile(self, obj, oname=''): """Show the whole file where an object was defined.""" lineno = find_source_lines(obj) if lineno is None: self.noinfo('file', oname) return ofile = find_file(obj) # run contents of file through pager starting at line where the object # is defined, as long as the file isn't binary and is actually on the # filesystem. if ofile.endswith(('.so', '.dll', '.pyd')): print('File %r is binary, not printing.' % ofile) elif not os.path.isfile(ofile): print('File %r does not exist, not printing.' % ofile) else: # Print only text files, not extension binaries. Note that # getsourcelines returns lineno with 1-offset and page() uses # 0-offset, so we must adjust. o = read_py_file(ofile, skip_encoding_cookie=False) print(o, lineno - 1) def _format_fields_str(self, fields, title_width=0): """Formats a list of fields for display using color strings. Parameters ---------- fields : list A list of 2-tuples: (field_title, field_content) title_width : int How many characters to pad titles to. Default to longest title. """ out = [] if title_width == 0: title_width = max(len(title) + 2 for title, _ in fields) for title, content in fields: title_len = len(title) title = '{BOLD_RED}' + title + ':{NO_COLOR}' if len(content.splitlines()) > 1: title += '\n' else: title += " ".ljust(title_width - title_len) out.append(cast_unicode(title) + cast_unicode(content)) return format_color("\n".join(out) + '\n') def _format_fields_tokens(self, fields, title_width=0): """Formats a list of fields for display using color tokens from pygments. Parameters ---------- fields : list A list of 2-tuples: (field_title, field_content) title_width : int How many characters to pad titles to. Default to longest title. 
""" out = [] if title_width == 0: title_width = max(len(title) + 2 for title, _ in fields) for title, content in fields: title_len = len(title) title = '{BOLD_RED}' + title + ':{NO_COLOR}' if not isinstance(content, str) or len(content.splitlines()) > 1: title += '\n' else: title += " ".ljust(title_width - title_len) out += partial_color_tokenize(title) if isinstance(content, str): out[-1] = (out[-1][0], out[-1][1] + content + '\n') else: out += content out[-1] = (out[-1][0], out[-1][1] + '\n') out[-1] = (out[-1][0], out[-1][1] + '\n') return out def _format_fields(self, fields, title_width=0): """Formats a list of fields for display using color tokens from pygments. Parameters ---------- fields : list A list of 2-tuples: (field_title, field_content) title_width : int How many characters to pad titles to. Default to longest title. """ if HAS_PYGMENTS: rtn = self._format_fields_tokens(fields, title_width=title_width) else: rtn = self._format_fields_str(fields, title_width=title_width) return rtn # The fields to be displayed by pinfo: (fancy_name, key_in_info_dict) pinfo_fields1 = [("Type", "type_name")] pinfo_fields2 = [("String form", "string_form")] pinfo_fields3 = [("Length", "length"), ("File", "file"), ("Definition", "definition"), ] pinfo_fields_obj = [("Class docstring", "class_docstring"), ("Init docstring", "init_docstring"), ("Call def", "call_def"), ("Call docstring", "call_docstring"), ] def pinfo(self, obj, oname='', info=None, detail_level=0): """Show detailed information about an object. Parameters ---------- obj : object oname : str, optional name of the variable pointing to the object. info : dict, optional a structure with some information fields which may have been precomputed already. detail_level : int, optional if set to 1, more information is given. """ info = self.info(obj, oname=oname, info=info, detail_level=detail_level) displayfields = [] def add_fields(fields): for title, key in fields: field = info[key] if field is not None: displayfields.append((title, field.rstrip())) add_fields(self.pinfo_fields1) add_fields(self.pinfo_fields2) # Namespace if (info['namespace'] is not None and info['namespace'] != 'Interactive'): displayfields.append(("Namespace", info['namespace'].rstrip())) add_fields(self.pinfo_fields3) if info['isclass'] and info['init_definition']: displayfields.append(("Init definition", info['init_definition'].rstrip())) # Source or docstring, depending on detail level and whether # source found. if detail_level > 0 and info['source'] is not None: displayfields.append(("Source", cast_unicode(info['source']))) elif info['docstring'] is not None: displayfields.append(("Docstring", info["docstring"])) # Constructor info for classes if info['isclass']: if info['init_docstring'] is not None: displayfields.append(("Init docstring", info['init_docstring'])) # Info for objects: else: add_fields(self.pinfo_fields_obj) # Finally send to printer/pager: if displayfields: print_color(self._format_fields(displayfields)) def info(self, obj, oname='', info=None, detail_level=0): """Compute a dict with detailed information about an object. Optional arguments: - oname: name of the variable pointing to the object. - info: a structure with some information fields which may have been precomputed already. - detail_level: if set to 1, more information is given. 
""" obj_type = type(obj) if info is None: ismagic = 0 isalias = 0 ospace = '' else: ismagic = info.ismagic isalias = info.isalias ospace = info.namespace # Get docstring, special-casing aliases: if isalias: if not callable(obj): if len(obj) >= 2 and isinstance(obj[1], str): ds = "Alias to the system command:\n {0}".format(obj[1]) else: # pylint:disable=bare-except ds = "Alias: " + str(obj) else: ds = "Alias to " + str(obj) if obj.__doc__: ds += "\nDocstring:\n" + obj.__doc__ else: ds = getdoc(obj) if ds is None: ds = '' # store output in a dict, we initialize it here and fill it as we go out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic) string_max = 200 # max size of strings to show (snipped if longer) shalf = int((string_max - 5) / 2) if ismagic: obj_type_name = 'Magic function' elif isalias: obj_type_name = 'System alias' else: obj_type_name = obj_type.__name__ out['type_name'] = obj_type_name try: bclass = obj.__class__ out['base_class'] = str(bclass) except: # pylint:disable=bare-except pass # String form, but snip if too long in ? form (full in ??) if detail_level >= self.str_detail_level: try: ostr = str(obj) str_head = 'string_form' if not detail_level and len(ostr) > string_max: ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:] ostr = ("\n" + " " * len(str_head.expandtabs())). \ join(q.strip() for q in ostr.split("\n")) out[str_head] = ostr except: # pylint:disable=bare-except pass if ospace: out['namespace'] = ospace # Length (for strings and lists) try: out['length'] = str(len(obj)) except: # pylint:disable=bare-except pass # Filename where object was defined binary_file = False fname = find_file(obj) if fname is None: # if anything goes wrong, we don't want to show source, so it's as # if the file was binary binary_file = True else: if fname.endswith(('.so', '.dll', '.pyd')): binary_file = True elif fname.endswith(''): fname = ('Dynamically generated function. ' 'No source code available.') out['file'] = fname # Docstrings only in detail 0 mode, since source contains them (we # avoid repetitions). If source fails, we add them back, see below. if ds and detail_level == 0: out['docstring'] = ds # Original source code for any callable if detail_level: # Flush the source cache because inspect can return out-of-date # source linecache.checkcache() source = None try: try: source = getsource(obj, binary_file) except TypeError: if hasattr(obj, '__class__'): source = getsource(obj.__class__, binary_file) if source is not None: source = source.rstrip() if HAS_PYGMENTS: lexer = pyghooks.XonshLexer() source = list(pygments.lex(source, lexer=lexer)) out['source'] = source except Exception: # pylint:disable=broad-except pass if ds and source is None: out['docstring'] = ds # Constructor docstring for classes if inspect.isclass(obj): out['isclass'] = True # reconstruct the function definition and print it: try: obj_init = obj.__init__ except AttributeError: init_def = init_ds = None else: init_def = self._getdef(obj_init, oname) init_ds = getdoc(obj_init) # Skip Python's auto-generated docstrings if init_ds == _object_init_docstring: init_ds = None if init_def or init_ds: if init_def: out['init_definition'] = init_def if init_ds: out['init_docstring'] = init_ds # and class docstring for instances: else: # reconstruct the function definition and print it: defln = self._getdef(obj, oname) if defln: out['definition'] = defln # First, check whether the instance docstring is identical to the # class one, and print it separately if they don't coincide. 
In # most cases they will, but it's nice to print all the info for # objects which use instance-customized docstrings. if ds: try: cls = getattr(obj, '__class__') except: # pylint:disable=bare-except class_ds = None else: class_ds = getdoc(cls) # Skip Python's auto-generated docstrings if class_ds in _builtin_type_docstrings: class_ds = None if class_ds and ds != class_ds: out['class_docstring'] = class_ds # Next, try to show constructor docstrings try: init_ds = getdoc(obj.__init__) # Skip Python's auto-generated docstrings if init_ds == _object_init_docstring: init_ds = None except AttributeError: init_ds = None if init_ds: out['init_docstring'] = init_ds # Call form docstring for callable instances if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): call_def = self._getdef(obj.__call__, oname) if call_def: call_def = call_def # it may never be the case that call def and definition # differ, but don't include the same signature twice if call_def != out.get('definition'): out['call_def'] = call_def call_ds = getdoc(obj.__call__) # Skip Python's auto-generated docstrings if call_ds == _func_call_docstring: call_ds = None if call_ds: out['call_docstring'] = call_ds # Compute the object's argspec as a callable. The key is to decide # whether to pull it from the object itself, from its __init__ or # from its __call__ method. if inspect.isclass(obj): # Old-style classes need not have an __init__ callable_obj = getattr(obj, "__init__", None) elif callable(obj): callable_obj = obj else: callable_obj = None if callable_obj: try: argspec = getargspec(callable_obj) except (TypeError, AttributeError): # For extensions/builtins we can't retrieve the argspec pass else: # named tuples' _asdict() method returns an OrderedDict, but we # we want a normal out['argspec'] = argspec_dict = dict(argspec._asdict()) # We called this varkw before argspec became a named tuple. # With getfullargspec it's also called varkw. if 'varkw' not in argspec_dict: argspec_dict['varkw'] = argspec_dict.pop('keywords') return object_info(**out) xonsh-0.6.0/xonsh/jobs.py000066400000000000000000000311211320541242300153140ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Job control for the xonsh shell.""" import os import sys import time import ctypes import signal import builtins import subprocess import collections from xonsh.lazyasd import LazyObject from xonsh.platform import FD_STDERR, ON_DARWIN, ON_WINDOWS, ON_CYGWIN, LIBC from xonsh.tools import unthreadable tasks = LazyObject(collections.deque, globals(), 'tasks') # Track time stamp of last exit command, so that two consecutive attempts to # exit can kill all jobs and exit. _last_exit_time = None if ON_DARWIN: def _send_signal(job, signal): # On OS X, os.killpg() may cause PermissionError when there are # any zombie processes in the process group. # See github issue #1012 for details for pid in job['pids']: if pid is None: # the pid of an aliased proc is None continue try: os.kill(pid, signal) except ProcessLookupError: pass elif ON_WINDOWS: pass elif ON_CYGWIN: # Similar to what happened on OSX, more issues on Cygwin # (see Github issue #514). 
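    # Try signalling the whole process group first; if that fails for any
    # reason, fall back to signalling each recorded pid individually,
    # ignoring pids that have already gone away.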
def _send_signal(job, signal): try: os.killpg(job['pgrp'], signal) except Exception: for pid in job['pids']: try: os.kill(pid, signal) except Exception: pass else: def _send_signal(job, signal): pgrp = job['pgrp'] if pgrp is None: for pid in job['pids']: try: os.kill(pid, signal) except Exception: pass else: os.killpg(job['pgrp'], signal) if ON_WINDOWS: def _continue(job): job['status'] = "running" def _kill(job): subprocess.check_output(['taskkill', '/F', '/T', '/PID', str(job['obj'].pid)]) def ignore_sigtstp(): pass def give_terminal_to(pgid): pass def wait_for_active_job(last_task=None, backgrounded=False): """ Wait for the active job to finish, to be killed by SIGINT, or to be suspended by ctrl-z. """ _clear_dead_jobs() active_task = get_next_task() # Return when there are no foreground active task if active_task is None: return last_task obj = active_task['obj'] _continue(active_task) while obj.returncode is None: try: obj.wait(0.01) except subprocess.TimeoutExpired: pass except KeyboardInterrupt: _kill(active_task) return wait_for_active_job(last_task=active_task) else: def _continue(job): _send_signal(job, signal.SIGCONT) def _kill(job): _send_signal(job, signal.SIGKILL) def ignore_sigtstp(): signal.signal(signal.SIGTSTP, signal.SIG_IGN) _shell_pgrp = os.getpgrp() _block_when_giving = LazyObject(lambda: (signal.SIGTTOU, signal.SIGTTIN, signal.SIGTSTP, signal.SIGCHLD), globals(), '_block_when_giving') # give_terminal_to is a simplified version of: # give_terminal_to from bash 4.3 source, jobs.c, line 4030 # this will give the terminal to the process group pgid if ON_CYGWIN: # on cygwin, signal.pthread_sigmask does not exist in Python, even # though pthread_sigmask is defined in the kernel. thus, we use # ctypes to mimic the calls in the "normal" version below. def give_terminal_to(pgid): omask = ctypes.c_ulong() mask = ctypes.c_ulong() LIBC.sigemptyset(ctypes.byref(mask)) for i in _block_when_giving: LIBC.sigaddset(ctypes.byref(mask), ctypes.c_int(i)) LIBC.sigemptyset(ctypes.byref(omask)) LIBC.sigprocmask(ctypes.c_int(signal.SIG_BLOCK), ctypes.byref(mask), ctypes.byref(omask)) LIBC.tcsetpgrp(ctypes.c_int(FD_STDERR), ctypes.c_int(pgid)) LIBC.sigprocmask(ctypes.c_int(signal.SIG_SETMASK), ctypes.byref(omask), None) return True else: def give_terminal_to(pgid): if pgid is None: return False oldmask = signal.pthread_sigmask(signal.SIG_BLOCK, _block_when_giving) try: os.tcsetpgrp(FD_STDERR, pgid) return True except ProcessLookupError: # when the process finished before giving terminal to it, # see issue #2288 return False except OSError as e: if e.errno == 22: # [Errno 22] Invalid argument # there are cases that all the processes of pgid have # finished, then we don't need to do anything here, see # issue #2220 return False elif e.errno == 25: # [Errno 25] Inappropriate ioctl for device # There are also cases where we are not connected to a # real TTY, even though we may be run in interactive # mode. See issue #2267 for an example with emacs return False else: raise finally: signal.pthread_sigmask(signal.SIG_SETMASK, oldmask) def wait_for_active_job(last_task=None, backgrounded=False): """ Wait for the active job to finish, to be killed by SIGINT, or to be suspended by ctrl-z. 
""" _clear_dead_jobs() active_task = get_next_task() # Return when there are no foreground active task if active_task is None: return last_task obj = active_task['obj'] backgrounded = False try: _, wcode = os.waitpid(obj.pid, os.WUNTRACED) except ChildProcessError: # No child processes return wait_for_active_job(last_task=active_task, backgrounded=backgrounded) if os.WIFSTOPPED(wcode): print('^Z') active_task['status'] = "stopped" backgrounded = True elif os.WIFSIGNALED(wcode): print() # get a newline because ^C will have been printed obj.signal = (os.WTERMSIG(wcode), os.WCOREDUMP(wcode)) obj.returncode = None else: obj.returncode = os.WEXITSTATUS(wcode) obj.signal = None return wait_for_active_job(last_task=active_task, backgrounded=backgrounded) def get_next_task(): """ Get the next active task and put it on top of the queue""" selected_task = None for tid in tasks: task = get_task(tid) if not task['bg'] and task['status'] == "running": selected_task = tid break if selected_task is None: return tasks.remove(selected_task) tasks.appendleft(selected_task) return get_task(selected_task) def get_task(tid): return builtins.__xonsh_all_jobs__[tid] def _clear_dead_jobs(): to_remove = set() for tid in tasks: obj = get_task(tid)['obj'] if obj.poll() is not None: to_remove.add(tid) for job in to_remove: tasks.remove(job) del builtins.__xonsh_all_jobs__[job] def print_one_job(num, outfile=sys.stdout): """Print a line describing job number ``num``.""" try: job = builtins.__xonsh_all_jobs__[num] except KeyError: return pos = '+' if tasks[0] == num else '-' if tasks[1] == num else ' ' status = job['status'] cmd = [' '.join(i) if isinstance(i, list) else i for i in job['cmds']] cmd = ' '.join(cmd) pid = job['pids'][-1] bg = ' &' if job['bg'] else '' print('[{}]{} {}: {}{} ({})'.format(num, pos, status, cmd, bg, pid), file=outfile) def get_next_job_number(): """Get the lowest available unique job number (for the next job created). """ _clear_dead_jobs() i = 1 while i in builtins.__xonsh_all_jobs__: i += 1 return i def add_job(info): """Add a new job to the jobs dictionary.""" num = get_next_job_number() info['started'] = time.time() info['status'] = "running" tasks.appendleft(num) builtins.__xonsh_all_jobs__[num] = info if info['bg'] and builtins.__xonsh_env__.get('XONSH_INTERACTIVE'): print_one_job(num) def clean_jobs(): """Clean up jobs for exiting shell In non-interactive mode, kill all jobs. In interactive mode, check for suspended or background jobs, print a warning if any exist, and return False. Otherwise, return True. """ jobs_clean = True if builtins.__xonsh_env__['XONSH_INTERACTIVE']: _clear_dead_jobs() if builtins.__xonsh_all_jobs__: global _last_exit_time hist = builtins.__xonsh_history__ if hist is not None and len(hist.tss) > 0: last_cmd_start = hist.tss[-1][0] else: last_cmd_start = None if (_last_exit_time and last_cmd_start and _last_exit_time > last_cmd_start): # Exit occurred after last command started, so it was called as # part of the last command and is now being called again # immediately. Kill jobs and exit without reminder about # unfinished jobs in this case. 
kill_all_jobs() else: if len(builtins.__xonsh_all_jobs__) > 1: msg = 'there are unfinished jobs' else: msg = 'there is an unfinished job' if builtins.__xonsh_env__['SHELL_TYPE'] != 'prompt_toolkit': # The Ctrl+D binding for prompt_toolkit already inserts a # newline print() print('xonsh: {}'.format(msg), file=sys.stderr) print('-' * 5, file=sys.stderr) jobs([], stdout=sys.stderr) print('-' * 5, file=sys.stderr) print('Type "exit" or press "ctrl-d" again to force quit.', file=sys.stderr) jobs_clean = False _last_exit_time = time.time() else: kill_all_jobs() return jobs_clean def kill_all_jobs(): """ Send SIGKILL to all child processes (called when exiting xonsh). """ _clear_dead_jobs() for job in builtins.__xonsh_all_jobs__.values(): _kill(job) def jobs(args, stdin=None, stdout=sys.stdout, stderr=None): """ xonsh command: jobs Display a list of all current jobs. """ _clear_dead_jobs() for j in tasks: print_one_job(j, outfile=stdout) return None, None @unthreadable def fg(args, stdin=None): """ xonsh command: fg Bring the currently active job to the foreground, or, if a single number is given as an argument, bring that job to the foreground. Additionally, specify "+" for the most recent job and "-" for the second most recent job. """ _clear_dead_jobs() if len(tasks) == 0: return '', 'Cannot bring nonexistent job to foreground.\n' if len(args) == 0: tid = tasks[0] # take the last manipulated task by default elif len(args) == 1: try: if args[0] == '+': # take the last manipulated task tid = tasks[0] elif args[0] == '-': # take the second to last manipulated task tid = tasks[1] else: tid = int(args[0]) except (ValueError, IndexError): return '', 'Invalid job: {}\n'.format(args[0]) if tid not in builtins.__xonsh_all_jobs__: return '', 'Invalid job: {}\n'.format(args[0]) else: return '', 'fg expects 0 or 1 arguments, not {}\n'.format(len(args)) # Put this one on top of the queue tasks.remove(tid) tasks.appendleft(tid) job = get_task(tid) job['bg'] = False job['status'] = "running" if builtins.__xonsh_env__.get('XONSH_INTERACTIVE'): print_one_job(tid) pipeline = job['pipeline'] pipeline.resume(job) def bg(args, stdin=None): """xonsh command: bg Resume execution of the currently active job in the background, or, if a single number is given as an argument, resume that job in the background. """ res = fg(args, stdin) if res is None: curtask = get_task(tasks[0]) curtask['bg'] = True _continue(curtask) else: return res xonsh-0.6.0/xonsh/jsonutils.py000066400000000000000000000007101320541242300164110ustar00rootroot00000000000000"""Custom tools for managing JSON serialization / deserialization of xonsh objects. """ import functools from xonsh.tools import EnvPath @functools.singledispatch def serialize_xonsh_json(val): """JSON serializer for xonsh custom data structures. This is only called when another normal JSON types are not found. 
""" return str(val) @serialize_xonsh_json.register(EnvPath) def _serialize_xonsh_json_env_path(val): return val.paths xonsh-0.6.0/xonsh/jupyter_kernel.py000066400000000000000000000071311320541242300174250ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Hooks for Jupyter Xonsh Kernel.""" import builtins from pprint import pformat from ipykernel.kernelbase import Kernel from xonsh import __version__ as version from xonsh.main import main_context from xonsh.completer import Completer MAX_SIZE = 8388608 # 8 Mb class XonshKernel(Kernel): """Xonsh xernal for Jupyter""" implementation = 'Xonsh ' + version implementation_version = version language = 'xonsh' language_version = version banner = 'Xonsh - Python-powered, cross-platform shell' language_info = {'name': 'xonsh', 'version': version, 'pygments_lexer': 'xonsh', 'codemirror_mode': 'shell', 'mimetype': 'text/x-sh', 'file_extension': '.xsh', } def __init__(self, **kwargs): self.completer = Completer() super().__init__(**kwargs) def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): """Execute user code.""" if len(code.strip()) == 0: return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} shell = builtins.__xonsh_shell__ hist = builtins.__xonsh_history__ try: shell.default(code) interrupted = False except KeyboardInterrupt: interrupted = True if not silent: # stdout response if hasattr(builtins, '_') and builtins._ is not None: # rely on sys.displayhook functionality self._respond_in_chunks('stdout', pformat(builtins._)) builtins._ = None if hist is not None and len(hist) > 0: self._respond_in_chunks('stdout', hist.outs[-1]) if interrupted: return {'status': 'abort', 'execution_count': self.execution_count} rtn = 0 if (hist is None or len(hist) == 0) else hist.rtns[-1] if 0 < rtn: message = {'status': 'error', 'execution_count': self.execution_count, 'ename': '', 'evalue': str(rtn), 'traceback': []} else: message = {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} return message def _respond_in_chunks(self, name, s, chunksize=1024): if s is None: return n = len(s) if n == 0: return lower = range(0, n, chunksize) upper = range(chunksize, n+chunksize, chunksize) for l, u in zip(lower, upper): response = {'name': name, 'text': s[l:u], } self.send_response(self.iopub_socket, 'stream', response) def do_complete(self, code, pos): """Get completions.""" shell = builtins.__xonsh_shell__ line = code.split('\n')[-1] line = builtins.aliases.expand_alias(line) prefix = line.split(' ')[-1] endidx = pos begidx = pos - len(prefix) rtn, _ = self.completer.complete(prefix, line, begidx, endidx, shell.ctx) message = {'matches': rtn, 'cursor_start': begidx, 'cursor_end': endidx, 'metadata': {}, 'status': 'ok'} return message if __name__ == '__main__': from ipykernel.kernelapp import IPKernelApp # must manually pass in args to avoid interfering w/ Jupyter arg parsing with main_context(argv=['--shell-type=readline']): IPKernelApp.launch_instance(kernel_class=XonshKernel) xonsh-0.6.0/xonsh/lazyasd.py000066400000000000000000000257731320541242300160460ustar00rootroot00000000000000"""Lazy and self destructive containers for speeding up module import.""" # Copyright 2015-2016, the xonsh developers. All rights reserved. 
import os import sys import time import types import builtins import threading import importlib import importlib.util import collections.abc as cabc __version__ = '0.1.3' class LazyObject(object): def __init__(self, load, ctx, name): """Lazily loads an object via the load function the first time an attribute is accessed. Once loaded it will replace itself in the provided context (typically the globals of the call site) with the given name. For example, you can prevent the compilation of a regular expression until it is actually used:: DOT = LazyObject((lambda: re.compile('.')), globals(), 'DOT') Parameters ---------- load : function with no arguments A loader function that performs the actual object construction. ctx : Mapping Context to replace the LazyObject instance in with the object returned by load(). name : str Name in the context to give the loaded object. This *should* be the name on the LHS of the assignment. """ self._lasdo = { 'loaded': False, 'load': load, 'ctx': ctx, 'name': name, } def _lazy_obj(self): d = self._lasdo if d['loaded']: obj = d['obj'] else: obj = d['load']() d['ctx'][d['name']] = d['obj'] = obj d['loaded'] = True return obj def __getattribute__(self, name): if name == '_lasdo' or name == '_lazy_obj': return super().__getattribute__(name) obj = self._lazy_obj() return getattr(obj, name) def __bool__(self): obj = self._lazy_obj() return bool(obj) def __iter__(self): obj = self._lazy_obj() yield from obj def __getitem__(self, item): obj = self._lazy_obj() return obj[item] def __setitem__(self, key, value): obj = self._lazy_obj() obj[key] = value def __delitem__(self, item): obj = self._lazy_obj() del obj[item] def __call__(self, *args, **kwargs): obj = self._lazy_obj() return obj(*args, **kwargs) def __lt__(self, other): obj = self._lazy_obj() return obj < other def __le__(self, other): obj = self._lazy_obj() return obj <= other def __eq__(self, other): obj = self._lazy_obj() return obj == other def __ne__(self, other): obj = self._lazy_obj() return obj != other def __gt__(self, other): obj = self._lazy_obj() return obj > other def __ge__(self, other): obj = self._lazy_obj() return obj >= other def __hash__(self): obj = self._lazy_obj() return hash(obj) def __or__(self, other): obj = self._lazy_obj() return obj | other def __str__(self): return str(self._lazy_obj()) def __repr__(self): return repr(self._lazy_obj()) def lazyobject(f): """Decorator for constructing lazy objects from a function.""" return LazyObject(f, f.__globals__, f.__name__) class LazyDict(cabc.MutableMapping): def __init__(self, loaders, ctx, name): """Dictionary like object that lazily loads its values from an initial dict of key-loader function pairs. Each key is loaded when its value is first accessed. Once fully loaded, this object will replace itself in the provided context (typically the globals of the call site) with the given name. For example, you can prevent the compilation of a bunch of regular expressions until they are actually used:: RES = LazyDict({ 'dot': lambda: re.compile('.'), 'all': lambda: re.compile('.*'), 'two': lambda: re.compile('..'), }, globals(), 'RES') Parameters ---------- loaders : Mapping of keys to functions with no arguments A mapping of loader function that performs the actual value construction upon access. ctx : Mapping Context to replace the LazyDict instance in with the the fully loaded mapping. name : str Name in the context to give the loaded mapping. This *should* be the name on the LHS of the assignment. 
""" self._loaders = loaders self._ctx = ctx self._name = name self._d = type(loaders)() # make sure to return the same type def _destruct(self): if len(self._loaders) == 0: self._ctx[self._name] = self._d def __getitem__(self, key): d = self._d if key in d: val = d[key] else: # pop will raise a key error for us loader = self._loaders.pop(key) d[key] = val = loader() self._destruct() return val def __setitem__(self, key, value): self._d[key] = value if key in self._loaders: del self._loaders[key] self._destruct() def __delitem__(self, key): if key in self._d: del self._d[key] else: del self._loaders[key] self._destruct() def __iter__(self): yield from (set(self._d.keys()) | set(self._loaders.keys())) def __len__(self): return len(self._d) + len(self._loaders) def lazydict(f): """Decorator for constructing lazy dicts from a function.""" return LazyDict(f, f.__globals__, f.__name__) class LazyBool(object): def __init__(self, load, ctx, name): """Boolean like object that lazily computes it boolean value when it is first asked. Once loaded, this result will replace itself in the provided context (typically the globals of the call site) with the given name. For example, you can prevent the complex boolean until it is actually used:: ALIVE = LazyDict(lambda: not DEAD, globals(), 'ALIVE') Parameters ---------- load : function with no arguments A loader function that performs the actual boolean evaluation. ctx : Mapping Context to replace the LazyBool instance in with the the fully loaded mapping. name : str Name in the context to give the loaded mapping. This *should* be the name on the LHS of the assignment. """ self._load = load self._ctx = ctx self._name = name self._result = None def __bool__(self): if self._result is None: res = self._ctx[self._name] = self._result = self._load() else: res = self._result return res def lazybool(f): """Decorator for constructing lazy booleans from a function.""" return LazyBool(f, f.__globals__, f.__name__) # # Background module loaders # class BackgroundModuleProxy(types.ModuleType): """Proxy object for modules loaded in the background that block attribute access until the module is loaded.. 
""" def __init__(self, modname): self.__dct__ = { 'loaded': False, 'modname': modname, } def __getattribute__(self, name): passthrough = frozenset({'__dct__', '__class__', '__spec__'}) if name in passthrough: return super().__getattribute__(name) dct = self.__dct__ modname = dct['modname'] if dct['loaded']: mod = sys.modules[modname] else: delay_types = (BackgroundModuleProxy, type(None)) while isinstance(sys.modules.get(modname, None), delay_types): time.sleep(0.001) mod = sys.modules[modname] dct['loaded'] = True # some modules may do construction after import, give them a second stall = 0 while not hasattr(mod, name) and stall < 1000: stall += 1 time.sleep(0.001) return getattr(mod, name) class BackgroundModuleLoader(threading.Thread): """Thread to load modules in the background.""" def __init__(self, name, package, replacements, *args, **kwargs): super().__init__(*args, **kwargs) self.daemon = True self.name = name self.package = package self.replacements = replacements self.start() def run(self): # wait for other modules to stop being imported # We assume that module loading is finished when sys.modules doesn't # get longer in 5 consecutive 1ms waiting steps counter = 0 last = -1 while counter < 5: new = len(sys.modules) if new == last: counter += 1 else: last = new counter = 0 time.sleep(0.001) # now import module properly modname = importlib.util.resolve_name(self.name, self.package) if isinstance(sys.modules[modname], BackgroundModuleProxy): del sys.modules[modname] mod = importlib.import_module(self.name, package=self.package) for targname, varname in self.replacements.items(): if targname in sys.modules: targmod = sys.modules[targname] setattr(targmod, varname, mod) def load_module_in_background(name, package=None, debug='DEBUG', env=None, replacements=None): """Entry point for loading modules in background thread. Parameters ---------- name : str Module name to load in background thread. package : str or None, optional Package name, has the same meaning as in importlib.import_module(). debug : str, optional Debugging symbol name to look up in the environment. env : Mapping or None, optional Environment this will default to __xonsh_env__, if available, and os.environ otherwise. replacements : Mapping or None, optional Dictionary mapping fully qualified module names (eg foo.bar.baz) that import the lazily loaded module, with the variable name in that module. For example, suppose that foo.bar imports module a as b, this dict is then {'foo.bar': 'b'}. Returns ------- module : ModuleType This is either the original module that is found in sys.modules or a proxy module that will block until delay attribute access until the module is fully loaded. 
""" modname = importlib.util.resolve_name(name, package) if modname in sys.modules: return sys.modules[modname] if env is None: env = getattr(builtins, '__xonsh_env__', os.environ) if env.get(debug, None): mod = importlib.import_module(name, package=package) return mod proxy = sys.modules[modname] = BackgroundModuleProxy(modname) BackgroundModuleLoader(name, package, replacements or {}) return proxy xonsh-0.6.0/xonsh/lazyimps.py000066400000000000000000000026631320541242300162400ustar00rootroot00000000000000"""Lazy imports that may apply across the xonsh package.""" import importlib from xonsh.platform import ON_WINDOWS, ON_DARWIN from xonsh.lazyasd import LazyObject, lazyobject pygments = LazyObject(lambda: importlib.import_module('pygments'), globals(), 'pygments') pyghooks = LazyObject(lambda: importlib.import_module('xonsh.pyghooks'), globals(), 'pyghooks') @lazyobject def pty(): if ON_WINDOWS: return else: return importlib.import_module('pty') @lazyobject def termios(): if ON_WINDOWS: return else: return importlib.import_module('termios') @lazyobject def fcntl(): if ON_WINDOWS: return else: return importlib.import_module('fcntl') @lazyobject def tty(): if ON_WINDOWS: return else: return importlib.import_module('tty') @lazyobject def _winapi(): if ON_WINDOWS: import _winapi as m else: m = None return m @lazyobject def msvcrt(): if ON_WINDOWS: import msvcrt as m else: m = None return m @lazyobject def winutils(): if ON_WINDOWS: import xonsh.winutils as m else: m = None return m @lazyobject def macutils(): if ON_DARWIN: import xonsh.macutils as m else: m = None return m @lazyobject def terminal256(): return importlib.import_module('pygments.formatters.terminal256') xonsh-0.6.0/xonsh/lazyjson.py000066400000000000000000000172401320541242300162360ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements a lazy JSON file class that wraps around json data.""" import io import json import weakref import contextlib import collections.abc as cabc def _to_json_with_size(obj, offset=0, sort_keys=False): if isinstance(obj, str): s = json.dumps(obj) o = offset n = size = len(s.encode()) # size in bytes elif isinstance(obj, cabc.Mapping): s = '{' j = offset + 1 o = {} size = {} items = sorted(obj.items()) if sort_keys else obj.items() for key, val in items: s_k, o_k, n_k, size_k = _to_json_with_size(key, offset=j, sort_keys=sort_keys) s += s_k + ': ' j += n_k + 2 s_v, o_v, n_v, size_v = _to_json_with_size(val, offset=j, sort_keys=sort_keys) o[key] = o_v size[key] = size_v s += s_v + ', ' j += n_v + 2 if s.endswith(', '): s = s[:-2] s += '}\n' n = len(s) o['__total__'] = offset size['__total__'] = n elif isinstance(obj, cabc.Sequence): s = '[' j = offset + 1 o = [] size = [] for x in obj: s_x, o_x, n_x, size_x = _to_json_with_size(x, offset=j, sort_keys=sort_keys) o.append(o_x) size.append(size_x) s += s_x + ', ' j += n_x + 2 if s.endswith(', '): s = s[:-2] s += ']\n' n = len(s) o.append(offset) size.append(n) else: s = json.dumps(obj, sort_keys=sort_keys) o = offset n = size = len(s) return s, o, n, size def index(obj, sort_keys=False): """Creates an index for a JSON file.""" idx = {} json_obj = _to_json_with_size(obj, sort_keys=sort_keys) s, idx['offsets'], _, idx['sizes'] = json_obj return s, idx JSON_FORMAT = """{{"locs": [{iloc:>10}, {ilen:>10}, {dloc:>10}, {dlen:>10}], "index": {index}, "data": {data} }} """ def dumps(obj, sort_keys=False): """Dumps an object to JSON with an index.""" data, idx = index(obj, sort_keys=sort_keys) jdx = json.dumps(idx, sort_keys=sort_keys) iloc = 69 ilen = 
len(jdx) dloc = iloc + ilen + 11 dlen = len(data) s = JSON_FORMAT.format(index=jdx, data=data, iloc=iloc, ilen=ilen, dloc=dloc, dlen=dlen) return s def ljdump(obj, fp, sort_keys=False): """Dumps an object to JSON file.""" s = dumps(obj, sort_keys=sort_keys) fp.write(s) class LJNode(cabc.Mapping, cabc.Sequence): """A proxy node for JSON nodes. Acts as both sequence and mapping.""" def __init__(self, offsets, sizes, root): """Parameters ---------- offsets : dict, list, or int offsets of corresponding data structure, in bytes sizes : dict, list, or int sizes of corresponding data structure, in bytes root : weakref.proxy of LazyJSON weakref back to root node, which should be a LazyJSON object. """ self.offsets = offsets self.sizes = sizes self.root = root self.is_mapping = isinstance(self.offsets, cabc.Mapping) self.is_sequence = isinstance(self.offsets, cabc.Sequence) def __len__(self): # recall that for maps, the '__total__' key is added and for # sequences the last element represents the total size/offset. return len(self.sizes) - 1 def load(self): """Returns the Python data structure represented by the node.""" if self.is_mapping: offset = self.offsets['__total__'] size = self.sizes['__total__'] elif self.is_sequence: offset = self.offsets[-1] size = self.sizes[-1] elif isinstance(self.offsets, int): offset = self.offsets size = self.sizes return self._load_or_node(offset, size) def _load_or_node(self, offset, size): if isinstance(offset, int): with self.root._open(newline='\n') as f: f.seek(self.root.dloc + offset) s = f.read(size) val = json.loads(s) elif isinstance(offset, (cabc.Mapping, cabc.Sequence)): val = LJNode(offset, size, self.root) else: raise TypeError('incorrect types for offset node') return val def _getitem_mapping(self, key): if key == '__total__': raise KeyError('"__total__" is a special LazyJSON key!') offset = self.offsets[key] size = self.sizes[key] return self._load_or_node(offset, size) def _getitem_sequence(self, key): if isinstance(key, int): rtn = self._load_or_node(self.offsets[key], self.sizes[key]) elif isinstance(key, slice): key = slice(*key.indices(len(self))) rtn = list(map(self._load_or_node, self.offsets[key], self.sizes[key])) else: raise TypeError('only integer indexing available') return rtn def __getitem__(self, key): if self.is_mapping: rtn = self._getitem_mapping(key) elif self.is_sequence: rtn = self._getitem_sequence(key) else: raise NotImplementedError return rtn def __iter__(self): if self.is_mapping: keys = set(self.offsets.keys()) keys.discard('__total__') yield from iter(keys) elif self.is_sequence: i = 0 n = len(self) while i < n: yield self._load_or_node(self.offsets[i], self.sizes[i]) i += 1 else: raise NotImplementedError class LazyJSON(LJNode): """Represents a lazy json file. Can be used like a normal Python dict or list. """ def __init__(self, f, reopen=True): """Parameters ---------- f : file handle or str JSON file to open. reopen : bool, optional Whether new file handle should be opened for each load. 
""" self._f = f self.reopen = reopen if not reopen and isinstance(f, str): self._f = open(f, 'r', newline='\n') self._load_index() self.root = weakref.proxy(self) self.is_mapping = isinstance(self.offsets, cabc.Mapping) self.is_sequence = isinstance(self.offsets, cabc.Sequence) def __del__(self): self.close() def close(self): """Close the file handle, if appropriate.""" if not self.reopen and isinstance(self._f, io.IOBase): try: self._f.close() except OSError: pass @contextlib.contextmanager def _open(self, *args, **kwargs): if self.reopen and isinstance(self._f, str): f = open(self._f, *args, **kwargs) yield f f.close() else: yield self._f def _load_index(self): """Loads the index from the start of the file.""" with self._open(newline='\n') as f: # read in the location data f.seek(9) locs = f.read(48) locs = json.loads(locs) self.iloc, self.ilen, self.dloc, self.dlen = locs # read in the index f.seek(self.iloc) idx = f.read(self.ilen) idx = json.loads(idx) self.offsets = idx['offsets'] self.sizes = idx['sizes'] def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() xonsh-0.6.0/xonsh/lexer.py000066400000000000000000000332021320541242300155000ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Lexer for xonsh code. Written using a hybrid of ``tokenize`` and PLY. """ import io # 'keyword' interferes with ast.keyword import keyword as kwmod try: from ply.lex import LexToken except ImportError: from xonsh.ply.ply.lex import LexToken from xonsh.lazyasd import lazyobject from xonsh.platform import PYTHON_VERSION_INFO from xonsh.tokenize import (OP, IOREDIRECT, STRING, DOLLARNAME, NUMBER, SEARCHPATH, NEWLINE, INDENT, DEDENT, NL, COMMENT, ENCODING, ENDMARKER, NAME, ERRORTOKEN, GREATER, LESS, RIGHTSHIFT, tokenize, TokenError) @lazyobject def token_map(): """Mapping from ``tokenize`` tokens (or token types) to PLY token types. If a simple one-to-one mapping from ``tokenize`` to PLY exists, the lexer will look it up here and generate a single PLY token of the given type. Otherwise, it will fall back to handling that token using one of the handlers in``special_handlers``. 
""" tm = {} # operators _op_map = { # punctuation ',': 'COMMA', '.': 'PERIOD', ';': 'SEMI', ':': 'COLON', '...': 'ELLIPSIS', # basic operators '+': 'PLUS', '-': 'MINUS', '*': 'TIMES', '@': 'AT', '/': 'DIVIDE', '//': 'DOUBLEDIV', '%': 'MOD', '**': 'POW', '|': 'PIPE', '~': 'TILDE', '^': 'XOR', '<<': 'LSHIFT', '>>': 'RSHIFT', '<': 'LT', '<=': 'LE', '>': 'GT', '>=': 'GE', '==': 'EQ', '!=': 'NE', '->': 'RARROW', # assignment operators '=': 'EQUALS', '+=': 'PLUSEQUAL', '-=': 'MINUSEQUAL', '*=': 'TIMESEQUAL', '@=': 'ATEQUAL', '/=': 'DIVEQUAL', '%=': 'MODEQUAL', '**=': 'POWEQUAL', '<<=': 'LSHIFTEQUAL', '>>=': 'RSHIFTEQUAL', '&=': 'AMPERSANDEQUAL', '^=': 'XOREQUAL', '|=': 'PIPEEQUAL', '//=': 'DOUBLEDIVEQUAL', # extra xonsh operators '?': 'QUESTION', '??': 'DOUBLE_QUESTION', '@$': 'ATDOLLAR', '&': 'AMPERSAND', } for (op, typ) in _op_map.items(): tm[(OP, op)] = typ tm[IOREDIRECT] = 'IOREDIRECT' tm[STRING] = 'STRING' tm[DOLLARNAME] = 'DOLLAR_NAME' tm[NUMBER] = 'NUMBER' tm[SEARCHPATH] = 'SEARCHPATH' tm[NEWLINE] = 'NEWLINE' tm[INDENT] = 'INDENT' tm[DEDENT] = 'DEDENT' if PYTHON_VERSION_INFO >= (3, 5, 0): from xonsh.tokenize import ASYNC, AWAIT tm[ASYNC] = 'ASYNC' tm[AWAIT] = 'AWAIT' return tm def handle_name(state, token): """Function for handling name tokens""" typ = 'NAME' if state['pymode'][-1][0]: if token.string in kwmod.kwlist: typ = token.string.upper() state['last'] = token yield _new_token(typ, token.string, token.start) else: prev = state['last'] state['last'] = token has_whitespace = prev.end != token.start if token.string == 'and' and has_whitespace: yield _new_token('AND', token.string, token.start) elif token.string == 'or' and has_whitespace: yield _new_token('OR', token.string, token.start) else: yield _new_token('NAME', token.string, token.start) def _end_delimiter(state, token): py = state['pymode'] s = token.string l, c = token.start if len(py) > 1: mode, orig, match, pos = py.pop() if s != match: e = '"{}" at {} ends "{}" at {} (expected "{}")' return e.format(s, (l, c), orig, pos, match) else: return 'Unmatched "{}" at line {}, column {}'.format(s, l, c) def handle_rparen(state, token): """ Function for handling ``)`` """ e = _end_delimiter(state, token) if e is None: state['last'] = token yield _new_token('RPAREN', ')', token.start) else: yield _new_token('ERRORTOKEN', e, token.start) def handle_rbrace(state, token): """Function for handling ``}``""" e = _end_delimiter(state, token) if e is None: state['last'] = token yield _new_token('RBRACE', '}', token.start) else: yield _new_token('ERRORTOKEN', e, token.start) def handle_rbracket(state, token): """ Function for handling ``]`` """ e = _end_delimiter(state, token) if e is None: state['last'] = token yield _new_token('RBRACKET', ']', token.start) else: yield _new_token('ERRORTOKEN', e, token.start) def handle_error_space(state, token): """ Function for handling special whitespace characters in subprocess mode """ if not state['pymode'][-1][0]: state['last'] = token yield _new_token('WS', token.string, token.start) else: yield from [] def handle_error_linecont(state, token): """Function for handling special line continuations as whitespace characters in subprocess mode. 
""" if state['pymode'][-1][0]: return prev = state['last'] if prev.end != token.start: return # previous token is separated by whitespace state['last'] = token yield _new_token('WS', '\\', token.start) def handle_error_token(state, token): """ Function for handling error tokens """ state['last'] = token if token.string == '!': typ = 'BANG' elif not state['pymode'][-1][0]: typ = 'NAME' else: typ = 'ERRORTOKEN' yield _new_token(typ, token.string, token.start) def handle_ignore(state, token): """Function for handling tokens that should be ignored""" yield from [] def handle_double_amps(state, token): yield _new_token('AND', 'and', token.start) def handle_double_pipe(state, token): yield _new_token('OR', 'or', token.start) def handle_redirect(state, token): # The parser expects whitespace after a redirection in subproc mode. # If whitespace does not exist, we'll issue an empty whitespace # token before proceeding. state['last'] = token typ = token.type st = token.string key = (typ, st) if (typ, st) in token_map else typ yield _new_token(token_map[key], st, token.start) if state['pymode'][-1][0]: return # add a whitespace token after a redirection, if we need to next_tok = next(state['stream']) if next_tok.start == token.end: yield _new_token('WS', '', token.end) yield from handle_token(state, next_tok) def _make_matcher_handler(tok, typ, pymode, ender, handlers): matcher = (')' if tok.endswith('(') else '}' if tok.endswith('{') else ']' if tok.endswith('[') else None) def _inner_handler(state, token): state['pymode'].append((pymode, tok, matcher, token.start)) state['last'] = token yield _new_token(typ, tok, token.start) handlers[(OP, tok)] = _inner_handler @lazyobject def special_handlers(): """Mapping from ``tokenize`` tokens (or token types) to the proper function for generating PLY tokens from them. In addition to yielding PLY tokens, these functions may manipulate the Lexer's state. """ sh = { NL: handle_ignore, COMMENT: handle_ignore, ENCODING: handle_ignore, ENDMARKER: handle_ignore, NAME: handle_name, ERRORTOKEN: handle_error_token, LESS: handle_redirect, GREATER: handle_redirect, RIGHTSHIFT: handle_redirect, IOREDIRECT: handle_redirect, (OP, '<'): handle_redirect, (OP, '>'): handle_redirect, (OP, '>>'): handle_redirect, (OP, ')'): handle_rparen, (OP, '}'): handle_rbrace, (OP, ']'): handle_rbracket, (OP, '&&'): handle_double_amps, (OP, '||'): handle_double_pipe, (ERRORTOKEN, ' '): handle_error_space, (ERRORTOKEN, '\\\n'): handle_error_linecont, (ERRORTOKEN, '\\\r\n'): handle_error_linecont, } _make_matcher_handler('(', 'LPAREN', True, ')', sh) _make_matcher_handler('[', 'LBRACKET', True, ']', sh) _make_matcher_handler('{', 'LBRACE', True, '}', sh) _make_matcher_handler('$(', 'DOLLAR_LPAREN', False, ')', sh) _make_matcher_handler('$[', 'DOLLAR_LBRACKET', False, ']', sh) _make_matcher_handler('${', 'DOLLAR_LBRACE', True, '}', sh) _make_matcher_handler('!(', 'BANG_LPAREN', False, ')', sh) _make_matcher_handler('![', 'BANG_LBRACKET', False, ']', sh) _make_matcher_handler('@(', 'AT_LPAREN', True, ')', sh) _make_matcher_handler('@$(', 'ATDOLLAR_LPAREN', False, ')', sh) return sh def handle_token(state, token): """ General-purpose token handler. Makes use of ``token_map`` or ``special_map`` to yield one or more PLY tokens from the given input. Parameters ---------- state : The current state of the lexer, including information about whether we are in Python mode or subprocess mode, which changes the lexer's behavior. Also includes the stream of tokens yet to be considered. 
token : The token (from ``tokenize``) currently under consideration """ typ = token.type st = token.string pymode = state['pymode'][-1][0] if not pymode: if state['last'] is not None and state['last'].end != token.start: cur = token.start old = state['last'].end if cur[0] == old[0] and cur[1] > old[1]: yield _new_token('WS', token.line[old[1]:cur[1]], old) if (typ, st) in special_handlers: yield from special_handlers[(typ, st)](state, token) elif (typ, st) in token_map: state['last'] = token yield _new_token(token_map[(typ, st)], st, token.start) elif typ in special_handlers: yield from special_handlers[typ](state, token) elif typ in token_map: state['last'] = token yield _new_token(token_map[typ], st, token.start) else: m = "Unexpected token: {0}".format(token) yield _new_token("ERRORTOKEN", m, token.start) def get_tokens(s): """ Given a string containing xonsh code, generates a stream of relevant PLY tokens using ``handle_token``. """ state = {'indents': [0], 'last': None, 'pymode': [(True, '', '', (0, 0))], 'stream': tokenize(io.BytesIO(s.encode('utf-8')).readline)} while True: try: token = next(state['stream']) yield from handle_token(state, token) except StopIteration: if len(state['pymode']) > 1: pm, o, m, p = state['pymode'][-1] l, c = p e = 'Unmatched "{}" at line {}, column {}' yield _new_token('ERRORTOKEN', e.format(o, l, c), (0, 0)) break except TokenError as e: # this is recoverable in single-line mode (from the shell) # (e.g., EOF while scanning string literal) yield _new_token('ERRORTOKEN', e.args[0], (0, 0)) break except IndentationError as e: # this is never recoverable yield _new_token('ERRORTOKEN', e, (0, 0)) break # synthesize a new PLY token def _new_token(type, value, pos): o = LexToken() o.type = type o.value = value o.lineno, o.lexpos = pos return o class Lexer(object): """Implements a lexer for the xonsh language.""" _tokens = None def __init__(self): """ Attributes ---------- fname : str Filename last : token The last token seen. lineno : int The last line number seen. """ self.fname = '' self.last = None self.beforelast = None def build(self, **kwargs): """Part of the PLY lexer API.""" pass def reset(self): pass def input(self, s): """Calls the lexer on the string s.""" self.token_stream = get_tokens(s) def token(self): """Retrieves the next token.""" self.beforelast = self.last self.last = next(self.token_stream, None) return self.last def __iter__(self): t = self.token() while t is not None: yield t t = self.token() def split(self, s): """Splits a string into a list of strings which are whitespace-separated tokens. """ vals = [] self.input(s) l = c = -1 ws = 'WS' nl = '\n' for t in self: if t.type == ws: continue elif l < t.lineno: vals.append(t.value) elif len(vals) > 0 and c == t.lexpos: vals[-1] = vals[-1] + t.value else: vals.append(t.value) nnl = t.value.count(nl) if nnl == 0: l = t.lineno c = t.lexpos + len(t.value) else: l = t.lineno + nnl c = len(t.value.rpartition(nl)[-1]) return vals # # All the tokens recognized by the lexer # @property def tokens(self): if self._tokens is None: t = tuple(token_map.values()) + ( 'NAME', # name tokens 'BANG', # ! tokens 'WS', # whitespace in subprocess mode 'LPAREN', 'RPAREN', # ( ) 'LBRACKET', 'RBRACKET', # [ ] 'LBRACE', 'RBRACE', # { } 'AT_LPAREN', # @( 'BANG_LPAREN', # !( 'BANG_LBRACKET', # ![ 'DOLLAR_LPAREN', # $( 'DOLLAR_LBRACE', # ${ 'DOLLAR_LBRACKET', # $[ 'ATDOLLAR_LPAREN', # @$( 'ERRORTOKEN', # whoops! 
) + tuple(i.upper() for i in kwmod.kwlist) self._tokens = t return self._tokens xonsh-0.6.0/xonsh/macutils.py000066400000000000000000000014041320541242300162010ustar00rootroot00000000000000"""Provides some Mac / Darwin based utility functions for xonsh.""" from ctypes import c_uint, byref, create_string_buffer from xonsh.platform import LIBC def sysctlbyname(name, return_str=True): """Gets a sysctl value by name. If return_str is true, this will return a string representation, else it will return the raw value. """ # forked from https://gist.github.com/pudquick/581a71425439f2cf8f09 size = c_uint(0) # Find out how big our buffer will be LIBC.sysctlbyname(name, None, byref(size), None, 0) # Make the buffer buf = create_string_buffer(size.value) # Re-run, but provide the buffer LIBC.sysctlbyname(name, buf, byref(size), None, 0) if return_str: return buf.value else: return buf.raw xonsh-0.6.0/xonsh/main.py000066400000000000000000000342741320541242300153170ustar00rootroot00000000000000# -*- coding: utf-8 -*- """The main xonsh script.""" import os import sys import enum import argparse import builtins import contextlib import signal import traceback from xonsh import __version__ from xonsh.timings import setup_timings from xonsh.lazyasd import lazyobject from xonsh.shell import Shell from xonsh.pretty import pretty from xonsh.execer import Execer from xonsh.proc import HiddenCommandPipeline from xonsh.jobs import ignore_sigtstp from xonsh.tools import setup_win_unicode_console, print_color, to_bool_or_int from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS from xonsh.codecache import run_script_with_cache, run_code_with_cache from xonsh.xonfig import print_welcome_screen from xonsh.lazyimps import pygments, pyghooks from xonsh.imphooks import install_import_hooks from xonsh.events import events from xonsh.environ import xonshrc_context events.transmogrify('on_post_init', 'LoadEvent') events.doc('on_post_init', """ on_post_init() -> None Fired after all initialization is finished and we're ready to do work. NOTE: This is fired before the wizard is automatically started. """) events.transmogrify('on_exit', 'LoadEvent') events.doc('on_exit', """ on_exit() -> None Fired after all commands have been executed, before tear-down occurs. NOTE: All the caveats of the ``atexit`` module also apply to this event. """) events.transmogrify('on_pre_cmdloop', 'LoadEvent') events.doc('on_pre_cmdloop', """ on_pre_cmdloop() -> None Fired just before the command loop is started, if it is. """) events.transmogrify('on_post_cmdloop', 'LoadEvent') events.doc('on_post_cmdloop', """ on_post_cmdloop() -> None Fired just after the command loop finishes, if it is. NOTE: All the caveats of the ``atexit`` module also apply to this event. """) events.transmogrify('on_pre_rc', 'LoadEvent') events.doc('on_pre_rc', """ on_pre_rc() -> None Fired just before rc files are loaded, if they are. """) events.transmogrify('on_post_rc', 'LoadEvent') events.doc('on_post_rc', """ on_post_rc() -> None Fired just after rc files are loaded, if they are. 
""") def get_setproctitle(): """Proxy function for loading process title""" try: from setproctitle import setproctitle as spt except ImportError: return return spt def path_argument(s): """Return a path only if the path is actually legal This is very similar to argparse.FileType, except that it doesn't return an open file handle, but rather simply validates the path.""" s = os.path.abspath(os.path.expanduser(s)) if not os.path.isfile(s): msg = '{0!r} must be a valid path to a file'.format(s) raise argparse.ArgumentTypeError(msg) return s @lazyobject def parser(): p = argparse.ArgumentParser(description='xonsh', add_help=False) p.add_argument('-h', '--help', dest='help', action='store_true', default=False, help='show help and exit') p.add_argument('-V', '--version', dest='version', action='store_true', default=False, help='show version information and exit') p.add_argument('-c', help="Run a single command and exit", dest='command', required=False, default=None) p.add_argument('-i', '--interactive', help='force running in interactive mode', dest='force_interactive', action='store_true', default=False) p.add_argument('-l', '--login', help='run as a login shell', dest='login', action='store_true', default=False) p.add_argument('--config-path', help='DEPRECATED: static configuration files may now be used ' 'in the XONSHRC file list, see the --rc option.', dest='config_path', default=None, type=path_argument) p.add_argument('--rc', help="The xonshrc files to load, these may be either xonsh " "files or JSON-based static configuration files.", dest='rc', nargs='+', type=path_argument, default=None) p.add_argument('--no-rc', help="Do not load the .xonshrc files", dest='norc', action='store_true', default=False) p.add_argument('--no-script-cache', help="Do not cache scripts as they are run", dest='scriptcache', action='store_false', default=True) p.add_argument('--cache-everything', help="Use a cache, even for interactive commands", dest='cacheall', action='store_true', default=False) p.add_argument('-D', dest='defines', help='define an environment variable, in the form of ' '-DNAME=VAL. May be used many times.', metavar='ITEM', action='append', default=None) p.add_argument('--shell-type', help='What kind of shell should be used. ' 'Possible options: readline, prompt_toolkit, random. ' 'Warning! If set this overrides $SHELL_TYPE variable.', dest='shell_type', choices=('readline', 'prompt_toolkit', 'best', 'random'), default=None) p.add_argument('--timings', help='Prints timing information before the prompt is shown. 
' 'This is useful while tracking down performance issues ' 'and investigating startup times.', dest='timings', action='store_true', default=None) p.add_argument('file', metavar='script-file', help='If present, execute the script in script-file' ' and exit', nargs='?', default=None) p.add_argument('args', metavar='args', help='Additional arguments to the script specified ' 'by script-file', nargs=argparse.REMAINDER, default=[]) return p def _pprint_displayhook(value): if value is None: return builtins._ = None # Set '_' to None to avoid recursion if isinstance(value, HiddenCommandPipeline): builtins._ = value return env = builtins.__xonsh_env__ if env.get('PRETTY_PRINT_RESULTS'): printed_val = pretty(value) else: printed_val = repr(value) if HAS_PYGMENTS and env.get('COLOR_RESULTS'): tokens = list(pygments.lex(printed_val, lexer=pyghooks.XonshLexer())) print_color(tokens) else: print(printed_val) # black & white case builtins._ = value class XonshMode(enum.Enum): single_command = 0 script_from_file = 1 script_from_stdin = 2 interactive = 3 def start_services(shell_kwargs, args): """Starts up the essential services in the proper order. This returns the environment instance as a convenience. """ install_import_hooks() # create execer, which loads builtins ctx = shell_kwargs.get('ctx', {}) debug = to_bool_or_int(os.getenv('XONSH_DEBUG', '0')) events.on_timingprobe.fire(name='pre_execer_init') execer = Execer(xonsh_ctx=ctx, debug_level=debug, scriptcache=shell_kwargs.get('scriptcache', True), cacheall=shell_kwargs.get('cacheall', False)) events.on_timingprobe.fire(name='post_execer_init') # load rc files login = shell_kwargs.get('login', True) env = builtins.__xonsh_env__ rc = shell_kwargs.get('rc', None) rc = env.get('XONSHRC') if rc is None else rc if args.mode != XonshMode.interactive and not args.force_interactive: # Don't load xonshrc if not interactive shell rc = None events.on_pre_rc.fire() xonshrc_context(rcfiles=rc, execer=execer, ctx=ctx, env=env, login=login) events.on_post_rc.fire() # create shell builtins.__xonsh_shell__ = Shell(execer=execer, **shell_kwargs) ctx['__name__'] = '__main__' return env def premain(argv=None): """Setup for main xonsh entry point. 
Returns parsed arguments.""" if argv is None: argv = sys.argv[1:] setup_timings() setproctitle = get_setproctitle() if setproctitle is not None: setproctitle(' '.join(['xonsh'] + argv)) builtins.__xonsh_ctx__ = {} args = parser.parse_args(argv) if args.help: parser.print_help() parser.exit() if args.version: version = '/'.join(('xonsh', __version__)) print(version) parser.exit() shell_kwargs = {'shell_type': args.shell_type, 'completer': False, 'login': False, 'scriptcache': args.scriptcache, 'cacheall': args.cacheall, 'ctx': builtins.__xonsh_ctx__} if args.login: shell_kwargs['login'] = True if args.norc: shell_kwargs['rc'] = () elif args.rc: shell_kwargs['rc'] = args.rc setattr(sys, 'displayhook', _pprint_displayhook) if args.command is not None: args.mode = XonshMode.single_command shell_kwargs['shell_type'] = 'none' elif args.file is not None: args.mode = XonshMode.script_from_file shell_kwargs['shell_type'] = 'none' elif not sys.stdin.isatty() and not args.force_interactive: args.mode = XonshMode.script_from_stdin shell_kwargs['shell_type'] = 'none' else: args.mode = XonshMode.interactive shell_kwargs['completer'] = True shell_kwargs['login'] = True env = start_services(shell_kwargs, args) env['XONSH_LOGIN'] = shell_kwargs['login'] if args.defines is not None: env.update([x.split('=', 1) for x in args.defines]) env['XONSH_INTERACTIVE'] = args.force_interactive or (args.mode == XonshMode.interactive) if ON_WINDOWS: setup_win_unicode_console(env.get('WIN_UNICODE_CONSOLE', True)) return args def _failback_to_other_shells(args, err): # only failback for interactive shell; if we cannot tell, treat it # as an interactive one for safe. if hasattr(args, 'mode') and args.mode != XonshMode.interactive: raise err foreign_shell = None shells_file = '/etc/shells' if not os.path.exists(shells_file): # right now, it will always break here on Windows raise err excluded_list = ['xonsh', 'screen'] with open(shells_file) as f: for line in f: line = line.strip() if not line or line.startswith('#'): continue if '/' not in line: continue _, shell = line.rsplit('/', 1) if shell in excluded_list: continue if not os.path.exists(line): continue foreign_shell = line break if foreign_shell: traceback.print_exc() print('Xonsh encountered an issue during launch', file=sys.stderr) print('Failback to {}'.format(foreign_shell), file=sys.stderr) os.execlp(foreign_shell, foreign_shell) else: raise err def main(argv=None): args = None try: args = premain(argv) return main_xonsh(args) except Exception as err: _failback_to_other_shells(args, err) def main_xonsh(args): """Main entry point for xonsh cli.""" if not ON_WINDOWS: def func_sig_ttin_ttou(n, f): pass signal.signal(signal.SIGTTIN, func_sig_ttin_ttou) signal.signal(signal.SIGTTOU, func_sig_ttin_ttou) events.on_post_init.fire() env = builtins.__xonsh_env__ shell = builtins.__xonsh_shell__ try: if args.mode == XonshMode.interactive: # enter the shell env['XONSH_INTERACTIVE'] = True ignore_sigtstp() if (env['XONSH_INTERACTIVE'] and not env['LOADED_CONFIG'] and not any(os.path.isfile(i) for i in env['XONSHRC'])): print_welcome_screen() events.on_pre_cmdloop.fire() try: shell.shell.cmdloop() finally: events.on_post_cmdloop.fire() elif args.mode == XonshMode.single_command: # run a single command and exit run_code_with_cache(args.command.lstrip(), shell.execer, mode='single') elif args.mode == XonshMode.script_from_file: # run a script contained in a file path = os.path.abspath(os.path.expanduser(args.file)) if os.path.isfile(path): sys.argv = [args.file] + args.args 
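# Illustrative sketch (not part of the original module): premain() above
# selects the run mode in this priority order.  A condensed standalone
# version of that decision; the argument values used here are hypothetical.
def _example_pick_mode(command, file, stdin_is_tty, force_interactive):
    if command is not None:
        return 'single_command'
    if file is not None:
        return 'script_from_file'
    if not stdin_is_tty and not force_interactive:
        return 'script_from_stdin'
    return 'interactive'


assert _example_pick_mode(None, None, True, False) == 'interactive'
assert _example_pick_mode('echo hi', None, True, False) == 'single_command'
assert _example_pick_mode(None, 'run.xsh', False, False) == 'script_from_file'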
env['ARGS'] = sys.argv[:] # $ARGS is not sys.argv env['XONSH_SOURCE'] = path shell.ctx.update({'__file__': args.file, '__name__': '__main__'}) run_script_with_cache(args.file, shell.execer, glb=shell.ctx, loc=None, mode='exec') else: print('xonsh: {0}: No such file or directory.'.format(args.file)) elif args.mode == XonshMode.script_from_stdin: # run a script given on stdin code = sys.stdin.read() run_code_with_cache(code, shell.execer, glb=shell.ctx, loc=None, mode='exec') finally: events.on_exit.fire() postmain(args) def postmain(args=None): """Teardown for main xonsh entry point, accepts parsed arguments.""" if ON_WINDOWS: setup_win_unicode_console(enable=False) if hasattr(builtins, '__xonsh_shell__'): del builtins.__xonsh_shell__ @contextlib.contextmanager def main_context(argv=None): """Generator that runs pre- and post-main() functions. This has two iterations. The first yields the shell. The second returns None but cleans up the shell. """ args = premain(argv) yield builtins.__xonsh_shell__ postmain(args) xonsh-0.6.0/xonsh/openpy.py000066400000000000000000000100031320541242300156650ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tools to open ``*.py`` files as Unicode. Uses the encoding specified within the file, as per PEP 263. Much of the code is taken from the tokenize module in Python 3.2. This file was forked from the IPython project: * Copyright (c) 2008-2014, IPython Development Team * Copyright (C) 2001-2007 Fernando Perez * Copyright (c) 2001, Janko Hauser * Copyright (c) 2001, Nathaniel Gray """ import io import re from xonsh.lazyasd import LazyObject from xonsh.tokenize import detect_encoding, tokopen cookie_comment_re = LazyObject( lambda: re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE), globals(), 'cookie_comment_re') def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True): """Converts a bytes string with python source code to unicode. Unicode strings are passed through unchanged. Byte strings are checked for the python source file encoding cookie to determine encoding. txt can be either a bytes buffer or a string containing the source code. """ if isinstance(txt, str): return txt if isinstance(txt, bytes): buf = io.BytesIO(txt) else: buf = txt try: encoding, _ = detect_encoding(buf.readline) except SyntaxError: encoding = "ascii" buf.seek(0) text = io.TextIOWrapper(buf, encoding, errors=errors, line_buffering=True) text.mode = 'r' if skip_encoding_cookie: return u"".join(strip_encoding_cookie(text)) else: return text.read() def strip_encoding_cookie(filelike): """Generator to pull lines from a text-mode file, skipping the encoding cookie if it is found in the first two lines. """ it = iter(filelike) try: first = next(it) if not cookie_comment_re.match(first): yield first second = next(it) if not cookie_comment_re.match(second): yield second except StopIteration: return for line in it: yield line def read_py_file(filename, skip_encoding_cookie=True): """Read a Python file, using the encoding declared inside the file. Parameters ---------- filename : str The path to the file to read. skip_encoding_cookie : bool If True (the default), and the encoding declaration is found in the first two lines, that line will be excluded from the output - compiling a unicode string with an encoding declaration is a SyntaxError in Python 2. Returns ------- A unicode string containing the contents of the file. """ with tokopen(filename) as f: # the open function defined in this module. 
if skip_encoding_cookie: return "".join(strip_encoding_cookie(f)) else: return f.read() def read_py_url(url, errors='replace', skip_encoding_cookie=True): """Read a Python file from a URL, using the encoding declared inside the file. Parameters ---------- url : str The URL from which to fetch the file. errors : str How to handle decoding errors in the file. Options are the same as for bytes.decode(), but here 'replace' is the default. skip_encoding_cookie : bool If True (the default), and the encoding declaration is found in the first two lines, that line will be excluded from the output - compiling a unicode string with an encoding declaration is a SyntaxError in Python 2. Returns ------- A unicode string containing the contents of the file. """ # Deferred import for faster start try: from urllib.request import urlopen # Py 3 except ImportError: from urllib import urlopen response = urlopen(url) buf = io.BytesIO(response.read()) return source_to_unicode(buf, errors, skip_encoding_cookie) def _list_readline(x): """Given a list, returns a readline() function that returns the next element with each call. """ x = iter(x) def readline(): return next(x) return readline xonsh-0.6.0/xonsh/parser.py000066400000000000000000000006441320541242300156610ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements the xonsh parser.""" from xonsh.lazyasd import lazyobject from xonsh.platform import PYTHON_VERSION_INFO @lazyobject def Parser(): if PYTHON_VERSION_INFO > (3, 6): from xonsh.parsers.v36 import Parser as p elif PYTHON_VERSION_INFO > (3, 5): from xonsh.parsers.v35 import Parser as p else: from xonsh.parsers.v34 import Parser as p return p xonsh-0.6.0/xonsh/parsers/000077500000000000000000000000001320541242300154665ustar00rootroot00000000000000xonsh-0.6.0/xonsh/parsers/__init__.py000066400000000000000000000000001320541242300175650ustar00rootroot00000000000000xonsh-0.6.0/xonsh/parsers/base.py000066400000000000000000002776541320541242300167770ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements the base xonsh parser.""" import os import re import time import textwrap from threading import Thread from ast import parse as pyparse from collections import Iterable, Sequence, Mapping try: from ply import yacc except ImportError: from xonsh.ply.ply import yacc from xonsh import ast from xonsh.ast import has_elts, xonsh_call from xonsh.lexer import Lexer, LexToken from xonsh.platform import PYTHON_VERSION_INFO from xonsh.tokenize import SearchPath, StringPrefix from xonsh.lazyasd import LazyObject from xonsh.parsers.context_check import check_contexts RE_SEARCHPATH = LazyObject(lambda: re.compile(SearchPath), globals(), 'RE_SEARCHPATH') RE_STRINGPREFIX = LazyObject(lambda: re.compile(StringPrefix), globals(), 'RE_STRINGPREFIX') class Location(object): """Location in a file.""" def __init__(self, fname, lineno, column=None): """Takes a filename, line number, and optionally a column number.""" self.fname = fname self.lineno = lineno self.column = column def __str__(self): s = '{0}:{1}'.format(self.fname, self.lineno) if self.column is not None: s += ':{0}'.format(self.column) return s def ensure_has_elts(x, lineno=None, col_offset=None): """Ensures that x is an AST node with elements.""" if not has_elts(x): if not isinstance(x, Iterable): x = [x] lineno = x[0].lineno if lineno is None else lineno col_offset = x[0].col_offset if col_offset is None else col_offset x = ast.Tuple(elts=x, ctx=ast.Load(), lineno=lineno, col_offset=col_offset) return x def empty_list(lineno=None, col=None): 
"""Creates the AST node for an empty list.""" return ast.List(elts=[], ctx=ast.Load(), lineno=lineno, col_offset=col) def binop(x, op, y, lineno=None, col=None): """Creates the AST node for a binary operation.""" lineno = x.lineno if lineno is None else lineno col = x.col_offset if col is None else col return ast.BinOp(left=x, op=op, right=y, lineno=lineno, col_offset=col) def call_split_lines(x, lineno=None, col=None): """Creates the AST node for calling the 'splitlines' attribute of an object, nominally a string. """ return ast.Call(func=ast.Attribute(value=x, attr='splitlines', ctx=ast.Load(), lineno=lineno, col_offset=col), args=[], keywords=[], starargs=None, kwargs=None, lineno=lineno, col_offset=col) def ensure_list_from_str_or_list(x, lineno=None, col=None): """Creates the AST node for the following expression:: [x] if isinstance(x, str) else x Somewhat useful. """ return ast.IfExp(test=ast.Call(func=ast.Name(id='isinstance', ctx=ast.Load(), lineno=lineno, col_offset=col), args=[x, ast.Name(id='str', ctx=ast.Load(), lineno=lineno, col_offset=col)], keywords=[], starargs=None, kwargs=None, lineno=lineno, col_offset=col), body=ast.List(elts=[x], ctx=ast.Load(), lineno=lineno, col_offset=col), orelse=x, lineno=lineno, col_offset=col) def xonsh_help(x, lineno=None, col=None): """Creates the AST node for calling the __xonsh_help__() function.""" return xonsh_call('__xonsh_help__', [x], lineno=lineno, col=col) def xonsh_superhelp(x, lineno=None, col=None): """Creates the AST node for calling the __xonsh_superhelp__() function.""" return xonsh_call('__xonsh_superhelp__', [x], lineno=lineno, col=col) def xonsh_pathsearch(pattern, pymode=False, lineno=None, col=None): """Creates the AST node for calling the __xonsh_pathsearch__() function. The pymode argument indicate if it is called from subproc or python mode""" pymode = ast.NameConstant(value=pymode, lineno=lineno, col_offset=col) searchfunc, pattern = RE_SEARCHPATH.match(pattern).groups() pattern = ast.Str(s=pattern, lineno=lineno, col_offset=col) pathobj = False if searchfunc.startswith('@'): func = searchfunc[1:] elif 'g' in searchfunc: func = '__xonsh_globsearch__' pathobj = 'p' in searchfunc else: func = '__xonsh_regexsearch__' pathobj = 'p' in searchfunc func = ast.Name(id=func, ctx=ast.Load(), lineno=lineno, col_offset=col) pathobj = ast.NameConstant(value=pathobj, lineno=lineno, col_offset=col) return xonsh_call('__xonsh_pathsearch__', args=[func, pattern, pymode, pathobj], lineno=lineno, col=col) def load_ctx(x): """Recursively sets ctx to ast.Load()""" if not hasattr(x, 'ctx'): return x.ctx = ast.Load() if isinstance(x, (ast.Tuple, ast.List)): for e in x.elts: load_ctx(e) elif isinstance(x, ast.Starred): load_ctx(x.value) def store_ctx(x): """Recursively sets ctx to ast.Store()""" if not hasattr(x, 'ctx'): return x.ctx = ast.Store() if isinstance(x, (ast.Tuple, ast.List)): for e in x.elts: store_ctx(e) elif isinstance(x, ast.Starred): store_ctx(x.value) def del_ctx(x): """Recursively sets ctx to ast.Del()""" if not hasattr(x, 'ctx'): return x.ctx = ast.Del() if isinstance(x, (ast.Tuple, ast.List)): for e in x.elts: del_ctx(e) elif isinstance(x, ast.Starred): del_ctx(x.value) def empty_list_if_newline(x): return [] if x == '\n' else x def lopen_loc(x): """Extracts the line and column number for a node that may have an opening parenthesis, brace, or bracket. 
""" lineno = x._lopen_lineno if hasattr(x, '_lopen_lineno') else x.lineno col = x._lopen_col if hasattr(x, '_lopen_col') else x.col_offset return lineno, col class YaccLoader(Thread): """Thread to load (but not shave) the yacc parser.""" def __init__(self, parser, yacc_kwargs, *args, **kwargs): super().__init__(*args, **kwargs) self.daemon = True self.parser = parser self.yacc_kwargs = yacc_kwargs self.start() def run(self): self.parser.parser = yacc.yacc(**self.yacc_kwargs) class BaseParser(object): """A base class that parses the xonsh language.""" def __init__(self, lexer_optimize=True, lexer_table='xonsh.lexer_table', yacc_optimize=True, yacc_table='xonsh.parser_table', yacc_debug=False, outputdir=None): """Parameters ---------- lexer_optimize : bool, optional Set to false when unstable and true when lexer is stable. lexer_table : str, optional Lexer module used when optimized. yacc_optimize : bool, optional Set to false when unstable and true when parser is stable. yacc_table : str, optional Parser module used when optimized. yacc_debug : debug, optional Dumps extra debug info. outputdir : str or None, optional The directory to place generated tables within. Defaults to the root xonsh dir. """ self.lexer = lexer = Lexer() self.tokens = lexer.tokens self._lines = None self.xonsh_code = None self._attach_nocomma_tok_rules() self._attach_nocloser_base_rules() self._attach_nodedent_base_rules() self._attach_nonewline_base_rules() self._attach_subproc_arg_part_rules() opt_rules = [ 'newlines', 'arglist', 'func_call', 'rarrow_test', 'typedargslist', 'equals_test', 'colon_test', 'tfpdef', 'comma_tfpdef_list', 'comma_pow_tfpdef', 'vfpdef', 'comma_vfpdef_list', 'comma_pow_vfpdef', 'equals_yield_expr_or_testlist_list', 'testlist', 'as_name', 'period_or_ellipsis_list', 'comma_import_as_name_list', 'comma_dotted_as_name_list', 'comma_name_list', 'comma_test', 'elif_part_list', 'finally_part', 'varargslist', 'or_and_test_list', 'and_not_test_list', 'comp_op_expr_list', 'xor_and_expr_list', 'ampersand_shift_expr_list', 'shift_arith_expr_list', 'op_factor_list', 'trailer_list', 'testlist_comp', 'yield_expr_or_testlist_comp', 'dictorsetmaker', 'comma_subscript_list', 'test', 'sliceop', 'comp_iter', 'yield_arg', 'test_comma_list', 'macroarglist', 'any_raw_toks'] for rule in opt_rules: self._opt_rule(rule) list_rules = [ 'comma_tfpdef', 'comma_vfpdef', 'semi_small_stmt', 'comma_test_or_star_expr', 'period_or_ellipsis', 'comma_import_as_name', 'comma_dotted_as_name', 'period_name', 'comma_name', 'elif_part', 'except_part', 'comma_with_item', 'or_and_test', 'and_not_test', 'comp_op_expr', 'pipe_xor_expr', 'xor_and_expr', 'ampersand_shift_expr', 'shift_arith_expr', 'pm_term', 'op_factor', 'trailer', 'comma_subscript', 'comma_expr_or_star_expr', 'comma_test', 'comma_argument', 'comma_item', 'attr_period_name', 'test_comma', 'equals_yield_expr_or_testlist', 'comma_nocomma'] for rule in list_rules: self._list_rule(rule) tok_rules = ['def', 'class', 'return', 'number', 'name', 'bang', 'none', 'true', 'false', 'ellipsis', 'if', 'del', 'assert', 'lparen', 'lbrace', 'lbracket', 'string', 'times', 'plus', 'minus', 'divide', 'doublediv', 'mod', 'at', 'lshift', 'rshift', 'pipe', 'xor', 'ampersand', 'for', 'colon', 'import', 'except', 'nonlocal', 'global', 'yield', 'from', 'raise', 'with', 'dollar_lparen', 'dollar_lbrace', 'dollar_lbracket', 'try', 'bang_lparen', 'bang_lbracket', 'comma', 'rparen', 'rbracket', 'at_lparen', 'atdollar_lparen', 'indent', 'dedent', 'newline'] for rule in tok_rules: self._tok_rule(rule) 
yacc_kwargs = dict(module=self, debug=yacc_debug, start='start_symbols', optimize=yacc_optimize, tabmodule=yacc_table) if not yacc_debug: yacc_kwargs['errorlog'] = yacc.NullLogger() if outputdir is None: outputdir = os.path.dirname(os.path.dirname(__file__)) yacc_kwargs['outputdir'] = outputdir if yacc_debug: # create parser on main thread self.parser = yacc.yacc(**yacc_kwargs) else: self.parser = None YaccLoader(self, yacc_kwargs) # Keeps track of the last token given to yacc (the lookahead token) self._last_yielded_token = None def reset(self): """Resets for clean parsing.""" self.lexer.reset() self._last_yielded_token = None self._lines = None self.xonsh_code = None def parse(self, s, filename='', mode='exec', debug_level=0): """Returns an abstract syntax tree of xonsh code. Parameters ---------- s : str The xonsh code. filename : str, optional Name of the file. mode : str, optional Execution mode, one of: exec, eval, or single. debug_level : str, optional Debugging level passed down to yacc. Returns ------- tree : AST """ self.reset() self.xonsh_code = s self.lexer.fname = filename while self.parser is None: time.sleep(0.01) # block until the parser is ready tree = self.parser.parse(input=s, lexer=self.lexer, debug=debug_level) if tree is not None: check_contexts(tree) # hack for getting modes right if mode == 'single': if isinstance(tree, ast.Expression): tree = ast.Interactive(body=[self.expr(tree.body)]) elif isinstance(tree, ast.Module): tree = ast.Interactive(body=tree.body) return tree def _lexer_errfunc(self, msg, line, column): self._parse_error(msg, self.currloc(line, column)) def _yacc_lookahead_token(self): """Gets the next-to-last and last token seen by the lexer.""" return self.lexer.beforelast, self.lexer.last def _opt_rule(self, rulename): """For a rule name, creates an associated optional rule. '_opt' is appended to the rule name. """ def optfunc(self, p): p[0] = p[1] optfunc.__doc__ = ('{0}_opt : empty\n' ' | {0}').format(rulename) optfunc.__name__ = 'p_' + rulename + '_opt' setattr(self.__class__, optfunc.__name__, optfunc) def _list_rule(self, rulename): """For a rule name, creates an associated list rule. '_list' is appended to the rule name. """ def listfunc(self, p): p[0] = p[1] if len(p) == 2 else p[1] + p[2] listfunc.__doc__ = ('{0}_list : {0}\n' ' | {0}_list {0}').format(rulename) listfunc.__name__ = 'p_' + rulename + '_list' setattr(self.__class__, listfunc.__name__, listfunc) def _tok_rule(self, rulename): """For a rule name, creates a rule that returns the corresponding token. '_tok' is appended to the rule name. 
""" def tokfunc(self, p): s, t = self._yacc_lookahead_token() uprule = rulename.upper() if s is not None and s.type == uprule: p[0] = s elif t is not None and t.type == uprule: p[0] = t else: raise TypeError('token for {0!r} not found.'.format(rulename)) tokfunc.__doc__ = '{0}_tok : {1}'.format(rulename, rulename.upper()) tokfunc.__name__ = 'p_' + rulename + '_tok' setattr(self.__class__, tokfunc.__name__, tokfunc) def currloc(self, lineno, column=None): """Returns the current location.""" return Location(fname=self.lexer.fname, lineno=lineno, column=column) def expr(self, p): """Creates an expression for a token.""" expr = ast.Expr(value=p, lineno=p.lineno, col_offset=p.col_offset) expr.max_lineno = self.lineno expr.max_col = self.col return expr def token_col(self, t): """Gets ths token column""" return t.lexpos @property def lineno(self): if self.lexer.last is None: return 1 else: return self.lexer.last.lineno @property def col(self): s, t = self._yacc_lookahead_token() if t is not None: if t.type == 'NEWLINE': t = s return self.token_col(t) return 0 @property def lines(self): if self._lines is None and self.xonsh_code is not None: self._lines = self.xonsh_code.splitlines(keepends=True) return self._lines def source_slice(self, start, stop): """Gets the original source code from two (line, col) tuples in source-space (i.e. lineno start at 1). """ bline, bcol = start eline, ecol = stop bline -= 1 lines = self.lines[bline:eline] if ecol == 0: explen = eline - bline if explen == len(lines) and explen > 1: lines[-1] = '' else: lines[-1] = lines[-1][:ecol] lines[0] = lines[0][bcol:] return ''.join(lines) def _parse_error(self, msg, loc): if self.xonsh_code is None or loc is None: err_line_pointer = '' else: col = loc.column + 1 lines = self.lines if loc.lineno == 0: loc.lineno = len(lines) i = loc.lineno - 1 if 0 <= i < len(lines): err_line = lines[i].rstrip() err_line_pointer = '\n{}\n{: >{}}'.format(err_line, '^', col) else: err_line_pointer = '' err = SyntaxError('{0}: {1}{2}'.format(loc, msg, err_line_pointer)) err.loc = loc raise err # # Precedence of operators # precedence = (('left', 'PIPE'), ('left', 'XOR'), ('left', 'AMPERSAND'), ('left', 'EQ', 'NE'), ('left', 'GT', 'GE', 'LT', 'LE'), ('left', 'RSHIFT', 'LSHIFT'), ('left', 'PLUS', 'MINUS'), ('left', 'TIMES', 'DIVIDE', 'DOUBLEDIV', 'MOD'), ('left', 'POW'), ) # # Grammar as defined by BNF # def p_start_symbols(self, p): """start_symbols : single_input | file_input | eval_input | empty """ p[0] = p[1] def p_single_input(self, p): """single_input : compound_stmt NEWLINE """ p1 = empty_list_if_newline(p[1]) p0 = ast.Interactive(body=p1) p[0] = p0 def p_file_input(self, p): """file_input : file_stmts""" p[0] = ast.Module(body=p[1]) def p_file_stmts_nl(self, p): """file_stmts : newline_or_stmt""" # newline_or_stmt ENDMARKER p[0] = empty_list_if_newline(p[1]) def p_file_stmts_files(self, p): """file_stmts : file_stmts newline_or_stmt""" # file_input newline_or_stmt ENDMARKER p2 = empty_list_if_newline(p[2]) p[0] = p[1] + p2 def p_newline_or_stmt(self, p): """newline_or_stmt : NEWLINE | stmt """ p[0] = p[1] def p_newlines(self, p): """newlines : NEWLINE | newlines NEWLINE """ p[0] = p[1] if len(p) == 2 else p[1] + p[2] def p_eval_input(self, p): """eval_input : testlist newlines_opt """ p1 = p[1] p[0] = ast.Expression(body=p1, lineno=p1.lineno, col_offset=p1.col_offset) def p_func_call(self, p): """func_call : LPAREN arglist_opt RPAREN""" p[0] = p[2] def p_attr_period_name(self, p): """attr_period_name : PERIOD NAME""" p[0] = [p[2]] def 
p_attr_name_alone(self, p): """attr_name : name_tok""" p1 = p[1] p[0] = ast.Name(id=p1.value, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos) def p_attr_name_with(self, p): """attr_name : name_tok attr_period_name_list""" p1 = p[1] name = ast.Name(id=p1.value, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos) p2 = p[2] p0 = ast.Attribute(value=name, attr=p2[0], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos) for a in p2[1:]: p0 = ast.Attribute(value=p0, attr=a, ctx=ast.Load(), lineno=p0.lineno, col_offset=p0.col_offset) p[0] = p0 def p_decorator_no_call(self, p): """decorator : at_tok attr_name NEWLINE""" p[0] = p[2] def p_decorator_call(self, p): """decorator : at_tok attr_name func_call NEWLINE""" p1, name, p3 = p[1], p[2], p[3] if isinstance(name, ast.Attribute) or (p3 is not None): lineno, col = name.lineno, name.col_offset else: lineno, col = p1.lineno, p1.lexpos if p3 is None: p0 = ast.Call(func=name, args=[], keywords=[], starargs=None, kwargs=None, lineno=lineno, col_offset=col) else: p0 = ast.Call(func=name, lineno=lineno, col_offset=col, **p3) p[0] = p0 def p_decorators(self, p): """decorators : decorator | decorators decorator """ p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] def p_decorated(self, p): """decorated : decorators classdef_or_funcdef""" p1, p2 = p[1], p[2] targ = p2[0] targ.decorator_list = p1 # this is silly, CPython. This claims a func or class starts on # the line of the first decorator, rather than the 'def' or 'class' # line. However, it retains the original col_offset. targ.lineno = p1[0].lineno # async functions take the col number of the 'def', unless they are # decorated, in which case they have the col of the 'async'. WAT? if hasattr(targ, '_async_tok'): targ.col_offset = targ._async_tok.lexpos del targ._async_tok p[0] = p2 def p_rarrow_test(self, p): """rarrow_test : RARROW test""" p[0] = p[2] def p_funcdef(self, p): """funcdef : def_tok NAME parameters rarrow_test_opt COLON suite""" f = ast.FunctionDef(name=p[2], args=p[3], returns=p[4], body=p[6], decorator_list=[], lineno=p[1].lineno, col_offset=p[1].lexpos) p[0] = [f] def p_parameters(self, p): """parameters : LPAREN typedargslist_opt RPAREN""" p2 = p[2] if p2 is None: p2 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]) p[0] = p2 def p_equals_test(self, p): """equals_test : EQUALS test""" p[0] = p[2] def p_typedargslist_kwarg(self, p): """typedargslist : POW tfpdef""" p[0] = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[2], defaults=[]) def p_typedargslist_times4(self, p): """typedargslist : TIMES tfpdef_opt comma_pow_tfpdef_opt""" p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[3], defaults=[]) self._set_var_args(p0, p[2], None) p[0] = p0 def p_typedargslist_times5(self, p): """typedargslist : TIMES tfpdef_opt comma_tfpdef_list comma_pow_tfpdef_opt""" # *args, x, **kwargs p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[4], defaults=[]) self._set_var_args(p0, p[2], p[3]) # *args p[0] = p0 def p_typedargslist_t5(self, p): """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt""" # x p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) p[0] = p0 def p_typedargslist_t7(self, p): """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt POW tfpdef""" # x, **kwargs p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], 
kw_defaults=[], kwarg=p[6], defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) p[0] = p0 def p_typedargslist_t8(self, p): """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt TIMES tfpdef_opt comma_tfpdef_list_opt""" p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) self._set_var_args(p0, p[6], p[7]) p[0] = p0 def p_typedargslist_t10(self, p): """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt TIMES tfpdef_opt COMMA POW vfpdef""" # x, *args, **kwargs p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[9], defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) self._set_var_args(p0, p[6], None) p[0] = p0 def p_typedargslist_t11(self, p): """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt TIMES tfpdef_opt comma_tfpdef_list COMMA POW tfpdef""" # x, *args, **kwargs p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[10], defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) self._set_var_args(p0, p[6], p[7]) p[0] = p0 def p_colon_test(self, p): """colon_test : COLON test""" p[0] = p[2] def p_tfpdef(self, p): """tfpdef : name_tok colon_test_opt""" p1 = p[1] kwargs = {'arg': p1.value, 'annotation': p[2]} if PYTHON_VERSION_INFO >= (3, 5, 1): kwargs.update({ 'lineno': p1.lineno, 'col_offset': p1.lexpos, }) p[0] = ast.arg(**kwargs) def p_comma_tfpdef_empty(self, p): """comma_tfpdef : COMMA""" p[0] = [] def p_comma_tfpdef_args(self, p): """comma_tfpdef : COMMA tfpdef equals_test_opt""" p[0] = [{'arg': p[2], 'default': p[3]}] def p_comma_pow_tfpdef(self, p): """comma_pow_tfpdef : COMMA POW tfpdef""" p[0] = p[3] def _set_args_def(self, argmts, vals, kwargs=False): args, defs = (argmts.kwonlyargs, argmts.kw_defaults) if kwargs else \ (argmts.args, argmts.defaults) for v in vals: args.append(v['arg']) d = v['default'] if kwargs or (d is not None): defs.append(d) def _set_regular_args(self, p0, p1, p2, p3, p4): if p2 is None and p3 is None: # x p0.args.append(p1) elif p2 is not None and p3 is None: # x=42 p0.args.append(p1) p0.defaults.append(p2) elif p2 is None and p3 is not None: # x, y and x, y=42 p0.args.append(p1) self._set_args_def(p0, p3) else: # x=42, y=42 p0.args.append(p1) p0.defaults.append(p2) self._set_args_def(p0, p3) def _set_var_args(self, p0, vararg, kwargs): if vararg is None: self._set_args_def(p0, kwargs, kwargs=True) elif vararg is not None and kwargs is None: # *args p0.vararg = vararg elif vararg is not None and kwargs is not None: # *args, x and *args, x, y and *args, x=10 and *args, x=10, y # and *args, x, y=10, and *args, x=42, y=65 p0.vararg = vararg self._set_args_def(p0, kwargs, kwargs=True) else: assert False def p_varargslist_kwargs(self, p): """varargslist : POW vfpdef""" p[0] = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[2], defaults=[]) def p_varargslist_times4(self, p): """varargslist : TIMES vfpdef_opt comma_pow_vfpdef_opt""" p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[3], defaults=[]) self._set_var_args(p0, p[2], None) p[0] = p0 def p_varargslist_times5(self, p): """varargslist : TIMES vfpdef_opt comma_vfpdef_list comma_pow_vfpdef_opt""" # *args, x, **kwargs p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[4], defaults=[]) self._set_var_args(p0, p[2], p[3]) # *args p[0] = p0 def p_varargslist_v5(self, p): """varargslist : 
vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt""" # x p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) p[0] = p0 def p_varargslist_v7(self, p): """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt POW vfpdef""" # x, **kwargs p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[6], defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) p[0] = p0 def p_varargslist_v8(self, p): """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt comma_vfpdef_list_opt""" # x, *args p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) self._set_var_args(p0, p[6], p[7]) p[0] = p0 def p_varargslist_v10(self, p): """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt COMMA POW vfpdef""" # x, *args, **kwargs p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[9], defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) self._set_var_args(p0, p[6], None) p[0] = p0 def p_varargslist_v11(self, p): """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt comma_vfpdef_list COMMA POW vfpdef""" p0 = ast.arguments(args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[10], defaults=[]) self._set_regular_args(p0, p[1], p[2], p[3], p[4]) self._set_var_args(p0, p[6], p[7]) p[0] = p0 def p_vfpdef(self, p): """vfpdef : name_tok""" p1 = p[1] kwargs = {'arg': p1.value, 'annotation': None} if PYTHON_VERSION_INFO >= (3, 5, 1): kwargs.update({ 'lineno': p1.lineno, 'col_offset': p1.lexpos, }) p[0] = ast.arg(**kwargs) def p_comma_vfpdef_empty(self, p): """comma_vfpdef : COMMA""" p[0] = [] def p_comma_vfpdef_value(self, p): """comma_vfpdef : COMMA vfpdef equals_test_opt""" p[0] = [{'arg': p[2], 'default': p[3]}] def p_comma_pow_vfpdef(self, p): """comma_pow_vfpdef : COMMA POW vfpdef""" p[0] = p[3] def p_stmt(self, p): """stmt : simple_stmt | compound_stmt """ p[0] = p[1] def p_stmt_list(self, p): """stmt_list : stmt | stmt_list stmt """ if len(p) == 2: p[0] = p[1] else: p[0] = p[1] + p[2] def p_semi_opt(self, p): """semi_opt : SEMI | empty """ if len(p) == 2: p[0] = p[1] def p_semi_small_stmt(self, p): """semi_small_stmt : SEMI small_stmt""" p[0] = [p[2]] def p_simple_stmt_single(self, p): """simple_stmt : small_stmt semi_opt NEWLINE""" p[0] = [p[1]] def p_simple_stmt_many(self, p): """simple_stmt : small_stmt semi_small_stmt_list semi_opt NEWLINE""" p[0] = [p[1]] + p[2] def p_small_stmt(self, p): """small_stmt : expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt """ p[0] = p[1] _augassign_op = { '+=': ast.Add, '-=': ast.Sub, '*=': ast.Mult, '@=': ast.MatMult, '/=': ast.Div, '%=': ast.Mod, '//=': ast.FloorDiv, '**=': ast.Pow, '^=': ast.BitXor, '&=': ast.BitAnd, '|=': ast.BitOr, '<<=': ast.LShift, '>>=': ast.RShift } def p_expr_stmt_testlist_assign(self, p): """expr_stmt : testlist_star_expr equals_yield_expr_or_testlist_list_opt | testlist equals_yield_expr_or_testlist_list_opt """ p1, p2 = p[1], p[2] if isinstance(p1, ast.Tuple): p1 = [p1] if p2 is None and len(p1) == 1: p[0] = self.expr(p1[0]) elif p2 is None: assert False else: for targ in p1: store_ctx(targ) list(map(store_ctx, p2[:-1])) lineno, col = lopen_loc(p1[0]) p[0] = ast.Assign(targets=p1 + p2[:-1], value=p2[-1], lineno=lineno, 
col_offset=col) def p_expr_stmt_augassign(self, p): """expr_stmt : testlist_star_expr augassign yield_expr_or_testlist""" p1, p2 = p[1], p[2] if not isinstance(p1, ast.Tuple): p1 = p1[0] store_ctx(p1) op = self._augassign_op[p2] if op is None: self._parse_error('operation {0!r} not supported'.format(p2), self.currloc(lineno=p.lineno, column=p.lexpos)) p[0] = ast.AugAssign(target=p1, op=op(), value=p[3], lineno=p1.lineno, col_offset=p1.col_offset) def store_star_expr(self, p1, p2, targs, rhs): """Stores complex unpacking statements that target *x variables.""" p1 = [] if p1 is None else p1 if isinstance(p1, ast.Tuple): p1 = [p1] for targ in p1: store_ctx(targ) store_ctx(p2) for targ in targs: store_ctx(targ) p1.append(p2) p1.extend(targs) p1 = [ast.Tuple(elts=p1, ctx=ast.Store(), lineno=p1[0].lineno, col_offset=p1[0].col_offset)] p0 = ast.Assign(targets=p1, value=rhs, lineno=p1[0].lineno, col_offset=p1[0].col_offset) return p0 def p_expr_stmt_star5(self, p): """expr_stmt : test_comma_list_opt star_expr comma_test_list equals_yield_expr_or_testlist""" targs, rhs = p[3], p[4][0] p[0] = self.store_star_expr(p[1], p[2], targs, rhs) def p_expr_stmt_star6(self, p): """expr_stmt : test_comma_list_opt star_expr comma_opt test_comma_list_opt equals_yield_expr_or_testlist""" targs, rhs = (p[4] or []), p[5][0] p[0] = self.store_star_expr(p[1], p[2], targs, rhs) def p_test_comma(self, p): """test_comma : test COMMA""" p[0] = [p[1]] def p_comma_opt(self, p): """comma_opt : COMMA | empty """ if len(p) == 2: p[0] = p[1] def p_test_or_star_expr(self, p): """test_or_star_expr : test | star_expr """ p[0] = p[1] def p_comma_test_or_star_expr(self, p): """comma_test_or_star_expr : COMMA test_or_star_expr""" p[0] = [p[2]] def p_testlist_star_expr(self, p): """testlist_star_expr : test_or_star_expr comma_test_or_star_expr_list comma_opt | test_or_star_expr comma_opt """ p1, p2 = p[1], p[2] if p2 is None: p0 = [p1] elif p2 == ',': p0 = [ast.Tuple(elts=[p1], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset)] else: p0 = [ast.Tuple(elts=[p1] + p2, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset)] p[0] = p0 def p_augassign(self, p): """augassign : PLUSEQUAL | MINUSEQUAL | TIMESEQUAL | ATEQUAL | DIVEQUAL | MODEQUAL | AMPERSANDEQUAL | PIPEEQUAL | XOREQUAL | LSHIFTEQUAL | RSHIFTEQUAL | POWEQUAL | DOUBLEDIVEQUAL """ p[0] = p[1] def p_yield_expr_or_testlist(self, p): """yield_expr_or_testlist : yield_expr | testlist """ p[0] = p[1] def p_equals_yield_expr_or_testlist(self, p): """equals_yield_expr_or_testlist : EQUALS yield_expr_or_testlist""" p[0] = [p[2]] # # For normal assignments, additional restrictions enforced # by the interpreter # def p_del_stmt(self, p): """del_stmt : del_tok exprlist""" p1 = p[1] p2 = p[2] for targ in p2: del_ctx(targ) p0 = ast.Delete(targets=p2, ctx=ast.Del(), lineno=p1.lineno, col_offset=p1.lexpos) p[0] = p0 def p_pass_stmt(self, p): """pass_stmt : PASS""" p[0] = ast.Pass(lineno=self.lineno, col_offset=self.col) def p_flow_stmt(self, p): """flow_stmt : break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt """ p[0] = p[1] def p_break_stmt(self, p): """break_stmt : BREAK""" p[0] = ast.Break(lineno=self.lineno, col_offset=self.col) def p_continue_stmt(self, p): """continue_stmt : CONTINUE""" p[0] = ast.Continue(lineno=self.lineno, col_offset=self.col) def p_return_stmt(self, p): """return_stmt : return_tok testlist_opt""" p1 = p[1] p[0] = ast.Return(value=p[2], lineno=p1.lineno, col_offset=p1.lexpos) def p_yield_stmt(self, p): """yield_stmt : yield_expr""" p[0] 
= self.expr(p[1]) def p_raise_stmt_r1(self, p): """raise_stmt : raise_tok""" p1 = p[1] p[0] = ast.Raise(exc=None, cause=None, lineno=p1.lineno, col_offset=p1.lexpos) def p_raise_stmt_r2(self, p): """raise_stmt : raise_tok test""" p1 = p[1] p[0] = ast.Raise(exc=p[2], cause=None, lineno=p1.lineno, col_offset=p1.lexpos) def p_raise_stmt_r3(self, p): """raise_stmt : raise_tok test FROM test""" p1 = p[1] p[0] = ast.Raise(exc=p[2], cause=p[4], lineno=p1.lineno, col_offset=p1.lexpos) def p_import_stmt(self, p): """import_stmt : import_name | import_from """ p[0] = p[1] def p_import_name(self, p): """import_name : import_tok dotted_as_names """ p1 = p[1] p[0] = ast.Import(names=p[2], lineno=p1.lineno, col_offset=p1.lexpos) def p_import_from_pre_f3(self, p): """import_from_pre : from_tok period_or_ellipsis_list""" p1 = p[1] p[0] = (p[2], p1.lineno, p1.lexpos) def p_import_from_pre_f4(self, p): """import_from_pre : from_tok period_or_ellipsis_list_opt dotted_name""" p1, p2, p3 = p[1], p[2], p[3] p0 = p3 if p2 is None else p2 + p3 p[0] = (p0, p1.lineno, p1.lexpos) def p_import_from_post_times(self, p): """import_from_post : TIMES""" p[0] = [ast.alias(name='*', asname=None)] def p_import_from_post_as(self, p): """import_from_post : import_as_names""" p[0] = p[1] def p_import_from_post_paren(self, p): """import_from_post : LPAREN import_as_names RPAREN""" p[0] = p[2] def p_import_from(self, p): """import_from : import_from_pre IMPORT import_from_post""" # note below: the ('.' | '...') is necessary because '...' is # tokenized as ELLIPSIS p1, lineno, col = p[1] mod = p1.lstrip('.') lvl = len(p1) - len(mod) mod = mod or None p[0] = ast.ImportFrom(module=mod, names=p[3], level=lvl, lineno=lineno, col_offset=col) def p_period_or_ellipsis(self, p): """period_or_ellipsis : PERIOD | ELLIPSIS """ p[0] = p[1] def p_as_name(self, p): """as_name : AS NAME""" p[0] = p[2] def p_import_as_name(self, p): """import_as_name : NAME as_name_opt""" p[0] = ast.alias(name=p[1], asname=p[2]) def p_comma_import_as_name(self, p): """comma_import_as_name : COMMA import_as_name """ p[0] = [p[2]] def p_dotted_as_name(self, p): """dotted_as_name : dotted_name as_name_opt""" p0 = ast.alias(name=p[1], asname=p[2]) p[0] = p0 def p_comma_dotted_as_name(self, p): """comma_dotted_as_name : COMMA dotted_as_name""" p[0] = [p[2]] def p_import_as_names(self, p): """import_as_names : import_as_name comma_import_as_name_list_opt comma_opt """ p1, p2 = p[1], p[2] p0 = [p1] if p2 is not None: p0.extend(p2) p[0] = p0 def p_dotted_as_names(self, p): """dotted_as_names : dotted_as_name comma_dotted_as_name_list_opt""" p1, p2 = p[1], p[2] p0 = [p1] if p2 is not None: p0.extend(p2) p[0] = p0 def p_period_name(self, p): """period_name : PERIOD NAME""" p[0] = p[1] + p[2] def p_dotted_name(self, p): """dotted_name : NAME | NAME period_name_list """ p[0] = p[1] if len(p) == 2 else p[1] + p[2] def p_comma_name(self, p): """comma_name : COMMA NAME""" p[0] = [p[2]] def p_global_stmt(self, p): """global_stmt : global_tok NAME comma_name_list_opt""" p1, p2, p3 = p[1], p[2], p[3] names = [p2] if p3 is not None: names += p3 p[0] = ast.Global(names=names, lineno=p1.lineno, col_offset=p1.lexpos) def p_nonlocal_stmt(self, p): """nonlocal_stmt : nonlocal_tok NAME comma_name_list_opt""" p1, p2, p3 = p[1], p[2], p[3] names = [p2] if p3 is not None: names += p3 p[0] = ast.Nonlocal(names=names, lineno=p1.lineno, col_offset=p1.lexpos) def p_comma_test(self, p): """comma_test : COMMA test""" p[0] = [p[2]] def p_assert_stmt(self, p): """assert_stmt : assert_tok test 
comma_test_opt""" p1, p2, p3 = p[1], p[2], p[3] if p3 is not None: if len(p3) != 1: assert False p3 = p3[0] p[0] = ast.Assert(test=p2, msg=p3, lineno=p1.lineno, col_offset=p1.lexpos) def p_compound_stmt(self, p): """compound_stmt : if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated """ p[0] = p[1] def p_elif_part(self, p): """elif_part : ELIF test COLON suite""" p2 = p[2] p[0] = [ast.If(test=p2, body=p[4], orelse=[], lineno=p2.lineno, col_offset=p2.col_offset)] def p_else_part(self, p): """else_part : ELSE COLON suite""" p[0] = p[3] def p_if_stmt(self, p): """if_stmt : if_tok test COLON suite elif_part_list_opt | if_tok test COLON suite elif_part_list_opt else_part """ p1 = p[1] lastif = ast.If(test=p[2], body=p[4], orelse=[], lineno=p1.lineno, col_offset=p1.lexpos) p0 = [lastif] p5 = p[5] p6 = p[6] if len(p) > 6 else [] if p5 is not None: for elseif in p5: lastif.orelse.append(elseif) lastif = elseif lastif.orelse = p6 p[0] = p0 def p_while_stmt(self, p): """while_stmt : WHILE test COLON suite | WHILE test COLON suite else_part """ p5 = p[5] if len(p) > 5 else [] p[0] = [ast.While(test=p[2], body=p[4], orelse=p5, lineno=self.lineno, col_offset=self.col)] def p_for_stmt(self, p): """for_stmt : for_tok exprlist IN testlist COLON suite | for_tok exprlist IN testlist COLON suite else_part """ p1, p2 = p[1], p[2] p7 = p[7] if len(p) > 7 else [] if len(p2) == 1: p2 = p2[0] store_ctx(p2) else: for x in p2: store_ctx(x) p2 = ast.Tuple(elts=p2, ctx=ast.Store(), lineno=p2[0].lineno, col_offset=p2[0].col_offset) p[0] = [ast.For(target=p2, iter=p[4], body=p[6], orelse=p7, lineno=p1.lineno, col_offset=p1.lexpos)] def p_except_part(self, p): """except_part : except_clause COLON suite""" p0 = p[1] p0.body = p[3] p[0] = [p0] def p_finally_part(self, p): """finally_part : FINALLY COLON suite""" p[0] = p[3] def p_try_stmt_t5(self, p): """try_stmt : try_tok COLON suite finally_part""" p1 = p[1] p[0] = [ast.Try(body=p[3], handlers=[], orelse=[], finalbody=p[4], lineno=p1.lineno, col_offset=p1.lexpos)] def p_try_stmt_t6(self, p): """try_stmt : try_tok COLON suite except_part_list finally_part_opt""" p1 = p[1] p[0] = [ast.Try(body=p[3], handlers=p[4], orelse=[], finalbody=([] if p[5] is None else p[5]), lineno=p1.lineno, col_offset=p1.lexpos)] def p_try_stmt_t7(self, p): """try_stmt : try_tok COLON suite except_part_list else_part finally_part_opt""" p1 = p[1] p[0] = [ast.Try(body=p[3], handlers=p[4], orelse=([] if p[5] is None else p[5]), finalbody=([] if p[6] is None else p[6]), lineno=p1.lineno, col_offset=p1.lexpos)] def p_with_stmt_w5(self, p): """with_stmt : with_tok with_item COLON suite""" p1 = p[1] p[0] = [ast.With(items=[p[2]], body=p[4], lineno=p1.lineno, col_offset=p1.lexpos)] def p_with_stmt_p6(self, p): """with_stmt : with_tok with_item comma_with_item_list COLON suite""" p1 = p[1] p[0] = [ast.With(items=[p[2]] + p[3], body=p[5], lineno=p1.lineno, col_offset=p1.lexpos)] def p_with_bang_stmt_single_suite(self, p): """with_stmt : with_tok BANG with_item rawsuite""" p1, p3, p4 = p[1], p[3], p[4] expr = p3.context_expr l, c = expr.lineno, expr.col_offset gblcall = xonsh_call('globals', [], lineno=l, col=c) loccall = xonsh_call('locals', [], lineno=l, col=c) margs = [expr, p4, gblcall, loccall] p3.context_expr = xonsh_call('__xonsh_enter_macro__', margs, lineno=l, col=c) body = [ast.Pass(lineno=p4.lineno, col_offset=p4.col_offset)] p[0] = [ast.With(items=[p3], body=body, lineno=p1.lineno, col_offset=p1.lexpos)] def p_with_bang_stmt_many_suite(self, p): """with_stmt 
: with_tok BANG with_item comma_with_item_list rawsuite""" p1, p3, p4, p5 = p[1], p[3], p[4], p[5] items = [p3] + p4 for item in items: expr = item.context_expr l, c = expr.lineno, expr.col_offset gblcall = xonsh_call('globals', [], lineno=l, col=c) loccall = xonsh_call('locals', [], lineno=l, col=c) margs = [expr, p5, gblcall, loccall] item.context_expr = xonsh_call('__xonsh_enter_macro__', margs, lineno=l, col=c) body = [ast.Pass(lineno=p5.lineno, col_offset=p5.col_offset)] p[0] = [ast.With(items=items, body=body, lineno=p1.lineno, col_offset=p1.lexpos)] def p_as_expr(self, p): """as_expr : AS expr""" p2 = p[2] store_ctx(p2) p[0] = p2 def p_with_item(self, p): """with_item : test | test as_expr """ p2 = p[2] if len(p) > 2 else None p[0] = ast.withitem(context_expr=p[1], optional_vars=p2) def p_comma_with_item(self, p): """comma_with_item : COMMA with_item""" p[0] = [p[2]] def p_except_clause_e2(self, p): """except_clause : except_tok""" p1 = p[1] p[0] = ast.ExceptHandler(type=None, name=None, lineno=p1.lineno, col_offset=p1.lexpos) def p_except_clause(self, p): """except_clause : except_tok test as_name_opt""" p1 = p[1] p[0] = ast.ExceptHandler(type=p[2], name=p[3], lineno=p1.lineno, col_offset=p1.lexpos) def p_suite(self, p): """suite : simple_stmt | NEWLINE INDENT stmt_list DEDENT """ p[0] = p[1] if len(p) == 2 else p[3] def p_rawsuite_indent(self, p): """rawsuite : COLON NEWLINE indent_tok nodedent dedent_tok""" p3, p5 = p[3], p[5] beg = (p3.lineno, p3.lexpos) end = (p5.lineno, p5.lexpos) s = self.source_slice(beg, end) s = textwrap.dedent(s) p[0] = ast.Str(s=s, lineno=beg[0], col_offset=beg[1]) def p_rawsuite_simple_stmt(self, p): """rawsuite : colon_tok nonewline newline_tok""" p1, p3 = p[1], p[3] beg = (p1.lineno, p1.lexpos + 1) end = (p3.lineno, p3.lexpos) s = self.source_slice(beg, end).strip() p[0] = ast.Str(s=s, lineno=beg[0], col_offset=beg[1]) def _attach_nodedent_base_rules(self): toks = set(self.tokens) toks.remove('DEDENT') ts = '\n | '.join(sorted(toks)) doc = 'nodedent : ' + ts + '\n' self.p_nodedent_base.__func__.__doc__ = doc def p_nodedent_base(self, p): # see above attachment function pass def p_nodedent_any(self, p): """nodedent : INDENT any_dedent_toks DEDENT""" pass def p_nodedent_many(self, p): """nodedent : nodedent nodedent""" pass def p_any_dedent_tok(self, p): """any_dedent_tok : nodedent | DEDENT """ pass def p_any_dedent_toks(self, p): """any_dedent_toks : any_dedent_tok | any_dedent_toks any_dedent_tok """ pass def _attach_nonewline_base_rules(self): toks = set(self.tokens) toks -= {'NEWLINE', 'LPAREN', 'RPAREN', 'LBRACE', 'RBRACE', 'LBRACKET', 'RBRACKET', 'AT_LPAREN', 'BANG_LPAREN', 'BANG_LBRACKET', 'DOLLAR_LPAREN', 'DOLLAR_LBRACE', 'DOLLAR_LBRACKET', 'ATDOLLAR_LPAREN'} ts = '\n | '.join(sorted(toks)) doc = 'nonewline : ' + ts + '\n' self.p_nonewline_base.__func__.__doc__ = doc def p_nonewline_base(self, p): # see above attachment function pass def p_nonewline_any(self, p): """nonewline : any_nested_raw""" pass def p_nonewline_many(self, p): """nonewline : nonewline nonewline""" pass def p_test_ol(self, p): """test : or_test | lambdef """ p[0] = p[1] def p_test_o5(self, p): """test : or_test IF or_test ELSE test""" p[0] = ast.IfExp(test=p[3], body=p[1], orelse=p[5], lineno=self.lineno, col_offset=self.col) def p_test_nocond(self, p): """test_nocond : or_test | lambdef_nocond """ p[0] = p[1] def p_lambdef(self, p): """lambdef : LAMBDA varargslist_opt COLON test""" p2, p4 = p[2], p[4] if p2 is None: args = ast.arguments(args=[], vararg=None, kwonlyargs=[], 
kw_defaults=[], kwarg=None, defaults=[]) else: args = p2 p0 = ast.Lambda(args=args, body=p4, lineno=self.lineno, col_offset=self.col) p[0] = p0 def p_lambdef_nocond(self, p): """lambdef_nocond : LAMBDA varargslist_opt COLON test_nocond""" assert False def p_or_test(self, p): """or_test : and_test or_and_test_list_opt""" p1, p2 = p[1], p[2] if p2 is None: p0 = p1 elif len(p2) == 2: lineno, col = lopen_loc(p1) p0 = ast.BoolOp(op=p2[0], values=[p1, p2[1]], lineno=lineno, col_offset=col) else: lineno, col = lopen_loc(p1) p0 = ast.BoolOp(op=p2[0], values=[p[1]] + p2[1::2], lineno=lineno, col_offset=col) p[0] = p0 def p_or_and_test(self, p): """or_and_test : OR and_test""" p[0] = [ast.Or(), p[2]] def p_and_test(self, p): """and_test : not_test and_not_test_list_opt""" p1, p2 = p[1], p[2] if p2 is None: p0 = p1 elif len(p2) == 2: lineno, col = lopen_loc(p1) p0 = ast.BoolOp(op=p2[0], values=[p1, p2[1]], lineno=lineno, col_offset=col) else: lineno, col = lopen_loc(p1) p0 = ast.BoolOp(op=p2[0], values=[p1] + p2[1::2], lineno=lineno, col_offset=col) p[0] = p0 def p_and_not_test(self, p): """and_not_test : AND not_test""" p[0] = [ast.And(), p[2]] def p_not_test_not(self, p): """not_test : NOT not_test""" p[0] = ast.UnaryOp(op=ast.Not(), operand=p[2], lineno=self.lineno, col_offset=self.col) def p_not_test(self, p): """not_test : comparison""" p[0] = p[1] def p_comparison(self, p): """comparison : expr comp_op_expr_list_opt""" p1, p2 = p[1], p[2] if p2 is None: p0 = p1 else: p0 = ast.Compare(left=p1, ops=p2[::2], comparators=p2[1::2], lineno=p1.lineno, col_offset=p1.col_offset) p[0] = p0 def p_comp_op_expr(self, p): """comp_op_expr : comp_op expr""" p[0] = [p[1], p[2]] _comp_ops = { '<': ast.Lt, '>': ast.Gt, '==': ast.Eq, '>=': ast.GtE, '<=': ast.LtE, '!=': ast.NotEq, 'in': ast.In, ('not', 'in'): ast.NotIn, 'is': ast.Is, ('is', 'not'): ast.IsNot } def p_comp_op_monograph(self, p): """comp_op : LT | GT | EQ | GE | LE | NE | IN | IS """ p[0] = self._comp_ops[p[1]]() def p_comp_op_digraph(self, p): """comp_op : NOT IN | IS NOT """ p[0] = self._comp_ops[(p[1], p[2])]() def p_star_expr(self, p): """star_expr : times_tok expr""" p1 = p[1] p[0] = ast.Starred(value=p[2], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos) def _binop_combine(self, p1, p2): """Combines binary operations""" if p2 is None: p0 = p1 elif isinstance(p2, ast.BinOp): p2.left = p1 p0 = p2 elif isinstance(p2, Sequence) and isinstance(p2[0], ast.BinOp): p0 = p2[0] p0.left = p1 p0.lineno, p0.col_offset = lopen_loc(p1) for bop in p2[1:]: locer = p1 if p0.left is p1 else p0 bop.left = p0 p0.lineno, p0.col_offset = lopen_loc(locer) p0 = bop else: p0 = p1 + p2 return p0 def p_expr(self, p): """expr : xor_expr | xor_expr pipe_xor_expr_list """ p[0] = self._binop_combine(p[1], p[2] if len(p) > 2 else None) def p_pipe_xor_expr(self, p): """pipe_xor_expr : pipe_tok xor_expr""" p1 = p[1] p[0] = [ast.BinOp(left=None, op=ast.BitOr(), right=p[2], lineno=p1.lineno, col_offset=p1.lexpos)] def p_xor_expr(self, p): """xor_expr : and_expr xor_and_expr_list_opt""" p[0] = self._binop_combine(p[1], p[2]) def p_xor_and_expr(self, p): """xor_and_expr : xor_tok and_expr""" p1 = p[1] p[0] = [ast.BinOp(left=None, op=ast.BitXor(), right=p[2], lineno=p1.lineno, col_offset=p1.lexpos)] def p_and_expr(self, p): """and_expr : shift_expr ampersand_shift_expr_list_opt""" p[0] = self._binop_combine(p[1], p[2]) def p_ampersand_shift_expr(self, p): """ampersand_shift_expr : ampersand_tok shift_expr""" p1 = p[1] p[0] = [ast.BinOp(left=None, op=ast.BitAnd(), right=p[2], 
lineno=p1.lineno, col_offset=p1.lexpos)] def p_shift_expr(self, p): """shift_expr : arith_expr shift_arith_expr_list_opt""" p[0] = self._binop_combine(p[1], p[2]) def p_shift_arith_expr(self, p): """shift_arith_expr : lshift_tok arith_expr | rshift_tok arith_expr """ p1 = p[1] op = ast.LShift() if p1.value == '<<' else ast.RShift() p[0] = [ast.BinOp(left=None, op=op, right=p[2], lineno=p1.lineno, col_offset=p1.lexpos)] def p_arith_expr_single(self, p): """arith_expr : term""" p[0] = p[1] def p_arith_expr_many(self, p): """arith_expr : term pm_term_list""" p1, p2 = p[1], p[2] if len(p2) == 2: lineno, col = lopen_loc(p1) p0 = ast.BinOp(left=p1, op=p2[0], right=p2[1], lineno=lineno, col_offset=col) else: left = p1 for op, right in zip(p2[::2], p2[1::2]): locer = left if left is p1 else op lineno, col = lopen_loc(locer) left = ast.BinOp(left=left, op=op, right=right, lineno=lineno, col_offset=col) p0 = left p[0] = p0 _term_binops = { '+': ast.Add, '-': ast.Sub, '*': ast.Mult, '@': ast.MatMult, '/': ast.Div, '%': ast.Mod, '//': ast.FloorDiv } def p_pm_term(self, p): """pm_term : plus_tok term | minus_tok term """ p1 = p[1] op = self._term_binops[p1.value](lineno=p1.lineno, col_offset=p1.lexpos) p[0] = [op, p[2]] def p_term(self, p): """term : factor op_factor_list_opt""" p1, p2 = p[1], p[2] if p2 is None: p0 = p1 elif len(p2) == 2: lineno, col = lopen_loc(p1) p0 = ast.BinOp(left=p1, op=p2[0], right=p2[1], lineno=lineno, col_offset=col) else: left = p1 for op, right in zip(p2[::2], p2[1::2]): locer = left if left is p1 else op lineno, col = lopen_loc(locer) left = ast.BinOp(left=left, op=op, right=right, lineno=lineno, col_offset=col) p0 = left p[0] = p0 def p_op_factor(self, p): """op_factor : times_tok factor | at_tok factor | divide_tok factor | mod_tok factor | doublediv_tok factor """ p1 = p[1] op = self._term_binops[p1.value] if op is None: self._parse_error('operation {0!r} not supported'.format(p1), self.currloc(lineno=p.lineno, column=p.lexpos)) p[0] = [op(lineno=p1.lineno, col_offset=p1.lexpos), p[2]] _factor_ops = {'+': ast.UAdd, '-': ast.USub, '~': ast.Invert} def p_factor_power(self, p): """factor : power""" p[0] = p[1] def p_factor_unary(self, p): """factor : PLUS factor | MINUS factor | TILDE factor """ op = self._factor_ops[p[1]]() p[0] = ast.UnaryOp(op=op, operand=p[2], lineno=self.lineno, col_offset=self.col) def p_power_atom(self, p): """power : atom_expr""" p[0] = p[1] def p_power(self, p): """power : atom_expr POW factor""" p1 = p[1] p[0] = ast.BinOp(left=p1, op=ast.Pow(), right=p[3], lineno=p1.lineno, col_offset=p1.col_offset) def p_yield_expr_or_testlist_comp(self, p): """yield_expr_or_testlist_comp : yield_expr | testlist_comp """ p[0] = p[1] def _list_or_elts_if_not_real_tuple(self, x): if isinstance(x, ast.Tuple) and not (hasattr(x, '_real_tuple') and x._real_tuple): rtn = x.elts else: rtn = [x] return rtn def apply_trailers(self, leader, trailers): """Helper function for atom expr.""" if trailers is None: return leader p0 = leader for trailer in trailers: if isinstance(trailer, (ast.Index, ast.Slice, ast.ExtSlice)): p0 = ast.Subscript(value=leader, slice=trailer, ctx=ast.Load(), lineno=leader.lineno, col_offset=leader.col_offset) elif isinstance(trailer, Mapping): # call normal functions p0 = ast.Call(func=leader, lineno=leader.lineno, col_offset=leader.col_offset, **trailer) elif isinstance(trailer, (ast.Tuple, tuple)): # call macro functions l, c = leader.lineno, leader.col_offset gblcall = xonsh_call('globals', [], lineno=l, col=c) loccall = xonsh_call('locals', [], 
lineno=l, col=c) if isinstance(trailer, tuple): trailer, arglist = trailer margs = [leader, trailer, gblcall, loccall] p0 = xonsh_call('__xonsh_call_macro__', margs, lineno=l, col=c) elif isinstance(trailer, str): if trailer == '?': p0 = xonsh_help(leader, lineno=leader.lineno, col=leader.col_offset) elif trailer == '??': p0 = xonsh_superhelp(leader, lineno=leader.lineno, col=leader.col_offset) else: p0 = ast.Attribute(value=leader, attr=trailer, ctx=ast.Load(), lineno=leader.lineno, col_offset=leader.col_offset) else: assert False leader = p0 return p0 def p_atom_expr(self, p): """atom_expr : atom trailer_list_opt""" p[0] = self.apply_trailers(p[1], p[2]) # # Atom rules! (So does Adam!) # def p_atom_lparen(self, p): """atom : lparen_tok yield_expr_or_testlist_comp_opt RPAREN""" p1, p2 = p[1], p[2] p1, p1_tok = p1.value, p1 if p2 is None: # empty container atom p0 = ast.Tuple(elts=[], ctx=ast.Load(), lineno=self.lineno, col_offset=self.col) p0._real_tuple = True elif isinstance(p2, ast.AST): p0 = p2 p0._lopen_lineno, p0._lopen_col = p1_tok.lineno, p1_tok.lexpos p0._real_tuple = True elif len(p2) == 1 and isinstance(p2[0], ast.AST): p0 = p2[0] p0._lopen_lineno, p0._lopen_col = p1_tok.lineno, p1_tok.lexpos else: self.p_error(p) p[0] = p0 def p_atom_lbraket(self, p): """atom : lbracket_tok testlist_comp_opt RBRACKET""" p1, p2 = p[1], p[2] p1, p1_tok = p1.value, p1 if p2 is None: p0 = ast.List(elts=[], ctx=ast.Load(), lineno=self.lineno, col_offset=self.col) elif isinstance(p2, ast.GeneratorExp): p0 = ast.ListComp(elt=p2.elt, generators=p2.generators, lineno=p2.lineno, col_offset=p2.col_offset) else: if isinstance(p2, ast.Tuple): if hasattr(p2, '_real_tuple') and p2._real_tuple: elts = [p2] else: elts = p2.elts else: elts = [p2] p0 = ast.List(elts=elts, ctx=ast.Load(), lineno=p1_tok.lineno, col_offset=p1_tok.lexpos) p[0] = p0 def p_atom_lbrace(self, p): """atom : lbrace_tok dictorsetmaker_opt RBRACE""" p1, p2 = p[1], p[2] p1, p1_tok = p1.value, p1 if p2 is None: p0 = ast.Dict(keys=[], values=[], ctx=ast.Load(), lineno=self.lineno, col_offset=self.col) else: p0 = p2 p0.lineno, p0.col_offset = p1_tok.lineno, p1_tok.lexpos p[0] = p0 def p_atom_ns(self, p): """atom : number | string_literal_list """ p[0] = p[1] def p_atom_name(self, p): """atom : name_tok""" p1 = p[1] p[0] = ast.Name(id=p1.value, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos) def p_atom_ellip(self, p): """atom : ellipsis_tok""" p1 = p[1] p[0] = ast.EllipsisNode(lineno=p1.lineno, col_offset=p1.lexpos) def p_atom_none(self, p): """atom : none_tok""" p1 = p[1] p[0] = ast.NameConstant(value=None, lineno=p1.lineno, col_offset=p1.lexpos) def p_atom_true(self, p): """atom : true_tok""" p1 = p[1] p[0] = ast.NameConstant(value=True, lineno=p1.lineno, col_offset=p1.lexpos) def p_atom_false(self, p): """atom : false_tok""" p1 = p[1] p[0] = ast.NameConstant(value=False, lineno=p1.lineno, col_offset=p1.lexpos) def p_atom_pathsearch(self, p): """atom : SEARCHPATH""" p[0] = xonsh_pathsearch(p[1], pymode=True, lineno=self.lineno, col=self.col) def p_atom_dname(self, p): """atom : DOLLAR_NAME""" p[0] = self._envvar_by_name(p[1][1:], lineno=self.lineno, col=self.col) def p_atom_fistful_of_dollars(self, p): """atom : dollar_lbrace_tok test RBRACE | bang_lparen_tok subproc RPAREN | dollar_lparen_tok subproc RPAREN | bang_lbracket_tok subproc RBRACKET | dollar_lbracket_tok subproc RBRACKET """ p[0] = self._dollar_rules(p) def p_atom_bang_empty_fistful_of_dollars(self, p): """atom : bang_lparen_tok subproc bang_tok RPAREN | dollar_lparen_tok 
subproc bang_tok RPAREN | bang_lbracket_tok subproc bang_tok RBRACKET | dollar_lbracket_tok subproc bang_tok RBRACKET """ self._append_subproc_bang_empty(p) p[0] = self._dollar_rules(p) def p_atom_bang_fistful_of_dollars(self, p): """atom : bang_lparen_tok subproc bang_tok nocloser rparen_tok | dollar_lparen_tok subproc bang_tok nocloser rparen_tok | bang_lbracket_tok subproc bang_tok nocloser rbracket_tok | dollar_lbracket_tok subproc bang_tok nocloser rbracket_tok """ self._append_subproc_bang(p) p[0] = self._dollar_rules(p) def _attach_nocloser_base_rules(self): toks = set(self.tokens) toks -= {'LPAREN', 'RPAREN', 'LBRACE', 'RBRACE', 'LBRACKET', 'RBRACKET', 'AT_LPAREN', 'BANG_LPAREN', 'BANG_LBRACKET', 'DOLLAR_LPAREN', 'DOLLAR_LBRACE', 'DOLLAR_LBRACKET', 'ATDOLLAR_LPAREN'} ts = '\n | '.join(sorted(toks)) doc = 'nocloser : ' + ts + '\n' self.p_nocloser_base.__func__.__doc__ = doc def p_nocloser_base(self, p): # see above attachment function pass def p_nocloser_any(self, p): """nocloser : any_nested_raw""" pass def p_nocloser_many(self, p): """nocloser : nocloser nocloser""" pass def p_string_literal(self, p): """string_literal : string_tok""" p1 = p[1] prefix = RE_STRINGPREFIX.match(p1.value).group() if 'p' in prefix: value_without_p = prefix.replace('p', '') + p1.value[len(prefix):] s = ast.Str(s=ast.literal_eval(value_without_p), lineno=p1.lineno, col_offset=p1.lexpos) p[0] = xonsh_call('__xonsh_path_literal__', [s], lineno=p1.lineno, col=p1.lexpos) elif 'f' in prefix or 'F' in prefix: s = pyparse(p1.value).body[0].value s = ast.increment_lineno(s, p1.lineno - 1) p[0] = s else: s = ast.literal_eval(p1.value) is_bytes = 'b' in prefix or 'B' in prefix cls = ast.Bytes if is_bytes else ast.Str p[0] = cls(s=s, lineno=p1.lineno, col_offset=p1.lexpos) def p_string_literal_list(self, p): """string_literal_list : string_literal | string_literal_list string_literal """ if len(p) == 3: p[1].s += p[2].s p[0] = p[1] def p_number(self, p): """number : number_tok""" p1 = p[1] p[0] = ast.Num(n=ast.literal_eval(p1.value.replace('_', '')), lineno=p1.lineno, col_offset=p1.lexpos) def p_testlist_comp_comp(self, p): """testlist_comp : test_or_star_expr comp_for""" p1, p2 = p[1], p[2] p[0] = ast.GeneratorExp(elt=p1, generators=p2['comps'], lineno=p1.lineno, col_offset=p1.col_offset) def p_testlist_comp_comma(self, p): """testlist_comp : test_or_star_expr comma_opt""" p1, p2 = p[1], p[2] if p2 is None: # split out grouping parentheses. 
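            # e.g. '(x)' is just a parenthesized expression, so the node is
            # passed through unchanged; only a trailing comma, as in '(x,)',
            # reaches the else branch below and builds a one-element tuple.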
p[0] = p1 else: p[0] = ast.Tuple(elts=[p1], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset) def p_testlist_comp_many(self, p): """testlist_comp : test_or_star_expr comma_test_or_star_expr_list comma_opt""" p1, p2 = p[1], p[2] p[0] = ast.Tuple(elts=[p1] + p2, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset) def p_trailer_lparen(self, p): """trailer : LPAREN arglist_opt RPAREN""" p[0] = [p[2] or dict(args=[], keywords=[], starargs=None, kwargs=None)] def p_trailer_bang_lparen(self, p): """trailer : bang_lparen_tok macroarglist_opt rparen_tok | bang_lparen_tok nocomma comma_tok rparen_tok | bang_lparen_tok nocomma comma_tok WS rparen_tok | bang_lparen_tok macroarglist comma_tok rparen_tok | bang_lparen_tok macroarglist comma_tok WS rparen_tok """ p1, p2, p3 = p[1], p[2], p[3] begins = [(p1.lineno, p1.lexpos + 2)] ends = [(p3.lineno, p3.lexpos)] if p2: begins.extend([(x[0], x[1] + 1) for x in p2]) ends = p2 + ends elts = [] for beg, end in zip(begins, ends): s = self.source_slice(beg, end).strip() if not s: if len(begins) == 1: break else: msg = 'empty macro arguments not allowed' self._parse_error(msg, self.currloc(*beg)) node = ast.Str(s=s, lineno=beg[0], col_offset=beg[1]) elts.append(node) p0 = ast.Tuple(elts=elts, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos) p[0] = [p0] def p_trailer_p3(self, p): """trailer : LBRACKET subscriptlist RBRACKET | PERIOD NAME """ p[0] = [p[2]] def p_trailer_quest(self, p): """trailer : DOUBLE_QUESTION | QUESTION """ p[0] = [p[1]] def _attach_nocomma_tok_rules(self): toks = set(self.tokens) toks -= {'COMMA', 'LPAREN', 'RPAREN', 'LBRACE', 'RBRACE', 'LBRACKET', 'RBRACKET', 'AT_LPAREN', 'BANG_LPAREN', 'BANG_LBRACKET', 'DOLLAR_LPAREN', 'DOLLAR_LBRACE', 'DOLLAR_LBRACKET', 'ATDOLLAR_LPAREN'} ts = '\n | '.join(sorted(toks)) doc = 'nocomma_tok : ' + ts + '\n' self.p_nocomma_tok.__func__.__doc__ = doc # The following grammar rules are no-ops because we don't need to glue the # source code back together piece-by-piece. Instead, we simply look for # top-level commas and record their positions. With these positions and the # respective positions of the bounding parentheses, we can use the # source_slice() method. This does a much better job of capturing exactly # the source code that was provided. The tokenizer & lexer can be a little # lossy, especially with respect to whitespace. 
def p_nocomma_tok(self, p): # see attachment function above for docstring pass def p_any_raw_tok(self, p): """any_raw_tok : nocomma | COMMA """ pass def p_any_raw_toks_one(self, p): """any_raw_toks : any_raw_tok""" pass def p_any_raw_toks_many(self, p): """any_raw_toks : any_raw_toks any_raw_tok""" pass def p_nocomma_part_tok(self, p): """nocomma_part : nocomma_tok""" pass def p_any_nested_raw(self, p): """any_nested_raw : LPAREN any_raw_toks_opt RPAREN | LBRACE any_raw_toks_opt RBRACE | LBRACKET any_raw_toks_opt RBRACKET | AT_LPAREN any_raw_toks_opt RPAREN | BANG_LPAREN any_raw_toks_opt RPAREN | BANG_LBRACKET any_raw_toks_opt RBRACKET | DOLLAR_LPAREN any_raw_toks_opt RPAREN | DOLLAR_LBRACE any_raw_toks_opt RBRACE | DOLLAR_LBRACKET any_raw_toks_opt RBRACKET | ATDOLLAR_LPAREN any_raw_toks_opt RPAREN """ pass def p_nocomma_part_any(self, p): """nocomma_part : any_nested_raw""" pass def p_nocomma_base(self, p): """nocomma : nocomma_part""" pass def p_nocomma_append(self, p): """nocomma : nocomma nocomma_part""" pass def p_comma_nocomma(self, p): """comma_nocomma : comma_tok nocomma""" p1 = p[1] p[0] = [(p1.lineno, p1.lexpos)] def p_macroarglist_single(self, p): """macroarglist : nocomma""" p[0] = [] def p_macroarglist_many(self, p): """macroarglist : nocomma comma_nocomma_list""" p[0] = p[2] def p_subscriptlist(self, p): """subscriptlist : subscript comma_subscript_list_opt comma_opt""" p1, p2 = p[1], p[2] if p2 is None: pass elif isinstance(p1, ast.Slice) or \ any([isinstance(x, ast.Slice) for x in p2]): p1 = ast.ExtSlice(dims=[p1]+p2) else: p1.value = ast.Tuple(elts=[p1.value] + [x.value for x in p2], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset) p[0] = p1 def p_comma_subscript(self, p): """comma_subscript : COMMA subscript""" p[0] = [p[2]] def p_subscript_test(self, p): """subscript : test""" p1 = p[1] p[0] = ast.Index(value=p1, lineno=p1.lineno, col_offset=p1.col_offset) def p_subscript_tok(self, p): """subscript : test_opt colon_tok test_opt sliceop_opt""" p1 = p[1] if p1 is None: p2 = p[2] lineno, col = p2.lineno, p2.lexpos else: lineno, col = p1.lineno, p1.col_offset p[0] = ast.Slice(lower=p1, upper=p[3], step=p[4], lineno=lineno, col_offset=col) def p_sliceop(self, p): """sliceop : COLON test_opt""" p[0] = p[2] def p_expr_or_star_expr(self, p): """expr_or_star_expr : expr | star_expr """ p[0] = p[1] def p_comma_expr_or_star_expr(self, p): """comma_expr_or_star_expr : COMMA expr_or_star_expr""" p[0] = [p[2]] def p_exprlist_e3(self, p): """exprlist : expr_or_star_expr comma_opt""" p[0] = [p[1]] def p_exprlist_many(self, p): """exprlist : expr_or_star_expr comma_expr_or_star_expr_list comma_opt""" p2 = p[2] p2.insert(0, p[1]) p[0] = p2 def p_testlist_test(self, p): """testlist : test""" p1 = p[1] if isinstance(p1, ast.Tuple) and (hasattr(p1, '_real_tuple') and p1._real_tuple and p1.elts): p1.lineno, p1.col_offset = lopen_loc(p1.elts[0]) p[0] = p1 def p_testlist_single(self, p): """testlist : test COMMA""" p1 = p[1] if isinstance(p1, ast.List) or (isinstance(p1, ast.Tuple) and hasattr(p1, '_real_tuple') and p1._real_tuple): lineno, col = lopen_loc(p1) p[0] = ast.Tuple(elts=[p1], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset) else: p[0] = ensure_has_elts(p[1]) def p_testlist_many(self, p): """testlist : test comma_test_list COMMA | test comma_test_list """ p1 = p[1] if isinstance(p1, ast.List) or (isinstance(p1, ast.Tuple) and hasattr(p1, '_real_tuple') and p1._real_tuple): lineno, col = lopen_loc(p1) p1 = ast.Tuple(elts=[p1], ctx=ast.Load(), lineno=p1.lineno, 
col_offset=p1.col_offset) else: p1 = ensure_has_elts(p1) p1.elts += p[2] p[0] = p1 def p_comma_item(self, p): """comma_item : COMMA item""" p[0] = p[2] # # Dict or set maker # def p_dictorsetmaker_t6(self, p): """dictorsetmaker : test COLON test comma_item_list comma_opt""" p1, p4 = p[1], p[4] keys = [p1] vals = [p[3]] for k, v in zip(p4[::2], p4[1::2]): keys.append(k) vals.append(v) lineno, col = lopen_loc(p1) p[0] = ast.Dict(keys=keys, values=vals, ctx=ast.Load(), lineno=lineno, col_offset=col) def p_dictorsetmaker_i4(self, p): """dictorsetmaker : item comma_item_list comma_opt""" p1, p2 = p[1], p[2] keys = [p1[0]] vals = [p1[1]] for k, v in zip(p2[::2], p2[1::2]): keys.append(k) vals.append(v) lineno, col = lopen_loc(p1[0] or p2[0]) p[0] = ast.Dict(keys=keys, values=vals, ctx=ast.Load(), lineno=lineno, col_offset=col) def p_dictorsetmaker_t4_dict(self, p): """dictorsetmaker : test COLON testlist""" keys = [p[1]] vals = self._list_or_elts_if_not_real_tuple(p[3]) lineno, col = lopen_loc(p[1]) p[0] = ast.Dict(keys=keys, values=vals, ctx=ast.Load(), lineno=lineno, col_offset=col) def p_dictorsetmaker_t4_set(self, p): """dictorsetmaker : test_or_star_expr comma_test_or_star_expr_list comma_opt""" p[0] = ast.Set(elts=[p[1]] + p[2], ctx=ast.Load(), lineno=self.lineno, col_offset=self.col) def p_dictorsetmaker_test_comma(self, p): """dictorsetmaker : test_or_star_expr comma_opt""" elts = self._list_or_elts_if_not_real_tuple(p[1]) p[0] = ast.Set(elts=elts, ctx=ast.Load(), lineno=self.lineno, col_offset=self.col) def p_dictorsetmaker_testlist(self, p): """dictorsetmaker : testlist""" elts = self._list_or_elts_if_not_real_tuple(p[1]) p[0] = ast.Set(elts=elts, ctx=ast.Load(), lineno=self.lineno, col_offset=self.col) def p_dictorsetmaker_comp(self, p): """dictorsetmaker : item comp_for | test_or_star_expr comp_for """ p1 = p[1] comps = p[2].get('comps', []) if isinstance(p1, list) and len(p1) == 2: p[0] = ast.DictComp(key=p1[0], value=p1[1], generators=comps, lineno=self.lineno, col_offset=self.col) else: p[0] = ast.SetComp(elt=p1, generators=comps, lineno=self.lineno, col_offset=self.col) def p_classdef(self, p): """classdef : class_tok NAME func_call_opt COLON suite""" p1, p3 = p[1], p[3] b, kw = ([], []) if p3 is None else (p3['args'], p3['keywords']) c = ast.ClassDef(name=p[2], bases=b, keywords=kw, starargs=None, kwargs=None, body=p[5], decorator_list=[], lineno=p1.lineno, col_offset=p1.lexpos) p[0] = [c] def p_comma_argument(self, p): """comma_argument : COMMA argument""" p[0] = [p[2]] def p_comp_iter(self, p): """comp_iter : comp_for | comp_if """ p[0] = p[1] def p_comp_for(self, p): """comp_for : FOR exprlist IN or_test comp_iter_opt""" targs, it, p5 = p[2], p[4], p[5] if len(targs) == 1: targ = targs[0] else: targ = ensure_has_elts(targs) store_ctx(targ) comp = ast.comprehension(target=targ, iter=it, ifs=[]) comps = [comp] p0 = {'comps': comps} if p5 is not None: comps += p5.get('comps', []) comp.ifs += p5.get('if', []) p[0] = p0 def p_comp_if(self, p): """comp_if : IF test_nocond comp_iter_opt""" p2, p3 = p[2], p[3] p0 = {'if': [p2]} if p3 is not None: p0['comps'] = p3.get('comps', []) p[0] = p0 def p_yield_expr(self, p): """yield_expr : yield_tok yield_arg_opt""" p1, p2 = p[1], p[2] if p2 is None: p0 = ast.Yield(value=p2, lineno=p1.lineno, col_offset=p1.lexpos) elif p2['from']: p0 = ast.YieldFrom(value=p2['val'], lineno=p1.lineno, col_offset=p1.lexpos) else: p0 = ast.Yield(value=p2['val'], lineno=p1.lineno, col_offset=p1.lexpos) p[0] = p0 def p_yield_arg_from(self, p): """yield_arg : FROM 
test""" p[0] = {'from': True, 'val': p[2]} def p_yield_arg_testlist(self, p): """yield_arg : testlist""" p[0] = {'from': False, 'val': p[1]} # # subprocess # def _dollar_rules(self, p): """These handle the special xonsh $ shell atoms by looking up in a special __xonsh_env__ dictionary injected in the __builtin__. """ lenp = len(p) p1, p2 = p[1], p[2] if isinstance(p1, LexToken): p1, p1_tok = p1.value, p1 lineno, col = p1_tok.lineno, p1_tok.lexpos else: lineno, col = self.lineno, self.col if lenp == 3: # $NAME p0 = self._envvar_by_name(p2, lineno=lineno, col=col) elif p1 == '${': xenv = self._xenv(lineno=lineno, col=col) idx = ast.Index(value=p2) p0 = ast.Subscript(value=xenv, slice=idx, ctx=ast.Load(), lineno=lineno, col_offset=col) elif p1 == '$(': p0 = xonsh_call('__xonsh_subproc_captured_stdout__', p2, lineno=lineno, col=col) elif p1 == '!(': p0 = xonsh_call('__xonsh_subproc_captured_object__', p2, lineno=lineno, col=col) elif p1 == '![': p0 = xonsh_call('__xonsh_subproc_captured_hiddenobject__', p2, lineno=lineno, col=col) elif p1 == '$[': p0 = xonsh_call('__xonsh_subproc_uncaptured__', p2, lineno=lineno, col=col) else: assert False return p0 def _xenv(self, lineno=lineno, col=col): """Creates a new xonsh env reference.""" return ast.Name(id='__xonsh_env__', ctx=ast.Load(), lineno=lineno, col_offset=col) def _envvar_getter_by_name(self, var, lineno=None, col=None): xenv = self._xenv(lineno=lineno, col=col) func = ast.Attribute(value=xenv, attr='get', ctx=ast.Load(), lineno=lineno, col_offset=col) return ast.Call(func=func, args=[ast.Str(s=var, lineno=lineno, col_offset=col), ast.Str(s='', lineno=lineno, col_offset=col)], keywords=[], starargs=None, kwargs=None, lineno=lineno, col_offset=col) def _envvar_by_name(self, var, lineno=None, col=None): """Looks up a xonsh variable by name.""" xenv = self._xenv(lineno=lineno, col=col) idx = ast.Index(value=ast.Str(s=var, lineno=lineno, col_offset=col)) return ast.Subscript(value=xenv, slice=idx, ctx=ast.Load(), lineno=lineno, col_offset=col) def _subproc_cliargs(self, args, lineno=None, col=None): """Creates an expression for subprocess CLI arguments.""" cliargs = currlist = empty_list(lineno=lineno, col=col) for arg in args: action = arg._cliarg_action if action == 'append': if currlist is None: currlist = empty_list(lineno=lineno, col=col) cliargs = binop(cliargs, ast.Add(), currlist, lineno=lineno, col=col) currlist.elts.append(arg) elif action == 'extend': cliargs = binop(cliargs, ast.Add(), arg, lineno=lineno, col=col) currlist = None elif action == 'splitlines': sl = call_split_lines(arg, lineno=lineno, col=col) cliargs = binop(cliargs, ast.Add(), sl, lineno=lineno, col=col) currlist = None elif action == 'ensure_list': x = ensure_list_from_str_or_list(arg, lineno=lineno, col=col) cliargs = binop(cliargs, ast.Add(), x, lineno=lineno, col=col) currlist = None else: raise ValueError("action not understood: " + action) del arg._cliarg_action return cliargs def p_pipe(self, p): """pipe : PIPE | WS PIPE | PIPE WS | WS PIPE WS """ p[0] = ast.Str(s='|', lineno=self.lineno, col_offset=self.col) def p_subproc_s2(self, p): """subproc : subproc_atoms | subproc_atoms WS """ p1 = p[1] p[0] = [self._subproc_cliargs(p1, lineno=self.lineno, col=self.col)] def p_subproc_amp(self, p): """subproc : subproc AMPERSAND""" p1 = p[1] p[0] = p1 + [ast.Str(s=p[2], lineno=self.lineno, col_offset=self.col)] def p_subproc_pipe(self, p): """subproc : subproc pipe subproc_atoms | subproc pipe subproc_atoms WS """ p1 = p[1] if len(p1) > 1 and hasattr(p1[-2], 's') and 
p1[-2].s != '|': msg = 'additional redirect following non-pipe redirect' self._parse_error(msg, self.currloc(lineno=self.lineno, column=self.col)) cliargs = self._subproc_cliargs(p[3], lineno=self.lineno, col=self.col) p[0] = p1 + [p[2], cliargs] def p_subproc_atoms_single(self, p): """subproc_atoms : subproc_atom""" p[0] = [p[1]] def p_subproc_atoms_many(self, p): """subproc_atoms : subproc_atoms WS subproc_atom""" p1 = p[1] p1.append(p[3]) p[0] = p1 def p_subproc_atoms_subshell(self, p): """subproc_atoms : lparen_tok any_raw_tok rparen_tok | lparen_tok any_raw_toks rparen_tok """ p1 = p[1] p3 = p[3] l = p1.lineno c = p1.lexpos + 1 subcmd = self.source_slice((l, c), (p3.lineno, p3.lexpos)) subcmd = subcmd.strip() + '\n' p0 = [ast.Str(s='xonsh', lineno=l, col_offset=c), ast.Str(s='-c', lineno=l, col_offset=c), ast.Str(s=subcmd, lineno=l, col_offset=c), ] for arg in p0: arg._cliarg_action = 'append' p[0] = p0 # # Subproc atom rules # def _append_subproc_bang_empty(self, p): """Appends an empty string in subprocess mode to the argument list.""" p3 = p[3] node = ast.Str(s='', lineno=p3.lineno, col_offset=p3.lexpos + 1) p[2][-1].elts.append(node) def _append_subproc_bang(self, p): """Appends the part between ! and the ) or ] in subprocess mode to the argument list. """ p3, p5 = p[3], p[5] beg = (p3.lineno, p3.lexpos + 1) end = (p5.lineno, p5.lexpos) s = self.source_slice(beg, end).strip() node = ast.Str(s=s, lineno=beg[0], col_offset=beg[1]) p[2][-1].elts.append(node) def p_subproc_atom_uncaptured(self, p): """subproc_atom : dollar_lbracket_tok subproc RBRACKET""" p1 = p[1] p0 = xonsh_call('__xonsh_subproc_uncaptured__', args=p[2], lineno=p1.lineno, col=p1.lexpos) p0._cliarg_action = 'splitlines' p[0] = p0 def p_subproc_atom_uncaptured_bang_empty(self, p): """subproc_atom : dollar_lbracket_tok subproc bang_tok RBRACKET""" self._append_subproc_bang_empty(p) self.p_subproc_atom_uncaptured(p) def p_subproc_atom_uncaptured_bang(self, p): """subproc_atom : dollar_lbracket_tok subproc bang_tok nocloser rbracket_tok""" self._append_subproc_bang(p) self.p_subproc_atom_uncaptured(p) def p_subproc_atom_captured_stdout(self, p): """subproc_atom : dollar_lparen_tok subproc RPAREN""" p1 = p[1] p0 = xonsh_call('__xonsh_subproc_captured_stdout__', args=p[2], lineno=p1.lineno, col=p1.lexpos) p0._cliarg_action = 'append' p[0] = p0 def p_subproc_atom_captured_stdout_bang_empty(self, p): """subproc_atom : dollar_lparen_tok subproc bang_tok RPAREN""" self._append_subproc_bang_empty(p) self.p_subproc_atom_captured_stdout(p) def p_subproc_atom_captured_stdout_bang(self, p): """subproc_atom : dollar_lparen_tok subproc bang_tok nocloser rparen_tok""" self._append_subproc_bang(p) self.p_subproc_atom_captured_stdout(p) def p_subproc_atom_pyenv_lookup(self, p): """subproc_atom : dollar_lbrace_tok test RBRACE""" p1 = p[1] lineno, col = p1.lineno, p1.lexpos xenv = self._xenv(lineno=lineno, col=col) func = ast.Attribute(value=xenv, attr='get', ctx=ast.Load(), lineno=lineno, col_offset=col) p0 = ast.Call(func=func, args=[p[2], ast.Str(s='', lineno=lineno, col_offset=col)], keywords=[], starargs=None, kwargs=None, lineno=lineno, col_offset=col) p0._cliarg_action = 'append' p[0] = p0 def p_subproc_atom_pyeval(self, p): """subproc_atom : at_lparen_tok testlist_comp RPAREN""" p1 = p[1] p0 = xonsh_call('__xonsh_list_of_strs_or_callables__', [p[2]], lineno=p1.lineno, col=p1.lexpos) p0._cliarg_action = 'extend' p[0] = p0 def p_subproc_atom_subproc_inject(self, p): """subproc_atom : atdollar_lparen_tok subproc RPAREN""" p1 = p[1] 
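# --- Illustrative sketch (not part of the original source; names taken from
# the rules above) ------------------------------------------------------------
# The $ and subprocess rules never run a command themselves; they only emit
# calls to the runtime helpers injected into builtins.  Roughly:
#
#     $HOME            ->  __xonsh_env__['HOME']
#     ${'HO' + 'ME'}   ->  __xonsh_env__['HO' + 'ME']
#     $(ls -l)         ->  __xonsh_subproc_captured_stdout__(['ls', '-l'])
#     !(ls -l)         ->  __xonsh_subproc_captured_object__(['ls', '-l'])
#     ![ls -l]         ->  __xonsh_subproc_captured_hiddenobject__(['ls', '-l'])
#     $[ls -l]         ->  __xonsh_subproc_uncaptured__(['ls', '-l'])
#
# where the argument list is the AST built by _subproc_cliargs(): a list of
# ast.Str nodes, glued together with + for glob and search-path expansions.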
p0 = xonsh_call('__xonsh_subproc_captured_inject__', p[2], lineno=p1.lineno, col=p1.lexpos) p0._cliarg_action = 'extend' p[0] = p0 def p_subproc_atom_subproc_inject_bang_empty(self, p): """subproc_atom : atdollar_lparen_tok subproc bang_tok RPAREN""" self._append_subproc_bang_empty(p) self.p_subproc_atom_subproc_inject(p) def p_subproc_atom_subproc_inject_bang(self, p): """subproc_atom : atdollar_lparen_tok subproc bang_tok nocloser rparen_tok""" self._append_subproc_bang(p) self.p_subproc_atom_subproc_inject(p) def p_subproc_atom_redirect(self, p): """subproc_atom : GT | LT | RSHIFT | IOREDIRECT """ p0 = ast.Str(s=p[1], lineno=self.lineno, col_offset=self.col) p0._cliarg_action = 'append' p[0] = p0 def p_subproc_atom_re(self, p): """subproc_atom : SEARCHPATH""" p0 = xonsh_pathsearch(p[1], pymode=False, lineno=self.lineno, col=self.col) p0._cliarg_action = 'extend' p[0] = p0 def p_subproc_atom_str(self, p): """subproc_atom : string_literal""" p0 = xonsh_call('__xonsh_expand_path__', args=[p[1]], lineno=self.lineno, col=self.col) p0._cliarg_action = 'append' p[0] = p0 def p_subproc_atom_arg(self, p): """subproc_atom : subproc_arg""" p1 = p[1] p0 = ast.Str(s=p[1], lineno=self.lineno, col_offset=self.col) if '*' in p1: p0 = xonsh_call('__xonsh_glob__', args=[p0], lineno=self.lineno, col=self.col) p0._cliarg_action = 'extend' else: p0 = xonsh_call('__xonsh_expand_path__', args=[p0], lineno=self.lineno, col=self.col) p0._cliarg_action = 'append' p[0] = p0 def p_subproc_arg_single(self, p): """subproc_arg : subproc_arg_part""" p[0] = p[1] def p_subproc_arg_many(self, p): """subproc_arg : subproc_arg subproc_arg_part""" # This glues the string together after parsing p[0] = p[1] + p[2] def _attach_subproc_arg_part_rules(self): toks = set(self.tokens) toks -= {'AND', 'OR', 'NOT', 'BANG', 'PIPE', 'WS', 'GT', 'LT', 'LSHIFT', 'RSHIFT', 'IOREDIRECT', 'SEARCHPATH', 'INDENT', 'DEDENT', 'LPAREN', 'RPAREN', 'LBRACE', 'RBRACE', 'LBRACKET', 'RBRACKET', 'AT_LPAREN', 'BANG_LPAREN', 'BANG_LBRACKET', 'DOLLAR_LPAREN', 'DOLLAR_LBRACE', 'DOLLAR_LBRACKET', 'ATDOLLAR_LPAREN'} ts = '\n | '.join(sorted(toks)) doc = 'subproc_arg_part : ' + ts + '\n' self.p_subproc_arg_part.__func__.__doc__ = doc def p_subproc_arg_part(self, p): # Many tokens cannot be part of this rule, such as $, ', ", () # Use a string atom instead. See above attachment functions p[0] = p[1] # # Helpers # def p_test_comma_combine(self, p): """test_comma_list : test comma_test_list | test comma_test_list COMMA """ p2 = p[2] p2.insert(0, p[1]) p[0] = p2 def p_empty(self, p): 'empty : ' p[0] = None def p_error(self, p): if p is None: self._parse_error('no further code', None) elif p.type == 'ERRORTOKEN': if isinstance(p.value, BaseException): raise p.value else: self._parse_error(p.value, self.currloc(lineno=p.lineno, column=p.lexpos)) else: msg = 'code: {0}'.format(p.value), self._parse_error(msg, self.currloc(lineno=p.lineno, column=p.lexpos)) xonsh-0.6.0/xonsh/parsers/context_check.py000066400000000000000000000052221320541242300206620ustar00rootroot00000000000000import ast import keyword import collections _all_keywords = frozenset(keyword.kwlist) def _not_assignable(x, augassign=False): """ If ``x`` represents a value that can be assigned to, return ``None``. Otherwise, return a string describing the object. For use in generating meaningful syntax errors. 
""" if augassign and isinstance(x, (ast.Tuple, ast.List)): return 'literal' elif isinstance(x, (ast.Tuple, ast.List)): if len(x.elts) == 0: return '()' for i in x.elts: res = _not_assignable(i) if res is not None: return res elif isinstance(x, (ast.Set, ast.Dict, ast.Num, ast.Str, ast.Bytes)): return 'literal' elif isinstance(x, ast.Call): return 'function call' elif isinstance(x, ast.Lambda): return 'lambda' elif isinstance(x, (ast.BoolOp, ast.BinOp, ast.UnaryOp)): return 'operator' elif isinstance(x, ast.IfExp): return 'conditional expression' elif isinstance(x, ast.ListComp): return 'list comprehension' elif isinstance(x, ast.DictComp): return 'dictionary comprehension' elif isinstance(x, ast.SetComp): return 'set comprehension' elif isinstance(x, ast.GeneratorExp): return 'generator expression' elif isinstance(x, ast.Compare): return 'comparison' elif isinstance(x, ast.Name) and x.id in _all_keywords: return 'keyword' elif isinstance(x, ast.NameConstant): return 'keyword' _loc = collections.namedtuple('_loc', ['lineno', 'column']) def check_contexts(tree): c = ContextCheckingVisitor() c.visit(tree) if c.error is not None: e = SyntaxError(c.error[0]) e.loc = _loc(c.error[1], c.error[2]) raise e class ContextCheckingVisitor(ast.NodeVisitor): def __init__(self): self.error = None def visit_Delete(self, node): for i in node.targets: err = _not_assignable(i) if err is not None: msg = "can't delete {}".format(err) self.error = msg, i.lineno, i.col_offset break def visit_Assign(self, node): for i in node.targets: err = _not_assignable(i) if err is not None: msg = "can't assign to {}".format(err) self.error = msg, i.lineno, i.col_offset break def visit_AugAssign(self, node): err = _not_assignable(node.target, True) if err is not None: msg = "illegal target for augmented assignment: {}".format(err) self.error = msg, node.target.lineno, node.target.col_offset xonsh-0.6.0/xonsh/parsers/v34.py000066400000000000000000000117451320541242300164640ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements the xonsh parser for Python v3.4.""" import xonsh.ast as ast from xonsh.parsers.base import BaseParser class Parser(BaseParser): """A Python v3.4 compliant parser for the xonsh language.""" def __init__(self, lexer_optimize=True, lexer_table='xonsh.lexer_table', yacc_optimize=True, yacc_table='xonsh.parser_table', yacc_debug=False, outputdir=None): """Parameters ---------- lexer_optimize : bool, optional Set to false when unstable and true when lexer is stable. lexer_table : str, optional Lexer module used when optimized. yacc_optimize : bool, optional Set to false when unstable and true when parser is stable. yacc_table : str, optional Parser module used when optimized. yacc_debug : debug, optional Dumps extra debug info. outputdir : str or None, optional The directory to place generated tables within. 
""" # Rule creation and modification *must* take place before super() opt_rules = ['argument_comma_list', 'comma_argument_list'] for rule in opt_rules: self._opt_rule(rule) list_rules = ['argument_comma'] for rule in list_rules: self._list_rule(rule) super().__init__(lexer_optimize=lexer_optimize, lexer_table=lexer_table, yacc_optimize=yacc_optimize, yacc_table=yacc_table, yacc_debug=yacc_debug, outputdir=outputdir) def p_classdef_or_funcdef(self, p): """classdef_or_funcdef : classdef | funcdef """ p[0] = p[1] def p_item(self, p): """item : test COLON test""" lenp = len(p) if lenp == 4: p0 = [p[1], p[3]] elif lenp == 3: p0 = [None, p[2]] else: assert False p[0] = p0 def _set_arg(self, args, arg, ensure_kw=False): if isinstance(arg, ast.keyword): args['keywords'].append(arg) elif ensure_kw: args['kwargs'] = arg else: args['args'].append(arg) def p_arglist(self, p): """arglist : argument comma_opt | argument_comma_list argument comma_opt | argument_comma_list_opt TIMES test comma_argument_list_opt | argument_comma_list_opt TIMES test COMMA POW test | argument_comma_list_opt TIMES test comma_argument_list COMMA POW test | argument_comma_list_opt POW test """ lenp = len(p) p1, p2 = p[1], p[2] p0 = {'args': [], 'keywords': [], 'starargs': None, 'kwargs': None} if lenp == 3: self._set_arg(p0, p1) elif lenp == 4 and p2 != '**': for arg in p1: self._set_arg(p0, arg) self._set_arg(p0, p2) elif lenp == 4 and p2 == '**': if p1 is not None: for arg in p1: self._set_arg(p0, arg) self._set_arg(p0, p[3], ensure_kw=True) elif lenp == 5: p0['starargs'], p4 = p[3], p[4] if p1 is not None: for arg in p1: self._set_arg(p0, arg) if p4 is not None: for arg in p4: self._set_arg(p0, arg, ensure_kw=True) elif lenp == 7: p0['starargs'] = p[3] if p1 is not None: for arg in p1: self._set_arg(p0, arg) self._set_arg(p0, p[6], ensure_kw=True) elif lenp == 8: p0['starargs'], p4 = p[3], p[4] if p1 is not None: for arg in p1: self._set_arg(p0, arg) for arg in p4: self._set_arg(p0, arg, ensure_kw=True) self._set_arg(p0, p[7], ensure_kw=True) else: assert False p[0] = p0 def p_argument_comma(self, p): """argument_comma : argument COMMA""" p[0] = [p[1]] def p_argument(self, p): """argument : test | test comp_for | test EQUALS test """ # Really [keyword '='] test # The reason that keywords are test nodes instead of NAME is that using # NAME results in an ambiguity. p1 = p[1] lenp = len(p) if lenp == 2: p0 = p1 elif lenp == 3: if p1 == '**': p0 = ast.keyword(arg=None, value=p[2]) elif p1 == '*': p0 = ast.Starred(value=p[2]) else: p0 = ast.GeneratorExp(elt=p1, generators=p[2]['comps'], lineno=p1.lineno, col_offset=p1.col_offset) elif lenp == 4: p0 = ast.keyword(arg=p1.id, value=p[3]) else: assert False p[0] = p0 xonsh-0.6.0/xonsh/parsers/v35.py000066400000000000000000000113531320541242300164600ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements the xonsh parser for Python v3.5.""" import xonsh.ast as ast from xonsh.parsers.base import BaseParser class Parser(BaseParser): """A Python v3.5 compliant parser for the xonsh language.""" def __init__(self, lexer_optimize=True, lexer_table='xonsh.lexer_table', yacc_optimize=True, yacc_table='xonsh.parser_table', yacc_debug=False, outputdir=None): """Parameters ---------- lexer_optimize : bool, optional Set to false when unstable and true when lexer is stable. lexer_table : str, optional Lexer module used when optimized. yacc_optimize : bool, optional Set to false when unstable and true when parser is stable. yacc_table : str, optional Parser module used when optimized. 
yacc_debug : debug, optional Dumps extra debug info. outputdir : str or None, optional The directory to place generated tables within. """ # Rule creation and modification *must* take place before super() tok_rules = ['await', 'async'] for rule in tok_rules: self._tok_rule(rule) super().__init__(lexer_optimize=lexer_optimize, lexer_table=lexer_table, yacc_optimize=yacc_optimize, yacc_table=yacc_table, yacc_debug=yacc_debug, outputdir=outputdir) def p_classdef_or_funcdef(self, p): """classdef_or_funcdef : classdef | funcdef | async_funcdef """ p[0] = p[1] def p_async_funcdef(self, p): """async_funcdef : async_tok funcdef""" p1, f = p[1], p[2][0] p[0] = [ast.AsyncFunctionDef(**f.__dict__)] p[0][0]._async_tok = p1 def p_async_compound_stmt(self, p): """compound_stmt : async_stmt""" p[0] = p[1] def p_async_for_stmt(self, p): """async_for_stmt : ASYNC for_stmt""" f = p[2][0] p[0] = [ast.AsyncFor(**f.__dict__)] def p_async_with_stmt(self, p): """async_with_stmt : ASYNC with_stmt""" w = p[2][0] p[0] = [ast.AsyncWith(**w.__dict__)] def p_atom_expr_await(self, p): """atom_expr : await_tok atom trailer_list_opt""" p0 = self.apply_trailers(p[2], p[3]) p1 = p[1] p0 = ast.Await(value=p0, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos) p[0] = p0 def p_async_stmt(self, p): """async_stmt : async_funcdef | async_with_stmt | async_for_stmt """ p[0] = p[1] def p_item_test(self, p): """item : test COLON test""" p[0] = [p[1], p[3]] def p_item_pow(self, p): """item : POW expr""" p[0] = [None, p[2]] def _set_arg(self, args, arg, ensure_kw=False): if isinstance(arg, ast.keyword): args['keywords'].append(arg) elif ensure_kw: args['keywords'].append(ast.keyword(arg=None, value=arg)) else: args['args'].append(arg) def p_arglist_single(self, p): """arglist : argument comma_opt""" p0 = {'args': [], 'keywords': []} self._set_arg(p0, p[1]) p[0] = p0 def p_arglist_many(self, p): """arglist : argument comma_argument_list comma_opt """ p0 = {'args': [], 'keywords': []} self._set_arg(p0, p[1]) for arg in p[2]: self._set_arg(p0, arg) p[0] = p0 # Argument rules # "test '=' test" is really "keyword '=' test", but we have no such token. # These need to be in a single rule to avoid grammar that is ambiguous # to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, # we explicitly match '*' here, too, to give it proper precedence. # Illegal combinations and orderings are blocked in ast.c: # multiple (test comp_for) arguments are blocked; keyword unpackings # that precede iterable unpackings are blocked; etc. 
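# --- Illustrative sketch (not part of the original source) -------------------
# For a call such as  f(x, *xs, k=1, **kw)  the argument rules above feed
# _set_arg() so that p_arglist_* produces a dict roughly of the shape
#
#     {'args': [Name('x'), Starred(Name('xs'))],
#      'keywords': [keyword(arg='k', value=Num(1)),
#                   keyword(arg=None, value=Name('kw'))]}
#
# which the call-building rules then splice into ast.Call.  Unlike the v3.4
# parser above, ``*xs`` and ``**kw`` no longer get separate 'starargs' and
# 'kwargs' slots on Python 3.5+; they become Starred/keyword(arg=None) nodes.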
def p_argument_test_or_star(self, p): """argument : test_or_star_expr""" p[0] = p[1] def p_argument_kwargs(self, p): """argument : POW test""" p[0] = ast.keyword(arg=None, value=p[2]) def p_argument_args(self, p): """argument : TIMES test""" p[0] = ast.Starred(value=p[2]) def p_argument(self, p): """argument : test comp_for""" p1 = p[1] p[0] = ast.GeneratorExp(elt=p1, generators=p[2]['comps'], lineno=p1.lineno, col_offset=p1.col_offset) def p_argument_eq(self, p): """argument : test EQUALS test""" p[0] = ast.keyword(arg=p[1].id, value=p[3]) xonsh-0.6.0/xonsh/parsers/v36.py000066400000000000000000000016011320541242300164540ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Implements the xonsh parser for Python v3.6.""" import xonsh.ast as ast from xonsh.parsers.v35 import Parser as ThreeFiveParser from xonsh.parsers.base import store_ctx, ensure_has_elts class Parser(ThreeFiveParser): """A Python v3.6 compliant parser for the xonsh language.""" def p_comp_for(self, p): """comp_for : FOR exprlist IN or_test comp_iter_opt""" targs, it, p5 = p[2], p[4], p[5] if len(targs) == 1: targ = targs[0] else: targ = ensure_has_elts(targs) store_ctx(targ) # only difference with base should be the is_async=0 comp = ast.comprehension(target=targ, iter=it, ifs=[], is_async=0) comps = [comp] p0 = {'comps': comps} if p5 is not None: comps += p5.get('comps', []) comp.ifs += p5.get('if', []) p[0] = p0 xonsh-0.6.0/xonsh/platform.py000066400000000000000000000366341320541242300162210ustar00rootroot00000000000000"""Module for platform-specific constants and implementations, as well as compatibility layers to make use of the 'best' implementation available on a platform. """ import os import sys import ctypes import signal import pathlib import builtins import platform import functools import subprocess import collections import importlib.util from xonsh.lazyasd import LazyBool, lazyobject, lazybool # do not import any xonsh-modules here to avoid circular dependencies FD_STDIN = 0 FD_STDOUT = 1 FD_STDERR = 2 @lazyobject def distro(): try: import distro as d except ImportError: d = None except Exception: raise return d # # OS # ON_DARWIN = LazyBool(lambda: platform.system() == 'Darwin', globals(), 'ON_DARWIN') """``True`` if executed on a Darwin platform, else ``False``. """ ON_LINUX = LazyBool(lambda: platform.system() == 'Linux', globals(), 'ON_LINUX') """``True`` if executed on a Linux platform, else ``False``. """ ON_WINDOWS = LazyBool(lambda: platform.system() == 'Windows', globals(), 'ON_WINDOWS') """``True`` if executed on a native Windows platform, else ``False``. """ ON_CYGWIN = LazyBool(lambda: sys.platform == 'cygwin', globals(), 'ON_CYGWIN') """``True`` if executed on a Cygwin Windows platform, else ``False``. """ ON_POSIX = LazyBool(lambda: (os.name == 'posix'), globals(), 'ON_POSIX') """``True`` if executed on a POSIX-compliant platform, else ``False``. 
""" ON_FREEBSD = LazyBool(lambda: (sys.platform.startswith('freebsd')), globals(), 'ON_FREEBSD') """``True`` if on a FreeBSD operating system, else ``False``.""" ON_NETBSD = LazyBool(lambda: (sys.platform.startswith('netbsd')), globals(), 'ON_NETBSD') """``True`` if on a NetBSD operating system, else ``False``.""" @lazybool def ON_BSD(): """``True`` if on a BSD operating system, else ``False``.""" return bool(ON_FREEBSD) or bool(ON_NETBSD) @lazybool def ON_BEOS(): """True if we are on BeOS or Haiku.""" return sys.platform == 'beos5' or sys.platform == 'haiku1' # # Python & packages # PYTHON_VERSION_INFO = sys.version_info[:3] """ Version of Python interpreter as three-value tuple. """ @lazyobject def PYTHON_VERSION_INFO_BYTES(): """The python version info tuple in a canonical bytes form.""" return '.'.join(map(str, sys.version_info)).encode() ON_ANACONDA = LazyBool( lambda: any(s in sys.version for s in {'Anaconda', 'Continuum', 'conda-forge'}), globals(), 'ON_ANACONDA') """ ``True`` if executed in an Anaconda instance, else ``False``. """ CAN_RESIZE_WINDOW = LazyBool(lambda: hasattr(signal, 'SIGWINCH'), globals(), 'CAN_RESIZE_WINDOW') """``True`` if we can resize terminal window, as provided by the presense of signal.SIGWINCH, else ``False``. """ @lazybool def HAS_PYGMENTS(): """``True`` if `pygments` is available, else ``False``.""" spec = importlib.util.find_spec('pygments') return (spec is not None) @functools.lru_cache(1) def pygments_version(): """pygments.__version__ version if available, else None.""" if HAS_PYGMENTS: import pygments v = pygments.__version__ else: v = None return v @functools.lru_cache(1) def has_prompt_toolkit(): """ Tests if the `prompt_toolkit` is available. """ spec = importlib.util.find_spec('prompt_toolkit') return (spec is not None) @functools.lru_cache(1) def ptk_version(): """ Returns `prompt_toolkit.__version__` if available, else ``None``. """ if has_prompt_toolkit(): import prompt_toolkit return getattr(prompt_toolkit, '__version__', '<0.57') else: return None @functools.lru_cache(1) def ptk_version_info(): """ Returns `prompt_toolkit`'s version as tuple of integers. """ if has_prompt_toolkit(): return tuple(int(x) for x in ptk_version().strip('<>+-=.').split('.')) else: return None @functools.lru_cache(1) def ptk_version_is_supported(): minimum_required_ptk_version = (1, 0) return ptk_version_info()[:2] >= minimum_required_ptk_version @functools.lru_cache(1) def best_shell_type(): if ON_WINDOWS or has_prompt_toolkit(): return 'prompt_toolkit' else: return 'readline' @functools.lru_cache(1) def is_readline_available(): """Checks if readline is available to import.""" spec = importlib.util.find_spec('readline') return (spec is not None) @lazyobject def seps(): """String of all path separators.""" s = os.path.sep if os.path.altsep is not None: s += os.path.altsep return s def pathsplit(p): """This is a safe version of os.path.split(), which does not work on input without a drive. """ n = len(p) while n and p[n-1] not in seps: n -= 1 pre = p[:n] pre = pre.rstrip(seps) or pre post = p[n:] return pre, post def pathbasename(p): """This is a safe version of os.path.basename(), which does not work on input without a drive. This version does. """ return pathsplit(p)[-1] @lazyobject def expanduser(): """Dispatches to the correct platform-dependent expanduser() function.""" if ON_WINDOWS: return windows_expanduser else: return os.path.expanduser def windows_expanduser(path): """A Windows-specific expanduser() function for xonsh. 
This is needed since os.path.expanduser() does not check on Windows if the user actually exists. This restricts expanding the '~' if it is not followed by a separator. That is only '~/' and '~\' are expanded. """ if not path.startswith('~'): return path elif len(path) < 2 or path[1] in seps: return os.path.expanduser(path) else: return path # termios tc(get|set)attr indexes. IFLAG = 0 OFLAG = 1 CFLAG = 2 LFLAG = 3 ISPEED = 4 OSPEED = 5 CC = 6 # # Dev release info # @functools.lru_cache(1) def githash(): """Returns a tuple contains two strings: the hash and the date.""" install_base = os.path.dirname(__file__) githash_file = '{}/dev.githash'.format(install_base) if not os.path.exists(githash_file): return None, None sha = None date_ = None try: with open(githash_file) as f: sha, date_ = f.read().strip().split('|') except ValueError: pass return sha, date_ # # Encoding # DEFAULT_ENCODING = sys.getdefaultencoding() """ Default string encoding. """ if PYTHON_VERSION_INFO < (3, 5, 0): class DirEntry: def __init__(self, directory, name): self.__path__ = pathlib.Path(directory) / name self.name = name self.path = str(self.__path__) self.is_symlink = self.__path__.is_symlink def inode(self): return os.stat(self.path, follow_symlinks=False).st_ino def is_dir(self, *, follow_symlinks=True): if follow_symlinks: return self.__path__.is_dir() else: return not self.__path__.is_symlink() \ and self.__path__.is_dir() def is_file(self, *, follow_symlinks=True): if follow_symlinks: return self.__path__.is_file() else: return not self.__path__.is_symlink() \ and self.__path__.is_file() def stat(self, *, follow_symlinks=True): return os.stat(self.path, follow_symlinks=follow_symlinks) def scandir(path): """ Compatibility layer for `os.scandir` from Python 3.5+. """ return (DirEntry(path, x) for x in os.listdir(path)) else: scandir = os.scandir # # Linux distro # @functools.lru_cache(1) def linux_distro(): """The id of the Linux distribution running on, possibly 'unknown'. None on non-Linux platforms. 
""" if ON_LINUX: if distro: ld = distro.id() elif PYTHON_VERSION_INFO < (3, 7, 0): ld = platform.linux_distribution()[0] or 'unknown' elif '-ARCH-' in platform.platform(): ld = 'arch' # that's the only one we need to know for now else: ld = 'unknown' else: ld = None return ld # # Windows # @functools.lru_cache(1) def git_for_windows_path(): """Returns the path to git for windows, if available and None otherwise.""" import winreg try: key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\GitForWindows') gfwp, _ = winreg.QueryValueEx(key, "InstallPath") except FileNotFoundError: gfwp = None return gfwp @functools.lru_cache(1) def windows_bash_command(): """Determines the command for Bash on windows.""" # Check that bash is on path otherwise try the default directory # used by Git for windows wbc = 'bash' cmd_cache = builtins.__xonsh_commands_cache__ bash_on_path = cmd_cache.lazy_locate_binary('bash', ignore_alias=True) if bash_on_path: try: out = subprocess.check_output([bash_on_path, '--version'], stderr=subprocess.PIPE, universal_newlines=True) except subprocess.CalledProcessError: bash_works = False else: # Check if Bash is from the "Windows Subsystem for Linux" (WSL) # which can't be used by xonsh foreign-shell/completer bash_works = out and 'pc-linux-gnu' not in out.splitlines()[0] if bash_works: wbc = bash_on_path else: gfwp = git_for_windows_path() if gfwp: bashcmd = os.path.join(gfwp, 'bin\\bash.exe') if os.path.isfile(bashcmd): wbc = bashcmd return wbc # # Environment variables defaults # if ON_WINDOWS: class OSEnvironCasePreserving(collections.MutableMapping): """ Case-preserving wrapper for os.environ on Windows. It uses nt.environ to get the correct cased keys on initialization. It also preserves the case of any variables add after initialization. """ def __init__(self): import nt self._upperkeys = dict((k.upper(), k) for k in nt.environ) def _sync(self): """ Ensure that the case sensitive map of the keys are in sync with os.environ """ envkeys = set(os.environ.keys()) for key in envkeys.difference(self._upperkeys): self._upperkeys[key] = key.upper() for key in set(self._upperkeys).difference(envkeys): del self._upperkeys[key] def __contains__(self, k): self._sync() return k.upper() in self._upperkeys def __len__(self): self._sync() return len(self._upperkeys) def __iter__(self): self._sync() return iter(self._upperkeys.values()) def __getitem__(self, k): self._sync() return os.environ[k] def __setitem__(self, k, v): self._sync() self._upperkeys[k.upper()] = k os.environ[k] = v def __delitem__(self, k): self._sync() if k.upper() in self._upperkeys: del self._upperkeys[k.upper()] del os.environ[k] def getkey_actual_case(self, k): self._sync() return self._upperkeys.get(k.upper()) @lazyobject def os_environ(): """This dispatches to the correct, case-sensitive version of os.environ. This is mainly a problem for Windows. See #2024 for more details. This can probably go away once support for Python v3.5 or v3.6 is dropped. """ if ON_WINDOWS: return OSEnvironCasePreserving() else: return os.environ @functools.lru_cache(1) def bash_command(): """Determines the command for Bash on the current platform.""" if ON_WINDOWS: bc = windows_bash_command() else: bc = 'bash' return bc @lazyobject def BASH_COMPLETIONS_DEFAULT(): """A possibly empty tuple with default paths to Bash completions known for the current platform. 
""" if ON_LINUX or ON_CYGWIN: bcd = ('/usr/share/bash-completion/bash_completion', ) elif ON_DARWIN: bcd = ('/usr/local/share/bash-completion/bash_completion', # v2.x '/usr/local/etc/bash_completion') # v1.x elif ON_WINDOWS and git_for_windows_path(): bcd = (os.path.join(git_for_windows_path(), 'usr\\share\\bash-completion\\bash_completion'), os.path.join(git_for_windows_path(), 'mingw64\\share\\git\\completion\\' 'git-completion.bash')) else: bcd = () return bcd @lazyobject def PATH_DEFAULT(): if ON_LINUX or ON_CYGWIN: if linux_distro() == 'arch': pd = ('/usr/local/sbin', '/usr/local/bin', '/usr/bin', '/usr/bin/site_perl', '/usr/bin/vendor_perl', '/usr/bin/core_perl') else: pd = (os.path.expanduser('~/bin'), '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin', '/bin', '/usr/games', '/usr/local/games') elif ON_DARWIN: pd = ('/usr/local/bin', '/usr/bin', '/bin', '/usr/sbin', '/sbin') elif ON_WINDOWS: import winreg key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment') pd = tuple(winreg.QueryValueEx(key, 'Path')[0].split(os.pathsep)) else: pd = () return pd # # libc # @lazyobject def LIBC(): """The platform dependent libc implementation.""" if ON_DARWIN: libc = ctypes.CDLL(ctypes.util.find_library("c")) elif ON_CYGWIN: libc = ctypes.CDLL('cygwin1.dll') elif ON_BSD: try: libc = ctypes.CDLL('libc.so') except AttributeError: libc = None except OSError: # OS X; can't use ctypes.util.find_library because that creates # a new process on Linux, which is undesirable. try: libc = ctypes.CDLL('libc.dylib') except OSError: libc = None elif ON_POSIX: try: libc = ctypes.CDLL('libc.so') except AttributeError: libc = None except OSError: # Debian and derivatives do the wrong thing because /usr/lib/libc.so # is a GNU ld script rather than an ELF object. To get around this, we # have to be more specific. # We don't want to use ctypes.util.find_library because that creates a # new process on Linux. We also don't want to try too hard because at # this point we're already pretty sure this isn't Linux. try: libc = ctypes.CDLL('libc.so.6') except OSError: libc = None if not hasattr(libc, 'sysinfo'): # Not Linux. libc = None elif ON_WINDOWS: if hasattr(ctypes, 'windll') and hasattr(ctypes.windll, 'kernel32'): libc = ctypes.windll.kernel32 else: try: # Windows CE uses the cdecl calling convention. libc = ctypes.CDLL('coredll.lib') except (AttributeError, OSError): libc = None elif ON_BEOS: libc = ctypes.CDLL('libroot.so') else: libc = None return libc xonsh-0.6.0/xonsh/pretty.py000066400000000000000000000652421320541242300157210ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Python advanced pretty printer. This pretty printer is intended to replace the old `pprint` python module which does not allow developers to provide their own pretty print callbacks. This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`. The following implementations were forked from the IPython project: * Copyright (c) 2008-2014, IPython Development Team * Copyright (C) 2001-2007 Fernando Perez * Copyright (c) 2001, Janko Hauser * Copyright (c) 2001, Nathaniel Gray Example Usage ------------- To directly print the representation of an object use `pprint`:: from pretty import pretty_print pretty_pprint(complex_object) To get a string of the output use `pretty`:: from pretty import pretty string = pretty(complex_object) Extending --------- The pretty library allows developers to add pretty printing rules for their own objects. 
This process is straightforward. All you have to do is to add a `_repr_pretty_` method to your object and call the methods on the pretty printer passed:: class MyObject(object): def _repr_pretty_(self, p, cycle): ... Here is an example implementation of a `_repr_pretty_` method for a list subclass:: class MyList(list): def _repr_pretty_(self, p, cycle): if cycle: p.text('MyList(...)') else: with p.group(8, 'MyList([', '])'): for idx, item in enumerate(self): if idx: p.text(',') p.breakable() p.pretty(item) The `cycle` parameter is `True` if pretty detected a cycle. You *have* to react to that or the result is an infinite loop. `p.text()` just adds non breaking text to the output, `p.breakable()` either adds a whitespace or breaks here. If you pass it an argument it's used instead of the default space. `p.pretty` prettyprints another object using the pretty print method. The first parameter to the `group` function specifies the extra indentation of the next line. In this example the next item will either be on the same line (if the items are short enough) or aligned with the right edge of the opening bracket of `MyList`. If you just want to indent something you can use the group function without open / close parameters. You can also use this code:: with p.indent(2): ... :copyright: 2007 by Armin Ronacher. Portions (c) 2009 by Robert Kern. :license: BSD License. """ import io import re import sys import types import datetime import contextlib import collections from xonsh.lazyasd import LazyObject, lazyobject __all__ = [ 'pretty', 'pretty_print', 'PrettyPrinter', 'RepresentationPrinter', 'for_type', 'for_type_by_name'] MAX_SEQ_LENGTH = 1000 def _safe_getattr(obj, attr, default=None): """Safe version of getattr. Same as getattr, but will return ``default`` on any Exception, rather than raising. """ try: return getattr(obj, attr, default) except Exception: return default CUnicodeIO = io.StringIO def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): """ Pretty print the object's representation. """ stream = CUnicodeIO() printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length) printer.pretty(obj) printer.flush() return stream.getvalue() def pretty_print(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): """ Like pretty() but print to stdout. """ printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length) printer.pretty(obj) printer.flush() sys.stdout.write(newline) sys.stdout.flush() class _PrettyPrinterBase(object): @contextlib.contextmanager def indent(self, indent): """with statement support for indenting/dedenting.""" self.indentation += indent try: yield finally: self.indentation -= indent @contextlib.contextmanager def group(self, indent=0, open='', close=''): """like begin_group / end_group but for the with statement.""" self.begin_group(indent, open) try: yield finally: self.end_group(indent, close) class PrettyPrinter(_PrettyPrinterBase): """ Baseclass for the `RepresentationPrinter` prettyprinter that is used to generate pretty reprs of objects. Contrary to the `RepresentationPrinter` this printer knows nothing about the default pprinters or the `_repr_pretty_` callback method. 
""" def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): self.output = output self.max_width = max_width self.newline = newline self.max_seq_length = max_seq_length self.output_width = 0 self.buffer_width = 0 self.buffer = collections.deque() root_group = Group(0) self.group_stack = [root_group] self.group_queue = GroupQueue(root_group) self.indentation = 0 def _break_outer_groups(self): while self.max_width < self.output_width + self.buffer_width: group = self.group_queue.deq() if not group: return while group.breakables: x = self.buffer.popleft() self.output_width = x.output(self.output, self.output_width) self.buffer_width -= x.width while self.buffer and isinstance(self.buffer[0], Text): x = self.buffer.popleft() self.output_width = x.output(self.output, self.output_width) self.buffer_width -= x.width def text(self, obj): """Add literal text to the output.""" width = len(obj) if self.buffer: text = self.buffer[-1] if not isinstance(text, Text): text = Text() self.buffer.append(text) text.add(obj, width) self.buffer_width += width self._break_outer_groups() else: self.output.write(obj) self.output_width += width def breakable(self, sep=' '): """ Add a breakable separator to the output. This does not mean that it will automatically break here. If no breaking on this position takes place the `sep` is inserted which default to one space. """ width = len(sep) group = self.group_stack[-1] if group.want_break: self.flush() self.output.write(self.newline) self.output.write(' ' * self.indentation) self.output_width = self.indentation self.buffer_width = 0 else: self.buffer.append(Breakable(sep, width, self)) self.buffer_width += width self._break_outer_groups() def break_(self): """ Explicitly insert a newline into the output, maintaining correct indentation. """ self.flush() self.output.write(self.newline) self.output.write(' ' * self.indentation) self.output_width = self.indentation self.buffer_width = 0 def begin_group(self, indent=0, open=''): """ Begin a group. If you want support for python < 2.5 which doesn't has the with statement this is the preferred way: p.begin_group(1, '{') ... p.end_group(1, '}') The python 2.5 expression would be this: with p.group(1, '{', '}'): ... The first parameter specifies the indentation for the next line (usually the width of the opening text), the second the opening text. All parameters are optional. """ if open: self.text(open) group = Group(self.group_stack[-1].depth + 1) self.group_stack.append(group) self.group_queue.enq(group) self.indentation += indent def _enumerate(self, seq): """like enumerate, but with an upper limit on the number of items""" for idx, x in enumerate(seq): if self.max_seq_length and idx >= self.max_seq_length: self.text(',') self.breakable() self.text('...') return yield idx, x def end_group(self, dedent=0, close=''): """End a group. See `begin_group` for more details.""" self.indentation -= dedent group = self.group_stack.pop() if not group.breakables: self.group_queue.remove(group) if close: self.text(close) def flush(self): """Flush data that is left in the buffer.""" for data in self.buffer: self.output_width += data.output(self.output, self.output_width) self.buffer.clear() self.buffer_width = 0 def _get_mro(obj_class): """ Get a reasonable method resolution order of a class and its superclasses for both old-style and new-style classes. """ if not hasattr(obj_class, '__mro__'): # Old-style class. Mix in object to make a fake new-style class. 
try: obj_class = type(obj_class.__name__, (obj_class, object), {}) except TypeError: # Old-style extension type that does not descend from object. # FIXME: try to construct a more thorough MRO. mro = [obj_class] else: mro = obj_class.__mro__[1:-1] else: mro = obj_class.__mro__ return mro class RepresentationPrinter(PrettyPrinter): """ Special pretty printer that has a `pretty` method that calls the pretty printer for a python object. This class stores processing data on `self` so you must *never* use this class in a threaded environment. Always lock it or reinstantiate it. Instances also have a verbose flag callbacks can access to control their output. For example the default instance repr prints all attributes and methods that are not prefixed by an underscore if the printer is in verbose mode. """ def __init__(self, output, verbose=False, max_width=79, newline='\n', singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None, max_seq_length=MAX_SEQ_LENGTH): PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length) self.verbose = verbose self.stack = [] if singleton_pprinters is None: singleton_pprinters = _singleton_pprinters.copy() self.singleton_pprinters = singleton_pprinters if type_pprinters is None: type_pprinters = _type_pprinters.copy() self.type_pprinters = type_pprinters if deferred_pprinters is None: deferred_pprinters = _deferred_type_pprinters.copy() self.deferred_pprinters = deferred_pprinters def pretty(self, obj): """Pretty print the given object.""" obj_id = id(obj) cycle = obj_id in self.stack self.stack.append(obj_id) self.begin_group() try: obj_class = _safe_getattr(obj, '__class__', None) or type(obj) # First try to find registered singleton printers for the type. try: printer = self.singleton_pprinters[obj_id] except (TypeError, KeyError): pass else: return printer(obj, self, cycle) # Next walk the mro and check for either: # 1) a registered printer # 2) a _repr_pretty_ method for cls in _get_mro(obj_class): if cls in self.type_pprinters: # printer registered in self.type_pprinters return self.type_pprinters[cls](obj, self, cycle) else: # deferred printer printer = self._in_deferred_types(cls) if printer is not None: return printer(obj, self, cycle) else: # Finally look for special method names. # Some objects automatically create any requested # attribute. Try to ignore most of them by checking for # callability. if '_repr_pretty_' in cls.__dict__: meth = cls._repr_pretty_ if callable(meth): return meth(obj, self, cycle) return _default_pprint(obj, self, cycle) finally: self.end_group() self.stack.pop() def _in_deferred_types(self, cls): """ Check if the given class is specified in the deferred type registry. Returns the printer from the registry if it exists, and None if the class is not in the registry. Successful matches will be moved to the regular type registry for future use. """ mod = _safe_getattr(cls, '__module__', None) name = _safe_getattr(cls, '__name__', None) key = (mod, name) printer = None if key in self.deferred_pprinters: # Move the printer over to the regular registry. 
printer = self.deferred_pprinters.pop(key) self.type_pprinters[cls] = printer return printer class Printable(object): def output(self, stream, output_width): return output_width class Text(Printable): def __init__(self): self.objs = [] self.width = 0 def output(self, stream, output_width): for obj in self.objs: stream.write(obj) return output_width + self.width def add(self, obj, width): self.objs.append(obj) self.width += width class Breakable(Printable): def __init__(self, seq, width, pretty): self.obj = seq self.width = width self.pretty = pretty self.indentation = pretty.indentation self.group = pretty.group_stack[-1] self.group.breakables.append(self) def output(self, stream, output_width): self.group.breakables.popleft() if self.group.want_break: stream.write(self.pretty.newline) stream.write(' ' * self.indentation) return self.indentation if not self.group.breakables: self.pretty.group_queue.remove(self.group) stream.write(self.obj) return output_width + self.width class Group(Printable): def __init__(self, depth): self.depth = depth self.breakables = collections.deque() self.want_break = False class GroupQueue(object): def __init__(self, *groups): self.queue = [] for group in groups: self.enq(group) def enq(self, group): depth = group.depth while depth > len(self.queue) - 1: self.queue.append([]) self.queue[depth].append(group) def deq(self): for stack in self.queue: for idx, group in enumerate(reversed(stack)): if group.breakables: del stack[idx] group.want_break = True return group for group in stack: group.want_break = True del stack[:] def remove(self, group): try: self.queue[group.depth].remove(group) except ValueError: pass @lazyobject def _baseclass_reprs(): try: br = (object.__repr__, types.InstanceType.__repr__) except AttributeError: # Python 3 br = (object.__repr__,) return br def _default_pprint(obj, p, cycle): """ The default print function. Used if an object does not provide one and it's none of the builtin objects. """ klass = _safe_getattr(obj, '__class__', None) or type(obj) if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs: # A user-provided repr. Find newlines and replace them with p.break_() _repr_pprint(obj, p, cycle) return p.begin_group(1, '<') p.pretty(klass) p.text(' at 0x%x' % id(obj)) if cycle: p.text(' ...') elif p.verbose: first = True for key in dir(obj): if not key.startswith('_'): try: value = getattr(obj, key) except AttributeError: continue if isinstance(value, types.MethodType): continue if not first: p.text(',') p.breakable() p.text(key) p.text('=') step = len(key) + 1 p.indentation += step p.pretty(value) p.indentation -= step first = False p.end_group(1, '>') def _seq_pprinter_factory(start, end, basetype): """ Factory that returns a pprint function useful for sequences. Used by the default pprint for tuples, dicts, and lists. """ def inner(obj, p, cycle): typ = type(obj) if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__: # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: return p.text(start + '...' + end) step = len(start) p.begin_group(step, start) for idx, x in p._enumerate(obj): if idx: p.text(',') p.breakable() p.pretty(x) if len(obj) == 1 and type(obj) is tuple: # Special case for 1-item tuples. p.text(',') p.end_group(step, end) return inner def _set_pprinter_factory(start, end, basetype): """ Factory that returns a pprint function useful for sets and frozensets. 
""" def inner(obj, p, cycle): typ = type(obj) if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__: # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: return p.text(start + '...' + end) if len(obj) == 0: # Special case. p.text(basetype.__name__ + '()') else: step = len(start) p.begin_group(step, start) # Like dictionary keys, we will try to sort the items if there aren't too many items = obj if not (p.max_seq_length and len(obj) >= p.max_seq_length): try: items = sorted(obj) except Exception: # Sometimes the items don't sort. pass for idx, x in p._enumerate(items): if idx: p.text(',') p.breakable() p.pretty(x) p.end_group(step, end) return inner def _dict_pprinter_factory(start, end, basetype=None): """ Factory that returns a pprint function used by the default pprint of dicts and dict proxies. """ def inner(obj, p, cycle): typ = type(obj) if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__: # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: return p.text('{...}') p.begin_group(1, start) keys = obj.keys() # if dict isn't large enough to be truncated, sort keys before displaying if not (p.max_seq_length and len(obj) >= p.max_seq_length): try: keys = sorted(keys) except Exception: # Sometimes the keys don't sort. pass for idx, key in p._enumerate(keys): if idx: p.text(',') p.breakable() p.pretty(key) p.text(': ') p.pretty(obj[key]) p.end_group(1, end) return inner def _super_pprint(obj, p, cycle): """The pprint for the super type.""" p.begin_group(8, '') def _re_pattern_pprint(obj, p, cycle): """The pprint function for regular expression patterns.""" p.text('re.compile(') pattern = repr(obj.pattern) if pattern[:1] in 'uU': pattern = pattern[1:] prefix = 'ur' else: prefix = 'r' pattern = prefix + pattern.replace('\\\\', '\\') p.text(pattern) if obj.flags: p.text(',') p.breakable() done_one = False for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL', 'UNICODE', 'VERBOSE', 'DEBUG'): if obj.flags & getattr(re, flag): if done_one: p.text('|') p.text('re.' + flag) done_one = True p.text(')') def _type_pprint(obj, p, cycle): """The pprint for classes and types.""" # Heap allocated types might not have the module attribute, # and others may set it to None. # Checks for a __repr__ override in the metaclass if type(obj).__repr__ is not type.__repr__: _repr_pprint(obj, p, cycle) return mod = _safe_getattr(obj, '__module__', None) try: name = obj.__qualname__ if not isinstance(name, str): # This can happen if the type implements __qualname__ as a property # or other descriptor in Python 2. raise Exception("Try __name__") except Exception: name = obj.__name__ if not isinstance(name, str): name = '' if mod in (None, '__builtin__', 'builtins', 'exceptions'): p.text(name) else: p.text(mod + '.' + name) def _repr_pprint(obj, p, cycle): """A pprint that just redirects to the normal repr function.""" # Find newlines and replace them with p.break_() output = repr(obj) for idx, output_line in enumerate(output.splitlines()): if idx: p.break_() p.text(output_line) def _function_pprint(obj, p, cycle): """Base pprint for all functions and builtin functions.""" name = _safe_getattr(obj, '__qualname__', obj.__name__) mod = obj.__module__ if mod and mod not in ('__builtin__', 'builtins', 'exceptions'): name = mod + '.' 
+ name p.text('' % name) def _exception_pprint(obj, p, cycle): """Base pprint for all exceptions.""" name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__) if obj.__class__.__module__ not in ('exceptions', 'builtins'): name = '%s.%s' % (obj.__class__.__module__, name) step = len(name) + 1 p.begin_group(step, name + '(') for idx, arg in enumerate(getattr(obj, 'args', ())): if idx: p.text(',') p.breakable() p.pretty(arg) p.end_group(step, ')') @lazyobject def _type_pprinters(): #: printers for builtin types tp = { int: _repr_pprint, float: _repr_pprint, str: _repr_pprint, tuple: _seq_pprinter_factory('(', ')', tuple), list: _seq_pprinter_factory('[', ']', list), dict: _dict_pprinter_factory('{', '}', dict), set: _set_pprinter_factory('{', '}', set), frozenset: _set_pprinter_factory('frozenset({', '})', frozenset), super: _super_pprint, type(re.compile('')): _re_pattern_pprint, type: _type_pprint, types.FunctionType: _function_pprint, types.BuiltinFunctionType: _function_pprint, types.MethodType: _repr_pprint, datetime.datetime: _repr_pprint, datetime.timedelta: _repr_pprint, } #: the exception base try: _exception_base = BaseException except NameError: _exception_base = Exception tp[_exception_base] = _exception_pprint try: tp[types.DictProxyType] = _dict_pprinter_factory('') tp[types.ClassType] = _type_pprint tp[types.SliceType] = _repr_pprint except AttributeError: # Python 3 tp[slice] = _repr_pprint try: tp[xrange] = _repr_pprint tp[long] = _repr_pprint tp[unicode] = _repr_pprint except NameError: tp[range] = _repr_pprint tp[bytes] = _repr_pprint return tp #: printers for types specified by name @lazyobject def _deferred_type_pprinters(): dtp = {} for_type_by_name('collections', 'defaultdict', _defaultdict_pprint, dtp=dtp) for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint, dtp=dtp) for_type_by_name('collections', 'deque', _deque_pprint, dtp=dtp) for_type_by_name('collections', 'Counter', _counter_pprint, dtp=dtp) return dtp def for_type(typ, func): """ Add a pretty printer for a given type. """ oldfunc = _type_pprinters.get(typ, None) if func is not None: # To support easy restoration of old pprinters, we need to ignore Nones. _type_pprinters[typ] = func return oldfunc def for_type_by_name(type_module, type_name, func, dtp=None): """ Add a pretty printer for a type specified by the module and name of a type rather than the type object itself. """ if dtp is None: dtp = _deferred_type_pprinters key = (type_module, type_name) oldfunc = dtp.get(key, None) if func is not None: # To support easy restoration of old pprinters, we need to ignore Nones. 
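# --- Illustrative usage sketch (not part of the original source; the Point
# class and _point_pprint below are hypothetical) -----------------------------
# A printer registered with for_type() receives (obj, p, cycle), exactly like
# a _repr_pretty_ method:
#
#     class Point:
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#     def _point_pprint(obj, p, cycle):
#         p.text('Point(...)' if cycle else 'Point(%r, %r)' % (obj.x, obj.y))
#
#     old = for_type(Point, _point_pprint)  # returns the previous printer (likely None)
#     pretty(Point(1, 2))                   # -> 'Point(1, 2)'
#
# for_type_by_name() does the same thing lazily, keyed on (module, name), so
# the type itself never has to be imported up front.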
dtp[key] = func return oldfunc #: printers for the default singletons _singleton_pprinters = LazyObject(lambda: dict.fromkeys( map(id, [None, True, False, Ellipsis, NotImplemented]), _repr_pprint), globals(), '_singleton_pprinters') def _defaultdict_pprint(obj, p, cycle): name = obj.__class__.__name__ with p.group(len(name) + 1, name + '(', ')'): if cycle: p.text('...') else: p.pretty(obj.default_factory) p.text(',') p.breakable() p.pretty(dict(obj)) def _ordereddict_pprint(obj, p, cycle): name = obj.__class__.__name__ with p.group(len(name) + 1, name + '(', ')'): if cycle: p.text('...') elif len(obj): p.pretty(list(obj.items())) def _deque_pprint(obj, p, cycle): name = obj.__class__.__name__ with p.group(len(name) + 1, name + '(', ')'): if cycle: p.text('...') else: p.pretty(list(obj)) def _counter_pprint(obj, p, cycle): name = obj.__class__.__name__ with p.group(len(name) + 1, name + '(', ')'): if cycle: p.text('...') elif len(obj): p.pretty(dict(obj)) xonsh-0.6.0/xonsh/proc.py000066400000000000000000002420031320541242300153250ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Interface for running Python functions as subprocess-mode commands. Code for several helper methods in the `ProcProxy` class have been reproduced without modification from `subprocess.py` in the Python 3.4.2 standard library. The contents of `subprocess.py` (and, thus, the reproduced methods) are Copyright (c) 2003-2005 by Peter Astrand and were licensed to the Python Software foundation under a Contributor Agreement. """ import io import os import re import sys import time import queue import array import ctypes import signal import inspect import builtins import functools import threading import subprocess import collections.abc as cabc from xonsh.platform import (ON_WINDOWS, ON_POSIX, CAN_RESIZE_WINDOW, LFLAG, CC) from xonsh.tools import (redirect_stdout, redirect_stderr, print_exception, XonshCalledProcessError, findfirst, on_main_thread, XonshError, format_std_prepost) from xonsh.lazyasd import lazyobject, LazyObject from xonsh.jobs import wait_for_active_job, give_terminal_to, _continue from xonsh.lazyimps import fcntl, termios, _winapi, msvcrt, winutils # these decorators are imported for users back-compatible from xonsh.tools import unthreadable, uncapturable # NOQA # foreground has be deprecated foreground = unthreadable @lazyobject def STDOUT_CAPTURE_KINDS(): return frozenset(['stdout', 'object']) # The following escape codes are xterm codes. # See http://rtfm.etla.org/xterm/ctlseq.html for more. MODE_NUMS = ('1049', '47', '1047') START_ALTERNATE_MODE = LazyObject( lambda: frozenset('\x1b[?{0}h'.format(i).encode() for i in MODE_NUMS), globals(), 'START_ALTERNATE_MODE') END_ALTERNATE_MODE = LazyObject( lambda: frozenset('\x1b[?{0}l'.format(i).encode() for i in MODE_NUMS), globals(), 'END_ALTERNATE_MODE') ALTERNATE_MODE_FLAGS = LazyObject( lambda: tuple(START_ALTERNATE_MODE) + tuple(END_ALTERNATE_MODE), globals(), 'ALTERNATE_MODE_FLAGS') RE_HIDDEN_BYTES = LazyObject(lambda: re.compile(b'(\001.*?\002)'), globals(), 'RE_HIDDEN') @lazyobject def RE_VT100_ESCAPE(): return re.compile(b'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]') @lazyobject def RE_HIDE_ESCAPE(): return re.compile(b'(' + RE_HIDDEN_BYTES.pattern + b'|' + RE_VT100_ESCAPE.pattern + b')') class QueueReader: """Provides a file-like interface to reading from a queue.""" def __init__(self, fd, timeout=None): """ Parameters ---------- fd : int A file descriptor timeout : float or None, optional The queue reading timeout. 
""" self.fd = fd self.timeout = timeout self.closed = False self.queue = queue.Queue() self.thread = None def close(self): """close the reader""" self.closed = True def is_fully_read(self): """Returns whether or not the queue is fully read and the reader is closed. """ return (self.closed and (self.thread is None or not self.thread.is_alive()) and self.queue.empty()) def read_queue(self): """Reads a single chunk from the queue. This is blocking if the timeout is None and non-blocking otherwise. """ try: return self.queue.get(block=True, timeout=self.timeout) except queue.Empty: return b'' def read(self, size=-1): """Reads bytes from the file.""" i = 0 buf = b'' while size < 0 or i != size: line = self.read_queue() if line: buf += line else: break i += len(line) return buf def readline(self, size=-1): """Reads a line, or a partial line from the file descriptor.""" i = 0 nl = b'\n' buf = b'' while size < 0 or i != size: line = self.read_queue() if line: buf += line if line.endswith(nl): break else: break i += len(line) return buf def _read_all_lines(self): """This reads all remaining lines in a blocking fashion.""" lines = [] while not self.is_fully_read(): chunk = self.read_queue() lines.extend(chunk.splitlines(keepends=True)) return lines def readlines(self, hint=-1): """Reads lines from the file descriptor. This is blocking for negative hints (i.e. read all the remaining lines) and non-blocking otherwise. """ if hint == -1: return self._read_all_lines() lines = [] while len(lines) != hint: chunk = self.read_queue() if not chunk: break lines.extend(chunk.splitlines(keepends=True)) return lines def fileno(self): """Returns the file descriptor number.""" return self.fd @staticmethod def readable(): """Returns true, because this object is always readable.""" return True def iterqueue(self): """Iterates through all remaining chunks in a blocking fashion.""" while not self.is_fully_read(): chunk = self.read_queue() if not chunk: continue yield chunk def populate_fd_queue(reader, fd, queue): """Reads 1 kb of data from a file descriptor into a queue. If this ends or fails, it flags the calling reader object as closed. """ while True: try: c = os.read(fd, 1024) except OSError: reader.closed = True break if c: queue.put(c) else: reader.closed = True break class NonBlockingFDReader(QueueReader): """A class for reading characters from a file descriptor on a background thread. This has the advantages that the calling thread can close the file and that the reading does not block the calling thread. """ def __init__(self, fd, timeout=None): """ Parameters ---------- fd : int A file descriptor timeout : float or None, optional The queue reading timeout. """ super().__init__(fd, timeout=timeout) # start reading from stream self.thread = threading.Thread(target=populate_fd_queue, args=(self, self.fd, self.queue)) self.thread.daemon = True self.thread.start() def populate_buffer(reader, fd, buffer, chunksize): """Reads bytes from the file descriptor and copies them into a buffer. The reads happen in parallel using the pread() syscall; which is only available on POSIX systems. If the read fails for any reason, the reader is flagged as closed. 
""" offset = 0 while True: try: buf = os.pread(fd, chunksize, offset) except OSError: reader.closed = True break if buf: buffer.write(buf) offset += len(buf) else: reader.closed = True break class BufferedFDParallelReader: """Buffered, parallel background thread reader.""" def __init__(self, fd, buffer=None, chunksize=1024): """ Parameters ---------- fd : int File descriptor from which to read. buffer : binary file-like or None, optional A buffer to write bytes into. If None, a new BytesIO object is created. chunksize : int, optional The max size of the parallel reads, default 1 kb. """ self.fd = fd self.buffer = io.BytesIO() if buffer is None else buffer self.chunksize = chunksize self.closed = False # start reading from stream self.thread = threading.Thread(target=populate_buffer, args=(self, fd, self.buffer, chunksize)) self.thread.daemon = True self.thread.start() def _expand_console_buffer(cols, max_offset, expandsize, orig_posize, fd): # if we are getting close to the end of the console buffer, # expand it so that we can read from it successfully. if cols == 0: return orig_posize[-1], max_offset, orig_posize rows = ((max_offset + expandsize)//cols) + 1 winutils.set_console_screen_buffer_size(cols, rows, fd=fd) orig_posize = orig_posize[:3] + (rows,) max_offset = (rows - 1) * cols return rows, max_offset, orig_posize def populate_console(reader, fd, buffer, chunksize, queue, expandsize=None): """Reads bytes from the file descriptor and puts lines into the queue. The reads happened in parallel, using xonsh.winutils.read_console_output_character(), and is thus only available on windows. If the read fails for any reason, the reader is flagged as closed. """ # OK, so this function is super annoying because Windows stores its # buffers as a 2D regular, dense array -- without trailing newlines. # Meanwhile, we want to add *lines* to the queue. Also, as is typical # with parallel reads, the entire buffer that you ask for may not be # filled. Thus we have to deal with the full generality. # 1. reads may end in the middle of a line # 2. excess whitespace at the end of a line may not be real, unless # 3. you haven't read to the end of the line yet! # So there are alignment issues everywhere. Also, Windows will automatically # read past the current cursor position, even though there is presumably # nothing to see there. # # These chunked reads basically need to happen like this because, # a. The default buffer size is HUGE for the console (90k lines x 120 cols) # as so we can't just read in everything at the end and see what we # care about without a noticeable performance hit. # b. Even with this huge size, it is still possible to write more lines than # this, so we should scroll along with the console. # Unfortunately, because we do not have control over the terminal emulator, # It is not possible to compute how far back we should set the beginning # read position because we don't know how many characters have been popped # off the top of the buffer. If we did somehow know this number we could do # something like the following: # # new_offset = (y*cols) + x # if new_offset == max_offset: # new_offset -= scrolled_offset # x = new_offset%cols # y = new_offset//cols # continue # # So this method is imperfect and only works as long as the screen has # room to expand to. Thus the trick here is to expand the screen size # when we get close enough to the end of the screen. There remain some # async issues related to not being able to set the cursor position. 
# but they just affect the alignment / capture of the output of the # first command run after a screen resize. if expandsize is None: expandsize = 100 * chunksize x, y, cols, rows = posize = winutils.get_position_size(fd) pre_x = pre_y = -1 orig_posize = posize offset = (cols*y) + x max_offset = (rows - 1) * cols # I believe that there is a bug in PTK that if we reset the # cursor position, the cursor on the next prompt is accidentally on # the next line. If this is fixed, uncomment the following line. # if max_offset < offset + expandsize: # rows, max_offset, orig_posize = _expand_console_buffer( # cols, max_offset, expandsize, # orig_posize, fd) # winutils.set_console_cursor_position(x, y, fd=fd) while True: posize = winutils.get_position_size(fd) offset = (cols*y) + x if ((posize[1], posize[0]) <= (y, x) and posize[2:] == (cols, rows)) or \ (pre_x == x and pre_y == y): # already at or ahead of the current cursor position. if reader.closed: break else: time.sleep(reader.timeout) continue elif max_offset <= offset + expandsize: ecb = _expand_console_buffer(cols, max_offset, expandsize, orig_posize, fd) rows, max_offset, orig_posize = ecb continue elif posize[2:] == (cols, rows): # cursor updated but screen size is the same. pass else: # screen size changed, which is offset preserving orig_posize = posize cols, rows = posize[2:] x = offset % cols y = offset // cols pre_x = pre_y = -1 max_offset = (rows - 1) * cols continue try: buf = winutils.read_console_output_character(x=x, y=y, fd=fd, buf=buffer, bufsize=chunksize, raw=True) except (OSError, IOError): reader.closed = True break # cursor position and offset if not reader.closed: buf = buf.rstrip() nread = len(buf) if nread == 0: time.sleep(reader.timeout) continue cur_x, cur_y = posize[0], posize[1] cur_offset = (cols*cur_y) + cur_x beg_offset = (cols*y) + x end_offset = beg_offset + nread if end_offset > cur_offset and cur_offset != max_offset: buf = buf[:cur_offset-end_offset] # convert to lines xshift = cols - x yshift = (nread // cols) + (1 if nread % cols > 0 else 0) lines = [buf[:xshift]] lines += [buf[l * cols + xshift:(l + 1) * cols + xshift] for l in range(yshift)] lines = [line for line in lines if line] if not lines: time.sleep(reader.timeout) continue # put lines in the queue nl = b'\n' for line in lines[:-1]: queue.put(line.rstrip() + nl) if len(lines[-1]) == xshift: queue.put(lines[-1].rstrip() + nl) else: queue.put(lines[-1]) # update x and y locations if (beg_offset + len(buf)) % cols == 0: new_offset = beg_offset + len(buf) else: new_offset = beg_offset + len(buf.rstrip()) pre_x = x pre_y = y x = new_offset % cols y = new_offset // cols time.sleep(reader.timeout) class ConsoleParallelReader(QueueReader): """Parallel reader for consoles that runs in a background thread. This is only needed, available, and useful on Windows. """ def __init__(self, fd, buffer=None, chunksize=1024, timeout=None): """ Parameters ---------- fd : int Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), and 2 for stderr. buffer : ctypes.c_wchar_p, optional An existing buffer to (re-)use. chunksize : int, optional The max size of the parallel reads, default 1 kb. timeout : float, optional The queue reading timeout. 
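If None, the value of the ``$XONSH_PROC_FREQUENCY`` environment
variable is used instead.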
""" timeout = timeout or builtins.__xonsh_env__.get('XONSH_PROC_FREQUENCY') super().__init__(fd, timeout=timeout) self._buffer = buffer # this cannot be public if buffer is None: self._buffer = ctypes.c_char_p(b" " * chunksize) self.chunksize = chunksize # start reading from stream self.thread = threading.Thread(target=populate_console, args=(self, fd, self._buffer, chunksize, self.queue)) self.thread.daemon = True self.thread.start() def safe_fdclose(handle, cache=None): """Closes a file handle in the safest way possible, and potentially storing the result. """ if cache is not None and cache.get(handle, False): return status = True if handle is None: pass elif isinstance(handle, int): if handle >= 3: # don't close stdin, stdout, stderr, -1 try: os.close(handle) except OSError: status = False elif handle is sys.stdin or handle is sys.stdout or handle is sys.stderr: # don't close stdin, stdout, or stderr pass else: try: handle.close() except OSError: status = False if cache is not None: cache[handle] = status def safe_flush(handle): """Attempts to safely flush a file handle, returns success bool.""" status = True try: handle.flush() except OSError: status = False return status def still_writable(fd): """Determines whether a file descriptor is still writable by trying to write an empty string and seeing if it fails. """ try: os.write(fd, b'') status = True except OSError: status = False return status class PopenThread(threading.Thread): """A thread for running and managing subprocess. This allows reading from the stdin, stdout, and stderr streams in a non-blocking fashion. This takes the same arguments and keyword arguments as regular Popen. This requires that the captured_stdout and captured_stderr attributes to be set following instantiation. """ def __init__(self, *args, stdin=None, stdout=None, stderr=None, **kwargs): super().__init__() self.lock = threading.RLock() env = builtins.__xonsh_env__ # stdin setup self.orig_stdin = stdin if stdin is None: self.stdin_fd = 0 elif isinstance(stdin, int): self.stdin_fd = stdin else: self.stdin_fd = stdin.fileno() self.store_stdin = env.get('XONSH_STORE_STDIN') self.timeout = env.get('XONSH_PROC_FREQUENCY') self.in_alt_mode = False self.stdin_mode = None # stdout setup self.orig_stdout = stdout self.stdout_fd = 1 if stdout is None else stdout.fileno() self._set_pty_size() # stderr setup self.orig_stderr = stderr # Set some signal handles, if we can. Must come before process # is started to prevent deadlock on windows self.proc = None # has to be here for closure for handles self.old_int_handler = self.old_winch_handler = None self.old_tstp_handler = self.old_quit_handler = None if on_main_thread(): self.old_int_handler = signal.signal(signal.SIGINT, self._signal_int) if ON_POSIX: self.old_tstp_handler = signal.signal(signal.SIGTSTP, self._signal_tstp) self.old_quit_handler = signal.signal(signal.SIGQUIT, self._signal_quit) if CAN_RESIZE_WINDOW: self.old_winch_handler = signal.signal(signal.SIGWINCH, self._signal_winch) # start up process if ON_WINDOWS and stdout is not None: os.set_handle_inheritable(stdout.fileno(), False) try: self.proc = proc = subprocess.Popen(*args, stdin=stdin, stdout=stdout, stderr=stderr, **kwargs) except Exception: self._clean_up() raise self.pid = proc.pid self.universal_newlines = uninew = proc.universal_newlines if uninew: self.encoding = enc = env.get('XONSH_ENCODING') self.encoding_errors = err = env.get('XONSH_ENCODING_ERRORS') self.stdin = io.BytesIO() # stdin is always bytes! 
self.stdout = io.TextIOWrapper(io.BytesIO(), encoding=enc, errors=err) self.stderr = io.TextIOWrapper(io.BytesIO(), encoding=enc, errors=err) else: self.encoding = self.encoding_errors = None self.stdin = io.BytesIO() self.stdout = io.BytesIO() self.stderr = io.BytesIO() self.suspended = False self.prevs_are_closed = False self.start() def run(self): """Runs the subprocess by performing a parallel read on stdin if allowed, and copying bytes from captured_stdout to stdout and bytes from captured_stderr to stderr. """ proc = self.proc spec = self._wait_and_getattr('spec') # get stdin and apply parallel reader if needed. stdin = self.stdin if self.orig_stdin is None: origin = None elif ON_POSIX and self.store_stdin: origin = self.orig_stdin origfd = origin if isinstance(origin, int) else origin.fileno() origin = BufferedFDParallelReader(origfd, buffer=stdin) else: origin = None # get non-blocking stdout stdout = self.stdout.buffer if self.universal_newlines else self.stdout capout = spec.captured_stdout if capout is None: procout = None else: procout = NonBlockingFDReader(capout.fileno(), timeout=self.timeout) # get non-blocking stderr stderr = self.stderr.buffer if self.universal_newlines else self.stderr caperr = spec.captured_stderr if caperr is None: procerr = None else: procerr = NonBlockingFDReader(caperr.fileno(), timeout=self.timeout) # initial read from buffer self._read_write(procout, stdout, sys.__stdout__) self._read_write(procerr, stderr, sys.__stderr__) # loop over reads while process is running. i = j = cnt = 1 while proc.poll() is None: # this is here for CPU performance reasons. if i + j == 0: cnt = min(cnt + 1, 1000) tout = self.timeout * cnt if procout is not None: procout.timeout = tout if procerr is not None: procerr.timeout = tout elif cnt == 1: pass else: cnt = 1 if procout is not None: procout.timeout = self.timeout if procerr is not None: procerr.timeout = self.timeout # redirect some output! i = self._read_write(procout, stdout, sys.__stdout__) j = self._read_write(procerr, stderr, sys.__stderr__) if self.suspended: break if self.suspended: return # close files to send EOF to non-blocking reader. # capout & caperr seem to be needed only by Windows, while # orig_stdout & orig_stderr are need by posix and Windows. # Also, order seems to matter here, # with orig_* needed to be closed before cap* safe_fdclose(self.orig_stdout) safe_fdclose(self.orig_stderr) if ON_WINDOWS: safe_fdclose(capout) safe_fdclose(caperr) # read in the remaining data in a blocking fashion. while (procout is not None and not procout.is_fully_read()) or \ (procerr is not None and not procerr.is_fully_read()): self._read_write(procout, stdout, sys.__stdout__) self._read_write(procerr, stderr, sys.__stderr__) # kill the process if it is still alive. Happens when piping. if proc.poll() is None: proc.terminate() def _wait_and_getattr(self, name): """make sure the instance has a certain attr, and return it.""" while not hasattr(self, name): time.sleep(1e-7) return getattr(self, name) def _read_write(self, reader, writer, stdbuf): """Reads a chunk of bytes from a buffer and write into memory or back down to the standard buffer, as appropriate. Returns the number of successful reads. 
""" if reader is None: return 0 i = -1 for i, chunk in enumerate(iter(reader.read_queue, b'')): self._alt_mode_switch(chunk, writer, stdbuf) if i >= 0: writer.flush() stdbuf.flush() return i + 1 def _alt_mode_switch(self, chunk, membuf, stdbuf): """Enables recursively switching between normal capturing mode and 'alt' mode, which passes through values to the standard buffer. Pagers, text editors, curses applications, etc. use alternate mode. """ i, flag = findfirst(chunk, ALTERNATE_MODE_FLAGS) if flag is None: self._alt_mode_writer(chunk, membuf, stdbuf) else: # This code is executed when the child process switches the # terminal into or out of alternate mode. The line below assumes # that the user has opened vim, less, or similar, and writes writes # to stdin. j = i + len(flag) # write the first part of the chunk in the current mode. self._alt_mode_writer(chunk[:i], membuf, stdbuf) # switch modes # write the flag itself the current mode where alt mode is on # so that it is streamed to the terminal ASAP. # this is needed for terminal emulators to find the correct # positions before and after alt mode. alt_mode = (flag in START_ALTERNATE_MODE) if alt_mode: self.in_alt_mode = alt_mode self._alt_mode_writer(flag, membuf, stdbuf) self._enable_cbreak_stdin() else: self._alt_mode_writer(flag, membuf, stdbuf) self.in_alt_mode = alt_mode self._disable_cbreak_stdin() # recurse this function, but without the current flag. self._alt_mode_switch(chunk[j:], membuf, stdbuf) def _alt_mode_writer(self, chunk, membuf, stdbuf): """Write bytes to the standard buffer if in alt mode or otherwise to the in-memory buffer. """ if not chunk: pass # don't write empty values elif self.in_alt_mode: stdbuf.buffer.write(chunk) else: with self.lock: p = membuf.tell() membuf.seek(0, io.SEEK_END) membuf.write(chunk) membuf.seek(p) # # Window resize handlers # def _signal_winch(self, signum, frame): """Signal handler for SIGWINCH - window size has changed.""" self.send_signal(signal.SIGWINCH) self._set_pty_size() def _set_pty_size(self): """Sets the window size of the child pty based on the window size of our own controlling terminal. """ if ON_WINDOWS or not os.isatty(self.stdout_fd): return # Get the terminal size of the real terminal, set it on the # pseudoterminal. buf = array.array('h', [0, 0, 0, 0]) # 1 = stdout here try: fcntl.ioctl(1, termios.TIOCGWINSZ, buf, True) fcntl.ioctl(self.stdout_fd, termios.TIOCSWINSZ, buf) except OSError: pass # # SIGINT handler # def _signal_int(self, signum, frame): """Signal handler for SIGINT - Ctrl+C may have been pressed.""" self.send_signal(signum) if self.proc is not None and self.proc.poll() is not None: self._restore_sigint(frame=frame) if on_main_thread(): signal.pthread_kill(threading.get_ident(), signal.SIGINT) def _restore_sigint(self, frame=None): old = self.old_int_handler if old is not None: if on_main_thread(): signal.signal(signal.SIGINT, old) self.old_int_handler = None if frame is not None: self._disable_cbreak_stdin() if old is not None and old is not self._signal_int: old(signal.SIGINT, frame) # # SIGTSTP handler # def _signal_tstp(self, signum, frame): """Signal handler for suspending SIGTSTP - Ctrl+Z may have been pressed. 
""" self.suspended = True self.send_signal(signum) self._restore_sigtstp(frame=frame) def _restore_sigtstp(self, frame=None): old = self.old_tstp_handler if old is not None: if on_main_thread(): signal.signal(signal.SIGTSTP, old) self.old_tstp_handler = None if frame is not None: self._disable_cbreak_stdin() # # SIGQUIT handler # def _signal_quit(self, signum, frame): """Signal handler for quiting SIGQUIT - Ctrl+\ may have been pressed. """ self.send_signal(signum) self._restore_sigquit(frame=frame) def _restore_sigquit(self, frame=None): old = self.old_quit_handler if old is not None: if on_main_thread(): signal.signal(signal.SIGQUIT, old) self.old_quit_handler = None if frame is not None: self._disable_cbreak_stdin() # # cbreak mode handlers # def _enable_cbreak_stdin(self): if not ON_POSIX: return try: self.stdin_mode = termios.tcgetattr(self.stdin_fd)[:] except termios.error: # this can happen for cases where another process is controlling # xonsh's tty device, such as in testing. self.stdin_mode = None return new = self.stdin_mode[:] new[LFLAG] &= ~(termios.ECHO | termios.ICANON) new[CC][termios.VMIN] = 1 new[CC][termios.VTIME] = 0 try: # termios.TCSAFLUSH may be less reliable than termios.TCSANOW termios.tcsetattr(self.stdin_fd, termios.TCSANOW, new) except termios.error: self._disable_cbreak_stdin() def _disable_cbreak_stdin(self): if not ON_POSIX or self.stdin_mode is None: return new = self.stdin_mode[:] new[LFLAG] |= termios.ECHO | termios.ICANON new[CC][termios.VMIN] = 1 new[CC][termios.VTIME] = 0 try: termios.tcsetattr(self.stdin_fd, termios.TCSANOW, new) except termios.error: pass # # Dispatch methods # def poll(self): """Dispatches to Popen.returncode.""" return self.proc.returncode def wait(self, timeout=None): """Dispatches to Popen.wait(), but also does process cleanup such as joining this thread and replacing the original window size signal handler. """ self._disable_cbreak_stdin() rtn = self.proc.wait(timeout=timeout) self.join() # need to replace the old signal handlers somewhere... 
if self.old_winch_handler is not None and on_main_thread(): signal.signal(signal.SIGWINCH, self.old_winch_handler) self.old_winch_handler = None self._clean_up() return rtn def _clean_up(self): self._restore_sigint() self._restore_sigtstp() self._restore_sigquit() @property def returncode(self): """Process return code.""" return self.proc.returncode @returncode.setter def returncode(self, value): """Process return code.""" self.proc.returncode = value @property def signal(self): """Process signal, or None.""" s = getattr(self.proc, "signal", None) if s is None: rtn = self.returncode if rtn is not None and rtn != 0: s = (-1*rtn, rtn < 0 if ON_WINDOWS else os.WCOREDUMP(rtn)) return s @signal.setter def signal(self, value): """Process signal, or None.""" self.proc.signal = value def send_signal(self, signal): """Dispatches to Popen.send_signal().""" dt = 0.0 while self.proc is None and dt < self.timeout: time.sleep(1e-7) dt += 1e-7 if self.proc is None: return try: rtn = self.proc.send_signal(signal) except ProcessLookupError: # This can happen in the case of !(cmd) when the command has ended rtn = None return rtn def terminate(self): """Dispatches to Popen.terminate().""" return self.proc.terminate() def kill(self): """Dispatches to Popen.kill().""" return self.proc.kill() class Handle(int): closed = False def Close(self, CloseHandle=None): CloseHandle = CloseHandle or _winapi.CloseHandle if not self.closed: self.closed = True CloseHandle(self) def Detach(self): if not self.closed: self.closed = True return int(self) raise ValueError("already closed") def __repr__(self): return "Handle(%d)" % int(self) __del__ = Close __str__ = __repr__ class FileThreadDispatcher: """Dispatches to different file handles depending on the current thread. Useful if you want file operation to go to different places for different threads. """ def __init__(self, default=None): """ Parameters ---------- default : file-like or None, optional The file handle to write to if a thread cannot be found in the registry. If None, a new in-memory instance. Attributes ---------- registry : dict Maps thread idents to file handles. """ if default is None: default = io.TextIOWrapper(io.BytesIO()) self.default = default self.registry = {} def register(self, handle): """Registers a file handle for the current thread. Returns self so that this method can be used in a with-statement. 
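Editor's sketch of the intended usage, mirroring how the proc proxies in
this module call it (``some_handle`` stands in for any file-like object)::

    with STDOUT_DISPATCHER.register(some_handle):
        ...  # writes made on this thread now go to some_handle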
""" self.registry[threading.get_ident()] = handle return self def deregister(self): """Removes the current thread from the registry.""" del self.registry[threading.get_ident()] @property def available(self): """True if the thread is available in the registry.""" return threading.get_ident() in self.registry @property def handle(self): """Gets the current handle for the thread.""" return self.registry.get(threading.get_ident(), self.default) def __enter__(self): pass def __exit__(self, ex_type, ex_value, ex_traceback): self.deregister() # # io.TextIOBase interface # @property def encoding(self): """Gets the encoding for this thread's handle.""" return self.handle.encoding @property def errors(self): """Gets the errors for this thread's handle.""" return self.handle.errors @property def newlines(self): """Gets the newlines for this thread's handle.""" return self.handle.newlines @property def buffer(self): """Gets the buffer for this thread's handle.""" return self.handle.buffer def detach(self): """Detaches the buffer for the current thread.""" return self.handle.detach() def read(self, size=None): """Reads from the handle for the current thread.""" return self.handle.read(size) def readline(self, size=-1): """Reads a line from the handle for the current thread.""" return self.handle.readline(size) def readlines(self, hint=-1): """Reads lines from the handle for the current thread.""" return self.handle.readlines(hint) def seek(self, offset, whence=io.SEEK_SET): """Seeks the current file.""" return self.handle.seek(offset, whence) def tell(self): """Reports the current position in the handle for the current thread.""" return self.handle.tell() def write(self, s): """Writes to this thread's handle. This also flushes, just to be extra sure the string was written. """ h = self.handle try: r = h.write(s) h.flush() except OSError: r = None return r @property def line_buffering(self): """Gets if line buffering for this thread's handle enabled.""" return self.handle.line_buffering # # io.IOBase interface # def close(self): """Closes the current thread's handle.""" return self.handle.close() @property def closed(self): """Is the thread's handle closed.""" return self.handle.closed def fileno(self): """Returns the file descriptor for the current thread.""" return self.handle.fileno() def flush(self): """Flushes the file descriptor for the current thread.""" return safe_flush(self.handle) def isatty(self): """Returns if the file descriptor for the current thread is a tty.""" return self.handle.isatty() def readable(self): """Returns if file descriptor for the current thread is readable.""" return self.handle.readable() def seekable(self): """Returns if file descriptor for the current thread is seekable.""" return self.handle.seekable() def truncate(self, size=None): """Truncates the file for for the current thread.""" return self.handle.truncate() def writable(self, size=None): """Returns if file descriptor for the current thread is writable.""" return self.handle.writable(size) def writelines(self): """Writes lines for the file descriptor for the current thread.""" return self.handle.writelines() # These should NOT be lazy since they *need* to get the true stdout from the # main thread. Also their creation time should be negligible. STDOUT_DISPATCHER = FileThreadDispatcher(default=sys.stdout) STDERR_DISPATCHER = FileThreadDispatcher(default=sys.stderr) def parse_proxy_return(r, stdout, stderr): """Proxies may return a variety of outputs. This handles them generally. 
Parameters ---------- r : tuple, str, int, or None Return from proxy function stdout : file-like Current stdout stream stdout : file-like Current stderr stream Returns ------- cmd_result : int The return code of the proxy """ cmd_result = 0 if isinstance(r, str): stdout.write(r) stdout.flush() elif isinstance(r, int): cmd_result = r elif isinstance(r, cabc.Sequence): rlen = len(r) if rlen > 0 and r[0] is not None: stdout.write(r[0]) stdout.flush() if rlen > 1 and r[1] is not None: stderr.write(r[1]) stderr.flush() if rlen > 2 and r[2] is not None: cmd_result = r[2] elif r is not None: # for the random object... stdout.write(str(r)) stdout.flush() return cmd_result def proxy_zero(f, args, stdin, stdout, stderr, spec): """Calls a proxy function which takes no parameters.""" return f() def proxy_one(f, args, stdin, stdout, stderr, spec): """Calls a proxy function which takes one parameter: args""" return f(args) def proxy_two(f, args, stdin, stdout, stderr, spec): """Calls a proxy function which takes two parameter: args and stdin.""" return f(args, stdin) def proxy_three(f, args, stdin, stdout, stderr, spec): """Calls a proxy function which takes three parameter: args, stdin, stdout. """ return f(args, stdin, stdout) def proxy_four(f, args, stdin, stdout, stderr, spec): """Calls a proxy function which takes four parameter: args, stdin, stdout, and stderr. """ return f(args, stdin, stdout, stderr) PROXIES = (proxy_zero, proxy_one, proxy_two, proxy_three, proxy_four) PROXY_KWARG_NAMES = frozenset(['args', 'stdin', 'stdout', 'stderr', 'spec']) def partial_proxy(f): """Dispatches the appropriate proxy function based on the number of args.""" numargs = 0 for name, param in inspect.signature(f).parameters.items(): if param.kind == param.POSITIONAL_ONLY or \ param.kind == param.POSITIONAL_OR_KEYWORD: numargs += 1 elif name in PROXY_KWARG_NAMES and param.kind == param.KEYWORD_ONLY: numargs += 1 if numargs < 5: return functools.partial(PROXIES[numargs], f) elif numargs == 5: # don't need to partial. return f else: e = 'Expected proxy with 5 or fewer arguments for {}, not {}' raise XonshError(e.format(', '.join(PROXY_KWARG_NAMES), numargs)) class ProcProxyThread(threading.Thread): """ Class representing a function to be run as a subprocess-mode command. """ def __init__(self, f, args, stdin=None, stdout=None, stderr=None, universal_newlines=False, env=None): """Parameters ---------- f : function The function to be executed. args : list A (possibly empty) list containing the arguments that were given on the command line stdin : file-like, optional A file-like object representing stdin (input can be read from here). If `stdin` is not provided or if it is explicitly set to `None`, then an instance of `io.StringIO` representing an empty file is used. stdout : file-like, optional A file-like object representing stdout (normal output can be written here). If `stdout` is not provided or if it is explicitly set to `None`, then `sys.stdout` is used. stderr : file-like, optional A file-like object representing stderr (error output can be written here). If `stderr` is not provided or if it is explicitly set to `None`, then `sys.stderr` is used. universal_newlines : bool, optional Whether or not to use universal newlines. env : Mapping, optional Environment mapping. 
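Editor's sketch of an alias function that this class can wrap; the name
and output are illustrative only::

    def _greet(args, stdin, stdout, stderr, spec):
        print('hello', ' '.join(args), file=stdout)
        return 0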
""" self.orig_f = f self.f = partial_proxy(f) self.args = args self.pid = None self.returncode = None self._closed_handle_cache = {} handles = self._get_handles(stdin, stdout, stderr) (self.p2cread, self.p2cwrite, self.c2pread, self.c2pwrite, self.errread, self.errwrite) = handles # default values self.stdin = stdin self.stdout = stdout self.stderr = stderr self.env = env or builtins.__xonsh_env__ self._interrupted = False if ON_WINDOWS: if self.p2cwrite != -1: self.p2cwrite = msvcrt.open_osfhandle(self.p2cwrite.Detach(), 0) if self.c2pread != -1: self.c2pread = msvcrt.open_osfhandle(self.c2pread.Detach(), 0) if self.errread != -1: self.errread = msvcrt.open_osfhandle(self.errread.Detach(), 0) if self.p2cwrite != -1: self.stdin = io.open(self.p2cwrite, 'wb', -1) if universal_newlines: self.stdin = io.TextIOWrapper(self.stdin, write_through=True, line_buffering=False) elif isinstance(stdin, int) and stdin != 0: self.stdin = io.open(stdin, 'wb', -1) if self.c2pread != -1: self.stdout = io.open(self.c2pread, 'rb', -1) if universal_newlines: self.stdout = io.TextIOWrapper(self.stdout) if self.errread != -1: self.stderr = io.open(self.errread, 'rb', -1) if universal_newlines: self.stderr = io.TextIOWrapper(self.stderr) # Set some signal handles, if we can. Must come before process # is started to prevent deadlock on windows self.old_int_handler = None if on_main_thread(): self.old_int_handler = signal.signal(signal.SIGINT, self._signal_int) # start up the proc super().__init__() self.start() def __del__(self): self._restore_sigint() def run(self): """Set up input/output streams and execute the child function in a new thread. This is part of the `threading.Thread` interface and should not be called directly. """ if self.f is None: return spec = self._wait_and_getattr('spec') last_in_pipeline = spec.last_in_pipeline if last_in_pipeline: capout = spec.captured_stdout # NOQA caperr = spec.captured_stderr # NOQA env = builtins.__xonsh_env__ enc = env.get('XONSH_ENCODING') err = env.get('XONSH_ENCODING_ERRORS') if ON_WINDOWS: if self.p2cread != -1: self.p2cread = msvcrt.open_osfhandle(self.p2cread.Detach(), 0) if self.c2pwrite != -1: self.c2pwrite = msvcrt.open_osfhandle(self.c2pwrite.Detach(), 0) if self.errwrite != -1: self.errwrite = msvcrt.open_osfhandle(self.errwrite.Detach(), 0) # get stdin if self.stdin is None: sp_stdin = None elif self.p2cread != -1: sp_stdin = io.TextIOWrapper(io.open(self.p2cread, 'rb', -1), encoding=enc, errors=err) else: sp_stdin = sys.stdin # stdout if self.c2pwrite != -1: sp_stdout = io.TextIOWrapper(io.open(self.c2pwrite, 'wb', -1), encoding=enc, errors=err) else: sp_stdout = sys.stdout # stderr if self.errwrite == self.c2pwrite: sp_stderr = sp_stdout elif self.errwrite != -1: sp_stderr = io.TextIOWrapper(io.open(self.errwrite, 'wb', -1), encoding=enc, errors=err) else: sp_stderr = sys.stderr # run the function itself try: with STDOUT_DISPATCHER.register(sp_stdout), \ STDERR_DISPATCHER.register(sp_stderr), \ redirect_stdout(STDOUT_DISPATCHER), \ redirect_stderr(STDERR_DISPATCHER): r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec) except SystemExit as e: r = e.code if isinstance(e.code, int) else int(bool(e.code)) except OSError as e: status = still_writable(self.c2pwrite) and \ still_writable(self.errwrite) if status: # stdout and stderr are still writable, so error must # come from function itself. 
print_exception() r = 1 else: # stdout and stderr are no longer writable, so error must # come from the fact that the next process in the pipeline # has closed the other side of the pipe. The function then # attempted to write to this side of the pipe anyway. This # is not truly an error and we should exit gracefully. r = 0 except Exception: print_exception() r = 1 safe_flush(sp_stdout) safe_flush(sp_stderr) self.returncode = parse_proxy_return(r, sp_stdout, sp_stderr) if not last_in_pipeline and not ON_WINDOWS: # mac requires us *not to* close the handles here while # windows requires us *to* close the handles here return # clean up # scopz: not sure why this is needed, but stdin cannot go here # and stdout & stderr must. handles = [self.stdout, self.stderr] for handle in handles: safe_fdclose(handle, cache=self._closed_handle_cache) def _wait_and_getattr(self, name): """make sure the instance has a certain attr, and return it.""" while not hasattr(self, name): time.sleep(1e-7) return getattr(self, name) def poll(self): """Check if the function has completed. Returns ------- None if the function is still executing, and the returncode otherwise """ return self.returncode def wait(self, timeout=None): """Waits for the process to finish and returns the return code.""" self.join() self._restore_sigint() return self.returncode # # SIGINT handler # def _signal_int(self, signum, frame): """Signal handler for SIGINT - Ctrl+C may have been pressed.""" # Check if we have already been interrupted. This should prevent # the possibility of infinite recursion. if self._interrupted: return self._interrupted = True # close file handles here to stop an processes piped to us. handles = (self.p2cread, self.p2cwrite, self.c2pread, self.c2pwrite, self.errread, self.errwrite) for handle in handles: safe_fdclose(handle) if self.poll() is not None: self._restore_sigint(frame=frame) if on_main_thread(): signal.pthread_kill(threading.get_ident(), signal.SIGINT) def _restore_sigint(self, frame=None): old = self.old_int_handler if old is not None: if on_main_thread(): signal.signal(signal.SIGINT, old) self.old_int_handler = None if frame is not None: if old is not None and old is not self._signal_int: old(signal.SIGINT, frame) if self._interrupted: self.returncode = 1 # The code below (_get_devnull, _get_handles, and _make_inheritable) comes # from subprocess.py in the Python 3.4.2 Standard Library def _get_devnull(self): if not hasattr(self, '_devnull'): self._devnull = os.open(os.devnull, os.O_RDWR) return self._devnull if ON_WINDOWS: def _make_inheritable(self, handle): """Return a duplicate of handle, which is inheritable""" h = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, _winapi.GetCurrentProcess(), 0, 1, _winapi.DUPLICATE_SAME_ACCESS) return Handle(h) def _get_handles(self, stdin, stdout, stderr): """Construct and return tuple with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ if stdin is None and stdout is None and stderr is None: return (-1, -1, -1, -1, -1, -1) p2cread, p2cwrite = -1, -1 c2pread, c2pwrite = -1, -1 errread, errwrite = -1, -1 if stdin is None: p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE) if p2cread is None: p2cread, _ = _winapi.CreatePipe(None, 0) p2cread = Handle(p2cread) _winapi.CloseHandle(_) elif stdin == subprocess.PIPE: p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite) elif stdin == subprocess.DEVNULL: p2cread = msvcrt.get_osfhandle(self._get_devnull()) elif isinstance(stdin, int): p2cread = msvcrt.get_osfhandle(stdin) else: # 
Assuming file-like object p2cread = msvcrt.get_osfhandle(stdin.fileno()) p2cread = self._make_inheritable(p2cread) if stdout is None: c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE) if c2pwrite is None: _, c2pwrite = _winapi.CreatePipe(None, 0) c2pwrite = Handle(c2pwrite) _winapi.CloseHandle(_) elif stdout == subprocess.PIPE: c2pread, c2pwrite = _winapi.CreatePipe(None, 0) c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite) elif stdout == subprocess.DEVNULL: c2pwrite = msvcrt.get_osfhandle(self._get_devnull()) elif isinstance(stdout, int): c2pwrite = msvcrt.get_osfhandle(stdout) else: # Assuming file-like object c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) c2pwrite = self._make_inheritable(c2pwrite) if stderr is None: errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE) if errwrite is None: _, errwrite = _winapi.CreatePipe(None, 0) errwrite = Handle(errwrite) _winapi.CloseHandle(_) elif stderr == subprocess.PIPE: errread, errwrite = _winapi.CreatePipe(None, 0) errread, errwrite = Handle(errread), Handle(errwrite) elif stderr == subprocess.STDOUT: errwrite = c2pwrite elif stderr == subprocess.DEVNULL: errwrite = msvcrt.get_osfhandle(self._get_devnull()) elif isinstance(stderr, int): errwrite = msvcrt.get_osfhandle(stderr) else: # Assuming file-like object errwrite = msvcrt.get_osfhandle(stderr.fileno()) errwrite = self._make_inheritable(errwrite) return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) else: # POSIX versions def _get_handles(self, stdin, stdout, stderr): """Construct and return tuple with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ p2cread, p2cwrite = -1, -1 c2pread, c2pwrite = -1, -1 errread, errwrite = -1, -1 if stdin is None: pass elif stdin == subprocess.PIPE: p2cread, p2cwrite = os.pipe() elif stdin == subprocess.DEVNULL: p2cread = self._get_devnull() elif isinstance(stdin, int): p2cread = stdin else: # Assuming file-like object p2cread = stdin.fileno() if stdout is None: pass elif stdout == subprocess.PIPE: c2pread, c2pwrite = os.pipe() elif stdout == subprocess.DEVNULL: c2pwrite = self._get_devnull() elif isinstance(stdout, int): c2pwrite = stdout else: # Assuming file-like object c2pwrite = stdout.fileno() if stderr is None: pass elif stderr == subprocess.PIPE: errread, errwrite = os.pipe() elif stderr == subprocess.STDOUT: errwrite = c2pwrite elif stderr == subprocess.DEVNULL: errwrite = self._get_devnull() elif isinstance(stderr, int): errwrite = stderr else: # Assuming file-like object errwrite = stderr.fileno() return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) # # Foreground Thread Process Proxies # class ProcProxy(object): """This is process proxy class that runs its alias functions on the same thread that it was called from, which is typically the main thread. This prevents the process from running on a background thread, but enables debugger and profiler tools (functions) be run on the same thread that they are attempting to debug. """ def __init__(self, f, args, stdin=None, stdout=None, stderr=None, universal_newlines=False, env=None): self.orig_f = f self.f = partial_proxy(f) self.args = args self.pid = os.getpid() self.returncode = None self.stdin = stdin self.stdout = stdout self.stderr = stderr self.universal_newlines = universal_newlines self.env = env def poll(self): """Check if the function has completed via the returncode or None. """ return self.returncode def wait(self, timeout=None): """Runs the function and returns the result. 
Timeout argument only present for API compatibility. """ if self.f is None: return 0 env = builtins.__xonsh_env__ enc = env.get('XONSH_ENCODING') err = env.get('XONSH_ENCODING_ERRORS') spec = self._wait_and_getattr('spec') # set file handles if self.stdin is None: stdin = None else: if isinstance(self.stdin, int): inbuf = io.open(self.stdin, 'rb', -1) else: inbuf = self.stdin stdin = io.TextIOWrapper(inbuf, encoding=enc, errors=err) stdout = self._pick_buf(self.stdout, sys.stdout, enc, err) stderr = self._pick_buf(self.stderr, sys.stderr, enc, err) # run the actual function try: r = self.f(self.args, stdin, stdout, stderr, spec) except Exception: print_exception() r = 1 self.returncode = parse_proxy_return(r, stdout, stderr) safe_flush(stdout) safe_flush(stderr) return self.returncode @staticmethod def _pick_buf(handle, sysbuf, enc, err): if handle is None or handle is sysbuf: buf = sysbuf elif isinstance(handle, int): if handle < 3: buf = sysbuf else: buf = io.TextIOWrapper(io.open(handle, 'wb', -1), encoding=enc, errors=err) elif hasattr(handle, 'encoding'): # must be a text stream, no need to wrap. buf = handle else: # must be a binary stream, should wrap it. buf = io.TextIOWrapper(handle, encoding=enc, errors=err) return buf def _wait_and_getattr(self, name): """make sure the instance has a certain attr, and return it.""" while not hasattr(self, name): time.sleep(1e-7) return getattr(self, name) @lazyobject def SIGNAL_MESSAGES(): sm = { signal.SIGABRT: 'Aborted', signal.SIGFPE: 'Floating point exception', signal.SIGILL: 'Illegal instructions', signal.SIGTERM: 'Terminated', signal.SIGSEGV: 'Segmentation fault', } if ON_POSIX: sm.update({ signal.SIGQUIT: 'Quit', signal.SIGHUP: 'Hangup', signal.SIGKILL: 'Killed', }) return sm def safe_readlines(handle, hint=-1): """Attempts to read lines without throwing an error.""" try: lines = handle.readlines(hint) except OSError: lines = [] return lines def safe_readable(handle): """Attempts to find if the handle is readable without throwing an error.""" try: status = handle.readable() except (OSError, ValueError): status = False return status def update_fg_process_group(pipeline_group, background): if background: return False if not ON_POSIX: return False env = builtins.__xonsh_env__ if not env.get('XONSH_INTERACTIVE'): return False return give_terminal_to(pipeline_group) class CommandPipeline: """Represents a subprocess-mode command pipeline.""" attrnames = ("stdin", "stdout", "stderr", "pid", "returncode", "args", "alias", "stdin_redirect", "stdout_redirect", "stderr_redirect", "timestamps", "executed_cmd", 'input', 'output', 'errors') nonblocking = (io.BytesIO, NonBlockingFDReader, ConsoleParallelReader) def __init__(self, specs): """ Parameters ---------- specs : list of SubprocSpec Process specifications Attributes ---------- spec : SubprocSpec The last specification in specs proc : Popen-like The process in procs ended : bool Boolean for if the command has stopped executing. input : str A string of the standard input. output : str A string of the standard output. errors : str A string of the standard error. lines : list of str The output lines starttime : floats or None Pipeline start timestamp. 
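endtime : float or None
    Pipeline end timestamp; filled in once the command finishes.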
""" self.starttime = None self.ended = False self.procs = [] self.specs = specs self.spec = specs[-1] self.captured = specs[-1].captured self.input = self._output = self.errors = self.endtime = None self._closed_handle_cache = {} self.lines = [] self._stderr_prefix = self._stderr_postfix = None self.term_pgid = None background = self.spec.background pipeline_group = None for spec in specs: if self.starttime is None: self.starttime = time.time() try: proc = spec.run(pipeline_group=pipeline_group) except XonshError: self._return_terminal() raise if proc.pid and pipeline_group is None and not spec.is_proxy and \ self.captured != 'object': pipeline_group = proc.pid if update_fg_process_group(pipeline_group, background): self.term_pgid = pipeline_group self.procs.append(proc) self.proc = self.procs[-1] def __repr__(self): s = self.__class__.__name__ + '(' s += ', '.join(a + '=' + str(getattr(self, a)) for a in self.attrnames) s += ')' return s def __bool__(self): return self.returncode == 0 def __len__(self): return len(self.procs) def __iter__(self): """Iterates through stdout and returns the lines, converting to strings and universal newlines if needed. """ if self.ended: yield from iter(self.lines) else: yield from self.tee_stdout() def iterraw(self): """Iterates through the last stdout, and returns the lines exactly as found. """ # get appropriate handles spec = self.spec proc = self.proc timeout = builtins.__xonsh_env__.get('XONSH_PROC_FREQUENCY') # get the correct stdout stdout = proc.stdout if ((stdout is None or spec.stdout is None or not safe_readable(stdout)) and spec.captured_stdout is not None): stdout = spec.captured_stdout if hasattr(stdout, 'buffer'): stdout = stdout.buffer if stdout is not None and not isinstance(stdout, self.nonblocking): stdout = NonBlockingFDReader(stdout.fileno(), timeout=timeout) if not stdout or self.captured == 'stdout' or not safe_readable(stdout) or \ not spec.threadable: # we get here if the process is not threadable or the # class is the real Popen PrevProcCloser(pipeline=self) task = wait_for_active_job() if task is None or task['status'] != 'stopped': proc.wait() self._endtime() if self.captured == 'object': self.end(tee_output=False) elif self.captured == 'hiddenobject' and stdout: b = stdout.read() lines = b.splitlines(keepends=True) yield from lines self.end(tee_output=False) elif self.captured == 'stdout': b = stdout.read() s = self._decode_uninew(b, universal_newlines=True) self.lines = s.splitlines(keepends=True) raise StopIteration # get the correct stderr stderr = proc.stderr if ((stderr is None or spec.stderr is None or not safe_readable(stderr)) and spec.captured_stderr is not None): stderr = spec.captured_stderr if hasattr(stderr, 'buffer'): stderr = stderr.buffer if stderr is not None and not isinstance(stderr, self.nonblocking): stderr = NonBlockingFDReader(stderr.fileno(), timeout=timeout) # read from process while it is running check_prev_done = len(self.procs) == 1 prev_end_time = None i = j = cnt = 1 while proc.poll() is None: if getattr(proc, 'suspended', False): return elif getattr(proc, 'in_alt_mode', False): time.sleep(0.1) # probably not leaving any time soon continue elif not check_prev_done: # In the case of pipelines with more than one command # we should give the commands a little time # to start up fully. This is particularly true for # GNU Parallel, which has a long startup time. 
pass elif self._prev_procs_done(): self._close_prev_procs() proc.prevs_are_closed = True break stdout_lines = safe_readlines(stdout, 1024) i = len(stdout_lines) if i != 0: yield from stdout_lines stderr_lines = safe_readlines(stderr, 1024) j = len(stderr_lines) if j != 0: self.stream_stderr(stderr_lines) if not check_prev_done: # if we are piping... if (stdout_lines or stderr_lines): # see if we have some output. check_prev_done = True elif prev_end_time is None: # or see if we already know that the next-to-last # proc in the pipeline has ended. if self._prev_procs_done(): # if it has, record the time prev_end_time = time.time() elif time.time() - prev_end_time >= 0.1: # if we still don't have any output, even though the # next-to-last proc has finished, wait a bit to make # sure we have fully started up, etc. check_prev_done = True # this is for CPU usage if i + j == 0: cnt = min(cnt + 1, 1000) else: cnt = 1 time.sleep(timeout * cnt) # read from process now that it is over yield from safe_readlines(stdout) self.stream_stderr(safe_readlines(stderr)) proc.wait() self._endtime() yield from safe_readlines(stdout) self.stream_stderr(safe_readlines(stderr)) if self.captured == 'object': self.end(tee_output=False) def itercheck(self): """Iterates through the command lines and throws an error if the returncode is non-zero. """ yield from self if self.returncode: # I included self, as providing access to stderr and other details # useful when instance isn't assigned to a variable in the shell. raise XonshCalledProcessError(self.returncode, self.executed_cmd, self.stdout, self.stderr, self) def tee_stdout(self): """Writes the process stdout to the output variable, line-by-line, and yields each line. This may optionally accept lines (in bytes) to iterate over, in which case it does not call iterraw(). """ env = builtins.__xonsh_env__ enc = env.get('XONSH_ENCODING') err = env.get('XONSH_ENCODING_ERRORS') lines = self.lines stream = self.captured not in STDOUT_CAPTURE_KINDS if stream and not self.spec.stdout: stream = False stdout_has_buffer = hasattr(sys.stdout, 'buffer') nl = b'\n' cr = b'\r' crnl = b'\r\n' for line in self.iterraw(): # write to stdout line ASAP, if needed if stream: if stdout_has_buffer: sys.stdout.buffer.write(line) else: sys.stdout.write(line.decode(encoding=enc, errors=err)) sys.stdout.flush() # do some munging of the line before we return it if line.endswith(crnl): line = line[:-2] + nl elif line.endswith(cr): line = line[:-1] + nl line = RE_HIDE_ESCAPE.sub(b'', line) line = line.decode(encoding=enc, errors=err) # tee it up! 
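# Editor's note: the decoded line is kept on self.lines (which backs the
# .output property) as well as being echoed above, so captured pipelines
# can expose their output after streaming finishes.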
lines.append(line) yield line def stream_stderr(self, lines): """Streams lines to sys.stderr and the errors attribute.""" if not lines: return env = builtins.__xonsh_env__ enc = env.get('XONSH_ENCODING') err = env.get('XONSH_ENCODING_ERRORS') b = b''.join(lines) if self.stderr_prefix: b = self.stderr_prefix + b if self.stderr_postfix: b += self.stderr_postfix stderr_has_buffer = hasattr(sys.stderr, 'buffer') # write bytes to std stream if stderr_has_buffer: sys.stderr.buffer.write(b) else: sys.stderr.write(b.decode(encoding=enc, errors=err)) sys.stderr.flush() # do some munging of the line before we save it to the attr b = b.replace(b'\r\n', b'\n').replace(b'\r', b'\n') b = RE_HIDE_ESCAPE.sub(b'', b) env = builtins.__xonsh_env__ s = b.decode(encoding=env.get('XONSH_ENCODING'), errors=env.get('XONSH_ENCODING_ERRORS')) # set the errors if self.errors is None: self.errors = s else: self.errors += s def _decode_uninew(self, b, universal_newlines=None): """Decode bytes into a str and apply universal newlines as needed.""" if not b: return '' if isinstance(b, bytes): env = builtins.__xonsh_env__ s = b.decode(encoding=env.get('XONSH_ENCODING'), errors=env.get('XONSH_ENCODING_ERRORS')) else: s = b if universal_newlines or self.spec.universal_newlines: s = s.replace('\r\n', '\n').replace('\r', '\n') return s # # Ending methods # def end(self, tee_output=True): """ End the pipeline, return the controlling terminal if needed. Main things done in self._end(). """ if self.ended: return self._end(tee_output=tee_output) self._return_terminal() def _end(self, tee_output): """Waits for the command to complete and then runs any closing and cleanup procedures that need to be run. """ if tee_output: for _ in self.tee_stdout(): pass self._endtime() # since we are driven by getting output, input may not be available # until the command has completed. self._set_input() self._close_prev_procs() self._close_proc() self._check_signal() self._apply_to_history() self.ended = True self._raise_subproc_error() def _return_terminal(self): if ON_WINDOWS or not ON_POSIX: return pgid = os.getpgid(0) if self.term_pgid is None or pgid == self.term_pgid: return if give_terminal_to(pgid): # if gave term succeed self.term_pgid = pgid if hasattr(builtins, '__xonsh_shell__'): # restoring sanity could probably be called whenever we return # control to the shell. But it only seems to matter after a # ^Z event. This *has* to be called after we give the terminal # back to the shell. builtins.__xonsh_shell__.shell.restore_tty_sanity() def resume(self, job, tee_output=True): self.ended = False if give_terminal_to(job['pgrp']): self.term_pgid = job['pgrp'] _continue(job) self.end(tee_output=tee_output) def _endtime(self): """Sets the closing timestamp if it hasn't been already.""" if self.endtime is None: self.endtime = time.time() def _safe_close(self, handle): safe_fdclose(handle, cache=self._closed_handle_cache) def _prev_procs_done(self): """Boolean for if all previous processes have completed. If there is only a single process in the pipeline, this returns False. 
""" for s, p in zip(self.specs[:-1], self.procs[:-1]): self._safe_close(p.stdin) self._safe_close(s.stdin) if p.poll() is None: return False self._safe_close(p.stdout) self._safe_close(s.stdout) self._safe_close(p.stderr) self._safe_close(s.stderr) return len(self) > 1 def _close_prev_procs(self): """Closes all but the last proc's stdout.""" for s, p in zip(self.specs[:-1], self.procs[:-1]): self._safe_close(s.stdin) self._safe_close(p.stdin) self._safe_close(s.stdout) self._safe_close(p.stdout) self._safe_close(s.stderr) self._safe_close(p.stderr) def _close_proc(self): """Closes last proc's stdout.""" s = self.spec p = self.proc self._safe_close(s.stdin) self._safe_close(p.stdin) self._safe_close(s.stdout) self._safe_close(p.stdout) self._safe_close(s.stderr) self._safe_close(p.stderr) self._safe_close(s.captured_stdout) self._safe_close(s.captured_stderr) def _set_input(self): """Sets the input variable.""" stdin = self.proc.stdin if stdin is None or isinstance(stdin, int) or stdin.closed or \ not stdin.seekable() or not safe_readable(stdin): input = b'' else: stdin.seek(0) input = stdin.read() self.input = self._decode_uninew(input) def _check_signal(self): """Checks if a signal was received and issues a message.""" proc_signal = getattr(self.proc, 'signal', None) if proc_signal is None: return sig, core = proc_signal sig_str = SIGNAL_MESSAGES.get(sig) if sig_str: if core: sig_str += ' (core dumped)' print(sig_str, file=sys.stderr) if self.errors is not None: self.errors += sig_str + '\n' def _apply_to_history(self): """Applies the results to the current history object.""" hist = builtins.__xonsh_history__ if hist is not None: hist.last_cmd_rtn = self.proc.returncode def _raise_subproc_error(self): """Raises a subprocess error, if we are supposed to.""" spec = self.spec rtn = self.returncode if (not spec.is_proxy and rtn is not None and rtn > 0 and builtins.__xonsh_env__.get('RAISE_SUBPROC_ERROR')): raise subprocess.CalledProcessError(rtn, spec.cmd, output=self.output) # # Properties # @property def stdin(self): """Process stdin.""" return self.proc.stdin @property def stdout(self): """Process stdout.""" return self.proc.stdout @property def stderr(self): """Process stderr.""" return self.proc.stderr @property def inp(self): """Creates normalized input string from args.""" return ' '.join(self.args) @property def output(self): if self._output is None: self._output = ''.join(self.lines) return self._output @property def out(self): """Output value as a str.""" self.end() return self.output @property def err(self): """Error messages as a string.""" self.end() return self.errors @property def pid(self): """Process identifier.""" return self.proc.pid @property def returncode(self): """Process return code, waits until command is completed.""" self.end() return self.proc.returncode rtn = returncode @property def args(self): """Arguments to the process.""" return self.spec.args @property def rtn(self): """Alias to return code.""" return self.returncode @property def alias(self): """Alias the process used.""" return self.spec.alias @property def stdin_redirect(self): """Redirection used for stdin.""" stdin = self.spec.stdin name = getattr(stdin, 'name', '') mode = getattr(stdin, 'mode', 'r') return [name, mode] @property def stdout_redirect(self): """Redirection used for stdout.""" stdout = self.spec.stdout name = getattr(stdout, 'name', '') mode = getattr(stdout, 'mode', 'a') return [name, mode] @property def stderr_redirect(self): """Redirection used for stderr.""" stderr = self.spec.stderr 
name = getattr(stderr, 'name', '') mode = getattr(stderr, 'mode', 'r') return [name, mode] @property def timestamps(self): """The start and end time stamps.""" return [self.starttime, self.endtime] @property def executed_cmd(self): """The resolve and executed command.""" return self.spec.cmd @property def stderr_prefix(self): """Prefix to print in front of stderr, as bytes.""" p = self._stderr_prefix if p is None: env = builtins.__xonsh_env__ t = env.get('XONSH_STDERR_PREFIX') s = format_std_prepost(t, env=env) p = s.encode(encoding=env.get('XONSH_ENCODING'), errors=env.get('XONSH_ENCODING_ERRORS')) self._stderr_prefix = p return p @property def stderr_postfix(self): """Postfix to print after stderr, as bytes.""" p = self._stderr_postfix if p is None: env = builtins.__xonsh_env__ t = env.get('XONSH_STDERR_POSTFIX') s = format_std_prepost(t, env=env) p = s.encode(encoding=env.get('XONSH_ENCODING'), errors=env.get('XONSH_ENCODING_ERRORS')) self._stderr_postfix = p return p class HiddenCommandPipeline(CommandPipeline): def __repr__(self): return '' def pause_call_resume(p, f, *args, **kwargs): """For a process p, this will call a function f with the remaining args and and kwargs. If the process cannot accept signals, the function will be called. Parameters ---------- p : Popen object or similar f : callable args : remaining arguments kwargs : keyword arguments """ can_send_signal = hasattr(p, 'send_signal') and ON_POSIX if can_send_signal: p.send_signal(signal.SIGSTOP) try: f(*args, **kwargs) except Exception: pass if can_send_signal: p.send_signal(signal.SIGCONT) class PrevProcCloser(threading.Thread): """Previous process closer thread for pipelines whose last command is itself unthreadable. This makes sure that the pipeline is driven forward and does not deadlock. """ def __init__(self, pipeline): """ Parameters ---------- pipeline : CommandPipeline The pipeline whose prev procs we should close. """ self.pipeline = pipeline super().__init__() self.daemon = True self.start() def run(self): """Runs the closing algorithm.""" pipeline = self.pipeline check_prev_done = len(pipeline.procs) == 1 if check_prev_done: return proc = pipeline.proc prev_end_time = None timeout = builtins.__xonsh_env__.get('XONSH_PROC_FREQUENCY') sleeptime = min(timeout * 1000, 0.1) while proc.poll() is None: if not check_prev_done: # In the case of pipelines with more than one command # we should give the commands a little time # to start up fully. This is particularly true for # GNU Parallel, which has a long startup time. pass elif pipeline._prev_procs_done(): pipeline._close_prev_procs() proc.prevs_are_closed = True break if not check_prev_done: # if we are piping... if prev_end_time is None: # or see if we already know that the next-to-last # proc in the pipeline has ended. if pipeline._prev_procs_done(): # if it has, record the time prev_end_time = time.time() elif time.time() - prev_end_time >= 0.1: # if we still don't have any output, even though the # next-to-last proc has finished, wait a bit to make # sure we have fully started up, etc. 
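                    # Once this flag flips, the next pass through the loop
                    # falls through to the _prev_procs_done() branch above
                    # and closes the previous procs.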
check_prev_done = True # this is for CPU usage time.sleep(sleeptime) xonsh-0.6.0/xonsh/prompt/000077500000000000000000000000001320541242300153305ustar00rootroot00000000000000xonsh-0.6.0/xonsh/prompt/__init__.py000066400000000000000000000013711320541242300174430ustar00rootroot00000000000000# amalgamate exclude import os as _os if _os.getenv('XONSH_DEBUG', ''): pass else: import sys as _sys try: from xonsh.prompt import __amalgam__ cwd = __amalgam__ _sys.modules['xonsh.prompt.cwd'] = __amalgam__ env = __amalgam__ _sys.modules['xonsh.prompt.env'] = __amalgam__ gitstatus = __amalgam__ _sys.modules['xonsh.prompt.gitstatus'] = __amalgam__ job = __amalgam__ _sys.modules['xonsh.prompt.job'] = __amalgam__ vc = __amalgam__ _sys.modules['xonsh.prompt.vc'] = __amalgam__ base = __amalgam__ _sys.modules['xonsh.prompt.base'] = __amalgam__ del __amalgam__ except ImportError: pass del _sys del _os # amalgamate end xonsh-0.6.0/xonsh/prompt/base.py000066400000000000000000000165761320541242300166330ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Base prompt, provides PROMPT_FIELDS and prompt related functions""" import builtins import itertools import os import re import socket import string import sys import xonsh.lazyasd as xl import xonsh.tools as xt import xonsh.platform as xp from xonsh.prompt.cwd import ( _collapsed_pwd, _replace_home_cwd, _dynamically_collapsed_pwd ) from xonsh.prompt.job import _current_job from xonsh.prompt.env import (env_name, vte_new_tab_cwd) from xonsh.prompt.vc import ( current_branch, branch_color, branch_bg_color ) from xonsh.prompt.gitstatus import gitstatus_prompt @xt.lazyobject def DEFAULT_PROMPT(): return default_prompt() class PromptFormatter: """Class that holds all the related prompt formatting methods, uses the ``PROMPT_FIELDS`` envvar (no color formatting). 
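    For example (illustrative template), calling an instance with
    ``'{user}@{hostname}{prompt_end} '`` expands each known field from
    ``PROMPT_FIELDS``, while color tokens and unknown fields are passed
    through unchanged for later processing.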
""" def __init__(self): self.cache = {} def __call__(self, template=DEFAULT_PROMPT, fields=None): """Formats a xonsh prompt template string.""" if fields is None: self.fields = builtins.__xonsh_env__.get('PROMPT_FIELDS', PROMPT_FIELDS) else: self.fields = fields try: prompt = self._format_prompt(template=template) except Exception: return _failover_template_format(template) # keep cache only during building prompt self.cache.clear() return prompt def _format_prompt(self, template=DEFAULT_PROMPT): template = template() if callable(template) else template toks = [] for literal, field, spec, conv in _FORMATTER.parse(template): toks.append(literal) entry = self._format_field(field, spec, conv) if entry is not None: toks.append(entry) return ''.join(toks) def _format_field(self, field, spec, conv): if field is None: return elif field.startswith('$'): val = builtins.__xonsh_env__[field[1:]] return _format_value(val, spec, conv) elif field in self.fields: val = self._get_field_value(field) return _format_value(val, spec, conv) else: # color or unknown field, return as is return '{' + field + '}' def _get_field_value(self, field): field_value = self.fields[field] if field_value in self.cache: return self.cache[field_value] try: value = field_value() if callable(field_value) else field_value self.cache[field_value] = value except Exception: print('prompt: error: on field {!r}' ''.format(field), file=sys.stderr) xt.print_exception() value = '(ERROR:{})'.format(field) return value @xl.lazyobject def PROMPT_FIELDS(): return dict( user=xp.os_environ.get('USERNAME' if xp.ON_WINDOWS else 'USER', ''), prompt_end='#' if xt.is_superuser() else '$', hostname=socket.gethostname().split('.', 1)[0], cwd=_dynamically_collapsed_pwd, cwd_dir=lambda: os.path.dirname(_replace_home_cwd()), cwd_base=lambda: os.path.basename(_replace_home_cwd()), short_cwd=_collapsed_pwd, curr_branch=current_branch, branch_color=branch_color, branch_bg_color=branch_bg_color, current_job=_current_job, env_name=env_name, vte_new_tab_cwd=vte_new_tab_cwd, gitstatus=gitstatus_prompt, ) @xl.lazyobject def _FORMATTER(): return string.Formatter() def default_prompt(): """Creates a new instance of the default prompt.""" if xp.ON_CYGWIN: dp = ('{env_name:{} }{BOLD_GREEN}{user}@{hostname}' '{BOLD_BLUE} {cwd} {prompt_end}{NO_COLOR} ') elif xp.ON_WINDOWS: dp = ('{env_name:{} }' '{BOLD_INTENSE_GREEN}{user}@{hostname}{BOLD_INTENSE_CYAN} ' '{cwd}{branch_color}{curr_branch: {}}{NO_COLOR} ' '{BOLD_INTENSE_CYAN}{prompt_end}{NO_COLOR} ') else: dp = ('{env_name:{} }' '{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} ' '{cwd}{branch_color}{curr_branch: {}}{NO_COLOR} ' '{BOLD_BLUE}{prompt_end}{NO_COLOR} ') return dp def _failover_template_format(template): if callable(template): try: # Exceptions raises from function of producing $PROMPT # in user's xonshrc should not crash xonsh return template() except Exception: xt.print_exception() return '$ ' return template @xt.lazyobject def RE_HIDDEN(): return re.compile('\001.*?\002') def multiline_prompt(curr=''): """Returns the filler text for the prompt in multiline scenarios.""" line = curr.rsplit('\n', 1)[1] if '\n' in curr else curr line = RE_HIDDEN.sub('', line) # gets rid of colors # most prompts end in whitespace, head is the part before that. 
head = line.rstrip() headlen = len(head) # tail is the trailing whitespace tail = line if headlen == 0 else line.rsplit(head[-1], 1)[1] # now to construct the actual string dots = builtins.__xonsh_env__.get('MULTILINE_PROMPT') dots = dots() if callable(dots) else dots if dots is None or len(dots) == 0: return '' tokstr = xt.format_color(dots, hide=True) baselen = 0 basetoks = [] for x in tokstr.split('\001'): pre, sep, post = x.partition('\002') if len(sep) == 0: basetoks.append(('', pre)) baselen += len(pre) else: basetoks.append(('\001' + pre + '\002', post)) baselen += len(post) if baselen == 0: return xt.format_color('{NO_COLOR}' + tail, hide=True) toks = basetoks * (headlen // baselen) n = headlen % baselen count = 0 for tok in basetoks: slen = len(tok[1]) newcount = slen + count if slen == 0: continue elif newcount <= n: toks.append(tok) else: toks.append((tok[0], tok[1][:n - count])) count = newcount if n <= count: break toks.append((xt.format_color('{NO_COLOR}', hide=True), tail)) rtn = ''.join(itertools.chain.from_iterable(toks)) return rtn def is_template_string(template, PROMPT_FIELDS=None): """Returns whether or not the string is a valid template.""" template = template() if callable(template) else template try: included_names = set(i[1] for i in _FORMATTER.parse(template)) except ValueError: return False included_names.discard(None) if PROMPT_FIELDS is None: fmtter = builtins.__xonsh_env__.get('PROMPT_FIELDS', PROMPT_FIELDS) else: fmtter = PROMPT_FIELDS known_names = set(fmtter.keys()) return included_names <= known_names def _format_value(val, spec, conv): """Formats a value from a template string {val!conv:spec}. The spec is applied as a format string itself, but if the value is None, the result will be empty. The purpose of this is to allow optional parts in a prompt string. For example, if the prompt contains '{current_job:{} | }', and 'current_job' returns 'sleep', the result is 'sleep | ', and if 'current_job' returns None, the result is ''. """ if val is None: return '' val = _FORMATTER.convert_field(val, conv) if spec: val = _FORMATTER.format(spec, val) if not isinstance(val, str): val = str(val) return val xonsh-0.6.0/xonsh/prompt/cwd.py000066400000000000000000000054121320541242300164610ustar00rootroot00000000000000# -*- coding: utf-8 -*- """CWD related prompt formatter""" import os import shutil import builtins import xonsh.tools as xt import xonsh.platform as xp def _replace_home(x): if xp.ON_WINDOWS: home = (builtins.__xonsh_env__['HOMEDRIVE'] + builtins.__xonsh_env__['HOMEPATH'][0]) if x.startswith(home): x = x.replace(home, '~', 1) if builtins.__xonsh_env__.get('FORCE_POSIX_PATHS'): x = x.replace(os.sep, os.altsep) return x else: home = builtins.__xonsh_env__['HOME'] if x.startswith(home): x = x.replace(home, '~', 1) return x def _replace_home_cwd(): return _replace_home(builtins.__xonsh_env__['PWD']) def _collapsed_pwd(): sep = xt.get_sep() pwd = _replace_home_cwd().split(sep) l = len(pwd) leader = sep if l > 0 and len(pwd[0]) == 0 else '' base = [i[0] if ix != l - 1 else i for ix, i in enumerate(pwd) if len(i) > 0] return leader + sep.join(base) def _dynamically_collapsed_pwd(): """Return the compact current working directory. It respects the environment variable DYNAMIC_CWD_WIDTH. 
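    Path components that have to be shortened are suffixed with
    $DYNAMIC_CWD_ELISION_CHAR.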
""" original_path = _replace_home_cwd() target_width, units = builtins.__xonsh_env__['DYNAMIC_CWD_WIDTH'] elision_char = builtins.__xonsh_env__['DYNAMIC_CWD_ELISION_CHAR'] if target_width == float('inf'): return original_path if (units == '%'): cols, _ = shutil.get_terminal_size() target_width = (cols * target_width) // 100 sep = xt.get_sep() pwd = original_path.split(sep) last = pwd.pop() remaining_space = target_width - len(last) # Reserve space for separators remaining_space_for_text = remaining_space - len(pwd) parts = [] for i in range(len(pwd)): part = pwd[i] part_len = int(min(len(part), max(1, remaining_space_for_text // (len(pwd) - i)))) remaining_space_for_text -= part_len if len(part) > part_len: reduced_part = part[0:part_len-len(elision_char)] + elision_char parts.append(reduced_part) else: parts.append(part) parts.append(last) full = sep.join(parts) truncature_char = elision_char if elision_char else '...' # If even if displaying one letter per dir we are too long if (len(full) > target_width): # We truncate the left most part full = truncature_char + full[int(-target_width) + len(truncature_char):] # if there is not even a single separator we still # want to display at least the beginning of the directory if full.find(sep) == -1: full = (truncature_char + sep + last)[0:int(target_width) - len(truncature_char)] + truncature_char return full xonsh-0.6.0/xonsh/prompt/env.py000066400000000000000000000021321320541242300164700ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Prompt formatter for virtualenv and others""" import os import builtins import xonsh.platform as xp def env_name(pre_chars='(', post_chars=')'): """Extract the current environment name from $VIRTUAL_ENV or $CONDA_DEFAULT_ENV if that is set """ env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '') if len(env_path) == 0 and xp.ON_ANACONDA: env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '') env_name = os.path.basename(env_path) if env_name: return pre_chars + env_name + post_chars def vte_new_tab_cwd(): """This prints an escape sequence that tells VTE terminals the hostname and pwd. This should not be needed in most cases, but sometimes is for certain Linux terminals that do not read the PWD from the environment on startup. Note that this does not return a string, it simply prints and flushes the escape sequence to stdout directly. 
""" env = builtins.__xonsh_env__ t = '\033]7;file://{}{}\007' s = t.format(env.get('HOSTNAME'), env.get('PWD')) print(s, end='', flush=True) xonsh-0.6.0/xonsh/prompt/gitstatus.py000066400000000000000000000144631320541242300177410ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Informative git status prompt formatter""" import builtins import collections import os import subprocess import xonsh.lazyasd as xl GitStatus = collections.namedtuple('GitStatus', ['branch', 'num_ahead', 'num_behind', 'untracked', 'changed', 'conflicts', 'staged', 'stashed', 'operations']) def _check_output(*args, **kwargs): kwargs.update(dict(env=builtins.__xonsh_env__.detype(), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, universal_newlines=True )) timeout = builtins.__xonsh_env__['VC_BRANCH_TIMEOUT'] # See https://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate with subprocess.Popen(*args, **kwargs) as proc: try: out, err = proc.communicate(timeout=timeout) if proc.returncode != 0: raise subprocess.CalledProcessError(proc.returncode, proc.args, output=out, stderr=err) # note err will always be empty as we redirect stderr to DEVNULL abvoe return out except subprocess.TimeoutExpired: # We use `.terminate()` (SIGTERM) instead of `.kill()` (SIGKILL) here # because otherwise we guarantee that a `.git/index.lock` file will be # left over, and subsequent git operations will fail. # We don't want that. # As a result, we must rely on git to exit properly on SIGTERM. proc.terminate() # We wait() to ensure that git has finished before the next # `gitstatus` prompt is rendered (otherwise `index.lock` still exists, # and it will fail). # We don't technically have to call `wait()` here as the # `with subprocess.Popen()` context manager above would do that # for us, but we do it to be explicit that waiting is being done. 
proc.wait() # we ignore what git says after we sent it SIGTERM raise @xl.lazyobject def _DEFS(): DEFS = { 'HASH': ':', 'BRANCH': '{CYAN}', 'OPERATION': '{CYAN}', 'STAGED': '{RED}●', 'CONFLICTS': '{RED}×', 'CHANGED': '{BLUE}+', 'UNTRACKED': '…', 'STASHED': '⚑', 'CLEAN': '{BOLD_GREEN}✓', 'AHEAD': '↑·', 'BEHIND': '↓·', } return DEFS def _get_def(key): def_ = builtins.__xonsh_env__.get('XONSH_GITSTATUS_' + key) return def_ if def_ is not None else _DEFS[key] def _get_tag_or_hash(): tag_or_hash = _check_output(['git', 'describe', '--always']).strip() hash_ = _check_output(['git', 'rev-parse', '--short', 'HEAD']).strip() have_tag_name = tag_or_hash != hash_ return tag_or_hash if have_tag_name else _get_def('HASH') + hash_ def _get_stash(gitdir): try: with open(os.path.join(gitdir, 'logs/refs/stash')) as f: return sum(1 for _ in f) except IOError: return 0 def _gitoperation(gitdir): files = ( ('rebase-merge', 'REBASE'), ('rebase-apply', 'AM/REBASE'), ('MERGE_HEAD', 'MERGING'), ('CHERRY_PICK_HEAD', 'CHERRY-PICKING'), ('REVERT_HEAD', 'REVERTING'), ('BISECT_LOG', 'BISECTING'), ) return [f[1] for f in files if os.path.exists(os.path.join(gitdir, f[0]))] def gitstatus(): """Return namedtuple with fields: branch name, number of ahead commit, number of behind commit, untracked number, changed number, conflicts number, staged number, stashed number, operation.""" status = _check_output(['git', 'status', '--porcelain', '--branch']) branch = '' num_ahead, num_behind = 0, 0 untracked, changed, conflicts, staged = 0, 0, 0, 0 for line in status.splitlines(): if line.startswith('##'): line = line[2:].strip() if 'Initial commit on' in line: branch = line.split()[-1] elif 'no branch' in line: branch = _get_tag_or_hash() elif '...' not in line: branch = line else: branch, rest = line.split('...') if ' ' in rest: divergence = rest.split(' ', 1)[-1] divergence = divergence.strip('[]') for div in divergence.split(', '): if 'ahead' in div: num_ahead = int(div[len('ahead '):].strip()) elif 'behind' in div: num_behind = int(div[len('behind '):].strip()) elif line.startswith('??'): untracked += 1 else: if len(line) > 1 and line[1] == 'M': changed += 1 if len(line) > 0 and line[0] == 'U': conflicts += 1 elif len(line) > 0 and line[0] != ' ': staged += 1 gitdir = _check_output(['git', 'rev-parse', '--git-dir']).strip() stashed = _get_stash(gitdir) operations = _gitoperation(gitdir) return GitStatus(branch, num_ahead, num_behind, untracked, changed, conflicts, staged, stashed, operations) def gitstatus_prompt(): """Return str `BRANCH|OPERATOR|numbers`""" try: s = gitstatus() except subprocess.SubprocessError: return None ret = _get_def('BRANCH') + s.branch if s.num_ahead > 0: ret += _get_def('AHEAD') + str(s.num_ahead) if s.num_behind > 0: ret += _get_def('BEHIND') + str(s.num_behind) if s.operations: ret += _get_def('OPERATION') + '|' + '|'.join(s.operations) ret += '|' if s.staged > 0: ret += _get_def('STAGED') + str(s.staged) + '{NO_COLOR}' if s.conflicts > 0: ret += _get_def('CONFLICTS') + str(s.conflicts) + '{NO_COLOR}' if s.changed > 0: ret += _get_def('CHANGED') + str(s.changed) + '{NO_COLOR}' if s.untracked > 0: ret += _get_def('UNTRACKED') + str(s.untracked) + '{NO_COLOR}' if s.stashed > 0: ret += _get_def('STASHED') + str(s.stashed) + '{NO_COLOR}' if s.staged + s.conflicts + s.changed + s.untracked + s.stashed == 0: ret += _get_def('CLEAN') + '{NO_COLOR}' ret += '{NO_COLOR}' return ret xonsh-0.6.0/xonsh/prompt/job.py000066400000000000000000000005141320541242300164540ustar00rootroot00000000000000# -*- coding: 
utf-8 -*- """Prompt formatter for current jobs""" import xonsh.jobs as xj def _current_job(): j = xj.get_next_task() if j is not None: if not j['bg']: cmd = j['cmds'][-1] s = cmd[0] if s == 'sudo' and len(cmd) > 1: s = cmd[1] return s xonsh-0.6.0/xonsh/prompt/vc.py000066400000000000000000000201251320541242300163120ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Prompt formatter for simple version control branches""" # pylint:disable=no-member, invalid-name import os import sys import queue import builtins import threading import subprocess import xonsh.tools as xt def _get_git_branch(q): denv = builtins.__xonsh_env__.detype() try: branches = xt.decode_bytes(subprocess.check_output( ['git', 'branch'], env=denv, stderr=subprocess.DEVNULL, )).splitlines() except (subprocess.CalledProcessError, OSError, FileNotFoundError): q.put(None) else: for branch in branches: if not branch.startswith('* '): continue elif branch.endswith(')'): branch = branch.split()[-1][:-1] else: branch = branch.split()[-1] q.put(branch) break else: q.put(None) def get_git_branch(): """Attempts to find the current git branch. If this could not be determined (timeout, not in a git repo, etc.) then this returns None. """ branch = None timeout = builtins.__xonsh_env__.get('VC_BRANCH_TIMEOUT') q = queue.Queue() t = threading.Thread(target=_get_git_branch, args=(q,)) t.start() t.join(timeout=timeout) try: branch = q.get_nowait() except queue.Empty: branch = None return branch def _get_hg_root(q): _curpwd = builtins.__xonsh_env__['PWD'] while True: if not os.path.isdir(_curpwd): return False if any([b.name == '.hg' for b in xt.scandir(_curpwd)]): q.put(_curpwd) break else: _oldpwd = _curpwd _curpwd = os.path.split(_curpwd)[0] if _oldpwd == _curpwd: return False def get_hg_branch(root=None): """Try to get the mercurial branch of the current directory, return None if not in a repo or subprocess.TimeoutExpired if timed out. """ env = builtins.__xonsh_env__ timeout = env['VC_BRANCH_TIMEOUT'] q = queue.Queue() t = threading.Thread(target=_get_hg_root, args=(q,)) t.start() t.join(timeout=timeout) try: root = q.get_nowait() except queue.Empty: return None if env.get('VC_HG_SHOW_BRANCH'): # get branch name branch_path = os.path.sep.join([root, '.hg', 'branch']) if os.path.exists(branch_path): with open(branch_path, 'r') as branch_file: branch = branch_file.read() else: branch = 'default' else: branch = '' # add bookmark, if we can bookmark_path = os.path.sep.join([root, '.hg', 'bookmarks.current']) if os.path.exists(bookmark_path): with open(bookmark_path, 'r') as bookmark_file: active_bookmark = bookmark_file.read() if env.get('VC_HG_SHOW_BRANCH') is True: branch = "{0}, {1}".format(*(b.strip(os.linesep) for b in (branch, active_bookmark))) else: branch = active_bookmark.strip(os.linesep) else: branch = branch.strip(os.linesep) return branch _FIRST_BRANCH_TIMEOUT = True def _first_branch_timeout_message(): global _FIRST_BRANCH_TIMEOUT sbtm = builtins.__xonsh_env__['SUPPRESS_BRANCH_TIMEOUT_MESSAGE'] if not _FIRST_BRANCH_TIMEOUT or sbtm: return _FIRST_BRANCH_TIMEOUT = False print('xonsh: branch timeout: computing the branch name, color, or both ' 'timed out while formatting the prompt. You may avoid this by ' 'increasing the value of $VC_BRANCH_TIMEOUT or by removing branch ' 'fields, like {curr_branch}, from your $PROMPT. See the FAQ ' 'for more details. This message will be suppressed for the remainder ' 'of this session. 
To suppress this message permanently, set ' '$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.', file=sys.stderr) def current_branch(): """Gets the branch for a current working directory. Returns an empty string if the cwd is not a repository. This currently only works for git and hg and should be extended in the future. If a timeout occurred, the string '' is returned. """ branch = None cmds = builtins.__xonsh_commands_cache__ # check for binary only once if cmds.is_empty(): has_git = bool(cmds.locate_binary('git', ignore_alias=True)) has_hg = bool(cmds.locate_binary('hg', ignore_alias=True)) else: has_git = bool(cmds.lazy_locate_binary('git', ignore_alias=True)) has_hg = bool(cmds.lazy_locate_binary('hg', ignore_alias=True)) if has_git: branch = get_git_branch() if not branch and has_hg: branch = get_hg_branch() if isinstance(branch, subprocess.TimeoutExpired): branch = '' _first_branch_timeout_message() return branch or None def _git_dirty_working_directory(q, include_untracked): status = None denv = builtins.__xonsh_env__.detype() try: cmd = ['git', 'status', '--porcelain'] if include_untracked: cmd.append('--untracked-files=normal') else: cmd.append('--untracked-files=no') status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL, env=denv) except (subprocess.CalledProcessError, OSError, FileNotFoundError): q.put(None) if status is not None: return q.put(bool(status)) def git_dirty_working_directory(include_untracked=False): """Returns whether or not the git directory is dirty. If this could not be determined (timeout, file not found, etc.) then this returns None. """ timeout = builtins.__xonsh_env__.get("VC_BRANCH_TIMEOUT") q = queue.Queue() t = threading.Thread(target=_git_dirty_working_directory, args=(q, include_untracked)) t.start() t.join(timeout=timeout) try: return q.get_nowait() except queue.Empty: return None def hg_dirty_working_directory(): """Computes whether or not the mercurial working directory is dirty or not. If this cannot be determined, None is returned. """ env = builtins.__xonsh_env__ cwd = env['PWD'] denv = env.detype() vcbt = env['VC_BRANCH_TIMEOUT'] # Override user configurations settings and aliases denv['HGRCPATH'] = '' try: s = subprocess.check_output(['hg', 'identify', '--id'], stderr=subprocess.PIPE, cwd=cwd, timeout=vcbt, universal_newlines=True, env=denv) return s.strip(os.linesep).endswith('+') except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError): return None def dirty_working_directory(): """Returns a boolean as to whether there are uncommitted files in version control repository we are inside. If this cannot be determined, returns None. Currently supports git and hg. """ dwd = None cmds = builtins.__xonsh_commands_cache__ if cmds.lazy_locate_binary('git', ignore_alias=True): dwd = git_dirty_working_directory() if cmds.lazy_locate_binary('hg', ignore_alias=True) and dwd is None: dwd = hg_dirty_working_directory() return dwd def branch_color(): """Return red if the current branch is dirty, yellow if the dirtiness can not be determined, and green if it clean. These are bold, intense colors for the foreground. """ dwd = dirty_working_directory() if dwd is None: color = '{BOLD_INTENSE_YELLOW}' elif dwd: color = '{BOLD_INTENSE_RED}' else: color = '{BOLD_INTENSE_GREEN}' return color def branch_bg_color(): """Return red if the current branch is dirty, yellow if the dirtiness can not be determined, and green if it clean. These are background colors. 
""" dwd = dirty_working_directory() if dwd is None: color = '{BACKGROUND_YELLOW}' elif dwd: color = '{BACKGROUND_RED}' else: color = '{BACKGROUND_GREEN}' return color xonsh-0.6.0/xonsh/ptk/000077500000000000000000000000001320541242300146055ustar00rootroot00000000000000xonsh-0.6.0/xonsh/ptk/__init__.py000066400000000000000000000003541320541242300167200ustar00rootroot00000000000000# must come before ptk / pygments imports from xonsh.lazyasd import load_module_in_background load_module_in_background('pkg_resources', debug='XONSH_DEBUG', replacements={'pygments.plugin': 'pkg_resources'}) xonsh-0.6.0/xonsh/ptk/completer.py000066400000000000000000000104751320541242300171600ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Completer implementation to use with prompt_toolkit.""" import os import builtins from prompt_toolkit.layout.dimension import LayoutDimension from prompt_toolkit.completion import Completer, Completion from prompt_toolkit.auto_suggest import AutoSuggestFromHistory class PromptToolkitCompleter(Completer): """Simple prompt_toolkit Completer object. It just redirects requests to normal Xonsh completer. """ def __init__(self, completer, ctx, shell): """Takes instance of xonsh.completer.Completer, the xonsh execution context, and the shell instance itself. """ self.completer = completer self.ctx = ctx self.shell = shell self.hist_suggester = AutoSuggestFromHistory() def get_completions(self, document, complete_event): """Returns a generator for list of completions.""" env = builtins.__xonsh_env__ should_complete = ( complete_event.completion_requested or env.get('UPDATE_COMPLETIONS_ON_KEYPRESS') ) # Only generate completions when the user hits tab. if not should_complete or self.completer is None: return # generate actual completions line = document.current_line.lstrip() line_ex = builtins.aliases.expand_alias(line) endidx = document.cursor_position_col begidx = (line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0) prefix = line[begidx:endidx] expand_offset = len(line_ex) - len(line) # get normal completions completions, l = self.completer.complete(prefix, line_ex, begidx + expand_offset, endidx + expand_offset, self.ctx) # completions from auto suggest sug_comp = None if env.get('AUTO_SUGGEST') and env.get('AUTO_SUGGEST_IN_COMPLETIONS'): sug_comp = self.suggestion_completion(document, line) if sug_comp is None: pass elif len(completions) == 0: completions = (sug_comp,) else: completions = set(completions) completions.discard(sug_comp) completions = (sug_comp,) + tuple(sorted(completions)) # reserve space, if needed. 
if len(completions) <= 1: pass elif len(os.path.commonprefix(completions)) <= len(prefix): self.reserve_space() # Find common prefix (strip quoting) c_prefix = os.path.commonprefix([a.strip('\'"') for a in completions]) # Find last split symbol, do not trim the last part while c_prefix: if c_prefix[-1] in r'/\.:@,': break c_prefix = c_prefix[:-1] # yield completions if sug_comp is None: pre = min(document.cursor_position_col - begidx, len(c_prefix)) else: pre = len(c_prefix) for comp in completions: # do not display quote disp = comp[pre:].strip('\'"') yield Completion(comp, -l, display=disp) def suggestion_completion(self, document, line): """Provides a completion based on the current auto-suggestion.""" cli = self.shell.prompter.cli sug = self.hist_suggester.get_suggestion(cli, cli.current_buffer, document) if sug is None: return None comp, _, _ = sug.text.partition(' ') _, _, prev = line.rpartition(' ') return prev + comp def reserve_space(self): cli = builtins.__xonsh_shell__.shell.prompter.cli window = cli.application.layout.children[0].content.children[1] if window and window.render_info: h = window.render_info.content_height r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS') size = h + r def comp_height(cli): # If there is an autocompletion menu to be shown, make sure that o # layout has at least a minimal height in order to display it. if not cli.is_done: return LayoutDimension(min=size) else: return LayoutDimension() window._height = comp_height xonsh-0.6.0/xonsh/ptk/history.py000066400000000000000000000043611320541242300166640ustar00rootroot00000000000000# -*- coding: utf-8 -*- """History object for use with prompt_toolkit.""" import builtins from threading import Thread import prompt_toolkit.history class PromptToolkitHistory(prompt_toolkit.history.History): """History class that implements the prompt-toolkit history interface with the xonsh backend. """ def __init__(self, load_prev=True, wait_for_gc=True, *args, **kwargs): """Initialize history object.""" super().__init__() self.strings = [] if load_prev: PromptToolkitHistoryAdder(self, wait_for_gc=wait_for_gc) def append(self, entry): """Append new entry to the history.""" self.strings.append(entry) def __getitem__(self, index): return self.strings[index] def __len__(self): return len(self.strings) def __iter__(self): return iter(self.strings) class PromptToolkitHistoryAdder(Thread): def __init__(self, ptkhist, wait_for_gc=True, *args, **kwargs): """Thread responsible for adding inputs from history to the current prompt-toolkit history instance. May wait for the history garbage collector to finish. 
""" super(PromptToolkitHistoryAdder, self).__init__(*args, **kwargs) self.daemon = True self.ptkhist = ptkhist self.wait_for_gc = wait_for_gc self.start() def run(self): hist = builtins.__xonsh_history__ if hist is None: return buf = None ptkhist = self.ptkhist for cmd in hist.all_items(): line = cmd['inp'].rstrip() if len(ptkhist) == 0 or line != ptkhist[-1]: ptkhist.append(line) if buf is None: buf = self._buf() if buf is None: continue buf.reset(initial_document=buf.document) def _buf(self): # Thread-safe version of # buf = builtins.__xonsh_shell__.shell.prompter.cli.application.buffer path = ['__xonsh_shell__', 'shell', 'prompter', 'cli', 'application', 'buffer'] buf = builtins for a in path: buf = getattr(buf, a, None) if buf is None: break return buf xonsh-0.6.0/xonsh/ptk/key_bindings.py000066400000000000000000000277611320541242300176410ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Key bindings for prompt_toolkit xonsh shell.""" import builtins from prompt_toolkit.enums import DEFAULT_BUFFER from prompt_toolkit.filters import (Condition, IsMultiline, HasSelection, EmacsInsertMode, ViInsertMode) from prompt_toolkit.keys import Keys from xonsh.aliases import xonsh_exit from xonsh.tools import check_for_partial_string, get_line_continuation from xonsh.shell import transform_command env = builtins.__xonsh_env__ DEDENT_TOKENS = frozenset(['raise', 'return', 'pass', 'break', 'continue']) def carriage_return(b, cli, *, autoindent=True): """Preliminary parser to determine if 'Enter' key should send command to the xonsh parser for execution or should insert a newline for continued input. Current 'triggers' for inserting a newline are: - Not on first line of buffer and line is non-empty - Previous character is a colon (covers if, for, etc...) - User is in an open paren-block - Line ends with backslash - Any text exists below cursor position (relevant when editing previous multiline blocks) """ doc = b.document at_end_of_line = _is_blank(doc.current_line_after_cursor) current_line_blank = _is_blank(doc.current_line) indent = env.get('INDENT') if autoindent else '' partial_string_info = check_for_partial_string(doc.text) in_partial_string = (partial_string_info[0] is not None and partial_string_info[1] is None) # indent after a colon if (doc.current_line_before_cursor.strip().endswith(':') and at_end_of_line): b.newline(copy_margin=autoindent) b.insert_text(indent, fire_event=False) # if current line isn't blank, check dedent tokens elif (not current_line_blank and doc.current_line.split(maxsplit=1)[0] in DEDENT_TOKENS and doc.line_count > 1): b.newline(copy_margin=autoindent) b.delete_before_cursor(count=len(indent)) elif (not doc.on_first_line and not current_line_blank): b.newline(copy_margin=autoindent) elif (doc.current_line.endswith(get_line_continuation())): b.newline(copy_margin=autoindent) elif (doc.find_next_word_beginning() is not None and (any(not _is_blank(i) for i in doc.lines_from_current[1:]))): b.newline(copy_margin=autoindent) elif not current_line_blank and not can_compile(doc.text): b.newline(copy_margin=autoindent) elif current_line_blank and in_partial_string: b.newline(copy_margin=autoindent) else: b.accept_action.validate_and_handle(cli, b) def _is_blank(l): return len(l.strip()) == 0 def can_compile(src): """Returns whether the code can be compiled, i.e. 
it is valid xonsh.""" src = src if src.endswith('\n') else src + '\n' src = transform_command(src, show_diff=False) src = src.lstrip() try: builtins.__xonsh_execer__.compile(src, mode='single', glbs=None, locs=builtins.__xonsh_ctx__) rtn = True except SyntaxError: rtn = False except Exception: rtn = True return rtn @Condition def tab_insert_indent(cli): """Check if should insert indent instead of starting autocompletion. Checks if there are only whitespaces before the cursor - if so indent should be inserted, otherwise autocompletion. """ before_cursor = cli.current_buffer.document.current_line_before_cursor return bool(before_cursor.isspace()) @Condition def beginning_of_line(cli): """Check if cursor is at beginning of a line other than the first line in a multiline document """ before_cursor = cli.current_buffer.document.current_line_before_cursor return bool(len(before_cursor) == 0 and not cli.current_buffer.document.on_first_line) @Condition def end_of_line(cli): """Check if cursor is at the end of a line other than the last line in a multiline document """ d = cli.current_buffer.document at_end = d.is_cursor_at_the_end_of_line last_line = d.is_cursor_at_the_end return bool(at_end and not last_line) @Condition def should_confirm_completion(cli): """Check if completion needs confirmation""" return (builtins.__xonsh_env__.get('COMPLETIONS_CONFIRM') and cli.current_buffer.complete_state) # Copied from prompt-toolkit's key_binding/bindings/basic.py @Condition def ctrl_d_condition(cli): """Ctrl-D binding is only active when the default buffer is selected and empty. """ if builtins.__xonsh_env__.get("IGNOREEOF"): raise EOFError else: return (cli.current_buffer_name == DEFAULT_BUFFER and not cli.current_buffer.text) @Condition def autopair_condition(cli): """Check if XONSH_AUTOPAIR is set""" return builtins.__xonsh_env__.get("XONSH_AUTOPAIR", False) @Condition def whitespace_or_bracket_before(cli): """Check if there is whitespace or an opening bracket to the left of the cursor""" d = cli.current_buffer.document return bool(d.cursor_position == 0 or d.char_before_cursor.isspace() or d.char_before_cursor in '([{') @Condition def whitespace_or_bracket_after(cli): """Check if there is whitespace or a closing bracket to the right of the cursor""" d = cli.current_buffer.document return bool(d.is_cursor_at_the_end_of_line or d.current_char.isspace() or d.current_char in ')]}') def load_xonsh_bindings(key_bindings_manager): """ Load custom key bindings. """ handle = key_bindings_manager.registry.add_binding has_selection = HasSelection() insert_mode = ViInsertMode() | EmacsInsertMode() @handle(Keys.Tab, filter=tab_insert_indent) def insert_indent(event): """ If there are only whitespaces before current cursor position insert indent instead of autocompleting. 
""" event.cli.current_buffer.insert_text(env.get('INDENT')) @handle(Keys.ControlX, Keys.ControlE, filter=~has_selection) def open_editor(event): """ Open current buffer in editor """ event.current_buffer.open_in_editor(event.cli) @handle(Keys.BackTab, filter=insert_mode) def insert_literal_tab(event): """ Insert literal tab on Shift+Tab instead of autocompleting """ b = event.current_buffer if b.complete_state: b.complete_previous() else: event.cli.current_buffer.insert_text(env.get('INDENT')) @handle('(', filter=autopair_condition & whitespace_or_bracket_after) def insert_right_parens(event): event.cli.current_buffer.insert_text('(') event.cli.current_buffer.insert_text(')', move_cursor=False) @handle(')', filter=autopair_condition) def overwrite_right_parens(event): buffer = event.cli.current_buffer if buffer.document.current_char == ')': buffer.cursor_position += 1 else: buffer.insert_text(')') @handle('[', filter=autopair_condition & whitespace_or_bracket_after) def insert_right_bracket(event): event.cli.current_buffer.insert_text('[') event.cli.current_buffer.insert_text(']', move_cursor=False) @handle(']', filter=autopair_condition) def overwrite_right_bracket(event): buffer = event.cli.current_buffer if buffer.document.current_char == ']': buffer.cursor_position += 1 else: buffer.insert_text(']') @handle('{', filter=autopair_condition & whitespace_or_bracket_after) def insert_right_brace(event): event.cli.current_buffer.insert_text('{') event.cli.current_buffer.insert_text('}', move_cursor=False) @handle('}', filter=autopair_condition) def overwrite_right_brace(event): buffer = event.cli.current_buffer if buffer.document.current_char == '}': buffer.cursor_position += 1 else: buffer.insert_text('}') @handle('\'', filter=autopair_condition) def insert_right_quote(event): buffer = event.cli.current_buffer if buffer.document.current_char == '\'': buffer.cursor_position += 1 elif whitespace_or_bracket_before(event.cli)\ and whitespace_or_bracket_after(event.cli): buffer.insert_text('\'') buffer.insert_text('\'', move_cursor=False) else: buffer.insert_text('\'') @handle('"', filter=autopair_condition) def insert_right_double_quote(event): buffer = event.cli.current_buffer if buffer.document.current_char == '"': buffer.cursor_position += 1 elif whitespace_or_bracket_before(event.cli)\ and whitespace_or_bracket_after(event.cli): buffer.insert_text('"') buffer.insert_text('"', move_cursor=False) else: buffer.insert_text('"') @handle(Keys.Backspace, filter=autopair_condition) def delete_brackets_or_quotes(event): """Delete empty pair of brackets or quotes""" buffer = event.cli.current_buffer before = buffer.document.char_before_cursor after = buffer.document.current_char if any([before == b and after == a for (b, a) in ['()', '[]', '{}', "''", '""']]): buffer.delete(1) buffer.delete_before_cursor(1) @handle(Keys.ControlD, filter=ctrl_d_condition) def call_exit_alias(event): """Use xonsh exit function""" b = event.cli.current_buffer b.accept_action.validate_and_handle(event.cli, b) xonsh_exit([]) @handle(Keys.ControlJ, filter=IsMultiline()) def multiline_carriage_return(event): """ Wrapper around carriage_return multiline parser """ b = event.cli.current_buffer carriage_return(b, event.cli) @handle(Keys.ControlJ, filter=should_confirm_completion) def enter_confirm_completion(event): """Ignore (confirm completion)""" event.current_buffer.complete_state = None @handle(Keys.Escape, filter=should_confirm_completion) def esc_cancel_completion(event): """Use to cancel completion""" 
event.cli.current_buffer.cancel_completion() @handle(Keys.Escape, Keys.ControlJ) def execute_block_now(event): """Execute a block of text irrespective of cursor position""" b = event.cli.current_buffer b.accept_action.validate_and_handle(event.cli, b) @handle(Keys.Left, filter=beginning_of_line) def wrap_cursor_back(event): """Move cursor to end of previous line unless at beginning of document """ b = event.cli.current_buffer b.cursor_up(count=1) relative_end_index = b.document.get_end_of_line_position() b.cursor_right(count=relative_end_index) @handle(Keys.Right, filter=end_of_line) def wrap_cursor_forward(event): """Move cursor to beginning of next line unless at end of document""" b = event.cli.current_buffer relative_begin_index = b.document.get_start_of_line_position() b.cursor_left(count=abs(relative_begin_index)) b.cursor_down(count=1) @handle(Keys.ControlI, filter=insert_mode) def generate_completions(event): """ Tab-completion: where the first tab completes the common suffix and the second tab lists all the completions. Notes ----- This method was forked from the mainline prompt-toolkit repo. Copyright (c) 2014, Jonathan Slenders, All rights reserved. """ b = event.current_buffer def second_tab(): if b.complete_state: b.complete_next() else: event.cli.start_completion(select_first=False) # On the second tab-press, or when already navigating through # completions. if event.is_repeat or b.complete_state: second_tab() else: event.cli.start_completion(insert_common_part=True, select_first=False) xonsh-0.6.0/xonsh/ptk/shell.py000066400000000000000000000317671320541242300163040ustar00rootroot00000000000000# -*- coding: utf-8 -*- """The prompt_toolkit based xonsh shell.""" import sys import builtins from prompt_toolkit.key_binding.manager import KeyBindingManager from prompt_toolkit.auto_suggest import AutoSuggestFromHistory from prompt_toolkit.layout.lexers import PygmentsLexer from prompt_toolkit.shortcuts import print_tokens from prompt_toolkit.styles import PygmentsStyle, style_from_dict from xonsh.base_shell import BaseShell from xonsh.tools import print_exception, carriage_return from xonsh.ptk.completer import PromptToolkitCompleter from xonsh.ptk.history import PromptToolkitHistory from xonsh.ptk.key_bindings import load_xonsh_bindings from xonsh.ptk.shortcuts import Prompter from xonsh.events import events from xonsh.shell import transform_command from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS from xonsh.style_tools import partial_color_tokenize, _TokenType, DEFAULT_STYLE_DICT from xonsh.lazyimps import pygments, pyghooks, winutils Token = _TokenType() events.transmogrify('on_ptk_create', 'LoadEvent') events.doc('on_ptk_create', """ on_ptk_create(prompter: Prompter, history: PromptToolkitHistory, completer: PromptToolkitCompleter, bindings: KeyBindingManager) -> Fired after prompt toolkit has been initialized """) class PromptToolkitShell(BaseShell): """The xonsh shell.""" def __init__(self, **kwargs): super().__init__(**kwargs) if ON_WINDOWS: winutils.enable_virtual_terminal_processing() self._first_prompt = True self.prompter = Prompter() self.history = PromptToolkitHistory() self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx, self) key_bindings_manager_args = { 'enable_auto_suggest_bindings': True, 'enable_search': True, 'enable_abort_and_exit_bindings': True, } self.key_bindings_manager = KeyBindingManager(**key_bindings_manager_args) load_xonsh_bindings(self.key_bindings_manager) # This assumes that PromptToolkitShell is a singleton 
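        # Firing on_ptk_create here lets xontribs and other subscribers
        # customize the prompter, history, completer, and key bindings
        # right after they are constructed.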
events.on_ptk_create.fire( prompter=self.prompter, history=self.history, completer=self.pt_completer, bindings=self.key_bindings_manager, ) def singleline(self, store_in_history=True, auto_suggest=None, enable_history_search=True, multiline=True, **kwargs): """Reads a single line of input from the shell. The store_in_history kwarg flags whether the input should be stored in PTK's in-memory history. """ events.on_pre_prompt.fire() env = builtins.__xonsh_env__ mouse_support = env.get('MOUSE_SUPPORT') if store_in_history: history = self.history else: history = None enable_history_search = False auto_suggest = auto_suggest if env.get('AUTO_SUGGEST') else None completions_display = env.get('COMPLETIONS_DISPLAY') multicolumn = (completions_display == 'multi') complete_while_typing = env.get('UPDATE_COMPLETIONS_ON_KEYPRESS') if complete_while_typing: # PTK requires history search to be none when completing while typing enable_history_search = False if HAS_PYGMENTS: self.styler.style_name = env.get('XONSH_COLOR_STYLE') completer = None if completions_display == 'none' else self.pt_completer if not env.get('UPDATE_PROMPT_ON_KEYPRESS'): prompt_tokens_cached = self.prompt_tokens(None) get_prompt_tokens = lambda cli: prompt_tokens_cached rprompt_tokens_cached = self.rprompt_tokens(None) get_rprompt_tokens = lambda cli: rprompt_tokens_cached bottom_toolbar_tokens_cached = self.bottom_toolbar_tokens(None) get_bottom_toolbar_tokens = lambda cli: bottom_toolbar_tokens_cached else: get_prompt_tokens = self.prompt_tokens get_rprompt_tokens = self.rprompt_tokens get_bottom_toolbar_tokens = self.bottom_toolbar_tokens with self.prompter: prompt_args = { 'mouse_support': mouse_support, 'auto_suggest': auto_suggest, 'get_prompt_tokens': get_prompt_tokens, 'get_rprompt_tokens': get_rprompt_tokens, 'get_bottom_toolbar_tokens': get_bottom_toolbar_tokens, 'completer': completer, 'multiline': multiline, 'get_continuation_tokens': self.continuation_tokens, 'history': history, 'enable_history_search': enable_history_search, 'reserve_space_for_menu': 0, 'key_bindings_registry': self.key_bindings_manager.registry, 'display_completions_in_columns': multicolumn, 'complete_while_typing': complete_while_typing, } if builtins.__xonsh_env__.get('COLOR_INPUT'): if HAS_PYGMENTS: prompt_args['lexer'] = PygmentsLexer(pyghooks.XonshLexer) prompt_args['style'] = PygmentsStyle(pyghooks.xonsh_style_proxy(self.styler)) else: prompt_args['style'] = style_from_dict(DEFAULT_STYLE_DICT) line = self.prompter.prompt(**prompt_args) events.on_post_prompt.fire() return line def _push(self, line): """Pushes a line onto the buffer and compiles the code in a way that enables multiline input. 
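        Returns a ``(src, code)`` tuple; ``code`` is None when more input is
        still required or when compilation fails.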
""" code = None self.buffer.append(line) if self.need_more_lines: return None, code src = ''.join(self.buffer) src = transform_command(src) try: code = self.execer.compile(src, mode='single', glbs=self.ctx, locs=None) self.reset_buffer() except Exception: # pylint: disable=broad-except self.reset_buffer() print_exception() return src, None return src, code def cmdloop(self, intro=None): """Enters a loop that reads and execute input from user.""" if intro: print(intro) auto_suggest = AutoSuggestFromHistory() self.push = self._push while not builtins.__xonsh_exit__: try: line = self.singleline(auto_suggest=auto_suggest) if not line: self.emptyline() else: line = self.precmd(line) self.default(line) except (KeyboardInterrupt, SystemExit): self.reset_buffer() except EOFError: if builtins.__xonsh_env__.get("IGNOREEOF"): print('Use "exit" to leave the shell.', file=sys.stderr) else: break def prompt_tokens(self, cli): """Returns a list of (token, str) tuples for the current prompt.""" p = builtins.__xonsh_env__.get('PROMPT') try: p = self.prompt_formatter(p) except Exception: # pylint: disable=broad-except print_exception() toks = partial_color_tokenize(p) if self._first_prompt: carriage_return() self._first_prompt = False self.settitle() return toks def rprompt_tokens(self, cli): """Returns a list of (token, str) tuples for the current right prompt. """ p = builtins.__xonsh_env__.get('RIGHT_PROMPT') # self.prompt_formatter does handle empty strings properly, # but this avoids descending into it in the common case of # $RIGHT_PROMPT == ''. if isinstance(p, str) and len(p) == 0: return [] try: p = self.prompt_formatter(p) except Exception: # pylint: disable=broad-except print_exception() toks = partial_color_tokenize(p) return toks def bottom_toolbar_tokens(self, cli): """Returns a list of (token, str) tuples for the current bottom toolbar. """ p = builtins.__xonsh_env__.get('BOTTOM_TOOLBAR') # self.prompt_formatter does handle empty strings properly, # but this avoids descending into it in the common case of # $TOOLBAR == ''. if isinstance(p, str) and len(p) == 0: return [] try: p = self.prompt_formatter(p) except Exception: # pylint: disable=broad-except print_exception() toks = partial_color_tokenize(p) return toks def continuation_tokens(self, cli, width): """Displays dots in multiline prompt""" width = width - 1 dots = builtins.__xonsh_env__.get('MULTILINE_PROMPT') dots = dots() if callable(dots) else dots if dots is None: return [(Token, ' '*(width + 1))] basetoks = self.format_color(dots) baselen = sum(len(t[1]) for t in basetoks) if baselen == 0: return [(Token, ' '*(width + 1))] toks = basetoks * (width // baselen) n = width % baselen count = 0 for tok in basetoks: slen = len(tok[1]) newcount = slen + count if slen == 0: continue elif newcount <= n: toks.append(tok) else: toks.append((tok[0], tok[1][:n-count])) count = newcount if n <= count: break toks.append((Token, ' ')) # final space return toks def format_color(self, string, hide=False, force_string=False, **kwargs): """Formats a color string using Pygments. This, therefore, returns a list of (Token, str) tuples. If force_string is set to true, though, this will return a color formatted string. 
""" tokens = partial_color_tokenize(string) if force_string and HAS_PYGMENTS: env = builtins.__xonsh_env__ self.styler.style_name = env.get('XONSH_COLOR_STYLE') proxy_style = pyghooks.xonsh_style_proxy(self.styler) formatter = pyghooks.XonshTerminal256Formatter(style=proxy_style) s = pygments.format(tokens, formatter) return s elif force_string: print("To force colorization of string, install Pygments") return tokens else: return tokens def print_color(self, string, end='\n', **kwargs): """Prints a color string using prompt-toolkit color management.""" if isinstance(string, str): tokens = partial_color_tokenize(string + end) else: # assume this is a list of (Token, str) tuples and just print tokens = string if HAS_PYGMENTS: env = builtins.__xonsh_env__ self.styler.style_name = env.get('XONSH_COLOR_STYLE') proxy_style = PygmentsStyle(pyghooks.xonsh_style_proxy(self.styler)) else: proxy_style = style_from_dict(DEFAULT_STYLE_DICT) print_tokens(tokens, style=proxy_style) def color_style_names(self): """Returns an iterable of all available style names.""" if not HAS_PYGMENTS: return ['For other xonsh styles, please install pygments'] return pygments.styles.get_all_styles() def color_style(self): """Returns the current color map.""" if not HAS_PYGMENTS: return DEFAULT_STYLE_DICT env = builtins.__xonsh_env__ self.styler.style_name = env.get('XONSH_COLOR_STYLE') return self.styler.styles def restore_tty_sanity(self): """An interface for resetting the TTY stdin mode. This is highly dependent on the shell backend. Also it is mostly optional since it only affects ^Z backgrounding behaviour. """ # PTK does not seem to need any specialization here. However, # if it does for some reason in the future... # The following writes an ANSI escape sequence that sends the cursor # to the end of the line. This has the effect of restoring ECHO mode. # See http://unix.stackexchange.com/a/108014/129048 for more details. # This line can also be replaced by os.system("stty sane"), as per # http://stackoverflow.com/questions/19777129/interactive-python-interpreter-run-in-background#comment29421919_19778355 # However, it is important to note that not termios-based solution # seems to work. My guess is that this is because termios restoration # needs to be performed by the subprocess itself. This fix is important # when subprocesses don't properly restore the terminal attributes, # like Python in interactive mode. Also note that the sequences "\033M" # and "\033E" seem to work too, but these are technically VT100 codes. # I used the more primitive ANSI sequence to maximize compatibility. # -scopatz 2017-01-28 # if not ON_POSIX: # return # sys.stdout.write('\033[9999999C\n') xonsh-0.6.0/xonsh/ptk/shortcuts.py000066400000000000000000000112431320541242300172160ustar00rootroot00000000000000"""A prompt-toolkit inspired shortcut collection.""" import builtins import textwrap from prompt_toolkit.interface import CommandLineInterface from prompt_toolkit.enums import EditingMode from prompt_toolkit.utils import DummyContext from prompt_toolkit.shortcuts import (create_prompt_application, create_eventloop, create_asyncio_eventloop, create_output) from xonsh.platform import ptk_version_info import xonsh.tools as xt class Prompter(object): def __init__(self, cli=None, *args, **kwargs): """Implements a prompt that statefully holds a command-line interface. When used as a context manager, it will return itself on entry and reset itself on exit. 
Parameters ---------- cli : CommandLineInterface or None, optional If this is not a CommandLineInterface object, such an object will be created when the prompt() method is called. """ self.cli = cli self.major_minor = ptk_version_info()[:2] def __enter__(self): self.reset() return self def __exit__(self, exc_type, exc_value, traceback): pass def prompt(self, message='', **kwargs): """Get input from the user and return it. This is a wrapper around a lot of prompt_toolkit functionality and can be a replacement for raw_input. (or GNU readline.) If you want to keep your history across several calls, create one `~prompt_toolkit.history.History instance and pass it every time. This function accepts many keyword arguments. Except for the following. they are a proxy to the arguments of create_prompt_application(). Parameters ---------- patch_stdout : file-like, optional Replace ``sys.stdout`` by a proxy that ensures that print statements from other threads won't destroy the prompt. (They will be printed above the prompt instead.) return_asyncio_coroutine : bool, optional When True, return a asyncio coroutine. (Python >3.3) Notes ----- This method was forked from the mainline prompt-toolkit repo. Copyright (c) 2014, Jonathan Slenders, All rights reserved. """ patch_stdout = kwargs.pop('patch_stdout', False) return_asyncio_coroutine = kwargs.pop('return_asyncio_coroutine', False) if return_asyncio_coroutine: eventloop = create_asyncio_eventloop() else: eventloop = kwargs.pop('eventloop', None) or create_eventloop() # Create CommandLineInterface. if self.cli is None: if builtins.__xonsh_env__.get('VI_MODE'): editing_mode = EditingMode.VI else: editing_mode = EditingMode.EMACS kwargs['editing_mode'] = editing_mode cli = CommandLineInterface( application=create_prompt_application(message, **kwargs), eventloop=eventloop, output=create_output()) self.cli = cli else: cli = self.cli # Replace stdout. patch_context = cli.patch_stdout_context() if patch_stdout else DummyContext() # Read input and return it. if return_asyncio_coroutine: # Create an asyncio coroutine and call it. exec_context = {'patch_context': patch_context, 'cli': cli} exec(textwrap.dedent(''' import asyncio @asyncio.coroutine def prompt_coro(): with patch_context: document = yield from cli.run_async(reset_current_buffer=False) if document: return document.text '''), exec_context) return exec_context['prompt_coro']() else: # Note: We pass `reset_current_buffer=False`, because that way # it's easy to give DEFAULT_BUFFER a default value, without it # getting erased. We don't have to reset anyway, because this is # the first and only time that this CommandLineInterface will run. try: with patch_context: document = cli.run(reset_current_buffer=False) if document: return document.text except Exception: xt.print_exception() # return something to prevent xonsh crash when any # exceptions raise return '' finally: eventloop.close() def reset(self): """Resets the prompt and cli to a pristine state on this object.""" self.cli = None xonsh-0.6.0/xonsh/pyghooks.py000066400000000000000000001306721320541242300162350ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Hooks for pygments syntax highlighting.""" import os import re import sys import string import builtins from collections import ChainMap from collections.abc import MutableMapping # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
# DO NOT MOVE # must come before pygments imports from xonsh.lazyasd import load_module_in_background load_module_in_background('pkg_resources', debug='XONSH_DEBUG', replacements={'pygments.plugin': 'pkg_resources'}) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! from pygments.lexer import inherit, bygroups, include from pygments.lexers.agile import PythonLexer from pygments.token import (Keyword, Name, Comment, String, Error, Number, Operator, Generic, Whitespace, Token, Punctuation, Text) from pygments.style import Style from pygments.styles import get_style_by_name import pygments.util from xonsh.commands_cache import CommandsCache from xonsh.lazyasd import LazyObject, LazyDict, lazyobject from xonsh.tools import (ON_WINDOWS, intensify_colors_for_cmd_exe, expand_gray_colors_for_cmd_exe) from xonsh.color_tools import (RE_BACKGROUND, BASE_XONSH_COLORS, make_palette, find_closest_color) from xonsh.style_tools import norm_name from xonsh.lazyimps import terminal256 from xonsh.platform import os_environ def _command_is_valid(cmd): try: cmd_abspath = os.path.abspath(os.path.expanduser(cmd)) except (FileNotFoundError, OSError): return False return cmd in builtins.__xonsh_commands_cache__ or \ (os.path.isfile(cmd_abspath) and os.access(cmd_abspath, os.X_OK)) def _command_is_autocd(cmd): if not builtins.__xonsh_env__.get('AUTO_CD', False): return False try: cmd_abspath = os.path.abspath(os.path.expanduser(cmd)) except (FileNotFoundError, OSError): return False return os.path.isdir(cmd_abspath) def subproc_cmd_callback(_, match): """Yield Builtin token if match contains valid command, otherwise fallback to fallback lexer. """ cmd = match.group() yield match.start(), Name.Builtin if _command_is_valid(cmd) else Error, cmd def subproc_arg_callback(_, match): """Check if match contains valid path""" text = match.group() try: ispath = os.path.exists(os.path.expanduser(text)) except (FileNotFoundError, OSError): ispath = False yield (match.start(), Name.Constant if ispath else Text, text) COMMAND_TOKEN_RE = r'[^=\s\[\]{}()$"\'`<&|;!]+(?=\s|$|\)|\]|\}|!)' class XonshLexer(PythonLexer): """Xonsh console lexer for pygments.""" name = 'Xonsh lexer' aliases = ['xonsh', 'xsh'] filenames = ['*.xsh', '*xonshrc'] def __init__(self, *args, **kwargs): # If the lexor is loaded as a pygment plugin, we have to mock # __xonsh_env__ and __xonsh_commands_cache__ if not hasattr(builtins, '__xonsh_env__'): setattr(builtins, '__xonsh_env__', {}) if ON_WINDOWS: pathext = os_environ.get('PATHEXT', ['.EXE', '.BAT', '.CMD']) builtins.__xonsh_env__['PATHEXT'] = pathext.split(os.pathsep) if not hasattr(builtins, '__xonsh_commands_cache__'): setattr(builtins, '__xonsh_commands_cache__', CommandsCache()) _ = builtins.__xonsh_commands_cache__.all_commands # NOQA super().__init__(*args, **kwargs) tokens = { 'mode_switch_brackets': [ (r'(\$)(\{)', bygroups(Keyword, Punctuation), 'py_curly_bracket'), (r'(@)(\()', bygroups(Keyword, Punctuation), 'py_bracket'), (r'([\!\$])(\()', bygroups(Keyword, Punctuation), ('subproc_bracket', 'subproc_start')), (r'(@\$)(\()', bygroups(Keyword, Punctuation), ('subproc_bracket', 'subproc_start')), (r'([\!\$])(\[)', bygroups(Keyword, Punctuation), ('subproc_square_bracket', 'subproc_start')), (r'(g?)(`)', bygroups(String.Affix, String.Backtick), 'backtick_re'), ], 'subproc_bracket': [ (r'\)', Punctuation, '#pop'), include('subproc'), ], 'subproc_square_bracket': [ (r'\]', Punctuation, '#pop'), include('subproc'), ], 'py_bracket': [ (r'\)', Punctuation, '#pop'), include('root'), ], 'py_curly_bracket': [ 
(r'\}', Punctuation, '#pop'), include('root'), ], 'backtick_re': [ (r'[\.\^\$\*\+\?\[\]\|]', String.Regex), (r'({[0-9]+}|{[0-9]+,[0-9]+})\??', String.Regex), (r'\\([0-9]+|[AbBdDsSwWZabfnrtuUvx\\])', String.Escape), (r'`', String.Backtick, '#pop'), (r'[^`\.\^\$\*\+\?\[\]\|]+', String.Backtick), ], 'root': [ (r'\?', Keyword), (r'(?<=\w)!', Keyword), (r'\$\w+', Name.Variable), (r'\(', Punctuation, 'py_bracket'), (r'\{', Punctuation, 'py_curly_bracket'), include('mode_switch_brackets'), inherit, ], 'subproc_start': [ (r'\s+', Whitespace), (COMMAND_TOKEN_RE, subproc_cmd_callback, '#pop'), (r'', Whitespace, '#pop'), ], 'subproc': [ include('mode_switch_brackets'), (r'&&|\|\|', Operator, 'subproc_start'), (r'"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), (r"'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), (r'(?<=\w|\s)!', Keyword, 'subproc_macro'), (r'^!', Keyword, 'subproc_macro'), (r';', Punctuation, 'subproc_start'), (r'&|=', Punctuation), (r'\|', Punctuation, 'subproc_start'), (r'\s+', Text), (r'[^=\s\[\]{}()$"\'`<&|;]+', subproc_arg_callback), (r'<', Text), (r'\$\w+', Name.Variable), ], 'subproc_macro': [ (r'(\s*)([^\n]+)', bygroups(Whitespace, String)), (r'', Whitespace, '#pop'), ], } def get_tokens_unprocessed(self, text): """Check first command, then call super.get_tokens_unprocessed with root or subproc state""" start = 0 state = ('root', ) m = re.match(r'(\s*)({})'.format(COMMAND_TOKEN_RE), text) if m is not None: yield m.start(1), Whitespace, m.group(1) cmd = m.group(2) cmd_is_valid = _command_is_valid(cmd) cmd_is_autocd = _command_is_autocd(cmd) if cmd_is_valid or cmd_is_autocd: yield (m.start(2), Name.Builtin if cmd_is_valid else Name.Constant, cmd) start = m.end(2) state = ('subproc', ) for i, t, v in super().get_tokens_unprocessed(text[start:], state): yield i + start, t, v class XonshConsoleLexer(XonshLexer): """Xonsh console lexer for pygments.""" name = 'Xonsh console lexer' aliases = ['xonshcon'] filenames = [] tokens = { 'root': [(r'^(>>>|\.\.\.) ', Generic.Prompt), (r'\n(>>>|\.\.\.)', Generic.Prompt), (r'\n(?![>.][>.][>.] )([^\n]*)', Generic.Output), (r'\n(?![>.][>.][>.] )(.*?)$', Generic.Output), inherit] } # # Colors and Styles # Color = Token.Color # alias to new color token namespace def color_by_name(name, fg=None, bg=None): """Converts a color name to a color token, foreground name, and background name. Will take into consideration current foreground and background colors, if provided. Parameters ---------- name : str Color name. fg : str, optional Foreground color name. bg : str, optional Background color name. Returns ------- tok : Token Pygments Token.Color subclass fg : str or None New computed foreground color name. bg : str or None New computed background color name. """ name = name.upper() if name == 'NO_COLOR': return Color.NO_COLOR, None, None m = RE_BACKGROUND.search(name) if m is None: # must be foreground color fg = norm_name(name) else: bg = norm_name(name) # assemble token if fg is None and bg is None: tokname = 'NO_COLOR' elif fg is None: tokname = bg elif bg is None: tokname = fg else: tokname = fg + '__' + bg tok = getattr(Color, tokname) return tok, fg, bg def code_by_name(name, styles): """Converts a token name into a pygments-style color code. Parameters ---------- name : str Color token name. styles : Mapping Mapping for looking up non-hex colors Returns ------- code : str Pygments style color code. 
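Examples
--------
An illustrative call using hex pseudo-names, which need no ``styles``
lookup (the color values here are arbitrary)::

    code_by_name('hexff0000__background_hex00ff00', {})
    # returns '#ff0000 bg:#00ff00'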
""" fg, _, bg = name.lower().partition('__') if fg.startswith('background_'): fg, bg = bg, fg codes = [] # foreground color if len(fg) == 0: pass elif 'hex' in fg: for p in fg.split('_'): codes.append('#'+p[3:] if p.startswith('hex') else p) else: fgtok = getattr(Color, fg.upper()) if fgtok in styles: codes.append(styles[fgtok]) else: codes += fg.split('_') # background color if len(bg) == 0: pass elif bg.startswith('background_hex'): codes.append('bg:#'+bg[14:]) else: bgtok = getattr(Color, bg.upper()) if bgtok in styles: codes.append(styles[bgtok]) else: codes.append(bg.replace('background_', 'bg:')) code = ' '.join(codes) return code def partial_color_tokenize(template): """Tokenizes a template string containing colors. Will return a list of tuples mapping the token to the string which has that color. These sub-strings maybe templates themselves. """ if hasattr(builtins, '__xonsh_shell__'): styles = __xonsh_shell__.shell.styler.styles else: styles = None color = Color.NO_COLOR try: toks, color = _partial_color_tokenize_main(template, styles) except Exception: toks = [(Color.NO_COLOR, template)] if styles is not None: styles[color] # ensure color is available return toks def _partial_color_tokenize_main(template, styles): formatter = string.Formatter() bopen = '{' bclose = '}' colon = ':' expl = '!' color = Color.NO_COLOR fg = bg = None value = '' toks = [] for literal, field, spec, conv in formatter.parse(template): if field is None: value += literal elif field in KNOWN_COLORS or '#' in field: value += literal next_color, fg, bg = color_by_name(field, fg, bg) if next_color is not color: if len(value) > 0: toks.append((color, value)) if styles is not None: styles[color] # ensure color is available color = next_color value = '' elif field is not None: parts = [literal, bopen, field] if conv is not None and len(conv) > 0: parts.append(expl) parts.append(conv) if spec is not None and len(spec) > 0: parts.append(colon) parts.append(spec) parts.append(bclose) value += ''.join(parts) else: value += literal toks.append((color, value)) return toks, color class CompoundColorMap(MutableMapping): """Looks up color tokens by name, potentially generating the value from the lookup. """ def __init__(self, styles, *args, **kwargs): self.styles = styles self.colors = dict(*args, **kwargs) def __getitem__(self, key): if key in self.colors: return self.colors[key] if key in self.styles: value = self.styles[key] self[key] = value return value if key is Color: raise KeyError pre, _, name = str(key).rpartition('.') if pre != 'Token.Color': raise KeyError value = code_by_name(name, self.styles) self[key] = value return value def __setitem__(self, key, value): self.colors[key] = value def __delitem__(self, key): del self.colors[key] def __iter__(self): yield from self.colors.keys() def __len__(self): return len(self.colors) class XonshStyle(Style): """A xonsh pygments style that will dispatch to the correct color map by using a ChainMap. The style_name property may be used to reset the current style. """ def __init__(self, style_name='default'): """ Parameters ---------- style_name : str, optional The style name to initialize with. 
""" self.trap = {} # for trapping custom colors set by user self._smap = {} self._style_name = '' self.style_name = style_name super().__init__() @property def style_name(self): return self._style_name @style_name.setter def style_name(self, value): if self._style_name == value: return if value not in STYLES: try: # loading style dynamically pygments_style_by_name(value) except Exception: print('Could not find style {0!r}, using default'.format(value), file=sys.stderr) value = 'default' builtins.__xonsh_env__['XONSH_COLOR_STYLE'] = value cmap = STYLES[value] if value == 'default': self._smap = XONSH_BASE_STYLE.copy() else: try: self._smap = get_style_by_name(value)().styles.copy() except (ImportError, pygments.util.ClassNotFound): self._smap = XONSH_BASE_STYLE.copy() compound = CompoundColorMap(ChainMap(self.trap, cmap, PTK_STYLE, self._smap)) self.styles = ChainMap(self.trap, cmap, PTK_STYLE, self._smap, compound) self._style_name = value if ON_WINDOWS: self.enhance_colors_for_cmd_exe() @style_name.deleter def style_name(self): self._style_name = '' def enhance_colors_for_cmd_exe(self): """ Enhance colors when using cmd.exe on windows. When using the default style all blue and dark red colors are changed to CYAN and intense red. """ env = builtins.__xonsh_env__ # Ensure we are not using ConEmu if 'CONEMUANSI' not in env: if not hasattr(pygments.style, 'ansicolors'): # Auto suggest needs to be a darker shade to be distinguishable # from the default color self.styles[Token.AutoSuggestion] = '#444444' self._smap.update(expand_gray_colors_for_cmd_exe(self._smap)) if env.get('INTENSIFY_COLORS_ON_WIN', False): has_ansi = hasattr(pygments.style, 'ansicolors') newcolors = intensify_colors_for_cmd_exe(self._smap, ansi=has_ansi) self._smap.update(newcolors) def xonsh_style_proxy(styler): """Factory for a proxy class to a xonsh style.""" class XonshStyleProxy(Style): """Simple proxy class to fool prompt toolkit.""" target = styler styles = styler.styles def __new__(cls, *args, **kwargs): return cls.target return XonshStyleProxy if hasattr(pygments.style, 'ansicolors'): PTK_STYLE = { Token.Menu.Completions: 'bg:#ansilightgray #ansiblack', Token.Menu.Completions.Completion: '', Token.Menu.Completions.Completion.Current: 'bg:#ansidarkgray #ansiwhite', Token.Scrollbar: 'bg:#ansidarkgray', Token.Scrollbar.Button: 'bg:#ansiblack', Token.Scrollbar.Arrow: 'bg:#ansiblack #ansiwhite bold', Token.AutoSuggestion: '#ansidarkgray', Token.Aborted: '#ansidarkgray', } else: PTK_STYLE = { Token.Menu.Completions: 'bg:#888888 #000000', Token.Menu.Completions.Completion: '', Token.Menu.Completions.Completion.Current: 'bg:#555555 #ffffff', Token.Scrollbar: 'bg:#555555', Token.Scrollbar.Button: 'bg:#000000', Token.Scrollbar.Arrow: 'bg:#000000 #ffffff bold', Token.AutoSuggestion: '#666666', Token.Aborted: '#666666', } if hasattr(pygments.style, 'ansicolors'): XONSH_BASE_STYLE = LazyObject(lambda: { Whitespace: '#ansilightgray', Comment: 'underline #ansiteal', Comment.Preproc: 'underline #ansibrown', Keyword: 'bold #ansidarkgreen', Keyword.Pseudo: 'nobold', Keyword.Type: 'nobold #ansidarkred', Operator: '#ansidarkgray', Operator.Word: 'bold #ansipurple', Name.Builtin: '#ansidarkgreen', Name.Function: '#ansiblue', Name.Class: 'bold #ansiblue', Name.Namespace: 'bold #ansiblue', Name.Exception: 'bold #ansired', Name.Variable: '#ansidarkblue', Name.Constant: '#ansidarkred', Name.Label: '#ansiyellow', Name.Entity: 'bold #ansilightgray', Name.Attribute: '#ansiyellow', Name.Tag: 'bold #ansidarkgreen', Name.Decorator: 
'#ansifuchsia', String: '#ansired', String.Doc: 'underline', String.Interpol: 'bold #ansipurple', String.Escape: 'bold #ansibrown', String.Regex: '#ansipurple', String.Symbol: '#ansibrown', String.Other: '#ansidarkgreen', Number: '#ansidarkgray', Generic.Heading: 'bold #ansidarkblue', Generic.Subheading: 'bold #ansipurple', Generic.Deleted: '#ansidarkred', Generic.Inserted: '#ansigreen', Generic.Error: 'bold #ansired', Generic.Emph: 'underline', Generic.Prompt: 'bold #ansidarkblue', Generic.Output: '#ansidarkblue', Generic.Traceback: '#ansidarkblue', Error: '#ansired', }, globals(), 'XONSH_BASE_STYLE') else: XONSH_BASE_STYLE = LazyObject(lambda: { Whitespace: "#bbbbbb", Comment: "italic #408080", Comment.Preproc: "noitalic #BC7A00", Keyword: "bold #008000", Keyword.Pseudo: "nobold", Keyword.Type: "nobold #B00040", Operator: "#666666", Operator.Word: "bold #AA22FF", Name.Builtin: "#008000", Name.Function: "#0000FF", Name.Class: "bold #0000FF", Name.Namespace: "bold #0000FF", Name.Exception: "bold #D2413A", Name.Variable: "#19177C", Name.Constant: "#880000", Name.Label: "#A0A000", Name.Entity: "bold #999999", Name.Attribute: "#7D9029", Name.Tag: "bold #008000", Name.Decorator: "#AA22FF", String: "#BA2121", String.Doc: "italic", String.Interpol: "bold #BB6688", String.Escape: "bold #BB6622", String.Regex: "#BB6688", String.Symbol: "#19177C", String.Other: "#008000", Number: "#666666", Generic.Heading: "bold #000080", Generic.Subheading: "bold #800080", Generic.Deleted: "#A00000", Generic.Inserted: "#00A000", Generic.Error: "#FF0000", Generic.Emph: "italic", Generic.Strong: "bold", Generic.Prompt: "bold #000080", Generic.Output: "#888", Generic.Traceback: "#04D", Error: "border:#FF0000" }, globals(), 'XONSH_BASE_STYLE') KNOWN_COLORS = LazyObject(lambda: frozenset([ 'BACKGROUND_BLACK', 'BACKGROUND_BLUE', 'BACKGROUND_CYAN', 'BACKGROUND_GREEN', 'BACKGROUND_INTENSE_BLACK', 'BACKGROUND_INTENSE_BLUE', 'BACKGROUND_INTENSE_CYAN', 'BACKGROUND_INTENSE_GREEN', 'BACKGROUND_INTENSE_PURPLE', 'BACKGROUND_INTENSE_RED', 'BACKGROUND_INTENSE_WHITE', 'BACKGROUND_INTENSE_YELLOW', 'BACKGROUND_PURPLE', 'BACKGROUND_RED', 'BACKGROUND_WHITE', 'BACKGROUND_YELLOW', 'BLACK', 'BLUE', 'BOLD_BLACK', 'BOLD_BLUE', 'BOLD_CYAN', 'BOLD_GREEN', 'BOLD_INTENSE_BLACK', 'BOLD_INTENSE_BLUE', 'BOLD_INTENSE_CYAN', 'BOLD_INTENSE_GREEN', 'BOLD_INTENSE_PURPLE', 'BOLD_INTENSE_RED', 'BOLD_INTENSE_WHITE', 'BOLD_INTENSE_YELLOW', 'BOLD_PURPLE', 'BOLD_RED', 'BOLD_UNDERLINE_BLACK', 'BOLD_UNDERLINE_BLUE', 'BOLD_UNDERLINE_CYAN', 'BOLD_UNDERLINE_GREEN', 'BOLD_UNDERLINE_INTENSE_BLACK', 'BOLD_UNDERLINE_INTENSE_BLUE', 'BOLD_UNDERLINE_INTENSE_CYAN', 'BOLD_UNDERLINE_INTENSE_GREEN', 'BOLD_UNDERLINE_INTENSE_PURPLE', 'BOLD_UNDERLINE_INTENSE_RED', 'BOLD_UNDERLINE_INTENSE_WHITE', 'BOLD_UNDERLINE_INTENSE_YELLOW', 'BOLD_UNDERLINE_PURPLE', 'BOLD_UNDERLINE_RED', 'BOLD_UNDERLINE_WHITE', 'BOLD_UNDERLINE_YELLOW', 'BOLD_WHITE', 'BOLD_YELLOW', 'CYAN', 'GREEN', 'INTENSE_BLACK', 'INTENSE_BLUE', 'INTENSE_CYAN', 'INTENSE_GREEN', 'INTENSE_PURPLE', 'INTENSE_RED', 'INTENSE_WHITE', 'INTENSE_YELLOW', 'NO_COLOR', 'PURPLE', 'RED', 'UNDERLINE_BLACK', 'UNDERLINE_BLUE', 'UNDERLINE_CYAN', 'UNDERLINE_GREEN', 'UNDERLINE_INTENSE_BLACK', 'UNDERLINE_INTENSE_BLUE', 'UNDERLINE_INTENSE_CYAN', 'UNDERLINE_INTENSE_GREEN', 'UNDERLINE_INTENSE_PURPLE', 'UNDERLINE_INTENSE_RED', 'UNDERLINE_INTENSE_WHITE', 'UNDERLINE_INTENSE_YELLOW', 'UNDERLINE_PURPLE', 'UNDERLINE_RED', 'UNDERLINE_WHITE', 'UNDERLINE_YELLOW', 'WHITE', 'YELLOW', ]), globals(), 'KNOWN_COLORS') def _expand_style(cmap): """Expands a style 
in order to more quickly make color map changes.""" for key, val in list(cmap.items()): if key is Color.NO_COLOR: continue _, _, key = str(key).rpartition('.') cmap[getattr(Color, 'BOLD_'+key)] = 'bold ' + val cmap[getattr(Color, 'UNDERLINE_'+key)] = 'underline ' + val cmap[getattr(Color, 'BOLD_UNDERLINE_'+key)] = 'bold underline ' + val if val == 'noinherit': cmap[getattr(Color, 'BACKGROUND_'+key)] = val else: cmap[getattr(Color, 'BACKGROUND_'+key)] = 'bg:' + val def _bw_style(): style = { Color.BLACK: 'noinherit', Color.BLUE: 'noinherit', Color.CYAN: 'noinherit', Color.GREEN: 'noinherit', Color.INTENSE_BLACK: 'noinherit', Color.INTENSE_BLUE: 'noinherit', Color.INTENSE_CYAN: 'noinherit', Color.INTENSE_GREEN: 'noinherit', Color.INTENSE_PURPLE: 'noinherit', Color.INTENSE_RED: 'noinherit', Color.INTENSE_WHITE: 'noinherit', Color.INTENSE_YELLOW: 'noinherit', Color.NO_COLOR: 'noinherit', Color.PURPLE: 'noinherit', Color.RED: 'noinherit', Color.WHITE: 'noinherit', Color.YELLOW: 'noinherit', } _expand_style(style) return style def _default_style(): if hasattr(pygments.style, 'ansicolors'): style = { Color.BLACK: '#ansiblack', Color.BLUE: '#ansidarkblue', Color.CYAN: '#ansiteal', Color.GREEN: '#ansidarkgreen', Color.INTENSE_BLACK: '#ansidarkgray', Color.INTENSE_BLUE: '#ansiblue', Color.INTENSE_CYAN: '#ansiturquoise', Color.INTENSE_GREEN: '#ansigreen', Color.INTENSE_PURPLE: '#ansifuchsia', Color.INTENSE_RED: '#ansired', Color.INTENSE_WHITE: '#ansiwhite', Color.INTENSE_YELLOW: '#ansiyellow', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#ansipurple', Color.RED: '#ansidarkred', Color.WHITE: '#ansilightgray', Color.YELLOW: '#ansibrown', } elif ON_WINDOWS and 'CONEMUANSI' not in os_environ: # These colors must match the color specification # in prompt_toolkit, so the colors are converted # correctly when using cmd.exe style = { Color.BLACK: '#000000', Color.BLUE: '#0000AA', Color.CYAN: '#00AAAA', Color.GREEN: '#00AA00', Color.INTENSE_BLACK: '#444444', Color.INTENSE_BLUE: '#4444FF', Color.INTENSE_CYAN: '#44FFFF', Color.INTENSE_GREEN: '#44FF44', Color.INTENSE_PURPLE: '#FF44FF', Color.INTENSE_RED: '#FF4444', Color.INTENSE_WHITE: '#FFFFFF', Color.INTENSE_YELLOW: '#FFFF44', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#AA00AA', Color.RED: '#AA0000', Color.WHITE: '#888888', Color.YELLOW: '#AAAA00', } else: style = { Color.BLACK: '#000000', Color.BLUE: '#0000AA', Color.CYAN: '#00AAAA', Color.GREEN: '#00AA00', Color.INTENSE_BLACK: '#555555', Color.INTENSE_BLUE: '#0000FF', Color.INTENSE_CYAN: '#55FFFF', Color.INTENSE_GREEN: '#00FF00', Color.INTENSE_PURPLE: '#FF00FF', Color.INTENSE_RED: '#FF0000', Color.INTENSE_WHITE: '#ffffff', Color.INTENSE_YELLOW: '#FFFF55', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#AA00AA', Color.RED: '#AA0000', Color.WHITE: '#aaaaaa', Color.YELLOW: '#ffff00', } _expand_style(style) return style def _monokai_style(): style = { Color.BLACK: '#1e0010', Color.BLUE: '#6666ef', Color.CYAN: '#66d9ef', Color.GREEN: '#2ee22e', Color.INTENSE_BLACK: '#5e5e5e', Color.INTENSE_BLUE: '#2626d7', Color.INTENSE_CYAN: '#2ed9d9', Color.INTENSE_GREEN: '#a6e22e', Color.INTENSE_PURPLE: '#ae81ff', Color.INTENSE_RED: '#f92672', Color.INTENSE_WHITE: '#f8f8f2', Color.INTENSE_YELLOW: '#e6db74', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#960050', Color.RED: '#AF0000', Color.WHITE: '#d7d7d7', Color.YELLOW: '#e2e22e', } _expand_style(style) return style ###################################### # Auto-generated below this line # ###################################### def _algol_style(): style = { Color.BLACK: '#666', 
Color.BLUE: '#666', Color.CYAN: '#666', Color.GREEN: '#666', Color.INTENSE_BLACK: '#666', Color.INTENSE_BLUE: '#888', Color.INTENSE_CYAN: '#888', Color.INTENSE_GREEN: '#888', Color.INTENSE_PURPLE: '#888', Color.INTENSE_RED: '#FF0000', Color.INTENSE_WHITE: '#888', Color.INTENSE_YELLOW: '#888', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#666', Color.RED: '#FF0000', Color.WHITE: '#888', Color.YELLOW: '#FF0000', } _expand_style(style) return style def _algol_nu_style(): style = { Color.BLACK: '#666', Color.BLUE: '#666', Color.CYAN: '#666', Color.GREEN: '#666', Color.INTENSE_BLACK: '#666', Color.INTENSE_BLUE: '#888', Color.INTENSE_CYAN: '#888', Color.INTENSE_GREEN: '#888', Color.INTENSE_PURPLE: '#888', Color.INTENSE_RED: '#FF0000', Color.INTENSE_WHITE: '#888', Color.INTENSE_YELLOW: '#888', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#666', Color.RED: '#FF0000', Color.WHITE: '#888', Color.YELLOW: '#FF0000', } _expand_style(style) return style def _autumn_style(): style = { Color.BLACK: '#000080', Color.BLUE: '#0000aa', Color.CYAN: '#00aaaa', Color.GREEN: '#00aa00', Color.INTENSE_BLACK: '#555555', Color.INTENSE_BLUE: '#1e90ff', Color.INTENSE_CYAN: '#1e90ff', Color.INTENSE_GREEN: '#4c8317', Color.INTENSE_PURPLE: '#FAA', Color.INTENSE_RED: '#aa5500', Color.INTENSE_WHITE: '#bbbbbb', Color.INTENSE_YELLOW: '#FAA', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#800080', Color.RED: '#aa0000', Color.WHITE: '#aaaaaa', Color.YELLOW: '#aa5500', } _expand_style(style) return style def _borland_style(): style = { Color.BLACK: '#000000', Color.BLUE: '#000080', Color.CYAN: '#008080', Color.GREEN: '#008800', Color.INTENSE_BLACK: '#555555', Color.INTENSE_BLUE: '#0000FF', Color.INTENSE_CYAN: '#ddffdd', Color.INTENSE_GREEN: '#888888', Color.INTENSE_PURPLE: '#e3d2d2', Color.INTENSE_RED: '#FF0000', Color.INTENSE_WHITE: '#ffdddd', Color.INTENSE_YELLOW: '#e3d2d2', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#800080', Color.RED: '#aa0000', Color.WHITE: '#aaaaaa', Color.YELLOW: '#a61717', } _expand_style(style) return style def _colorful_style(): style = { Color.BLACK: '#000', Color.BLUE: '#00C', Color.CYAN: '#0e84b5', Color.GREEN: '#00A000', Color.INTENSE_BLACK: '#555', Color.INTENSE_BLUE: '#33B', Color.INTENSE_CYAN: '#bbbbbb', Color.INTENSE_GREEN: '#888', Color.INTENSE_PURPLE: '#FAA', Color.INTENSE_RED: '#D42', Color.INTENSE_WHITE: '#fff0ff', Color.INTENSE_YELLOW: '#FAA', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#800080', Color.RED: '#A00000', Color.WHITE: '#bbbbbb', Color.YELLOW: '#A60', } _expand_style(style) return style def _emacs_style(): style = { Color.BLACK: '#008000', Color.BLUE: '#000080', Color.CYAN: '#04D', Color.GREEN: '#00A000', Color.INTENSE_BLACK: '#666666', Color.INTENSE_BLUE: '#04D', Color.INTENSE_CYAN: '#bbbbbb', Color.INTENSE_GREEN: '#00BB00', Color.INTENSE_PURPLE: '#AA22FF', Color.INTENSE_RED: '#D2413A', Color.INTENSE_WHITE: '#bbbbbb', Color.INTENSE_YELLOW: '#bbbbbb', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#800080', Color.RED: '#A00000', Color.WHITE: '#bbbbbb', Color.YELLOW: '#BB6622', } _expand_style(style) return style def _friendly_style(): style = { Color.BLACK: '#007020', Color.BLUE: '#000080', Color.CYAN: '#0e84b5', Color.GREEN: '#00A000', Color.INTENSE_BLACK: '#555555', Color.INTENSE_BLUE: '#70a0d0', Color.INTENSE_CYAN: '#60add5', Color.INTENSE_GREEN: '#40a070', Color.INTENSE_PURPLE: '#bb60d5', Color.INTENSE_RED: '#d55537', Color.INTENSE_WHITE: '#fff0f0', Color.INTENSE_YELLOW: '#bbbbbb', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#800080', Color.RED: '#A00000', Color.WHITE: 
'#bbbbbb', Color.YELLOW: '#c65d09', } _expand_style(style) return style def _fruity_style(): style = { Color.BLACK: '#0f140f', Color.BLUE: '#0086d2', Color.CYAN: '#0086d2', Color.GREEN: '#008800', Color.INTENSE_BLACK: '#444444', Color.INTENSE_BLUE: '#0086f7', Color.INTENSE_CYAN: '#0086f7', Color.INTENSE_GREEN: '#888888', Color.INTENSE_PURPLE: '#ff0086', Color.INTENSE_RED: '#fb660a', Color.INTENSE_WHITE: '#ffffff', Color.INTENSE_YELLOW: '#cdcaa9', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#ff0086', Color.RED: '#ff0007', Color.WHITE: '#cdcaa9', Color.YELLOW: '#fb660a', } _expand_style(style) return style def _igor_style(): style = { Color.BLACK: '#009C00', Color.BLUE: '#0000FF', Color.CYAN: '#007575', Color.GREEN: '#009C00', Color.INTENSE_BLACK: '#007575', Color.INTENSE_BLUE: '#0000FF', Color.INTENSE_CYAN: '#007575', Color.INTENSE_GREEN: '#009C00', Color.INTENSE_PURPLE: '#CC00A3', Color.INTENSE_RED: '#C34E00', Color.INTENSE_WHITE: '#CC00A3', Color.INTENSE_YELLOW: '#C34E00', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#CC00A3', Color.RED: '#C34E00', Color.WHITE: '#CC00A3', Color.YELLOW: '#C34E00', } _expand_style(style) return style def _lovelace_style(): style = { Color.BLACK: '#444444', Color.BLUE: '#2838b0', Color.CYAN: '#289870', Color.GREEN: '#388038', Color.INTENSE_BLACK: '#666666', Color.INTENSE_BLUE: '#2838b0', Color.INTENSE_CYAN: '#888888', Color.INTENSE_GREEN: '#289870', Color.INTENSE_PURPLE: '#a848a8', Color.INTENSE_RED: '#b83838', Color.INTENSE_WHITE: '#888888', Color.INTENSE_YELLOW: '#a89028', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#a848a8', Color.RED: '#c02828', Color.WHITE: '#888888', Color.YELLOW: '#b85820', } _expand_style(style) return style def _manni_style(): style = { Color.BLACK: '#000000', Color.BLUE: '#000099', Color.CYAN: '#009999', Color.GREEN: '#00CC00', Color.INTENSE_BLACK: '#555555', Color.INTENSE_BLUE: '#9999FF', Color.INTENSE_CYAN: '#00CCFF', Color.INTENSE_GREEN: '#99CC66', Color.INTENSE_PURPLE: '#CC00FF', Color.INTENSE_RED: '#FF6600', Color.INTENSE_WHITE: '#FFCCCC', Color.INTENSE_YELLOW: '#FFCC33', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#CC00FF', Color.RED: '#AA0000', Color.WHITE: '#AAAAAA', Color.YELLOW: '#CC3300', } _expand_style(style) return style def _murphy_style(): style = { Color.BLACK: '#000', Color.BLUE: '#000080', Color.CYAN: '#0e84b5', Color.GREEN: '#00A000', Color.INTENSE_BLACK: '#555', Color.INTENSE_BLUE: '#66f', Color.INTENSE_CYAN: '#5ed', Color.INTENSE_GREEN: '#5ed', Color.INTENSE_PURPLE: '#e9e', Color.INTENSE_RED: '#f84', Color.INTENSE_WHITE: '#eee', Color.INTENSE_YELLOW: '#fc8', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#800080', Color.RED: '#A00000', Color.WHITE: '#bbbbbb', Color.YELLOW: '#c65d09', } _expand_style(style) return style def _native_style(): style = { Color.BLACK: '#520000', Color.BLUE: '#3677a9', Color.CYAN: '#24909d', Color.GREEN: '#589819', Color.INTENSE_BLACK: '#666666', Color.INTENSE_BLUE: '#447fcf', Color.INTENSE_CYAN: '#40ffff', Color.INTENSE_GREEN: '#6ab825', Color.INTENSE_PURPLE: '#e3d2d2', Color.INTENSE_RED: '#cd2828', Color.INTENSE_WHITE: '#ffffff', Color.INTENSE_YELLOW: '#ed9d13', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#666666', Color.RED: '#a61717', Color.WHITE: '#aaaaaa', Color.YELLOW: '#a61717', } _expand_style(style) return style def _paraiso_dark_style(): style = { Color.BLACK: '#776e71', Color.BLUE: '#815ba4', Color.CYAN: '#06b6ef', Color.GREEN: '#48b685', Color.INTENSE_BLACK: '#776e71', Color.INTENSE_BLUE: '#815ba4', Color.INTENSE_CYAN: '#5bc4bf', Color.INTENSE_GREEN: '#48b685', 
Color.INTENSE_PURPLE: '#e7e9db', Color.INTENSE_RED: '#ef6155', Color.INTENSE_WHITE: '#e7e9db', Color.INTENSE_YELLOW: '#fec418', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#815ba4', Color.RED: '#ef6155', Color.WHITE: '#5bc4bf', Color.YELLOW: '#f99b15', } _expand_style(style) return style def _paraiso_light_style(): style = { Color.BLACK: '#2f1e2e', Color.BLUE: '#2f1e2e', Color.CYAN: '#06b6ef', Color.GREEN: '#48b685', Color.INTENSE_BLACK: '#2f1e2e', Color.INTENSE_BLUE: '#815ba4', Color.INTENSE_CYAN: '#5bc4bf', Color.INTENSE_GREEN: '#48b685', Color.INTENSE_PURPLE: '#815ba4', Color.INTENSE_RED: '#ef6155', Color.INTENSE_WHITE: '#5bc4bf', Color.INTENSE_YELLOW: '#fec418', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#815ba4', Color.RED: '#2f1e2e', Color.WHITE: '#8d8687', Color.YELLOW: '#f99b15', } _expand_style(style) return style def _pastie_style(): style = { Color.BLACK: '#000000', Color.BLUE: '#0000DD', Color.CYAN: '#0066bb', Color.GREEN: '#008800', Color.INTENSE_BLACK: '#555555', Color.INTENSE_BLUE: '#3333bb', Color.INTENSE_CYAN: '#ddffdd', Color.INTENSE_GREEN: '#22bb22', Color.INTENSE_PURPLE: '#e3d2d2', Color.INTENSE_RED: '#dd7700', Color.INTENSE_WHITE: '#fff0ff', Color.INTENSE_YELLOW: '#e3d2d2', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#bb0066', Color.RED: '#aa0000', Color.WHITE: '#bbbbbb', Color.YELLOW: '#aa6600', } _expand_style(style) return style def _perldoc_style(): style = { Color.BLACK: '#000080', Color.BLUE: '#000080', Color.CYAN: '#1e889b', Color.GREEN: '#00aa00', Color.INTENSE_BLACK: '#555555', Color.INTENSE_BLUE: '#B452CD', Color.INTENSE_CYAN: '#bbbbbb', Color.INTENSE_GREEN: '#228B22', Color.INTENSE_PURPLE: '#B452CD', Color.INTENSE_RED: '#CD5555', Color.INTENSE_WHITE: '#e3d2d2', Color.INTENSE_YELLOW: '#e3d2d2', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#8B008B', Color.RED: '#aa0000', Color.WHITE: '#a7a7a7', Color.YELLOW: '#cb6c20', } _expand_style(style) return style def _rrt_style(): style = { Color.BLACK: '#ff0000', Color.BLUE: '#87ceeb', Color.CYAN: '#87ceeb', Color.GREEN: '#00ff00', Color.INTENSE_BLACK: '#87ceeb', Color.INTENSE_BLUE: '#87ceeb', Color.INTENSE_CYAN: '#7fffd4', Color.INTENSE_GREEN: '#00ff00', Color.INTENSE_PURPLE: '#ee82ee', Color.INTENSE_RED: '#ff0000', Color.INTENSE_WHITE: '#e5e5e5', Color.INTENSE_YELLOW: '#eedd82', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#ee82ee', Color.RED: '#ff0000', Color.WHITE: '#87ceeb', Color.YELLOW: '#ff0000', } _expand_style(style) return style def _tango_style(): style = { Color.BLACK: '#000000', Color.BLUE: '#0000cf', Color.CYAN: '#3465a4', Color.GREEN: '#00A000', Color.INTENSE_BLACK: '#204a87', Color.INTENSE_BLUE: '#5c35cc', Color.INTENSE_CYAN: '#f8f8f8', Color.INTENSE_GREEN: '#4e9a06', Color.INTENSE_PURPLE: '#f8f8f8', Color.INTENSE_RED: '#ef2929', Color.INTENSE_WHITE: '#f8f8f8', Color.INTENSE_YELLOW: '#c4a000', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#800080', Color.RED: '#a40000', Color.WHITE: '#f8f8f8', Color.YELLOW: '#8f5902', } _expand_style(style) return style def _trac_style(): style = { Color.BLACK: '#000000', Color.BLUE: '#000080', Color.CYAN: '#009999', Color.GREEN: '#808000', Color.INTENSE_BLACK: '#555555', Color.INTENSE_BLUE: '#445588', Color.INTENSE_CYAN: '#ddffdd', Color.INTENSE_GREEN: '#999988', Color.INTENSE_PURPLE: '#e3d2d2', Color.INTENSE_RED: '#bb8844', Color.INTENSE_WHITE: '#ffdddd', Color.INTENSE_YELLOW: '#e3d2d2', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#800080', Color.RED: '#aa0000', Color.WHITE: '#aaaaaa', Color.YELLOW: '#808000', } _expand_style(style) return style def _vim_style(): style 
= { Color.BLACK: '#000080', Color.BLUE: '#000080', Color.CYAN: '#00cdcd', Color.GREEN: '#00cd00', Color.INTENSE_BLACK: '#666699', Color.INTENSE_BLUE: '#3399cc', Color.INTENSE_CYAN: '#00cdcd', Color.INTENSE_GREEN: '#00cd00', Color.INTENSE_PURPLE: '#cd00cd', Color.INTENSE_RED: '#FF0000', Color.INTENSE_WHITE: '#cccccc', Color.INTENSE_YELLOW: '#cdcd00', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#cd00cd', Color.RED: '#cd0000', Color.WHITE: '#cccccc', Color.YELLOW: '#cd0000', } _expand_style(style) return style def _vs_style(): style = { Color.BLACK: '#008000', Color.BLUE: '#0000ff', Color.CYAN: '#2b91af', Color.GREEN: '#008000', Color.INTENSE_BLACK: '#2b91af', Color.INTENSE_BLUE: '#2b91af', Color.INTENSE_CYAN: '#2b91af', Color.INTENSE_GREEN: '#2b91af', Color.INTENSE_PURPLE: '#2b91af', Color.INTENSE_RED: '#FF0000', Color.INTENSE_WHITE: '#2b91af', Color.INTENSE_YELLOW: '#2b91af', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#a31515', Color.RED: '#a31515', Color.WHITE: '#2b91af', Color.YELLOW: '#a31515', } _expand_style(style) return style def _xcode_style(): style = { Color.BLACK: '#000000', Color.BLUE: '#1C01CE', Color.CYAN: '#3F6E75', Color.GREEN: '#177500', Color.INTENSE_BLACK: '#3F6E75', Color.INTENSE_BLUE: '#2300CE', Color.INTENSE_CYAN: '#3F6E75', Color.INTENSE_GREEN: '#3F6E75', Color.INTENSE_PURPLE: '#A90D91', Color.INTENSE_RED: '#C41A16', Color.INTENSE_WHITE: '#3F6E75', Color.INTENSE_YELLOW: '#836C28', Color.NO_COLOR: 'noinherit', Color.PURPLE: '#A90D91', Color.RED: '#C41A16', Color.WHITE: '#3F6E75', Color.YELLOW: '#836C28', } _expand_style(style) return style STYLES = LazyDict({ 'algol': _algol_style, 'algol_nu': _algol_nu_style, 'autumn': _autumn_style, 'borland': _borland_style, 'bw': _bw_style, 'colorful': _colorful_style, 'default': _default_style, 'emacs': _emacs_style, 'friendly': _friendly_style, 'fruity': _fruity_style, 'igor': _igor_style, 'lovelace': _lovelace_style, 'manni': _manni_style, 'monokai': _monokai_style, 'murphy': _murphy_style, 'native': _native_style, 'paraiso-dark': _paraiso_dark_style, 'paraiso-light': _paraiso_light_style, 'pastie': _pastie_style, 'perldoc': _perldoc_style, 'rrt': _rrt_style, 'tango': _tango_style, 'trac': _trac_style, 'vim': _vim_style, 'vs': _vs_style, 'xcode': _xcode_style, }, globals(), 'STYLES') del (_algol_style, _algol_nu_style, _autumn_style, _borland_style, _bw_style, _colorful_style, _default_style, _emacs_style, _friendly_style, _fruity_style, _igor_style, _lovelace_style, _manni_style, _monokai_style, _murphy_style, _native_style, _paraiso_dark_style, _paraiso_light_style, _pastie_style, _perldoc_style, _rrt_style, _tango_style, _trac_style, _vim_style, _vs_style, _xcode_style) # dynamic styles def make_pygments_style(palette): """Makes a pygments style based on a color palette.""" global Color style = {getattr(Color, 'NO_COLOR'): 'noinherit'} for name, t in BASE_XONSH_COLORS.items(): color = find_closest_color(t, palette) style[getattr(Color, name)] = '#' + color style[getattr(Color, 'BOLD_'+name)] = 'bold #' + color style[getattr(Color, 'UNDERLINE_'+name)] = 'underline #' + color style[getattr(Color, 'BOLD_UNDERLINE_'+name)] = 'bold underline #' + color style[getattr(Color, 'BACKGROUND_'+name)] = 'bg:#' + color return style def pygments_style_by_name(name): """Gets or makes a pygments color style by its name.""" if name in STYLES: return STYLES[name] pstyle = get_style_by_name(name) palette = make_palette(pstyle.styles.values()) astyle = make_pygments_style(palette) STYLES[name] = astyle return astyle # # Formatter # @lazyobject 
def XonshTerminal256Formatter(): class XonshTerminal256FormatterProxy(terminal256.Terminal256Formatter): """Proxy class for xonsh terminal256 formatting that understands. xonsh color tokens. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # just keep the opening token for colors. color_names = set(map(str, Color.subtypes)) for name, (opener, closer) in self.style_string.items(): if name in color_names: self.style_string[name] = (opener, '') # special case NO_COLOR, because it is special. self.style_string['Token.Color.NO_COLOR'] = ('\x1b[39m', '') return XonshTerminal256FormatterProxy xonsh-0.6.0/xonsh/pytest_plugin.py000066400000000000000000000037741320541242300173020ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Pytest plugin for testing xsh files.""" import sys import importlib from traceback import format_list, extract_tb import pytest from xonsh.imphooks import install_import_hooks def pytest_configure(config): install_import_hooks() def pytest_collection_modifyitems(items): items.sort(key=lambda x: 0 if isinstance(x, XshFunction) else 1) def _limited_traceback(excinfo): """ Return a formatted traceback with all the stack from this frame (i.e __file__) up removed """ tb = extract_tb(excinfo.tb) try: idx = [__file__ in e for e in tb].index(True) return format_list(tb[idx+1:]) except ValueError: return format_list(tb) def pytest_collect_file(parent, path): if path.ext.lower() == ".xsh" and path.basename.startswith("test_"): return XshFile(path, parent) class XshFile(pytest.File): def collect(self): sys.path.append(self.fspath.dirname) mod = importlib.import_module(self.fspath.purebasename) sys.path.pop(0) tests = [t for t in dir(mod) if t.startswith('test_')] for test_name in tests: obj = getattr(mod, test_name) if hasattr(obj, '__call__'): yield XshFunction(name=test_name, parent=self, test_func=obj, test_module=mod) class XshFunction(pytest.Item): def __init__(self, name, parent, test_func, test_module): super().__init__(name, parent) self._test_func = test_func self._test_module = test_module def runtest(self): self._test_func() def repr_failure(self, excinfo): """ called when self.runtest() raises an exception. """ formatted_tb = _limited_traceback(excinfo) formatted_tb.insert(0, "xonsh execution failed\n") formatted_tb.append('{}: {}'.format(excinfo.type.__name__, excinfo.value)) return "".join(formatted_tb) def reportinfo(self): return self.fspath, 0, "xonsh test: {}".format(self.name) xonsh-0.6.0/xonsh/readline_shell.py000066400000000000000000000545241320541242300173450ustar00rootroot00000000000000# -*- coding: utf-8 -*- """The readline based xonsh shell. Portions of this code related to initializing the readline library are included from the IPython project. 
The IPython project is: * Copyright (c) 2008-2014, IPython Development Team * Copyright (c) 2001-2007, Fernando Perez * Copyright (c) 2001, Janko Hauser * Copyright (c) 2001, Nathaniel Gray """ import os import sys import cmd import select import shutil import builtins import importlib import threading import subprocess import collections from xonsh.lazyasd import LazyObject, lazyobject from xonsh.base_shell import BaseShell from xonsh.ansi_colors import (ansi_partial_color_format, ansi_color_style_names, ansi_color_style) from xonsh.prompt.base import multiline_prompt from xonsh.tools import (print_exception, check_for_partial_string, to_bool, columnize, carriage_return) from xonsh.platform import ON_WINDOWS, ON_CYGWIN, ON_DARWIN, ON_POSIX, os_environ from xonsh.lazyimps import pygments, pyghooks, winutils from xonsh.events import events readline = None RL_COMPLETION_SUPPRESS_APPEND = RL_LIB = RL_STATE = None RL_COMPLETION_QUERY_ITEMS = None RL_CAN_RESIZE = False RL_DONE = None RL_VARIABLE_VALUE = None _RL_STATE_DONE = 0x1000000 _RL_STATE_ISEARCH = 0x0000080 _RL_PREV_CASE_SENSITIVE_COMPLETIONS = 'to-be-set' def setup_readline(): """Sets up the readline module and completion suppression, if available.""" global RL_COMPLETION_SUPPRESS_APPEND, RL_LIB, RL_CAN_RESIZE, RL_STATE, \ readline, RL_COMPLETION_QUERY_ITEMS if RL_COMPLETION_SUPPRESS_APPEND is not None: return for _rlmod_name in ('gnureadline', 'readline'): try: readline = importlib.import_module(_rlmod_name) sys.modules['readline'] = readline except ImportError: pass else: break if readline is None: print("""Skipping setup. Because no `readline` implementation available. Please install a backend (`readline`, `prompt-toolkit`, etc) to use `xonsh` interactively. See https://github.com/xonsh/xonsh/issues/1170""") return import ctypes import ctypes.util uses_libedit = readline.__doc__ and 'libedit' in readline.__doc__ readline.set_completer_delims(' \t\n') # Cygwin seems to hang indefinitely when querying the readline lib if (not ON_CYGWIN) and (not readline.__file__.endswith('.py')): RL_LIB = lib = ctypes.cdll.LoadLibrary(readline.__file__) try: RL_COMPLETION_SUPPRESS_APPEND = ctypes.c_int.in_dll( lib, 'rl_completion_suppress_append') except ValueError: # not all versions of readline have this symbol, ie Macs sometimes RL_COMPLETION_SUPPRESS_APPEND = None try: RL_COMPLETION_QUERY_ITEMS = ctypes.c_int.in_dll( lib, 'rl_completion_query_items') except ValueError: # not all versions of readline have this symbol, ie Macs sometimes RL_COMPLETION_QUERY_ITEMS = None try: RL_STATE = ctypes.c_int.in_dll(lib, 'rl_readline_state') except Exception: pass RL_CAN_RESIZE = hasattr(lib, 'rl_reset_screen_size') env = builtins.__xonsh_env__ # reads in history readline.set_history_length(-1) ReadlineHistoryAdder() # sets up IPython-like history matching with up and down readline.parse_and_bind('"\e[B": history-search-forward') readline.parse_and_bind('"\e[A": history-search-backward') # Setup Shift-Tab to indent readline.parse_and_bind('"\e[Z": "{0}"'.format(env.get('INDENT'))) # handle tab completion differences found in libedit readline compatibility # as discussed at http://stackoverflow.com/a/7116997 if uses_libedit and ON_DARWIN: readline.parse_and_bind("bind ^I rl_complete") print('\n'.join(['', "*" * 78, "libedit detected - readline will not be well behaved, including but not limited to:", " * crashes on tab completion", " * incorrect history navigation", " * corrupting long-lines", " * failure to wrap or indent lines properly", "", "It is highly 
recommended that you install gnureadline, which is installable with:", " xpip install gnureadline", "*" * 78]), file=sys.stderr) else: readline.parse_and_bind("tab: complete") # try to load custom user settings inputrc_name = os_environ.get('INPUTRC') if inputrc_name is None: if uses_libedit: inputrc_name = '.editrc' else: inputrc_name = '.inputrc' inputrc_name = os.path.join(os.path.expanduser('~'), inputrc_name) if (not ON_WINDOWS) and (not os.path.isfile(inputrc_name)): inputrc_name = '/etc/inputrc' if ON_WINDOWS: winutils.enable_virtual_terminal_processing() if os.path.isfile(inputrc_name): try: readline.read_init_file(inputrc_name) except Exception: # this seems to fail with libedit print_exception('xonsh: could not load readline default init file.') # properly reset input typed before the first prompt readline.set_startup_hook(carriage_return) def teardown_readline(): """Tears down up the readline module, if available.""" try: import readline except (ImportError, TypeError): return def _rebind_case_sensitive_completions(): # handle case sensitive, see Github issue #1342 for details global _RL_PREV_CASE_SENSITIVE_COMPLETIONS env = builtins.__xonsh_env__ case_sensitive = env.get('CASE_SENSITIVE_COMPLETIONS') if case_sensitive is _RL_PREV_CASE_SENSITIVE_COMPLETIONS: return if case_sensitive: readline.parse_and_bind("set completion-ignore-case off") else: readline.parse_and_bind("set completion-ignore-case on") _RL_PREV_CASE_SENSITIVE_COMPLETIONS = case_sensitive def fix_readline_state_after_ctrl_c(): """ Fix to allow Ctrl-C to exit reverse-i-search. Based on code from: http://bugs.python.org/file39467/raw_input__workaround_demo.py """ if ON_WINDOWS: # hack to make pyreadline mimic the desired behavior try: _q = readline.rl.mode.process_keyevent_queue if len(_q) > 1: _q.pop() except Exception: pass if RL_STATE is None: return if RL_STATE.value & _RL_STATE_ISEARCH: RL_STATE.value &= ~_RL_STATE_ISEARCH if not RL_STATE.value & _RL_STATE_DONE: RL_STATE.value |= _RL_STATE_DONE def rl_completion_suppress_append(val=1): """Sets the rl_completion_suppress_append variable, if possible. A value of 1 (default) means to suppress, a value of 0 means to enable. """ if RL_COMPLETION_SUPPRESS_APPEND is None: return RL_COMPLETION_SUPPRESS_APPEND.value = val def rl_completion_query_items(val=None): """Sets the rl_completion_query_items variable, if possible. A None value will set this to $COMPLETION_QUERY_LIMIT, otherwise any integer is accepted. """ if RL_COMPLETION_QUERY_ITEMS is None: return if val is None: val = builtins.__xonsh_env__.get('COMPLETION_QUERY_LIMIT') RL_COMPLETION_QUERY_ITEMS.value = val def rl_variable_dumper(readable=True): """Dumps the currently set readline variables. If readable is True, then this output may be used in an inputrc file. 
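For example (output goes to stdout via the underlying readline library and
the exact lines depend on the user's configuration)::

    rl_variable_dumper()       # readable 'set <name> <value>' lines
    rl_variable_dumper(False)  # terser, non-inputrc listing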
""" RL_LIB.rl_variable_dumper(int(readable)) def rl_variable_value(variable): """Returns the currently set value for a readline configuration variable.""" global RL_VARIABLE_VALUE if RL_VARIABLE_VALUE is None: import ctypes RL_VARIABLE_VALUE = RL_LIB.rl_variable_value RL_VARIABLE_VALUE.restype = ctypes.c_char_p env = builtins.__xonsh_env__ enc, errors = env.get('XONSH_ENCODING'), env.get('XONSH_ENCODING_ERRORS') if isinstance(variable, str): variable = variable.encode(encoding=enc, errors=errors) rtn = RL_VARIABLE_VALUE(variable) return rtn.decode(encoding=enc, errors=errors) @lazyobject def rl_on_new_line(): """Grabs one of a few possible redisplay functions in readline.""" names = ['rl_on_new_line', 'rl_forced_update_display', 'rl_redisplay'] for name in names: func = getattr(RL_LIB, name, None) if func is not None: break else: def print_for_newline(): print() func = print_for_newline return func def _insert_text_func(s, readline): """Creates a function to insert text via readline.""" def inserter(): readline.insert_text(s) readline.redisplay() return inserter DEDENT_TOKENS = LazyObject(lambda: frozenset(['raise', 'return', 'pass', 'break', 'continue']), globals(), 'DEDENT_TOKENS') class ReadlineShell(BaseShell, cmd.Cmd): """The readline based xonsh shell.""" def __init__(self, completekey='tab', stdin=None, stdout=None, **kwargs): super().__init__(completekey=completekey, stdin=stdin, stdout=stdout, **kwargs) setup_readline() self._current_indent = '' self._current_prompt = '' self._force_hide = None self.cmdqueue = collections.deque() def __del__(self): teardown_readline() def singleline(self, store_in_history=True, **kwargs): """Reads a single line of input. The store_in_history kwarg flags whether the input should be stored in readline's in-memory history. """ if not store_in_history: # store current position to remove it later try: import readline except ImportError: store_in_history = True pos = readline.get_current_history_length() - 1 events.on_pre_prompt.fire() rtn = input(self.prompt) events.on_post_prompt.fire() if not store_in_history and pos >= 0: readline.remove_history_item(pos) return rtn def parseline(self, line): """Overridden to no-op.""" return '', line, line def _querycompletions(self, completions, loc): """Returns whether or not we should show completions""" if os.path.commonprefix([c[loc:] for c in completions]): return True elif len(completions) <= builtins.__xonsh_env__.get('COMPLETION_QUERY_LIMIT'): return True msg = '\nDisplay all {} possibilities? 
'.format(len(completions)) msg += '({GREEN}y{NO_COLOR} or {RED}n{NO_COLOR})' self.print_color(msg, end='', flush=True, file=sys.stderr) yn = 'x' while yn not in 'yn': yn = sys.stdin.read(1) show_completions = to_bool(yn) print() if not show_completions: rl_on_new_line() return False w, h = shutil.get_terminal_size() lines = columnize(completions, width=w) more_msg = self.format_color('{YELLOW}==={NO_COLOR} more or ' '{PURPLE}({NO_COLOR}q{PURPLE}){NO_COLOR}uit ' '{YELLOW}==={NO_COLOR}') while len(lines) > h - 1: print(''.join(lines[:h-1]), end='', flush=True, file=sys.stderr) lines = lines[h-1:] print(more_msg, end='', flush=True, file=sys.stderr) q = sys.stdin.read(1).lower() print(flush=True, file=sys.stderr) if q == 'q': rl_on_new_line() return False print(''.join(lines), end='', flush=True, file=sys.stderr) rl_on_new_line() return False def completedefault(self, prefix, line, begidx, endidx): """Implements tab-completion for text.""" if self.completer is None: return [] rl_completion_suppress_append() # this needs to be called each time _rebind_case_sensitive_completions() rl_completion_query_items(val=999999999) completions, l = self.completer.complete(prefix, line, begidx, endidx, ctx=self.ctx) chopped = prefix[:-l] rtn_completions = [chopped + i for i in completions] show_completions = self._querycompletions(completions, endidx - begidx) return rtn_completions if show_completions else [] # tab complete on first index too completenames = completedefault def _load_remaining_input_into_queue(self): buf = b'' while True: r, w, x = select.select([self.stdin], [], [], 1e-6) if len(r) == 0: break buf += os.read(self.stdin.fileno(), 1024) if len(buf) > 0: buf = buf.decode().replace('\r\n', '\n').replace('\r', '\n') self.cmdqueue.extend(buf.splitlines(keepends=True)) def postcmd(self, stop, line): """Called just before execution of line. For readline, this handles the automatic indentation of code blocks. """ try: import readline except ImportError: return stop if self.need_more_lines: if len(line.strip()) == 0: readline.set_pre_input_hook(None) self._current_indent = '' elif line.rstrip()[-1] == ':': ind = line[:len(line) - len(line.lstrip())] ind += builtins.__xonsh_env__.get('INDENT') readline.set_pre_input_hook(_insert_text_func(ind, readline)) self._current_indent = ind elif line.split(maxsplit=1)[0] in DEDENT_TOKENS: env = builtins.__xonsh_env__ ind = self._current_indent[:-len(env.get('INDENT'))] readline.set_pre_input_hook(_insert_text_func(ind, readline)) self._current_indent = ind else: ind = line[:len(line) - len(line.lstrip())] if ind != self._current_indent: insert_func = _insert_text_func(ind, readline) readline.set_pre_input_hook(insert_func) self._current_indent = ind else: readline.set_pre_input_hook(None) return stop def _cmdloop(self, intro=None): """Repeatedly issue a prompt, accept input, parse an initial prefix off the received input, and dispatch to action methods, passing them the remainder of the line as argument. This was forked from Lib/cmd.py from the Python standard library v3.4.3, (C) Python Software Foundation, 2015. 
""" self.preloop() if self.use_rawinput and self.completekey: try: import readline self.old_completer = readline.get_completer() readline.set_completer(self.complete) readline.parse_and_bind(self.completekey + ": complete") have_readline = True except ImportError: have_readline = False try: if intro is not None: self.intro = intro if self.intro: self.stdout.write(str(self.intro) + "\n") stop = None while not stop: line = None exec_now = False if len(self.cmdqueue) > 0: line = self.cmdqueue.popleft() exec_now = line.endswith('\n') if self.use_rawinput and not exec_now: inserter = None if line is None \ else _insert_text_func(line, readline) if inserter is not None: readline.set_pre_input_hook(inserter) try: line = self.singleline() except EOFError: if builtins.__xonsh_env__.get("IGNOREEOF"): self.stdout.write('Use "exit" to leave the shell.' '\n') line = '' else: line = 'EOF' if inserter is not None: readline.set_pre_input_hook(None) else: self.print_color(self.prompt, file=self.stdout) if line is not None: os.write(self.stdin.fileno(), line.encode()) if not exec_now: line = self.stdin.readline() if len(line) == 0: line = 'EOF' else: line = line.rstrip('\r\n') if have_readline and line != 'EOF': readline.add_history(line) if not ON_WINDOWS: # select() is not fully functional on windows self._load_remaining_input_into_queue() line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) if ON_WINDOWS: winutils.enable_virtual_terminal_processing() self.postloop() finally: if self.use_rawinput and self.completekey: try: import readline readline.set_completer(self.old_completer) except ImportError: pass def cmdloop(self, intro=None): while not builtins.__xonsh_exit__: try: self._cmdloop(intro=intro) except (KeyboardInterrupt, SystemExit): print() # Gives a newline fix_readline_state_after_ctrl_c() self.reset_buffer() intro = None @property def prompt(self): """Obtains the current prompt string.""" global RL_LIB, RL_CAN_RESIZE if RL_CAN_RESIZE: # This is needed to support some system where line-wrapping doesn't # work. This is a bug in upstream Python, or possibly readline. RL_LIB.rl_reset_screen_size() if self.need_more_lines: if self.mlprompt is None: try: self.mlprompt = multiline_prompt(curr=self._current_prompt) except Exception: # pylint: disable=broad-except print_exception() self.mlprompt = ' ' return self.mlprompt env = builtins.__xonsh_env__ # pylint: disable=no-member p = env.get('PROMPT') try: p = self.prompt_formatter(p) except Exception: # pylint: disable=broad-except print_exception() hide = True if self._force_hide is None else self._force_hide p = ansi_partial_color_format(p, style=env.get('XONSH_COLOR_STYLE'), hide=hide) self._current_prompt = p self.settitle() return p def format_color(self, string, hide=False, force_string=False, **kwargs): """Readline implementation of color formatting. This uses ANSI color codes. 
""" hide = hide if self._force_hide is None else self._force_hide style = builtins.__xonsh_env__.get('XONSH_COLOR_STYLE') return ansi_partial_color_format(string, hide=hide, style=style) def print_color(self, string, hide=False, **kwargs): if isinstance(string, str): s = self.format_color(string, hide=hide) else: # assume this is a list of (Token, str) tuples and format it env = builtins.__xonsh_env__ self.styler.style_name = env.get('XONSH_COLOR_STYLE') style_proxy = pyghooks.xonsh_style_proxy(self.styler) formatter = pyghooks.XonshTerminal256Formatter(style=style_proxy) s = pygments.format(string, formatter).rstrip() print(s, **kwargs) def color_style_names(self): """Returns an iterable of all available style names.""" return ansi_color_style_names() def color_style(self): """Returns the current color map.""" style = style = builtins.__xonsh_env__.get('XONSH_COLOR_STYLE') return ansi_color_style(style=style) def restore_tty_sanity(self): """An interface for resetting the TTY stdin mode. This is highly dependent on the shell backend. Also it is mostly optional since it only affects ^Z backgrounding behaviour. """ if not ON_POSIX: return stty, _ = builtins.__xonsh_commands_cache__.lazyget('stty', None) if stty is None: return # If available, we should just call the stty utility. This call should # not throw even if stty fails. It should also be noted that subprocess # calls, like the following, seem to be ineffective: # subprocess.call([stty, 'sane'], shell=True) # My guess is that this is because Popen does some crazy redirecting # under the covers. This effectively hides the true TTY stdin handle # from stty. To get around this we have to use the lower level # os.system() function. os.system(stty + ' sane') class ReadlineHistoryAdder(threading.Thread): def __init__(self, wait_for_gc=True, *args, **kwargs): """Thread responsible for adding inputs from history to the current readline instance. May wait for the history garbage collector to finish. """ super(ReadlineHistoryAdder, self).__init__(*args, **kwargs) self.daemon = True self.wait_for_gc = wait_for_gc self.start() def run(self): try: import readline except ImportError: return hist = builtins.__xonsh_history__ if hist is None: return i = 1 for h in hist.all_items(): line = h['inp'].rstrip() if i == 1: pass elif line == readline.get_history_item(i - 1): continue readline.add_history(line) if RL_LIB is not None: RL_LIB.history_set_pos(i) i += 1 xonsh-0.6.0/xonsh/replay.py000066400000000000000000000114541320541242300156620ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Tools to replay xonsh history files.""" import json import time import builtins import collections.abc as cabc from xonsh.tools import swap from xonsh.lazyjson import LazyJSON from xonsh.environ import Env import xonsh.history.main as xhm DEFAULT_MERGE_ENVS = ('replay', 'native') class Replayer(object): """Replays a xonsh history file.""" def __init__(self, f, reopen=True): """ Parameters ---------- f : file handle or str Path to xonsh history file. reopen : bool, optional Whether new file handle should be opened for each load, passed directly into LazyJSON class. """ self._lj = LazyJSON(f, reopen=reopen) def __del__(self): self._lj.close() def replay(self, merge_envs=DEFAULT_MERGE_ENVS, target=None): """Replays the history specified, returns the history object where the code was executed. Parameters ---------- merge_env : tuple of str or Mappings, optional Describes how to merge the environments, in order of increasing precedence. 
Available strings are 'replay' and 'native'. The 'replay' env comes from the history file that we are replaying. The 'native' env comes from what this instance of xonsh was started up with. Instead of a string, a dict or other mapping may be passed in as well. Defaults to ('replay', 'native'). target : str, optional Path to new history file. """ shell = builtins.__xonsh_shell__ re_env = self._lj['env'].load() new_env = self._merge_envs(merge_envs, re_env) new_hist = xhm.construct_history( env=new_env.detype(), locked=True, ts=[time.time(), None], gc=False, filename=target) with swap(builtins, '__xonsh_env__', new_env), swap(builtins, '__xonsh_history__', new_hist): for cmd in self._lj['cmds']: inp = cmd['inp'] shell.default(inp) if builtins.__xonsh_exit__: # prevent premature exit builtins.__xonsh_exit__ = False new_hist.flush(at_exit=True) return new_hist def _merge_envs(self, merge_envs, re_env): new_env = {} for e in merge_envs: if e == 'replay': new_env.update(re_env) elif e == 'native': new_env.update(builtins.__xonsh_env__) elif isinstance(e, cabc.Mapping): new_env.update(e) else: raise TypeError('Type of env not understood: {0!r}'.format(e)) new_env = Env(**new_env) return new_env _REPLAY_PARSER = None def replay_create_parser(p=None): global _REPLAY_PARSER p_was_none = (p is None) if _REPLAY_PARSER is not None and p_was_none: return _REPLAY_PARSER if p_was_none: from argparse import ArgumentParser p = ArgumentParser('replay', description='replays a xonsh history file') p.add_argument('--merge-envs', dest='merge_envs', default=DEFAULT_MERGE_ENVS, nargs='+', help="Describes how to merge the environments, in order of " "increasing precedence. Available strings are 'replay' and " "'native'. The 'replay' env comes from the history file that we " "are replaying. The 'native' env comes from what this instance " "of xonsh was started up with. One or more of these options may " "be passed in. 
Defaults to '--merge-envs replay native'.") p.add_argument('--json', dest='json', default=False, action='store_true', help='print history info in JSON format') p.add_argument('-o', '--target', dest='target', default=None, help='path to new history file') p.add_argument('path', help='path to replay history file') if p_was_none: _REPLAY_PARSER = p return p def replay_main_action(h, ns, stdout=None, stderr=None): replayer = Replayer(ns.path) hist = replayer.replay(merge_envs=ns.merge_envs, target=ns.target) print('----------------------------------------------------------------') print('Just replayed history, new history has the following information') print('----------------------------------------------------------------') data = hist.info() if ns.json: s = json.dumps(data) print(s, file=stdout) else: lines = ['{0}: {1}'.format(k, v) for k, v in data.items()] print('\n'.join(lines), file=stdout) def replay_main(args, stdin=None): """Acts as main function for replaying a xonsh history file.""" parser = replay_create_parser() ns = parser.parse_args(args) replay_main_action(ns) xonsh-0.6.0/xonsh/shell.py000066400000000000000000000130421320541242300154700ustar00rootroot00000000000000# -*- coding: utf-8 -*- """The xonsh shell""" import sys import random import time import difflib import builtins import warnings from xonsh.platform import (best_shell_type, has_prompt_toolkit, ptk_version_is_supported) from xonsh.tools import XonshError, print_exception from xonsh.events import events import xonsh.history.main as xhm events.doc('on_transform_command', """ on_transform_command(cmd: str) -> str Fired to request xontribs to transform a command line. Return the transformed command, or the same command if no transformation occurs. Only done for interactive sessions. This may be fired multiple times per command, with other transformers input or output, so design any handlers for this carefully. """) events.doc('on_precommand', """ on_precommand(cmd: str) -> None Fires just before a command is executed. """) events.doc('on_postcommand', """ on_postcommand(cmd: str, rtn: int, out: str or None, ts: list) -> None Fires just after a command is executed. The arguments are the same as history. Parameters: * ``cmd``: The command that was executed (after transformation) * ``rtn``: The result of the command executed (``0`` for success) * ``out``: If xonsh stores command output, this is the output * ``ts``: Timestamps, in the order of ``[starting, ending]`` """) events.doc('on_pre_prompt', """ on_first_prompt() -> None Fires just before the prompt is shown """) events.doc('on_post_prompt', """ on_first_prompt() -> None Fires just after the prompt returns """) def transform_command(src, show_diff=True): """Returns the results of firing the precommand handles.""" i = 0 limit = sys.getrecursionlimit() lst = '' raw = src while src != lst: lst = src srcs = events.on_transform_command.fire(cmd=src) for s in srcs: if s != lst: src = s break i += 1 if i == limit: print_exception('Modifications to source input took more than ' 'the recursion limit number of iterations to ' 'converge.') debug_level = builtins.__xonsh_env__.get('XONSH_DEBUG') if show_diff and debug_level > 1 and src != raw: sys.stderr.writelines(difflib.unified_diff( raw.splitlines(keepends=True), src.splitlines(keepends=True), fromfile='before precommand event', tofile='after precommand event', )) return src class Shell(object): """Main xonsh shell. Initializes execution environment and decides if prompt_toolkit or readline version of shell should be used. 
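    Illustrative construction sketch (not part of the original docstring;
    assumes ``execer`` is an already-built ``xonsh.execer.Execer`` and that
    the xonsh builtins/environment have been initialized)::

        shell = Shell(execer, shell_type='readline')
        shell.shell_type      # 'readline'
        # attributes not defined on Shell itself are looked up on the
        # wrapped backend shell via __getattr__ below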
""" def __init__(self, execer, ctx=None, shell_type=None, **kwargs): """ Parameters ---------- execer : Execer An execer instance capable of running xonsh code. ctx : Mapping, optional The execution context for the shell (e.g. the globals namespace). If none, this is computed by loading the rc files. If not None, this no additional context is computed and this is used directly. shell_type : str, optional The shell type to start, such as 'readline', 'prompt_toolkit', or 'random'. """ self.execer = execer self.ctx = {} if ctx is None else ctx env = builtins.__xonsh_env__ # build history backend before creating shell builtins.__xonsh_history__ = hist = xhm.construct_history( env=env.detype(), ts=[time.time(), None], locked=True) # pick a valid shell -- if no shell is specified by the user, # shell type is pulled from env if shell_type is None: shell_type = env.get('SHELL_TYPE') if shell_type == 'none': # This bricks interactive xonsh # Can happen from the use of .xinitrc, .xsession, etc shell_type = 'best' if shell_type == 'best' or shell_type is None: shell_type = best_shell_type() elif shell_type == 'random': shell_type = random.choice(('readline', 'prompt_toolkit')) if shell_type == 'prompt_toolkit': if not has_prompt_toolkit(): warnings.warn('prompt_toolkit is not available, using ' 'readline instead.') shell_type = 'readline' elif not ptk_version_is_supported(): warnings.warn('prompt-toolkit version < v1.0.0 is not ' 'supported. Please update prompt-toolkit. Using ' 'readline instead.') shell_type = 'readline' self.shell_type = env['SHELL_TYPE'] = shell_type # actually make the shell if shell_type == 'none': from xonsh.base_shell import BaseShell as shell_class elif shell_type == 'prompt_toolkit': from xonsh.ptk.shell import PromptToolkitShell as shell_class elif shell_type == 'readline': from xonsh.readline_shell import ReadlineShell as shell_class else: raise XonshError('{} is not recognized as a shell type'.format( shell_type)) self.shell = shell_class(execer=self.execer, ctx=self.ctx, **kwargs) # allows history garbage collector to start running if hist.gc is not None: hist.gc.wait_for_shell = False def __getattr__(self, attr): """Delegates calls to appropriate shell instance.""" return getattr(self.shell, attr) xonsh-0.6.0/xonsh/style_tools.py000066400000000000000000000352401320541242300167450ustar00rootroot00000000000000"""Xonsh color styling tools that simulate pygments, when it is unavailable.""" import builtins import string from xonsh.platform import HAS_PYGMENTS from xonsh.lazyasd import LazyObject from xonsh.color_tools import RE_BACKGROUND class _TokenType(tuple): """ Forked from the pygments project https://bitbucket.org/birkenfeld/pygments-main Copyright (c) 2006-2017 by the respective authors, All rights reserved. See https://bitbucket.org/birkenfeld/pygments-main/raw/05818a4ef9891d9ac22c851f7b3ea4b4fce460ab/AUTHORS """ parent = None def split(self): buf = [] node = self while node is not None: buf.append(node) node = node.parent buf.reverse() return buf def __init__(self, *args): # no need to call super.__init__ self.subtypes = set() def __contains__(self, val): return self is val or ( type(val) is self.__class__ and val[:len(self)] == self ) def __getattr__(self, val): if not val or not val[0].isupper(): return tuple.__getattribute__(self, val) new = _TokenType(self + (val,)) setattr(self, val, new) self.subtypes.add(new) new.parent = self return new def __repr__(self): return 'Token' + (self and '.' 
or '') + '.'.join(self) def __copy__(self): # These instances are supposed to be singletons return self def __deepcopy__(self, memo): # These instances are supposed to be singletons return self Token = _TokenType() Color = Token.Color def partial_color_tokenize(template): """Tokenizes a template string containing colors. Will return a list of tuples mapping the token to the string which has that color. These sub-strings maybe templates themselves. """ if HAS_PYGMENTS and hasattr(builtins, '__xonsh_shell__'): styles = __xonsh_shell__.shell.styler.styles elif hasattr(builtins, '__xonsh_shell__'): styles = DEFAULT_STYLE_DICT else: styles = None color = Color.NO_COLOR try: toks, color = _partial_color_tokenize_main(template, styles) except Exception: toks = [(Color.NO_COLOR, template)] if styles is not None: styles[color] # ensure color is available return toks def _partial_color_tokenize_main(template, styles): formatter = string.Formatter() bopen = '{' bclose = '}' colon = ':' expl = '!' color = Color.NO_COLOR fg = bg = None value = '' toks = [] for literal, field, spec, conv in formatter.parse(template): if field is None: value += literal elif field in KNOWN_COLORS or '#' in field: value += literal next_color, fg, bg = color_by_name(field, fg, bg) if next_color is not color: if len(value) > 0: toks.append((color, value)) if styles is not None: styles[color] # ensure color is available color = next_color value = '' elif field is not None: parts = [literal, bopen, field] if conv is not None and len(conv) > 0: parts.append(expl) parts.append(conv) if spec is not None and len(spec) > 0: parts.append(colon) parts.append(spec) parts.append(bclose) value += ''.join(parts) else: value += literal toks.append((color, value)) return toks, color def color_by_name(name, fg=None, bg=None): """Converts a color name to a color token, foreground name, and background name. Will take into consideration current foreground and background colors, if provided. Parameters ---------- name : str Color name. fg : str, optional Foreground color name. bg : str, optional Background color name. Returns ------- tok : Token Pygments Token.Color subclass fg : str or None New computed foreground color name. bg : str or None New computed background color name. 
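    Illustrative behavior, inferred from the implementation below::

        color_by_name('RED')
        # -> (Token.Color.RED, 'RED', None)
        color_by_name('BACKGROUND_GREEN', fg='RED')
        # -> (Token.Color.RED__BACKGROUND_GREEN, 'RED', 'BACKGROUND_GREEN')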
""" name = name.upper() if name == 'NO_COLOR': return Color.NO_COLOR, None, None m = RE_BACKGROUND.search(name) if m is None: # must be foreground color fg = norm_name(name) else: bg = norm_name(name) # assemble token if fg is None and bg is None: tokname = 'NO_COLOR' elif fg is None: tokname = bg elif bg is None: tokname = fg else: tokname = fg + '__' + bg tok = getattr(Color, tokname) return tok, fg, bg def norm_name(name): """Normalizes a color name.""" return name.replace('#', 'HEX').replace('BGHEX', 'BACKGROUND_HEX') KNOWN_COLORS = LazyObject(lambda: frozenset([ 'BACKGROUND_BLACK', 'BACKGROUND_BLUE', 'BACKGROUND_CYAN', 'BACKGROUND_GREEN', 'BACKGROUND_INTENSE_BLACK', 'BACKGROUND_INTENSE_BLUE', 'BACKGROUND_INTENSE_CYAN', 'BACKGROUND_INTENSE_GREEN', 'BACKGROUND_INTENSE_PURPLE', 'BACKGROUND_INTENSE_RED', 'BACKGROUND_INTENSE_WHITE', 'BACKGROUND_INTENSE_YELLOW', 'BACKGROUND_PURPLE', 'BACKGROUND_RED', 'BACKGROUND_WHITE', 'BACKGROUND_YELLOW', 'BLACK', 'BLUE', 'BOLD_BLACK', 'BOLD_BLUE', 'BOLD_CYAN', 'BOLD_GREEN', 'BOLD_INTENSE_BLACK', 'BOLD_INTENSE_BLUE', 'BOLD_INTENSE_CYAN', 'BOLD_INTENSE_GREEN', 'BOLD_INTENSE_PURPLE', 'BOLD_INTENSE_RED', 'BOLD_INTENSE_WHITE', 'BOLD_INTENSE_YELLOW', 'BOLD_PURPLE', 'BOLD_RED', 'BOLD_UNDERLINE_BLACK', 'BOLD_UNDERLINE_BLUE', 'BOLD_UNDERLINE_CYAN', 'BOLD_UNDERLINE_GREEN', 'BOLD_UNDERLINE_INTENSE_BLACK', 'BOLD_UNDERLINE_INTENSE_BLUE', 'BOLD_UNDERLINE_INTENSE_CYAN', 'BOLD_UNDERLINE_INTENSE_GREEN', 'BOLD_UNDERLINE_INTENSE_PURPLE', 'BOLD_UNDERLINE_INTENSE_RED', 'BOLD_UNDERLINE_INTENSE_WHITE', 'BOLD_UNDERLINE_INTENSE_YELLOW', 'BOLD_UNDERLINE_PURPLE', 'BOLD_UNDERLINE_RED', 'BOLD_UNDERLINE_WHITE', 'BOLD_UNDERLINE_YELLOW', 'BOLD_WHITE', 'BOLD_YELLOW', 'CYAN', 'GREEN', 'INTENSE_BLACK', 'INTENSE_BLUE', 'INTENSE_CYAN', 'INTENSE_GREEN', 'INTENSE_PURPLE', 'INTENSE_RED', 'INTENSE_WHITE', 'INTENSE_YELLOW', 'NO_COLOR', 'PURPLE', 'RED', 'UNDERLINE_BLACK', 'UNDERLINE_BLUE', 'UNDERLINE_CYAN', 'UNDERLINE_GREEN', 'UNDERLINE_INTENSE_BLACK', 'UNDERLINE_INTENSE_BLUE', 'UNDERLINE_INTENSE_CYAN', 'UNDERLINE_INTENSE_GREEN', 'UNDERLINE_INTENSE_PURPLE', 'UNDERLINE_INTENSE_RED', 'UNDERLINE_INTENSE_WHITE', 'UNDERLINE_INTENSE_YELLOW', 'UNDERLINE_PURPLE', 'UNDERLINE_RED', 'UNDERLINE_WHITE', 'UNDERLINE_YELLOW', 'WHITE', 'YELLOW', ]), globals(), 'KNOWN_COLORS') DEFAULT_STYLE_DICT = LazyObject(lambda: { Token: '', Token.Aborted: '#ansidarkgray', Token.AutoSuggestion: '#ansidarkgray', Token.Color.BACKGROUND_BLACK: 'bg:#ansiblack', Token.Color.BACKGROUND_BLUE: 'bg:#ansidarkblue', Token.Color.BACKGROUND_CYAN: 'bg:#ansiteal', Token.Color.BACKGROUND_GREEN: 'bg:#ansidarkgreen', Token.Color.BACKGROUND_INTENSE_BLACK: 'bg:#ansidarkgray', Token.Color.BACKGROUND_INTENSE_BLUE: 'bg:#ansiblue', Token.Color.BACKGROUND_INTENSE_CYAN: 'bg:#ansiturquoise', Token.Color.BACKGROUND_INTENSE_GREEN: 'bg:#ansigreen', Token.Color.BACKGROUND_INTENSE_PURPLE: 'bg:#ansifuchsia', Token.Color.BACKGROUND_INTENSE_RED: 'bg:#ansired', Token.Color.BACKGROUND_INTENSE_WHITE: 'bg:#ansiwhite', Token.Color.BACKGROUND_INTENSE_YELLOW: 'bg:#ansiyellow', Token.Color.BACKGROUND_PURPLE: 'bg:#ansipurple', Token.Color.BACKGROUND_RED: 'bg:#ansidarkred', Token.Color.BACKGROUND_WHITE: 'bg:#ansilightgray', Token.Color.BACKGROUND_YELLOW: 'bg:#ansibrown', Token.Color.BLACK: '#ansiblack', Token.Color.BLUE: '#ansidarkblue', Token.Color.BOLD_BLACK: 'bold #ansiblack', Token.Color.BOLD_BLUE: 'bold #ansidarkblue', Token.Color.BOLD_CYAN: 'bold #ansiteal', Token.Color.BOLD_GREEN: 'bold #ansidarkgreen', Token.Color.BOLD_INTENSE_BLACK: 'bold #ansidarkgray', 
Token.Color.BOLD_INTENSE_BLUE: 'bold #ansiblue', Token.Color.BOLD_INTENSE_CYAN: 'bold #ansiturquoise', Token.Color.BOLD_INTENSE_GREEN: 'bold #ansigreen', Token.Color.BOLD_INTENSE_PURPLE: 'bold #ansifuchsia', Token.Color.BOLD_INTENSE_RED: 'bold #ansired', Token.Color.BOLD_INTENSE_WHITE: 'bold #ansiwhite', Token.Color.BOLD_INTENSE_YELLOW: 'bold #ansiyellow', Token.Color.BOLD_PURPLE: 'bold #ansipurple', Token.Color.BOLD_RED: 'bold #ansidarkred', Token.Color.BOLD_UNDERLINE_BLACK: 'bold underline #ansiblack', Token.Color.BOLD_UNDERLINE_BLUE: 'bold underline #ansidarkblue', Token.Color.BOLD_UNDERLINE_CYAN: 'bold underline #ansiteal', Token.Color.BOLD_UNDERLINE_GREEN: 'bold underline #ansidarkgreen', Token.Color.BOLD_UNDERLINE_INTENSE_BLACK: 'bold underline #ansidarkgray', Token.Color.BOLD_UNDERLINE_INTENSE_BLUE: 'bold underline #ansiblue', Token.Color.BOLD_UNDERLINE_INTENSE_CYAN: 'bold underline #ansiturquoise', Token.Color.BOLD_UNDERLINE_INTENSE_GREEN: 'bold underline #ansigreen', Token.Color.BOLD_UNDERLINE_INTENSE_PURPLE: 'bold underline #ansifuchsia', Token.Color.BOLD_UNDERLINE_INTENSE_RED: 'bold underline #ansired', Token.Color.BOLD_UNDERLINE_INTENSE_WHITE: 'bold underline #ansiwhite', Token.Color.BOLD_UNDERLINE_INTENSE_YELLOW: 'bold underline #ansiyellow', Token.Color.BOLD_UNDERLINE_PURPLE: 'bold underline #ansipurple', Token.Color.BOLD_UNDERLINE_RED: 'bold underline #ansidarkred', Token.Color.BOLD_UNDERLINE_WHITE: 'bold underline #ansilightgray', Token.Color.BOLD_UNDERLINE_YELLOW: 'bold underline #ansibrown', Token.Color.BOLD_WHITE: 'bold #ansilightgray', Token.Color.BOLD_YELLOW: 'bold #ansibrown', Token.Color.CYAN: '#ansiteal', Token.Color.GREEN: '#ansidarkgreen', Token.Color.INTENSE_BLACK: '#ansidarkgray', Token.Color.INTENSE_BLUE: '#ansiblue', Token.Color.INTENSE_CYAN: '#ansiturquoise', Token.Color.INTENSE_GREEN: '#ansigreen', Token.Color.INTENSE_PURPLE: '#ansifuchsia', Token.Color.INTENSE_RED: '#ansired', Token.Color.INTENSE_WHITE: '#ansiwhite', Token.Color.INTENSE_YELLOW: '#ansiyellow', Token.Color.NO_COLOR: 'noinherit', Token.Color.PURPLE: '#ansipurple', Token.Color.RED: '#ansidarkred', Token.Color.UNDERLINE_BLACK: 'underline #ansiblack', Token.Color.UNDERLINE_BLUE: 'underline #ansidarkblue', Token.Color.UNDERLINE_CYAN: 'underline #ansiteal', Token.Color.UNDERLINE_GREEN: 'underline #ansidarkgreen', Token.Color.UNDERLINE_INTENSE_BLACK: 'underline #ansidarkgray', Token.Color.UNDERLINE_INTENSE_BLUE: 'underline #ansiblue', Token.Color.UNDERLINE_INTENSE_CYAN: 'underline #ansiturquoise', Token.Color.UNDERLINE_INTENSE_GREEN: 'underline #ansigreen', Token.Color.UNDERLINE_INTENSE_PURPLE: 'underline #ansifuchsia', Token.Color.UNDERLINE_INTENSE_RED: 'underline #ansired', Token.Color.UNDERLINE_INTENSE_WHITE: 'underline #ansiwhite', Token.Color.UNDERLINE_INTENSE_YELLOW: 'underline #ansiyellow', Token.Color.UNDERLINE_PURPLE: 'underline #ansipurple', Token.Color.UNDERLINE_RED: 'underline #ansidarkred', Token.Color.UNDERLINE_WHITE: 'underline #ansilightgray', Token.Color.UNDERLINE_YELLOW: 'underline #ansibrown', Token.Color.WHITE: '#ansilightgray', Token.Color.YELLOW: '#ansibrown', Token.Comment: 'underline #ansiteal', Token.Comment.Hashbang: '', Token.Comment.Multiline: '', Token.Comment.Preproc: 'underline #ansibrown', Token.Comment.PreprocFile: '', Token.Comment.Single: '', Token.Comment.Special: '', Token.Error: '#ansired', Token.Escape: '', Token.Generic: '', Token.Generic.Deleted: '#ansidarkred', Token.Generic.Emph: 'underline', Token.Generic.Error: 'bold #ansired', Token.Generic.Heading: 
'bold #ansidarkblue', Token.Generic.Inserted: '#ansigreen', Token.Generic.Output: '#ansidarkblue', Token.Generic.Prompt: 'bold #ansidarkblue', Token.Generic.Strong: '', Token.Generic.Subheading: 'bold #ansipurple', Token.Generic.Traceback: '#ansidarkblue', Token.Keyword: 'bold #ansidarkgreen', Token.Keyword.Constant: '', Token.Keyword.Declaration: '', Token.Keyword.Namespace: '', Token.Keyword.Pseudo: 'nobold', Token.Keyword.Reserved: '', Token.Keyword.Type: 'nobold #ansidarkred', Token.Literal: '', Token.Literal.Date: '', Token.Literal.Number: '#ansidarkgray', Token.Literal.Number.Bin: '', Token.Literal.Number.Float: '', Token.Literal.Number.Hex: '', Token.Literal.Number.Integer: '', Token.Literal.Number.Integer.Long: '', Token.Literal.Number.Oct: '', Token.Literal.String: '#ansired', Token.Literal.String.Affix: '', Token.Literal.String.Backtick: '', Token.Literal.String.Char: '', Token.Literal.String.Delimiter: '', Token.Literal.String.Doc: 'underline', Token.Literal.String.Double: '', Token.Literal.String.Escape: 'bold #ansibrown', Token.Literal.String.Heredoc: '', Token.Literal.String.Interpol: 'bold #ansipurple', Token.Literal.String.Other: '#ansidarkgreen', Token.Literal.String.Regex: '#ansipurple', Token.Literal.String.Single: '', Token.Literal.String.Symbol: '#ansibrown', Token.Menu.Completions: 'bg:#ansilightgray #ansiblack', Token.Menu.Completions.Completion: '', Token.Menu.Completions.Completion.Current: 'bg:#ansidarkgray #ansiwhite', Token.Name: '', Token.Name.Attribute: '#ansiyellow', Token.Name.Builtin: '#ansidarkgreen', Token.Name.Builtin.Pseudo: '', Token.Name.Class: 'bold #ansiblue', Token.Name.Constant: '#ansidarkred', Token.Name.Decorator: '#ansifuchsia', Token.Name.Entity: 'bold #ansilightgray', Token.Name.Exception: 'bold #ansired', Token.Name.Function: '#ansiblue', Token.Name.Function.Magic: '', Token.Name.Label: '#ansiyellow', Token.Name.Namespace: 'bold #ansiblue', Token.Name.Other: '', Token.Name.Property: '', Token.Name.Tag: 'bold #ansidarkgreen', Token.Name.Variable: '#ansidarkblue', Token.Name.Variable.Class: '', Token.Name.Variable.Global: '', Token.Name.Variable.Instance: '', Token.Name.Variable.Magic: '', Token.Operator: '#ansidarkgray', Token.Operator.Word: 'bold #ansipurple', Token.Other: '', Token.Punctuation: '', Token.Scrollbar: 'bg:#ansidarkgray', Token.Scrollbar.Arrow: 'bg:#ansiblack #ansiwhite bold', Token.Scrollbar.Button: 'bg:#ansiblack', Token.Text: '', Token.Text.Whitespace: '#ansilightgray'}, globals(), 'DEFAULT_STYLE_DICT') xonsh-0.6.0/xonsh/timings.py000066400000000000000000000236321320541242300160410ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Timing related functionality for the xonsh shell. The following time_it alias and Timer was forked from the IPython project: * Copyright (c) 2008-2014, IPython Development Team * Copyright (C) 2001-2007 Fernando Perez * Copyright (c) 2001, Janko Hauser * Copyright (c) 2001, Nathaniel Gray """ import os import gc import sys import math import time import timeit import builtins import itertools from xonsh.lazyasd import lazyobject, lazybool from xonsh.events import events from xonsh.platform import ON_WINDOWS @lazybool def _HAVE_RESOURCE(): try: import resource as r have = True except ImportError: # There is no distinction of user/system time under windows, so we # just use time.perf_counter() for everything... 
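        # (see clock2() below: on Windows it returns
        # (time.perf_counter(), 0.0), i.e. the "system" component is
        # reported as zero)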
have = False return have @lazyobject def resource(): import resource as r return r @lazyobject def clocku(): if _HAVE_RESOURCE: def clocku(): """clocku() -> floating point number Return the *USER* CPU time in seconds since the start of the process.""" return resource.getrusage(resource.RUSAGE_SELF)[0] else: clocku = time.perf_counter return clocku @lazyobject def clocks(): if _HAVE_RESOURCE: def clocks(): """clocks() -> floating point number Return the *SYSTEM* CPU time in seconds since the start of the process.""" return resource.getrusage(resource.RUSAGE_SELF)[1] else: clocks = time.perf_counter return clocks @lazyobject def clock(): if _HAVE_RESOURCE: def clock(): """clock() -> floating point number Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of the process.""" u, s = resource.getrusage(resource.RUSAGE_SELF)[:2] return u + s else: clock = time.perf_counter return clock @lazyobject def clock2(): if _HAVE_RESOURCE: def clock2(): """clock2() -> (t_user,t_system) Similar to clock(), but return a tuple of user/system times.""" return resource.getrusage(resource.RUSAGE_SELF)[:2] else: def clock2(): """Under windows, system CPU time can't be measured. This just returns perf_counter() and zero.""" return time.perf_counter(), 0.0 return clock2 def format_time(timespan, precision=3): """Formats the timespan in a human readable form""" if timespan >= 60.0: # we have more than a minute, format that in a human readable form parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)] time = [] leftover = timespan for suffix, length in parts: value = int(leftover / length) if value > 0: leftover = leftover % length time.append('{0}{1}'.format(str(value), suffix)) if leftover < 1: break return " ".join(time) # Unfortunately the unicode 'micro' symbol can cause problems in # certain terminals. # See bug: https://bugs.launchpad.net/ipython/+bug/348466 # Try to prevent crashes by being more secure than it needs to # E.g. eclipse is able to print a mu, but has no sys.stdout.encoding set. units = ["s", "ms", 'us', "ns"] # the save value if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: try: '\xb5'.encode(sys.stdout.encoding) units = ["s", "ms", '\xb5s', "ns"] except Exception: pass scaling = [1, 1e3, 1e6, 1e9] if timespan > 0.0: order = min(-int(math.floor(math.log10(timespan)) // 3), 3) else: order = 3 return "{1:.{0}g} {2}".format(precision, timespan * scaling[order], units[order]) class Timer(timeit.Timer): """Timer class that explicitly uses self.inner which is an undocumented implementation detail of CPython, not shared by PyPy. """ # Timer.timeit copied from CPython 3.4.2 def timeit(self, number=timeit.default_number): """Time 'number' executions of the main statement. To be precise, this executes the setup statement once, and then returns the time it takes to execute the main statement a number of times, as a float measured in seconds. The argument is the number of times through the loop, defaulting to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
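        Illustrative driver sketch (mirrors how timeit_alias() below uses
        this class; ``inner`` is not set by the constructor and must be
        attached by the caller first)::

            t = Timer(timer=clock)
            t.inner = inner       # a callable of the form inner(_it, _timer)
            elapsed = t.timeit(number=1000)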
""" it = itertools.repeat(None, number) gcold = gc.isenabled() gc.disable() try: timing = self.inner(it, self.timer) finally: if gcold: gc.enable() return timing INNER_TEMPLATE = """ def inner(_it, _timer): #setup _t0 = _timer() for _i in _it: {stmt} _t1 = _timer() return _t1 - _t0 """ def timeit_alias(args, stdin=None): """Runs timing study on arguments.""" # some real args number = 0 quiet = False repeat = 3 precision = 3 # setup ctx = builtins.__xonsh_ctx__ timer = Timer(timer=clock) stmt = ' '.join(args) innerstr = INNER_TEMPLATE.format(stmt=stmt) # Track compilation time so it can be reported if too long # Minimum time above which compilation time will be reported tc_min = 0.1 t0 = clock() innercode = builtins.compilex(innerstr, filename='', mode='exec', glbs=ctx) tc = clock() - t0 # get inner func ns = {} builtins.execx(innercode, glbs=ctx, locs=ns, mode='exec') timer.inner = ns['inner'] # Check if there is a huge difference between the best and worst timings. worst_tuning = 0 if number == 0: # determine number so that 0.2 <= total time < 2.0 number = 1 for _ in range(1, 10): time_number = timer.timeit(number) worst_tuning = max(worst_tuning, time_number / number) if time_number >= 0.2: break number *= 10 all_runs = timer.repeat(repeat, number) best = min(all_runs) / number # print some debug info if not quiet: worst = max(all_runs) / number if worst_tuning: worst = max(worst, worst_tuning) # Check best timing is greater than zero to avoid a # ZeroDivisionError. # In cases where the slowest timing is less than 10 microseconds # we assume that it does not really matter if the fastest # timing is 4 times faster than the slowest timing or not. if worst > 4 * best and best > 0 and worst > 1e-5: print(('The slowest run took {0:0.2f} times longer than the ' 'fastest. 
This could mean that an intermediate result ' 'is being cached.').format(worst / best)) print("{0} loops, best of {1}: {2} per loop" .format(number, repeat, format_time(best, precision))) if tc > tc_min: print("Compiler time: {0:.2f} s".format(tc)) return _timings = {'start': clock()} def setup_timings(): global _timings if '--timings' in sys.argv: events.doc('on_timingprobe', """ on_timingprobe(name: str) -> None Fired to insert some timings into the startuptime list """) @events.on_timingprobe def timing_on_timingprobe(name, **kw): global _timings _timings[name] = clock() @events.on_post_cmdloop def timing_on_post_cmdloop(**kw): global _timings _timings['on_post_cmdloop'] = clock() @events.on_post_init def timing_on_post_init(**kw): global _timings _timings['on_post_init'] = clock() @events.on_post_rc def timing_on_post_rc(**kw): global _timings _timings['on_post_rc'] = clock() @events.on_postcommand def timing_on_postcommand(**kw): global _timings _timings['on_postcommand'] = clock() @events.on_pre_cmdloop def timing_on_pre_cmdloop(**kw): global _timings _timings['on_pre_cmdloop'] = clock() @events.on_pre_rc def timing_on_pre_rc(**kw): global _timings _timings['on_pre_rc'] = clock() @events.on_precommand def timing_on_precommand(**kw): global _timings _timings['on_precommand'] = clock() @events.on_ptk_create def timing_on_ptk_create(**kw): global _timings _timings['on_ptk_create'] = clock() @events.on_chdir def timing_on_chdir(**kw): global _timings _timings['on_chdir'] = clock() @events.on_post_prompt def timing_on_post_prompt(**kw): global _timings _timings = {'on_post_prompt': clock()} @events.on_pre_prompt def timing_on_pre_prompt(**kw): global _timings _timings['on_pre_prompt'] = clock() times = list(_timings.items()) times = sorted(times, key=lambda x: x[1]) width = max(len(s) for s, _ in times) + 2 header_format = '|{{:<{}}}|{{:^11}}|{{:^11}}|'.format(width) entry_format = '|{{:<{}}}|{{:^11.3f}}|{{:^11.3f}}|'.format(width) sepline = '|{}|{}|{}|'.format('-'*width, '-'*11, '-'*11) # Print result table print(' Debug level: {}'.format(os.getenv('XONSH_DEBUG', 'Off'))) print(sepline) print(header_format.format('Event name', 'Time (s)', 'Delta (s)')) print(sepline) prevtime = tstart = times[0][1] for name, ts in times: print(entry_format.format(name, ts - tstart, ts - prevtime)) prevtime = ts print(sepline) xonsh-0.6.0/xonsh/tokenize.py000066400000000000000000000770051320541242300162220ustar00rootroot00000000000000"""Tokenization help for xonsh programs. This file is a modified version of tokenize.py form the Python 3.4 and 3.5 standard libraries (licensed under the Python Software Foundation License, version 2), which provides tokenization help for Python programs. It is modified to properly tokenize xonsh code, including backtick regex path and several xonsh-specific operators. A few pieces of this file are specific to the version of Python being used. To find these pieces, search the PY35. 
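Illustrative (non-exhaustive) examples of the xonsh-specific constructs
tokenized here: backtick search paths such as `*.py`, subprocess operators
such as $(...), ![...], @(...) and @$(...), dollar names such as $HOME, and
I/O redirection tokens such as 2>&1 or err>out.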
Original file credits: __author__ = 'Ka-Ping Yee ' __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' 'Michael Foord') """ import re import io import sys import codecs import builtins import itertools import collections from token import (AMPER, AMPEREQUAL, AT, CIRCUMFLEX, CIRCUMFLEXEQUAL, COLON, COMMA, DEDENT, DOT, DOUBLESLASH, DOUBLESLASHEQUAL, DOUBLESTAR, DOUBLESTAREQUAL, ENDMARKER, EQEQUAL, EQUAL, ERRORTOKEN, GREATER, GREATEREQUAL, INDENT, LBRACE, LEFTSHIFT, LEFTSHIFTEQUAL, LESS, LESSEQUAL, LPAR, LSQB, MINEQUAL, MINUS, NAME, NEWLINE, NOTEQUAL, NUMBER, N_TOKENS, OP, PERCENT, PERCENTEQUAL, PLUS, PLUSEQUAL, RBRACE, RIGHTSHIFT, RIGHTSHIFTEQUAL, RPAR, RSQB, SEMI, SLASH, SLASHEQUAL, STAR, STAREQUAL, STRING, TILDE, VBAR, VBAREQUAL, tok_name) from xonsh.lazyasd import LazyObject from xonsh.platform import PYTHON_VERSION_INFO cookie_re = LazyObject( lambda: re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII), globals(), 'cookie_re') blank_re = LazyObject(lambda: re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII), globals(), 'blank_re') # # token modifications # import token __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", "NL", "untokenize", "ENCODING", "TokenInfo", "TokenError", 'SEARCHPATH', 'ATDOLLAR', 'ATEQUAL', 'DOLLARNAME', 'IOREDIRECT'] PY35 = PYTHON_VERSION_INFO >= (3, 5, 0) if PY35: ASYNC = token.ASYNC AWAIT = token.AWAIT AUGASSIGN_OPS = r"[+\-*/%&@|^=<>]=?" ADDSPACE_TOKS = (NAME, NUMBER, ASYNC, AWAIT) else: AUGASSIGN_OPS = r"[+\-*/%&|^=<>]=?" ADDSPACE_TOKS = (NAME, NUMBER) del token # must clean up token COMMENT = N_TOKENS tok_name[COMMENT] = 'COMMENT' NL = N_TOKENS + 1 tok_name[NL] = 'NL' ENCODING = N_TOKENS + 2 tok_name[ENCODING] = 'ENCODING' N_TOKENS += 3 SEARCHPATH = N_TOKENS tok_name[N_TOKENS] = 'SEARCHPATH' N_TOKENS += 1 IOREDIRECT = N_TOKENS tok_name[N_TOKENS] = 'IOREDIRECT' N_TOKENS += 1 DOLLARNAME = N_TOKENS tok_name[N_TOKENS] = 'DOLLARNAME' N_TOKENS += 1 ATDOLLAR = N_TOKENS tok_name[N_TOKENS] = 'ATDOLLAR' N_TOKENS += 1 ATEQUAL = N_TOKENS tok_name[N_TOKENS] = 'ATEQUAL' N_TOKENS += 1 _xonsh_tokens = { '?': 'QUESTION', '@=': 'ATEQUAL', '@$': 'ATDOLLAR', '||': 'DOUBLEPIPE', '&&': 'DOUBLEAMPER', '@(': 'ATLPAREN', '!(': 'BANGLPAREN', '![': 'BANGLBRACKET', '$(': 'DOLLARLPAREN', '$[': 'DOLLARLBRACKET', '${': 'DOLLARLBRACE', '??': 'DOUBLEQUESTION', '@$(': 'ATDOLLARLPAREN', } additional_parenlevs = frozenset({'@(', '!(', '![', '$(', '$[', '${', '@$('}) _glbs = globals() for v in _xonsh_tokens.values(): _glbs[v] = N_TOKENS tok_name[N_TOKENS] = v N_TOKENS += 1 __all__.append(v) del _glbs, v EXACT_TOKEN_TYPES = { '(': LPAR, ')': RPAR, '[': LSQB, ']': RSQB, ':': COLON, ',': COMMA, ';': SEMI, '+': PLUS, '-': MINUS, '*': STAR, '/': SLASH, '|': VBAR, '&': AMPER, '<': LESS, '>': GREATER, '=': EQUAL, '.': DOT, '%': PERCENT, '{': LBRACE, '}': RBRACE, '==': EQEQUAL, '!=': NOTEQUAL, '<=': LESSEQUAL, '>=': GREATEREQUAL, '~': TILDE, '^': CIRCUMFLEX, '<<': LEFTSHIFT, '>>': RIGHTSHIFT, '**': DOUBLESTAR, '+=': PLUSEQUAL, '-=': MINEQUAL, '*=': STAREQUAL, '/=': SLASHEQUAL, '%=': PERCENTEQUAL, '&=': AMPEREQUAL, '|=': VBAREQUAL, '^=': CIRCUMFLEXEQUAL, '<<=': LEFTSHIFTEQUAL, '>>=': RIGHTSHIFTEQUAL, '**=': DOUBLESTAREQUAL, '//': DOUBLESLASH, '//=': DOUBLESLASHEQUAL, '@': AT, } EXACT_TOKEN_TYPES.update(_xonsh_tokens) class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): def __repr__(self): annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) return ('TokenInfo(type=%s, 
string=%r, start=%r, end=%r, line=%r)' % self._replace(type=annotated_type)) @property def exact_type(self): if self.type == OP and self.string in EXACT_TOKEN_TYPES: return EXACT_TOKEN_TYPES[self.string] else: return self.type def group(*choices): return '(' + '|'.join(choices) + ')' def tokany(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + tokany(r'\\\r?\n' + Whitespace) + maybe(Comment) Name_RE = r'\$?\w+' Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+' Binnumber = r'0[bB](?:_?[01])+' Octnumber = r'0[oO](?:_?[0-7])+' Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)' Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*' Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?', r'\.[0-9](?:_?[0-9])*') + maybe(Exponent) Expfloat = r'[0-9](?:_?[0-9])*' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) StringPrefix = r'(?:[bBp][rR]?|[rR][bBpfF]?|[uU]|[fF][rR]?)?' # Tail end of ' string. Single = r"[^'\\]*(?:\\.[^'\\]*)*'" # Tail end of " string. Double = r'[^"\\]*(?:\\.[^"\\]*)*"' # Tail end of ''' string. Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' Triple = group(StringPrefix + "'''", StringPrefix + '"""') # Single-line ' or " string. String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') # Xonsh-specific Syntax SearchPath = r"((?:[rgp]+|@\w*)?)`([^\n`\\]*(?:\\.[^\n`\\]*)*)`" # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get # recognized as two instances of =). _redir_names = ('out', 'all', 'err', 'e', '2', 'a', '&', '1', 'o') _redir_map = ( # stderr to stdout 'err>out', 'err>&1', '2>out', 'err>o', 'err>1', 'e>out', 'e>&1', '2>&1', 'e>o', '2>o', 'e>1', '2>1', # stdout to stderr 'out>err', 'out>&2', '1>err', 'out>e', 'out>2', 'o>err', 'o>&2', '1>&2', 'o>e', '1>e', 'o>2', '1>2', ) IORedirect = group(group(*_redir_map), '{}>>?'.format(group(*_redir_names))) _redir_check = set(_redir_map) _redir_check = {'{}>'.format(i) for i in _redir_names}.union(_redir_check) _redir_check = {'{}>>'.format(i) for i in _redir_names}.union(_redir_check) _redir_check = frozenset(_redir_check) Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", r"//=?", r"->", r"@\$\(?", r'\|\|', '&&', r'@\(', r'!\(', r'!\[', r'\$\(', r'\$\[', '\${', r'\?\?', r'\?', AUGASSIGN_OPS, r"~") Bracket = '[][(){}]' Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') Funny = group(Operator, Bracket, Special) PlainToken = group(IORedirect, Number, Funny, String, Name_RE, SearchPath) # First (or only) line of ' or " string. 
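# (Illustrative: ContStr matches either a string that closes on the same
# physical line or one whose opening line ends in a backslash continuation,
# e.g. the line
#     x = 'abc\
# is matched here and stitched to the following line later in _tokenize().)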
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple, SearchPath) PseudoToken = Whitespace + group(PseudoExtras, IORedirect, Number, Funny, ContStr, Name_RE) def _compile(expr): return re.compile(expr, re.UNICODE) endpats = {"'": Single, '"': Double, "'''": Single3, '"""': Double3, "r'''": Single3, 'r"""': Double3, "b'''": Single3, 'b"""': Double3, "f'''": Single3, 'f"""': Double3, "R'''": Single3, 'R"""': Double3, "B'''": Single3, 'B"""': Double3, "F'''": Single3, 'F"""': Double3, "br'''": Single3, 'br"""': Double3, "fr'''": Single3, 'fr"""': Double3, "bR'''": Single3, 'bR"""': Double3, "Br'''": Single3, 'Br"""': Double3, "BR'''": Single3, 'BR"""': Double3, "rb'''": Single3, 'rb"""': Double3, "rf'''": Single3, 'rf"""': Double3, "Rb'''": Single3, 'Rb"""': Double3, "Fr'''": Single3, 'Fr"""': Double3, "rB'''": Single3, 'rB"""': Double3, "rF'''": Single3, 'rF"""': Double3, "RB'''": Single3, 'RB"""': Double3, "RF'''": Single3, 'RF"""': Double3, "u'''": Single3, 'u"""': Double3, "U'''": Single3, 'U"""': Double3, "p'''": Single3, 'p"""': Double3, "pr'''": Single3, 'pr"""': Double3, "pR'''": Single3, 'pR"""': Double3, "rp'''": Single3, 'rp"""': Double3, "Rp'''": Single3, 'Rp"""': Double3, 'r': None, 'R': None, 'b': None, 'B': None, 'u': None, 'U': None, 'p': None, 'f': None, 'F': None} triple_quoted = {} for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', "b'''", 'b"""', "B'''", 'B"""', "f'''", 'f"""', "F'''", 'F"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""', "rb'''", 'rb"""', "rB'''", 'rB"""', "Rb'''", 'Rb"""', "RB'''", 'RB"""', "fr'''", 'fr"""', "Fr'''", 'Fr"""', "fR'''", 'fR"""', "FR'''", 'FR"""', "rf'''", 'rf"""', "rF'''", 'rF"""', "Rf'''", 'Rf"""', "RF'''", 'RF"""', "u'''", 'u"""', "U'''", 'U"""', "p'''", 'p""""', "pr'''", 'pr""""', "pR'''", 'pR""""', "rp'''", 'rp""""', "Rp'''", 'Rp""""', ): triple_quoted[t] = t single_quoted = {} for t in ("'", '"', "r'", 'r"', "R'", 'R"', "b'", 'b"', "B'", 'B"', "f'", 'f"', "F'", 'F"', "br'", 'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"', "rb'", 'rb"', "rB'", 'rB"', "Rb'", 'Rb"', "RB'", 'RB"', "fr'", 'fr"', "Fr'", 'Fr"', "fR'", 'fR"', "FR'", 'FR"', "rf'", 'rf"', "rF'", 'rF"', "Rf'", 'Rf"', "RF'", 'RF"', "u'", 'u"', "U'", 'U"', "p'", 'p"', "pr'", 'pr"', "pR'", 'pR"', "rp'", 'rp"', "Rp'", 'Rp"', ): single_quoted[t] = t tabsize = 8 class TokenError(Exception): pass class StopTokenizing(Exception): pass class Untokenizer: def __init__(self): self.tokens = [] self.prev_row = 1 self.prev_col = 0 self.encoding = None def add_whitespace(self, start): row, col = start if row < self.prev_row or row == self.prev_row and col < self.prev_col: raise ValueError("start ({},{}) precedes previous end ({},{})" .format(row, col, self.prev_row, self.prev_col)) row_offset = row - self.prev_row if row_offset: self.tokens.append("\\\n" * row_offset) self.prev_col = 0 col_offset = col - self.prev_col if col_offset: self.tokens.append(" " * col_offset) def untokenize(self, iterable): it = iter(iterable) indents = [] startline = False for t in it: if len(t) == 2: self.compat(t, it) break tok_type, token, start, end, line = t if tok_type == ENCODING: self.encoding = token continue if tok_type == ENDMARKER: break if tok_type == INDENT: indents.append(token) continue elif tok_type == DEDENT: indents.pop() self.prev_row, self.prev_col = end continue elif tok_type in (NEWLINE, NL): startline = 
True elif startline and indents: indent = indents[-1] if start[1] >= len(indent): self.tokens.append(indent) self.prev_col = len(indent) startline = False self.add_whitespace(start) self.tokens.append(token) self.prev_row, self.prev_col = end if tok_type in (NEWLINE, NL): self.prev_row += 1 self.prev_col = 0 return "".join(self.tokens) def compat(self, token, iterable): indents = [] toks_append = self.tokens.append startline = token[0] in (NEWLINE, NL) prevstring = False for tok in itertools.chain([token], iterable): toknum, tokval = tok[:2] if toknum == ENCODING: self.encoding = tokval continue if toknum in ADDSPACE_TOKS: tokval += ' ' # Insert a space between two consecutive strings if toknum == STRING: if prevstring: tokval = ' ' + tokval prevstring = True else: prevstring = False if toknum == INDENT: indents.append(tokval) continue elif toknum == DEDENT: indents.pop() continue elif toknum in (NEWLINE, NL): startline = True elif startline and indents: toks_append(indents[-1]) startline = False toks_append(tokval) def untokenize(iterable): """Transform tokens back into Python source code. It returns a bytes object, encoded using the ENCODING token, which is the first token sequence output by tokenize. Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. If only two tokens are passed, the resulting output is poor. Round-trip invariant for full input: Untokenized source will match input source exactly Round-trip invariant for limited intput: # Output bytes will tokenize the back to the input t1 = [tok[:2] for tok in tokenize(f.readline)] newcode = untokenize(t1) readline = BytesIO(newcode).readline t2 = [tok[:2] for tok in tokenize(readline)] assert t1 == t2 """ ut = Untokenizer() out = ut.untokenize(iterable) if ut.encoding is not None: out = out.encode(ut.encoding) return out def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. """ try: filename = readline.__self__.name except AttributeError: filename = None bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return b'' def find_cookie(line): try: # Decode as UTF-8. Either the line is an encoding declaration, # in which case it should be pure ASCII, or it must be UTF-8 # per default encoding. 
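            # (a typical cookie line looks like "# -*- coding: latin-1 -*-";
            # see PEP 263 for the full grammar matched by cookie_re)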
line_string = line.decode('utf-8') except UnicodeDecodeError: msg = "invalid or missing encoding declaration" if filename is not None: msg = '{} for {!r}'.format(msg, filename) raise SyntaxError(msg) match = cookie_re.match(line_string) if not match: return None encoding = _get_normal_name(match.group(1)) try: codecs.lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter if filename is None: msg = "unknown encoding: " + encoding else: msg = "unknown encoding for {!r}: {}".format(filename, encoding) raise SyntaxError(msg) if bom_found: if encoding != 'utf-8': # This behaviour mimics the Python interpreter if filename is None: msg = 'encoding problem: utf-8' else: msg = 'encoding problem for {!r}: utf-8'.format(filename) raise SyntaxError(msg) encoding += '-sig' return encoding first = read_or_stop() if first.startswith(codecs.BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] if not blank_re.match(first): return default, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] def tokopen(filename): """Open a file in read only mode using the encoding detected by detect_encoding(). """ buffer = builtins.open(filename, 'rb') try: encoding, lines = detect_encoding(buffer.readline) buffer.seek(0) text = io.TextIOWrapper(buffer, encoding, line_buffering=True) text.mode = 'r' return text except Exception: buffer.close() raise def _tokenize(readline, encoding): lnum = parenlev = continued = 0 numchars = '0123456789' contstr, needcont = '', 0 contline = None indents = [0] # 'stashed' and 'async_*' are used for async/await parsing stashed = None async_def = False async_def_indent = 0 async_def_nl = False if encoding is not None: if encoding == "utf-8-sig": # BOM will already have been stripped. 
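            # detect_encoding() only reports 'utf-8-sig' when it found (and
            # removed) a BOM, so the ENCODING token can simply say 'utf-8'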
encoding = "utf-8" yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') while True: # loop over lines in stream try: line = readline() except StopIteration: line = b'' if encoding is not None: line = line.decode(encoding) lnum += 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield TokenInfo(STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield TokenInfo(ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column += 1 elif line[pos] == '\t': column = (column // tabsize + 1) * tabsize elif line[pos] == '\f': column = 0 else: break pos += 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') nl_pos = pos + len(comment_token) yield TokenInfo(COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) yield TokenInfo(NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) else: yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("", lnum, pos, line)) indents = indents[:-1] if async_def and async_def_indent >= indents[-1]: async_def = False async_def_nl = False async_def_indent = 0 yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) if async_def and async_def_nl and async_def_indent >= indents[-1]: async_def = False async_def_nl = False async_def_indent = 0 else: # continued statement if not line: raise TokenError("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = _compile(PseudoToken).match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end if start == end: continue token, initial = line[start:end], line[start] if token in _redir_check: yield TokenInfo(IOREDIRECT, token, spos, epos, line) elif (initial in numchars or # ordinary number (initial == '.' and token != '.' 
and token != '...')): yield TokenInfo(NUMBER, token, spos, epos, line) elif initial in '\r\n': if stashed: yield stashed stashed = None if parenlev > 0: yield TokenInfo(NL, token, spos, epos, line) else: yield TokenInfo(NEWLINE, token, spos, epos, line) if async_def: async_def_nl = True elif initial == '#': assert not token.endswith("\n") if stashed: yield stashed stashed = None yield TokenInfo(COMMENT, token, spos, epos, line) # Xonsh-specific Regex Globbing elif re.match(SearchPath, token): yield TokenInfo(SEARCHPATH, token, spos, epos, line) elif token in triple_quoted: endprog = _compile(endpats[token]) endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield TokenInfo(STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = _compile(endpats[initial] or endpats[token[1]] or endpats[token[2]]) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield TokenInfo(STRING, token, spos, epos, line) elif token.startswith('$') and token[1:].isidentifier(): yield TokenInfo(DOLLARNAME, token, spos, epos, line) elif initial.isidentifier(): # ordinary name if token in ('async', 'await'): if async_def: yield TokenInfo( ASYNC if token == 'async' else AWAIT, token, spos, epos, line) continue tok = TokenInfo(NAME, token, spos, epos, line) if token == 'async' and not stashed: stashed = tok continue if token == 'def' and (stashed and stashed.type == NAME and stashed.string == 'async'): async_def = True async_def_indent = indents[-1] yield TokenInfo(ASYNC, stashed.string, stashed.start, stashed.end, stashed.line) stashed = None if stashed: yield stashed stashed = None yield tok elif token == '\\\n' or token == '\\\r\n': # continued stmt continued = 1 yield TokenInfo(ERRORTOKEN, token, spos, epos, line) elif initial == '\\': # continued stmt # for cases like C:\\path\\to\\file continued = 1 else: if initial in '([{': parenlev += 1 elif initial in ')]}': parenlev -= 1 elif token in additional_parenlevs: parenlev += 1 if stashed: yield stashed stashed = None yield TokenInfo(OP, token, spos, epos, line) else: yield TokenInfo(ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line) pos += 1 if stashed: yield stashed stashed = None for indent in indents[1:]: # pop remaining indent levels yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') def tokenize(readline): """ The tokenize() generator requires one argument, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as bytes. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile, 'rb').__next__ # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included. 
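    Illustrative usage sketch (assumes some file 'script.xsh' exists)::

        with open('script.xsh', 'rb') as f:
            for tok in tokenize(f.readline):
                print(tok)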
The first token sequence will always be an ENCODING token which tells you which encoding was used to decode the bytes stream. """ encoding, consumed = detect_encoding(readline) rl_gen = iter(readline, b"") empty = itertools.repeat(b"") return _tokenize(itertools.chain(consumed, rl_gen, empty).__next__, encoding) # An undocumented, backwards compatible, API for all the places in the standard # library that expect to be able to use tokenize with strings def generate_tokens(readline): return _tokenize(readline, None) def tokenize_main(): import argparse # Helper error handling routines def perror(message): print(message, file=sys.stderr) def error(message, filename=None, location=None): if location: args = (filename,) + location + (message,) perror("%s:%d:%d: error: %s" % args) elif filename: perror("%s: error: %s" % (filename, message)) else: perror("error: %s" % message) sys.exit(1) # Parse the arguments and options parser = argparse.ArgumentParser(prog='python -m tokenize') parser.add_argument(dest='filename', nargs='?', metavar='filename.py', help='the file to tokenize; defaults to stdin') parser.add_argument('-e', '--exact', dest='exact', action='store_true', help='display token names using the exact type') args = parser.parse_args() try: # Tokenize the input if args.filename: filename = args.filename with builtins.open(filename, 'rb') as f: tokens = list(tokenize(f.readline)) else: filename = "" tokens = _tokenize(sys.stdin.readline, None) # Output the tokenization for token in tokens: token_type = token.type if args.exact: token_type = token.exact_type token_range = "%d,%d-%d,%d:" % (token.start + token.end) print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string)) except IndentationError as err: line, column = err.args[1][1:3] error(err.args[0], filename, (line, column)) except TokenError as err: line, column = err.args[1] error(err.args[0], filename, (line, column)) except SyntaxError as err: error(err, filename) except OSError as err: error(err) except KeyboardInterrupt: print("interrupted\n") except Exception as err: perror("unexpected error: %s" % err) raise xonsh-0.6.0/xonsh/tools.py000066400000000000000000001765171320541242300155420ustar00rootroot00000000000000# -*- coding: utf-8 -*- """Misc. xonsh tools. 
The following implementations were forked from the IPython project: * Copyright (c) 2008-2014, IPython Development Team * Copyright (C) 2001-2007 Fernando Perez * Copyright (c) 2001, Janko Hauser * Copyright (c) 2001, Nathaniel Gray Implementations: * decode() * encode() * cast_unicode() * safe_hasattr() * indent() """ import builtins import collections import collections.abc as cabc import contextlib import ctypes import datetime from distutils.version import LooseVersion import functools import glob import itertools import os import pathlib import re import subprocess import sys import threading import traceback import warnings import operator # adding imports from further xonsh modules is discouraged to avoid circular # dependencies from xonsh import __version__ from xonsh.lazyasd import LazyObject, LazyDict, lazyobject from xonsh.platform import (has_prompt_toolkit, scandir, DEFAULT_ENCODING, ON_LINUX, ON_WINDOWS, PYTHON_VERSION_INFO, expanduser, os_environ) @functools.lru_cache(1) def is_superuser(): if ON_WINDOWS: rtn = (ctypes.windll.shell32.IsUserAnAdmin() != 0) else: rtn = (os.getuid() == 0) return rtn class XonshError(Exception): pass class XonshCalledProcessError(XonshError, subprocess.CalledProcessError): """Raised when there's an error with a called process Inherits from XonshError and subprocess.CalledProcessError, catching either will also catch this error. Raised *after* iterating over stdout of a captured command, if the returncode of the command is nonzero. Example: try: for line in !(ls): print(line) except subprocess.CalledProcessError as error: print("Error in process: {}.format(error.completed_command.pid)) This also handles differences between Python3.4 and 3.5 where CalledProcessError is concerned. """ def __init__(self, returncode, command, output=None, stderr=None, completed_command=None): super().__init__(returncode, command, output) self.stderr = stderr self.completed_command = completed_command def expand_path(s, expand_user=True): """Takes a string path and expands ~ to home if expand_user is set and environment vars if EXPAND_ENV_VARS is set.""" env = getattr(builtins, '__xonsh_env__', os_environ) if env.get('EXPAND_ENV_VARS', False): s = expandvars(s) if expand_user: # expand ~ according to Bash unquoted rules "Each variable assignment is # checked for unquoted tilde-prefixes immediately following a ':' or the # first '='". See the following for more details. # https://www.gnu.org/software/bash/manual/html_node/Tilde-Expansion.html pre, char, post = s.partition('=') if char: s = expanduser(pre) + char s += os.pathsep.join(map(expanduser, post.split(os.pathsep))) else: s = expanduser(s) return s def _expandpath(path): """Performs environment variable / user expansion on a given path if EXPAND_ENV_VARS is set. """ env = getattr(builtins, '__xonsh_env__', os_environ) expand_user = env.get('EXPAND_ENV_VARS', False) return expand_path(path, expand_user=expand_user) def decode_bytes(b): """Tries to decode the bytes using XONSH_ENCODING if available, otherwise using sys.getdefaultencoding(). """ env = getattr(builtins, '__xonsh_env__', os_environ) enc = env.get('XONSH_ENCODING') or DEFAULT_ENCODING err = env.get('XONSH_ENCODING_ERRORS') or 'strict' return b.decode(encoding=enc, errors=err) def findfirst(s, substrs): """Finds whichever of the given substrings occurs first in the given string and returns that substring, or returns None if no such strings occur. 
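    As implemented below, the index is returned as well. Illustrative::

        findfirst('ls; pwd', [';', '&&'])   # -> (2, ';')
        findfirst('ls', [';', '&&'])        # -> (2, None), i.e. (len(s), None)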
""" i = len(s) result = None for substr in substrs: pos = s.find(substr) if -1 < pos < i: i = pos result = substr return i, result class EnvPath(collections.MutableSequence): """A class that implements an environment path, which is a list of strings. Provides a custom method that expands all paths if the relevant env variable has been set. """ def __init__(self, args=None): if not args: self._l = [] else: if isinstance(args, str): self._l = args.split(os.pathsep) elif isinstance(args, pathlib.Path): self._l = [args] elif isinstance(args, bytes): # decode bytes to a string and then split based on # the default path separator self._l = decode_bytes(args).split(os.pathsep) elif isinstance(args, collections.Iterable): # put everything in a list -before- performing the type check # in order to be able to retrieve it later, for cases such as # when a generator expression was passed as an argument args = list(args) if not all(isinstance(i, (str, bytes, pathlib.Path)) for i in args): # make TypeError's message as informative as possible # when given an invalid initialization sequence raise TypeError( "EnvPath's initialization sequence should only " "contain str, bytes and pathlib.Path entries") self._l = args else: raise TypeError('EnvPath cannot be initialized with items ' 'of type %s' % type(args)) def __getitem__(self, item): # handle slices separately if isinstance(item, slice): return [_expandpath(i) for i in self._l[item]] else: return _expandpath(self._l[item]) def __setitem__(self, index, item): self._l.__setitem__(index, item) def __len__(self): return len(self._l) def __delitem__(self, key): self._l.__delitem__(key) def insert(self, index, value): self._l.insert(index, value) @property def paths(self): """ Returns the list of directories that this EnvPath contains. """ return list(self) def __repr__(self): return repr(self._l) def __eq__(self, other): if len(self) != len(other): return False return all(map(operator.eq, self, other)) def _repr_pretty_(self, p, cycle): """ Pretty print path list """ if cycle: p.text('EnvPath(...)') else: with p.group(1, 'EnvPath(\n[', ']\n)'): for idx, item in enumerate(self): if idx: p.text(',') p.breakable() p.pretty(item) def __add__(self, other): if isinstance(other, EnvPath): other = other._l return EnvPath(self._l + other) def __radd__(self, other): if isinstance(other, EnvPath): other = other._l return EnvPath(other + self._l) class DefaultNotGivenType(object): """Singleton for representing when no default value is given.""" __inst = None def __new__(cls): if DefaultNotGivenType.__inst is None: DefaultNotGivenType.__inst = object.__new__(cls) return DefaultNotGivenType.__inst DefaultNotGiven = DefaultNotGivenType() BEG_TOK_SKIPS = LazyObject( lambda: frozenset(['WS', 'INDENT', 'NOT', 'LPAREN']), globals(), 'BEG_TOK_SKIPS') END_TOK_TYPES = LazyObject(lambda: frozenset(['SEMI', 'AND', 'OR', 'RPAREN']), globals(), 'END_TOK_TYPES') RE_END_TOKS = LazyObject(lambda: re.compile('(;|and|\&\&|or|\|\||\))'), globals(), 'RE_END_TOKS') LPARENS = LazyObject(lambda: frozenset(['LPAREN', 'AT_LPAREN', 'BANG_LPAREN', 'DOLLAR_LPAREN', 'ATDOLLAR_LPAREN']), globals(), 'LPARENS') def _is_not_lparen_and_rparen(lparens, rtok): """Tests if an RPAREN token is matched with something other than a plain old LPAREN type. 
""" # note that any([]) is False, so this covers len(lparens) == 0 return rtok.type == 'RPAREN' and any(x != 'LPAREN' for x in lparens) def balanced_parens(line, mincol=0, maxcol=None, lexer=None): """Determines if parentheses are balanced in an expression.""" line = line[mincol:maxcol] if lexer is None: lexer = builtins.__xonsh_execer__.parser.lexer if '(' not in line and ')' not in line: return True cnt = 0 lexer.input(line) for tok in lexer: if tok.type in LPARENS: cnt += 1 elif tok.type == 'RPAREN': cnt -= 1 elif tok.type == 'ERRORTOKEN' and ')' in tok.value: cnt -= 1 return cnt == 0 def find_next_break(line, mincol=0, lexer=None): """Returns the column number of the next logical break in subproc mode. This function may be useful in finding the maxcol argument of subproc_toks(). """ if mincol >= 1: line = line[mincol:] if lexer is None: lexer = builtins.__xonsh_execer__.parser.lexer if RE_END_TOKS.search(line) is None: return None maxcol = None lparens = [] lexer.input(line) for tok in lexer: if tok.type in LPARENS: lparens.append(tok.type) elif tok.type in END_TOK_TYPES: if _is_not_lparen_and_rparen(lparens, tok): lparens.pop() else: maxcol = tok.lexpos + mincol + 1 break elif tok.type == 'ERRORTOKEN' and ')' in tok.value: maxcol = tok.lexpos + mincol + 1 break elif tok.type == 'BANG': maxcol = mincol + len(line) + 1 break return maxcol def subproc_toks(line, mincol=-1, maxcol=None, lexer=None, returnline=False, greedy=False): """Encapsulates tokens in a source code line in a uncaptured subprocess ![] starting at a minimum column. If there are no tokens (ie in a comment line) this returns None. If greedy is True, it will encapsulate normal parentheses. Greedy is False by default. """ if lexer is None: lexer = builtins.__xonsh_execer__.parser.lexer if maxcol is None: maxcol = len(line) + 1 lexer.reset() lexer.input(line) toks = [] lparens = [] saw_macro = False end_offset = 0 for tok in lexer: pos = tok.lexpos if tok.type not in END_TOK_TYPES and pos >= maxcol: break if tok.type == 'BANG': saw_macro = True if saw_macro and tok.type not in ('NEWLINE', 'DEDENT'): toks.append(tok) continue if tok.type in LPARENS: lparens.append(tok.type) if greedy and len(lparens) > 0 and 'LPAREN' in lparens: toks.append(tok) if tok.type == 'RPAREN': lparens.pop() continue if len(toks) == 0 and tok.type in BEG_TOK_SKIPS: continue # handle indentation elif len(toks) > 0 and toks[-1].type in END_TOK_TYPES: if _is_not_lparen_and_rparen(lparens, toks[-1]): lparens.pop() # don't continue or break elif pos < maxcol and tok.type not in ('NEWLINE', 'DEDENT', 'WS'): if not greedy: toks.clear() if tok.type in BEG_TOK_SKIPS: continue else: break if pos < mincol: continue toks.append(tok) if tok.type == 'WS' and tok.value == '\\': pass # line continuation elif tok.type == 'NEWLINE': break elif tok.type == 'DEDENT': # fake a newline when dedenting without a newline tok.type = 'NEWLINE' tok.value = '\n' tok.lineno -= 1 if len(toks) >= 2: prev_tok_end = toks[-2].lexpos + len(toks[-2].value) else: prev_tok_end = len(line) if '#' in line[prev_tok_end:]: tok.lexpos = prev_tok_end # prevents wrapping comments else: tok.lexpos = len(line) break elif check_bad_str_token(tok): return else: if len(toks) > 0 and toks[-1].type in END_TOK_TYPES: if _is_not_lparen_and_rparen(lparens, toks[-1]): pass elif greedy and toks[-1].type == 'RPAREN': pass else: toks.pop() if len(toks) == 0: return # handle comment lines tok = toks[-1] pos = tok.lexpos if isinstance(tok.value, str): end_offset = len(tok.value.rstrip()) else: el = 
line[pos:].split('#')[0].rstrip() end_offset = len(el) if len(toks) == 0: return # handle comment lines elif saw_macro or greedy: end_offset = len(toks[-1].value.rstrip()) + 1 beg, end = toks[0].lexpos, (toks[-1].lexpos + end_offset) end = len(line[:end].rstrip()) rtn = '![' + line[beg:end] + ']' if returnline: rtn = line[:beg] + rtn + line[end:] return rtn def check_bad_str_token(tok): """Checks if a token is a bad string.""" if tok.type == 'ERRORTOKEN' and tok.value == 'EOF in multi-line string': return True elif isinstance(tok.value, str) and not check_quotes(tok.value): return True else: return False def check_quotes(s): """Checks a string to make sure that if it starts with quotes, it also ends with quotes. """ starts_as_str = RE_BEGIN_STRING.match(s) is not None ends_as_str = s.endswith('"') or s.endswith("'") if not starts_as_str and not ends_as_str: ok = True elif starts_as_str and not ends_as_str: ok = False elif not starts_as_str and ends_as_str: ok = False else: m = RE_COMPLETE_STRING.match(s) ok = m is not None return ok def _have_open_triple_quotes(s): if s.count('"""') % 2 == 1: open_triple = '"""' elif s.count("'''") % 2 == 1: open_triple = "'''" else: open_triple = False return open_triple def get_line_continuation(): """ The line continuation characters used in subproc mode. In interactive mode on Windows the backslash must be preceded by a space. This is because paths on Windows may end in a backslash. """ if (ON_WINDOWS and hasattr(builtins, '__xonsh_env__') and builtins.__xonsh_env__.get('XONSH_INTERACTIVE', False)): return ' \\' else: return '\\' def get_logical_line(lines, idx): """Returns a single logical line (i.e. one without line continuations) from a list of lines. This line should begin at index idx. This also returns the number of physical lines the logical line spans. The lines should not contain newlines """ n = 1 nlines = len(lines) linecont = get_line_continuation() while idx > 0 and lines[idx-1].endswith(linecont): idx -= 1 start = idx line = lines[idx] open_triple = _have_open_triple_quotes(line) while (line.endswith(linecont) or open_triple) and idx < nlines - 1: n += 1 idx += 1 if line.endswith(linecont): line = line[:-1] + lines[idx] else: line = line + '\n' + lines[idx] open_triple = _have_open_triple_quotes(line) return line, n, start def replace_logical_line(lines, logical, idx, n): """Replaces lines at idx that may end in line continuation with a logical line that spans n lines. """ linecont = get_line_continuation() if n == 1: lines[idx] = logical return space = ' ' for i in range(idx, idx+n-1): a = len(lines[i]) b = logical.find(space, a-1) if b < 0: # no space found lines[i] = logical logical = '' else: # found space to split on lines[i] = logical[:b] + linecont logical = logical[b:] lines[idx+n-1] = logical def is_balanced(expr, ltok, rtok): """Determines whether an expression has unbalanced opening and closing tokens.""" lcnt = expr.count(ltok) if lcnt == 0: return True rcnt = expr.count(rtok) if lcnt == rcnt: return True else: return False def subexpr_from_unbalanced(expr, ltok, rtok): """Attempts to pull out a valid subexpression for unbalanced grouping, based on opening tokens, eg. '(', and closing tokens, eg. ')'. This does not do full tokenization, but should be good enough for tab completion. 
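
    Illustrative example (hypothetical input), sketching the tab-completion
    use case of pulling out the trailing piece after an unmatched opener::

        >>> subexpr_from_unbalanced("d['key", '[', ']')
        "'key"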
""" if is_balanced(expr, ltok, rtok): return expr subexpr = expr.rsplit(ltok, 1)[-1] subexpr = subexpr.rsplit(',', 1)[-1] subexpr = subexpr.rsplit(':', 1)[-1] return subexpr def subexpr_before_unbalanced(expr, ltok, rtok): """Obtains the expression prior to last unbalanced left token.""" subexpr, _, post = expr.rpartition(ltok) nrtoks_in_post = post.count(rtok) while nrtoks_in_post != 0: for i in range(nrtoks_in_post): subexpr, _, post = subexpr.rpartition(ltok) nrtoks_in_post = post.count(rtok) _, _, subexpr = subexpr.rpartition(rtok) _, _, subexpr = subexpr.rpartition(ltok) return subexpr def decode(s, encoding=None): encoding = encoding or DEFAULT_ENCODING return s.decode(encoding, "replace") def encode(u, encoding=None): encoding = encoding or DEFAULT_ENCODING return u.encode(encoding, "replace") def cast_unicode(s, encoding=None): if isinstance(s, bytes): return decode(s, encoding) return s def safe_hasattr(obj, attr): """In recent versions of Python, hasattr() only catches AttributeError. This catches all errors. """ try: getattr(obj, attr) return True except Exception: # pylint:disable=bare-except return False def indent(instr, nspaces=4, ntabs=0, flatten=False): """Indent a string a given number of spaces or tabstops. indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces. Parameters ---------- instr : basestring The string to be indented. nspaces : int (default: 4) The number of spaces to be indented. ntabs : int (default: 0) The number of tabs to be indented. flatten : bool (default: False) Whether to scrub existing indentation. If True, all lines will be aligned to the same indentation. If False, existing indentation will be strictly increased. Returns ------- outstr : string indented by ntabs and nspaces. """ if instr is None: return ind = '\t' * ntabs + ' ' * nspaces if flatten: pat = re.compile(r'^\s*', re.MULTILINE) else: pat = re.compile(r'^', re.MULTILINE) outstr = re.sub(pat, ind, instr) if outstr.endswith(os.linesep + ind): return outstr[:-len(ind)] else: return outstr def get_sep(): """ Returns the appropriate filepath separator char depending on OS and xonsh options set """ if ON_WINDOWS and builtins.__xonsh_env__.get('FORCE_POSIX_PATHS'): return os.altsep else: return os.sep def fallback(cond, backup): """Decorator for returning the object if cond is true and a backup if cond is false. """ def dec(obj): return obj if cond else backup return dec # The following redirect classes were taken directly from Python 3.5's source # code (from the contextlib module). This can be removed when 3.5 is released, # although redirect_stdout exists in 3.4, redirect_stderr does not. # See the Python software license: https://docs.python.org/3/license.html # Copyright (c) Python Software Foundation. All rights reserved. class _RedirectStream: _stream = None def __init__(self, new_target): self._new_target = new_target # We use a list of old targets to make this CM re-entrant self._old_targets = [] def __enter__(self): self._old_targets.append(getattr(sys, self._stream)) setattr(sys, self._stream, self._new_target) return self._new_target def __exit__(self, exctype, excinst, exctb): setattr(sys, self._stream, self._old_targets.pop()) class redirect_stdout(_RedirectStream): """Context manager for temporarily redirecting stdout to another file:: # How to send help() to stderr with redirect_stdout(sys.stderr): help(dir) # How to write help() to a file with open('help.txt', 'w') as f: with redirect_stdout(f): help(pow) Mostly for backwards compatibility. 
""" _stream = "stdout" class redirect_stderr(_RedirectStream): """Context manager for temporarily redirecting stderr to another file.""" _stream = "stderr" def _yield_accessible_unix_file_names(path): """yield file names of executable files in path.""" if not os.path.exists(path): return for file_ in scandir(path): try: if file_.is_file() and os.access(file_.path, os.X_OK): yield file_.name except (FileNotFoundError, NotADirectoryError): # broken Symlink are neither dir not files pass def _executables_in_posix(path): if not os.path.exists(path): return elif PYTHON_VERSION_INFO < (3, 5, 0): for fname in os.listdir(path): fpath = os.path.join(path, fname) if (os.path.exists(fpath) and os.access(fpath, os.X_OK) and (not os.path.isdir(fpath))): yield fname else: yield from _yield_accessible_unix_file_names(path) def _executables_in_windows(path): if not os.path.isdir(path): return extensions = builtins.__xonsh_env__['PATHEXT'] if PYTHON_VERSION_INFO < (3, 5, 0): for fname in os.listdir(path): fpath = os.path.join(path, fname) if (os.path.exists(fpath) and not os.path.isdir(fpath)): base_name, ext = os.path.splitext(fname) if ext.upper() in extensions: yield fname else: for x in scandir(path): if x.is_file(): fname = x.name else: continue base_name, ext = os.path.splitext(fname) if ext.upper() in extensions: yield fname def executables_in(path): """Returns a generator of files in path that the user could execute. """ if ON_WINDOWS: func = _executables_in_windows else: func = _executables_in_posix try: yield from func(path) except PermissionError: return def command_not_found(cmd): """Uses the debian/ubuntu command-not-found utility to suggest packages for a command that cannot currently be found. """ if not ON_LINUX: return '' elif not os.path.isfile('/usr/lib/command-not-found'): # utility is not on PATH return '' c = '/usr/lib/command-not-found {0}; exit 0' s = subprocess.check_output(c.format(cmd), universal_newlines=True, stderr=subprocess.STDOUT, shell=True) s = '\n'.join(s.rstrip().splitlines()).strip() return s def suggest_commands(cmd, env, aliases): """Suggests alternative commands given an environment and aliases.""" if not env.get('SUGGEST_COMMANDS'): return '' thresh = env.get('SUGGEST_THRESHOLD') max_sugg = env.get('SUGGEST_MAX_NUM') if max_sugg < 0: max_sugg = float('inf') cmd = cmd.lower() suggested = {} for alias in builtins.aliases: if alias not in suggested: if levenshtein(alias.lower(), cmd, thresh) < thresh: suggested[alias] = 'Alias' for path in filter(os.path.isdir, env.get('PATH')): for _file in executables_in(path): if (_file not in suggested and levenshtein(_file.lower(), cmd, thresh) < thresh): suggested[_file] = \ 'Command ({0})'.format(os.path.join(path, _file)) suggested = collections.OrderedDict( sorted(suggested.items(), key=lambda x: suggestion_sort_helper(x[0].lower(), cmd))) num = min(len(suggested), max_sugg) if num == 0: rtn = command_not_found(cmd) else: oneof = '' if num == 1 else 'one of ' tips = 'Did you mean {}the following?'.format(oneof) items = list(suggested.popitem(False) for _ in range(num)) length = max(len(key) for key, _ in items) + 2 alternatives = '\n'.join(' {: <{}} {}'.format(key + ":", length, val) for key, val in items) rtn = '{}\n{}'.format(tips, alternatives) c = command_not_found(cmd) rtn += ('\n\n' + c) if len(c) > 0 else '' return rtn def print_exception(msg=None): """Print exceptions with/without traceback.""" env = getattr(builtins, '__xonsh_env__', None) # flags indicating whether the traceback options have been manually set if env 
is None: env = os_environ manually_set_trace = 'XONSH_SHOW_TRACEBACK' in env manually_set_logfile = 'XONSH_TRACEBACK_LOGFILE' in env else: manually_set_trace = env.is_manually_set('XONSH_SHOW_TRACEBACK') manually_set_logfile = env.is_manually_set('XONSH_TRACEBACK_LOGFILE') if (not manually_set_trace) and (not manually_set_logfile): # Notify about the traceback output possibility if neither of # the two options have been manually set sys.stderr.write('xonsh: For full traceback set: ' '$XONSH_SHOW_TRACEBACK = True\n') # get env option for traceback and convert it if necessary show_trace = env.get('XONSH_SHOW_TRACEBACK', False) if not is_bool(show_trace): show_trace = to_bool(show_trace) # if the trace option has been set, print all traceback info to stderr if show_trace: # notify user about XONSH_TRACEBACK_LOGFILE if it has # not been set manually if not manually_set_logfile: sys.stderr.write('xonsh: To log full traceback to a file set: ' '$XONSH_TRACEBACK_LOGFILE = \n') traceback.print_exc() # additionally, check if a file for traceback logging has been # specified and convert to a proper option if needed log_file = env.get('XONSH_TRACEBACK_LOGFILE', None) log_file = to_logfile_opt(log_file) if log_file: # if log_file <> '' or log_file <> None, append # traceback log there as well with open(os.path.abspath(log_file), 'a') as f: traceback.print_exc(file=f) if not show_trace: # if traceback output is disabled, print the exception's # error message on stderr. display_error_message() if msg: msg = msg if msg.endswith('\n') else msg + '\n' sys.stderr.write(msg) def display_error_message(): """ Prints the error message of the current exception on stderr. """ exc_type, exc_value, exc_traceback = sys.exc_info() exception_only = traceback.format_exception_only(exc_type, exc_value) sys.stderr.write(''.join(exception_only)) def is_writable_file(filepath): """ Checks if a filepath is valid for writing. """ filepath = expand_path(filepath) # convert to absolute path if needed if not os.path.isabs(filepath): filepath = os.path.abspath(filepath) # cannot write to directories if os.path.isdir(filepath): return False # if the file exists and is writable, we're fine if os.path.exists(filepath): return True if os.access(filepath, os.W_OK) else False # if the path doesn't exist, isolate its directory component # and ensure that directory is writable instead return os.access(os.path.dirname(filepath), os.W_OK) # Modified from Public Domain code, by Magnus Lie Hetland # from http://hetland.org/coding/python/levenshtein.py def levenshtein(a, b, max_dist=float('inf')): """Calculates the Levenshtein distance between a and b.""" n, m = len(a), len(b) if abs(n - m) > max_dist: return float('inf') if n > m: # Make sure n <= m, to use O(min(n,m)) space a, b = b, a n, m = m, n current = range(n + 1) for i in range(1, m + 1): previous, current = current, [i] + [0] * n for j in range(1, n + 1): add, delete = previous[j] + 1, current[j - 1] + 1 change = previous[j - 1] if a[j - 1] != b[i - 1]: change = change + 1 current[j] = min(add, delete, change) return current[n] def suggestion_sort_helper(x, y): """Returns a score (lower is better) for x based on how similar it is to y. Used to rank suggestions.""" x = x.lower() y = y.lower() lendiff = len(x) + len(y) inx = len([i for i in x if i not in y]) iny = len([i for i in y if i not in x]) return lendiff + inx + iny def escape_windows_cmd_string(s): """Returns a string that is usable by the Windows cmd.exe. 
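    For example (illustrative), ``a & b`` becomes ``a ^& b`` and a literal
    ``^`` is doubled to ``^^``.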
The escaping is based on details here and empirical testing: http://www.robvanderwoude.com/escapechars.php """ for c in '^()%!<>&|"': s = s.replace(c, '^' + c) return s def argvquote(arg, force=False): """ Returns an argument quoted in such a way that that CommandLineToArgvW on Windows will return the argument string unchanged. This is the same thing Popen does when supplied with an list of arguments. Arguments in a command line should be separated by spaces; this function does not add these spaces. This implementation follows the suggestions outlined here: https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/ """ if not force and len(arg) != 0 and not any([c in arg for c in ' \t\n\v"']): return arg else: n_backslashes = 0 cmdline = '"' for c in arg: if c == "\\": # first count the number of current backslashes n_backslashes += 1 continue if c == '"': # Escape all backslashes and the following double quotation mark cmdline += (n_backslashes * 2 + 1) * '\\' else: # backslashes are not special here cmdline += n_backslashes * '\\' n_backslashes = 0 cmdline += c # Escape all backslashes, but let the terminating # double quotation mark we add below be interpreted # as a metacharacter cmdline += + n_backslashes * 2 * '\\' + '"' return cmdline def on_main_thread(): """Checks if we are on the main thread or not.""" return threading.current_thread() is threading.main_thread() _DEFAULT_SENTINEL = object() @contextlib.contextmanager def swap(namespace, name, value, default=_DEFAULT_SENTINEL): """Swaps a current variable name in a namespace for another value, and then replaces it when the context is exited. """ old = getattr(namespace, name, default) setattr(namespace, name, value) yield value if old is default: delattr(namespace, name) else: setattr(namespace, name, old) @contextlib.contextmanager def swap_values(d, updates, default=_DEFAULT_SENTINEL): """Updates a dictionary (or other mapping) with values from another mapping, and then restores the original mapping when the context is exited. """ old = {k: d.get(k, default) for k in updates} d.update(updates) yield for k, v in old.items(): if v is default and k in d: del d[k] else: d[k] = v # # Validators and converters # def is_int(x): """Tests if something is an integer""" return isinstance(x, int) def is_float(x): """Tests if something is a float""" return isinstance(x, float) def is_string(x): """Tests if something is a string""" return isinstance(x, str) def is_slice(x): """Tests if something is a slice""" return isinstance(x, slice) def is_callable(x): """Tests if something is callable""" return callable(x) def is_string_or_callable(x): """Tests if something is a string or callable""" return is_string(x) or is_callable(x) def is_class(x): """Tests if something is a class""" return isinstance(x, type) def always_true(x): """Returns True""" return True def always_false(x): """Returns False""" return False def ensure_string(x): """Returns a string if x is not a string, and x if it already is.""" return str(x) def is_env_path(x): """This tests if something is an environment path, ie a list of strings.""" return isinstance(x, EnvPath) def str_to_env_path(x): """Converts a string to an environment path, ie a list of strings, splitting on the OS separator. """ # splitting will be done implicitly in EnvPath's __init__ return EnvPath(x) def env_path_to_str(x): """Converts an environment path to a string by joining on the OS separator. 
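
    Illustrative example (assuming a POSIX ``os.pathsep`` of ``':'``)::

        >>> env_path_to_str(['/usr/bin', '/usr/local/bin'])
        '/usr/bin:/usr/local/bin'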
""" return os.pathsep.join(x) def is_bool(x): """Tests if something is a boolean.""" return isinstance(x, bool) def is_logfile_opt(x): """ Checks if x is a valid $XONSH_TRACEBACK_LOGFILE option. Returns False if x is not a writable/creatable file or an empty string or None. """ if x is None: return True if not isinstance(x, str): return False else: return (is_writable_file(x) or x == '') def to_logfile_opt(x): """ Converts a $XONSH_TRACEBACK_LOGFILE option to either a str containing the filepath if it is a writable file or None if the filepath is not valid, informing the user on stderr about the invalid choice. """ if is_logfile_opt(x): return x else: # if option is not valid, return a proper # option and inform the user on stderr sys.stderr.write('xonsh: $XONSH_TRACEBACK_LOGFILE must be a ' 'filepath pointing to a file that either exists ' 'and is writable or that can be created.\n') return None def logfile_opt_to_str(x): """ Detypes a $XONSH_TRACEBACK_LOGFILE option. """ if x is None: # None should not be detyped to 'None', as 'None' constitutes # a perfectly valid filename and retyping it would introduce # ambiguity. Detype to the empty string instead. return '' return str(x) _FALSES = LazyObject(lambda: frozenset(['', '0', 'n', 'f', 'no', 'none', 'false']), globals(), '_FALSES') def to_bool(x): """"Converts to a boolean in a semantically meaningful way.""" if isinstance(x, bool): return x elif isinstance(x, str): return False if x.lower() in _FALSES else True else: return bool(x) def to_itself(x): """No conversion, returns itself.""" return x def bool_to_str(x): """Converts a bool to an empty string if False and the string '1' if True. """ return '1' if x else '' _BREAKS = LazyObject(lambda: frozenset(['b', 'break', 's', 'skip', 'q', 'quit']), globals(), '_BREAKS') def to_bool_or_break(x): if isinstance(x, str) and x.lower() in _BREAKS: return 'break' else: return to_bool(x) def is_bool_or_int(x): """Returns whether a value is a boolean or integer.""" return is_bool(x) or is_int(x) def to_bool_or_int(x): """Converts a value to a boolean or an integer.""" if isinstance(x, str): return int(x) if x.isdigit() else to_bool(x) elif is_int(x): # bools are ints too! return x else: return bool(x) def bool_or_int_to_str(x): """Converts a boolean or integer to a string.""" return bool_to_str(x) if is_bool(x) else str(x) @lazyobject def SLICE_REG(): return re.compile(r'(?P(?:-\d)?\d*):(?P(?:-\d)?\d*):?(?P(?:-\d)?\d*)') def ensure_slice(x): """Try to convert an object into a slice, complain on failure""" if not x and x != 0: return slice(None) elif is_slice(x): return x try: x = int(x) if x != -1: s = slice(x, x + 1) else: s = slice(-1, None, None) except ValueError: x = x.strip('[]()') m = SLICE_REG.fullmatch(x) if m: groups = (int(i) if i else None for i in m.groups()) s = slice(*groups) else: raise ValueError('cannot convert {!r} to slice'.format(x)) except TypeError: try: s = slice(*(int(i) for i in x)) except (TypeError, ValueError): raise ValueError('cannot convert {!r} to slice'.format(x)) return s def get_portions(it, slices): """Yield from portions of an iterable. Parameters ---------- it: iterable slices: a slice or a list of slice objects """ if is_slice(slices): slices = [slices] if len(slices) == 1: s = slices[0] try: yield from itertools.islice(it, s.start, s.stop, s.step) return except ValueError: # islice failed pass it = list(it) for s in slices: yield from it[s] def is_slice_as_str(x): """ Test if string x is a slice. If not a string return False. 
""" try: x = x.strip('[]()') m = SLICE_REG.fullmatch(x) if m: return True except AttributeError: pass return False def is_int_as_str(x): """ Test if string x is an integer. If not a string return False. """ try: return x.isdecimal() except AttributeError: return False def is_string_set(x): """Tests if something is a set of strings""" return (isinstance(x, cabc.Set) and all(isinstance(a, str) for a in x)) def csv_to_set(x): """Convert a comma-separated list of strings to a set of strings.""" if not x: return set() else: return set(x.split(',')) def set_to_csv(x): """Convert a set of strings to a comma-separated list of strings.""" return ','.join(x) def pathsep_to_set(x): """Converts a os.pathsep separated string to a set of strings.""" if not x: return set() else: return set(x.split(os.pathsep)) def set_to_pathsep(x, sort=False): """Converts a set to an os.pathsep separated string. The sort kwarg specifies whether to sort the set prior to str conversion. """ if sort: x = sorted(x) return os.pathsep.join(x) def is_string_seq(x): """Tests if something is a sequence of strings""" return (isinstance(x, cabc.Sequence) and all(isinstance(a, str) for a in x)) def is_nonstring_seq_of_strings(x): """Tests if something is a sequence of strings, where the top-level sequence is not a string itself. """ return (isinstance(x, cabc.Sequence) and not isinstance(x, str) and all(isinstance(a, str) for a in x)) def pathsep_to_seq(x): """Converts a os.pathsep separated string to a sequence of strings.""" if not x: return [] else: return x.split(os.pathsep) def seq_to_pathsep(x): """Converts a sequence to an os.pathsep separated string.""" return os.pathsep.join(x) def pathsep_to_upper_seq(x): """Converts a os.pathsep separated string to a sequence of uppercase strings. """ if not x: return [] else: return x.upper().split(os.pathsep) def seq_to_upper_pathsep(x): """Converts a sequence to an uppercase os.pathsep separated string.""" return os.pathsep.join(x).upper() def is_bool_seq(x): """Tests if an object is a sequence of bools.""" return isinstance(x, cabc.Sequence) and all(isinstance(y, bool) for y in x) def csv_to_bool_seq(x): """Takes a comma-separated string and converts it into a list of bools.""" return [to_bool(y) for y in csv_to_set(x)] def bool_seq_to_csv(x): """Converts a sequence of bools to a comma-separated string.""" return ','.join(map(str, x)) def is_completions_display_value(x): return x in {'none', 'single', 'multi'} def to_completions_display_value(x): x = str(x).lower() if x in {'none', 'false'}: x = 'none' elif x in {'multi', 'true'}: x = 'multi' elif x == 'single': pass else: msg = '"{}" is not a valid value for $COMPLETIONS_DISPLAY. '.format(x) msg += 'Using "multi".' 
warnings.warn(msg, RuntimeWarning) x = 'multi' return x def setup_win_unicode_console(enable): """"Enables or disables unicode display on windows.""" try: import win_unicode_console except ImportError: win_unicode_console = False enable = to_bool(enable) if ON_WINDOWS and win_unicode_console: if enable: win_unicode_console.enable() else: win_unicode_console.disable() return enable # history validation _min_to_sec = lambda x: 60.0 * float(x) _hour_to_sec = lambda x: 60.0 * _min_to_sec(x) _day_to_sec = lambda x: 24.0 * _hour_to_sec(x) _month_to_sec = lambda x: 30.4375 * _day_to_sec(x) _year_to_sec = lambda x: 365.25 * _day_to_sec(x) _kb_to_b = lambda x: 1024 * int(x) _mb_to_b = lambda x: 1024 * _kb_to_b(x) _gb_to_b = lambda x: 1024 * _mb_to_b(x) _tb_to_b = lambda x: 1024 * _tb_to_b(x) CANON_HISTORY_UNITS = LazyObject( lambda: frozenset(['commands', 'files', 's', 'b']), globals(), 'CANON_HISTORY_UNITS') HISTORY_UNITS = LazyObject(lambda: { '': ('commands', int), 'c': ('commands', int), 'cmd': ('commands', int), 'cmds': ('commands', int), 'command': ('commands', int), 'commands': ('commands', int), 'f': ('files', int), 'files': ('files', int), 's': ('s', float), 'sec': ('s', float), 'second': ('s', float), 'seconds': ('s', float), 'm': ('s', _min_to_sec), 'min': ('s', _min_to_sec), 'mins': ('s', _min_to_sec), 'h': ('s', _hour_to_sec), 'hr': ('s', _hour_to_sec), 'hour': ('s', _hour_to_sec), 'hours': ('s', _hour_to_sec), 'd': ('s', _day_to_sec), 'day': ('s', _day_to_sec), 'days': ('s', _day_to_sec), 'mon': ('s', _month_to_sec), 'month': ('s', _month_to_sec), 'months': ('s', _month_to_sec), 'y': ('s', _year_to_sec), 'yr': ('s', _year_to_sec), 'yrs': ('s', _year_to_sec), 'year': ('s', _year_to_sec), 'years': ('s', _year_to_sec), 'b': ('b', int), 'byte': ('b', int), 'bytes': ('b', int), 'kb': ('b', _kb_to_b), 'kilobyte': ('b', _kb_to_b), 'kilobytes': ('b', _kb_to_b), 'mb': ('b', _mb_to_b), 'meg': ('b', _mb_to_b), 'megs': ('b', _mb_to_b), 'megabyte': ('b', _mb_to_b), 'megabytes': ('b', _mb_to_b), 'gb': ('b', _gb_to_b), 'gig': ('b', _gb_to_b), 'gigs': ('b', _gb_to_b), 'gigabyte': ('b', _gb_to_b), 'gigabytes': ('b', _gb_to_b), 'tb': ('b', _tb_to_b), 'terabyte': ('b', _tb_to_b), 'terabytes': ('b', _tb_to_b), }, globals(), 'HISTORY_UNITS') """Maps lowercase unit names to canonical name and conversion utilities.""" def is_history_tuple(x): """Tests if something is a proper history value, units tuple.""" if (isinstance(x, cabc.Sequence) and len(x) == 2 and isinstance(x[0], (int, float)) and x[1].lower() in CANON_HISTORY_UNITS): return True return False def is_history_backend(x): """Tests if something is a valid history backend.""" return is_string(x) or is_class(x) or isinstance(x, object) def is_dynamic_cwd_width(x): """ Determine if the input is a valid input for the DYNAMIC_CWD_WIDTH environment variable. 
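
    Illustrative examples (hypothetical values)::

        >>> is_dynamic_cwd_width((20.0, 'c'))
        True
        >>> is_dynamic_cwd_width('20%')
        False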
""" return (isinstance(x, tuple) and len(x) == 2 and isinstance(x[0], float) and x[1] in set('c%')) def to_dynamic_cwd_tuple(x): """Convert to a canonical cwd_width tuple.""" unit = 'c' if isinstance(x, str): if x[-1] == '%': x = x[:-1] unit = '%' else: unit = 'c' return (float(x), unit) else: return (float(x[0]), x[1]) def dynamic_cwd_tuple_to_str(x): """Convert a canonical cwd_width tuple to a string.""" if x[1] == '%': return str(x[0]) + '%' else: return str(x[0]) RE_HISTORY_TUPLE = LazyObject( lambda: re.compile('([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*([A-Za-z]*)'), globals(), 'RE_HISTORY_TUPLE') def to_history_tuple(x): """Converts to a canonical history tuple.""" if not isinstance(x, (cabc.Sequence, float, int)): raise ValueError('history size must be given as a sequence or number') if isinstance(x, str): m = RE_HISTORY_TUPLE.match(x.strip().lower()) return to_history_tuple((m.group(1), m.group(3))) elif isinstance(x, (float, int)): return to_history_tuple((x, 'commands')) units, converter = HISTORY_UNITS[x[1]] value = converter(x[0]) return (value, units) def history_tuple_to_str(x): """Converts a valid history tuple to a canonical string.""" return '{0} {1}'.format(*x) def format_color(string, **kwargs): """Formats strings that may contain colors. This simply dispatches to the shell instances method of the same name. The results of this function should be directly usable by print_color(). """ return builtins.__xonsh_shell__.shell.format_color(string, **kwargs) def print_color(string, **kwargs): """Prints a string that may contain colors. This dispatched to the shell method of the same name. Colors will be formatted if they have not already been. """ builtins.__xonsh_shell__.shell.print_color(string, **kwargs) def color_style_names(): """Returns an iterable of all available style names.""" return builtins.__xonsh_shell__.shell.color_style_names() def color_style(): """Returns the current color map.""" return builtins.__xonsh_shell__.shell.color_style() def _get_color_indexes(style_map): """ Generates the color and windows color index for a style """ import prompt_toolkit table = prompt_toolkit.terminal.win32_output.ColorLookupTable() pt_style = prompt_toolkit.styles.style_from_dict(style_map) for token in style_map: attr = pt_style.token_to_attrs[token] if attr.color is not None: try: index = table.lookup_color(attr.color, attr.bgcolor) except AttributeError: index = table.lookup_fg_color(attr.color) try: rgb = (int(attr.color[0:2], 16), int(attr.color[2:4], 16), int(attr.color[4:6], 16)) except Exception: rgb = None yield token, index, rgb def intensify_colors_for_cmd_exe(style_map, replace_colors=None, ansi=False): """Returns a modified style to where colors that maps to dark colors are replaced with brighter versions. 
Also expands the range used by the gray colors """ modified_style = {} stype = builtins.__xonsh_env__.get('SHELL_TYPE') if (not ON_WINDOWS or (stype not in ('prompt_toolkit', 'best')) or (stype == 'best' and not has_prompt_toolkit())): return modified_style if replace_colors is None: if ansi: replace_colors = { 1: '#ansiturquoise', # subst blue with bright cyan 2: '#ansigreen', # subst green with bright green 4: '#ansired', # subst red with bright red 5: '#ansifuchsia', # subst magenta with bright magenta 6: '#ansiyellow', # subst yellow with bright yellow 9: '#ansiteal', # subst intense blue (hard to read) # with dark cyan (which is readable) } else: replace_colors = { 1: '#44ffff', # subst blue with bright cyan 2: '#44ff44', # subst green with bright green 4: '#ff4444', # subst red with bright red 5: '#ff44ff', # subst magenta with bright magenta 6: '#ffff44', # subst yellow with bright yellow 9: '#00aaaa', # subst intense blue (hard to read) # with dark cyan (which is readable) } for token, idx, _ in _get_color_indexes(style_map): if idx in replace_colors: modified_style[token] = replace_colors[idx] return modified_style def expand_gray_colors_for_cmd_exe(style_map): """ Expand the style's gray scale color range. All gray scale colors has a tendency to map to the same default GRAY in cmd.exe. """ modified_style = {} stype = builtins.__xonsh_env__.get('SHELL_TYPE') if (not ON_WINDOWS or (stype not in ('prompt_toolkit', 'best')) or (stype == 'best' and not has_prompt_toolkit())): return modified_style for token, idx, rgb in _get_color_indexes(style_map): if idx == 7 and rgb: if sum(rgb) <= 306: # Equal and below '#666666 is reset to dark gray modified_style[token] = '#444444' elif sum(rgb) >= 408: # Equal and above 0x888888 is reset to white modified_style[token] = '#ffffff' return modified_style def intensify_colors_on_win_setter(enable): """Resets the style when setting the INTENSIFY_COLORS_ON_WIN environment variable. """ enable = to_bool(enable) if hasattr(builtins, '__xonsh_shell__'): if hasattr(builtins.__xonsh_shell__.shell.styler, 'style_name'): delattr(builtins.__xonsh_shell__.shell.styler, 'style_name') return enable def format_std_prepost(template, env=None): """Formats a template prefix/postfix string for a standard buffer. Returns a string suitable for prepending or appending. """ if not template: return '' env = builtins.__xonsh_env__ if env is None else env shell = builtins.__xonsh_shell__.shell try: s = shell.prompt_formatter(template) except Exception: print_exception() # \001\002 is there to fool pygments into not returning an empty string # for potentially empty input. This happens when the template is just a # color code with no visible text. 
    invis = '\001\002'
    s = shell.format_color(invis + s + invis, force_string=True)
    s = s.replace(invis, '')
    return s


_RE_STRING_START = "[bBprRuUf]*"
_RE_STRING_TRIPLE_DOUBLE = '"""'
_RE_STRING_TRIPLE_SINGLE = "'''"
_RE_STRING_DOUBLE = '"'
_RE_STRING_SINGLE = "'"
_STRINGS = (_RE_STRING_TRIPLE_DOUBLE,
            _RE_STRING_TRIPLE_SINGLE,
            _RE_STRING_DOUBLE,
            _RE_STRING_SINGLE)
RE_BEGIN_STRING = LazyObject(
    lambda: re.compile("(" + _RE_STRING_START +
                       '(' + "|".join(_STRINGS) + '))'),
    globals(), 'RE_BEGIN_STRING')
"""Regular expression matching the start of a string, including quotes and
leading characters (r, b, or u)"""

RE_STRING_START = LazyObject(lambda: re.compile(_RE_STRING_START),
                             globals(), 'RE_STRING_START')
"""Regular expression matching the characters before the quotes when
starting a string (r, b, or u, case insensitive)"""

RE_STRING_CONT = LazyDict({
    '"': lambda: re.compile(r'((\\(.|\n))|([^"\\]))*'),
    "'": lambda: re.compile(r"((\\(.|\n))|([^'\\]))*"),
    '"""': lambda: re.compile(r'((\\(.|\n))|([^"\\])|("(?!""))|\n)*'),
    "'''": lambda: re.compile(r"((\\(.|\n))|([^'\\])|('(?!''))|\n)*"),
}, globals(), 'RE_STRING_CONT')
"""Dictionary mapping starting quote sequences to regular expressions that
match the contents of a string beginning with those quotes (not including
the terminating quotes)"""


@lazyobject
def RE_COMPLETE_STRING():
    ptrn = ('^' + _RE_STRING_START + '(?P<quote>' + "|".join(_STRINGS) +
            ')' + '.*?(?P=quote)$')
    return re.compile(ptrn, re.DOTALL)


def check_for_partial_string(x):
    """Returns the starting index (inclusive), ending index (exclusive), and
    starting quote string of the most recent Python string found in the input.

    check_for_partial_string(x) -> (startix, endix, quote)

    Parameters
    ----------
    x : str
        The string to be checked (representing a line of terminal input)

    Returns
    -------
    startix : int (or None)
        The index where the most recent Python string found started
        (inclusive), or None if no strings exist in the input

    endix : int (or None)
        The index where the most recent Python string found ended (exclusive),
        or None if no strings exist in the input OR if the input ended in the
        middle of a Python string

    quote : str (or None)
        A string containing the quote used to start the string (e.g., b", ",
        '''), or None if no string was found.
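
    Examples
    --------
    Illustrative calls (hypothetical inputs)::

        >>> check_for_partial_string('print("hello")')
        (6, 13, '"')
        >>> check_for_partial_string("echo 'unclosed")
        (5, None, "'")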
""" string_indices = [] starting_quote = [] current_index = 0 match = re.search(RE_BEGIN_STRING, x) while match is not None: # add the start in start = match.start() quote = match.group(0) lenquote = len(quote) current_index += start # store the starting index of the string, as well as the # characters in the starting quotes (e.g., ", ', """, r", etc) string_indices.append(current_index) starting_quote.append(quote) # determine the string that should terminate this string ender = re.sub(RE_STRING_START, '', quote) x = x[start + lenquote:] current_index += lenquote # figure out what is inside the string continuer = RE_STRING_CONT[ender] contents = re.match(continuer, x) inside = contents.group(0) leninside = len(inside) current_index += contents.start() + leninside + len(ender) # if we are not at the end of the input string, add the ending index of # the string to string_indices if contents.end() < len(x): string_indices.append(current_index) x = x[leninside + len(ender):] # find the next match match = re.search(RE_BEGIN_STRING, x) numquotes = len(string_indices) if numquotes == 0: return (None, None, None) elif numquotes % 2: return (string_indices[-1], None, starting_quote[-1]) else: return (string_indices[-2], string_indices[-1], starting_quote[-1]) # regular expressions for matching environment variables # i.e $FOO, ${'FOO'} @lazyobject def POSIX_ENVVAR_REGEX(): pat = r"""\$({(?P['"])|)(?P\w+)((?P=quote)}|(?:\1\b))""" return re.compile(pat) def expandvars(path): """Expand shell variables of the forms $var, ${var} and %var%. Unknown variables are left unchanged.""" env = builtins.__xonsh_env__ if isinstance(path, bytes): path = path.decode(encoding=env.get('XONSH_ENCODING'), errors=env.get('XONSH_ENCODING_ERRORS')) elif isinstance(path, pathlib.Path): # get the path's string representation path = str(path) if '$' in path: for match in POSIX_ENVVAR_REGEX.finditer(path): name = match.group('envvar') if name in env: ensurer = env.get_ensurer(name) value = ensurer.detype(env[name]) path = POSIX_ENVVAR_REGEX.sub(value, path, count=1) return path # # File handling tools # def backup_file(fname): """Moves an existing file to a new name that has the current time right before the extension. 
""" # lazy imports import shutil from datetime import datetime base, ext = os.path.splitext(fname) timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f') newfname = '%s.%s%s' % (base, timestamp, ext) shutil.move(fname, newfname) def normabspath(p): """Returns as normalized absolute path, namely, normcase(abspath(p))""" return os.path.normcase(os.path.abspath(p)) def expanduser_abs_path(inp): """ Provides user expanded absolute path """ return os.path.abspath(expanduser(inp)) WINDOWS_DRIVE_MATCHER = LazyObject(lambda: re.compile(r'^\w:'), globals(), 'WINDOWS_DRIVE_MATCHER') def expand_case_matching(s): """Expands a string to a case insensitive globable string.""" t = [] openers = {'[', '{'} closers = {']', '}'} nesting = 0 drive_part = WINDOWS_DRIVE_MATCHER.match(s) if ON_WINDOWS else None if drive_part: drive_part = drive_part.group(0) t.append(drive_part) s = s[len(drive_part):] for c in s: if c in openers: nesting += 1 elif c in closers: nesting -= 1 elif nesting > 0: pass elif c.isalpha(): folded = c.casefold() if len(folded) == 1: c = '[{0}{1}]'.format(c.upper(), c.lower()) else: newc = ['[{0}{1}]?'.format(f.upper(), f.lower()) for f in folded[:-1]] newc = ''.join(newc) newc += '[{0}{1}{2}]'.format(folded[-1].upper(), folded[-1].lower(), c) c = newc t.append(c) return ''.join(t) def globpath(s, ignore_case=False, return_empty=False, sort_result=None): """Simple wrapper around glob that also expands home and env vars.""" o, s = _iglobpath(s, ignore_case=ignore_case, sort_result=sort_result) o = list(o) no_match = [] if return_empty else [s] return o if len(o) != 0 else no_match def _iglobpath(s, ignore_case=False, sort_result=None): s = builtins.__xonsh_expand_path__(s) if sort_result is None: sort_result = builtins.__xonsh_env__.get('GLOB_SORTED') if ignore_case: s = expand_case_matching(s) if sys.version_info > (3, 5): if '**' in s and '**/*' not in s: s = s.replace('**', '**/*') # `recursive` is only a 3.5+ kwarg. if sort_result: paths = glob.glob(s, recursive=True) paths.sort() paths = iter(paths) else: paths = glob.iglob(s, recursive=True) return paths, s else: if sort_result: paths = glob.glob(s) paths.sort() paths = iter(paths) else: paths = glob.iglob(s) return paths, s def iglobpath(s, ignore_case=False, sort_result=None): """Simple wrapper around iglob that also expands home and env vars.""" return _iglobpath(s, ignore_case=ignore_case, sort_result=sort_result)[0] def ensure_timestamp(t, datetime_format=None): if isinstance(t, (int, float)): return t try: return float(t) except (ValueError, TypeError): pass if datetime_format is None: datetime_format = builtins.__xonsh_env__['XONSH_DATETIME_FORMAT'] if isinstance(t, datetime.datetime): t = t.timestamp() else: t = datetime.datetime.strptime(t, datetime_format).timestamp() return t def format_datetime(dt): """Format datetime object to string base on $XONSH_DATETIME_FORMAT Env.""" format_ = builtins.__xonsh_env__['XONSH_DATETIME_FORMAT'] return dt.strftime(format_) def columnize(elems, width=80, newline='\n'): """Takes an iterable of strings and returns a list of lines with the elements placed in columns. Each line will be at most *width* columns. The newline character will be appended to the end of each line. """ sizes = [len(e) + 1 for e in elems] nelem = len(elems) ncols = 1 nrows = len(sizes) columns = [sizes] last_longest_row = max(sizes) while True: longest_row = sum(map(max, columns)) if longest_row - 1 <= width: # we might be able to fit another column. 
ncols += 1 nrows = nelem // ncols columns = [sizes[i*nrows:(i+1)*nrows] for i in range(ncols)] last_longest_row = longest_row else: # we can't fit another column ncols -= 1 nrows = nelem // ncols break pad = (width - last_longest_row + ncols) // ncols pad = pad if pad > 1 else 1 data = [elems[i*nrows:(i+1)*nrows] for i in range(ncols)] colwidths = [max(map(len, d)) + pad for d in data] colwidths[-1] -= pad row_t = ''.join(['{{row[{i}]: <{{w[{i}]}}}}'.format(i=i) for i in range(ncols)]) row_t += newline lines = [row_t.format(row=row, w=colwidths) for row in itertools.zip_longest(*data, fillvalue='')] return lines def unthreadable(f): """Decorator that specifies that a callable alias should be run only on the main thread process. This is often needed for debuggers and profilers. """ f.__xonsh_threadable__ = False return f def uncapturable(f): """Decorator that specifies that a callable alias should not be run with any capturing. This is often needed if the alias call interactive subprocess, like pagers and text editors. """ f.__xonsh_capturable__ = False return f def carriage_return(): """Writes a carriage return to stdout, and nothing else.""" print('\r', flush=True, end='') def deprecated(deprecated_in=None, removed_in=None): """Parametrized decorator that deprecates a function in a graceful manner. Updates the decorated function's docstring to mention the version that deprecation occurred in and the version it will be removed in if both of these values are passed. When removed_in is not a release equal to or less than the current release, call ``warnings.warn`` with details, while raising ``DeprecationWarning``. When removed_in is a release equal to or less than the current release, raise an ``AssertionError``. Parameters ---------- deprecated_in : str The version number that deprecated this function. removed_in : str The version number that this function will be removed in. 
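
    Example (an illustrative sketch with a hypothetical function)::

        >>> @deprecated(deprecated_in='0.5.0', removed_in='9.9.9')
        ... def polish_turbo():
        ...     pass
        >>> polish_turbo()  # warns: 'polish_turbo has been deprecated ...'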
""" message_suffix = _deprecated_message_suffix(deprecated_in, removed_in) if not message_suffix: message_suffix = '' def decorated(func): warning_message = '{} has been deprecated'.format(func.__name__) warning_message += message_suffix @functools.wraps(func) def wrapped(*args, **kwargs): _deprecated_error_on_expiration(func.__name__, removed_in) func(*args, **kwargs) warnings.warn(warning_message, DeprecationWarning) wrapped.__doc__ = ( '{}\n\n{}'.format(wrapped.__doc__, warning_message) if wrapped.__doc__ else warning_message) return wrapped return decorated def _deprecated_message_suffix(deprecated_in, removed_in): if deprecated_in and removed_in: message_suffix = ( ' in version {} and will be removed in version {}'.format( deprecated_in, removed_in)) elif deprecated_in and not removed_in: message_suffix = ( ' in version {}'.format(deprecated_in)) elif not deprecated_in and removed_in: message_suffix = ( ' and will be removed in version {}'.format(removed_in)) else: message_suffix = None return message_suffix def _deprecated_error_on_expiration(name, removed_in): if not removed_in: return elif LooseVersion(__version__) >= LooseVersion(removed_in): raise AssertionError( '{} has passed its version {} expiry date!'.format( name, removed_in)) xonsh-0.6.0/xonsh/tracer.py000066400000000000000000000171111320541242300156420ustar00rootroot00000000000000"""Implements a xonsh tracer.""" import os import re import sys import inspect import argparse import linecache import importlib import functools from xonsh.lazyasd import LazyObject from xonsh.platform import HAS_PYGMENTS from xonsh.tools import DefaultNotGiven, print_color, normabspath, to_bool from xonsh.inspectors import find_file, getouterframes from xonsh.lazyimps import pygments, pyghooks from xonsh.proc import STDOUT_CAPTURE_KINDS import xonsh.prompt.cwd as prompt terminal = LazyObject(lambda: importlib.import_module( 'pygments.formatters.terminal'), globals(), 'terminal') class TracerType(object): """Represents a xonsh tracer object, which keeps track of all tracing state. This is a singleton. """ _inst = None valid_events = frozenset(['line', 'call']) def __new__(cls, *args, **kwargs): if cls._inst is None: cls._inst = super(TracerType, cls).__new__(cls, *args, **kwargs) return cls._inst def __init__(self): self.prev_tracer = DefaultNotGiven self.files = set() self.usecolor = True self.lexer = pyghooks.XonshLexer() self.formatter = terminal.TerminalFormatter() self._last = ('', -1) # filename, lineno tuple def __del__(self): for f in set(self.files): self.stop(f) def color_output(self, usecolor): """Specify whether or not the tracer output should be colored.""" # we have to use a function to set usecolor because of the way that # lazyasd works. Namely, it cannot dispatch setattr to the target # object without being unable to access its own __dict__. This makes # setting an attr look like getting a function. 
self.usecolor = usecolor def start(self, filename): """Starts tracing a file.""" files = self.files if len(files) == 0: self.prev_tracer = sys.gettrace() files.add(normabspath(filename)) sys.settrace(self.trace) curr = inspect.currentframe() for frame, fname, *_ in getouterframes(curr, context=0): if normabspath(fname) in files: frame.f_trace = self.trace def stop(self, filename): """Stops tracing a file.""" filename = normabspath(filename) self.files.discard(filename) if len(self.files) == 0: sys.settrace(self.prev_tracer) curr = inspect.currentframe() for frame, fname, *_ in getouterframes(curr, context=0): if normabspath(fname) == filename: frame.f_trace = self.prev_tracer self.prev_tracer = DefaultNotGiven def trace(self, frame, event, arg): """Implements a line tracing function.""" if event not in self.valid_events: return self.trace fname = find_file(frame) if fname in self.files: lineno = frame.f_lineno curr = (fname, lineno) if curr != self._last: line = linecache.getline(fname, lineno).rstrip() s = tracer_format_line(fname, lineno, line, color=self.usecolor, lexer=self.lexer, formatter=self.formatter) print_color(s) self._last = curr return self.trace tracer = LazyObject(TracerType, globals(), 'tracer') COLORLESS_LINE = '{fname}:{lineno}:{line}' COLOR_LINE = ('{{PURPLE}}{fname}{{BLUE}}:' '{{GREEN}}{lineno}{{BLUE}}:' '{{NO_COLOR}}') def tracer_format_line(fname, lineno, line, color=True, lexer=None, formatter=None): """Formats a trace line suitable for printing.""" fname = min(fname, prompt._replace_home(fname), os.path.relpath(fname), key=len) if not color: return COLORLESS_LINE.format(fname=fname, lineno=lineno, line=line) cline = COLOR_LINE.format(fname=fname, lineno=lineno) if not HAS_PYGMENTS: return cline + line # OK, so we have pygments tokens = pyghooks.partial_color_tokenize(cline) lexer = lexer or pyghooks.XonshLexer() tokens += pygments.lex(line, lexer=lexer) if tokens[-1][1] == '\n': del tokens[-1] elif tokens[-1][1].endswith('\n'): tokens[-1] = (tokens[-1][0], tokens[-1][1].rstrip()) return tokens # # Command line interface # def _find_caller(args): """Somewhat hacky method of finding the __file__ based on the line executed.""" re_line = re.compile(r'[^;\s|&<>]+\s+' + r'\s+'.join(args)) curr = inspect.currentframe() for _, fname, lineno, _, lines, _ in getouterframes(curr, context=1)[3:]: if lines is not None and re_line.search(lines[0]) is not None: return fname elif lineno == 1 and re_line.search(linecache.getline(fname, lineno)) is not None: # There is a bug in CPython such that getouterframes(curr, context=1) # will actually return the 2nd line in the code_context field, even though # line number is itself correct. We manually fix that in this branch. return fname else: msg = ('xonsh: warning: __file__ name could not be found. You may be ' 'trying to trace interactively. 
Please pass in the file names ' 'you want to trace explicitly.') print(msg, file=sys.stderr) def _on(ns, args): """Turns on tracing for files.""" for f in ns.files: if f == '__file__': f = _find_caller(args) if f is None: continue tracer.start(f) def _off(ns, args): """Turns off tracing for files.""" for f in ns.files: if f == '__file__': f = _find_caller(args) if f is None: continue tracer.stop(f) def _color(ns, args): """Manages color action for tracer CLI.""" tracer.color_output(ns.toggle) @functools.lru_cache(1) def _tracer_create_parser(): """Creates tracer argument parser""" p = argparse.ArgumentParser(prog='trace', description='tool for tracing xonsh code as it runs.') subp = p.add_subparsers(title='action', dest='action') onp = subp.add_parser('on', aliases=['start', 'add'], help='begins tracing selected files.') onp.add_argument('files', nargs='*', default=['__file__'], help=('file paths to watch, use "__file__" (default) to select ' 'the current file.')) off = subp.add_parser('off', aliases=['stop', 'del', 'rm'], help='removes selected files fom tracing.') off.add_argument('files', nargs='*', default=['__file__'], help=('file paths to stop watching, use "__file__" (default) to ' 'select the current file.')) col = subp.add_parser('color', help='output color management for tracer.') col.add_argument('toggle', type=to_bool, help='true/false, y/n, etc. to toggle color usage.') return p _TRACER_MAIN_ACTIONS = { 'on': _on, 'add': _on, 'start': _on, 'rm': _off, 'off': _off, 'del': _off, 'stop': _off, 'color': _color, } def tracermain(args=None, stdin=None, stdout=None, stderr=None, spec=None): """Main function for tracer command-line interface.""" parser = _tracer_create_parser() ns = parser.parse_args(args) usecolor = ((spec.captured not in STDOUT_CAPTURE_KINDS) and sys.stdout.isatty()) tracer.color_output(usecolor) return _TRACER_MAIN_ACTIONS[ns.action](ns, args) xonsh-0.6.0/xonsh/winutils.py000066400000000000000000000350261320541242300162450ustar00rootroot00000000000000""" This file is based on the code from https://github.com/JustAMan/pyWinClobber/blob/master/win32elevate.py Copyright (c) 2013 by JustAMan at GitHub Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import os import ctypes import subprocess from ctypes import c_ulong, c_char_p, c_int, c_void_p, POINTER, byref from ctypes.wintypes import (HANDLE, BOOL, DWORD, HWND, HINSTANCE, HKEY, LPDWORD, SHORT, LPCWSTR, WORD, SMALL_RECT, LPCSTR) from xonsh.lazyasd import lazyobject from xonsh import lazyimps # we aren't amalgamated in this module. 
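# Illustrative use of the public API (a sketch; the command shown is
# hypothetical):
#
#     from xonsh.winutils import sudo
#     sudo('cmd.exe', ['/C', 'whoami'])
#
# which re-launches the executable with elevated rights via ShellExecuteEx
# and waits for it to finish.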
from xonsh import platform __all__ = ('sudo', ) @lazyobject def CloseHandle(): ch = ctypes.windll.kernel32.CloseHandle ch.argtypes = (HANDLE,) ch.restype = BOOL return ch @lazyobject def GetActiveWindow(): gaw = ctypes.windll.user32.GetActiveWindow gaw.argtypes = () gaw.restype = HANDLE return gaw TOKEN_READ = 0x20008 class ShellExecuteInfo(ctypes.Structure): _fields_ = [ ('cbSize', DWORD), ('fMask', c_ulong), ('hwnd', HWND), ('lpVerb', c_char_p), ('lpFile', c_char_p), ('lpParameters', c_char_p), ('lpDirectory', c_char_p), ('nShow', c_int), ('hInstApp', HINSTANCE), ('lpIDList', c_void_p), ('lpClass', c_char_p), ('hKeyClass', HKEY), ('dwHotKey', DWORD), ('hIcon', HANDLE), ('hProcess', HANDLE) ] def __init__(self, **kw): ctypes.Structure.__init__(self) self.cbSize = ctypes.sizeof(self) for field_name, field_value in kw.items(): setattr(self, field_name, field_value) @lazyobject def ShellExecuteEx(): see = ctypes.windll.Shell32.ShellExecuteExA PShellExecuteInfo = ctypes.POINTER(ShellExecuteInfo) see.argtypes = (PShellExecuteInfo, ) see.restype = BOOL return see @lazyobject def WaitForSingleObject(): wfso = ctypes.windll.kernel32.WaitForSingleObject wfso.argtypes = (HANDLE, DWORD) wfso.restype = DWORD return wfso # SW_HIDE = 0 SW_SHOW = 5 SEE_MASK_NOCLOSEPROCESS = 0x00000040 SEE_MASK_NO_CONSOLE = 0x00008000 INFINITE = -1 def wait_and_close_handle(process_handle): """ Waits till spawned process finishes and closes the handle for it Parameters ---------- process_handle : HANDLE The Windows handle for the process """ WaitForSingleObject(process_handle, INFINITE) CloseHandle(process_handle) def sudo(executable, args=None): """ This will re-run current Python script requesting to elevate administrative rights. Parameters ---------- param executable : str The path/name of the executable args : list of str The arguments to be passed to the executable """ if not args: args = [] execute_info = ShellExecuteInfo( fMask=SEE_MASK_NOCLOSEPROCESS | SEE_MASK_NO_CONSOLE, hwnd=GetActiveWindow(), lpVerb=b'runas', lpFile=executable.encode('utf-8'), lpParameters=subprocess.list2cmdline(args).encode('utf-8'), lpDirectory=None, nShow=SW_SHOW ) if not ShellExecuteEx(byref(execute_info)): raise ctypes.WinError() wait_and_close_handle(execute_info.hProcess) # # The following has been refactored from # http://stackoverflow.com/a/37505496/2312428 # # input flags ENABLE_PROCESSED_INPUT = 0x0001 ENABLE_LINE_INPUT = 0x0002 ENABLE_ECHO_INPUT = 0x0004 ENABLE_WINDOW_INPUT = 0x0008 ENABLE_MOUSE_INPUT = 0x0010 ENABLE_INSERT_MODE = 0x0020 ENABLE_QUICK_EDIT_MODE = 0x0040 # output flags ENABLE_PROCESSED_OUTPUT = 0x0001 ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 # VT100 (Win 10) def check_zero(result, func, args): if not result: err = ctypes.get_last_error() if err: raise ctypes.WinError(err) return args @lazyobject def GetStdHandle(): return lazyimps._winapi.GetStdHandle @lazyobject def STDHANDLES(): """Tuple of the Windows handles for (stdin, stdout, stderr).""" hs = [lazyimps._winapi.STD_INPUT_HANDLE, lazyimps._winapi.STD_OUTPUT_HANDLE, lazyimps._winapi.STD_ERROR_HANDLE] hcons = [] for h in hs: hcon = GetStdHandle(int(h)) hcons.append(hcon) return tuple(hcons) @lazyobject def GetConsoleMode(): gcm = ctypes.windll.kernel32.GetConsoleMode gcm.errcheck = check_zero gcm.argtypes = (HANDLE, # _In_ hConsoleHandle LPDWORD) # _Out_ lpMode return gcm def get_console_mode(fd=1): """Get the mode of the active console input, output, or error buffer. 
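    For example (illustrative), ``get_console_mode(1) & ENABLE_PROCESSED_OUTPUT``
    tests whether processed output is currently enabled on stdout.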
Note that if the process isn't attached to a console, this function raises an EBADF IOError. Parameters ---------- fd : int Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), and 2 for stderr """ mode = DWORD() hcon = STDHANDLES[fd] GetConsoleMode(hcon, byref(mode)) return mode.value @lazyobject def SetConsoleMode(): scm = ctypes.windll.kernel32.SetConsoleMode scm.errcheck = check_zero scm.argtypes = (HANDLE, # _In_ hConsoleHandle DWORD) # _Out_ lpMode return scm def set_console_mode(mode, fd=1): """Set the mode of the active console input, output, or error buffer. Note that if the process isn't attached to a console, this function raises an EBADF IOError. Parameters ---------- mode : int Mode flags to set on the handle. fd : int, optional Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), and 2 for stderr """ hcon = STDHANDLES[fd] SetConsoleMode(hcon, mode) def enable_virtual_terminal_processing(): """Enables virtual terminal processing on Windows. This includes ANSI escape sequence interpretation. See http://stackoverflow.com/a/36760881/2312428 """ SetConsoleMode(GetStdHandle(-11), 7) @lazyobject def COORD(): if platform.has_prompt_toolkit(): # turns out that PTK has a separate ctype wrapper # for this struct and also wraps similar function calls # we need to use the same struct to prevent clashes. import prompt_toolkit.win32_types return prompt_toolkit.win32_types.COORD class _COORD(ctypes.Structure): """Struct from the winapi, representing coordinates in the console. Attributes ---------- X : int Column position Y : int Row position """ _fields_ = [("X", SHORT), ("Y", SHORT)] return _COORD @lazyobject def ReadConsoleOutputCharacterA(): rcoc = ctypes.windll.kernel32.ReadConsoleOutputCharacterA rcoc.errcheck = check_zero rcoc.argtypes = (HANDLE, # _In_ hConsoleOutput LPCSTR, # _Out_ LPTSTR lpMode DWORD, # _In_ nLength COORD, # _In_ dwReadCoord, LPDWORD) # _Out_ lpNumberOfCharsRead rcoc.restype = BOOL return rcoc @lazyobject def ReadConsoleOutputCharacterW(): rcoc = ctypes.windll.kernel32.ReadConsoleOutputCharacterW rcoc.errcheck = check_zero rcoc.argtypes = (HANDLE, # _In_ hConsoleOutput LPCWSTR, # _Out_ LPTSTR lpMode DWORD, # _In_ nLength COORD, # _In_ dwReadCoord, LPDWORD) # _Out_ lpNumberOfCharsRead rcoc.restype = BOOL return rcoc def read_console_output_character(x=0, y=0, fd=1, buf=None, bufsize=1024, raw=False): """Reads characters from the console buffer. Parameters ---------- x : int, optional Starting column. y : int, optional Starting row. fd : int, optional Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), and 2 for stderr. buf : ctypes.c_wchar_p if raw else ctypes.c_wchar_p, optional An existing buffer to (re-)use. bufsize : int, optional The maximum read size. raw : bool, optional Whether to read in and return as bytes (True) or as a unicode string (False, default). Returns ------- value : str Result of what was read, may be shorter than bufsize. """ hcon = STDHANDLES[fd] if buf is None: if raw: buf = ctypes.c_char_p(b" " * bufsize) else: buf = ctypes.c_wchar_p(" " * bufsize) coord = COORD(x, y) n = DWORD() if raw: ReadConsoleOutputCharacterA(hcon, buf, bufsize, coord, byref(n)) else: ReadConsoleOutputCharacterW(hcon, buf, bufsize, coord, byref(n)) return buf.value[:n.value] def pread_console(fd, buffersize, offset, buf=None): """This is a console-based implementation of os.pread() for windows. that uses read_console_output_character(). 
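The flat ``offset`` is translated into screen-buffer coordinates from the
current terminal width, i.e. ``x = offset % cols`` and ``y = offset // cols``,
and at most ``buffersize`` raw bytes are read starting at that cell.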
""" cols, rows = os.get_terminal_size(fd=fd) x = offset % cols y = offset // cols return read_console_output_character(x=x, y=y, fd=fd, buf=buf, bufsize=buffersize, raw=True) # # The following piece has been forked from colorama.win32 # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. # @lazyobject def CONSOLE_SCREEN_BUFFER_INFO(): if platform.has_prompt_toolkit(): # turns out that PTK has a separate ctype wrapper # for this struct and also wraps kernel32.GetConsoleScreenBufferInfo # we need to use the same struct to prevent clashes. import prompt_toolkit.win32_types return prompt_toolkit.win32_types.CONSOLE_SCREEN_BUFFER_INFO # Otherwise we should wrap it ourselves COORD() # force COORD to load class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): """Struct from in wincon.h. See Windows API docs for more details. Attributes ---------- dwSize : COORD Size of dwCursorPosition : COORD Current cursor location. wAttributes : WORD Flags for screen buffer. srWindow : SMALL_RECT Actual size of screen dwMaximumWindowSize : COORD Maximum window scrollback size. """ _fields_ = [ ("dwSize", COORD), ("dwCursorPosition", COORD), ("wAttributes", WORD), ("srWindow", SMALL_RECT), ("dwMaximumWindowSize", COORD), ] return _CONSOLE_SCREEN_BUFFER_INFO @lazyobject def GetConsoleScreenBufferInfo(): """Returns the windows version of the get screen buffer.""" gcsbi = ctypes.windll.kernel32.GetConsoleScreenBufferInfo gcsbi.errcheck = check_zero gcsbi.argtypes = ( HANDLE, POINTER(CONSOLE_SCREEN_BUFFER_INFO), ) gcsbi.restype = BOOL return gcsbi def get_console_screen_buffer_info(fd=1): """Returns an screen buffer info object for the relevant stdbuf. Parameters ---------- fd : int, optional Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), and 2 for stderr. Returns ------- csbi : CONSOLE_SCREEN_BUFFER_INFO Information about the console screen buffer. """ hcon = STDHANDLES[fd] csbi = CONSOLE_SCREEN_BUFFER_INFO() GetConsoleScreenBufferInfo(hcon, byref(csbi)) return csbi # # end colorama forked section # def get_cursor_position(fd=1): """Gets the current cursor position as an (x, y) tuple.""" csbi = get_console_screen_buffer_info(fd=fd) coord = csbi.dwCursorPosition return (coord.X, coord.Y) def get_cursor_offset(fd=1): """Gets the current cursor position as a total offset value.""" csbi = get_console_screen_buffer_info(fd=fd) pos = csbi.dwCursorPosition size = csbi.dwSize return (pos.Y * size.X) + pos.X def get_position_size(fd=1): """Gets the current cursor position and screen size tuple: (x, y, columns, lines). """ info = get_console_screen_buffer_info(fd) return (info.dwCursorPosition.X, info.dwCursorPosition.Y, info.dwSize.X, info.dwSize.Y) @lazyobject def SetConsoleScreenBufferSize(): """Set screen buffer dimensions.""" scsbs = ctypes.windll.kernel32.SetConsoleScreenBufferSize scsbs.errcheck = check_zero scsbs.argtypes = ( HANDLE, # _In_ HANDLE hConsoleOutput COORD, # _In_ COORD dwSize ) scsbs.restype = BOOL return scsbs def set_console_screen_buffer_size(x, y, fd=1): """Sets the console size for a standard buffer. Parameters ---------- x : int Number of columns. y : int Number of rows. fd : int, optional Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), and 2 for stderr. 
""" coord = COORD() coord.X = x coord.Y = y hcon = STDHANDLES[fd] rtn = SetConsoleScreenBufferSize(hcon, coord) return rtn @lazyobject def SetConsoleCursorPosition(): """Set cursor position in console.""" sccp = ctypes.windll.kernel32.SetConsoleCursorPosition sccp.errcheck = check_zero sccp.argtypes = ( HANDLE, # _In_ HANDLE hConsoleOutput COORD, # _In_ COORD dwCursorPosition ) sccp.restype = BOOL return sccp def set_console_cursor_position(x, y, fd=1): """Sets the console cursor position for a standard buffer. Parameters ---------- x : int Number of columns. y : int Number of rows. fd : int, optional Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), and 2 for stderr. """ coord = COORD() coord.X = x coord.Y = y hcon = STDHANDLES[fd] rtn = SetConsoleCursorPosition(hcon, coord) return rtn xonsh-0.6.0/xonsh/wizard.py000066400000000000000000000527471320541242300157000ustar00rootroot00000000000000"""Tools for creating command-line and web-based wizards from a tree of nodes. """ import os import ast import json import pprint import builtins import textwrap from xonsh.tools import to_bool, to_bool_or_break, backup_file, print_color from xonsh.jsonutils import serialize_xonsh_json # # Nodes themselves # class Node(object): """Base type of all nodes.""" attrs = () def __str__(self): return PrettyFormatter(self).visit() def __repr__(self): return str(self).replace('\n', '') class Wizard(Node): """Top-level node in the tree.""" attrs = ('children', 'path') def __init__(self, children, path=None): self.children = children self.path = path class Pass(Node): """Simple do-nothing node""" class Message(Node): """Contains a simple message to report to the user.""" attrs = ('message') def __init__(self, message): self.message = message class Question(Node): """Asks a question and then chooses the next node based on the response. """ attrs = ('question', 'responses', 'converter', 'path') def __init__(self, question, responses, converter=None, path=None): """ Parameters ---------- question : str The question itself. responses : dict with str keys and Node values Mapping from user-input responses to nodes. converter : callable, optional Converts the string the user typed into another object that serves as a key to the responses dict. path : str or sequence of str, optional A path within the storage object. """ self.question = question self.responses = responses self.converter = converter self.path = path class Input(Node): """Gets input from the user.""" attrs = ('prompt', 'converter', 'show_conversion', 'confirm', 'path') def __init__(self, prompt='>>> ', converter=None, show_conversion=False, confirm=False, retry=False, path=None): """ Parameters ---------- prompt : str, optional Prompt string prior to input converter : callable, optional Converts the string the user typed into another object prior to storage. show_conversion : bool, optional Flag for whether or not to show the results of the conversion function if the conversion function was meaningfully executed. Default False. confirm : bool, optional Whether the input should be confirmed until true or broken, default False. retry : bool, optional In the event that the conversion operation fails, should users be re-prompted until they provide valid input. Default False. path : str or sequence of str, optional A path within the storage object. 
""" self.prompt = prompt self.converter = converter self.show_conversion = show_conversion self.confirm = confirm self.retry = retry self.path = path class While(Node): """Computes a body while a condition function evaluates to true. The condition function has the form ``cond(visitor=None, node=None)`` and must return an object that responds to the Python magic method ``__bool__``. The beg attribute specifies the number to start the loop iteration at. """ attrs = ('cond', 'body', 'idxname', 'beg', 'path') def __init__(self, cond, body, idxname='idx', beg=0, path=None): """ Parameters ---------- cond : callable Function that determines if the next loop iteration should be executed. body : sequence of nodes A list of node to execute on each iteration. The condition function has the form ``cond(visitor=None, node=None)`` and must return an object that responds to the Python magic method ``__bool__``. idxname : str, optional The variable name for the index. beg : int, optional The first index value when evaluating path format strings. path : str or sequence of str, optional A path within the storage object. """ self.cond = cond self.body = body self.idxname = idxname self.beg = beg self.path = path # # Helper nodes # class YesNo(Question): """Represents a simple yes/no question.""" def __init__(self, question, yes, no, path=None): """ Parameters ---------- question : str The question itself. yes : Node Node to execute if the response is True. no : Node Node to execute if the response is False. path : str or sequence of str, optional A path within the storage object. """ responses = {True: yes, False: no} super().__init__(question, responses, converter=to_bool, path=path) class TrueFalse(Input): """Input node the returns a True or False value.""" def __init__(self, prompt='yes or no [default: no]? ', path=None): super().__init__(prompt=prompt, converter=to_bool, show_conversion=False, confirm=False, path=path) class TrueFalseBreak(Input): """Input node the returns a True, False, or 'break' value.""" def __init__(self, prompt='yes, no, or break [default: no]? ', path=None): super().__init__(prompt=prompt, converter=to_bool_or_break, show_conversion=False, confirm=False, path=path) class StoreNonEmpty(Input): """Stores the user input only if the input was not an empty string. This works by wrapping the converter function. """ def __init__(self, prompt='>>> ', converter=None, show_conversion=False, confirm=False, retry=False, path=None, store_raw=False): def nonempty_converter(x): """Converts non-empty values and converts empty inputs to Unstorable. """ if len(x) == 0: x = Unstorable elif converter is None: pass elif store_raw: converter(x) # make sure str is valid, even if storing raw else: x = converter(x) return x super().__init__(prompt=prompt, converter=nonempty_converter, show_conversion=show_conversion, confirm=confirm, path=path, retry=retry) class StateFile(Input): """Node for representing the state as a JSON file under a default or user given file name. This node type is likely not useful on its own. """ attrs = ('default_file', 'check', 'ask_filename') def __init__(self, default_file=None, check=True, ask_filename=True): """ Parameters ---------- default_file : str, optional The default filename to save the file as. check : bool, optional Whether to print the current state and ask if it should be saved/loaded prior to asking for the file name and saving the file, default=True. 
ask_filename : bool, optional Whether to ask for the filename (if ``False``, always use the default filename) """ self._df = None super().__init__(prompt='filename: ', converter=None, confirm=False, path=None) self.ask_filename = ask_filename self.default_file = default_file self.check = check @property def default_file(self): return self._df @default_file.setter def default_file(self, val): self._df = val if val is None: self.prompt = 'filename: ' else: self.prompt = 'filename [default={0!r}]: '.format(val) class Save(StateFile): """Node for saving the state as a JSON file under a default or user given file name. """ class Load(StateFile): """Node for loading the state as a JSON file under a default or user given file name. """ def create_truefalse_cond(prompt='yes or no [default: no]? ', path=None): """This creates a basic condition function for use with nodes like While or other conditions. The condition function creates and visits a TrueFalse node and returns the result. This TrueFalse node takes the prompt and path that is passed in here. """ def truefalse_cond(visitor, node=None): """Prompts the user for a true/false condition.""" tf = TrueFalse(prompt=prompt, path=path) rtn = visitor.visit(tf) return rtn return truefalse_cond # # Tools for trees of nodes. # def _lowername(cls): return cls.__name__.lower() class Visitor(object): """Super-class for all classes that should walk over a tree of nodes. This implements the visit() method. """ def __init__(self, tree=None): self.tree = tree def visit(self, node=None): """Walks over a node. If no node is provided, the tree is used.""" if node is None: node = self.tree if node is None: raise RuntimeError('no node or tree given!') for clsname in map(_lowername, type.mro(node.__class__)): meth = getattr(self, 'visit_' + clsname, None) if callable(meth): rtn = meth(node) break else: msg = 'could not find valid visitor method for {0} on {1}' nodename = node.__class__.__name__ selfname = self.__class__.__name__ raise AttributeError(msg.format(nodename, selfname)) return rtn class PrettyFormatter(Visitor): """Formats a tree of nodes into a pretty string""" def __init__(self, tree=None, indent=' '): super().__init__(tree=tree) self.level = 0 self.indent = indent def visit_node(self, node): s = node.__class__.__name__ + '(' if len(node.attrs) == 0: return s + ')' s += '\n' self.level += 1 t = [] for aname in node.attrs: a = getattr(node, aname) t.append(self.visit(a) if isinstance(a, Node) else pprint.pformat(a)) t = ['{0}={1}'.format(n, x) for n, x in zip(node.attrs, t)] s += textwrap.indent(',\n'.join(t), self.indent) self.level -= 1 s += '\n)' return s def visit_wizard(self, node): s = 'Wizard(children=[' if len(node.children) == 0: if node.path is None: return s + '])' else: return s + '], path={0!r})'.format(node.path) s += '\n' self.level += 1 s += textwrap.indent(',\n'.join(map(self.visit, node.children)), self.indent) self.level -= 1 if node.path is None: s += '\n])' else: s += '{0}],\n{0}path={1!r}\n)'.format(self.indent, node.path) return s def visit_message(self, node): return 'Message({0!r})'.format(node.message) def visit_question(self, node): s = node.__class__.__name__ + '(\n' self.level += 1 s += self.indent + 'question={0!r},\n'.format(node.question) s += self.indent + 'responses={' if len(node.responses) == 0: s += '}' else: s += '\n' t = sorted(node.responses.items()) t = ['{0!r}: {1}'.format(k, self.visit(v)) for k, v in t] s += textwrap.indent(',\n'.join(t), 2*self.indent) s += '\n' + self.indent + '}' if node.converter is not 
None: s += ',\n' + self.indent + 'converter={0!r}'.format(node.converter) if node.path is not None: s += ',\n' + self.indent + 'path={0!r}'.format(node.path) self.level -= 1 s += '\n)' return s def visit_input(self, node): s = '{0}(prompt={1!r}'.format(node.__class__.__name__, node.prompt) if node.converter is None and node.path is None: return s + '\n)' if node.converter is not None: s += ',\n' + self.indent + 'converter={0!r}'.format(node.converter) s += ',\n' + self.indent + 'show_conversion={0!r}'.format(node.show_conversion) s += ',\n' + self.indent + 'confirm={0!r}'.format(node.confirm) s += ',\n' + self.indent + 'retry={0!r}'.format(node.retry) if node.path is not None: s += ',\n' + self.indent + 'path={0!r}'.format(node.path) s += '\n)' return s def visit_statefile(self, node): s = '{0}(default_file={1!r}, check={2}, ask_filename={3})' s = s.format(node.__class__.__name__, node.default_file, node.check, node.ask_filename) return s def visit_while(self, node): s = '{0}(cond={1!r}'.format(node.__class__.__name__, node.cond) s += ',\n' + self.indent + 'body=[' if len(node.body) > 0: s += '\n' self.level += 1 s += textwrap.indent(',\n'.join(map(self.visit, node.body)), self.indent) self.level -= 1 s += '\n' + self.indent s += ']' s += ',\n' + self.indent + 'idxname={0!r}'.format(node.idxname) s += ',\n' + self.indent + 'beg={0!r}'.format(node.beg) if node.path is not None: s += ',\n' + self.indent + 'path={0!r}'.format(node.path) s += '\n)' return s def ensure_str_or_int(x): """Creates a string or int.""" if isinstance(x, int): return x x = x if isinstance(x, str) else str(x) try: x = ast.literal_eval(x) except (ValueError, SyntaxError): pass if not isinstance(x, (int, str)): msg = '{0!r} could not be converted to int or str'.format(x) raise ValueError(msg) return x def canon_path(path, indices=None): """Returns the canonical form of a path, which is a tuple of str or ints. Indices may be optionally passed in. """ if not isinstance(path, str): return tuple(map(ensure_str_or_int, path)) if indices is not None: path = path.format(**indices) path = path[1:] if path.startswith('/') else path path = path[:-1] if path.endswith('/') else path if len(path) == 0: return () return tuple(map(ensure_str_or_int, path.split('/'))) class UnstorableType(object): """Represents an unstorable return value for when no input was given or such input was skipped. Typically represented by the Unstorable singleton. """ _inst = None def __new__(cls, *args, **kwargs): if cls._inst is None: cls._inst = super(UnstorableType, cls).__new__(cls, *args, **kwargs) return cls._inst Unstorable = UnstorableType() class StateVisitor(Visitor): """This class visits the nodes and stores the results in a top-level dict of data according to the state path of the node. The the node does not have a path or the path does not exist, the storage is skipped. This class can be optionally initialized with an existing state. 
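Paths use a '/'-separated syntax and may be format strings; for example, a
node with ``path='/env/PROMPT'`` stores its visited value at
``state['env']['PROMPT']``, while ``path='/foreign_shells/{idx}/shell'`` is
first formatted with the current loop indices, creating any intermediate
dicts or lists as needed along the way.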
""" def __init__(self, tree=None, state=None, indices=None): super().__init__(tree=tree) self.state = {} if state is None else state self.indices = {} if indices is None else indices def visit(self, node=None): if node is None: node = self.tree if node is None: raise RuntimeError('no node or tree given!') rtn = super().visit(node) path = getattr(node, 'path', None) if callable(path): path = path(visitor=self, node=node, val=rtn) if path is not None and rtn is not Unstorable: self.store(path, rtn, indices=self.indices) return rtn def store(self, path, val, indices=None): """Stores a value at the path location.""" path = canon_path(path, indices=indices) loc = self.state for p, n in zip(path[:-1], path[1:]): if isinstance(p, str) and p not in loc: loc[p] = {} if isinstance(n, str) else [] elif isinstance(p, int) and abs(p) + (p >= 0) > len(loc): i = abs(p) + (p >= 0) - len(loc) if isinstance(n, str): ex = [{} for _ in range(i)] else: ex = [[] for _ in range(i)] loc.extend(ex) loc = loc[p] p = path[-1] if isinstance(p, int) and abs(p) + (p >= 0) > len(loc): i = abs(p) + (p >= 0) - len(loc) ex = [None]*i loc.extend(ex) loc[p] = val YN = "{GREEN}yes{NO_COLOR} or {RED}no{NO_COLOR} [default: no]? " YNB = ('{GREEN}yes{NO_COLOR}, {RED}no{NO_COLOR}, or ' '{YELLOW}break{NO_COLOR} [default: no]? ') class PromptVisitor(StateVisitor): """Visits the nodes in the tree via the a command-line prompt.""" def __init__(self, tree=None, state=None, **kwargs): """ Parameters ---------- tree : Node, optional Tree of nodes to start visitor with. state : dict, optional Initial state to begin with. kwargs : optional Options that are passed through to the prompt via the shell's singleline() method. See BaseShell for mor details. """ super().__init__(tree=tree, state=state) self.env = builtins.__xonsh_env__ self.shell = builtins.__xonsh_shell__.shell self.shell_kwargs = kwargs def visit_wizard(self, node): for child in node.children: self.visit(child) def visit_pass(self, node): pass def visit_message(self, node): print_color(node.message) def visit_question(self, node): self.env['PROMPT'] = node.question r = self.shell.singleline(**self.shell_kwargs) if callable(node.converter): r = node.converter(r) self.visit(node.responses[r]) return r def visit_input(self, node): need_input = True while need_input: self.env['PROMPT'] = node.prompt raw = self.shell.singleline(**self.shell_kwargs) if callable(node.converter): try: x = node.converter(raw) except KeyboardInterrupt: raise except Exception: if node.retry: msg = ('{{BOLD_RED}}Invalid{{NO_COLOR}} input {0!r}, ' 'please retry.') print_color(msg.format(raw)) continue else: raise if node.show_conversion and x is not Unstorable \ and str(x) != raw: msg = '{{BOLD_PURPLE}}Converted{{NO_COLOR}} input {0!r} to {1!r}.' 
print_color(msg.format(raw, x)) else: x = raw if node.confirm: msg = 'Would you like to keep the input: {0}' print(msg.format(pprint.pformat(x))) confirmer = TrueFalseBreak(prompt=YNB) status = self.visit(confirmer) if isinstance(status, str) and status == 'break': x = Unstorable break else: need_input = not status else: need_input = False return x def visit_while(self, node): rtns = [] origidx = self.indices.get(node.idxname, None) self.indices[node.idxname] = idx = node.beg while node.cond(visitor=self, node=node): rtn = list(map(self.visit, node.body)) rtns.append(rtn) idx += 1 self.indices[node.idxname] = idx if origidx is None: del self.indices[node.idxname] else: self.indices[node.idxname] = origidx return rtns def visit_save(self, node): jstate = json.dumps(self.state, indent=1, sort_keys=True, default=serialize_xonsh_json) if node.check: msg = 'The current state is:\n\n{0}\n' print(msg.format(textwrap.indent(jstate, ' '))) ap = 'Would you like to save this state, ' + YN asker = TrueFalse(prompt=ap) do_save = self.visit(asker) if not do_save: return Unstorable fname = None if node.ask_filename: fname = self.visit_input(node) if fname is None or len(fname) == 0: fname = node.default_file if os.path.isfile(fname): backup_file(fname) else: os.makedirs(os.path.dirname(fname), exist_ok=True) with open(fname, 'w') as f: f.write(jstate) return fname def visit_load(self, node): if node.check: ap = 'Would you like to load an existing file, ' + YN asker = TrueFalse(prompt=ap) do_load = self.visit(asker) if not do_load: return Unstorable fname = self.visit_input(node) if fname is None or len(fname) == 0: fname = node.default_file if os.path.isfile(fname): with open(fname, 'r') as f: self.state = json.load(f) print_color('{{GREEN}}{0!r} loaded.{{NO_COLOR}}'.format(fname)) else: print_color(('{{RED}}{0!r} could not be found, ' 'continuing.{{NO_COLOR}}').format(fname)) return fname xonsh-0.6.0/xonsh/xonfig.py000066400000000000000000000534031320541242300156600ustar00rootroot00000000000000"""The xonsh configuration (xonfig) utility.""" import os import re import ast import json import shutil import random import pprint import textwrap import builtins import argparse import functools import itertools import contextlib try: import ply except ImportError: from xonsh.ply import ply import xonsh.wizard as wiz from xonsh import __version__ as XONSH_VERSION from xonsh.prompt.base import is_template_string from xonsh.platform import (is_readline_available, ptk_version, PYTHON_VERSION_INFO, pygments_version, ON_POSIX, ON_LINUX, linux_distro, ON_DARWIN, ON_WINDOWS, ON_CYGWIN, DEFAULT_ENCODING, githash) from xonsh.tools import (to_bool, is_string, print_exception, is_superuser, color_style_names, print_color, color_style) from xonsh.xontribs import xontrib_metadata, find_xontrib from xonsh.lazyasd import lazyobject HR = "'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'" WIZARD_HEAD = """ {{BOLD_WHITE}}Welcome to the xonsh configuration wizard!{{NO_COLOR}} {{YELLOW}}------------------------------------------{{NO_COLOR}} This will present a guided tour through setting up the xonsh static config file. Xonsh will automatically ask you if you want to run this wizard if the configuration file does not exist. However, you can always rerun this wizard with the xonfig command: $ xonfig wizard This wizard will load an existing configuration, if it is available. Also never fear when this wizard saves its results! It will create a backup of any existing configuration automatically. 
This wizard has two main phases: foreign shell setup and environment variable setup. Each phase may be skipped in its entirety. For the configuration to take effect, you will need to restart xonsh. {hr} """.format(hr=HR) WIZARD_FS = """ {hr} {{BOLD_WHITE}}Foreign Shell Setup{{NO_COLOR}} {{YELLOW}}-------------------{{NO_COLOR}} The xonsh shell has the ability to interface with foreign shells such as Bash, zsh, or fish. For configuration, this means that xonsh can load the environment, aliases, and functions specified in the config files of these shells. Naturally, these shells must be available on the system to work. Being able to share configuration (and source) from foreign shells makes it easier to transition to and from xonsh. """.format(hr=HR) WIZARD_ENV = """ {hr} {{BOLD_WHITE}}Environment Variable Setup{{NO_COLOR}} {{YELLOW}}--------------------------{{NO_COLOR}} The xonsh shell also allows you to setup environment variables from the static configuration file. Any variables set in this way are superseded by the definitions in the xonshrc or on the command line. Still, setting environment variables in this way can help define options that are global to the system or user. The following lists the environment variable name, its documentation, the default value, and the current value. The default and current values are presented as pretty repr strings of their Python types. {{BOLD_GREEN}}Note:{{NO_COLOR}} Simply hitting enter for any environment variable will accept the default value for that entry. """.format(hr=HR) WIZARD_ENV_QUESTION = "Would you like to set env vars now, " + wiz.YN WIZARD_XONTRIB = """ {hr} {{BOLD_WHITE}}Xontribs{{NO_COLOR}} {{YELLOW}}--------{{NO_COLOR}} No shell is complete without extensions, and xonsh is no exception. Xonsh extensions are called {{BOLD_GREEN}}xontribs{{NO_COLOR}}, or xonsh contributions. Xontribs are dynamically loadable, either by importing them directly or by using the 'xontrib' command. However, you can also configure xonsh to load xontribs automatically on startup prior to loading the run control files. This allows the xontrib to be used immediately in your xonshrc files. The following describes all xontribs that have been registered with xonsh. These come from users, 3rd party developers, or xonsh itself! """.format(hr=HR) WIZARD_XONTRIB_QUESTION = "Would you like to enable xontribs now, " + wiz.YN WIZARD_TAIL = """ Thanks for using the xonsh configuration wizard!""" def make_fs_wiz(): """Makes the foreign shell part of the wizard.""" cond = wiz.create_truefalse_cond(prompt='Add a new foreign shell, ' + wiz.YN) fs = wiz.While(cond=cond, body=[ wiz.Input('shell name (e.g. 
bash): ', path='/foreign_shells/{idx}/shell'), wiz.StoreNonEmpty('interactive shell [bool, default=True]: ', converter=to_bool, show_conversion=True, path='/foreign_shells/{idx}/interactive'), wiz.StoreNonEmpty('login shell [bool, default=False]: ', converter=to_bool, show_conversion=True, path='/foreign_shells/{idx}/login'), wiz.StoreNonEmpty("env command [str, default='env']: ", path='/foreign_shells/{idx}/envcmd'), wiz.StoreNonEmpty("alias command [str, default='alias']: ", path='/foreign_shells/{idx}/aliascmd'), wiz.StoreNonEmpty(("extra command line arguments [list of str, " "default=[]]: "), converter=ast.literal_eval, show_conversion=True, path='/foreign_shells/{idx}/extra_args'), wiz.StoreNonEmpty('current environment [dict, default=None]: ', converter=ast.literal_eval, show_conversion=True, path='/foreign_shells/{idx}/currenv'), wiz.StoreNonEmpty('safely handle exceptions [bool, default=True]: ', converter=to_bool, show_conversion=True, path='/foreign_shells/{idx}/safe'), wiz.StoreNonEmpty("pre-command [str, default='']: ", path='/foreign_shells/{idx}/prevcmd'), wiz.StoreNonEmpty("post-command [str, default='']: ", path='/foreign_shells/{idx}/postcmd'), wiz.StoreNonEmpty("foreign function command [str, default=None]: ", path='/foreign_shells/{idx}/funcscmd'), wiz.StoreNonEmpty("source command [str, default=None]: ", path='/foreign_shells/{idx}/sourcer'), wiz.Message(message='Foreign shell added.\n') ]) return fs def _wrap_paragraphs(text, width=70, **kwargs): """Wraps paragraphs instead.""" pars = text.split('\n') pars = ['\n'.join(textwrap.wrap(p, width=width, **kwargs)) for p in pars] s = '\n'.join(pars) return s ENVVAR_MESSAGE = """ {{BOLD_CYAN}}${name}{{NO_COLOR}} {docstr} {{RED}}default value:{{NO_COLOR}} {default} {{RED}}current value:{{NO_COLOR}} {current}""" ENVVAR_PROMPT = "{BOLD_GREEN}>>>{NO_COLOR} " def make_exit_message(): """Creates a message for how to exit the wizard.""" shell_type = builtins.__xonsh_shell__.shell_type keyseq = 'Ctrl-D' if shell_type == 'readline' else 'Ctrl-C' msg = 'To exit the wizard at any time, press {BOLD_UNDERLINE_CYAN}' msg += keyseq + '{NO_COLOR}.\n' m = wiz.Message(message=msg) return m def make_envvar(name): """Makes a StoreNonEmpty node for an environment variable.""" env = builtins.__xonsh_env__ vd = env.get_docs(name) if not vd.configurable: return default = vd.default if '\n' in default: default = '\n' + _wrap_paragraphs(default, width=69) curr = env.get(name) if is_string(curr) and is_template_string(curr): curr = curr.replace('{', '{{').replace('}', '}}') curr = pprint.pformat(curr, width=69) if '\n' in curr: curr = '\n' + curr msg = ENVVAR_MESSAGE.format(name=name, default=default, current=curr, docstr=_wrap_paragraphs(vd.docstr, width=69)) mnode = wiz.Message(message=msg) ens = env.get_ensurer(name) path = '/env/' + name pnode = wiz.StoreNonEmpty(ENVVAR_PROMPT, converter=ens.convert, show_conversion=True, path=path, retry=True, store_raw=vd.store_as_str) return mnode, pnode def _make_flat_wiz(kidfunc, *args): kids = map(kidfunc, *args) flatkids = [] for k in kids: if k is None: continue flatkids.extend(k) wizard = wiz.Wizard(children=flatkids) return wizard def make_env_wiz(): """Makes an environment variable wizard.""" w = _make_flat_wiz(make_envvar, sorted(builtins.__xonsh_env__._docs.keys())) return w XONTRIB_PROMPT = '{BOLD_GREEN}Add this xontrib{NO_COLOR}, ' + wiz.YN def _xontrib_path(visitor=None, node=None, val=None): # need this to append only based on user-selected size return ('xontribs', 
len(visitor.state.get('xontribs', ()))) def make_xontrib(xontrib, package): """Makes a message and StoreNonEmpty node for a xontrib.""" name = xontrib.get('name', '') msg = '\n{BOLD_CYAN}' + name + '{NO_COLOR}\n' if 'url' in xontrib: msg += '{RED}url:{NO_COLOR} ' + xontrib['url'] + '\n' if 'package' in xontrib: msg += '{RED}package:{NO_COLOR} ' + xontrib['package'] + '\n' if 'url' in package: if 'url' in xontrib and package['url'] != xontrib['url']: msg += '{RED}package-url:{NO_COLOR} ' + package['url'] + '\n' if 'license' in package: msg += '{RED}license:{NO_COLOR} ' + package['license'] + '\n' msg += '{PURPLE}installed?{NO_COLOR} ' msg += ('no' if find_xontrib(name) is None else 'yes') + '\n' desc = xontrib.get('description', '') if not isinstance(desc, str): desc = ''.join(desc) msg += _wrap_paragraphs(desc, width=69) if msg.endswith('\n'): msg = msg[:-1] mnode = wiz.Message(message=msg) convert = lambda x: name if to_bool(x) else wiz.Unstorable pnode = wiz.StoreNonEmpty(XONTRIB_PROMPT, converter=convert, path=_xontrib_path) return mnode, pnode def make_xontribs_wiz(): """Makes a xontrib wizard.""" md = xontrib_metadata() pkgs = [md['packages'].get(d.get('package', None), {}) for d in md['xontribs']] w = _make_flat_wiz(make_xontrib, md['xontribs'], pkgs) return w def make_xonfig_wizard(default_file=None, confirm=False): """Makes a configuration wizard for xonsh config file. Parameters ---------- default_file : str, optional Default filename to save and load to. User will still be prompted. confirm : bool, optional Confirm that the main part of the wizard should be run. """ w = wiz.Wizard(children=[ wiz.Message(message=WIZARD_HEAD), make_exit_message(), wiz.Load(default_file=default_file, check=True), wiz.Message(message=WIZARD_FS), make_fs_wiz(), wiz.Message(message=WIZARD_ENV), wiz.YesNo(question=WIZARD_ENV_QUESTION, yes=make_env_wiz(), no=wiz.Pass()), wiz.Message(message=WIZARD_XONTRIB), wiz.YesNo(question=WIZARD_XONTRIB_QUESTION, yes=make_xontribs_wiz(), no=wiz.Pass()), wiz.Message(message='\n' + HR + '\n'), wiz.Save(default_file=default_file, check=True), wiz.Message(message=WIZARD_TAIL), ]) if confirm: q = ("Would you like to run the xonsh configuration wizard now?\n\n" "1. Yes (You can abort at any time)\n" "2. No, but ask me next time.\n" "3. No, and don't ask me again.\n\n" "1, 2, or 3 [default: 2]? 
") passer = wiz.Pass() saver = wiz.Save(check=False, ask_filename=False, default_file=default_file) w = wiz.Question(q, {1: w, 2: passer, 3: saver}, converter=lambda x: int(x) if x != '' else 2) return w def _wizard(ns): env = builtins.__xonsh_env__ shell = builtins.__xonsh_shell__.shell fname = env.get('XONSHCONFIG') if ns.file is None else ns.file w = make_xonfig_wizard(default_file=fname, confirm=ns.confirm) tempenv = {'PROMPT': '', 'XONSH_STORE_STDOUT': False} pv = wiz.PromptVisitor(w, store_in_history=False, multiline=False) @contextlib.contextmanager def force_hide(): if env.get('XONSH_STORE_STDOUT') and hasattr(shell, '_force_hide'): orig, shell._force_hide = shell._force_hide, False yield shell._force_hide = orig else: yield with force_hide(), env.swap(tempenv): try: pv.visit() except (KeyboardInterrupt, Exception): print() print_exception() def _xonfig_format_human(data): wcol1 = wcol2 = 0 for key, val in data: wcol1 = max(wcol1, len(key)) wcol2 = max(wcol2, len(str(val))) hr = '+' + ('-' * (wcol1 + 2)) + '+' + ('-' * (wcol2 + 2)) + '+\n' row = '| {key!s:<{wcol1}} | {val!s:<{wcol2}} |\n' s = hr for key, val in data: s += row.format(key=key, wcol1=wcol1, val=val, wcol2=wcol2) s += hr return s def _xonfig_format_json(data): data = {k.replace(' ', '_'): v for k, v in data} s = json.dumps(data, sort_keys=True, indent=1) + '\n' return s def _info(ns): env = builtins.__xonsh_env__ try: ply.__version__ = ply.__version__ except AttributeError: ply.__version__ = '3.8' data = [ ('xonsh', XONSH_VERSION), ] hash_, date_ = githash() if hash_: data.append(('Git SHA', hash_)) data.append(('Commit Date', date_)) data.extend([ ('Python', '{}.{}.{}'.format(*PYTHON_VERSION_INFO)), ('PLY', ply.__version__), ('have readline', is_readline_available()), ('prompt toolkit', ptk_version() or None), ('shell type', env.get('SHELL_TYPE')), ('pygments', pygments_version()), ('on posix', bool(ON_POSIX)), ('on linux', bool(ON_LINUX)), ]) if ON_LINUX: data.append(('distro', linux_distro())) data.extend([ ('on darwin', ON_DARWIN), ('on windows', ON_WINDOWS), ('on cygwin', ON_CYGWIN), ('is superuser', is_superuser()), ('default encoding', DEFAULT_ENCODING), ('xonsh encoding', env.get('XONSH_ENCODING')), ('encoding errors', env.get('XONSH_ENCODING_ERRORS')), ]) formatter = _xonfig_format_json if ns.json else _xonfig_format_human s = formatter(data) return s def _styles(ns): env = builtins.__xonsh_env__ curr = env.get('XONSH_COLOR_STYLE') styles = sorted(color_style_names()) if ns.json: s = json.dumps(styles, sort_keys=True, indent=1) print(s) return lines = [] for style in styles: if style == curr: lines.append('* {GREEN}' + style + '{NO_COLOR}') else: lines.append(' ' + style) s = '\n'.join(lines) print_color(s) def _str_colors(cmap, cols): color_names = sorted(cmap.keys(), key=(lambda s: (len(s), s))) grper = lambda s: min(cols // (len(s) + 1), 8) lines = [] for n, group in itertools.groupby(color_names, key=grper): width = cols // n line = '' for i, name in enumerate(group): buf = ' ' * (width - len(name)) line += '{' + name + '}' + name + '{NO_COLOR}' + buf if (i + 1) % n == 0: lines.append(line) line = '' if len(line) != 0: lines.append(line) return '\n'.join(lines) def _tok_colors(cmap, cols): from xonsh.style_tools import Color nc = Color.NO_COLOR names_toks = {} for t in cmap.keys(): name = str(t) if name.startswith('Token.Color.'): _, _, name = name.rpartition('.') names_toks[name] = t color_names = sorted(names_toks.keys(), key=(lambda s: (len(s), s))) grper = lambda s: min(cols // (len(s) + 1), 8) toks = 
[] for n, group in itertools.groupby(color_names, key=grper): width = cols // n for i, name in enumerate(group): toks.append((names_toks[name], name)) buf = ' ' * (width - len(name)) if (i + 1) % n == 0: buf += '\n' toks.append((nc, buf)) if not toks[-1][1].endswith('\n'): toks[-1] = (nc, toks[-1][1] + '\n') return toks def _colors(args): columns, _ = shutil.get_terminal_size() columns -= int(ON_WINDOWS) style_stash = builtins.__xonsh_env__['XONSH_COLOR_STYLE'] if args.style is not None: if args.style not in color_style_names(): print('Invalid style: {}'.format(args.style)) return builtins.__xonsh_env__['XONSH_COLOR_STYLE'] = args.style color_map = color_style() akey = next(iter(color_map)) if isinstance(akey, str): s = _str_colors(color_map, columns) else: s = _tok_colors(color_map, columns) print_color(s) builtins.__xonsh_env__['XONSH_COLOR_STYLE'] = style_stash def _tutorial(args): import webbrowser webbrowser.open('http://xon.sh/tutorial.html') @functools.lru_cache(1) def _xonfig_create_parser(): p = argparse.ArgumentParser(prog='xonfig', description='Manages xonsh configuration.') subp = p.add_subparsers(title='action', dest='action') info = subp.add_parser('info', help=('displays configuration information, ' 'default action')) info.add_argument('--json', action='store_true', default=False, help='reports results as json') wiz = subp.add_parser('wizard', help='displays configuration information') wiz.add_argument('--file', default=None, help='config file location, default=$XONSHCONFIG') wiz.add_argument('--confirm', action='store_true', default=False, help='confirm that the wizard should be run.') sty = subp.add_parser('styles', help='prints available xonsh color styles') sty.add_argument('--json', action='store_true', default=False, help='reports results as json') colors = subp.add_parser('colors', help='preview color style') colors.add_argument('style', nargs='?', default=None, help='style to preview, default: ') subp.add_parser('tutorial', help='Launch tutorial in browser.') return p _XONFIG_MAIN_ACTIONS = { 'info': _info, 'wizard': _wizard, 'styles': _styles, 'colors': _colors, 'tutorial': _tutorial, } def xonfig_main(args=None): """Main xonfig entry point.""" if not args or (args[0] not in _XONFIG_MAIN_ACTIONS and args[0] not in {'-h', '--help'}): args.insert(0, 'info') parser = _xonfig_create_parser() ns = parser.parse_args(args) if ns.action is None: # apply default action ns = parser.parse_args(['info'] + args) return _XONFIG_MAIN_ACTIONS[ns.action](ns) @lazyobject def STRIP_COLOR_RE(): return re.compile('{.*?}') def _align_string(string, align='<', fill=' ', width=80): """ Align and pad a color formatted string """ linelen = len(STRIP_COLOR_RE.sub('', string)) padlen = max(width-linelen, 0) if align == '^': return fill*(padlen//2) + string + fill*(padlen//2 + padlen % 2) elif align == '>': return fill*padlen + string elif align == '<': return string + fill*padlen else: return string @lazyobject def TAGLINES(): return [ "Exofrills in the shell", "No frills in the shell", "Become the Lord of the Files", "Break out of your shell", "The only shell that is also a shell", "All that is and all that shell be", "It cannot be that hard", "Pass the xonsh, Piggy", "Piggy glanced nervously into hell and cradled the xonsh", "The xonsh is a symbol", "It is pronounced conch", "The shell, bourne again", "Snailed it", "Starfish loves you", "Come snail away", "This is Major Tom to Ground Xonshtrol", "Sally sells csh and keeps xonsh to herself", "Nice indeed. 
Everything's accounted for, except your old shell.", "I wanna thank you for putting me back in my snail shell", "Crustaceanly Yours", "With great shell comes great reproducibility", "None shell pass", "You shell not pass!", "The x-on shell", "Ever wonder why there isn't a Taco Shell? Because it is a corny idea.", "The carcolh will catch you!", "People xonshtantly mispronounce these things", "WHAT...is your favorite shell?", "Conches for the xonsh god!", "Python-powered, cross-platform, Unix-gazing shell", "Tab completion in Alderaan places", ] # list of strings or tuples (string, align, fill) WELCOME_MSG = [ '', ('{{INTENSE_WHITE}}Welcome to the xonsh shell ({version}){{NO_COLOR}}', '^', ' '), '', ('{{INTENSE_RED}}~{{NO_COLOR}} {tagline} {{INTENSE_RED}}~{{NO_COLOR}}', '^', ' '), '', ('{{INTENSE_BLACK}}', '<', '-'), '{{GREEN}}xonfig{{NO_COLOR}} tutorial {{INTENSE_WHITE}}-> Launch the tutorial in ' 'the browser{{NO_COLOR}}', '{{GREEN}}xonfig{{NO_COLOR}} wizard {{INTENSE_WHITE}}-> Run the configuration ' 'wizard and claim your shell {{NO_COLOR}}', '{{INTENSE_BLACK}}(Note: Run the Wizard or create a {{RED}}~/.xonshrc{{INTENSE_BLACK}} file ' 'to suppress the welcome screen)', '', ] def print_welcome_screen(): subst = dict(tagline=random.choice(list(TAGLINES)), version=XONSH_VERSION) for elem in WELCOME_MSG: if isinstance(elem, str): elem = (elem, '', '') line = elem[0].format(**subst) termwidth = os.get_terminal_size().columns line = _align_string(line, elem[1], elem[2], width=termwidth) print_color(line) xonsh-0.6.0/xonsh/xontribs.json000066400000000000000000000231221320541242300165520ustar00rootroot00000000000000{"xontribs": [ {"name": "apt_tabcomplete", "package": "xonsh-apt-tabcomplete", "url": "https://github.com/DangerOnTheRanger/xonsh-apt-tabcomplete", "description": ["Adds tabcomplete functionality to apt-get/apt-cache inside of xonsh."] }, {"name": "autoxsh", "package": "xonsh-autoxsh", "url": "https://github.com/Granitas/xonsh-autoxsh", "description": ["Adds automatic execution of xonsh script files called", "``.autoxsh`` when enterting a directory with ``cd`` function"] }, {"name": "bashisms", "package": "xonsh", "url": "http://xon.sh", "description": [ "Enables additional Bash-like syntax while at the command prompt. For ", "example, the ``!!`` syntax for running the previous command is now usable.", "Note that these features are implemented as precommand events and these ", "additions do not affect the xonsh language when run as script. That said, ", "you might find them useful if you have strong muscle memory.\n\n", "**Warning:** This xontrib may modify user command line input to implement ", "its behavior. To see the modifications as they are applied (in unified diff", "format), please set ``$XONSH_DEBUG`` to ``2`` or higher."] }, {"name": "coreutils", "package": "xonsh", "url": "http://xon.sh", "description": [ "Additional core utilities that are implemented in xonsh. The current list ", "includes:\n", "\n", "* cat\n", "* echo\n", "* pwd\n", "* tee\n", "* tty", "* yes\n", "\n", "In many cases, these may have a lower performance overhead than the ", "posix command line utility with the same name. This is because these ", "tools avoid the need for a full subprocess call. Additionally, these ", "tools are cross-platform."] }, {"name": "distributed", "package": "xonsh", "url": "http://xon.sh", "description": [ "The distributed parallel computing library hooks for xonsh. 
", "Importantly this provides a substitute 'dworker' command which enables ", "distributed workers to have access to xonsh builtins.\n\n", "Furthermore, this xontrib adds a 'DSubmitter' context manager for ", "executing a block remotely. Moreover, this also adds a convenience ", "function 'dsubmit()' for creating DSubmitter and Executor instances ", "at the same time. Thus users may submit distributed jobs with::\n\n", " with dsubmit('127.0.0.1:8786', rtn='x') as dsub:\n", " x = $(echo I am elsewhere)\n\n", " res = dsub.future.result()\n", " print(res)\n\n", "This is useful for long running or non-blocking jobs."] }, {"name": "docker_tabcomplete", "package": "xonsh-docker-tabcomplete", "url": "https://github.com/xsteadfastx/xonsh-docker-tabcomplete", "description": ["Adds tabcomplete functionality to docker inside of xonsh."] }, {"name": "jedi", "package": "xonsh", "url": "http://xon.sh", "description": ["Jedi tab completion hooks for xonsh."] }, {"name": "mpl", "package": "xonsh", "url": "http://xon.sh", "description": ["Matplotlib hooks for xonsh, including the new 'mpl' alias ", "that displays the current figure on the screen."] }, {"name": "prompt_ret_code", "package": "xonsh", "url": "http://xon.sh", "description": ["Adds return code info to the prompt"] }, {"name": "free_cwd", "package": "xonsh", "url": "http://xon.sh", "description": [ "Release the lock on the current directory whenever the", "prompt is shown. Enabling this will allow the other programs or", "Windows Explorer to delete or rename the current or parent", "directories. Internally, it is accomplished by temporarily resetting", "CWD to the root drive folder while waiting at the prompt. This only", "works with the prompt_toolkit backend and can cause cause issues", "if any extensions are enabled that hook the prompt and relies on", "``os.getcwd()``"] }, {"name": "whole_word_jumping", "package": "xonsh", "url": "http://xon.sh", "description": [ "Jumping across whole words (non-whitespace) with Ctrl+Left/Right.", "Alt+Left/Right remains unmodified to jump over smaller word segments."] }, {"name": "scrapy_tabcomplete", "package": "xonsh-scrapy-tabcomplete", "url": "https://github.com/Granitas/xonsh-scrapy-tabcomplete", "description": ["Adds tabcomplete functionality to scrapy inside of xonsh."] }, {"name": "vox", "package": "xonsh", "url": "http://xon.sh", "description": ["Python virtual environment manager for xonsh."] }, {"name": "vox_tabcomplete", "package": "xonsh-vox-tabcomplete", "url": "https://github.com/Granitosaurus/xonsh-vox-tabcomplete", "description": ["Adds tabcomplete functionality to vox inside of xonsh."] }, {"name": "xo", "package": "exofrills", "url": "https://github.com/scopatz/xo", "description": ["Adds an 'xo' alias to run the exofrills text editor in the ", "current Python interpreter session. 
This shaves off a ", "bit of the startup time when running your favorite, minimal ", "text editor."] }, {"name": "xonda", "package": "xonda", "url": "https://github.com/gforsyth/xonda", "description": ["A thin wrapper around conda with tab completion"] }, {"name": "avox", "package": "xontrib-avox", "url": "https://github.com/astronouth7303/xontrib-avox", "description": ["Automatic (de)activation of virtual environments as you cd around"] }, {"name": "z", "package": "xontrib-z", "url": "https://github.com/astronouth7303/xontrib-z", "description": ["Tracks your most used directories, based on 'frecency'."] }, {"name": "powerline", "package": "xontrib-powerline", "url": "https://github.com/santagada/xontrib-powerline", "description": ["Powerline for Xonsh shell"] }, {"name": "prompt_vi_mode", "package": "xontrib-prompt-vi-mode", "url": "https://github.com/t184256/xontrib-prompt-vi-mode", "description": ["vi-mode status formatter for xonsh prompt"] }, {"name": "click_tabcomplete", "package": "xonsh-click-tabcomplete", "url": "https://github.com/Granitosaurus/xonsh-click-tabcomplete", "description": ["Adds tabcomplete functionality to click based python applications inside of xonsh."] }, {"name": "fzf-widgets", "package": "xontrib-fzf-widgets", "url": "https://github.com/shahinism/xontrib-fzf-widgets", "description": ["Adds some fzf widgets to your xonsh shell."] }, {"name": "schedule", "package": "xontrib-schedule", "url": "https://github.com/astronouth7303/xontrib-schedule", "description": ["Xonsh Task Scheduling"] } ], "packages": { "exofrills": { "license": "WTFPL", "url": "http://exofrills.org", "install": { "conda": "conda install -c conda-forge xo", "pip": "xpip install exofrills"} }, "xonsh": { "license": "BSD 3-clause", "url": "http://xon.sh", "install": { "conda": "conda install -c conda-forge xonsh", "pip": "xpip install xonsh", "aura": "sudo aura -A xonsh", "yaourt": "yaourt -Sa xonsh"} }, "xontrib-prompt-ret-code": { "license": "MIT", "url": "https://github.com/Siecje/xontrib-prompt-ret-code", "install": { "pip": "xpip install xontrib-prompt-ret-code" } }, "xonsh-apt-tabcomplete": { "license": "BSD 2-clause", "url": "https://github.com/DangerOnTheRanger/xonsh-apt-tabcomplete", "install": { "pip": "xpip install xonsh-apt-tabcomplete" } }, "xonsh-docker-tabcomplete": { "license": "MIT", "url": "https://github.com/xsteadfastx/xonsh-docker-tabcomplete", "install": { "pip": "xpip install xonsh-docker-tabcomplete" } }, "xonsh-scrapy-tabcomplete": { "license": "GPLv3", "url": "https://github.com/Granitas/xonsh-scrapy-tabcomplete", "install": { "pip": "xpip install xonsh-scrapy-tabcomplete" } }, "xonsh-vox-tabcomplete": { "license": "GPLv3", "url": "https://github.com/Granitosaurus/xonsh-vox-tabcomplete", "install": { "pip": "xpip install xonsh-vox-tabcomplete" } }, "xonsh-click-tabcomplete": { "license": "GPLv3", "url": "https://github.com/Granitosaurus/xonsh-click-tabcomplete", "install": { "pip": "xpip install xonsh-click-tabcomplete" } }, "xonsh-autoxsh": { "license": "GPLv3", "url": "https://github.com/Granitas/xonsh-autoxsh", "install": { "pip": "xpip install xonsh-autoxsh" } }, "xonda": { "license": "MIT", "url": "https://github.com/gforsyth/xonda", "install": { "pip": "xpip install xonda" } }, "xontrib-avox": { "license": "GPLv3", "url": "https://github.com/astronouth7303/xontrib-avox", "install": { "pip": "xpip install xontrib-avox" } }, "xontrib-z": { "license": "GPLv3", "url": "https://github.com/astronouth7303/xontrib-z", "install": { "pip": "xpip install xontrib-z" } }, 
"xontrib-powerline": { "license": "MIT", "url": "https://github.com/santagada/xontrib-powerline", "install": { "pip": "xpip install xontrib-powerline" } }, "xontrib-thefuck": { "license": "MIT", "url": "https://github.com/meatballs/xontrib-thefuck", "install": { "pip": "xpip install xontrib-thefuck" } }, "xontrib-prompt-vi-mode": { "license": "MIT", "url": "https://github.com/t184256/xontrib-prompt-vi-mode", "install": { "pip": "xpip install xontrib-prompt-vi-mode" } }, "xontrib-fzf-widgets": { "license": "GPLv3", "url": "https://github.com/shahinism/xontrib-fzf-widgets", "install": { "pip": "xpip install xontrib-fzf-widgets" } }, "xontrib-schedule": { "license": "MIT", "url": "https://github.com/astronouth7303/xontrib-schedule", "install": { "pip": "xpip install xontrib-schedule" } } } } xonsh-0.6.0/xonsh/xontribs.py000066400000000000000000000124441320541242300162360ustar00rootroot00000000000000"""Tools for helping manage xontributions.""" import os import sys import json import builtins import argparse import functools import importlib import importlib.util from xonsh.tools import print_color, unthreadable @functools.lru_cache(1) def xontribs_json(): return os.path.join(os.path.dirname(__file__), 'xontribs.json') def find_xontrib(name): """Finds a xontribution from its name.""" if name.startswith('.'): spec = importlib.util.find_spec(name, package='xontrib') else: spec = importlib.util.find_spec('.' + name, package='xontrib') return spec or importlib.util.find_spec(name) def xontrib_context(name): """Return a context dictionary for a xontrib of a given name.""" spec = find_xontrib(name) if spec is None: return None m = importlib.import_module(spec.name) pubnames = getattr(m, '__all__', None) if pubnames is not None: ctx = {k: getattr(m, k) for k in pubnames} else: ctx = {k: getattr(m, k) for k in dir(m) if not k.startswith('_')} return ctx def prompt_xontrib_install(names): """Returns a formatted string with name of xontrib package to prompt user""" md = xontrib_metadata() packages = [] for name in names: for xontrib in md['xontribs']: if xontrib['name'] == name: packages.append(xontrib['package']) print('The following xontribs are enabled but not installed: \n' ' {xontribs}\n' 'To install them run \n' ' xpip install {packages}'.format(xontribs=' '.join(names), packages=' '.join(packages))) def update_context(name, ctx=None): """Updates a context in place from a xontrib. If ctx is not provided, then __xonsh_ctx__ is updated. """ if ctx is None: ctx = builtins.__xonsh_ctx__ if not hasattr(update_context, 'bad_imports'): update_context.bad_imports = [] modctx = xontrib_context(name) if modctx is None: update_context.bad_imports.append(name) return ctx return ctx.update(modctx) @functools.lru_cache() def xontrib_metadata(): """Loads and returns the xontribs.json file.""" with open(xontribs_json(), 'r') as f: md = json.load(f) return md def _load(ns): """load xontribs""" ctx = builtins.__xonsh_ctx__ for name in ns.names: if ns.verbose: print('loading xontrib {0!r}'.format(name)) update_context(name, ctx=ctx) if update_context.bad_imports: prompt_xontrib_install(update_context.bad_imports) del update_context.bad_imports def _list(ns): """Lists xontribs.""" meta = xontrib_metadata() data = [] nname = 6 # ensures some buffer space. 
names = None if len(ns.names) == 0 else set(ns.names) for md in meta['xontribs']: name = md['name'] if names is not None and md['name'] not in names: continue nname = max(nname, len(name)) spec = find_xontrib(name) if spec is None: installed = loaded = False else: installed = True loaded = spec.name in sys.modules d = {'name': name, 'installed': installed, 'loaded': loaded} data.append(d) if ns.json: jdata = {d.pop('name'): d for d in data} s = json.dumps(jdata) print(s) else: s = "" for d in data: name = d['name'] lname = len(name) s += "{PURPLE}" + name + "{NO_COLOR} " + " "*(nname - lname) if d['installed']: s += '{GREEN}installed{NO_COLOR} ' else: s += '{RED}not-installed{NO_COLOR} ' if d['loaded']: s += '{GREEN}loaded{NO_COLOR}' else: s += '{RED}not-loaded{NO_COLOR}' s += '\n' print_color(s[:-1]) @functools.lru_cache() def _create_xontrib_parser(): # parse command line args parser = argparse.ArgumentParser(prog='xontrib', description='Manages xonsh extensions') subp = parser.add_subparsers(title='action', dest='action') load = subp.add_parser('load', help='loads xontribs') load.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose') load.add_argument('names', nargs='+', default=(), help='names of xontribs') lyst = subp.add_parser('list', help=('list xontribs, whether they are ' 'installed, and loaded.')) lyst.add_argument('--json', action='store_true', default=False, help='reports results as json') lyst.add_argument('names', nargs='*', default=(), help='names of xontribs') return parser _MAIN_XONTRIB_ACTIONS = { 'load': _load, 'list': _list, } @unthreadable def xontribs_main(args=None, stdin=None): """Alias that loads xontribs""" if not args or (args[0] not in _MAIN_XONTRIB_ACTIONS and args[0] not in {'-h', '--help'}): args.insert(0, 'load') parser = _create_xontrib_parser() ns = parser.parse_args(args) if ns.action is None: # apply default action ns = parser.parse_args(['load'] + args) return _MAIN_XONTRIB_ACTIONS[ns.action](ns) xonsh-0.6.0/xonsh/xoreutils/000077500000000000000000000000001320541242300160455ustar00rootroot00000000000000xonsh-0.6.0/xonsh/xoreutils/__init__.py000066400000000000000000000000361320541242300201550ustar00rootroot00000000000000# amalgamate # amalgamate end xonsh-0.6.0/xonsh/xoreutils/_which.py000066400000000000000000000321431320541242300176630ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (c) 2002-2007 ActiveState Software Inc. # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# # Author: #   Trent Mick (TrentM@ActiveState.com) # Home: #   http://trentm.com/projects/which/ import os import sys import stat import getopt import builtins import collections.abc as cabc r"""Find the full path to commands. which(command, path=None, verbose=0, exts=None) Return the full path to the first match of the given command on the path. whichall(command, path=None, verbose=0, exts=None) Return a list of full paths to all matches of the given command on the path. whichgen(command, path=None, verbose=0, exts=None) Return a generator which will yield full paths to all matches of the given command on the path. By default the PATH environment variable is searched (as well as, on Windows, the AppPaths key in the registry), but a specific 'path' list to search may be specified as well. On Windows, the PATHEXT environment variable is applied as appropriate. If "verbose" is true then a tuple of the form (<fullpath>, <where>) is returned for each match. The latter element is a textual description of where the match was found. For example: from PATH element 0 from HKLM\SOFTWARE\...\perl.exe """ _cmdlnUsage = """ Show the full path of commands. Usage: which [<options>...] [<command-name>...] Options: -h, --help Print this help and exit. -V, --version Print the version info and exit. -a, --all Print *all* matching paths. -v, --verbose Print out how matches were located and show near misses on stderr. -q, --quiet Just print out matches. I.e., do not print out near misses. -p <altpath>, --path=<altpath> An alternative path (list of directories) may be specified for searching. -e <exts>, --exts=<exts> Specify a list of extensions to consider instead of the usual list (';'-separated list, Windows only). Show the full path to the program that would be run for each given command name, if any. Which, like GNU's which, returns the number of failed arguments, or -1 when no <command-name> was given. Near misses include duplicates, non-regular files and (on Un*x) files without executable access. """ __version_info__ = (1, 2, 0) __version__ = '.'.join(map(str, __version_info__)) __all__ = ["which", "whichall", "whichgen", "WhichError"] class WhichError(Exception): pass # internal support stuff def _getRegisteredExecutable(exeName): """Windows allows application paths to be registered in the registry.""" registered = None if sys.platform.startswith('win'): if os.path.splitext(exeName)[1].lower() != '.exe': exeName += '.exe' try: import winreg as _winreg except ImportError: import _winreg try: key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" + \ exeName value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key) registered = (value, "from HKLM\\" + key) except _winreg.error: pass if registered and not os.path.exists(registered[0]): registered = None return registered def _samefile(fname1, fname2): if sys.platform.startswith('win'): return (os.path.normpath(os.path.normcase(fname1)) == os.path.normpath(os.path.normcase(fname2))) else: return os.path.samefile(fname1, fname2) def _cull(potential, matches, verbose=0): """Cull inappropriate matches. Possible reasons: - a duplicate of a previous match - not a disk file - not executable (non-Windows) If 'potential' is approved it is returned and added to 'matches'. Otherwise, None is returned.
""" for match in matches: # don't yield duplicates if _samefile(potential[0], match[0]): if verbose: sys.stderr.write("duplicate: %s (%s)\n" % potential) return None else: if not stat.S_ISREG(os.stat(potential[0]).st_mode): if verbose: sys.stderr.write("not a regular file: %s (%s)\n" % potential) elif sys.platform != "win32" \ and not os.access(potential[0], os.X_OK): if verbose: sys.stderr.write("no executable access: %s (%s)\n" % potential) else: matches.append(potential) return potential # module API def whichgen(command, path=None, verbose=0, exts=None): """Return a generator of full paths to the given command. "command" is a the name of the executable to search for. "path" is an optional alternate path list to search. The default it to use the PATH environment variable. "verbose", if true, will cause a 2-tuple to be returned for each match. The second element is a textual description of where the match was found. "exts" optionally allows one to specify a list of extensions to use instead of the standard list for this system. This can effectively be used as an optimization to, for example, avoid stat's of "foo.vbs" when searching for "foo" and you know it is not a VisualBasic script but ".vbs" is on PATHEXT. This option is only supported on Windows. This method returns a generator which yields tuples of the form (, ). """ matches = [] if path is None: usingGivenPath = 0 path = os.environ.get("PATH", "").split(os.pathsep) if sys.platform.startswith("win"): path.insert(0, os.curdir) # implied by Windows shell else: usingGivenPath = 1 # Windows has the concept of a list of extensions (PATHEXT env var). if sys.platform.startswith("win"): if exts is None: exts = builtins.__xonsh_env__['PATHEXT'] # If '.exe' is not in exts then obviously this is Win9x and # or a bogus PATHEXT, then use a reasonable default. for ext in exts: if ext.lower() == ".exe": break else: exts = ['.COM', '.EXE', '.BAT', '.CMD'] elif not isinstance(exts, cabc.Sequence): raise TypeError("'exts' argument must be a sequence or None") else: if exts is not None: raise WhichError("'exts' argument is not supported on " "platform '%s'" % sys.platform) exts = [] # File name cannot have path separators because PATH lookup does not # work that way. if os.sep in command or os.altsep and os.altsep in command: if os.path.exists(command): match = _cull((command, "explicit path given"), matches, verbose) yield match else: for i in range(len(path)): dirName = path[i] # On windows the dirName *could* be quoted, drop the quotes if sys.platform.startswith("win") and len(dirName) >= 2 \ and dirName[0] == '"' and dirName[-1] == '"': dirName = dirName[1:-1] for ext in ([''] + exts): absName = os.path.abspath( os.path.normpath(os.path.join(dirName, command + ext))) if os.path.isfile(absName): if usingGivenPath: fromWhere = "from given path element %d" % i elif not sys.platform.startswith("win"): fromWhere = "from PATH element %d" % i elif i == 0: fromWhere = "from current directory" else: fromWhere = "from PATH element %d" % (i - 1) match = _cull((absName, fromWhere), matches, verbose) if match: yield match match = _getRegisteredExecutable(command) if match is not None: match = _cull(match, matches, verbose) if match: yield match def which(command, path=None, verbose=0, exts=None): """Return the full path to the first match of the given command on the path. "command" is a the name of the executable to search for. "path" is an optional alternate path list to search. The default it to use the PATH environment variable. 
"verbose", if true, will cause a 2-tuple to be returned. The second element is a textual description of where the match was found. "exts" optionally allows one to specify a list of extensions to use instead of the standard list for this system. This can effectively be used as an optimization to, for example, avoid stat's of "foo.vbs" when searching for "foo" and you know it is not a VisualBasic script but ".vbs" is on PATHEXT. This option is only supported on Windows. If no match is found for the command, a WhichError is raised. """ try: absName, fromWhere = whichgen(command, path, verbose, exts).next() except StopIteration: raise WhichError("Could not find '%s' on the path." % command) if verbose: return absName, fromWhere else: return absName def whichall(command, path=None, verbose=0, exts=None): """Return a list of full paths to all matches of the given command on the path. "command" is a the name of the executable to search for. "path" is an optional alternate path list to search. The default it to use the PATH environment variable. "verbose", if true, will cause a 2-tuple to be returned for each match. The second element is a textual description of where the match was found. "exts" optionally allows one to specify a list of extensions to use instead of the standard list for this system. This can effectively be used as an optimization to, for example, avoid stat's of "foo.vbs" when searching for "foo" and you know it is not a VisualBasic script but ".vbs" is on PATHEXT. This option is only supported on Windows. """ if verbose: return list(whichgen(command, path, verbose, exts)) else: return list(absName for absName, _ in whichgen(command, path, verbose, exts)) # mainline def main(argv): all = 0 verbose = 0 altpath = None exts = None try: optlist, args = getopt.getopt(argv[1:], 'haVvqp:e:', ['help', 'all', 'version', 'verbose', 'quiet', 'path=', 'exts=']) except getopt.GetoptErrsor as msg: sys.stderr.write("which: error: %s. 
Your invocation was: %s\n" % (msg, argv)) sys.stderr.write("Try 'which --help'.\n") return 1 for opt, optarg in optlist: if opt in ('-h', '--help'): print(_cmdlnUsage) return 0 elif opt in ('-V', '--version'): print("which %s" % __version__) return 0 elif opt in ('-a', '--all'): all = 1 elif opt in ('-v', '--verbose'): verbose = 1 elif opt in ('-q', '--quiet'): verbose = 0 elif opt in ('-p', '--path'): if optarg: altpath = optarg.split(os.pathsep) else: altpath = [] elif opt in ('-e', '--exts'): if optarg: exts = optarg.split(os.pathsep) else: exts = [] if len(args) == 0: return -1 failures = 0 for arg in args: # print "debug: search for %r" % arg nmatches = 0 for absName, fromWhere in whichgen(arg, path=altpath, verbose=verbose, exts=exts): if verbose: print("%s (%s)" % (absName, fromWhere)) else: print(absName) nmatches += 1 if not all: break if not nmatches: failures += 1 return failures if __name__ == "__main__": sys.exit(main(sys.argv)) xonsh-0.6.0/xonsh/xoreutils/cat.py000066400000000000000000000067011320541242300171720ustar00rootroot00000000000000"""Implements a cat command for xonsh.""" import os from xonsh.xoreutils.util import arg_handler def _cat_single_file(opts, fname, stdin, out, err, line_count=1): if fname == '-': f = stdin elif os.path.isdir(fname): print("cat: {}: Is a directory.".format(fname), file=err) return True, line_count elif not os.path.exists(fname): print("cat: No such file or directory: {}".format(fname), file=err) return True, line_count else: f = open(fname, 'rb') sep = os.linesep.encode() last_was_blank = False while True: _r = r = f.readline() if isinstance(_r, str): _r = r = _r.encode() if r == b'': break if r.endswith(sep): _r = _r[:-len(sep)] this_one_blank = _r == b'' if last_was_blank and this_one_blank and opts['squeeze_blank']: continue last_was_blank = this_one_blank if (opts['number_all'] or (opts['number_nonblank'] and not this_one_blank)): start = ("%6d " % line_count).encode() _r = start + _r line_count += 1 if opts['show_ends']: _r = _r + b'$' try: print(_r.decode('unicode_escape'), flush=True, file=out) except: pass return False, line_count def cat(args, stdin, stdout, stderr): """A cat command for xonsh.""" opts = _cat_parse_args(args) if opts is None: print(CAT_HELP_STR, file=stdout) return 0 line_count = 1 errors = False if len(args) == 0: args = ['-'] for i in args: o = _cat_single_file(opts, i, stdin, stdout, stderr, line_count) if o is None: return -1 _e, line_count = o errors = _e or errors return int(errors) def _cat_parse_args(args): out = {'number_nonblank': False, 'number_all': False, 'squeeze_blank': False, 'show_ends': False} if '--help' in args: return arg_handler(args, out, '-b', 'number_nonblank', True, '--number-nonblank') arg_handler(args, out, '-n', 'number_all', True, '--number') arg_handler(args, out, '-E', 'show_ends', True, '--show-ends') arg_handler(args, out, '-s', 'squeeze_blank', True, '--squeeze-blank') arg_handler(args, out, '-T', 'show_tabs', True, '--show-tabs') return out CAT_HELP_STR = """This version of cat was written in Python for the xonsh project: http://xon.sh Based on cat from GNU coreutils: http://www.gnu.org/software/coreutils/ Usage: cat [OPTION]... [FILE]... Concatenate FILE(s), or standard input, to standard output. 
-b, --number-nonblank number nonempty output lines, overrides -n -E, --show-ends display $ at end of each line -n, --number number all output lines -s, --squeeze-blank suppress repeated empty output lines -T, --show-tabs display TAB characters as ^I -u (ignored) --help display this help and exit With no FILE, or when FILE is -, read standard input. Examples: cat f - g Output f's contents, then standard input, then g's contents. cat Copy standard input to standard output.""" # NOT IMPLEMENTED: # -A, --show-all equivalent to -vET # -e equivalent to -vE # -t equivalent to -vT # -v, --show-nonprinting use ^ and M- notation, except for LFD and TAB # --version output version information and exit""" xonsh-0.6.0/xonsh/xoreutils/echo.py000066400000000000000000000024671320541242300173460ustar00rootroot00000000000000"""Implements a simple echo command for xonsh.""" def echo(args, stdin, stdout, stderr): """A simple echo command.""" opts = _echo_parse_args(args) if opts is None: return if opts['help']: print(ECHO_HELP, file=stdout) return 0 ender = opts['end'] args = map(str, args) if opts['escapes']: args = map(lambda x: x.encode().decode('unicode_escape'), args) print(*args, end=ender, file=stdout) def _echo_parse_args(args): out = {'escapes': False, 'end': '\n', 'help': False} if '-e' in args: args.remove('-e') out['escapes'] = True if '-E' in args: args.remove('-E') out['escapes'] = False if '-n' in args: args.remove('-n') out['end'] = '' if '-h' in args or '--help' in args: out['help'] = True return out ECHO_HELP = """Usage: echo [OPTIONS]... [STRING]... Echo the STRING(s) to standard output. -n do not include the trailing newline -e enable interpretation of backslash escapes -E disable interpretation of backslash escapes (default) -h --help display this message and exit This version of echo was written in Python for the xonsh project: http://xon.sh Based on echo from GNU coreutils: http://www.gnu.org/software/coreutils/""" xonsh-0.6.0/xonsh/xoreutils/pwd.py000066400000000000000000000014001320541242300172040ustar00rootroot00000000000000"""A pwd implementation for xonsh.""" import os def pwd(args, stdin, stdout, stderr): """A pwd implementation""" e = __xonsh_env__['PWD'] if '-h' in args or '--help' in args: print(PWD_HELP, file=stdout) return 0 if '-P' in args: e = os.path.realpath(e) print(e, file=stdout) return 0 PWD_HELP = """Usage: pwd [OPTION]... Print the full filename of the current working directory. -P, --physical avoid all symlinks --help display this help and exit This version of pwd was written in Python for the xonsh project: http://xon.sh Based on pwd from GNU coreutils: http://www.gnu.org/software/coreutils/""" # Not Implemented # -L, --logical use PWD from environment, even if it contains symlinks xonsh-0.6.0/xonsh/xoreutils/tee.py000066400000000000000000000030241320541242300171730ustar00rootroot00000000000000"""A tee implementation for xonsh.""" def tee(args, stdin, stdout, stderr): """A tee command for xonsh.""" mode = 'w' if '-a' in args: args.remove('-a') mode = 'a' if '--append' in args: args.remove('--append') mode = 'a' if '--help' in args: print(TEE_HELP, file=stdout) return 0 if stdin is None: msg = "tee was not piped stdin, must have input stream to read from." 
print(msg, file=stderr) return 1 errors = False files = [] for i in args: if i == '-': files.append(stdout) else: try: files.append(open(i, mode)) except: print('tee: failed to open {}'.format(i), file=stderr) errors = True files.append(stdout) while True: r = stdin.read(1024) if r == '': break for i in files: i.write(r) for i in files: if i != stdout: i.close() return int(errors) TEE_HELP = """This version of tee was written in Python for the xonsh project: http://xon.sh Based on tee from GNU coreutils: http://www.gnu.org/software/coreutils/ Usage: tee [OPTION]... [FILE]... Copy standard input to each FILE, and also to standard output. -a, --append append to the given FILEs, do not overwrite --help display this help and exit If a FILE is -, copy again to standard output.""" # NOT IMPLEMENTED: # -i, --ignore-interrupts ignore interrupt signals xonsh-0.6.0/xonsh/xoreutils/tty.py000066400000000000000000000023721320541242300172430ustar00rootroot00000000000000"""A tty implementation for xonsh""" import os import sys def tty(args, stdin, stdout, stderr): """A tty command for xonsh.""" if '--help' in args: print(TTY_HELP, file=stdout) return 0 silent = False for i in ('-s', '--silent', '--quiet'): if i in args: silent = True args.remove(i) if len(args) > 0: if not silent: for i in args: print('tty: Invalid option: {}'.format(i), file=stderr) print("Try 'tty --help' for more information", file=stderr) return 2 try: fd = stdin.fileno() except: fd = sys.stdin.fileno() if not os.isatty(fd): if not silent: print('not a tty', file=stdout) return 1 if not silent: try: print(os.ttyname(fd), file=stdout) except: return 3 return 0 TTY_HELP = """Usage: tty [OPTION]... Print the file name of the terminal connected to standard input. -s, --silent, --quiet print nothing, only return an exit status --help display this help and exit This version of tty was written in Python for the xonsh project: http://xon.sh Based on tty from GNU coreutils: http://www.gnu.org/software/coreutils/""" xonsh-0.6.0/xonsh/xoreutils/uptime.py000066400000000000000000000211261320541242300177240ustar00rootroot00000000000000""" Provides a cross-platform way to figure out the system uptime. Should work on damned near any operating system you can realistically expect to be asked to write Python code for. If this module is invoked as a stand-alone script, it will print the current uptime in a human-readable format, or display an error message if it can't, to standard output. This file was forked from the uptime project: https://github.com/Cairnarvon/uptime Copyright (c) 2012, Koen Crolla, All rights reserved. """ import os import sys import time import ctypes import struct import xonsh.platform as xp import xonsh.lazyimps as xlimps import xonsh.lazyasd as xl _BOOTTIME = None def _uptime_osx(): """Returns the uptime on mac / darwin.""" global _BOOTTIME bt = xlimps.macutils.sysctlbyname(b"kern.boottime", return_str=False) if len(bt) == 4: bt = struct.unpack_from('@hh', bt) elif len(bt) == 8: bt = struct.unpack_from('@ii', bt) elif len(bt) == 16: bt = struct.unpack_from('@qq', bt) else: raise ValueError('length of boot time not understood: ' + repr(bt)) bt = bt[0] + bt[1]*1e-6 if bt == 0.0: return None _BOOTTIME = bt return time.time() - bt def _uptime_linux(): """Returns uptime in seconds or None, on Linux.""" # With procfs try: with open('/proc/uptime', 'r') as f: up = float(f.readline().split()[0]) return up except (IOError, ValueError): pass buf = ctypes.create_string_buffer(128) # 64 suffices on 32-bit, whatever. 
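# The C ``struct sysinfo`` filled in by sysinfo(2) starts with the uptime in seconds as a native long, so unpacking '@l' from the front of the buffer below is sufficient.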
if xp.LIBC.sysinfo(buf) < 0: return None up = struct.unpack_from('@l', buf.raw)[0] if up < 0: up = None return up def _boottime_linux(): """A way to figure out the boot time directly on Linux.""" global _BOOTTIME try: with open('/proc/stat', 'r') as f: for line in f: if line.startswith('btime'): _BOOTTIME = float(line.split()[1]) return _BOOTTIME except (IOError, IndexError): return None def _uptime_amiga(): """Returns uptime in seconds or None, on AmigaOS.""" global _BOOTTIME try: _BOOTTIME = os.stat('RAM:').st_ctime return time.time() - _BOOTTIME except (NameError, OSError): return None def _uptime_beos(): """Returns uptime in seconds on None, on BeOS/Haiku.""" if not hasattr(xp.LIBC, 'system_time'): return None xp.LIBC.system_time.restype = ctypes.c_int64 return xp.LIBC.system_time() / 1000000. def _uptime_bsd(): """Returns uptime in seconds or None, on BSD (including OS X).""" global _BOOTTIME if not hasattr(xp.LIBC, 'sysctlbyname'): # Not BSD. return None # Determine how much space we need for the response. sz = ctypes.c_uint(0) xp.LIBC.sysctlbyname('kern.boottime', None, ctypes.byref(sz), None, 0) if sz.value != struct.calcsize('@LL'): # Unexpected, let's give up. return None # For real now. buf = ctypes.create_string_buffer(sz.value) xp.LIBC.sysctlbyname('kern.boottime', buf, ctypes.byref(sz), None, 0) sec, usec = struct.unpack_from('@LL', buf.raw) # OS X disagrees what that second value is. if usec > 1000000: usec = 0. _BOOTTIME = sec + usec / 1000000. up = time.time() - _BOOTTIME if up < 0: up = None return up def _uptime_minix(): """Returns uptime in seconds or None, on MINIX.""" try: with open('/proc/uptime', 'r') as f: up = float(f.read()) return up except (IOError, ValueError): return None def _uptime_plan9(): """Returns uptime in seconds or None, on Plan 9.""" # Apparently Plan 9 only has Python 2.2, which I'm not prepared to # support. Maybe some Linuxes implement /dev/time, though, someone was # talking about it somewhere. try: # The time file holds one 32-bit number representing the sec- # onds since start of epoch and three 64-bit numbers, repre- # senting nanoseconds since start of epoch, clock ticks, and # clock frequency. # -- cons(3) with open('/dev/time', 'r') as f: s, ns, ct, cf = f.read().split() return float(ct) / float(cf) except (IOError, ValueError): return None def _uptime_solaris(): """Returns uptime in seconds or None, on Solaris.""" global _BOOTTIME try: kstat = ctypes.CDLL('libkstat.so') except (AttributeError, OSError): return None # kstat doesn't have uptime, but it does have boot time. # Unfortunately, getting at it isn't perfectly straightforward. # First, let's pretend to be kstat.h # Constant KSTAT_STRLEN = 31 # According to every kstat.h I could find. # Data structures class anon_union(ctypes.Union): # The ``value'' union in kstat_named_t actually has a bunch more # members, but we're only using it for boot_time, so we only need # the padding and the one we're actually using. 
_fields_ = [('c', ctypes.c_char * 16), ('time', ctypes.c_int)] class kstat_named_t(ctypes.Structure): _fields_ = [('name', ctypes.c_char * KSTAT_STRLEN), ('data_type', ctypes.c_char), ('value', anon_union)] # Function signatures kstat.kstat_open.restype = ctypes.c_void_p kstat.kstat_lookup.restype = ctypes.c_void_p kstat.kstat_lookup.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p] kstat.kstat_read.restype = ctypes.c_int kstat.kstat_read.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] kstat.kstat_data_lookup.restype = ctypes.POINTER(kstat_named_t) kstat.kstat_data_lookup.argtypes = [ctypes.c_void_p, ctypes.c_char_p] # Now, let's do something useful. # Initialise kstat control structure. kc = kstat.kstat_open() if not kc: return None # We're looking for unix:0:system_misc:boot_time. ksp = kstat.kstat_lookup(kc, 'unix', 0, 'system_misc') if ksp and kstat.kstat_read(kc, ksp, None) != -1: data = kstat.kstat_data_lookup(ksp, 'boot_time') if data: _BOOTTIME = data.contents.value.time # Clean-up. kstat.kstat_close(kc) if _BOOTTIME is not None: return time.time() - _BOOTTIME return None def _uptime_syllable(): """Returns uptime in seconds or None, on Syllable.""" global _BOOTTIME try: _BOOTTIME = os.stat('/dev/pty/mst/pty0').st_mtime return time.time() - _BOOTTIME except (NameError, OSError): return None def _uptime_windows(): """ Returns uptime in seconds or None, on Windows. Warning: may return incorrect answers after 49.7 days on versions older than Vista. """ if hasattr(xp.LIBC, 'GetTickCount64'): # Vista/Server 2008 or later. xp.LIBC.GetTickCount64.restype = ctypes.c_uint64 return xp.LIBC.GetTickCount64() / 1000. if hasattr(xp.LIBC, 'GetTickCount'): # WinCE and Win2k or later; gives wrong answers after 49.7 days. xp.LIBC.GetTickCount.restype = ctypes.c_uint32 return xp.LIBC.GetTickCount() / 1000. 
return None @xl.lazyobject def _UPTIME_FUNCS(): return {'amiga': _uptime_amiga, 'aros12': _uptime_amiga, 'beos5': _uptime_beos, 'cygwin': _uptime_linux, 'darwin': _uptime_osx, 'haiku1': _uptime_beos, 'linux': _uptime_linux, 'linux-armv71': _uptime_linux, 'linux2': _uptime_linux, 'minix3': _uptime_minix, 'sunos5': _uptime_solaris, 'syllable': _uptime_syllable, 'win32': _uptime_windows, 'wince': _uptime_windows} def uptime(): """Returns uptime in seconds if even remotely possible, or None if not.""" if _BOOTTIME is not None: return time.time() - _BOOTTIME up = _UPTIME_FUNCS.get(sys.platform, _uptime_bsd)() if up is None: up = (_uptime_bsd() or _uptime_plan9() or _uptime_linux() or _uptime_windows() or _uptime_solaris() or _uptime_beos() or _uptime_amiga() or _uptime_syllable() or _uptime_osx()) return up def boottime(): """Returns boot time if remotely possible, or None if not.""" global _BOOTTIME if _BOOTTIME is None: up = uptime() if up is None: return None _BOOTTIME = time.time() - up return _BOOTTIME xonsh-0.6.0/xonsh/xoreutils/util.py000066400000000000000000000010471320541242300173760ustar00rootroot00000000000000"""Assorted utilities for xonsh core utils.""" def arg_handler(args, out, short, key, val, long=None): """A simple argument handler for xoreutils.""" if short in args: args.remove(short) if isinstance(key, (list, tuple)): for k in key: out[k] = val else: out[key] = val if long is not None and long in args: args.remove(long) if isinstance(key, (list, tuple)): for k in key: out[k] = val else: out[key] = val xonsh-0.6.0/xonsh/xoreutils/which.py000066400000000000000000000147271320541242300175340ustar00rootroot00000000000000"""Implements the which xoreutil.""" import os import argparse import builtins import functools from xonsh.xoreutils import _which import xonsh.platform as xp import xonsh.proc as xproc @functools.lru_cache() def _which_create_parser(): desc = "Parses arguments to which wrapper" parser = argparse.ArgumentParser('which', description=desc) parser.add_argument('args', type=str, nargs='+', help='The executables or aliases to search for') parser.add_argument('-a', '--all', action='store_true', dest='all', help='Show all matches in globals, xonsh.aliases, $PATH') parser.add_argument('-s', '--skip-alias', action='store_true', help='Do not search inxonsh.aliases', dest='skip') parser.add_argument('-V', '--version', action='version', version='{}'.format(_which.__version__), help='Display the version of the python which module ' 'used by xonsh') parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='Print out how matches were located and show ' 'near misses on stderr') parser.add_argument('-p', '--plain', action='store_true', dest='plain', help='Do not display alias expansions or location of ' 'where binaries are found. This is the ' 'default behavior, but the option can be used to ' 'override the --verbose option') parser.add_argument('--very-small-rocks', action=AWitchAWitch) if xp.ON_WINDOWS: parser.add_argument('-e', '--exts', nargs='*', type=str, help='Specify a list of extensions to use instead ' 'of the standard list for this system. This can ' 'effectively be used as an optimization to, for ' 'example, avoid stat\'s of "foo.vbs" when ' 'searching for "foo" and you know it is not a ' 'VisualBasic script but ".vbs" is on PATHEXT. 
' 'This option is only supported on Windows', dest='exts') return parser def print_global_object(arg, stdout): """Print the object.""" obj = builtins.__xonsh_ctx__.get(arg) print('global object of {}'.format(type(obj)), file=stdout) def print_path(abs_name, from_where, stdout, verbose=False, captured=False): """Print the name and path of the command.""" if xp.ON_WINDOWS: # Use list dir to get correct case for the filename # i.e. windows is case insensitive but case preserving p, f = os.path.split(abs_name) f = next(s.name for s in xp.scandir(p) if s.name.lower() == f.lower()) abs_name = os.path.join(p, f) if builtins.__xonsh_env__.get('FORCE_POSIX_PATHS', False): abs_name.replace(os.sep, os.altsep) if verbose: print('{} ({})'.format(abs_name, from_where), file=stdout) else: end = '' if captured else '\n' print(abs_name, end=end, file=stdout) def print_alias(arg, stdout, verbose=False): """Print the alias.""" if not verbose: if not callable(builtins.aliases[arg]): print(' '.join(builtins.aliases[arg]), file=stdout) else: print(arg, file=stdout) else: print("aliases['{}'] = {}".format(arg, builtins.aliases[arg]), file=stdout) if callable(builtins.aliases[arg]): builtins.__xonsh_superhelp__(builtins.aliases[arg]) def which(args, stdin=None, stdout=None, stderr=None, spec=None): """ Checks if each arguments is a xonsh aliases, then if it's an executable, then finally return an error code equal to the number of misses. If '-a' flag is passed, run both to return both `xonsh` match and `which` match. """ parser = _which_create_parser() if len(args) == 0: parser.print_usage(file=stderr) return -1 pargs = parser.parse_args(args) verbose = pargs.verbose or pargs.all if spec is not None: captured = spec.captured in xproc.STDOUT_CAPTURE_KINDS else: captured = False if pargs.plain: verbose = False if xp.ON_WINDOWS: if pargs.exts: exts = pargs.exts else: exts = builtins.__xonsh_env__['PATHEXT'] else: exts = None failures = [] for arg in pargs.args: nmatches = 0 if pargs.all and arg in builtins.__xonsh_ctx__: print_global_object(arg, stdout) nmatches += 1 if arg in builtins.aliases and not pargs.skip: print_alias(arg, stdout, verbose) nmatches += 1 if not pargs.all: continue # which.whichgen gives the nicest 'verbose' output if PATH is taken # from os.environ so we temporarily override it with # __xosnh_env__['PATH'] original_os_path = xp.os_environ['PATH'] xp.os_environ['PATH'] = builtins.__xonsh_env__.detype()['PATH'] matches = _which.whichgen(arg, exts=exts, verbose=verbose) for abs_name, from_where in matches: print_path(abs_name, from_where, stdout, verbose, captured) nmatches += 1 if not pargs.all: break xp.os_environ['PATH'] = original_os_path if not nmatches: failures.append(arg) if len(failures) == 0: return 0 else: print('{} not in '.format(', '.join(failures)), file=stderr, end='') if pargs.all: print('globals or ', file=stderr, end='') print('$PATH', file=stderr, end='') if not pargs.skip: print(' or xonsh.builtins.aliases', file=stderr, end='') print('', file=stderr, end='\n') return len(failures) class AWitchAWitch(argparse.Action): """The Ducstring, the mother of all ducs.""" SUPPRESS = '==SUPPRESS==' def __init__(self, option_strings, version=None, dest=SUPPRESS, default=SUPPRESS, **kwargs): super().__init__(option_strings=option_strings, dest=dest, default=default, nargs=0, **kwargs) def __call__(self, parser, namespace, values, option_string=None): import webbrowser webbrowser.open('https://github.com/xonsh/xonsh/commit/f49b400') parser.exit() 
xonsh-0.6.0/xonsh/xoreutils/yes.py000066400000000000000000000012031320541242300172130ustar00rootroot00000000000000"""An implementation of yes for xonsh.""" def yes(args, stdin, stdout, stderr): """A yes command.""" if '--help' in args: print(YES_HELP, file=stdout) return 0 to_print = ["y"] if len(args) == 0 else [str(i) for i in args] while True: print(*to_print, file=stdout) return 0 YES_HELP = """Usage: yes [STRING]... or: yes OPTION Repeatedly output a line with all specified STRING(s), or 'y'. --help display this help and exit This version of yes was written in Python for the xonsh project: http://xon.sh Based on yes from GNU coreutils: http://www.gnu.org/software/coreutils/""" xonsh-0.6.0/xontrib/000077500000000000000000000000001320541242300143355ustar00rootroot00000000000000xonsh-0.6.0/xontrib/README000066400000000000000000000003001320541242300152060ustar00rootroot00000000000000xontrib is an implicit namespace package. DO NOT add an __init__.py file to this directory. Feel free to add both *.xsh and *.py files to the directory, they will be installed and available. xonsh-0.6.0/xontrib/bashisms.py000066400000000000000000000030541320541242300165220ustar00rootroot00000000000000"""Bash-like interface extensions for xonsh.""" import shlex import sys from prompt_toolkit.keys import Keys from prompt_toolkit.filters import Condition, EmacsInsertMode, ViInsertMode __all__ = () @events.on_transform_command def bash_preproc(cmd, **kw): if not __xonsh_history__.inps: if cmd.strip() == '!!': return '' return cmd return cmd.replace('!!', __xonsh_history__.inps[-1].strip()) @events.on_ptk_create def custom_keybindings(bindings, **kw): handler = bindings.registry.add_binding insert_mode = ViInsertMode() | EmacsInsertMode() @Condition def last_command_exists(cli): return len(__xonsh_history__) > 0 @handler(Keys.Escape, '.', filter=last_command_exists & insert_mode) def recall_last_arg(event): arg = __xonsh_history__[-1].cmd.split()[-1] event.current_buffer.insert_text(arg) def alias(args, stdin=None): ret = 0 if args: for arg in args: if '=' in arg: # shlex.split to remove quotes, e.g. "foo='echo hey'" into # "foo=echo hey" name, cmd = shlex.split(arg)[0].split('=', 1) aliases[name] = shlex.split(cmd) elif arg in aliases: print('{}={}'.format(arg, aliases[arg])) else: print("alias: {}: not found".format(arg), file=sys.stderr) ret = 1 else: for alias, cmd in aliases.items(): print('{}={}'.format(alias, cmd)) return ret aliases['alias'] = alias xonsh-0.6.0/xontrib/coreutils.py000066400000000000000000000013231320541242300167170ustar00rootroot00000000000000"""Additional core utilities that are implemented in xonsh. The current list includes: * cat * echo * pwd * tee * tty * yes In many cases, these may have a lower performance overhead than the posix command line utility with the same name. This is because these tools avoid the need for a full subprocess call. Additionally, these tools are cross-platform. 
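For example, once this xontrib is loaded (typically via ``xontrib load coreutils``), the aliases registered below behave like their coreutils namesakes, e.g. ``echo hello | tee greeting.txt``.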
""" from xonsh.xoreutils.cat import cat from xonsh.xoreutils.echo import echo from xonsh.xoreutils.pwd import pwd from xonsh.xoreutils.tee import tee from xonsh.xoreutils.tty import tty from xonsh.xoreutils.yes import yes __all__ = () aliases['cat'] = cat aliases['echo'] = echo aliases['pwd'] = pwd aliases['tee'] = tee aliases['tty'] = tty aliases['yes'] = yes xonsh-0.6.0/xontrib/distributed.py000066400000000000000000000040711320541242300172330ustar00rootroot00000000000000"""Hooks for the distributed parallel computing library.""" from xonsh.contexts import Functor __all__ = 'DSubmitter', 'dsubmit' def dworker(args, stdin=None): """Programmatic access to the dworker utility, to allow launching workers that also have access to xonsh builtins. """ from distributed.cli import dworker dworker.main.main(args=args, prog_name='dworker', standalone_mode=False) aliases['dworker'] = dworker class DSubmitter(Functor): """Context manager for submitting distributed jobs.""" def __init__(self, executor, **kwargs): """ Parameters ---------- executor : distributed.Executor The executor to submit to. kwargs : optional All other kwargs are passed up to superclasses init. """ super().__init__(**kwargs) self.executor = executor self.future = None def __enter__(self): super().__enter__() self.future = None return self def __exit__(self, exc_type, exc_value, traceback): res = super().__exit__(exc_type, exc_value, traceback) if not res: return res self.future = self.executor.submit(self.func) return res def dsubmit(*a, args=(), kwargs=None, rtn='', **kw): """Returns a distributed submission context manager, DSubmitter(), with a new executor instance. Parameters ---------- args : Sequence of str, optional A tuple of argument names for DSubmitter. kwargs : Mapping of str to values or list of item tuples, optional Keyword argument names and values for DSubmitter. rtn : str, optional Name of object to return for DSubmitter. a, kw : Sequence and Mapping All other arguments and keyword arguments are used to construct the executor instance. Returns ------- dsub : DSubmitter An instance of the DSubmitter context manager. """ from distributed import Executor e = Executor(*a, **kw) dsub = DSubmitter(e, args=args, kwargs=kwargs, rtn=rtn) return dsub xonsh-0.6.0/xontrib/free_cwd.py000066400000000000000000000062741320541242300164760ustar00rootroot00000000000000""" This will release the lock on the current directory whenever the prompt is shown. Enabling this will allow the other programs or Windows Explorer to delete or rename the current or parent directories. Internally, it is accomplished by temporarily resetting CWD to the root drive folder while waiting at the prompt. This only works with the prompt_toolkit backend and can cause cause issues if any extensions are enabled that hook the prompt and relies on ``os.getcwd()``. """ import os import builtins import functools from xonsh.tools import print_exception def _chdir_up(path): """ Change directory to path or if path does not exist the first valid parent. """ try: os.chdir(path) return path except (FileNotFoundError, NotADirectoryError): parent = os.path.dirname(path) if parent != path: return _chdir_up(parent) else: raise def _cwd_release_wrapper(func): """ Decorator for Windows to the wrap the prompt function and release the process lock on the current directory while the prompt is displayed. This works by temporarily setting the workdir to the users home directory. 
""" env = builtins.__xonsh_env__ if env.get('UPDATE_PROMPT_ON_KEYPRESS'): return func if not hasattr(func, '_orgfunc') else func._orgfunc if hasattr(func, '_orgfunc'): # Already wrapped return func else: @functools.wraps(func) def wrapper(*args, **kwargs): rootdir = os.path.splitdrive(os.getcwd())[0] + '\\' os.chdir(rootdir) try: out = func(*args, **kwargs) finally: try: pwd = env.get('PWD', rootdir) os.chdir(pwd) except (FileNotFoundError, NotADirectoryError): print_exception() newpath = _chdir_up(pwd) builtins.__xonsh_env__['PWD'] = newpath raise KeyboardInterrupt return out wrapper._orgfunc = func return wrapper def _cwd_restore_wrapper(func): """ Decorator for Windows which will temporary restore the true working directory. Designed to wrap completer callbacks from the prompt_toolkit or readline. """ env = builtins.__xonsh_env__ if env.get('UPDATE_PROMPT_ON_KEYPRESS'): return func if not hasattr(func, '_orgfunc') else func._orgfunc if hasattr(func, '_orgfunc'): # Already wrapped return func else: @functools.wraps(func) def wrapper(*args, **kwargs): workdir = os.getcwd() _chdir_up(env.get('PWD', workdir)) out = func(*args, **kwargs) _chdir_up(workdir) return out wrapper._orgfunc = func return wrapper @events.on_ptk_create def setup_release_cwd_hook(prompter, history, completer, bindings, **kw): prompter.prompt = _cwd_release_wrapper(prompter.prompt) if completer.completer: # Temporarily restore cwd for callbacks to the completer completer.completer.complete = _cwd_restore_wrapper(completer.completer.complete) xonsh-0.6.0/xontrib/jedi.xsh000066400000000000000000000021741320541242300160000ustar00rootroot00000000000000"""Jedi-based completer for Python-mode.""" import builtins import importlib from xonsh.lazyasd import lazyobject, lazybool __all__ = () @lazybool def HAS_JEDI(): """``True`` if `jedi` is available, else ``False``.""" spec = importlib.util.find_spec('jedi') return (spec is not None) @lazyobject def jedi(): if HAS_JEDI: import jedi as m else: m = None return m def complete_jedi(prefix, line, start, end, ctx): """Jedi-based completer for Python-mode.""" if not HAS_JEDI: return set() src = builtins.__xonsh_shell__.shell.accumulated_inputs + line script = jedi.api.Interpreter(src, [ctx], column=end) if builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS'): rtn = {x.name_with_symbols for x in script.completions() if x.name_with_symbols.startswith(prefix)} else: rtn = {x.name_with_symbols for x in script.completions()} return rtn # register the completer builtins.__xonsh_ctx__['complete_jedi'] = complete_jedi completer add jedi complete_jedi end completer remove python_mode del builtins.__xonsh_ctx__['complete_jedi'] xonsh-0.6.0/xontrib/mpl.py000066400000000000000000000033641320541242300155050ustar00rootroot00000000000000"""Matplotlib xontribution. This xontrib should be loaded before matplotlib is imported. """ from xonsh.tools import unthreadable from xonsh.lazyasd import lazyobject __all__ = () @unthreadable def mpl(args, stdin=None): """Hooks to matplotlib""" from xontrib.mplhooks import show show() aliases['mpl'] = mpl @lazyobject def pylab_helpers(): try: import matplotlib._pylab_helpers as m except ImportError: m = None return m @events.on_import_post_exec_module def interactive_pyplot(module=None, **kwargs): """This puts pyplot in interactive mode once it is imported.""" if module.__name__ != 'matplotlib.pyplot' or \ not __xonsh_env__.get('XONSH_INTERACTIVE'): return # Since we are in interactive mode, let's monkey-patch plt.show # to try to never block. 
module.ion() module._INSTALL_FIG_OBSERVER = False plt_show = module.show def xonsh_show(*args, **kwargs): """This is a monkey patched version of matplotlib.pyplot.show() for xonsh's interactive mode. First it tries non-blocking mode (block=False). If for some reason this fails, it will run show in normal blocking mode (block=True). """ kwargs.update(block=False) rtn = plt_show(*args, **kwargs) figmanager = pylab_helpers.Gcf.get_active() if figmanager is not None: # unblocked mode failed, try blocking. kwargs.update(block=True) rtn = plt_show(*args, **kwargs) return rtn module.show = xonsh_show # register figure drawer @events.on_postcommand def redraw_mpl_figure(**kwargs): """Redraws the current matplotlib figure after each command.""" pylab_helpers.Gcf.draw_all() xonsh-0.6.0/xontrib/mplhooks.py000066400000000000000000000120231320541242300165410ustar00rootroot00000000000000"""Matplotlib hooks, for what its worth.""" from io import BytesIO import shutil import numpy as np import matplotlib import matplotlib.pyplot as plt from xonsh.tools import print_color, ON_WINDOWS try: # Use iterm2_tools as an indicator for the iterm2 terminal emulator from iterm2_tools.images import display_image_bytes except ImportError: _use_iterm = False else: _use_iterm = True XONTRIB_MPL_MINIMAL_DEFAULT = True def _get_buffer(fig, **kwargs): b = BytesIO() fig.savefig(b, **kwargs) b.seek(0) return b def figure_to_rgb_array(fig, shape=None): """Converts figure to a numpy array Parameters ---------- fig : matplotlib.figure.Figure the figure to be plotted shape : iterable with the shape of the output array. by default this attempts to use the pixel height and width of the figure Returns ------- array : np.ndarray An RGBA array of the image represented by the figure. Note: the method will throw an exception if the given shape is wrong. """ array = np.frombuffer(_get_buffer(fig, dpi=fig.dpi, format='raw').read(), dtype='uint8') if shape is None: w, h = fig.canvas.get_width_height() shape = (h, w, 4) return array.reshape(*shape) def figure_to_tight_array(fig, width, height, minimal=True): """Converts figure to a numpy array of rgb values of tight value Parameters ---------- fig : matplotlib.figure.Figure the figure to be plotted width : int pixel width of the final array height : int pixel height of the final array minimal : bool whether or not to reduce the output array to minimized margins/whitespace text is also eliminated Returns ------- array : np.ndarray An RGBA array of the image represented by the figure. 
""" # store the properties of the figure in order to restore it w, h = fig.canvas.get_width_height() dpi_fig = fig.dpi if minimal: # perform reversible operations to produce an optimally tight layout dpi = dpi_fig subplotpars = { k: getattr(fig.subplotpars, k) for k in ['wspace', 'hspace', 'bottom', 'top', 'left', 'right'] } # set the figure dimensions to the terminal size fig.set_size_inches(width/dpi, height/dpi, forward=True) width, height = fig.canvas.get_width_height() # remove all space between subplots fig.subplots_adjust(wspace=0, hspace=0) # move all subplots to take the entirety of space in the figure # leave only one line for top and bottom fig.subplots_adjust(bottom=1/height, top=1-1/height, left=0, right=1) # reduce font size in order to reduce text impact on the image font_size = matplotlib.rcParams['font.size'] matplotlib.rcParams.update({'font.size': 0}) else: dpi = min([width * fig.dpi // w, height * fig.dpi // h]) fig.dpi = dpi width, height = fig.canvas.get_width_height() # Draw the renderer and get the RGB buffer from the figure array = figure_to_rgb_array(fig, shape=(height, width, 4)) if minimal: # cleanup after tight layout # clean up rcParams matplotlib.rcParams.update({'font.size': font_size}) # reset the axis positions and figure dimensions fig.set_size_inches(w/dpi, h/dpi, forward=True) fig.subplots_adjust(**subplotpars) else: fig.dpi = dpi_fig return array def buf_to_color_str(buf): """Converts an RGB array to a xonsh color string.""" space = ' ' pix = '{{bg#{0:02x}{1:02x}{2:02x}}} ' pixels = [] for h in range(buf.shape[0]): last = None for w in range(buf.shape[1]): rgb = buf[h, w] if last is not None and (last == rgb).all(): pixels.append(space) else: pixels.append(pix.format(*rgb)) last = rgb pixels.append('{NO_COLOR}\n') pixels[-1] = pixels[-1].rstrip() return ''.join(pixels) def display_figure_with_iterm2(fig): """Displays a matplotlib figure using iterm2 inline-image escape sequence. 
Parameters ---------- fig : matplotlib.figure.Figure the figure to be plotted """ print(display_image_bytes(_get_buffer(fig, format='png', dpi=fig.dpi).read())) def show(): '''Run the mpl display sequence by printing the most recent figure to console''' try: minimal = __xonsh_env__['XONTRIB_MPL_MINIMAL'] except KeyError: minimal = XONTRIB_MPL_MINIMAL_DEFAULT fig = plt.gcf() if _use_iterm: display_figure_with_iterm2(fig) else: # Display the image using terminal characters to fit into the console w, h = shutil.get_terminal_size() if ON_WINDOWS: w -= 1 # @melund reports that win terminals are too thin h -= 1 # leave space for next prompt buf = figure_to_tight_array(fig, w, h, minimal) s = buf_to_color_str(buf) print_color(s) xonsh-0.6.0/xontrib/prompt_ret_code.xsh000066400000000000000000000016201320541242300202450ustar00rootroot00000000000000from xonsh.tools import ON_WINDOWS as _ON_WINDOWS def _ret_code_color(): if __xonsh_history__.rtns: color = 'blue' if __xonsh_history__.rtns[-1] == 0 else 'red' else: color = 'blue' if _ON_WINDOWS: if color == 'blue': return '{BOLD_INTENSE_CYAN}' elif color == 'red': return '{BOLD_INTENSE_RED}' else: if color == 'blue': return '{BOLD_BLUE}' elif color == 'red': return '{BOLD_RED}' def _ret_code(): if __xonsh_history__.rtns: return_code = __xonsh_history__.rtns[-1] if return_code != 0: return '[{}]'.format(return_code) return None $PROMPT = $PROMPT.replace('{prompt_end}{NO_COLOR}', '{ret_code_color}{ret_code}{prompt_end}{NO_COLOR}') $PROMPT_FIELDS['ret_code_color'] = _ret_code_color $PROMPT_FIELDS['ret_code'] = _ret_code xonsh-0.6.0/xontrib/vox.py000066400000000000000000000134101320541242300155220ustar00rootroot00000000000000"""Python virtual environment manager for xonsh.""" import sys import xontrib.voxapi as voxapi import xonsh.lazyasd as lazyasd __all__ = () class VoxHandler: """Vox is a virtual environment manager for xonsh.""" def parser(): from argparse import ArgumentParser parser = ArgumentParser(prog='vox', description=__doc__) subparsers = parser.add_subparsers(dest='command') create = subparsers.add_parser( 'new', aliases=['create'], help='Create a new virtual environment' ) create.add_argument('name', metavar='ENV', help='The environments to create') create.add_argument('--system-site-packages', default=False, action='store_true', dest='system_site', help='Give the virtual environment access to the ' 'system site-packages dir.') from xonsh.platform import ON_WINDOWS group = create.add_mutually_exclusive_group() group.add_argument('--symlinks', default=not ON_WINDOWS, action='store_true', dest='symlinks', help='Try to use symlinks rather than copies, ' 'when symlinks are not the default for ' 'the platform.') group.add_argument('--copies', default=ON_WINDOWS, action='store_false', dest='symlinks', help='Try to use copies rather than symlinks, ' 'even when symlinks are the default for ' 'the platform.') create.add_argument('--without-pip', dest='with_pip', default=True, action='store_false', help='Skips installing or upgrading pip in the ' 'virtual environment (pip is bootstrapped ' 'by default)') activate = subparsers.add_parser( 'activate', aliases=['workon', 'enter'], help='Activate virtual environment' ) activate.add_argument('name', metavar='ENV', help='The environment to activate') subparsers.add_parser('deactivate', aliases=['exit'], help='Deactivate current virtual environment') subparsers.add_parser('list', aliases=['ls'], help='List all available environments') remove = subparsers.add_parser('remove', aliases=['rm', 'delete', 'del'], 
help='Remove virtual environment') remove.add_argument('names', metavar='ENV', nargs='+', help='The environments to remove') subparsers.add_parser('help', help='Show this help message') return parser parser = lazyasd.LazyObject(parser, locals(), 'parser') aliases = { 'create': 'new', 'workon': 'activate', 'enter': 'activate', 'exit': 'deactivate', 'ls': 'list', 'rm': 'remove', 'delete': 'remove', 'del': 'remove', } def __init__(self): self.vox = voxapi.Vox() def __call__(self, args, stdin=None): """Call the right handler method for a given command.""" args = self.parser.parse_args(args) cmd = self.aliases.get(args.command, args.command) if cmd is None: self.parser.print_usage() else: getattr(self, 'cmd_'+cmd)(args, stdin) def cmd_new(self, args, stdin=None): """Create a virtual environment in $VIRTUALENV_HOME with python3's ``venv``. """ print('Creating environment...') self.vox.create(args.name) msg = 'Environment {0!r} created. Activate it with "vox activate {0}".\n' print(msg.format(args.name)) def cmd_activate(self, args, stdin=None): """Activate a virtual environment. """ try: self.vox.activate(args.name) except KeyError: print('This environment doesn\'t exist. Create it with "vox new %s".\n' % args.name, file=sys.stderr) return None else: print('Activated "%s".\n' % args.name) def cmd_deactivate(self, args, stdin=None): """Deactivate the active virtual environment.""" if self.vox.active() is None: print('No environment currently active. Activate one with "vox activate".\n', file=sys.stderr) return None env_name = self.vox.deactivate() print('Deactivated "%s".\n' % env_name) def cmd_list(self, args, stdin=None): """List available virtual environments.""" try: envs = sorted(self.vox.keys()) except PermissionError: print('No permissions on VIRTUALENV_HOME') return None if not envs: print('No environments available. Create one with "vox new".\n', file=sys.stderr) return None print('Available environments:') print('\n'.join(envs)) def cmd_remove(self, args, stdin=None): """Remove virtual environments. """ for name in args.names: try: del self.vox[name] except voxapi.EnvironmentInUse: print('The "%s" environment is currently active. In order to remove it, deactivate it first with "vox deactivate %s".\n' % (name, name), file=sys.stderr) return else: print('Environment "%s" removed.' % name) print() def cmd_help(self, args, stdin=None): self.parser.print_help() @classmethod def handle(cls, args, stdin=None): """Runs Vox environment manager.""" vox = cls() return vox(args, stdin=stdin) aliases['vox'] = VoxHandler.handle xonsh-0.6.0/xontrib/voxapi.py000066400000000000000000000246601320541242300162250ustar00rootroot00000000000000""" API for Vox, the Python virtual environment manager for xonsh. Vox defines several events related to the life cycle of virtual environments: * ``vox_on_create(env: str) -> None`` * ``vox_on_activate(env: str) -> None`` * ``vox_on_deactivate(env: str) -> None`` * ``vox_on_delete(env: str) -> None`` """ import os import sys import venv import shutil import builtins import collections.abc from xonsh.platform import ON_POSIX, ON_WINDOWS from xonsh.fs import PathLike, fspath # This is because builtins aren't globally created during testing. # FIXME: Is there a better way? from xonsh.events import events events.doc('vox_on_create', """ vox_on_create(env: str) -> None Fired after an environment is created. """) events.doc('vox_on_activate', """ vox_on_activate(env: str) -> None Fired after an environment is activated. 
""") events.doc('vox_on_deactivate', """ vox_on_deactivate(env: str) -> None Fired after an environment is deactivated. """) events.doc('vox_on_delete', """ vox_on_delete(env: str) -> None Fired after an environment is deleted (through vox). """) VirtualEnvironment = collections.namedtuple('VirtualEnvironment', ['env', 'bin', 'lib', 'inc']) def _subdir_names(): """ Gets the names of the special dirs in a venv. This is not necessarily exhaustive of all the directories that could be in a venv, and there may additional logic to get to useful places. """ if ON_WINDOWS: return 'Scripts', 'Lib', 'Include' elif ON_POSIX: return 'bin', 'lib', 'include' else: raise OSError('This OS is not supported.') def _mkvenv(env_dir): """ Constructs a VirtualEnvironment based on the given base path. This only cares about the platform. No filesystem calls are made. """ env_dir = os.path.normpath(env_dir) if ON_WINDOWS: binname = os.path.join(env_dir, 'Scripts') incpath = os.path.join(env_dir, 'Include') libpath = os.path.join(env_dir, 'Lib', 'site-packages') elif ON_POSIX: binname = os.path.join(env_dir, 'bin') incpath = os.path.join(env_dir, 'include') libpath = os.path.join(env_dir, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages') else: raise OSError('This OS is not supported.') return VirtualEnvironment(env_dir, binname, libpath, incpath) class EnvironmentInUse(Exception): """The given environment is currently activated, and the operation cannot be performed.""" class NoEnvironmentActive(Exception): """No environment is currently activated, and the operation cannot be performed.""" class Vox(collections.abc.Mapping): """API access to Vox and virtual environments, in a dict-like format. Makes use of the VirtualEnvironment namedtuple: 1. ``env``: The full path to the environment 2. ``bin``: The full path to the bin/Scripts directory of the environment """ def __init__(self): if not builtins.__xonsh_env__.get('VIRTUALENV_HOME'): home_path = os.path.expanduser('~') self.venvdir = os.path.join(home_path, '.virtualenvs') builtins.__xonsh_env__['VIRTUALENV_HOME'] = self.venvdir else: self.venvdir = builtins.__xonsh_env__['VIRTUALENV_HOME'] def create(self, name, *, system_site_packages=False, symlinks=False, with_pip=True): """Create a virtual environment in $VIRTUALENV_HOME with python3's ``venv``. Parameters ---------- name : str Virtual environment name system_site_packages : bool If True, the system (global) site-packages dir is available to created environments. symlinks : bool If True, attempt to symlink rather than copy files into virtual environment. with_pip : bool If True, ensure pip is installed in the virtual environment. (Default is True) """ # NOTE: clear=True is the same as delete then create. # NOTE: upgrade=True is its own method if isinstance(name, PathLike): env_path = fspath(name) else: env_path = os.path.join(self.venvdir, name) if not self._check_reserved(env_path): raise ValueError("venv can't contain reserved names ({})".format(', '.join(_subdir_names()))) venv.create( env_path, system_site_packages=system_site_packages, symlinks=symlinks, with_pip=with_pip) events.vox_on_create.fire(name=name) def upgrade(self, name, *, symlinks=False, with_pip=True): """Create a virtual environment in $VIRTUALENV_HOME with python3's ``venv``. WARNING: If a virtual environment was created with symlinks or without PIP, you must specify these options again on upgrade. 
Parameters ---------- name : str Virtual environment name symlinks : bool If True, attempt to symlink rather than copy files into virtual environment. with_pip : bool If True, ensure pip is installed in the virtual environment. """ # venv doesn't reload this, so we have to do it ourselves. # Is there a bug for this in Python? There should be. env_path, bin_path = self[name] cfgfile = os.path.join(env_path, 'pyvenv.cfg') cfgops = {} with open(cfgfile) as cfgfile: for l in cfgfile: l = l.strip() if '=' not in l: continue k, v = l.split('=', 1) cfgops[k.strip()] = v.strip() flags = { 'system_site_packages': cfgops['include-system-site-packages'] == 'true', 'symlinks': symlinks, 'with_pip': with_pip, } # END things we shouldn't be doing. # Ok, do what we came here to do. venv.create(env_path, upgrade=True, **flags) @staticmethod def _check_reserved(name): return os.path.basename(name) not in _subdir_names() # FIXME: Check the middle components, too def __getitem__(self, name): """Get information about a virtual environment. Parameters ---------- name : str or Ellipsis Virtual environment name or absolute path. If ... is given, return the current one (throws a KeyError if there isn't one). """ if name is ...: env_paths = [builtins.__xonsh_env__['VIRTUAL_ENV']] elif isinstance(name, PathLike): env_paths = [fspath(name)] else: if not self._check_reserved(name): # Don't allow a venv that could be a venv special dir raise KeyError() env_paths = [] if os.path.isdir(name): env_paths += [name] env_paths += [os.path.join(self.venvdir, name)] for ep in env_paths: ve = _mkvenv(ep) # Actually check if this is an actual venv or just a organizational directory # eg, if 'spam/eggs' is a venv, reject 'spam' if not os.path.exists(ve.bin): continue return ve else: raise KeyError() def __contains__(self, name): # For some reason, MutableMapping seems to do this against iter, which is just silly. try: self[name] except KeyError: return False else: return True def __iter__(self): """List available virtual environments found in $VIRTUALENV_HOME. """ bin_, lib, inc = _subdir_names() for dirpath, dirnames, _ in os.walk(self.venvdir): if bin_ in dirnames and lib in dirnames: yield dirpath[len(self.venvdir)+1:] # +1 is to remove the separator # Don't recurse in to the special dirs dirnames.remove(bin_) dirnames.remove(lib) # This one in particular is likely to be quite large. dirnames.remove(inc) def __len__(self): """Counts known virtual environments, using the same rules as iter(). """ l = 0 for _ in self: l += 1 return l def active(self): """Get the name of the active virtual environment. You can use this as a key to get further information. Returns None if no environment is active. """ if 'VIRTUAL_ENV' not in builtins.__xonsh_env__: return env_path = builtins.__xonsh_env__['VIRTUAL_ENV'] if env_path.startswith(self.venvdir): name = env_path[len(self.venvdir):] if name[0] in '/\\': name = name[1:] return name else: return env_path def activate(self, name): """ Activate a virtual environment. Parameters ---------- name : str Virtual environment name or absolute path. """ env = builtins.__xonsh_env__ ve = self[name] if 'VIRTUAL_ENV' in env: self.deactivate() type(self).oldvars = {'PATH': list(env['PATH'])} env['PATH'].insert(0, ve.bin) env['VIRTUAL_ENV'] = ve.env if 'PYTHONHOME' in env: type(self).oldvars['PYTHONHOME'] = env.pop('PYTHONHOME') events.vox_on_activate.fire(name=name) def deactivate(self): """ Deactivate the active virtual environment. Returns its name. 
""" env = builtins.__xonsh_env__ if 'VIRTUAL_ENV' not in env: raise NoEnvironmentActive('No environment currently active.') env_name = self.active() if hasattr(type(self), 'oldvars'): for k, v in type(self).oldvars.items(): env[k] = v del type(self).oldvars env.pop('VIRTUAL_ENV') events.vox_on_deactivate.fire(name=env_name) return env_name def __delitem__(self, name): """ Permanently deletes a virtual environment. Parameters ---------- name : str Virtual environment name or absolute path. """ env_path = self[name].env try: if self[...].env == env_path: raise EnvironmentInUse('The "%s" environment is currently active.' % name) except KeyError: # No current venv, ... fails pass shutil.rmtree(env_path) events.vox_on_delete.fire(name=name) xonsh-0.6.0/xontrib/whole_word_jumping.py000066400000000000000000000017651320541242300206220ustar00rootroot00000000000000"""Jumping across whole words (non-whitespace) with Ctrl+Left/Right. Alt+Left/Right remains unmodified to jump over smaller word segments. """ from prompt_toolkit.keys import Keys __all__ = () @events.on_ptk_create def custom_keybindings(bindings, **kw): # Key bindings for jumping over whole words (everything that's not # white space) using Ctrl+Left and Ctrl+Right; # Alt+Left and Alt+Right still jump over smaller word segments. # See https://github.com/xonsh/xonsh/issues/2403 handler = bindings.registry.add_binding @handler(Keys.ControlLeft) def ctrl_left(event): buff = event.current_buffer pos = buff.document.find_previous_word_beginning(count=event.arg, WORD=True) if pos: buff.cursor_position += pos @handler(Keys.ControlRight) def ctrl_right(event): buff = event.current_buffer pos = buff.document.find_next_word_ending(count=event.arg, WORD=True) if pos: buff.cursor_position += pos xonsh-0.6.0/xontribs.json000077700000000000000000000000001320541242300213102xonsh/xontribs.jsonustar00rootroot00000000000000