pax_global_header00006660000000000000000000000064146774756160014541gustar00rootroot0000000000000052 comment=cf10d272527f0e71b4ab3dd13bed7b1dfbe6e1ed parse_type-0.6.4/000077500000000000000000000000001467747561600137235ustar00rootroot00000000000000parse_type-0.6.4/.coveragerc000066400000000000000000000021741467747561600160500ustar00rootroot00000000000000# ========================================================================= # COVERAGE CONFIGURATION FILE: .coveragerc # ========================================================================= # LANGUAGE: Python # SEE ALSO: # * http://nedbatchelder.com/code/coverage/ # * http://nedbatchelder.com/code/coverage/config.html # ========================================================================= [run] # data_file = .coverage source = parse_type branch = True parallel = True omit = mock.py, ez_setup.py, distribute.py [report] ignore_errors = True show_missing = True # Regexes for lines to exclude from consideration exclude_lines = # Have to re-enable the standard pragma pragma: no cover # Don't complain about missing debug-only code: def __repr__ if self\.debug # Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError # Don't complain if non-runnable code isn't run: if 0: if False: if __name__ == .__main__.: [html] directory = build/coverage.html title = Coverage Report: parse_type [xml] output = build/coverage.xml parse_type-0.6.4/.editorconfig000066400000000000000000000010641467747561600164010ustar00rootroot00000000000000# ============================================================================= # EDITOR CONFIGURATION: http://editorconfig.org # ============================================================================= root = true # -- DEFAULT: Unix-style newlines with a newline ending every file. 
[*] charset = utf-8 end_of_line = lf insert_final_newline = true trim_trailing_whitespace = true [*.{py,rst,ini,txt}] indent_style = space indent_size = 4 [*.feature] indent_style = space indent_size = 2 [**/makefile] indent_style = tab [*.{cmd,bat}] end_of_line = crlf parse_type-0.6.4/.envrc000066400000000000000000000020121467747561600150340ustar00rootroot00000000000000# =========================================================================== # PROJECT ENVIRONMENT SETUP: parse_type/.envrc # =========================================================================== # SHELL: bash (or similiar) # USAGE: # # -- BETTER: Use direnv (requires: Setup in bash -- $HOME/.bashrc) # # BASH PROFILE NEEDS: eval "$(direnv hook bash)" # direnv allow . # # SIMPLISTIC ALTERNATIVE (without cleanup when directory scope is left again): # source .envrc # # SEE ALSO: # * https://direnv.net/ # * https://peps.python.org/pep-0582/ Python local packages directory # =========================================================================== # MAYBE: HERE="${PWD}" # -- USE OPTIONAL PARTS (if exist/enabled): # DISABLED: dotenv_if_exists .env source_env_if_exists .envrc.use_venv # -- SETUP-PYTHON: Prepend ${HERE} to PYTHONPATH (as PRIMARY search path) # SIMILAR TO: export PYTHONPATH="${HERE}:${PYTHONPATH}" path_add PYTHONPATH . path_add PATH bin # DISABLED: source_env_if_exists .envrc.override parse_type-0.6.4/.envrc.use_venv000066400000000000000000000014671467747561600167020ustar00rootroot00000000000000# =========================================================================== # PROJECT ENVIRONMENT SETUP: parse_type/.envrc.use_venv # =========================================================================== # DESCRIPTION: # Setup and use a Python virtual environment (venv). # On entering the directory: Creates and activates a venv for a python version. # On leaving the directory: Deactivates the venv (virtual environment). 
# # SEE ALSO: # * https://direnv.net/ # * https://github.com/direnv/direnv/wiki/Python # * https://direnv.net/man/direnv-stdlib.1.html#codelayout-python-ltpythonexegtcode # =========================================================================== # -- VIRTUAL ENVIRONMENT SUPPORT: layout python python3 # VENV LOCATION: .direnv/python-$(PYTHON_VERSION) layout python python3 parse_type-0.6.4/.github/000077500000000000000000000000001467747561600152635ustar00rootroot00000000000000parse_type-0.6.4/.github/workflows/000077500000000000000000000000001467747561600173205ustar00rootroot00000000000000parse_type-0.6.4/.github/workflows/release-to-pypi.yml000066400000000000000000000034541467747561600230700ustar00rootroot00000000000000# -- WORKFLOW: Publish/release this package on PyPI # SEE: # * https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python # * https://docs.github.com/en/actions/use-cases-and-examples/building-and-testing/building-and-testing-python#publishing-to-pypi # # * https://docs.github.com/en/actions/writing-workflows # * https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs # * https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#release # # GITHUB ACTIONS: # * https://github.com/actions/checkout # * https://github.com/pypa/gh-action-pypi-publish # # RELATED: # * https://github.com/actions/starter-workflows/blob/main/ci/python-publish.yml # -- STATE: PREPARED_ONLY, NOT_RELEASED_YET name: release-to-pypi on: release: types: [published] tags: - v0.* - v1.* permissions: contents: read jobs: publish-package: runs-on: ubuntu-latest if: ${{ startsWith(github.ref, 'refs/tags/v') }} environment: name: pypi url: https://pypi.org/p/parse-type permissions: id-token: write # REQUIRED-FOR: Trusted publishing. 
steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: "3.10" - name: "Install Python package dependencies (with: uv)" run: | python -m pip install -U uv python -m uv pip install -U pip setuptools wheel build twine - name: Build this package run: python -m build - name: Check this package (before upload) run: twine check dist/* - name: Upload this package to PyPI uses: pypa/gh-action-pypi-publish@release/v1 with: print-hash: true verbose: true parse_type-0.6.4/.github/workflows/test-pypy27.yml000066400000000000000000000023151467747561600221730ustar00rootroot00000000000000# -- TEST-VARIANT: pypy-27 on ubuntu-latest # BASED ON: test.yml # DESCRIPTION: Checks for Python 2.7 support and any problems name: test-pypy27 on: workflow_dispatch: push: branches: [ "main", "release/**" ] pull_request: types: [opened, reopened, review_requested] branches: [ "main" ] jobs: test: runs-on: ubuntu-latest strategy: fail-fast: false matrix: python-version: ["pypy-2.7"] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' cache-dependency-path: 'py.requirements/*.txt' - name: Install Python package dependencies run: | python -m pip install -U pip setuptools wheel pip install --upgrade -r py.requirements/ci.github.testing.txt pip install -e . 
- name: Run tests run: pytest - name: Upload test reports uses: actions/upload-artifact@v4 with: name: test reports path: | build/testing/report.xml build/testing/report.html if: ${{ job.status == 'failure' }} # MAYBE: if: ${{ always() }} parse_type-0.6.4/.github/workflows/test.yml000066400000000000000000000033311467747561600210220ustar00rootroot00000000000000# -- SOURCE: https://github.com/marketplace/actions/setup-python # SEE: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python # SUPPORTED PYTHON VERSIONS: https://github.com/actions/python-versions name: test on: workflow_dispatch: push: branches: [ "main", "release/**" ] pull_request: types: [opened, reopened, review_requested] branches: [ "main" ] jobs: test: # -- EXAMPLE: runs-on: ubuntu-latest runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: # PREPARED: os: [ubuntu-latest, macos-latest, windows-latest] os: [ubuntu-latest, windows-latest] python-version: ["3.12", "3.11", "3.10"] exclude: - os: windows-latest python-version: "2.7" steps: - uses: actions/checkout@v4 # DISABLED: name: Setup Python ${{ matrix.python-version }} on platform=${{ matrix.os }} - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' cache-dependency-path: 'py.requirements/*.txt' - name: "Install Python package dependencies (with: uv)" run: | python -m pip install -U uv python -m uv pip install -U pip setuptools wheel python -m uv pip install --upgrade -r py.requirements/ci.github.testing.txt python -m uv pip install -e . 
- name: Run tests run: pytest - name: Upload test reports uses: actions/upload-artifact@v3 with: name: test reports path: | build/testing/report.xml build/testing/report.html if: ${{ job.status == 'failure' }} # MAYBE: if: ${{ always() }} parse_type-0.6.4/.gitignore000066400000000000000000000006321467747561600157140ustar00rootroot00000000000000*.py[cod] # -- TEMPORARY PYTHON PACKAGE PARTS: parse_type/_version.py MANIFEST *.egg *.egg-info dist build downloads __pycache__ # Installer logs pip-log.txt Pipfile Pipfile.lock # -- TESTS, COVERAGE REPORTS, ... .cache/ .direnv/ .eggs/ .pytest_cache/ .ruff_cache/ .tox/ .venv*/ .coverage .done.* # -- IDE-RELATED: .fleet/ .idea/ .vscode/ .project .pydevproject # -- EXCLUDE GIT-SUBPROJECTS: /lib/parse/ parse_type-0.6.4/.repos000066400000000000000000000023241467747561600150550ustar00rootroot00000000000000# =========================================================================== # vcs: Multi-repo configuration # =========================================================================== # USAGE: # vcs --commands # Show available commands # # vcs import < .repos # vcs import --input=.repos # vcs import --input=https://github.com/jenisys/cxx.simplelog/blob/master/.repos # vcs import --input=https://github.com/jenisys/cxx.simplelog/blob/master/.rosinstall # vcs import --shallow --input=.repos # vcs import lib/ --input=.repos # # vcs pull # vcs status # # vcs export --nested # Use branch-name # vcs export --nested --exact # Use commit-hashes instead of branch-name # vcs export --nested --exact-with-tags # Use tags or commit-hashes # vcs export --nested lib/doctest # For a specific path instead of ".". # # BAD: vcs-export adds basename of current-directory to repositories. 
# # SEE ALSO: # * https://github.com/dirk-thomas/vcstool # =========================================================================== # REQUIRES: pip install vcstool repositories: lib/parse: type: git url: https://github.com/r1chardj0n3s/parse.git version: master parse_type-0.6.4/CHANGES.txt000066400000000000000000000063361467747561600155440ustar00rootroot00000000000000Version History =============================================================================== Version: 0.7.0 (UNRELEASED) ------------------------------------------------------------------------------- GOALS: * Drop support for Python 2.7 * Support Python >= 3.7 (probably) Version: 0.6.2 (2023-07-04) ------------------------------------------------------------------------------- FIXES: * #21: tests/test_parse.py tests ``parse_type.parse`` (per default). REASON: Using for older installed ``parse`` module may cause weird problems. RELATED TO: ``parse v1.19.1`` (behavior changed compared to ``v1.19.0``) Version: 0.6.1 (2023-07-02) ------------------------------------------------------------------------------- * Switch to MIT license (same as: `parse`_ module) * Use SPDX-License-Identifier in source code (to simplify understanding) * UPDATE/SYNC to `parse`_ v1.19.1 * ADDED: ``pyproject.toml`` to support newer ``pip`` versions REASON: ``setup.py`` becomes DEPRECATED in 2023-09 for newer ``pip`` versions. FIXED: * Issue #19: 0.6.0: pytest is failing in two units (submitted by: kloczek; caused by: `parse`_ v1.19.1) * Issue #1: Licensing confusion DEVELOPMENT: * VCS: Renamed default branch of Git repository to "main" (was: "master"). * CI: Use github-actions as CI pipeline. 
Version: 0.6.0 (2022-01-18) ------------------------------------------------------------------------------- FIXED: + issue #17: setup.py: Remove use of "use_2to3" (submitted by: xxx) Version: 0.5.6 (2020-09-11) ------------------------------------------------------------------------------- FIXED: + parse issue #119 (same as: #121): int_convert memory effect with number-base discovery + UPDATE to parse v1.18.0 (needed by: parse issue #119) Version: 0.5.5 (2020-09-10) ------------------------------------------------------------------------------- FIXED: + parse PR #122: Fixes issue #121 in parse: int_convert memory effect. Version: 0.5.4 (2020-09-10) ------------------------------------------------------------------------------- UPDATED: + parse v1.17.0 Version: 0.5.3 (2019-12-15) ------------------------------------------------------------------------------- UPDATED: + setup.py: Add support for Python 3.8. + UPDATE: Dependencies Version: 0.5.2 (2019-07-14) ------------------------------------------------------------------------------- UPDATED: + parse v1.12.0 FIXED: + Python3 DeprecationWarning for regex (here: in docstrings). Version: 0.5.1 (2018-05-27) ------------------------------------------------------------------------------- CHANGED: + Add parse_type.cfparse.Parser(..., case_sensitive=False, ...) parameter to match functionality in parse.Parser constructor (in parse-1.8.4). + UPDATE to parse-1.8.4 Version: 0.5.0 (2018-04-08; includes: v0.4.3) ------------------------------------------------------------------------------- FIXED: + FIX doctest for parse_type.parse module. CHANGES: * UPDATE: parse-1.8.3 (was: parse-1.8.2) NOTE: ``parse`` module and ``parse_type.parse`` module are now identical. BACKWARD INCOMPATIBLE CHANGES: * RENAMED: type_converter.regex_group_count attribute (was: .group_count) (pull-request review changes of the ``parse`` module). .. 
_parse: https://github.com/r1chardj0n3s/parse parse_type-0.6.4/LICENSE000066400000000000000000000020551467747561600147320ustar00rootroot00000000000000MIT License Copyright (c) 2013-2023 jenisys Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
parse_type-0.6.4/MANIFEST.in000066400000000000000000000007471467747561600154710ustar00rootroot00000000000000include README.rst include LICENSE include .coveragerc include .editorconfig include *.py include *.rst include *.txt include *.ini include *.cfg include *.yaml include bin/invoke* exclude __*.rst exclude __*.txt recursive-include bin *.cmd *.py *.sh recursive-include py.requirements *.txt recursive-include tasks *.py *.txt recursive-include tests *.py # -- DISABLED: recursive-include docs *.rst *.txt *.py prune .direnv prune .tox prune .venv* parse_type-0.6.4/README.rst000066400000000000000000000227601467747561600154210ustar00rootroot00000000000000=============================================================================== parse_type =============================================================================== .. |badge.CI_status| image:: https://github.com/jenisys/parse_type/actions/workflows/test.yml/badge.svg :target: https://github.com/jenisys/parse_type/actions/workflows/test.yml :alt: CI Build Status .. |badge.latest_version| image:: https://img.shields.io/pypi/v/parse_type.svg :target: https://pypi.python.org/pypi/parse_type :alt: Latest Version .. |badge.downloads| image:: https://img.shields.io/pypi/dm/parse_type.svg :target: https://pypi.python.org/pypi/parse_type :alt: Downloads .. |badge.license| image:: https://img.shields.io/pypi/l/parse_type.svg :target: https://pypi.python.org/pypi/parse_type/ :alt: License |badge.CI_status| |badge.latest_version| |badge.license| |badge.downloads| `parse_type`_ extends the `parse`_ module (opposite of `string.format()`_) with the following features: * build type converters for common use cases (enum/mapping, choice) * build a type converter with a cardinality constraint (0..1, 0..*, 1..*) from the type converter with cardinality=1. 
* compose a type converter from other type converters * an extended parser that supports the CardinalityField naming schema and creates missing type variants (0..1, 0..*, 1..*) from the primary type converter .. _parse_type: http://pypi.python.org/pypi/parse_type .. _parse: http://pypi.python.org/pypi/parse .. _`string.format()`: http://docs.python.org/library/string.html#format-string-syntax Definitions ------------------------------------------------------------------------------- *type converter* A type converter function that converts a textual representation of a value type into instance of this value type. In addition, a type converter function is often annotated with attributes that allows the `parse`_ module to use it in a generic way. A type converter is also called a *parse_type* (a definition used here). *cardinality field* A naming convention for related types that differ in cardinality. A cardinality field is a type name suffix in the format of a field. It allows parse format expression, ala:: "{person:Person}" #< Cardinality: 1 (one; the normal case) "{person:Person?}" #< Cardinality: 0..1 (zero or one = optional) "{persons:Person*}" #< Cardinality: 0..* (zero or more = many0) "{persons:Person+}" #< Cardinality: 1..* (one or more = many) This naming convention mimics the relationship descriptions in UML diagrams. Basic Example ------------------------------------------------------------------------------- Define an own type converter for numbers (integers): .. code-block:: python # -- USE CASE: def parse_number(text): return int(text) parse_number.pattern = r"\d+" # -- REGULAR EXPRESSION pattern for type. This is equivalent to: .. code-block:: python import parse @parse.with_pattern(r"\d+") def parse_number(text): return int(text) assert hasattr(parse_number, "pattern") assert parse_number.pattern == r"\d+" .. code-block:: python # -- USE CASE: Use the type converter with the parse module. 
schema = "Hello {number:Number}" parser = parse.Parser(schema, dict(Number=parse_number)) result = parser.parse("Hello 42") assert result is not None, "REQUIRE: text matches the schema." assert result["number"] == 42 result = parser.parse("Hello XXX") assert result is None, "MISMATCH: text does not match the schema." .. hint:: The described functionality above is standard functionality of the `parse`_ module. It serves as introduction for the remaining cases. Cardinality ------------------------------------------------------------------------------- Create an type converter for "ManyNumbers" (List, separated with commas) with cardinality "1..* = 1+" (many) from the type converter for a "Number". .. code-block:: python # -- USE CASE: Create new type converter with a cardinality constraint. # CARDINALITY: many := one or more (1..*) from parse import Parser from parse_type import TypeBuilder parse_numbers = TypeBuilder.with_many(parse_number, listsep=",") schema = "List: {numbers:ManyNumbers}" parser = Parser(schema, dict(ManyNumbers=parse_numbers)) result = parser.parse("List: 1, 2, 3") assert result["numbers"] == [1, 2, 3] Create an type converter for an "OptionalNumbers" with cardinality "0..1 = ?" (optional) from the type converter for a "Number". .. code-block:: python # -- USE CASE: Create new type converter with cardinality constraint. # CARDINALITY: optional := zero or one (0..1) from parse import Parser from parse_type import TypeBuilder parse_optional_number = TypeBuilder.with_optional(parse_number) schema = "Optional: {number:OptionalNumber}" parser = Parser(schema, dict(OptionalNumber=parse_optional_number)) result = parser.parse("Optional: 42") assert result["number"] == 42 result = parser.parse("Optional: ") assert result["number"] == None Enumeration (Name-to-Value Mapping) ------------------------------------------------------------------------------- Create an type converter for an "Enumeration" from the description of the mapping as dictionary. .. 
code-block:: python # -- USE CASE: Create a type converter for an enumeration. from parse import Parser from parse_type import TypeBuilder parse_enum_yesno = TypeBuilder.make_enum({"yes": True, "no": False}) parser = Parser("Answer: {answer:YesNo}", dict(YesNo=parse_enum_yesno)) result = parser.parse("Answer: yes") assert result["answer"] == True Create an type converter for an "Enumeration" from the description of the mapping as an enumeration class (`Python 3.4 enum`_ or the `enum34`_ backport; see also: `PEP-0435`_). .. code-block:: python # -- USE CASE: Create a type converter for enum34 enumeration class. # NOTE: Use Python 3.4 or enum34 backport. from parse import Parser from parse_type import TypeBuilder from enum import Enum class Color(Enum): red = 1 green = 2 blue = 3 parse_enum_color = TypeBuilder.make_enum(Color) parser = Parser("Select: {color:Color}", dict(Color=parse_enum_color)) result = parser.parse("Select: red") assert result["color"] is Color.red .. _`Python 3.4 enum`: http://docs.python.org/3.4/library/enum.html#module-enum .. _enum34: http://pypi.python.org/pypi/enum34 .. _PEP-0435: http://www.python.org/dev/peps/pep-0435 Choice (Name Enumeration) ------------------------------------------------------------------------------- A Choice data type allows to select one of several strings. Create an type converter for an "Choice" list, a list of unique names (as string). .. code-block:: python from parse import Parser from parse_type import TypeBuilder parse_choice_yesno = TypeBuilder.make_choice(["yes", "no"]) schema = "Answer: {answer:ChoiceYesNo}" parser = Parser(schema, dict(ChoiceYesNo=parse_choice_yesno)) result = parser.parse("Answer: yes") assert result["answer"] == "yes" Variant (Type Alternatives) ------------------------------------------------------------------------------- Sometimes you need a type converter that can accept text for multiple type converter alternatives. This is normally called a "variant" (or: union). 
Create an type converter for an "Variant" type that accepts: * Numbers (positive numbers, as integer) * Color enum values (by name) .. code-block:: python from parse import Parser, with_pattern from parse_type import TypeBuilder from enum import Enum class Color(Enum): red = 1 green = 2 blue = 3 @with_pattern(r"\d+") def parse_number(text): return int(text) # -- MAKE VARIANT: Alternatives of different type converters. parse_color = TypeBuilder.make_enum(Color) parse_variant = TypeBuilder.make_variant([parse_number, parse_color]) schema = "Variant: {variant:Number_or_Color}" parser = Parser(schema, dict(Number_or_Color=parse_variant)) # -- TEST VARIANT: With number, color and mismatch. result = parser.parse("Variant: 42") assert result["variant"] == 42 result = parser.parse("Variant: blue") assert result["variant"] is Color.blue result = parser.parse("Variant: __MISMATCH__") assert not result Extended Parser with CardinalityField support ------------------------------------------------------------------------------- The parser extends the ``parse.Parser`` and adds the following functionality: * supports the CardinalityField naming scheme * automatically creates missing type variants for types with a CardinalityField by using the primary type converter for cardinality=1 * extends the provide type converter dictionary with new type variants. Example: .. code-block:: python # -- USE CASE: Parser with CardinalityField support. # NOTE: Automatically adds missing type variants with CardinalityField part. # USE: parse_number() type converter from above. from parse_type.cfparse import Parser # -- PREPARE: parser, adds missing type variant for cardinality 1..* (many) type_dict = dict(Number=parse_number) schema = "List: {numbers:Number+}" parser = Parser(schema, type_dict) assert "Number+" in type_dict, "Created missing type variant based on: Number" # -- USE: parser. 
result = parser.parse("List: 1, 2, 3") assert result["numbers"] == [1, 2, 3] parse_type-0.6.4/SECURITY.md000066400000000000000000000021341467747561600155140ustar00rootroot00000000000000# Security Policy ## Supported Versions The following versions are currently being supported with security updates. | Version | Supported | | ------- | ------------------ | | HEAD | :white_check_mark: | | 0.6.x | :white_check_mark: | | < 0.6.0 | :x: | ## Reporting a Vulnerability Please report security issues by using the new [Github vulnerability reporting mechanism][security advisories for this repository] that is enabled for [this repository]. SEE ALSO: * [THIS REPOSITORY]: [Security Advisories][security advisories for this repository] * [docs.github.com]/.../[guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability] [this repository]: https://github.com/jenisys/parse_type [security advisories for this repository]: https://github.com/jenisys/parse_type/security/advisories [docs.github.com]: https://docs.github.com/en/ [guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability parse_type-0.6.4/bin/000077500000000000000000000000001467747561600144735ustar00rootroot00000000000000parse_type-0.6.4/bin/github-workflow.json_schema000066400000000000000000002723061467747561600220520ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-07/schema#", "$id": "https://json.schemastore.org/github-workflow.json", "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions", "additionalProperties": false, "definitions": { "architecture": { "type": "string", "enum": ["ARM32", "x64", "x86"] }, "branch": { "$comment": 
"https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#onpushpull_requestbranchestags", "$ref": "#/definitions/globs", "description": "When using the push and pull_request events, you can configure a workflow to run on specific branches or tags. If you only define only tags or only branches, the workflow won't run for events affecting the undefined Git ref.\nThe branches, branches-ignore, tags, and tags-ignore keywords accept glob patterns that use the * and ** wildcard characters to match more than one branch or tag name. For more information, see https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#filter-pattern-cheat-sheet.\nThe patterns defined in branches and tags are evaluated against the Git ref's name. For example, defining the pattern mona/octocat in branches will match the refs/heads/mona/octocat Git ref. The pattern releases/** will match the refs/heads/releases/10 Git ref.\nYou can use two types of filters to prevent a workflow from running on pushes and pull requests to tags and branches:\n- branches or branches-ignore - You cannot use both the branches and branches-ignore filters for the same event in a workflow. Use the branches filter when you need to filter branches for positive matches and exclude branches. Use the branches-ignore filter when you only need to exclude branch names.\n- tags or tags-ignore - You cannot use both the tags and tags-ignore filters for the same event in a workflow. Use the tags filter when you need to filter tags for positive matches and exclude tags. Use the tags-ignore filter when you only need to exclude tag names.\nYou can exclude tags and branches using the ! character. The order that you define patterns matters.\n- A matching negative pattern (prefixed with !) after a positive match will exclude the Git ref.\n- A matching positive pattern after a negative match will include the Git ref again." 
}, "concurrency": { "type": "object", "properties": { "group": { "$comment": "https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#example-using-concurrency-to-cancel-any-in-progress-job-or-run-1", "description": "When a concurrent job or workflow is queued, if another job or workflow using the same concurrency group in the repository is in progress, the queued job or workflow will be pending. Any previously pending job or workflow in the concurrency group will be canceled.", "type": "string" }, "cancel-in-progress": { "$comment": "https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#example-using-concurrency-to-cancel-any-in-progress-job-or-run-1", "description": "To cancel any currently running job or workflow in the same concurrency group, specify cancel-in-progress: true.", "oneOf": [ { "type": "boolean" }, { "$ref": "#/definitions/expressionSyntax" } ] } }, "required": ["group"], "additionalProperties": false }, "configuration": { "oneOf": [ { "type": "string" }, { "type": "number" }, { "type": "boolean" }, { "type": "object", "additionalProperties": { "$ref": "#/definitions/configuration" } }, { "type": "array", "items": { "$ref": "#/definitions/configuration" } } ] }, "container": { "type": "object", "properties": { "image": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainerimage", "description": "The Docker image to use as the container to run the action. The value can be the Docker Hub image name or a registry name.", "type": "string" }, "credentials": { "$comment": "https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idcontainercredentials", "description": "If the image's container registry requires authentication to pull the image, you can use credentials to set a map of the username and password. 
The credentials are the same values that you would provide to the `docker login` command.", "type": "object", "properties": { "username": { "type": "string" }, "password": { "type": "string" } } }, "env": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainerenv", "$ref": "#/definitions/env", "description": "Sets an array of environment variables in the container." }, "ports": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainerports", "description": "Sets an array of ports to expose on the container.", "type": "array", "items": { "oneOf": [ { "type": "number" }, { "type": "string" } ] }, "minItems": 1 }, "volumes": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainervolumes", "description": "Sets an array of volumes for the container to use. You can use volumes to share data between services or other steps in a job. You can specify named Docker volumes, anonymous Docker volumes, or bind mounts on the host.\nTo specify a volume, you specify the source and destination path: :\nThe is a volume name or an absolute path on the host machine, and is an absolute path in the container.", "type": "array", "items": { "type": "string", "pattern": "^[^:]+:[^:]+$" }, "minItems": 1 }, "options": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontaineroptions", "description": "Additional Docker container resource options. 
For a list of options, see https://docs.docker.com/engine/reference/commandline/create/#options.", "type": "string" } }, "required": ["image"], "additionalProperties": false }, "defaults": { "type": "object", "properties": { "run": { "type": "object", "properties": { "shell": { "$ref": "#/definitions/shell" }, "working-directory": { "$ref": "#/definitions/working-directory" } }, "minProperties": 1, "additionalProperties": false } }, "minProperties": 1, "additionalProperties": false }, "permissions": { "$comment": "https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#permissions", "description": "You can modify the default permissions granted to the GITHUB_TOKEN, adding or removing access as required, so that you only allow the minimum required access.", "oneOf": [ { "type": "string", "enum": ["read-all", "write-all"] }, { "$ref": "#/definitions/permissions-event" } ] }, "permissions-event": { "type": "object", "additionalProperties": false, "properties": { "actions": { "$ref": "#/definitions/permissions-level" }, "attestations": { "$ref": "#/definitions/permissions-level" }, "checks": { "$ref": "#/definitions/permissions-level" }, "contents": { "$ref": "#/definitions/permissions-level" }, "deployments": { "$ref": "#/definitions/permissions-level" }, "discussions": { "$ref": "#/definitions/permissions-level" }, "id-token": { "$ref": "#/definitions/permissions-level" }, "issues": { "$ref": "#/definitions/permissions-level" }, "packages": { "$ref": "#/definitions/permissions-level" }, "pages": { "$ref": "#/definitions/permissions-level" }, "pull-requests": { "$ref": "#/definitions/permissions-level" }, "repository-projects": { "$ref": "#/definitions/permissions-level" }, "security-events": { "$ref": "#/definitions/permissions-level" }, "statuses": { "$ref": "#/definitions/permissions-level" } } }, "permissions-level": { "type": "string", "enum": ["read", "write", "none"] }, "env": { "$comment": 
"https://docs.github.com/en/actions/learn-github-actions/environment-variables", "description": "To set custom environment variables, you need to specify the variables in the workflow file. You can define environment variables for a step, job, or entire workflow using the jobs..steps[*].env, jobs..env, and env keywords. For more information, see https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsenv", "oneOf": [ { "type": "object", "additionalProperties": { "oneOf": [ { "type": "string" }, { "type": "number" }, { "type": "boolean" } ] } }, { "$ref": "#/definitions/stringContainingExpressionSyntax" } ] }, "environment": { "$comment": "https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idenvironment", "description": "The environment that the job references", "type": "object", "properties": { "name": { "$comment": "https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#example-using-a-single-environment-name", "description": "The name of the environment configured in the repo.", "type": "string" }, "url": { "$comment": "https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#example-using-environment-name-and-url", "description": "A deployment URL", "type": "string" } }, "required": ["name"], "additionalProperties": false }, "event": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows", "type": "string", "enum": [ "branch_protection_rule", "check_run", "check_suite", "create", "delete", "deployment", "deployment_status", "discussion", "discussion_comment", "fork", "gollum", "issue_comment", "issues", "label", "merge_group", "milestone", "page_build", "project", "project_card", "project_column", "public", "pull_request", "pull_request_review", "pull_request_review_comment", "pull_request_target", 
"push", "registry_package", "release", "status", "watch", "workflow_call", "workflow_dispatch", "workflow_run", "repository_dispatch" ] }, "eventObject": { "oneOf": [ { "type": "object" }, { "type": "null" } ], "additionalProperties": true }, "expressionSyntax": { "$comment": "escape `{` and `}` in pattern to be unicode compatible (#1360)", "type": "string", "pattern": "^\\$\\{\\{(.|[\r\n])*\\}\\}$" }, "stringContainingExpressionSyntax": { "$comment": "escape `{` and `}` in pattern to be unicode compatible (#1360)", "type": "string", "pattern": "^.*\\$\\{\\{(.|[\r\n])*\\}\\}.*$" }, "globs": { "type": "array", "items": { "type": "string", "minLength": 1 }, "minItems": 1 }, "machine": { "type": "string", "enum": ["linux", "macos", "windows"] }, "name": { "type": "string", "pattern": "^[_a-zA-Z][a-zA-Z0-9_-]*$" }, "path": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#onpushpull_requestpaths", "$ref": "#/definitions/globs", "description": "When using the push and pull_request events, you can configure a workflow to run when at least one file does not match paths-ignore or at least one modified file matches the configured paths. Path filters are not evaluated for pushes to tags.\nThe paths-ignore and paths keywords accept glob patterns that use the * and ** wildcard characters to match more than one path name. For more information, see https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#filter-pattern-cheat-sheet.\nYou can exclude paths using two types of filters. You cannot use both of these filters for the same event in a workflow.\n- paths-ignore - Use the paths-ignore filter when you only need to exclude path names.\n- paths - Use the paths filter when you need to filter paths for positive matches and exclude paths." 
}, "ref": { "properties": { "branches": { "$ref": "#/definitions/branch" }, "branches-ignore": { "$ref": "#/definitions/branch" }, "tags": { "$ref": "#/definitions/branch" }, "tags-ignore": { "$ref": "#/definitions/branch" }, "paths": { "$ref": "#/definitions/path" }, "paths-ignore": { "$ref": "#/definitions/path" } }, "oneOf": [ { "type": "object", "allOf": [ { "not": { "required": ["branches", "branches-ignore"] } }, { "not": { "required": ["tags", "tags-ignore"] } }, { "not": { "required": ["paths", "paths-ignore"] } } ] }, { "type": "null" } ] }, "shell": { "$comment": "https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell", "description": "You can override the default shell settings in the runner's operating system using the shell keyword. You can use built-in shell keywords, or you can define a custom set of shell options.", "anyOf": [ { "type": "string" }, { "$comment": "https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#custom-shell", "type": "string", "enum": ["bash", "pwsh", "python", "sh", "cmd", "powershell"] } ] }, "types": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#onevent_nametypes", "description": "Selects the types of activity that will trigger a workflow run. Most GitHub events are triggered by more than one type of activity. For example, the event for the release resource is triggered when a release is published, unpublished, created, edited, deleted, or prereleased. The types keyword enables you to narrow down activity that causes the workflow to run. When only one activity type triggers a webhook event, the types keyword is unnecessary.\nYou can use an array of event types. 
For more information about each event and their activity types, see https://help.github.com/en/articles/events-that-trigger-workflows#webhook-events.", "type": "array", "minItems": 1 }, "working-directory": { "$comment": "https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idstepsrun", "description": "Using the working-directory keyword, you can specify the working directory of where to run the command.", "type": "string" }, "jobNeeds": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idneeds", "description": "Identifies any jobs that must complete successfully before this job will run. It can be a string or array of strings. If a job fails, all jobs that need it are skipped unless the jobs use a conditional statement that causes the job to continue.", "oneOf": [ { "type": "array", "items": { "$ref": "#/definitions/name" }, "minItems": 1 }, { "$ref": "#/definitions/name" } ] }, "matrix": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategymatrix", "description": "A build matrix is a set of different configurations of the virtual environment. For example you might run a job against more than one supported version of a language, operating system, or tool. Each configuration is a copy of the job that runs and reports a status.\nYou can specify a matrix by supplying an array for the configuration options. For example, if the GitHub virtual environment supports Node.js versions 6, 8, and 10 you could specify an array of those versions in the matrix.\nWhen you define a matrix of operating systems, you must set the required runs-on keyword to the operating system of the current job, rather than hard-coding the operating system name. To access the operating system name, you can use the matrix.os context parameter to set runs-on. 
For more information, see https://help.github.com/en/articles/contexts-and-expression-syntax-for-github-actions.", "oneOf": [ { "type": "object", "patternProperties": { "^(in|ex)clude$": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#example-including-configurations-in-a-matrix-build", "oneOf": [ { "$ref": "#/definitions/expressionSyntax" }, { "type": "array", "items": { "type": "object", "additionalProperties": { "$ref": "#/definitions/configuration" } }, "minItems": 1 } ] } }, "additionalProperties": { "oneOf": [ { "type": "array", "items": { "$ref": "#/definitions/configuration" }, "minItems": 1 }, { "$ref": "#/definitions/expressionSyntax" } ] }, "minProperties": 1 }, { "$ref": "#/definitions/expressionSyntax" } ] }, "reusableWorkflowCallJob": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/reusing-workflows#calling-a-reusable-workflow", "description": "Each job must have an id to associate with the job. The key job_id is a string and its value is a map of the job's configuration data. You must replace <job_id> with a string that is unique to the jobs object. The <job_id> must start with a letter or _ and contain only alphanumeric characters, -, or _.", "type": "object", "properties": { "name": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idname", "description": "The name of the job displayed on GitHub.", "type": "string" }, "needs": { "$ref": "#/definitions/jobNeeds" }, "permissions": { "$ref": "#/definitions/permissions" }, "if": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idif", "description": "You can use the if conditional to prevent a job from running unless a condition is met. 
You can use any supported context and expression to create a conditional.\nExpressions in an if conditional do not require the ${{ }} syntax. For more information, see https://help.github.com/en/articles/contexts-and-expression-syntax-for-github-actions.", "type": ["boolean", "number", "string"] }, "uses": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_iduses", "description": "The location and version of a reusable workflow file to run as a job, of the form './{path/to}/{localfile}.yml' or '{owner}/{repo}/{path}/{filename}@{ref}'. {ref} can be a SHA, a release tag, or a branch name. Using the commit SHA is the safest for stability and security.", "type": "string", "pattern": "^(.+\\/)+(.+)\\.(ya?ml)(@.+)?$" }, "with": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idwith", "$ref": "#/definitions/env", "description": "A map of inputs that are passed to the called workflow. Any inputs that you pass must match the input specifications defined in the called workflow. Unlike 'jobs.<job_id>.steps[*].with', the inputs you pass with 'jobs.<job_id>.with' are not available as environment variables in the called workflow. Instead, you can reference the inputs by using the inputs context." }, "secrets": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idsecrets", "description": "When a job is used to call a reusable workflow, you can use 'secrets' to provide a map of secrets that are passed to the called workflow. 
Any secrets that you pass must match the names defined in the called workflow.", "oneOf": [ { "$ref": "#/definitions/env" }, { "type": "string", "enum": ["inherit"] } ] }, "strategy": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategy", "description": "A strategy creates a build matrix for your jobs. You can define different variations of an environment to run each job in.", "type": "object", "properties": { "matrix": { "$ref": "#/definitions/matrix" }, "fail-fast": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategyfail-fast", "description": "When set to true, GitHub cancels all in-progress jobs if any matrix job fails. Default: true", "type": ["boolean", "string"], "default": true }, "max-parallel": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategymax-parallel", "description": "The maximum number of jobs that can run simultaneously when using a matrix job strategy. By default, GitHub will maximize the number of jobs run in parallel depending on the available runners on GitHub-hosted virtual machines.", "type": ["number", "string"] } }, "required": ["matrix"], "additionalProperties": false }, "concurrency": { "$comment": "https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idconcurrency", "description": "Concurrency ensures that only a single job or workflow using the same concurrency group will run at a time. A concurrency group can be any string or expression. The expression can use any context except for the secrets context. \nYou can also specify concurrency at the workflow level. 
\nWhen a concurrent job or workflow is queued, if another job or workflow using the same concurrency group in the repository is in progress, the queued job or workflow will be pending. Any previously pending job or workflow in the concurrency group will be canceled. To also cancel any currently running job or workflow in the same concurrency group, specify cancel-in-progress: true.", "oneOf": [ { "type": "string" }, { "$ref": "#/definitions/concurrency" } ] } }, "required": ["uses"], "additionalProperties": false }, "normalJob": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_id", "description": "Each job must have an id to associate with the job. The key job_id is a string and its value is a map of the job's configuration data. You must replace <job_id> with a string that is unique to the jobs object. The <job_id> must start with a letter or _ and contain only alphanumeric characters, -, or _.", "type": "object", "properties": { "name": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idname", "description": "The name of the job displayed on GitHub.", "type": "string" }, "needs": { "$ref": "#/definitions/jobNeeds" }, "permissions": { "$ref": "#/definitions/permissions" }, "runs-on": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idruns-on", "description": "The type of machine to run the job on. 
The machine can be either a GitHub-hosted runner, or a self-hosted runner.", "anyOf": [ { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#github-hosted-runners", "type": "string" }, { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#self-hosted-runners", "type": "array", "anyOf": [ { "items": [ { "type": "string" } ], "minItems": 1 } ] }, { "$comment": "https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-runners-in-a-group", "type": "object", "properties": { "group": { "type": "string" }, "labels": { "oneOf": [ { "type": "string" }, { "type": "array", "items": { "type": "string" } } ] } } }, { "$ref": "#/definitions/stringContainingExpressionSyntax" }, { "$ref": "#/definitions/expressionSyntax" } ] }, "environment": { "$comment": "https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idenvironment", "description": "The environment that the job references.", "oneOf": [ { "type": "string" }, { "$ref": "#/definitions/environment" } ] }, "outputs": { "$comment": "https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idoutputs", "description": "A map of outputs for a job. Job outputs are available to all downstream jobs that depend on this job.", "type": "object", "additionalProperties": { "type": "string" }, "minProperties": 1 }, "env": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idenv", "$ref": "#/definitions/env", "description": "A map of environment variables that are available to all steps in the job." 
}, "defaults": { "$comment": "https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_iddefaults", "$ref": "#/definitions/defaults", "description": "A map of default settings that will apply to all steps in the job." }, "if": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idif", "description": "You can use the if conditional to prevent a job from running unless a condition is met. You can use any supported context and expression to create a conditional.\nExpressions in an if conditional do not require the ${{ }} syntax. For more information, see https://help.github.com/en/articles/contexts-and-expression-syntax-for-github-actions.", "type": ["boolean", "number", "string"] }, "steps": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idsteps", "description": "A job contains a sequence of tasks called steps. Steps can run commands, run setup tasks, or run an action in your repository, a public repository, or an action published in a Docker registry. Not all steps run actions, but all actions run as a step. Each step runs in its own process in the virtual environment and has access to the workspace and filesystem. Because steps run in their own process, changes to environment variables are not preserved between steps. 
GitHub provides built-in steps to set up and complete a job.\nMust contain either `uses` or `run`\n", "type": "array", "items": { "allOf": [ { "oneOf": [ { "type": "object", "properties": { "uses": { "type": "string" } }, "required": ["uses"] }, { "type": "object", "properties": { "run": { "type": "string" } }, "required": ["run"] } ] }, { "type": "object", "properties": { "id": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsid", "description": "A unique identifier for the step. You can use the id to reference the step in contexts. For more information, see https://help.github.com/en/articles/contexts-and-expression-syntax-for-github-actions.", "type": "string" }, "if": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsif", "description": "You can use the if conditional to prevent a step from running unless a condition is met. You can use any supported context and expression to create a conditional.\nExpressions in an if conditional do not require the ${{ }} syntax. For more information, see https://help.github.com/en/articles/contexts-and-expression-syntax-for-github-actions.", "type": ["boolean", "number", "string"] }, "name": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsname", "description": "A name for your step to display on GitHub.", "type": "string" }, "uses": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsuses", "description": "Selects an action to run as part of a step in your job. An action is a reusable unit of code. 
You can use an action defined in the same repository as the workflow, a public repository, or in a published Docker container image (https://hub.docker.com/).\nWe strongly recommend that you include the version of the action you are using by specifying a Git ref, SHA, or Docker tag number. If you don't specify a version, it could break your workflows or cause unexpected behavior when the action owner publishes an update.\n- Using the commit SHA of a released action version is the safest for stability and security.\n- Using the specific major action version allows you to receive critical fixes and security patches while still maintaining compatibility. It also assures that your workflow should still work.\n- Using the master branch of an action may be convenient, but if someone releases a new major version with a breaking change, your workflow could break.\nSome actions require inputs that you must set using the with keyword. Review the action's README file to determine the inputs required.\nActions are either JavaScript files or Docker containers. If the action you're using is a Docker container you must run the job in a Linux virtual environment. For more details, see https://help.github.com/en/articles/virtual-environments-for-github-actions.", "type": "string" }, "run": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsrun", "description": "Runs command-line programs using the operating system's shell. If you do not provide a name, the step name will default to the text specified in the run command.\nCommands run using non-login shells by default. You can choose a different shell and customize the shell used to run commands. For more information, see https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#using-a-specific-shell.\nEach run keyword represents a new process and shell in the virtual environment. 
When you provide multi-line commands, each line runs in the same shell.", "type": "string" }, "working-directory": { "$ref": "#/definitions/working-directory" }, "shell": { "$ref": "#/definitions/shell" }, "with": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepswith", "$ref": "#/definitions/env", "description": "A map of the input parameters defined by the action. Each input parameter is a key/value pair. Input parameters are set as environment variables. The variable is prefixed with INPUT_ and converted to upper case.", "properties": { "args": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepswithargs", "type": "string" }, "entrypoint": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepswithentrypoint", "type": "string" } } }, "env": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsenv", "$ref": "#/definitions/env", "description": "Sets environment variables for steps to use in the virtual environment. You can also set environment variables for the entire workflow or a job." }, "continue-on-error": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepscontinue-on-error", "description": "Prevents a job from failing when a step fails. 
Set to true to allow a job to pass when this step fails.", "oneOf": [ { "type": "boolean" }, { "$ref": "#/definitions/expressionSyntax" } ], "default": false }, "timeout-minutes": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepstimeout-minutes", "description": "The maximum number of minutes to run the step before killing the process.", "oneOf": [ { "type": "number" }, { "$ref": "#/definitions/expressionSyntax" } ] } }, "dependencies": { "working-directory": ["run"], "shell": ["run"] }, "additionalProperties": false } ] }, "minItems": 1 }, "timeout-minutes": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes", "description": "The maximum number of minutes to let a workflow run before GitHub automatically cancels it. Default: 360", "oneOf": [ { "type": "number" }, { "$ref": "#/definitions/expressionSyntax" } ], "default": 360 }, "strategy": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategy", "description": "A strategy creates a build matrix for your jobs. You can define different variations of an environment to run each job in.", "type": "object", "properties": { "matrix": { "$ref": "#/definitions/matrix" }, "fail-fast": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategyfail-fast", "description": "When set to true, GitHub cancels all in-progress jobs if any matrix job fails. 
Default: true", "type": ["boolean", "string"], "default": true }, "max-parallel": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idstrategymax-parallel", "description": "The maximum number of jobs that can run simultaneously when using a matrix job strategy. By default, GitHub will maximize the number of jobs run in parallel depending on the available runners on GitHub-hosted virtual machines.", "type": ["number", "string"] } }, "required": ["matrix"], "additionalProperties": false }, "continue-on-error": { "$comment": "https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idcontinue-on-error", "description": "Prevents a workflow run from failing when a job fails. Set to true to allow a workflow run to pass when this job fails.", "oneOf": [ { "type": "boolean" }, { "$ref": "#/definitions/expressionSyntax" } ] }, "container": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idcontainer", "description": "A container to run any steps in a job that don't already specify a container. If you have steps that use both script and container actions, the container actions will run as sibling containers on the same network with the same volume mounts.\nIf you do not set a container, all steps will run directly on the host specified by runs-on unless a step refers to an action configured to run in a container.", "oneOf": [ { "type": "string" }, { "$ref": "#/definitions/container" } ] }, "services": { "$comment": "https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobsjob_idservices", "description": "Additional containers to host services for a job in a workflow. These are useful for creating databases or cache services like redis. 
The runner on the virtual machine will automatically create a network and manage the life cycle of the service containers.\nWhen you use a service container for a job or your step uses container actions, you don't need to set port information to access the service. Docker automatically exposes all ports between containers on the same network.\nWhen both the job and the action run in a container, you can directly reference the container by its hostname. The hostname is automatically mapped to the service name.\nWhen a step does not use a container action, you must access the service using localhost and bind the ports.", "type": "object", "additionalProperties": { "$ref": "#/definitions/container" } }, "concurrency": { "$comment": "https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idconcurrency", "description": "Concurrency ensures that only a single job or workflow using the same concurrency group will run at a time. A concurrency group can be any string or expression. The expression can use any context except for the secrets context. \nYou can also specify concurrency at the workflow level. \nWhen a concurrent job or workflow is queued, if another job or workflow using the same concurrency group in the repository is in progress, the queued job or workflow will be pending. Any previously pending job or workflow in the concurrency group will be canceled. To also cancel any currently running job or workflow in the same concurrency group, specify cancel-in-progress: true.", "oneOf": [ { "type": "string" }, { "$ref": "#/definitions/concurrency" } ] } }, "required": ["runs-on"], "additionalProperties": false } }, "properties": { "name": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#name", "description": "The name of your workflow. GitHub displays the names of your workflows on your repository's actions page. 
If you omit this field, GitHub sets the name to the workflow's filename.", "type": "string" }, "on": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#on", "description": "The name of the GitHub event that triggers the workflow. You can provide a single event string, array of events, array of event types, or an event configuration map that schedules a workflow or restricts the execution of a workflow to specific files, tags, or branch changes. For a list of available events, see https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows.", "oneOf": [ { "$ref": "#/definitions/event" }, { "type": "array", "items": { "$ref": "#/definitions/event" }, "minItems": 1 }, { "type": "object", "properties": { "branch_protection_rule": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows#branch_protection_rule", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the branch_protection_rule event occurs. More than one activity type triggers this event.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["created", "edited", "deleted"] }, "default": ["created", "edited", "deleted"] } } }, "check_run": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#check-run-event-check_run", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the check_run event occurs. More than one activity type triggers this event. 
For information about the REST API, see https://developer.github.com/v3/checks/runs.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": [ "created", "rerequested", "completed", "requested_action" ] }, "default": [ "created", "rerequested", "completed", "requested_action" ] } } }, "check_suite": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#check-suite-event-check_suite", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the check_suite event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/checks/suites/.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["completed", "requested", "rerequested"] }, "default": ["completed", "requested", "rerequested"] } } }, "create": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#create-event-create", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime someone creates a branch or tag, which triggers the create event. For information about the REST API, see https://developer.github.com/v3/git/refs/#create-a-reference." }, "delete": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#delete-event-delete", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime someone deletes a branch or tag, which triggers the delete event. For information about the REST API, see https://developer.github.com/v3/git/refs/#delete-a-reference." 
}, "deployment": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#deployment-event-deployment", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime someone creates a deployment, which triggers the deployment event. Deployments created with a commit SHA may not have a Git ref. For information about the REST API, see https://developer.github.com/v3/repos/deployments/." }, "deployment_status": { "$comment": "https://docs.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime a third party provides a deployment status, which triggers the deployment_status event. Deployments created with a commit SHA may not have a Git ref. For information about the REST API, see https://developer.github.com/v3/repos/deployments/#create-a-deployment-status." }, "discussion": { "$comment": "https://docs.github.com/en/actions/reference/events-that-trigger-workflows#discussion", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the discussion event occurs. More than one activity type triggers this event. 
For information about the GraphQL API, see https://docs.github.com/en/graphql/guides/using-the-graphql-api-for-discussions", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": [ "created", "edited", "deleted", "transferred", "pinned", "unpinned", "labeled", "unlabeled", "locked", "unlocked", "category_changed", "answered", "unanswered" ] }, "default": [ "created", "edited", "deleted", "transferred", "pinned", "unpinned", "labeled", "unlabeled", "locked", "unlocked", "category_changed", "answered", "unanswered" ] } } }, "discussion_comment": { "$comment": "https://docs.github.com/en/actions/reference/events-that-trigger-workflows#discussion_comment", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the discussion_comment event occurs. More than one activity type triggers this event. For information about the GraphQL API, see https://docs.github.com/en/graphql/guides/using-the-graphql-api-for-discussions", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["created", "edited", "deleted"] }, "default": ["created", "edited", "deleted"] } } }, "fork": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#fork-event-fork", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime when someone forks a repository, which triggers the fork event. For information about the REST API, see https://developer.github.com/v3/repos/forks/#create-a-fork." }, "gollum": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#gollum-event-gollum", "$ref": "#/definitions/eventObject", "description": "Runs your workflow when someone creates or updates a Wiki page, which triggers the gollum event." 
}, "issue_comment": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#issue-comment-event-issue_comment", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the issue_comment event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/issues/comments/.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["created", "edited", "deleted"] }, "default": ["created", "edited", "deleted"] } } }, "issues": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#issues-event-issues", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the issues event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/issues.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": [ "opened", "edited", "deleted", "transferred", "pinned", "unpinned", "closed", "reopened", "assigned", "unassigned", "labeled", "unlabeled", "locked", "unlocked", "milestoned", "demilestoned" ] }, "default": [ "opened", "edited", "deleted", "transferred", "pinned", "unpinned", "closed", "reopened", "assigned", "unassigned", "labeled", "unlabeled", "locked", "unlocked", "milestoned", "demilestoned" ] } } }, "label": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#label-event-label", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the label event occurs. More than one activity type triggers this event. 
For information about the REST API, see https://developer.github.com/v3/issues/labels/.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["created", "edited", "deleted"] }, "default": ["created", "edited", "deleted"] } } }, "merge_group": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#merge_group", "$ref": "#/definitions/eventObject", "description": "Runs your workflow when a pull request is added to a merge queue, which adds the pull request to a merge group. For information about the merge queue, see https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/merging-a-pull-request-with-a-merge-queue .", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["checks_requested"] }, "default": ["checks_requested"] } } }, "milestone": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#milestone-event-milestone", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the milestone event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/issues/milestones/.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["created", "closed", "opened", "edited", "deleted"] }, "default": [ "created", "closed", "opened", "edited", "deleted" ] } } }, "page_build": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#page-build-event-page_build", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime someone pushes to a GitHub Pages-enabled branch, which triggers the page_build event. 
For information about the REST API, see https://developer.github.com/v3/repos/pages/." }, "project": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#project-event-project", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the project event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/projects/.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": [ "created", "updated", "closed", "reopened", "edited", "deleted" ] }, "default": [ "created", "updated", "closed", "reopened", "edited", "deleted" ] } } }, "project_card": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#project-card-event-project_card", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the project_card event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/projects/cards.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": [ "created", "moved", "converted", "edited", "deleted" ] }, "default": [ "created", "moved", "converted", "edited", "deleted" ] } } }, "project_column": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#project-column-event-project_column", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the project_column event occurs. More than one activity type triggers this event. 
For information about the REST API, see https://developer.github.com/v3/projects/columns.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["created", "updated", "moved", "deleted"] }, "default": ["created", "updated", "moved", "deleted"] } } }, "public": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#public-event-public", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime someone makes a private repository public, which triggers the public event. For information about the REST API, see https://developer.github.com/v3/repos/#edit." }, "pull_request": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#pull-request-event-pull_request", "$ref": "#/definitions/ref", "description": "Runs your workflow anytime the pull_request event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/pulls.\nNote: Workflows do not run on private base repositories when you open a pull request from a forked repository.\nWhen you create a pull request from a forked repository to the base repository, GitHub sends the pull_request event to the base repository and no pull request events occur on the forked repository.\nWorkflows don't run on forked repositories by default. You must enable GitHub Actions in the Actions tab of the forked repository.\nThe permissions for the GITHUB_TOKEN in forked repositories is read-only. 
For more information about the GITHUB_TOKEN, see https://help.github.com/en/articles/virtual-environments-for-github-actions.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": [ "assigned", "unassigned", "labeled", "unlabeled", "opened", "edited", "closed", "reopened", "synchronize", "converted_to_draft", "ready_for_review", "locked", "unlocked", "milestoned", "demilestoned", "review_requested", "review_request_removed", "auto_merge_enabled", "auto_merge_disabled" ] }, "default": ["opened", "synchronize", "reopened"] } }, "patternProperties": { "^(branche|tag|path)s(-ignore)?$": { "type": "array" } }, "additionalProperties": false }, "pull_request_review": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#pull-request-review-event-pull_request_review", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the pull_request_review event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/pulls/reviews.\nNote: Workflows do not run on private base repositories when you open a pull request from a forked repository.\nWhen you create a pull request from a forked repository to the base repository, GitHub sends the pull_request event to the base repository and no pull request events occur on the forked repository.\nWorkflows don't run on forked repositories by default. You must enable GitHub Actions in the Actions tab of the forked repository.\nThe permissions for the GITHUB_TOKEN in forked repositories is read-only. 
For more information about the GITHUB_TOKEN, see https://help.github.com/en/articles/virtual-environments-for-github-actions.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["submitted", "edited", "dismissed"] }, "default": ["submitted", "edited", "dismissed"] } } }, "pull_request_review_comment": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#pull-request-review-comment-event-pull_request_review_comment", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime a comment on a pull request's unified diff is modified, which triggers the pull_request_review_comment event. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/pulls/comments.\nNote: Workflows do not run on private base repositories when you open a pull request from a forked repository.\nWhen you create a pull request from a forked repository to the base repository, GitHub sends the pull_request event to the base repository and no pull request events occur on the forked repository.\nWorkflows don't run on forked repositories by default. You must enable GitHub Actions in the Actions tab of the forked repository.\nThe permissions for the GITHUB_TOKEN in forked repositories is read-only. 
For more information about the GITHUB_TOKEN, see https://help.github.com/en/articles/virtual-environments-for-github-actions.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["created", "edited", "deleted"] }, "default": ["created", "edited", "deleted"] } } }, "pull_request_target": { "$comment": "https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request_target", "$ref": "#/definitions/ref", "description": "This event is similar to pull_request, except that it runs in the context of the base repository of the pull request, rather than in the merge commit. This means that you can more safely make your secrets available to the workflows triggered by the pull request, because only workflows defined in the commit on the base repository are run. For example, this event allows you to create workflows that label and comment on pull requests, based on the contents of the event payload.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": [ "assigned", "unassigned", "labeled", "unlabeled", "opened", "edited", "closed", "reopened", "synchronize", "converted_to_draft", "ready_for_review", "locked", "unlocked", "review_requested", "review_request_removed", "auto_merge_enabled", "auto_merge_disabled" ] }, "default": ["opened", "synchronize", "reopened"] } }, "patternProperties": { "^(branche|tag|path)s(-ignore)?$": {} }, "additionalProperties": false }, "push": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#push-event-push", "$ref": "#/definitions/ref", "description": "Runs your workflow when someone pushes to a repository branch, which triggers the push event.\nNote: The webhook payload available to GitHub Actions does not include the added, removed, and modified attributes in the commit object. You can retrieve the full commit object using the REST API. 
For more information, see https://developer.github.com/v3/repos/commits/#get-a-single-commit.", "patternProperties": { "^(branche|tag|path)s(-ignore)?$": { "items": { "type": "string" }, "type": "array" } }, "additionalProperties": false }, "registry_package": { "$comment": "https://help.github.com/en/actions/reference/events-that-trigger-workflows#registry-package-event-registry_package", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime a package is published or updated. For more information, see https://help.github.com/en/github/managing-packages-with-github-packages.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["published", "updated"] }, "default": ["published", "updated"] } } }, "release": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#release-event-release", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the release event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/repos/releases/ in the GitHub Developer documentation.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": [ "published", "unpublished", "created", "edited", "deleted", "prereleased", "released" ] }, "default": [ "published", "unpublished", "created", "edited", "deleted", "prereleased", "released" ] } } }, "status": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#status-event-status", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the status of a Git commit changes, which triggers the status event. For information about the REST API, see https://developer.github.com/v3/repos/statuses/." 
}, "watch": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#watch-event-watch", "$ref": "#/definitions/eventObject", "description": "Runs your workflow anytime the watch event occurs. More than one activity type triggers this event. For information about the REST API, see https://developer.github.com/v3/activity/starring/." }, "workflow_call": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows#workflow_call", "description": "Allows workflows to be reused by other workflows.", "properties": { "inputs": { "$comment": "https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#onworkflow_callinputs", "description": "When using the workflow_call keyword, you can optionally specify inputs that are passed to the called workflow from the caller workflow.", "type": "object", "patternProperties": { "^[_a-zA-Z][a-zA-Z0-9_-]*$": { "$comment": "https://docs.github.com/en/actions/creating-actions/metadata-syntax-for-github-actions#inputsinput_id", "description": "A string identifier to associate with the input. The value of is a map of the input's metadata. The must be a unique identifier within the inputs object. 
The must start with a letter or _ and contain only alphanumeric characters, -, or _.", "type": "object", "properties": { "description": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/metadata-syntax-for-github-actions#inputsinput_iddescription", "description": "A string description of the input parameter.", "type": "string" }, "deprecationMessage": { "description": "A string shown to users using the deprecated input.", "type": "string" }, "required": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/metadata-syntax-for-github-actions#inputsinput_idrequired", "description": "A boolean to indicate whether the action requires the input parameter. Set to true when the parameter is required.", "type": "boolean" }, "type": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callinput_idtype", "description": "Required if input is defined for the on.workflow_call keyword. The value of this parameter is a string specifying the data type of the input. This must be one of: boolean, number, or string.", "type": "string", "enum": ["boolean", "number", "string"] }, "default": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/metadata-syntax-for-github-actions#inputsinput_iddefault", "description": "The default value is used when an input parameter isn't specified in a workflow file.", "type": ["boolean", "number", "string"] } }, "required": ["type"], "additionalProperties": false } }, "additionalProperties": false }, "secrets": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callsecrets", "description": "A map of the secrets that can be used in the called workflow. 
Within the called workflow, you can use the secrets context to refer to a secret.", "patternProperties": { "^[_a-zA-Z][a-zA-Z0-9_-]*$": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callsecretssecret_id", "description": "A string identifier to associate with the secret.", "properties": { "description": { "description": "A string description of the secret parameter.", "type": "string" }, "required": { "$comment": "https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_callsecretssecret_idrequired", "description": "A boolean specifying whether the secret must be supplied.", "type": "boolean" } }, "required": ["required"], "additionalProperties": false } }, "additionalProperties": false } } }, "workflow_dispatch": { "$comment": "https://github.blog/changelog/2020-07-06-github-actions-manual-triggers-with-workflow_dispatch/", "description": "You can now create workflows that are manually triggered with the new workflow_dispatch event. You will then see a 'Run workflow' button on the Actions tab, enabling you to easily trigger a run.", "properties": { "inputs": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/metadata-syntax-for-github-actions#inputs", "description": "Input parameters allow you to specify data that the action expects to use during runtime. GitHub stores input parameters as environment variables. Input ids with uppercase letters are converted to lowercase during runtime. We recommended using lowercase input ids.", "type": "object", "patternProperties": { "^[_a-zA-Z][a-zA-Z0-9_-]*$": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/metadata-syntax-for-github-actions#inputsinput_id", "description": "A string identifier to associate with the input. The value of is a map of the input's metadata. The must be a unique identifier within the inputs object. 
The must start with a letter or _ and contain only alphanumeric characters, -, or _.", "type": "object", "properties": { "description": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/metadata-syntax-for-github-actions#inputsinput_iddescription", "description": "A string description of the input parameter.", "type": "string" }, "deprecationMessage": { "description": "A string shown to users using the deprecated input.", "type": "string" }, "required": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/metadata-syntax-for-github-actions#inputsinput_idrequired", "description": "A boolean to indicate whether the action requires the input parameter. Set to true when the parameter is required.", "type": "boolean" }, "default": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/metadata-syntax-for-github-actions#inputsinput_iddefault", "description": "A string representing the default value. The default value is used when an input parameter isn't specified in a workflow file." 
}, "type": { "$comment": "https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onworkflow_dispatchinputsinput_idtype", "description": "A string representing the type of the input.", "type": "string", "enum": [ "string", "choice", "boolean", "number", "environment" ] }, "options": { "$comment": "https://github.blog/changelog/2021-11-10-github-actions-input-types-for-manual-workflows", "description": "The options of the dropdown list, if the type is a choice.", "type": "array", "items": { "type": "string" }, "minItems": 1 } }, "allOf": [ { "if": { "properties": { "type": { "const": "string" } }, "required": ["type"] }, "then": { "properties": { "default": { "type": "string" } } } }, { "if": { "properties": { "type": { "const": "boolean" } }, "required": ["type"] }, "then": { "properties": { "default": { "type": "boolean" } } } }, { "if": { "properties": { "type": { "const": "number" } }, "required": ["type"] }, "then": { "properties": { "default": { "type": "number" } } } }, { "if": { "properties": { "type": { "const": "environment" } }, "required": ["type"] }, "then": { "properties": { "default": { "type": "string" } } } }, { "if": { "properties": { "type": { "const": "choice" } }, "required": ["type"] }, "then": { "required": ["options"] } } ], "required": ["description"], "additionalProperties": false } }, "additionalProperties": false } }, "additionalProperties": false }, "workflow_run": { "$comment": "https://docs.github.com/en/actions/reference/events-that-trigger-workflows#workflow_run", "$ref": "#/definitions/eventObject", "description": "This event occurs when a workflow run is requested or completed, and allows you to execute a workflow based on the finished result of another workflow. 
For example, if your pull_request workflow generates build artifacts, you can create a new workflow that uses workflow_run to analyze the results and add a comment to the original pull request.", "properties": { "types": { "$ref": "#/definitions/types", "items": { "type": "string", "enum": ["requested", "completed", "in_progress"] }, "default": ["requested", "completed"] }, "workflows": { "type": "array", "items": { "type": "string" }, "minItems": 1 } }, "patternProperties": { "^branches(-ignore)?$": {} } }, "repository_dispatch": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#external-events-repository_dispatch", "$ref": "#/definitions/eventObject", "description": "You can use the GitHub API to trigger a webhook event called repository_dispatch when you want to trigger a workflow for activity that happens outside of GitHub. For more information, see https://developer.github.com/v3/repos/#create-a-repository-dispatch-event.\nTo trigger the custom repository_dispatch webhook event, you must send a POST request to a GitHub API endpoint and provide an event_type name to describe the activity type. To trigger a workflow run, you must also configure your workflow to use the repository_dispatch event." }, "schedule": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/events-that-trigger-workflows#scheduled-events-schedule", "description": "You can schedule a workflow to run at specific UTC times using POSIX cron syntax (https://pubs.opengroup.org/onlinepubs/9699919799/utilities/crontab.html#tag_20_25_07). Scheduled workflows run on the latest commit on the default or base branch. The shortest interval you can run scheduled workflows is once every 5 minutes.\nNote: GitHub Actions does not support the non-standard syntax @yearly, @monthly, @weekly, @daily, @hourly, and @reboot.\nYou can use crontab guru (https://crontab.guru/). 
to help generate your cron syntax and confirm what time it will run. To help you get started, there is also a list of crontab guru examples (https://crontab.guru/examples.html).", "type": "array", "items": { "properties": { "cron": { "type": "string" } }, "additionalProperties": false }, "minItems": 1 } }, "additionalProperties": false } ] }, "env": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#env", "$ref": "#/definitions/env", "description": "A map of environment variables that are available to all jobs and steps in the workflow." }, "defaults": { "$comment": "https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#defaults", "$ref": "#/definitions/defaults", "description": "A map of default settings that will apply to all jobs in the workflow." }, "concurrency": { "$comment": "https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency", "description": "Concurrency ensures that only a single job or workflow using the same concurrency group will run at a time. A concurrency group can be any string or expression. The expression can use any context except for the secrets context. \nYou can also specify concurrency at the workflow level. \nWhen a concurrent job or workflow is queued, if another job or workflow using the same concurrency group in the repository is in progress, the queued job or workflow will be pending. Any previously pending job or workflow in the concurrency group will be canceled. To also cancel any currently running job or workflow in the same concurrency group, specify cancel-in-progress: true.", "oneOf": [ { "type": "string" }, { "$ref": "#/definitions/concurrency" } ] }, "jobs": { "$comment": "https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#jobs", "description": "A workflow run is made up of one or more jobs. Jobs run in parallel by default. 
To run jobs sequentially, you can define dependencies on other jobs using the jobs..needs keyword.\nEach job runs in a fresh instance of the virtual environment specified by runs-on.\nYou can run an unlimited number of jobs as long as you are within the workflow usage limits. For more information, see https://help.github.com/en/github/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions#usage-limits.", "type": "object", "patternProperties": { "^[_a-zA-Z][a-zA-Z0-9_-]*$": { "oneOf": [ { "$ref": "#/definitions/normalJob" }, { "$ref": "#/definitions/reusableWorkflowCallJob" } ] } }, "minProperties": 1, "additionalProperties": false }, "run-name": { "$comment": "https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#run-name", "description": "The name for workflow runs generated from the workflow. GitHub displays the workflow run name in the list of workflow runs on your repository's 'Actions' tab.", "type": "string" }, "permissions": { "$ref": "#/definitions/permissions" } }, "required": ["on", "jobs"], "type": "object" } parse_type-0.6.4/bin/github-workflow_check.py000077500000000000000000000077211467747561600213460ustar00rootroot00000000000000#!/usr/bin/env python3 """ Check a github-workflow YAML file. 
RELATED: JSON schema for Github Action workflows * https://dev.to/robertobutti/vscode-how-to-check-workflow-syntax-for-github-actions-4k0o - https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions - https://github.com/actions/starter-workflows/tree/master/ci - https://github.com/actions/starter-workflows/blob/main/ci/python-publish.yml REQUIRES: pip install check-jsonschema DOWNLOAD: https://github.com/SchemaStore/schemastore/blob/master/src/schemas/json/github-workflow.json USE: check-jsonschema --schemafile github-workflow.json_schema.txt .github/workflows/release-to-pypi.yml * https://github.com/SchemaStore/schemastore/blob/master/src/schemas/json/github-action.json * MAYBE: https://github.com/softprops/github-actions-schemas/blob/master/workflow.json REQUIRES: * pip install check-jsonschema * pip install typer >= 0.12.5 * pip install typing-extensions GITHUB WORKFLOW SCHEMA: * https://github.com/SchemaStore/schemastore/blob/master/src/schemas/json/github-workflow.json """ from pathlib import Path from subprocess import run from typing import Optional from typing_extensions import Self import typer # ----------------------------------------------------------------------------- # CONSTANTS # ----------------------------------------------------------------------------- HERE = Path(__file__).parent.absolute() GITHUB_WORKFLOW_SCHEMA_URL = "https://github.com/SchemaStore/schemastore/blob/master/src/schemas/json/github-workflow.json" GITHUB_WORKFLOW_SCHEMA_PATH = HERE/"github-workflow.json_schema" # ----------------------------------------------------------------------------- # CLASSES: # ----------------------------------------------------------------------------- class Verdict: def __init__(self, path: Path, outcome: bool, message: Optional[str] = None): self.path = path self.outcome = outcome self.message = message or "" @property def verdict(self): the_verdict = "FAILED" if self.outcome: the_verdict = "OK" return the_verdict def 
as_bool(self): return bool(self.outcome) def __bool__(self): return self.as_bool() def __str__(self): return f"{self.verdict}: {self.path} {self.message}".strip() def __repr__(self): class_name = self.__class__.__name__ return f"<{class_name}: path={self.path}, verdict={self.verdict}, message='{self.message}'>" @classmethod def make_success(cls, path: Path, message: Optional[str] = None) -> Self: return cls(path, outcome=True, message=message) @classmethod def make_failure(cls, path: Path, message: Optional[str] = None) -> Self: return cls(path, outcome=False, message=message) def workflow_check(path: Path) -> Verdict: schema = GITHUB_WORKFLOW_SCHEMA_PATH print(f"CHECK: {path} ... ") result = run(["check-jsonschema", f"--schemafile={schema}", f"{path}"]) if result.returncode == 0: return Verdict.make_success(path) # -- OTHERWISE: return Verdict.make_failure(path) def workflow_check_many(paths: list[Path]) -> list[Verdict]: verdicts = [] for path in paths: verdict = workflow_check(path) verdicts.append(verdict) return verdicts def main(paths: list[Path]) -> int: """ Check github-workflow YAML file(s). :param paths: Paths to YAML file(s). :return: 0, if all checks pass. 1, otherwise """ verdicts = workflow_check_many(paths) count_passed = 0 count_failed = 0 for verdict in verdicts: # DISABLED: print(str(verdict)) if verdict: count_passed += 1 else: count_failed += 1 summary = f"SUMMARY: {len(verdicts)} files, {count_passed} passed, {count_failed} failed" print(summary) result = 1 if count_failed == 0: result = 0 return result if __name__ == '__main__': typer.run(main) parse_type-0.6.4/bin/make_localpi.py000077500000000000000000000170151467747561600174740ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """ Utility script to create a pypi-like directory structure (localpi) from a number of Python packages in a directory of the local filesystem. 
DIRECTORY STRUCTURE (before): +-- downloads/ +-- alice-1.0.zip +-- alice-1.0.tar.gz +-- bob-1.3.0.tar.gz +-- bob-1.4.2.tar.gz +-- charly-1.0.tar.bz2 DIRECTORY STRUCTURE (afterwards): +-- downloads/ +-- simple/ | +-- alice/index.html --> ../../alice-*.* | +-- bob/index.html --> ../../bob-*.* | +-- charly/index.html --> ../../charly-*.* | +-- index.html --> alice/, bob/, ... +-- alice-1.0.zip +-- alice-1.0.tar.gz +-- bob-1.3.0.tar.gz +-- bob-1.4.2.tar.gz +-- charly-1.0.tar.bz2 USAGE EXAMPLE: mkdir -p /tmp/downloads pip install --download=/tmp/downloads argparse Jinja2 make_localpi.py /tmp/downloads pip install --index-url=file:///tmp/downloads/simple argparse Jinja2 ALTERNATIVE: pip install --download=/tmp/downloads argparse Jinja2 pip install --find-links=/tmp/downloads --no-index argparse Jinja2 """ from __future__ import with_statement, print_function from fnmatch import fnmatch import os.path import shutil import sys __author__ = "Jens Engel" __version__ = "0.2" __license__ = "BSD" __copyright__ = "(c) 2013 by Jens Engel" class Package(object): """ Package entity that keeps track of: * one or more versions of this package * one or more archive types """ PATTERNS = [ "*.egg", "*.exe", "*.whl", "*.zip", "*.tar.gz", "*.tar.bz2", "*.7z" ] def __init__(self, filename, name=None): if not name and filename: name = self.get_pkgname(filename) self.name = name self.files = [] if filename: self.files.append(filename) @property def versions(self): versions_info = [ self.get_pkgversion(p) for p in self.files ] return versions_info @classmethod def get_pkgversion(cls, filename): parts = os.path.basename(filename).rsplit("-", 1) version = "" if len(parts) >= 2: version = parts[1] for pattern in cls.PATTERNS: assert pattern.startswith("*") suffix = pattern[1:] if version.endswith(suffix): version = version[:-len(suffix)] break return version @staticmethod def get_pkgname(filename): name = os.path.basename(filename).rsplit("-", 1)[0] if name.startswith("http%3A") or 
name.startswith("https%3A"): # -- PIP DOWNLOAD-CACHE PACKAGE FILE NAME SCHEMA: pos = name.rfind("%2F") name = name[pos+3:] return name @staticmethod def splitext(filename): fname = os.path.splitext(filename)[0] if fname.endswith(".tar"): fname = os.path.splitext(fname)[0] return fname @classmethod def isa(cls, filename): basename = os.path.basename(filename) if basename.startswith("."): return False for pattern in cls.PATTERNS: if fnmatch(filename, pattern): return True return False def make_index_for(package, index_dir, verbose=True): """ Create an 'index.html' for one package. :param package: Package object to use. :param index_dir: Where 'index.html' should be created. """ index_template = """\ {title}

{title}

    {packages}
""" item_template = '
  • {0}
  • ' index_filename = os.path.join(index_dir, "index.html") if not os.path.isdir(index_dir): os.makedirs(index_dir) parts = [] for pkg_filename in package.files: pkg_name = os.path.basename(pkg_filename) if pkg_name == "index.html": # -- ROOT-INDEX: pkg_name = os.path.basename(os.path.dirname(pkg_filename)) else: pkg_name = package.splitext(pkg_name) pkg_relpath_to = os.path.relpath(pkg_filename, index_dir) parts.append(item_template.format(pkg_name, pkg_relpath_to)) if not parts: print("OOPS: Package %s has no files" % package.name) return if verbose: root_index = not Package.isa(package.files[0]) if root_index: info = "with %d package(s)" % len(package.files) else: package_versions = sorted(set(package.versions)) info = ", ".join(reversed(package_versions)) message = "%-30s %s" % (package.name, info) print(message) with open(index_filename, "w") as f: packages = "\n".join(parts) text = index_template.format(title=package.name, packages=packages) f.write(text.strip()) f.close() def make_package_index(download_dir): """ Create a pypi server like file structure below download directory. :param download_dir: Download directory with packages. EXAMPLE BEFORE: +-- downloads/ +-- alice-1.0.zip +-- alice-1.0.tar.gz +-- bob-1.3.0.tar.gz +-- bob-1.4.2.tar.gz +-- charly-1.0.tar.bz2 EXAMPLE AFTERWARDS: +-- downloads/ +-- simple/ | +-- alice/index.html --> ../../alice-*.* | +-- bob/index.html --> ../../bob-*.* | +-- charly/index.html --> ../../charly-*.* | +-- index.html --> alice/index.html, bob/index.html, ... +-- alice-1.0.zip +-- alice-1.0.tar.gz +-- bob-1.3.0.tar.gz +-- bob-1.4.2.tar.gz +-- charly-1.0.tar.bz2 """ if not os.path.isdir(download_dir): raise ValueError("No such directory: %r" % download_dir) pkg_rootdir = os.path.join(download_dir, "simple") if os.path.isdir(pkg_rootdir): shutil.rmtree(pkg_rootdir, ignore_errors=True) os.mkdir(pkg_rootdir) # -- STEP: Collect all packages. 
package_map = {} packages = [] for filename in sorted(os.listdir(download_dir)): if not Package.isa(filename): continue pkg_filepath = os.path.join(download_dir, filename) package_name = Package.get_pkgname(pkg_filepath) package = package_map.get(package_name, None) if not package: # -- NEW PACKAGE DETECTED: Store/register package. package = Package(pkg_filepath) package_map[package.name] = package packages.append(package) else: # -- SAME PACKAGE: Collect other variant/version. package.files.append(pkg_filepath) # -- STEP: Make local PYTHON PACKAGE INDEX. root_package = Package(None, "Python Package Index") root_package.files = [ os.path.join(pkg_rootdir, pkg.name, "index.html") for pkg in packages ] make_index_for(root_package, pkg_rootdir) for package in packages: index_dir = os.path.join(pkg_rootdir, package.name) make_index_for(package, index_dir) # ----------------------------------------------------------------------------- # MAIN: # ----------------------------------------------------------------------------- if __name__ == "__main__": if (len(sys.argv) != 2) or "-h" in sys.argv[1:] or "--help" in sys.argv[1:]: print("USAGE: %s DOWNLOAD_DIR" % os.path.basename(sys.argv[0])) print(__doc__) sys.exit(1) make_package_index(sys.argv[1]) parse_type-0.6.4/bin/project_bootstrap.sh000077500000000000000000000012401467747561600205720ustar00rootroot00000000000000#!/bin/sh # ============================================================================= # BOOTSTRAP PROJECT: Download all requirements # ============================================================================= # test ${PIP_DOWNLOADS_DIR} || mkdir -p ${PIP_DOWNLOADS_DIR} # tox -e init set -e # -- CONFIGURATION: HERE=`dirname $0` TOP="${HERE}/.." 
: ${PIP_INDEX_URL="http://pypi.python.org/simple"} : ${PIP_DOWNLOAD_DIR:="${TOP}/downloads"} export PIP_INDEX_URL PIP_DOWNLOADS_DIR # -- EXECUTE STEPS: ${HERE}/toxcmd.py mkdir ${PIP_DOWNLOAD_DIR} pip install --download=${PIP_DOWNLOAD_DIR} -r ${TOP}/requirements/all.txt ${HERE}/make_localpi.py ${PIP_DOWNLOAD_DIR} parse_type-0.6.4/bin/toxcmd.py000077500000000000000000000210721467747561600163500ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: UTF-8 -*- """ Provides a command container for additional tox commands, used in "tox.ini". COMMANDS: * copytree * copy * py2to3 REQUIRES: * argparse """ from glob import glob import argparse import inspect import os.path import shutil import sys __author__ = "Jens Engel" __copyright__ = "(c) 2013 by Jens Engel" __license__ = "BSD" # ----------------------------------------------------------------------------- # CONSTANTS: # ----------------------------------------------------------------------------- VERSION = "0.1.0" FORMATTER_CLASS = argparse.RawDescriptionHelpFormatter # ----------------------------------------------------------------------------- # SUBCOMMAND: copytree # ----------------------------------------------------------------------------- def command_copytree(args): """ Copy one or more source directory(s) below a destination directory. Parts of the destination directory path are created if needed. Similar to the UNIX command: 'cp -R srcdir destdir' """ for srcdir in args.srcdirs: basename = os.path.basename(srcdir) destdir2 = os.path.normpath(os.path.join(args.destdir, basename)) if os.path.exists(destdir2): shutil.rmtree(destdir2) sys.stdout.write("copytree: %s => %s\n" % (srcdir, destdir2)) shutil.copytree(srcdir, destdir2) return 0 def setup_parser_copytree(parser): parser.add_argument("srcdirs", nargs="+", help="Source directory(s)") parser.add_argument("destdir", help="Destination directory") command_copytree.usage = "%(prog)s srcdir... 
destdir" command_copytree.short = "Copy source dir(s) below a destination directory." command_copytree.setup_parser = setup_parser_copytree # ----------------------------------------------------------------------------- # SUBCOMMAND: copy # ----------------------------------------------------------------------------- def command_copy(args): """ Copy one or more source-files(s) to a destpath (destfile or destdir). Destdir mode is used if: * More than one srcfile is provided * Last parameter ends with a slash ("/"). * Last parameter is an existing directory Destination directory path is created if needed. Similar to the UNIX command: 'cp srcfile... destpath' """ sources = args.sources destpath = args.destpath source_files = [] for file_ in sources: if "*" in file_: selected = glob(file_) source_files.extend(selected) elif os.path.isfile(file_): source_files.append(file_) if destpath.endswith("/") or os.path.isdir(destpath) or len(sources) > 1: # -- DESTDIR-MODE: Last argument is a directory. destdir = destpath else: # -- DESTFILE-MODE: Copy (and rename) one file. assert len(source_files) == 1 destdir = os.path.dirname(destpath) # -- WORK-HORSE: Copy one or more files to destpath. if not os.path.isdir(destdir): sys.stdout.write("copy: Create dir %s\n" % destdir) os.makedirs(destdir) for source in source_files: destname = os.path.join(destdir, os.path.basename(source)) sys.stdout.write("copy: %s => %s\n" % (source, destname)) shutil.copy(source, destname) return 0 def setup_parser_copy(parser): parser.add_argument("sources", nargs="+", help="Source files.") parser.add_argument("destpath", help="Destination path") command_copy.usage = "%(prog)s sources... destpath" command_copy.short = "Copy one or more source files to a destinition." 
command_copy.setup_parser = setup_parser_copy # ----------------------------------------------------------------------------- # SUBCOMMAND: mkdir # ----------------------------------------------------------------------------- def command_mkdir(args): """ Create a non-existing directory (or more ...). If the directory exists, the step is skipped. Similar to the UNIX command: 'mkdir -p dir' """ errors = 0 for directory in args.dirs: if os.path.exists(directory): if not os.path.isdir(directory): # -- SANITY CHECK: directory exists, but as file... sys.stdout.write("mkdir: %s\n" % directory) sys.stdout.write("ERROR: Exists already, but as file...\n") errors += 1 else: # -- NORMAL CASE: Directory does not exits yet. assert not os.path.isdir(directory) sys.stdout.write("mkdir: %s\n" % directory) os.makedirs(directory) return errors def setup_parser_mkdir(parser): parser.add_argument("dirs", nargs="+", help="Directory(s)") command_mkdir.usage = "%(prog)s dir..." command_mkdir.short = "Create non-existing directory (or more...)." command_mkdir.setup_parser = setup_parser_mkdir # ----------------------------------------------------------------------------- # SUBCOMMAND: py2to3 # ----------------------------------------------------------------------------- def command_py2to3(args): """ Apply '2to3' tool (Python2 to Python3 conversion tool) to Python sources. """ from lib2to3.main import main sys.exit(main("lib2to3.fixes", args=args.sources)) def setup_parser4py2to3(parser): parser.add_argument("sources", nargs="+", help="Source files.") command_py2to3.name = "2to3" command_py2to3.usage = "%(prog)s sources..." command_py2to3.short = "Apply python's 2to3 tool to Python sources." 
command_py2to3.setup_parser = setup_parser4py2to3 # ----------------------------------------------------------------------------- # COMMAND HELPERS/UTILS: # ----------------------------------------------------------------------------- def discover_commands(): commands = [] for name, func in inspect.getmembers(inspect.getmodule(toxcmd_main)): if name.startswith("__"): continue if name.startswith("command_") and callable(func): command_name0 = name.replace("command_", "") command_name = getattr(func, "name", command_name0) commands.append(Command(command_name, func)) return commands class Command(object): def __init__(self, name, func): assert isinstance(name, basestring) assert callable(func) self.name = name self.func = func self.parser = None def setup_parser(self, command_parser): setup_parser = getattr(self.func, "setup_parser", None) if setup_parser and callable(setup_parser): setup_parser(command_parser) else: command_parser.add_argument("args", nargs="*") @property def usage(self): usage = getattr(self.func, "usage", None) return usage @property def short_description(self): short_description = getattr(self.func, "short", "") return short_description @property def description(self): return inspect.getdoc(self.func) def __call__(self, args): return self.func(args) # ----------------------------------------------------------------------------- # MAIN-COMMAND: # ----------------------------------------------------------------------------- def toxcmd_main(args=None): """Command util with subcommands for tox environments.""" usage = "USAGE: %(prog)s [OPTIONS] COMMAND args..." if args is None: args = sys.argv[1:] # -- STEP: Build command-line parser. 
parser = argparse.ArgumentParser(description=inspect.getdoc(toxcmd_main), formatter_class=FORMATTER_CLASS) common_parser = parser.add_argument_group("Common options") common_parser.add_argument("--version", action="version", version=VERSION) subparsers = parser.add_subparsers(help="commands") for command in discover_commands(): command_parser = subparsers.add_parser(command.name, usage=command.usage, description=command.description, help=command.short_description, formatter_class=FORMATTER_CLASS) command_parser.set_defaults(func=command) command.setup_parser(command_parser) command.parser = command_parser # -- STEP: Process command-line and run command. options = parser.parse_args(args) command_function = options.func return command_function(options) # ----------------------------------------------------------------------------- # MAIN: # ----------------------------------------------------------------------------- if __name__ == "__main__": sys.exit(toxcmd_main()) parse_type-0.6.4/invoke.yaml000066400000000000000000000012661467747561600161070ustar00rootroot00000000000000# ===================================================== # INVOKE CONFIGURATION: parse_type # ===================================================== # -- ON WINDOWS: # run: # echo: true # pty: false # shell: C:\Windows\System32\cmd.exe # ===================================================== project: name: parse_type repo: "pypi" # -- TODO: until upload problems are resolved. 
repo_url: "https://upload.pypi.org/legacy/" tasks: auto_dash_names: false run: echo: true cleanup_all: extra_directories: - build - dist - .hypothesis - .pytest_cache - .ruff_cache - ".venv*" - ".tox" extra_files: - ".done.*" parse_type-0.6.4/justfile000066400000000000000000000044561467747561600155040ustar00rootroot00000000000000# ============================================================================= # justfile: A makefile-like build script -- parse_type # ============================================================================= # REQUIRES: cargo install just # PLATFORMS: Windows, Linux, macOS, ... # USAGE: # just --list # just # just # # SEE ALSO: # * https://github.com/casey/just # ============================================================================= # -- OPTION: Load environment-variables from "$HERE/.env" file (if exists) set dotenv-load # ----------------------------------------------------------------------------- # CONFIG: # ----------------------------------------------------------------------------- HERE := justfile_directory() PIP_INSTALL_OPTIONS := env_var_or_default("PIP_INSTALL_OPTIONS", "--quiet") PYTEST_OPTIONS := env_var_or_default("PYTEST_OPTIONS", "") # ----------------------------------------------------------------------------- # BUILD RECIPES / TARGETS: # ----------------------------------------------------------------------------- # DEFAULT-TARGET: Ensure that packages are installed and runs tests. default: (_ensure-install-packages "testing") test # PART=all, testing, ... install-packages PART="all": @echo "INSTALL-PACKAGES: {{PART}} ..." pip install {{PIP_INSTALL_OPTIONS}} -r py.requirements/{{PART}}.txt @touch "{{HERE}}/.done.install-packages.{{PART}}" # ENSURE: Python packages are installed. 
_ensure-install-packages PART="all": #!/usr/bin/env python3 from subprocess import run from os import path if not path.exists("{{HERE}}/.done.install-packages.{{PART}}"): run("just install-packages {{PART}}", shell=True) # -- SIMILAR: This solution requires a Bourne-like shell (may not work on: Windows). # _ensure-install-packages PART="testing": # @test -e "{{HERE}}/.done.install-packages.{{PART}}" || just install-packages {{PART}} # Run tests. test *TESTS: python -m pytest {{PYTEST_OPTIONS}} {{TESTS}} # Determine test coverage by running the tests. coverage: coverage run -m pytest coverage combine coverage report coverage html # Cleanup most parts (but leave PRECIOUS parts). cleanup: (_ensure-install-packages "all") invoke cleanup # Cleanup everything. cleanup-all: invoke cleanup.all parse_type-0.6.4/parse_type/000077500000000000000000000000001467747561600160765ustar00rootroot00000000000000parse_type-0.6.4/parse_type/__init__.py000066400000000000000000000006341467747561600202120ustar00rootroot00000000000000# -*- coding: UTF-8 -*- # Copyright 2013 - 2023, jenisys # SPDX-License-Identifier: MIT """ This module extends the :mod:`parse` to build and derive additional parse-types from other, existing types. """ from __future__ import absolute_import from parse_type.cardinality import Cardinality from parse_type.builder import TypeBuilder, build_type_dict __all__ = ["Cardinality", "TypeBuilder", "build_type_dict"] parse_type-0.6.4/parse_type/builder.py000066400000000000000000000275771467747561600201200ustar00rootroot00000000000000# -*- coding: utf-8 -*- # pylint: disable=missing-docstring r""" Provides support to compose user-defined parse types. Cardinality ------------ It is often useful to constrain how often a data type occurs. This is also called the cardinality of a data type (in a context). The supported cardinality are: * 0..1 zero_or_one, optional: T or None * 0..N zero_or_more, list_of * 1..N one_or_more, list_of (many) .. 
doctest:: cardinality >>> from parse_type import TypeBuilder >>> from parse import Parser >>> def parse_number(text): ... return int(text) >>> parse_number.pattern = r"\d+" >>> parse_many_numbers = TypeBuilder.with_many(parse_number) >>> more_types = { "Numbers": parse_many_numbers } >>> parser = Parser("List: {numbers:Numbers}", more_types) >>> parser.parse("List: 1, 2, 3") Enumeration Type (Name-to-Value Mappings) ----------------------------------------- An Enumeration data type allows to select one of several enum values by using its name. The converter function returns the selected enum value. .. doctest:: make_enum >>> parse_enum_yesno = TypeBuilder.make_enum({"yes": True, "no": False}) >>> more_types = { "YesNo": parse_enum_yesno } >>> parser = Parser("Answer: {answer:YesNo}", more_types) >>> parser.parse("Answer: yes") Choice (Name Enumerations) ----------------------------- A Choice data type allows to select one of several strings. .. doctest:: make_choice >>> parse_choice_yesno = TypeBuilder.make_choice(["yes", "no"]) >>> more_types = { "ChoiceYesNo": parse_choice_yesno } >>> parser = Parser("Answer: {answer:ChoiceYesNo}", more_types) >>> parser.parse("Answer: yes") """ from __future__ import absolute_import import inspect import re import enum from parse_type.cardinality import pattern_group_count, \ Cardinality, TypeBuilder as CardinalityTypeBuilder __all__ = ["TypeBuilder", "build_type_dict", "parse_anything"] class TypeBuilder(CardinalityTypeBuilder): """ Provides a utility class to build type-converters (parse_types) for the :mod:`parse` module. """ default_strict = True default_re_opts = (re.IGNORECASE | re.DOTALL) @classmethod def make_list(cls, item_converter=None, listsep=','): """ Create a type converter for a list of items (many := 1..*). The parser accepts anything and the converter needs to fail on errors. :param item_converter: Type converter for an item. :param listsep: List separator to use (as string). 
:return: Type converter function object for the list. """ if not item_converter: item_converter = parse_anything return cls.with_cardinality(Cardinality.many, item_converter, pattern=cls.anything_pattern, listsep=listsep) @staticmethod def make_enum(enum_mappings): """ Creates a type converter for an enumeration or text-to-value mapping. :param enum_mappings: Defines enumeration names and values. :return: Type converter function object for the enum/mapping. """ if (inspect.isclass(enum_mappings) and issubclass(enum_mappings, enum.Enum)): enum_class = enum_mappings enum_mappings = enum_class.__members__ def convert_enum(text): if text not in convert_enum.mappings: text = text.lower() # REQUIRED-BY: parse re.IGNORECASE return convert_enum.mappings[text] #< text.lower() ??? convert_enum.pattern = r"|".join(enum_mappings.keys()) convert_enum.mappings = enum_mappings return convert_enum @staticmethod def _normalize_choices(choices, transform): assert transform is None or callable(transform) if transform: choices = [transform(value) for value in choices] else: choices = list(choices) return choices @classmethod def make_choice(cls, choices, transform=None, strict=None): """ Creates a type-converter function to select one from a list of strings. The type-converter function returns the selected choice_text. The :param:`transform()` function is applied in the type converter. It can be used to enforce the case (because parser uses re.IGNORECASE). :param choices: List of strings as choice. :param transform: Optional, initial transform function for parsed text. :return: Type converter function object for this choices. """ # -- NOTE: Parser uses re.IGNORECASE flag # => transform may enforce case. 
choices = cls._normalize_choices(choices, transform) if strict is None: strict = cls.default_strict def convert_choice(text): if transform: text = transform(text) if strict and text not in convert_choice.choices: values = ", ".join(convert_choice.choices) raise ValueError("%s not in: %s" % (text, values)) return text convert_choice.pattern = r"|".join(choices) convert_choice.choices = choices return convert_choice @classmethod def make_choice2(cls, choices, transform=None, strict=None): """ Creates a type converter to select one item from a list of strings. The type converter function returns a tuple (index, choice_text). :param choices: List of strings as choice. :param transform: Optional, initial transform function for parsed text. :return: Type converter function object for this choices. """ choices = cls._normalize_choices(choices, transform) if strict is None: strict = cls.default_strict def convert_choice2(text): if transform: text = transform(text) if strict and text not in convert_choice2.choices: values = ", ".join(convert_choice2.choices) raise ValueError("%s not in: %s" % (text, values)) index = convert_choice2.choices.index(text) return index, text convert_choice2.pattern = r"|".join(choices) convert_choice2.choices = choices return convert_choice2 @classmethod def make_variant(cls, converters, re_opts=None, compiled=False, strict=True): """ Creates a type converter for a number of type converter alternatives. The first matching type converter is used. REQUIRES: type_converter.pattern attribute :param converters: List of type converters as alternatives. :param re_opts: Regular expression options zu use (=default_re_opts). :param compiled: Use compiled regexp matcher, if true (=False). :param strict: Enable assertion checks. :return: Type converter function object. .. note:: Works only with named fields in :class:`parse.Parser`. Parser needs group_index delta for unnamed/fixed fields. This is not supported for user-defined types. 
Otherwise, you need to use :class:`parse_type.parse.Parser` (patched version of the :mod:`parse` module). """ # -- NOTE: Uses double-dispatch with regex pattern rematch because # match is not passed through to primary type converter. assert converters, "REQUIRE: Non-empty list." if len(converters) == 1: return converters[0] if re_opts is None: re_opts = cls.default_re_opts pattern = r")|(".join([tc.pattern for tc in converters]) pattern = r"("+ pattern + ")" group_count = len(converters) for converter in converters: group_count += pattern_group_count(converter.pattern) if compiled: convert_variant = cls.__create_convert_variant_compiled(converters, re_opts, strict) else: convert_variant = cls.__create_convert_variant(re_opts, strict) convert_variant.pattern = pattern convert_variant.converters = tuple(converters) convert_variant.regex_group_count = group_count return convert_variant @staticmethod def __create_convert_variant(re_opts, strict): # -- USE: Regular expression pattern (compiled on use). def convert_variant(text, m=None): # pylint: disable=invalid-name, unused-argument, missing-docstring for converter in convert_variant.converters: if re.match(converter.pattern, text, re_opts): return converter(text) # -- pragma: no cover assert not strict, "OOPS-VARIANT-MISMATCH: %s" % text return None return convert_variant @staticmethod def __create_convert_variant_compiled(converters, re_opts, strict): # -- USE: Compiled regular expression matcher. 
for converter in converters: matcher = getattr(converter, "matcher", None) if not matcher: converter.matcher = re.compile(converter.pattern, re_opts) def convert_variant(text, m=None): # pylint: disable=invalid-name, unused-argument, missing-docstring for converter in convert_variant.converters: if converter.matcher.match(text): return converter(text) # -- pragma: no cover assert not strict, "OOPS-VARIANT-MISMATCH: %s" % text return None return convert_variant def build_type_dict(converters): """ Builds type dictionary for user-defined type converters, used by :mod:`parse` module. This requires that each type converter has a "name" attribute. :param converters: List of type converters (parse_types) :return: Type converter dictionary """ more_types = {} for converter in converters: assert callable(converter) more_types[converter.name] = converter return more_types # ----------------------------------------------------------------------------- # COMMON TYPE CONVERTERS # ----------------------------------------------------------------------------- def parse_anything(text, match=None, match_start=0): """ Provides a generic type converter that accepts anything and returns the text (unchanged). :param text: Text to convert (as string). :return: Same text (as string). 
""" # pylint: disable=unused-argument return text parse_anything.pattern = TypeBuilder.anything_pattern # ----------------------------------------------------------------------------- # Copyright (c) 2012-2020 by Jens Engel (https://github/jenisys/parse_type) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. parse_type-0.6.4/parse_type/cardinality.py000066400000000000000000000205601467747561600207560ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ This module simplifies to build parse types and regular expressions for a data type with the specified cardinality. 
""" # -- USE: enum34 from __future__ import absolute_import from enum import Enum # ----------------------------------------------------------------------------- # FUNCTIONS: # ----------------------------------------------------------------------------- def pattern_group_count(pattern): """Count the pattern-groups within a regex-pattern (as text).""" return pattern.replace(r"\(", "").count("(") # ----------------------------------------------------------------------------- # CLASS: Cardinality (Enum Class) # ----------------------------------------------------------------------------- class Cardinality(Enum): """Cardinality enumeration class to simplify building regular expression patterns for a data type with the specified cardinality. """ # pylint: disable=bad-whitespace __order__ = "one, zero_or_one, zero_or_more, one_or_more" one = (None, 0) zero_or_one = (r"(%s)?", 1) # SCHEMA: pattern zero_or_more = (r"(%s)?(\s*%s\s*(%s))*", 3) # SCHEMA: pattern sep pattern one_or_more = (r"(%s)(\s*%s\s*(%s))*", 3) # SCHEMA: pattern sep pattern # -- ALIASES: optional = zero_or_one many0 = zero_or_more many = one_or_more def __init__(self, schema, group_count=0): self.schema = schema self.group_count = group_count #< Number of match groups. def is_many(self): """Checks for a more general interpretation of "many". :return: True, if Cardinality.zero_or_more or Cardinality.one_or_more. """ return ((self is Cardinality.zero_or_more) or (self is Cardinality.one_or_more)) def make_pattern(self, pattern, listsep=','): """Make pattern for a data type with the specified cardinality. .. code-block:: python yes_no_pattern = r"yes|no" many_yes_no = Cardinality.one_or_more.make_pattern(yes_no_pattern) :param pattern: Regular expression for type (as string). :param listsep: List separator for multiple items (as string, optional) :return: Regular expression pattern for type with cardinality. 
""" if self is Cardinality.one: return pattern elif self is Cardinality.zero_or_one: return self.schema % pattern # -- OTHERWISE: return self.schema % (pattern, listsep, pattern) def compute_group_count(self, pattern): """Compute the number of regexp match groups when the pattern is provided to the :func:`Cardinality.make_pattern()` method. :param pattern: Item regexp pattern (as string). :return: Number of regexp match groups in the cardinality pattern. """ group_count = self.group_count pattern_repeated = 1 if self.is_many(): pattern_repeated = 2 return group_count + pattern_repeated * pattern_group_count(pattern) # ----------------------------------------------------------------------------- # CLASS: TypeBuilder # ----------------------------------------------------------------------------- class TypeBuilder(object): """Provides a utility class to build type-converters (parse_types) for parse. It supports to build new type-converters for different cardinality based on the type-converter for cardinality one. """ anything_pattern = r".+?" default_pattern = anything_pattern @classmethod def with_cardinality(cls, cardinality, converter, pattern=None, listsep=','): """Creates a type converter for the specified cardinality by using the type converter for T. :param cardinality: Cardinality to use (0..1, 0..*, 1..*). :param converter: Type converter (function) for data type T. :param pattern: Regexp pattern for an item (=converter.pattern). :return: type-converter for optional (T or None). """ if cardinality is Cardinality.one: return converter # -- NORMAL-CASE builder_func = getattr(cls, "with_%s" % cardinality.name) if cardinality is Cardinality.zero_or_one: return builder_func(converter, pattern) # -- MANY CASE: 0..*, 1..* return builder_func(converter, pattern, listsep=listsep) @classmethod def with_zero_or_one(cls, converter, pattern=None): """Creates a type converter for a T with 0..1 times by using the type converter for one item of T. 
:param converter: Type converter (function) for data type T. :param pattern: Regexp pattern for an item (=converter.pattern). :return: type-converter for optional (T or None). """ cardinality = Cardinality.zero_or_one if not pattern: pattern = getattr(converter, "pattern", cls.default_pattern) optional_pattern = cardinality.make_pattern(pattern) group_count = cardinality.compute_group_count(pattern) def convert_optional(text, m=None): # pylint: disable=invalid-name, unused-argument, missing-docstring if text: text = text.strip() if not text: return None return converter(text) convert_optional.pattern = optional_pattern convert_optional.regex_group_count = group_count return convert_optional @classmethod def with_zero_or_more(cls, converter, pattern=None, listsep=","): """Creates a type converter function for a list with 0..N items by using the type converter for one item of T. :param converter: Type converter (function) for data type T. :param pattern: Regexp pattern for an item (=converter.pattern). :param listsep: Optional list separator between items (default: ',') :return: type-converter for list """ cardinality = Cardinality.zero_or_more if not pattern: pattern = getattr(converter, "pattern", cls.default_pattern) many0_pattern = cardinality.make_pattern(pattern, listsep) group_count = cardinality.compute_group_count(pattern) def convert_list0(text, m=None): # pylint: disable=invalid-name, unused-argument, missing-docstring if text: text = text.strip() if not text: return [] return [converter(part.strip()) for part in text.split(listsep)] convert_list0.pattern = many0_pattern # OLD convert_list0.group_count = group_count convert_list0.regex_group_count = group_count return convert_list0 @classmethod def with_one_or_more(cls, converter, pattern=None, listsep=","): """Creates a type converter function for a list with 1..N items by using the type converter for one item of T. :param converter: Type converter (function) for data type T. 
:param pattern: Regexp pattern for an item (=converter.pattern). :param listsep: Optional list separator between items (default: ',') :return: Type converter for list """ cardinality = Cardinality.one_or_more if not pattern: pattern = getattr(converter, "pattern", cls.default_pattern) many_pattern = cardinality.make_pattern(pattern, listsep) group_count = cardinality.compute_group_count(pattern) def convert_list(text, m=None): # pylint: disable=invalid-name, unused-argument, missing-docstring return [converter(part.strip()) for part in text.split(listsep)] convert_list.pattern = many_pattern # OLD: convert_list.group_count = group_count convert_list.regex_group_count = group_count return convert_list # -- ALIAS METHODS: @classmethod def with_optional(cls, converter, pattern=None): """Alias for :py:meth:`with_zero_or_one()` method.""" return cls.with_zero_or_one(converter, pattern) @classmethod def with_many(cls, converter, pattern=None, listsep=','): """Alias for :py:meth:`with_one_or_more()` method.""" return cls.with_one_or_more(converter, pattern, listsep) @classmethod def with_many0(cls, converter, pattern=None, listsep=','): """Alias for :py:meth:`with_zero_or_more()` method.""" return cls.with_zero_or_more(converter, pattern, listsep) parse_type-0.6.4/parse_type/cardinality_field.py000066400000000000000000000152561467747561600221270ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Provides support for cardinality fields. 
A cardinality field is a type suffix for parse format expression, ala: "{person:Person?}" #< Cardinality: 0..1 = zero or one = optional "{persons:Person*}" #< Cardinality: 0..* = zero or more = many0 "{persons:Person+}" #< Cardinality: 1..* = one or more = many """ from __future__ import absolute_import import six from parse_type.cardinality import Cardinality, TypeBuilder class MissingTypeError(KeyError): # pylint: disable=missing-docstring pass # ----------------------------------------------------------------------------- # CLASS: Cardinality (Field Part) # ----------------------------------------------------------------------------- class CardinalityField(object): """Cardinality field for parse format expression, ala: "{person:Person?}" #< Cardinality: 0..1 = zero or one = optional "{persons:Person*}" #< Cardinality: 0..* = zero or more = many0 "{persons:Person+}" #< Cardinality: 1..* = one or more = many """ # -- MAPPING SUPPORT: pattern_chars = "?*+" from_char_map = { '?': Cardinality.zero_or_one, '*': Cardinality.zero_or_more, '+': Cardinality.one_or_more, } to_char_map = dict([(value, key) for key, value in from_char_map.items()]) @classmethod def matches_type(cls, type_name): """Checks if a type name uses the CardinalityField naming scheme. :param type_name: Type name to check (as string). :return: True, if type name has CardinalityField name suffix. """ return type_name and type_name[-1] in CardinalityField.pattern_chars @classmethod def split_type(cls, type_name): """Split type of a type name with CardinalityField suffix into its parts. :param type_name: Type name (as string). 
:return: Tuple (type_basename, cardinality) """ if cls.matches_type(type_name): basename = type_name[:-1] cardinality = cls.from_char_map[type_name[-1]] else: # -- ASSUME: Cardinality.one cardinality = Cardinality.one basename = type_name return (basename, cardinality) @classmethod def make_type(cls, basename, cardinality): """Build new type name according to CardinalityField naming scheme. :param basename: Type basename of primary type (as string). :param cardinality: Cardinality of the new type (as Cardinality item). :return: Type name with CardinalityField suffix (if needed) """ if cardinality is Cardinality.one: # -- POSTCONDITION: assert not cls.make_type(type_name) return basename # -- NORMAL CASE: type with CardinalityField suffix. type_name = "%s%s" % (basename, cls.to_char_map[cardinality]) # -- POSTCONDITION: assert cls.make_type(type_name) return type_name # ----------------------------------------------------------------------------- # CLASS: CardinalityFieldTypeBuilder # ----------------------------------------------------------------------------- class CardinalityFieldTypeBuilder(object): """Utility class to create type converters based on: * the CardinalityField naming scheme and * type converter for cardinality=1 """ listsep = ',' @classmethod def create_type_variant(cls, type_name, type_converter): r"""Create type variants for types with a cardinality field. The new type converters are based on the type converter with cardinality=1. .. code-block:: python import parse @parse.with_pattern(r'\d+') def parse_number(text): return int(text) new_type = CardinalityFieldTypeBuilder.create_type_variant( "Number+", parse_number) new_type = CardinalityFieldTypeBuilder.create_type_variant( "Number+", dict(Number=parse_number)) :param type_name: Type name with cardinality field suffix. :param type_converter: Type converter or type dictionary. :return: Type converter variant (function). 
:raises: ValueError, if type_name does not end with CardinalityField :raises: MissingTypeError, if type_converter is missing in type_dict """ assert isinstance(type_name, six.string_types) if not CardinalityField.matches_type(type_name): message = "type_name='%s' has no CardinalityField" % type_name raise ValueError(message) primary_name, cardinality = CardinalityField.split_type(type_name) if isinstance(type_converter, dict): type_dict = type_converter type_converter = type_dict.get(primary_name, None) if not type_converter: raise MissingTypeError(primary_name) assert callable(type_converter) type_variant = TypeBuilder.with_cardinality(cardinality, type_converter, listsep=cls.listsep) type_variant.name = type_name return type_variant @classmethod def create_type_variants(cls, type_names, type_dict): """Create type variants for types with a cardinality field. The new type converters are based on the type converter with cardinality=1. .. code-block:: python # -- USE: parse_number() type converter function. new_types = CardinalityFieldTypeBuilder.create_type_variants( ["Number?", "Number+"], dict(Number=parse_number)) :param type_names: List of type names with cardinality field suffix. :param type_dict: Type dictionary with named type converters. :return: Type dictionary with type converter variants. """ type_variant_dict = {} for type_name in type_names: type_variant = cls.create_type_variant(type_name, type_dict) type_variant_dict[type_name] = type_variant return type_variant_dict # MAYBE: Check if really needed. @classmethod def create_missing_type_variants(cls, type_names, type_dict): """Create missing type variants for types with a cardinality field. :param type_names: List of type names with cardinality field suffix. :param type_dict: Type dictionary with named type converters. :return: Type dictionary with missing type converter variants. 
""" missing_type_names = [name for name in type_names if name not in type_dict] return cls.create_type_variants(missing_type_names, type_dict) parse_type-0.6.4/parse_type/cfparse.py000066400000000000000000000072751467747561600201060ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Provides an extended :class:`parse.Parser` class that supports the cardinality fields in (user-defined) types. """ from __future__ import absolute_import import logging import parse from .cardinality_field import CardinalityField, CardinalityFieldTypeBuilder from .parse_util import FieldParser log = logging.getLogger(__name__) # pylint: disable=invalid-name class Parser(parse.Parser): """Provides an extended :class:`parse.Parser` with cardinality field support. A cardinality field is a type suffix for parse format expression, ala: "... {person:Person?} ..." -- OPTIONAL: Cardinality zero or one, 0..1 "... {persons:Person*} ..." -- MANY0: Cardinality zero or more, 0.. "... {persons:Person+} ..." -- MANY: Cardinality one or more, 1.. When the primary type converter for cardinality=1 is provided, the type variants for the other cardinality cases can be derived from it. This parser class automatically creates missing type variants for types with a cardinality field and passes the extended type dictionary to its base class. """ # -- TYPE-BUILDER: For missing types in Fields with CardinalityField part. type_builder = CardinalityFieldTypeBuilder def __init__(self, schema, extra_types=None, case_sensitive=False, type_builder=None): """Creates a parser with CardinalityField part support. :param schema: Parse schema (or format) for parser (as string). :param extra_types: Type dictionary with type converters (or None). :param case_sensitive: Indicates if case-sensitive regexp are used. :param type_builder: Type builder to use for missing types. 
""" if extra_types is None: extra_types = {} missing = self.create_missing_types(schema, extra_types, type_builder) if missing: # pylint: disable=logging-not-lazy log.debug("MISSING TYPES: %s" % ",".join(missing.keys())) extra_types.update(missing) # -- FINALLY: Delegate to base class. super(Parser, self).__init__(schema, extra_types, case_sensitive=case_sensitive) @classmethod def create_missing_types(cls, schema, type_dict, type_builder=None): """Creates missing types for fields with a CardinalityField part. It is assumed that the primary type converter for cardinality=1 is registered in the type dictionary. :param schema: Parse schema (or format) for parser (as string). :param type_dict: Type dictionary with type converters. :param type_builder: Type builder to use for missing types. :return: Type dictionary with missing types. Empty, if none. :raises: MissingTypeError, if a primary type converter with cardinality=1 is missing. """ if not type_builder: type_builder = cls.type_builder missing = cls.extract_missing_special_type_names(schema, type_dict) return type_builder.create_type_variants(missing, type_dict) @staticmethod def extract_missing_special_type_names(schema, type_dict): # pylint: disable=invalid-name """Extract the type names for fields with CardinalityField part. Selects only the missing type names that are not in the type dictionary. :param schema: Parse schema to use (as string). :param type_dict: Type dictionary with type converters. :return: Generator with missing type names (as string). """ for name in FieldParser.extract_types(schema): if CardinalityField.matches_type(name) and (name not in type_dict): yield name parse_type-0.6.4/parse_type/parse.py000066400000000000000000001064201467747561600175650ustar00rootroot00000000000000# -*- coding: UTF-8 -*- # BASED-ON: https://github.com/r1chardj0n3s/parse/parse.py # VERSION: parse 1.20.2 # Same as original parse modules. 
# # pylint: disable=line-too-long, invalid-name, too-many-locals, too-many-arguments # pylint: disable=redefined-builtin, too-few-public-methods, no-else-return # pylint: disable=unused-variable, no-self-use, missing-docstring # pylint: disable=unused-argument, unused-variable # pylint: disable=too-many-branches, too-many-statements # pylint: disable=all # # -- ORIGINAL-CODE STARTS-HERE ------------------------------------------------ from __future__ import absolute_import import logging import re import sys from datetime import datetime from datetime import time from datetime import timedelta from datetime import tzinfo from decimal import Decimal from functools import partial __version__ = "1.20.2" __all__ = ["parse", "search", "findall", "with_pattern"] log = logging.getLogger(__name__) def with_pattern(pattern, regex_group_count=None): r"""Attach a regular expression pattern matcher to a custom type converter function. This annotates the type converter with the :attr:`pattern` attribute. EXAMPLE: >>> import parse >>> @parse.with_pattern(r"\d+") ... def parse_number(text): ... return int(text) is equivalent to: >>> def parse_number(text): ... return int(text) >>> parse_number.pattern = r"\d+" :param pattern: regular expression pattern (as text) :param regex_group_count: Indicates how many regex-groups are in pattern. :return: wrapped function """ def decorator(func): func.pattern = pattern func.regex_group_count = regex_group_count return func return decorator class int_convert: """Convert a string to an integer. The string may start with a sign. It may be of a base other than 2, 8, 10 or 16. If base isn't specified, it will be detected automatically based on a string format. When string starts with a base indicator, 0#nnnn, it overrides the default base of 10. It may also have other non-numeric characters that we can ignore. 
""" CHARS = "0123456789abcdefghijklmnopqrstuvwxyz" def __init__(self, base=None): self.base = base def __call__(self, string, match): if string[0] == "-": sign = -1 number_start = 1 elif string[0] == "+": sign = 1 number_start = 1 else: sign = 1 number_start = 0 base = self.base # If base wasn't specified, detect it automatically if base is None: # Assume decimal number, unless different base is detected base = 10 # For number formats starting with 0b, 0o, 0x, use corresponding base ... if string[number_start] == "0" and len(string) - number_start > 2: if string[number_start + 1] in "bB": base = 2 elif string[number_start + 1] in "oO": base = 8 elif string[number_start + 1] in "xX": base = 16 chars = int_convert.CHARS[:base] string = re.sub("[^%s]" % chars, "", string.lower()) return sign * int(string, base) class convert_first: """Convert the first element of a pair. This equivalent to lambda s,m: converter(s). But unlike a lambda function, it can be pickled """ def __init__(self, converter): self.converter = converter def __call__(self, string, match): return self.converter(string) def percentage(string, match): return float(string[:-1]) / 100.0 class FixedTzOffset(tzinfo): """Fixed offset in minutes east from UTC.""" ZERO = timedelta(0) def __init__(self, offset, name): self._offset = timedelta(minutes=offset) self._name = name def __repr__(self): return "<%s %s %s>" % (self.__class__.__name__, self._name, self._offset) def utcoffset(self, dt): return self._offset def tzname(self, dt): return self._name def dst(self, dt): return self.ZERO def __eq__(self, other): if not isinstance(other, FixedTzOffset): return NotImplemented return self._name == other._name and self._offset == other._offset MONTHS_MAP = { "Jan": 1, "January": 1, "Feb": 2, "February": 2, "Mar": 3, "March": 3, "Apr": 4, "April": 4, "May": 5, "Jun": 6, "June": 6, "Jul": 7, "July": 7, "Aug": 8, "August": 8, "Sep": 9, "September": 9, "Oct": 10, "October": 10, "Nov": 11, "November": 11, "Dec": 12, 
"December": 12, } DAYS_PAT = r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun)" MONTHS_PAT = r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)" ALL_MONTHS_PAT = r"(%s)" % "|".join(MONTHS_MAP) TIME_PAT = r"(\d{1,2}:\d{1,2}(:\d{1,2}(\.\d+)?)?)" AM_PAT = r"(\s+[AP]M)" TZ_PAT = r"(\s+[-+]\d\d?:?\d\d)" def date_convert( string, match, ymd=None, mdy=None, dmy=None, d_m_y=None, hms=None, am=None, tz=None, mm=None, dd=None, ): """Convert the incoming string containing some date / time info into a datetime instance. """ groups = match.groups() time_only = False if mm and dd: y = datetime.today().year m = groups[mm] d = groups[dd] elif ymd is not None: y, m, d = re.split(r"[-/\s]", groups[ymd]) elif mdy is not None: m, d, y = re.split(r"[-/\s]", groups[mdy]) elif dmy is not None: d, m, y = re.split(r"[-/\s]", groups[dmy]) elif d_m_y is not None: d, m, y = d_m_y d = groups[d] m = groups[m] y = groups[y] else: time_only = True H = M = S = u = 0 if hms is not None and groups[hms]: t = groups[hms].split(":") if len(t) == 2: H, M = t else: H, M, S = t if "." in S: S, u = S.split(".") u = int(float("." + u) * 1000000) S = int(S) H = int(H) M = int(M) if am is not None: am = groups[am] if am: am = am.strip() if am == "AM" and H == 12: # correction for "12" hour functioning as "0" hour: 12:15 AM = 00:15 by 24 hr clock H -= 12 elif am == "PM" and H == 12: # no correction needed: 12PM is midday, 12:00 by 24 hour clock pass elif am == "PM": H += 12 if tz is not None: tz = groups[tz] if tz == "Z": tz = FixedTzOffset(0, "UTC") elif tz: tz = tz.strip() if tz.isupper(): # TODO use the awesome python TZ module? 
pass else: sign = tz[0] if ":" in tz: tzh, tzm = tz[1:].split(":") elif len(tz) == 4: # 'snnn' tzh, tzm = tz[1], tz[2:4] else: tzh, tzm = tz[1:3], tz[3:5] offset = int(tzm) + int(tzh) * 60 if sign == "-": offset = -offset tz = FixedTzOffset(offset, tz) if time_only: d = time(H, M, S, u, tzinfo=tz) else: y = int(y) if m.isdigit(): m = int(m) else: m = MONTHS_MAP[m] d = int(d) d = datetime(y, m, d, H, M, S, u, tzinfo=tz) return d def strf_date_convert(x, _, type): is_date = any("%" + x in type for x in "aAwdbBmyYjUW") is_time = any("%" + x in type for x in "HIpMSfz") dt = datetime.strptime(x, type) if "%y" not in type and "%Y" not in type: # year not specified dt = dt.replace(year=datetime.today().year) if is_date and is_time: return dt elif is_date: return dt.date() elif is_time: return dt.time() else: ValueError("Datetime not a date nor a time?") # ref: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes dt_format_to_regex = { "%a": "(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)", "%A": "(?:Sunday|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday)", "%w": "[0-6]", "%d": "[0-9]{1,2}", "%b": "(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)", "%B": "(?:January|February|March|April|May|June|July|August|September|October|November|December)", "%m": "[0-9]{1,2}", "%y": "[0-9]{2}", "%Y": "[0-9]{4}", "%H": "[0-9]{1,2}", "%I": "[0-9]{1,2}", "%p": "(?:AM|PM)", "%M": "[0-9]{2}", "%S": "[0-9]{2}", "%f": "[0-9]{1,6}", "%z": "[+|-][0-9]{2}(:?[0-9]{2})?(:?[0-9]{2})?", # "%Z": punt "%j": "[0-9]{1,3}", "%U": "[0-9]{1,2}", "%W": "[0-9]{1,2}", } # Compile a regular expression pattern that matches any date/time format symbol. dt_format_symbols_re = re.compile("|".join(dt_format_to_regex)) def get_regex_for_datetime_format(format_): """ Generate a regex pattern for a given datetime format string. Parameters: format_ (str): The datetime format string. Returns: str: A regex pattern corresponding to the datetime format string. 
""" # Replace all format symbols with their regex patterns. return dt_format_symbols_re.sub(lambda m: dt_format_to_regex[m.group(0)], format_) class TooManyFields(ValueError): pass class RepeatedNameError(ValueError): pass # note: {} are handled separately REGEX_SAFETY = re.compile(r"([?\\.[\]()*+^$!|])") # allowed field types ALLOWED_TYPES = set(list("nbox%fFegwWdDsSl") + ["t" + c for c in "ieahgcts"]) def extract_format(format, extra_types): """Pull apart the format [[fill]align][sign][0][width][.precision][type]""" fill = align = None if format[0] in "<>=^": align = format[0] format = format[1:] elif len(format) > 1 and format[1] in "<>=^": fill = format[0] align = format[1] format = format[2:] if format.startswith(("+", "-", " ")): format = format[1:] zero = False if format and format[0] == "0": zero = True format = format[1:] width = "" while format: if not format[0].isdigit(): break width += format[0] format = format[1:] if format.startswith("."): # Precision isn't needed but we need to capture it so that # the ValueError isn't raised. format = format[1:] # drop the '.' precision = "" while format: if not format[0].isdigit(): break precision += format[0] format = format[1:] # the rest is the type, if present type = format if ( type and type not in ALLOWED_TYPES and type not in extra_types and not any(k in type for k in dt_format_to_regex) ): raise ValueError("format spec %r not recognised" % type) return locals() PARSE_RE = re.compile(r"({{|}}|{[\w-]*(?:\.[\w-]+|\[[^]]+])*(?::[^}]+)?})") class Parser(object): """Encapsulate a format string that may be used to parse other strings.""" def __init__(self, format, extra_types=None, case_sensitive=False): # a mapping of a name as in {hello.world} to a regex-group compatible # name, like hello__world. 
It's used to prevent the transformation of # name-to-group and group to name to fail subtly, such as in: # hello_.world-> hello___world->hello._world self._group_to_name_map = {} # also store the original field name to group name mapping to allow # multiple instances of a name in the format string self._name_to_group_map = {} # and to sanity check the repeated instances store away the first # field type specification for the named field self._name_types = {} self._format = format if extra_types is None: extra_types = {} self._extra_types = extra_types if case_sensitive: self._re_flags = re.DOTALL else: self._re_flags = re.IGNORECASE | re.DOTALL self._fixed_fields = [] self._named_fields = [] self._group_index = 0 self._type_conversions = {} self._expression = self._generate_expression() self.__search_re = None self.__match_re = None log.debug("format %r -> %r", format, self._expression) def __repr__(self): if len(self._format) > 20: return "<%s %r>" % (self.__class__.__name__, self._format[:17] + "...") return "<%s %r>" % (self.__class__.__name__, self._format) @property def _search_re(self): if self.__search_re is None: try: self.__search_re = re.compile(self._expression, self._re_flags) except AssertionError: # access error through sys to keep py3k and backward compat e = str(sys.exc_info()[1]) if e.endswith("this version only supports 100 named groups"): raise TooManyFields( "sorry, you are attempting to parse too many complex fields" ) return self.__search_re @property def _match_re(self): if self.__match_re is None: expression = r"\A%s\Z" % self._expression try: self.__match_re = re.compile(expression, self._re_flags) except AssertionError: # access error through sys to keep py3k and backward compat e = str(sys.exc_info()[1]) if e.endswith("this version only supports 100 named groups"): raise TooManyFields( "sorry, you are attempting to parse too many complex fields" ) except re.error: raise NotImplementedError( "Group names (e.g. 
(?P) can " "cause failure, as they are not escaped properly: '%s'" % expression ) return self.__match_re @property def named_fields(self): return self._named_fields[:] @property def fixed_fields(self): return self._fixed_fields[:] @property def format(self): return self._format def parse(self, string, evaluate_result=True): """Match my format to the string exactly. Return a Result or Match instance or None if there's no match. """ m = self._match_re.match(string) if m is None: return None if evaluate_result: return self.evaluate_result(m) else: return Match(self, m) def search(self, string, pos=0, endpos=None, evaluate_result=True): """Search the string for my format. Optionally start the search at "pos" character index and limit the search to a maximum index of endpos - equivalent to search(string[:endpos]). If the ``evaluate_result`` argument is set to ``False`` a Match instance is returned instead of the actual Result instance. Return either a Result instance or None if there's no match. """ if endpos is None: endpos = len(string) m = self._search_re.search(string, pos, endpos) if m is None: return None if evaluate_result: return self.evaluate_result(m) else: return Match(self, m) def findall( self, string, pos=0, endpos=None, extra_types=None, evaluate_result=True ): """Search "string" for all occurrences of "format". Optionally start the search at "pos" character index and limit the search to a maximum index of endpos - equivalent to search(string[:endpos]). Returns an iterator that holds Result or Match instances for each format match found. """ if endpos is None: endpos = len(string) return ResultIterator( self, string, pos, endpos, evaluate_result=evaluate_result ) def _expand_named_fields(self, named_fields): result = {} for field, value in named_fields.items(): # split 'aaa[bbb][ccc]...' into 'aaa' and '[bbb][ccc]...' 
n = field.find("[") if n == -1: basename, subkeys = field, "" else: basename, subkeys = field[:n], field[n:] # create nested dictionaries {'aaa': {'bbb': {'ccc': ...}}} d = result k = basename if subkeys: for subkey in re.findall(r"\[[^]]+]", subkeys): d = d.setdefault(k, {}) k = subkey[1:-1] # assign the value to the last key d[k] = value return result def evaluate_result(self, m): """Generate a Result instance for the given regex match object""" # ok, figure the fixed fields we've pulled out and type convert them fixed_fields = list(m.groups()) for n in self._fixed_fields: if n in self._type_conversions: fixed_fields[n] = self._type_conversions[n](fixed_fields[n], m) fixed_fields = tuple(fixed_fields[n] for n in self._fixed_fields) # grab the named fields, converting where requested groupdict = m.groupdict() named_fields = {} name_map = {} for k in self._named_fields: korig = self._group_to_name_map[k] name_map[korig] = k if k in self._type_conversions: value = self._type_conversions[k](groupdict[k], m) else: value = groupdict[k] named_fields[korig] = value # now figure the match spans spans = {n: m.span(name_map[n]) for n in named_fields} spans.update((i, m.span(n + 1)) for i, n in enumerate(self._fixed_fields)) # and that's our result return Result(fixed_fields, self._expand_named_fields(named_fields), spans) def _regex_replace(self, match): return "\\" + match.group(1) def _generate_expression(self): # turn my _format attribute into the _expression attribute e = [] for part in PARSE_RE.split(self._format): if not part: continue elif part == "{{": e.append(r"\{") elif part == "}}": e.append(r"\}") elif part[0] == "{" and part[-1] == "}": # this will be a braces-delimited field to handle e.append(self._handle_field(part)) else: # just some text to match e.append(REGEX_SAFETY.sub(self._regex_replace, part)) return "".join(e) def _to_group_name(self, field): # return a version of field which can be used as capture group, even # though it might contain '.' 
group = field.replace(".", "_").replace("[", "_").replace("]", "_").replace("-", "_") # make sure we don't collide ("a.b" colliding with "a_b") n = 1 while group in self._group_to_name_map: n += 1 if "." in field: group = field.replace(".", "_" * n) elif "_" in field: group = field.replace("_", "_" * n) elif "-" in field: group = field.replace("-", "_" * n) else: raise KeyError("duplicated group name %r" % (field,)) # save off the mapping self._group_to_name_map[group] = field self._name_to_group_map[field] = group return group def _handle_field(self, field): # first: lose the braces field = field[1:-1] # now figure whether this is an anonymous or named field, and whether # there's any format specification format = "" if ":" in field: name, format = field.split(":", 1) else: name = field # This *should* be more flexible, but parsing complicated structures # out of the string is hard (and not necessarily useful) ... and I'm # being lazy. So for now `identifier` is "anything starting with a # letter" and digit args don't get attribute or element stuff. if name and name[0].isalpha(): if name in self._name_to_group_map: if self._name_types[name] != format: raise RepeatedNameError( 'field type %r for field "%s" ' "does not match previous seen type %r" % (format, name, self._name_types[name]) ) group = self._name_to_group_map[name] # match previously-seen value return r"(?P=%s)" % group else: group = self._to_group_name(name) self._name_types[name] = format self._named_fields.append(group) # this will become a group, which must not contain dots wrap = r"(?P<%s>%%s)" % group else: self._fixed_fields.append(self._group_index) wrap = r"(%s)" group = self._group_index # simplest case: no type specifier ({} or {name}) if not format: self._group_index += 1 return wrap % r".+?" 
# decode the format specification format = extract_format(format, self._extra_types) # figure type conversions, if any type = format["type"] is_numeric = type and type in "n%fegdobx" conv = self._type_conversions if type in self._extra_types: type_converter = self._extra_types[type] s = getattr(type_converter, "pattern", r".+?") regex_group_count = getattr(type_converter, "regex_group_count", 0) if regex_group_count is None: regex_group_count = 0 self._group_index += regex_group_count conv[group] = convert_first(type_converter) elif type == "n": s = r"\d{1,3}([,.]\d{3})*" self._group_index += 1 conv[group] = int_convert(10) elif type == "b": s = r"(0[bB])?[01]+" conv[group] = int_convert(2) self._group_index += 1 elif type == "o": s = r"(0[oO])?[0-7]+" conv[group] = int_convert(8) self._group_index += 1 elif type == "x": s = r"(0[xX])?[0-9a-fA-F]+" conv[group] = int_convert(16) self._group_index += 1 elif type == "%": s = r"\d+(\.\d+)?%" self._group_index += 1 conv[group] = percentage elif type == "f": s = r"\d*\.\d+" conv[group] = convert_first(float) elif type == "F": s = r"\d*\.\d+" conv[group] = convert_first(Decimal) elif type == "e": s = r"\d*\.\d+[eE][-+]?\d+|nan|NAN|[-+]?inf|[-+]?INF" conv[group] = convert_first(float) elif type == "g": s = r"\d+(\.\d+)?([eE][-+]?\d+)?|nan|NAN|[-+]?inf|[-+]?INF" self._group_index += 2 conv[group] = convert_first(float) elif type == "d": if format.get("width"): width = r"{1,%s}" % int(format["width"]) else: width = "+" s = r"\d{w}|[-+ ]?0[xX][0-9a-fA-F]{w}|[-+ ]?0[bB][01]{w}|[-+ ]?0[oO][0-7]{w}".format( w=width ) conv[group] = int_convert() # do not specify number base, determine it automatically elif any(k in type for k in dt_format_to_regex): s = get_regex_for_datetime_format(type) conv[group] = partial(strf_date_convert, type=type) elif type == "ti": s = r"(\d{4}-\d\d-\d\d)((\s+|T)%s)?(Z|\s*[-+]\d\d:?\d\d)?" 
% TIME_PAT n = self._group_index conv[group] = partial(date_convert, ymd=n + 1, hms=n + 4, tz=n + 7) self._group_index += 7 elif type == "tg": s = r"(\d{1,2}[-/](\d{1,2}|%s)[-/]\d{4})(\s+%s)?%s?%s?" s %= (ALL_MONTHS_PAT, TIME_PAT, AM_PAT, TZ_PAT) n = self._group_index conv[group] = partial( date_convert, dmy=n + 1, hms=n + 5, am=n + 8, tz=n + 9 ) self._group_index += 9 elif type == "ta": s = r"((\d{1,2}|%s)[-/]\d{1,2}[-/]\d{4})(\s+%s)?%s?%s?" s %= (ALL_MONTHS_PAT, TIME_PAT, AM_PAT, TZ_PAT) n = self._group_index conv[group] = partial( date_convert, mdy=n + 1, hms=n + 5, am=n + 8, tz=n + 9 ) self._group_index += 9 elif type == "te": # this will allow microseconds through if they're present, but meh s = r"(%s,\s+)?(\d{1,2}\s+%s\s+\d{4})\s+%s%s" s %= (DAYS_PAT, MONTHS_PAT, TIME_PAT, TZ_PAT) n = self._group_index conv[group] = partial(date_convert, dmy=n + 3, hms=n + 5, tz=n + 8) self._group_index += 8 elif type == "th": # slight flexibility here from the stock Apache format s = r"(\d{1,2}[-/]%s[-/]\d{4}):%s%s" % (MONTHS_PAT, TIME_PAT, TZ_PAT) n = self._group_index conv[group] = partial(date_convert, dmy=n + 1, hms=n + 3, tz=n + 6) self._group_index += 6 elif type == "tc": s = r"(%s)\s+%s\s+(\d{1,2})\s+%s\s+(\d{4})" s %= (DAYS_PAT, MONTHS_PAT, TIME_PAT) n = self._group_index conv[group] = partial(date_convert, d_m_y=(n + 4, n + 3, n + 8), hms=n + 5) self._group_index += 8 elif type == "tt": s = r"%s?%s?%s?" % (TIME_PAT, AM_PAT, TZ_PAT) n = self._group_index conv[group] = partial(date_convert, hms=n + 1, am=n + 4, tz=n + 5) self._group_index += 5 elif type == "ts": s = r"%s(\s+)(\d+)(\s+)(\d{1,2}:\d{1,2}:\d{1,2})?" % MONTHS_PAT n = self._group_index conv[group] = partial(date_convert, mm=n + 1, dd=n + 3, hms=n + 5) self._group_index += 5 elif type == "l": s = r"[A-Za-z]+" elif type: s = r"\%s+" % type elif format.get("precision"): if format.get("width"): s = r".{%s,%s}?" % (format["width"], format["precision"]) else: s = r".{1,%s}?" 
% format["precision"] elif format.get("width"): s = r".{%s,}?" % format["width"] else: s = r".+?" align = format["align"] fill = format["fill"] # handle some numeric-specific things like fill and sign if is_numeric: # prefix with something (align "=" trumps zero) if align == "=": # special case - align "=" acts like the zero above but with # configurable fill defaulting to "0" if not fill: fill = "0" s = r"%s*" % fill + s # allow numbers to be prefixed with a sign s = r"[-+ ]?" + s if not fill: fill = " " # Place into a group now - this captures the value we want to keep. # Everything else from now is just padding to be stripped off if wrap: s = wrap % s self._group_index += 1 if format["width"]: # all we really care about is that if the format originally # specified a width then there will probably be padding - without # an explicit alignment that'll mean right alignment with spaces # padding if not align: align = ">" if fill in r".\+?*[](){}^$": fill = "\\" + fill # align "=" has been handled if align == "<": s = "%s%s*" % (s, fill) elif align == ">": s = "%s*%s" % (fill, s) elif align == "^": s = "%s*%s%s*" % (fill, s, fill) return s class Result(object): """The result of a parse() or search(). Fixed results may be looked up using `result[index]`. Slices of fixed results may also be looked up. Named results may be looked up using `result['name']`. Named results may be tested for existence using `'name' in result`. """ def __init__(self, fixed, named, spans): self.fixed = fixed self.named = named self.spans = spans def __getitem__(self, item): if isinstance(item, (int, slice)): return self.fixed[item] return self.named[item] def __repr__(self): return "<%s %r %r>" % (self.__class__.__name__, self.fixed, self.named) def __contains__(self, name): return name in self.named class Match(object): """The result of a parse() or search() if no results are generated. 
This class is only used to expose internal used regex match objects to the user and use them for external Parser.evaluate_result calls. """ def __init__(self, parser, match): self.parser = parser self.match = match def evaluate_result(self): """Generate results for this Match""" return self.parser.evaluate_result(self.match) class ResultIterator(object): """The result of a findall() operation. Each element is a Result instance. """ def __init__(self, parser, string, pos, endpos, evaluate_result=True): self.parser = parser self.string = string self.pos = pos self.endpos = endpos self.evaluate_result = evaluate_result def __iter__(self): return self def __next__(self): m = self.parser._search_re.search(self.string, self.pos, self.endpos) if m is None: raise StopIteration() self.pos = m.end() if self.evaluate_result: return self.parser.evaluate_result(m) else: return Match(self.parser, m) # pre-py3k compat next = __next__ def parse(format, string, extra_types=None, evaluate_result=True, case_sensitive=False): """Using "format" attempt to pull values from "string". The format must match the string contents exactly. If the value you're looking for is instead just a part of the string use search(). If ``evaluate_result`` is True the return value will be an Result instance with two attributes: .fixed - tuple of fixed-position values from the string .named - dict of named values from the string If ``evaluate_result`` is False the return value will be a Match instance with one method: .evaluate_result() - This will return a Result instance like you would get with ``evaluate_result`` set to True The default behaviour is to match strings case insensitively. You may match with case by specifying case_sensitive=True. If the format is invalid a ValueError will be raised. See the module documentation for the use of "extra_types". In the case there is no match parse() will return None. 
""" p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive) return p.parse(string, evaluate_result=evaluate_result) def search( format, string, pos=0, endpos=None, extra_types=None, evaluate_result=True, case_sensitive=False, ): """Search "string" for the first occurrence of "format". The format may occur anywhere within the string. If instead you wish for the format to exactly match the string use parse(). Optionally start the search at "pos" character index and limit the search to a maximum index of endpos - equivalent to search(string[:endpos]). If ``evaluate_result`` is True the return value will be an Result instance with two attributes: .fixed - tuple of fixed-position values from the string .named - dict of named values from the string If ``evaluate_result`` is False the return value will be a Match instance with one method: .evaluate_result() - This will return a Result instance like you would get with ``evaluate_result`` set to True The default behaviour is to match strings case insensitively. You may match with case by specifying case_sensitive=True. If the format is invalid a ValueError will be raised. See the module documentation for the use of "extra_types". In the case there is no match parse() will return None. """ p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive) return p.search(string, pos, endpos, evaluate_result=evaluate_result) def findall( format, string, pos=0, endpos=None, extra_types=None, evaluate_result=True, case_sensitive=False, ): """Search "string" for all occurrences of "format". You will be returned an iterator that holds Result instances for each format match found. Optionally start the search at "pos" character index and limit the search to a maximum index of endpos - equivalent to search(string[:endpos]). 
If ``evaluate_result`` is True each returned Result instance has two attributes: .fixed - tuple of fixed-position values from the string .named - dict of named values from the string If ``evaluate_result`` is False each returned value is a Match instance with one method: .evaluate_result() - This will return a Result instance like you would get with ``evaluate_result`` set to True The default behaviour is to match strings case insensitively. You may match with case by specifying case_sensitive=True. If the format is invalid a ValueError will be raised. See the module documentation for the use of "extra_types". """ p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive) return p.findall(string, pos, endpos, evaluate_result=evaluate_result) def compile(format, extra_types=None, case_sensitive=False): """Create a Parser instance to parse "format". The resultant Parser has a method .parse(string) which behaves in the same manner as parse(format, string). The default behaviour is to match strings case insensitively. You may match with case by specifying case_sensitive=True. Use this function if you intend to parse many strings with the same format. See the module documentation for the use of "extra_types". Returns a Parser instance. """ return Parser(format, extra_types=extra_types, case_sensitive=case_sensitive) # Copyright (c) 2012-2020 Richard Jones # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # vim: set filetype=python ts=4 sw=4 et si tw=75 parse_type-0.6.4/parse_type/parse_util.py000066400000000000000000000140401467747561600206160ustar00rootroot00000000000000# -*- coding: utf-8 -*- # pylint: disable=missing-docstring """ Provides generic utility classes for the :class:`parse.Parser` class. """ from __future__ import absolute_import from collections import namedtuple import parse import six # -- HELPER-CLASS: For format part in a Field. # REQUIRES: Python 2.6 or newer. # pylint: disable=redefined-builtin, too-many-arguments FormatSpec = namedtuple("FormatSpec", ["type", "width", "zero", "align", "fill", "precision"]) def make_format_spec(type=None, width="", zero=False, align=None, fill=None, precision=None): return FormatSpec(type, width, zero, align, fill, precision) # pylint: enable=redefined-builtin class Field(object): """ Provides a ValueObject for a Field in a parse expression. 
Examples: * "{}" * "{name}" * "{:format}" * "{name:format}" Format specification: [[fill]align][0][width][.precision][type] """ # pylint: disable=redefined-builtin ALIGN_CHARS = '<>=^' def __init__(self, name="", format=None): self.name = name self.format = format self._format_spec = None def set_format(self, format): self.format = format self._format_spec = None @property def has_format(self): return bool(self.format) @property def format_spec(self): if not self._format_spec and self.format: self._format_spec = self.extract_format_spec(self.format) return self._format_spec def __str__(self): name = self.name or "" if self.has_format: return "{%s:%s}" % (name, self.format) return "{%s}" % name def __eq__(self, other): if isinstance(other, Field): format1 = self.format or "" format2 = other.format or "" return (self.name == other.name) and (format1 == format2) elif isinstance(other, six.string_types): return str(self) == other else: raise ValueError(other) def __ne__(self, other): return not self.__eq__(other) @staticmethod def make_format(format_spec): """Build format string from a format specification. :param format_spec: Format specification (as FormatSpec object). :return: Composed format (as string). 
""" fill = '' align = '' zero = '' width = format_spec.width if format_spec.align: align = format_spec.align[0] if format_spec.fill: fill = format_spec.fill[0] if format_spec.zero: zero = '0' precision_part = "" if format_spec.precision: precision_part = ".%s" % format_spec.precision # -- FORMAT-SPEC: [[fill]align][0][width][.precision][type] return "%s%s%s%s%s%s" % (fill, align, zero, width, precision_part, format_spec.type) @classmethod def extract_format_spec(cls, format): """Pull apart the format: [[fill]align][0][width][.precision][type]""" # -- BASED-ON: parse.extract_format() # pylint: disable=redefined-builtin, unsubscriptable-object if not format: raise ValueError("INVALID-FORMAT: %s (empty-string)" % format) orig_format = format fill = align = None if format[0] in cls.ALIGN_CHARS: align = format[0] format = format[1:] elif len(format) > 1 and format[1] in cls.ALIGN_CHARS: fill = format[0] align = format[1] format = format[2:] zero = False if format and format[0] == '0': zero = True format = format[1:] width = '' while format: if not format[0].isdigit(): break width += format[0] format = format[1:] precision = None if format.startswith('.'): # Precision isn't needed but we need to capture it so that # the ValueError isn't raised. format = format[1:] # drop the '.' precision = '' while format: if not format[0].isdigit(): break precision += format[0] format = format[1:] # the rest is the type, if present type = format if not type: raise ValueError("INVALID-FORMAT: %s (without type)" % orig_format) return FormatSpec(type, width, zero, align, fill, precision) class FieldParser(object): """ Utility class that parses/extracts fields in parse expressions. """ @classmethod def parse(cls, text): if not (text.startswith('{') and text.endswith('}')): message = "FIELD-SCHEMA MISMATCH: text='%s' (missing braces)" % text raise ValueError(message) # first: lose the braces text = text[1:-1] if ':' in text: # -- CASE: Typed field with format. 
name, format_ = text.split(':') else: name = text format_ = None return Field(name, format_) @classmethod def extract_fields(cls, schema): """Extract fields in a parse expression schema. :param schema: Parse expression schema/format to use (as string). :return: Generator for fields in schema (as Field objects). """ # -- BASED-ON: parse.Parser._generate_expression() for part in parse.PARSE_RE.split(schema): if not part or part == '{{' or part == '}}': continue elif part[0] == '{': # this will be a braces-delimited field to handle yield cls.parse(part) @classmethod def extract_types(cls, schema): """Extract types (names) for typed fields (with format/type part). :param schema: Parser schema/format to use. :return: Generator for type names (as string). """ for field in cls.extract_fields(schema): if field.has_format: yield field.format_spec.type parse_type-0.6.4/py.requirements/000077500000000000000000000000001467747561600170755ustar00rootroot00000000000000parse_type-0.6.4/py.requirements/all.txt000066400000000000000000000007021467747561600204050ustar00rootroot00000000000000# ============================================================================ # BEHAVE: PYTHON PACKAGE REQUIREMENTS: All requirements # ============================================================================ # DESCRIPTION: # pip install -r # # SEE ALSO: # * http://www.pip-installer.org/ # ============================================================================ -r basic.txt -r packaging.txt -r develop.txt -r testing.txt parse_type-0.6.4/py.requirements/basic.txt000066400000000000000000000010161467747561600207150ustar00rootroot00000000000000# ============================================================================ # PYTHON PACKAGE REQUIREMENTS: Normal usage/installation (minimal) # ============================================================================ # DESCRIPTION: # pip install -r # # SEE ALSO: # * http://www.pip-installer.org/ # 
============================================================================ parse >= 1.18.0; python_version >= '3.0' parse >= 1.13.1; python_version <= '2.7' enum34; python_version < '3.4' six >= 1.15 parse_type-0.6.4/py.requirements/ci.github.testing.txt000066400000000000000000000006121467747561600231650ustar00rootroot00000000000000pytest < 5.0; python_version < '3.0' pytest >= 5.0; python_version >= '3.0' pytest-html >= 1.19.0 # -- NEEDED: By some tests (as proof of concept) # NOTE: path.py-10.1 is required for python2.6 # HINT: path.py => path (python-install-package was renamed for python3) # DISABLED: path.py >= 11.5.0; python_version < '3.5' # DISABLED: path >= 13.1.0; python_version >= '3.5' -r basic.txt parse_type-0.6.4/py.requirements/develop.txt000066400000000000000000000017551467747561600213040ustar00rootroot00000000000000# ============================================================================ # PYTHON PACKAGE REQUIREMENTS FOR: parse_type -- For development only # ============================================================================ # -- BUILD-SYSTEM SUPPORT: Using invoke -r ../tasks/py.requirements.txt # -- RELEASE MANAGEMENT: Push package to pypi. twine >= 1.13.0 -r packaging.txt # -- PYTHON2/PYTHON3 COMPATIBILITY: modernize >= 0.5 # -- PYTHON 3 TYPE HINTS: typing-extensions; python_version >= '3.8' typer >= 0.12.5; python_version >= '3.7' # -- MULTI-REPO TOOL: vcstool >= 0.3.0 # -- LINTERS: ruff; python_version >= '3.7' pylint # -- TEST SUPPORT: CODE COVERAGE SUPPORT, ... coverage >= 4.4 pytest-cov tox >= 1.8.1,<4.0 # -- HINT: tox >= 4.0 has breaking changes. 
virtualenv < 20.22.0; python_version <= '3.6' # -- SUPPORT FOR: Python 2.7, Python <= 3.6 virtualenv >= 20.0.0; python_version > '3.6' argparse # -- NEEDED-FOR: toxcmd.py # -- RELATED: -r testing.txt -r docs.txt parse_type-0.6.4/py.requirements/docs.txt000066400000000000000000000004231467747561600205650ustar00rootroot00000000000000# ============================================================================ # PYTHON PACKAGE REQUIREMENTS: For documentation generation (PREPARED) # ============================================================================ Sphinx >=1.6 sphinx_bootstrap_theme >= 0.6.0 parse_type-0.6.4/py.requirements/optional.txt000066400000000000000000000005751467747561600214720ustar00rootroot00000000000000# ============================================================================ # PYTHON PACKAGE REQUIREMENTS FOR: parse_type -- Optional for development # ============================================================================ # -- GIT MULTI-REPO TOOL: wstool # REQUIRES: wstool >= 0.1.18 (which is not in pypi.org, yet) https://github.com/vcstools/wstool/archive/0.1.18.zip parse_type-0.6.4/py.requirements/packaging.txt000066400000000000000000000013021467747561600215560ustar00rootroot00000000000000# ============================================================================ # PYTHON PACKAGE REQUIREMENTS: packaging support # ============================================================================ # DESCRIPTION: # pip install -r # # SEE ALSO: # * http://www.pip-installer.org/ # ============================================================================ # -- PACKAGING SUPPORT: build >= 0.5.1 setuptools setuptools-scm wheel # -- DISABLED: # setuptools >= 64.0.0; python_version >= '3.5' # setuptools < 45.0.0; python_version < '3.5' # DROP: Python2, Python 3.4 support. 
# setuptools_scm >= 8.0.0; python_version >= '3.7' # setuptools_scm < 8.0.0; python_version < '3.7' parse_type-0.6.4/py.requirements/testing.txt000066400000000000000000000005051467747561600213130ustar00rootroot00000000000000# ============================================================================ # PYTHON PACKAGE REQUIREMENTS FOR: parse_type -- For testing only # ============================================================================ pytest < 5.0; python_version < '3.0' pytest >= 5.0; python_version >= '3.0' pytest-html >= 1.19.0 parse_type-0.6.4/pyproject.toml000066400000000000000000000112411467747561600166360ustar00rootroot00000000000000# ============================================================================= # PACKAGING: parse_type # ============================================================================= # SEE ALSO: # * https://setuptools.pypa.io/en/latest/userguide/pyproject_config.html # * https://setuptools-scm.readthedocs.io/en/latest/usage/ # * https://pypi.org/classifiers/ # ============================================================================= # PYTHON3: requires = ["setuptools>=64", "setuptools_scm>=8", "wheel"] [build-system] requires = ["setuptools", "setuptools_scm", "wheel"] build-backend = "setuptools.build_meta" [project] name = "parse_type" authors = [ {name = "Jens Engel", email = "jenisys@noreply.github.com"}, ] description = "Simplifies to build parse types based on the parse module" dynamic = ["version"] readme = "README.rst" requires-python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" keywords = ["parse", "parsing"] license = {text = "MIT"} classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: 
Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Code Generators", "Topic :: Software Development :: Libraries :: Python Modules", ] dependencies = [ "parse >= 1.18.0; python_version >= '3.0'", "parse >= 1.13.1; python_version <= '2.7'", "enum34; python_version < '3.4'", "six >= 1.15", ] [project.urls] Homepage = "https://github.com/jenisys/parse_type" Download = "https://pypi.org/project/parse_type/" "Source Code" = "https://github.com/jenisys/parse_type" "Issue Tracker" = "https://github.com/jenisys/parse_type/issues/" [project.optional-dependencies] develop = [ # -- DISABLED: # "setuptools >= 64.0.0; python_version >= '3.5'", # "setuptools < 45.0.0; python_version < '3.5'", # DROP: Python2, Python 3.4 support. 
# "setuptools_scm >= 8.0.0; python_version >= '3.7'", # "setuptools_scm < 8.0.0; python_version < '3.7'", "setuptools", "setuptools-scm", "wheel", "build >= 0.5.1", "twine >= 1.13.0", "coverage >= 4.4", "pytest < 5.0; python_version < '3.0'", # >= 4.2 "pytest >= 5.0; python_version >= '3.0'", "pytest-html >= 1.19.0", "pytest-cov", "tox >=2.8,<4.0", "virtualenv < 20.22.0; python_version <= '3.6'", # -- SUPPORT FOR: Python 2.7, Python <= 3.6 "virtualenv >= 20.0.0; python_version > '3.6'", "ruff; python_version >= '3.7'", "pylint", ] docs = [ "Sphinx >=1.6", "sphinx_bootstrap_theme >= 0.6.0" ] testing = [ "pytest < 5.0; python_version < '3.0'", # >= 4.2 "pytest >= 5.0; python_version >= '3.0'", "pytest-html >= 1.19.0", ] [tool.distutils.bdist_wheel] universal = true # ----------------------------------------------------------------------------- # PACAKING TOOL SPECIFIC PARTS: # ----------------------------------------------------------------------------- [tool.setuptools] platforms = ["any"] zip-safe = true # -- DISABLED: # [tool.setuptools.dynamic] # version = {attr = "parse_type._version.version"} [tool.setuptools.packages.find] where = ["."] include = ["parse_type*"] exclude = ["tests*"] namespaces = false # -- SETUPTOOLS-SCM: Generate version info from git-tag(s). 
[tool.setuptools_scm] version_file = "parse_type/_version.py" # ============================================================================= # OTHER TOOLS # ============================================================================= [tool.black] line_length = 100 target-version = ['py38'] include = '\.pyi?$' exclude = ''' ( /( \.git | \.venv | \.netbox | \.vscode | configuration )/ ) ''' [tool.isort] profile = "black" multi_line_output = 3 line_length = 100 # ----------------------------------------------------------------------------- # PYLINT: # ----------------------------------------------------------------------------- [tool.pylint.messages_control] disable = "C0330, C0326" [tool.pylint.format] max-line-length = "100" parse_type-0.6.4/pytest.ini000066400000000000000000000021341467747561600157540ustar00rootroot00000000000000# ============================================================================ # PYTEST CONFIGURATION FILE: pytest.ini # ============================================================================ # SEE ALSO: # * http://pytest.org/ # * http://pytest.org/latest/customize.html # * http://pytest.org/latest/usage.html # * http://pytest.org/latest/example/pythoncollection.html#change-naming-conventions # ============================================================================ # MORE OPTIONS: # addopts = # python_classes=*Test # python_functions=test # ============================================================================ [pytest] minversion = 4.2 testpaths = tests python_files = test_*.py junit_family = xunit2 addopts = --metadata PACKAGE_UNDER_TEST parse_type --html=build/testing/report.html --self-contained-html --junit-xml=build/testing/report.xml # markers = # smoke # slow # -- PREPARED: # filterwarnings = # ignore:.*invalid escape sequence.*:DeprecationWarning # -- BACKWARD COMPATIBILITY: pytest < 2.8 norecursedirs = .git .tox build dist .venv* tmp* _* 
parse_type-0.6.4/setup.cfg000066400000000000000000000001101467747561600155340ustar00rootroot00000000000000[upload_docs] upload_dir = build/docs/html [bdist_wheel] universal = 1 parse_type-0.6.4/setup.py000066400000000000000000000123061467747561600154370ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """ Setup script for "parse_type" package. USAGE: pip install . SEE ALSO: * https://pypi.org/pypi/parse_type * https://github.com/jenisys/parse_type RELATED: * https://setuptools.readthedocs.io/en/latest/history.html * https://setuptools-scm.readthedocs.io/en/latest/usage/ """ import sys import os.path sys.path.insert(0, os.curdir) # -- USE: setuptools from setuptools import setup, find_packages # DISABLED: from setuptools_scm import ScmVersion # ----------------------------------------------------------------------------- # PREPARE SETUP: # ----------------------------------------------------------------------------- HERE = os.path.dirname(__file__) README = os.path.join(HERE, "README.rst") long_description = ''.join(open(README).readlines()[4:]) # ----------------------------------------------------------------------------- # UTILITY: # ----------------------------------------------------------------------------- def find_packages_by_root_package(where): """Better than excluding everything that is not needed, collect only what is needed. 
""" root_package = os.path.basename(where) packages = [ "%s.%s" % (root_package, sub_package) for sub_package in find_packages(where)] packages.insert(0, root_package) return packages # -- SEE: https://setuptools-scm.readthedocs.io/en/latest/customizing/ # HINT: get_version_func(version: ScmVersion) -> str: def get_this_package_version(version): from setuptools_scm.version import guess_next_version if version.distance is None: # -- FIX: Python 2.7 problem w/ setuptools-scm v5.0.2 version.distance = 0 return version.format_next_version(guess_next_version, "{guessed}b{distance}") # ----------------------------------------------------------------------------- # SETUP: # ----------------------------------------------------------------------------- setup( name = "parse_type", # DISABLED: version = "0.6.3", use_scm_version={"version_scheme": get_this_package_version}, author = "Jens Engel", author_email = "jenisys@noreply.github.com", url = "https://github.com/jenisys/parse_type", download_url= "http://pypi.python.org/pypi/parse_type", description = "Simplifies to build parse types based on the parse module", long_description = long_description, keywords= "parse, parsing", license = "MIT", packages = find_packages_by_root_package("parse_type"), include_package_data = True, # -- REQUIREMENTS: python_requires=">=2.7, !=3.0.*, !=3.1.*", setup_requires=[ # -- DISABLED: # "setuptools >= 64.0.0; python_version >= '3.5'", # "setuptools < 45.0.0; python_version < '3.5'", # DROP: Python2, Python 3.4 support. 
# "setuptools_scm >= 8.0.0; python_version >= '3.7'", # "setuptools_scm < 8.0.0; python_version < '3.7'", "setuptools", "setuptools-scm", "wheel", ], install_requires=[ "parse >= 1.18.0; python_version >= '3.0'", "parse >= 1.13.1; python_version <= '2.7'", "enum34; python_version < '3.4'", "six >= 1.15", ], tests_require=[ "pytest < 5.0; python_version < '3.0'", # >= 4.2 "pytest >= 5.0; python_version >= '3.0'", "pytest-html >= 1.19.0", ], extras_require={ "docs": [ "Sphinx >=1.6", "sphinx_bootstrap_theme >= 0.6.0" ], "develop": [ "build >= 0.5.1", "twine >= 1.13.0", "coverage >= 4.4", "pytest < 5.0; python_version < '3.0'", # >= 4.2 "pytest >= 5.0; python_version >= '3.0'", "pytest-html >= 1.19.0", "pytest-cov", "tox >=2.8,<4.0", "virtualenv < 20.22.0; python_version <= '3.6'", # -- SUPPORT FOR: Python 2.7, Python <= 3.6 "virtualenv >= 20.0.0; python_version > '3.6'", "ruff; python_version >= '3.7'", "pylint", ], }, test_suite = "tests", test_loader = "setuptools.command.test:ScanningLoader", zip_safe = True, classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Code Generators", "Topic :: Software Development :: Libraries :: Python Modules", ], platforms = ['any'], ) 
parse_type-0.6.4/tasks/000077500000000000000000000000001467747561600150505ustar00rootroot00000000000000parse_type-0.6.4/tasks/__init__.py000066400000000000000000000035271467747561600171700ustar00rootroot00000000000000# -*- coding: UTF-8 -*- # pylint: disable=wrong-import-position, wrong-import-order """ Invoke build script. Show all tasks with:: invoke -l .. seealso:: * http://pyinvoke.org * https://github.com/pyinvoke/invoke """ from __future__ import absolute_import, print_function # ----------------------------------------------------------------------------- # IMPORTS: # ----------------------------------------------------------------------------- import sys from invoke import Collection # -- TASK-LIBRARY: import invoke_cleanup as cleanup from . import test from . import release # DISABLED: from . import docs # ----------------------------------------------------------------------------- # TASKS: # ----------------------------------------------------------------------------- # None # ----------------------------------------------------------------------------- # TASK CONFIGURATION: # ----------------------------------------------------------------------------- namespace = Collection() namespace.add_collection(Collection.from_module(cleanup), name="cleanup") namespace.add_collection(Collection.from_module(test)) namespace.add_collection(Collection.from_module(release)) # -- DISABLED: namespace.add_collection(Collection.from_module(docs)) namespace.configure({ "tasks": { "auto_dash_names": False } }) # -- ENSURE: python cleanup is used for this project. cleanup.cleanup_tasks.add_task(cleanup.clean_python) # -- INJECT: clean configuration into this namespace namespace.configure(cleanup.namespace.configuration()) if sys.platform.startswith("win"): # -- OVERRIDE SETTINGS: For platform=win32, ... 
(Windows) from ._compat_shutil import which run_settings = dict(echo=True, pty=False, shell=which("cmd")) namespace.configure({"run": run_settings}) else: namespace.configure({"run": dict(echo=True, pty=True)}) parse_type-0.6.4/tasks/_compat_shutil.py000066400000000000000000000003341467747561600204340ustar00rootroot00000000000000# -*- coding: UTF-8 -*- # pylint: disable=unused-import # PYTHON VERSION COMPATIBILITY HELPER try: from shutil import which # -- SINCE: Python 3.3 except ImportError: from backports.shutil_which import which parse_type-0.6.4/tasks/docs.py000066400000000000000000000160171467747561600163570ustar00rootroot00000000000000# -*- coding: UTF-8 -*- """ Provides tasks to build documentation with sphinx, etc. """ from __future__ import absolute_import, print_function import os import sys from invoke import task, Collection from invoke.util import cd from path import Path # -- TASK-LIBRARY: from ._tasklet_cleanup import cleanup_tasks, cleanup_dirs # ----------------------------------------------------------------------------- # CONSTANTS: # ----------------------------------------------------------------------------- SPHINX_LANGUAGE_DEFAULT = os.environ.get("SPHINX_LANGUAGE", "en") # ----------------------------------------------------------------------------- # UTILTITIES: # ----------------------------------------------------------------------------- def _sphinxdoc_get_language(ctx, language=None): language = language or ctx.config.sphinx.language or SPHINX_LANGUAGE_DEFAULT return language def _sphinxdoc_get_destdir(ctx, builder, language=None): if builder == "gettext": # -- CASE: not LANGUAGE-SPECIFIC destdir = Path(ctx.config.sphinx.destdir or "build")/builder else: # -- CASE: LANGUAGE-SPECIFIC: language = _sphinxdoc_get_language(ctx, language) destdir = Path(ctx.config.sphinx.destdir or "build")/builder/language return destdir # ----------------------------------------------------------------------------- # TASKS: # 
----------------------------------------------------------------------------- @task def clean(ctx, dry_run=False): """Cleanup generated document artifacts.""" basedir = ctx.sphinx.destdir or "build/docs" cleanup_dirs([basedir], dry_run=dry_run) @task(help={ "builder": "Builder to use (html, ...)", "language": "Language to use (en, ...)", "options": "Additional options for sphinx-build", }) def build(ctx, builder="html", language=None, options=""): """Build docs with sphinx-build""" language = _sphinxdoc_get_language(ctx, language) sourcedir = ctx.config.sphinx.sourcedir destdir = _sphinxdoc_get_destdir(ctx, builder, language=language) destdir = destdir.abspath() with cd(sourcedir): destdir_relative = Path(".").relpathto(destdir) command = "sphinx-build {opts} -b {builder} -D language={language} {sourcedir} {destdir}" \ .format(builder=builder, sourcedir=".", destdir=destdir_relative, language=language, opts=options) ctx.run(command) @task(help={ "builder": "Builder to use (html, ...)", "language": "Language to use (en, ...)", "options": "Additional options for sphinx-build", }) def rebuild(ctx, builder="html", language=None, options=""): """Rebuilds the docs. 
Perform the steps: clean, build """ clean(ctx) build(ctx, builder=builder, language=None, options=options) @task def linkcheck(ctx): """Check if all links are corect.""" build(ctx, builder="linkcheck") @task(help={"language": "Language to use (en, ...)"}) def browse(ctx, language=None): """Open documentation in web browser.""" output_dir = _sphinxdoc_get_destdir(ctx, "html", language=language) page_html = Path(output_dir)/"index.html" if not page_html.exists(): build(ctx, builder="html") assert page_html.exists() open_cmd = "open" # -- WORKS ON: MACOSX if sys.platform.startswith("win"): open_cmd = "start" ctx.run("{open} {page_html}".format(open=open_cmd, page_html=page_html)) # ctx.run('python -m webbrowser -t {page_html}'.format(page_html=page_html)) # -- DISABLED: # import webbrowser # print("Starting webbrowser with page=%s" % page_html) # webbrowser.open(str(page_html)) @task(help={ "dest": "Destination directory to save docs", "format": "Format/Builder to use (html, ...)", "language": "Language to use (en, ...)", }) # pylint: disable=redefined-builtin def save(ctx, dest="docs.html", format="html", language=None): """Save/update docs under destination directory.""" print("STEP: Generate docs in HTML format") build(ctx, builder=format, language=language) print("STEP: Save docs under %s/" % dest) source_dir = Path(_sphinxdoc_get_destdir(ctx, format, language=language)) Path(dest).rmtree_p() source_dir.copytree(dest) # -- POST-PROCESSING: Polish up. for part in [".buildinfo", ".doctrees"]: partpath = Path(dest)/part if partpath.isdir(): partpath.rmtree_p() elif partpath.exists(): partpath.remove_p() @task(help={ "language": 'Language to use, like "en" (default: "all" to build all).', }) def update_translation(ctx, language="all"): """Update sphinx-doc translation(s) messages from the "English" docs. * Generates gettext *.po files in "build/docs/gettext/" directory * Updates/generates gettext *.po per language in "docs/LOCALE/{language}/" .. 
note:: Afterwards, the missing message translations can be filled in. :param language: Indicate which language messages to update (or "all"). REQUIRES: * sphinx * sphinx-intl >= 0.9 .. seealso:: https://github.com/sphinx-doc/sphinx-intl """ if language == "all": # -- CASE: Process/update all support languages (translations). DEFAULT_LANGUAGES = os.environ.get("SPHINXINTL_LANGUAGE", None) if DEFAULT_LANGUAGES: # -- EXAMPLE: SPHINXINTL_LANGUAGE="de,ja" DEFAULT_LANGUAGES = DEFAULT_LANGUAGES.split(",") languages = ctx.config.sphinx.languages or DEFAULT_LANGUAGES else: # -- CASE: Process only one language (translation use case). languages = [language] # -- STEP: Generate *.po/*.pot files w/ sphinx-build -b gettext build(ctx, builder="gettext") # -- STEP: Update *.po/*.pot files w/ sphinx-intl if languages: gettext_build_dir = _sphinxdoc_get_destdir(ctx, "gettext").abspath() docs_sourcedir = ctx.config.sphinx.sourcedir languages_opts = "-l "+ " -l ".join(languages) with ctx.cd(docs_sourcedir): ctx.run("sphinx-intl update -p {gettext_dir} {languages}".format( gettext_dir=gettext_build_dir.relpath(docs_sourcedir), languages=languages_opts)) else: print("OOPS: No languages specified (use: SPHINXINTL_LANGUAGE=...)") # ----------------------------------------------------------------------------- # TASK CONFIGURATION: # ----------------------------------------------------------------------------- namespace = Collection(clean, rebuild, linkcheck, browse, save, update_translation) namespace.add_task(build, default=True) namespace.configure({ "sphinx": { # -- FOR TASKS: docs.build, docs.rebuild, docs.clean, ... "language": SPHINX_LANGUAGE_DEFAULT, "sourcedir": "docs", "destdir": "build/docs", # -- FOR TASK: docs.update_translation "languages": None, # -- List of language translations, like: de, ja, ... 
} }) # -- ADD CLEANUP TASK: cleanup_tasks.add_task(clean, "clean_docs") cleanup_tasks.configure(namespace.configuration()) parse_type-0.6.4/tasks/invoke_dry_run.py000066400000000000000000000032721467747561600204630ustar00rootroot00000000000000# -*- coding: UTF-8 -*- """ Basic support to use a --dry-run mode w/ invoke tasks. .. code-block:: from ._dry_run import DryRunContext @task def destroy_something(ctx, path, dry_run=False): if dry_run: ctx = DryRunContext(ctx) # -- DRY-RUN MODE: Only echos commands. ctx.run("rm -rf {}".format(path)) """ from __future__ import print_function from contextlib import contextmanager @contextmanager def dry_run_mode(ctx): """Contextmanages/scope-guard that switches into dry-run mode. Afterwards the original mode is restored. .. code-block:: python with dry_run_mode(ctx): ctx.run(...) """ # -- SETUP PHASE: initial_dry_run = ctx.config.run.dry ctx.config.run.dry = True yield ctx # -- CLEANUP PHASE: ctx.config.run.dry = initial_dry_run class DryRunContext(object): PREFIX = "DRY-RUN: " SCHEMA = "{prefix}{command}" SCHEMA_WITH_KWARGS = "{prefix}{command} (with kwargs={kwargs})" def __init__(self, ctx=None, prefix=None, schema=None): if prefix is None: prefix = self.PREFIX if schema is None: schema = self.SCHEMA self.ctx = ctx self.prefix = prefix self.schema = schema self.ctx.config.run.dry = True @property def config(self): return self.ctx.config def run(self, command, **kwargs): message = self.schema.format(command=command, prefix=self.prefix, kwargs=kwargs) print(message) def sudo(self, command, **kwargs): command2 = "sudo %s" % command self.run(command2, **kwargs) parse_type-0.6.4/tasks/py.requirements.txt000066400000000000000000000015101467747561600207600ustar00rootroot00000000000000# ============================================================================ # INVOKE PYTHON PACKAGE REQUIREMENTS: For tasks # ============================================================================ # DESCRIPTION: # pip install -r # # SEE ALSO: # 
* http://www.pip-installer.org/ # ============================================================================ invoke >=1.7.0,<2.0; python_version < '3.6' invoke >=1.7.0; python_version >= '3.6' pycmd six >= 1.15.0 # -- HINT, was RENAMED: path.py => path (for python3) path >= 13.1.0; python_version >= '3.5' path.py >= 11.5.0; python_version < '3.5' # -- PYTHON2 BACKPORTS: pathlib; python_version <= '3.4' backports.shutil_which; python_version <= '3.3' git+https://github.com/jenisys/invoke-cleanup@v0.3.7 # -- SECTION: develop # PREPARED: requests parse_type-0.6.4/tasks/release.py000066400000000000000000000161531467747561600170500ustar00rootroot00000000000000# -*- coding: UTF-8 -*- """ Tasks for releasing this project. Normal steps:: python setup.py sdist bdist_wheel twine register dist/{project}-{version}.tar.gz twine upload dist/* twine upload --skip-existing dist/* python setup.py upload # -- DEPRECATED: No longer supported -> Use RTD instead # -- DEPRECATED: python setup.py upload_docs pypi repositories: * https://pypi.python.org/pypi * https://testpypi.python.org/pypi (not working anymore) * https://test.pypi.org/legacy/ (not working anymore) Configuration file for pypi repositories: .. code-block:: init # -- FILE: $HOME/.pypirc [distutils] index-servers = pypi testpypi [pypi] # DEPRECATED: repository = https://pypi.python.org/pypi username = __USERNAME_HERE__ password: [testpypi] # DEPRECATED: repository = https://test.pypi.org/legacy username = __USERNAME_HERE__ password: .. 
seealso:: * https://packaging.python.org/ * https://packaging.python.org/guides/ * https://packaging.python.org/tutorials/distributing-packages/ """ from __future__ import absolute_import, print_function from invoke import Collection, task from invoke_cleanup import path_glob from .invoke_dry_run import DryRunContext # ----------------------------------------------------------------------------- # TASKS: # ----------------------------------------------------------------------------- @task def checklist(ctx=None): # pylint: disable=unused-argument """Checklist for releasing this project.""" checklist_text = """PRE-RELEASE CHECKLIST: [ ] Everything is checked in [ ] All tests pass w/ tox RELEASE CHECKLIST: [{x1}] Bump version to new-version by adding tag to the repository [{x2}] Build packages (sdist, bdist_wheel via prepare) [{x3}] Register and upload packages to testpypi repository (first) [{x4}] Verify release is OK and packages from testpypi are usable [{x5}] Register and upload packages to pypi repository [{x6}] Push last changes to Github repository POST-RELEASE CHECKLIST: [ ] Bump version to new-develop-version by adding tag to the repository [ ] Adapt CHANGES (if necessary) [ ] Commit latest changes to Github repository """ steps = dict(x1=None, x2=None, x3=None, x4=None, x5=None, x6=None) yesno_map = {True: "x", False: "_", None: " "} answers = {name: yesno_map[value] for name, value in steps.items()} print(checklist_text.format(**answers)) @task(name="bump_version") def bump_version(ctx, new_version, dry_run=False): """Bump version (to prepare a new release).""" if not new_version.startswith("v"): new_version = "v{version}".format(version=new_version) if dry_run: ctx = DryRunContext(ctx) ctx.run("git tag {version}".format(version=new_version)) @task(name="build", aliases=["build_packages"]) def build_packages(ctx, hide=False): """Build packages for this release.""" print("build_packages:") ctx.run("python -m build", echo=True, hide=hide) @task def 
prepare(ctx, new_version=None, hide=True, dry_run=False): """Prepare the release: bump version, build packages, ...""" if new_version is not None: bump_version(ctx, new_version, dry_run=dry_run) build_packages(ctx, hide=hide) packages = ensure_packages_exist(ctx, check_only=True) print_packages(packages) # -- NOT-NEEDED: # @task(name="register") # def register_packages(ctx, repo=None, dry_run=False): # """Register release (packages) in artifact-store/repository.""" # original_ctx = ctx # if repo is None: # repo = ctx.project.repo or "pypi" # if dry_run: # ctx = DryRunContext(ctx) # packages = ensure_packages_exist(original_ctx) # print_packages(packages) # for artifact in packages: # ctx.run("twine register --repository={repo} {artifact}".format( # artifact=artifact, repo=repo)) @task def upload(ctx, repo=None, repo_url=None, dry_run=False, skip_existing=False, verbose=False): """Upload release packages to repository (artifact-store).""" if repo is None: repo = ctx.project.repo or "pypi" if repo_url is None: repo_url = ctx.project.repo_url or None original_ctx = ctx if dry_run: ctx = DryRunContext(ctx) # -- OPTIONS: opts = [] if repo_url: opts.append("--repository-url={0}".format(repo_url)) elif repo: opts.append("--repository={0}".format(repo)) if skip_existing: opts.append("--skip-existing") if verbose: opts.append("--verbose") packages = ensure_packages_exist(original_ctx) print_packages(packages) ctx.run("twine upload {opts} dist/*".format(opts=" ".join(opts))) # ctx.run("twine upload --repository={repo} dist/*".format(repo=repo)) # 2018-05-05 WORK-AROUND for new https://pypi.org/: # twine upload --repository-url=https://upload.pypi.org/legacy /dist/* # NOT-WORKING: repo_url = "https://upload.pypi.org/simple/" # # ctx.run("twine upload --repository-url={repo_url} {opts} dist/*".format( # repo_url=repo_url, opts=" ".join(opts))) # ctx.run("twine upload --repository={repo} {opts} dist/*".format( # repo=repo, opts=" ".join(opts))) # -- DEPRECATED: Use RTD instead 
# @task(name="upload_docs") # def upload_docs(ctx, repo=None, dry_run=False): # """Upload and publish docs. # # NOTE: Docs are built first. # """ # if repo is None: # repo = ctx.project.repo or "pypi" # if dry_run: # ctx = DryRunContext(ctx) # # ctx.run("python setup.py upload_docs") # # ----------------------------------------------------------------------------- # TASK HELPERS: # ----------------------------------------------------------------------------- def print_packages(packages): print("PACKAGES[%d]:" % len(packages)) for package in packages: package_size = package.stat().st_size package_time = package.stat().st_mtime print(" - %s (size=%s)" % (package, package_size)) def ensure_packages_exist(ctx, pattern=None, check_only=False): if pattern is None: project_name = ctx.project.name project_prefix = project_name.replace("_", "-").split("-")[0] pattern = "dist/%s*" % project_prefix packages = list(path_glob(pattern, current_dir=".")) if not packages: if check_only: message = "No artifacts found: pattern=%s" % pattern raise RuntimeError(message) else: # -- RECURSIVE-SELF-CALL: Once print("NO-PACKAGES-FOUND: Build packages first ...") build_packages(ctx, hide=True) packages = ensure_packages_exist(ctx, pattern, check_only=True) return packages # ----------------------------------------------------------------------------- # TASK CONFIGURATION: # ----------------------------------------------------------------------------- # DISABLED: register_packages namespace = Collection(bump_version, checklist, prepare, build_packages, upload) namespace.configure({ "project": { "repo": "pypi", "repo_url": None, } }) parse_type-0.6.4/tasks/test.py000066400000000000000000000154311467747561600164050ustar00rootroot00000000000000# -*- coding: UTF-8 -*- """ Invoke test tasks. 
""" from __future__ import print_function import os.path import sys from invoke import task, Collection # -- TASK-LIBRARY: from invoke_cleanup import cleanup_tasks, cleanup_dirs, cleanup_files # --------------------------------------------------------------------------- # CONSTANTS: # --------------------------------------------------------------------------- USE_BEHAVE = False # --------------------------------------------------------------------------- # TASKS # --------------------------------------------------------------------------- @task(name="all", help={ "args": "Command line args for test run.", }) def test_all(ctx, args="", options=""): """Run all tests (default).""" pytest_args = select_by_prefix(args, ctx.pytest.scopes) behave_args = None if USE_BEHAVE: behave_args = select_by_prefix(args, ctx.behave_test.scopes) pytest_should_run = not args or (args and pytest_args) behave_should_run = not args or (args and behave_args) if pytest_should_run: pytest(ctx, pytest_args, options=options) if behave_should_run and USE_BEHAVE: behave(ctx, behave_args, options=options) @task def clean(ctx, dry_run=False): """Cleanup (temporary) test artifacts.""" directories = ctx.test.clean.directories or [] files = ctx.test.clean.files or [] cleanup_dirs(directories, dry_run=dry_run) cleanup_files(files, dry_run=dry_run) @task(name="unit") def unittest(ctx, args="", options=""): """Run unit tests.""" pytest(ctx, args, options) @task def pytest(ctx, args="", options=""): """Run unit tests.""" args = args or ctx.pytest.args options = options or ctx.pytest.options ctx.run("pytest {options} {args}".format(options=options, args=args)) @task(help={ "args": "Command line args for behave", "format": "Formatter to use (progress, pretty, ...)", }) def behave(ctx, args="", format="", options=""): """Run behave tests.""" format = format or ctx.behave_test.format options = options or ctx.behave_test.options args = args or ctx.behave_test.args if os.path.exists("bin/behave"): behave_cmd = 
"{python} bin/behave".format(python=sys.executable) else: behave_cmd = "{python} -m behave".format(python=sys.executable) for group_args in grouped_by_prefix(args, ctx.behave_test.scopes): ctx.run("{behave} -f {format} {options} {args}".format( behave=behave_cmd, format=format, options=options, args=group_args)) @task(help={ "args": "Tests to run (empty: all)", "report": "Coverage report format to use (report, html, xml)", }) def coverage(ctx, args="", report="report", append=False): """Determine test coverage (run pytest, behave)""" append = append or ctx.coverage.append report_formats = ctx.coverage.report_formats or [] if report not in report_formats: report_formats.insert(0, report) opts = [] if append: opts.append("--append") pytest_args = select_by_prefix(args, ctx.pytest.scopes) behave_args = select_by_prefix(args, ctx.behave_test.scopes) pytest_should_run = not args or (args and pytest_args) behave_should_run = not args or (args and behave_args) and USE_BEHAVE if not args: behave_args = ctx.behave_test.args or "features" if isinstance(pytest_args, list): pytest_args = " ".join(pytest_args) if isinstance(behave_args, list): behave_args = " ".join(behave_args) # -- RUN TESTS WITH COVERAGE: if pytest_should_run: ctx.run("coverage run {options} -m pytest {args}".format( args=pytest_args, options=" ".join(opts))) if behave_should_run and USE_BEHAVE: behave_options = ctx.behave_test.coverage_options or "" os.environ["COVERAGE_PROCESS_START"] = os.path.abspath(".coveragerc") behave(ctx, args=behave_args, options=behave_options) del os.environ["COVERAGE_PROCESS_START"] # -- POST-PROCESSING: ctx.run("coverage combine") for report_format in report_formats: ctx.run("coverage {report_format}".format(report_format=report_format)) # --------------------------------------------------------------------------- # UTILITIES: # --------------------------------------------------------------------------- def select_prefix_for(arg, prefixes): for prefix in prefixes: if 
arg.startswith(prefix): return prefix return os.path.dirname(arg) def select_by_prefix(args, prefixes): selected = [] for arg in args.strip().split(): assert not arg.startswith("-"), "REQUIRE: arg, not options" scope = select_prefix_for(arg, prefixes) if scope: selected.append(arg) return " ".join(selected) def grouped_by_prefix(args, prefixes): """Group behave args by (directory) scope into multiple test-runs.""" group_args = [] current_scope = None for arg in args.strip().split(): assert not arg.startswith("-"), "REQUIRE: arg, not options" scope = select_prefix_for(arg, prefixes) if scope != current_scope: if group_args: # -- DETECTED GROUP-END: yield " ".join(group_args) group_args = [] current_scope = scope group_args.append(arg) if group_args: yield " ".join(group_args) # --------------------------------------------------------------------------- # TASK MANAGEMENT / CONFIGURATION # --------------------------------------------------------------------------- namespace = Collection(clean, unittest, pytest, coverage) namespace.add_task(test_all, default=True) if USE_BEHAVE: namespace.add_task(behave) namespace.configure({ "test": { "clean": { "directories": [ ".cache", "assets", # -- TEST RUNS # -- BEHAVE-SPECIFIC: "__WORKDIR__", "reports", "test_results", ], "files": [ ".coverage", ".coverage.*", # -- BEHAVE-SPECIFIC: "report.html", "rerun*.txt", "rerun*.featureset", "testrun*.json", ], }, }, "pytest": { "scopes": ["tests"], "args": "", "options": "", # -- NOTE: Overide in configfile "invoke.yaml" }, # "behave_test": behave.namespace._configuration["behave_test"], "behave_test": { "scopes": ["features"], "args": "features", "format": "progress", "options": "", # -- NOTE: Overide in configfile "invoke.yaml" "coverage_options": "", }, "coverage": { "append": False, "report_formats": ["report", "html"], }, }) # -- ADD CLEANUP TASK: cleanup_tasks.add_task(clean, "clean_test") cleanup_tasks.configure(namespace.configuration()) 
parse_type-0.6.4/tests/000077500000000000000000000000001467747561600150655ustar00rootroot00000000000000parse_type-0.6.4/tests/__init__.py000066400000000000000000000000001467747561600171640ustar00rootroot00000000000000parse_type-0.6.4/tests/parse_tests/000077500000000000000000000000001467747561600174215ustar00rootroot00000000000000parse_type-0.6.4/tests/parse_tests/__init__.py000066400000000000000000000001221467747561600215250ustar00rootroot00000000000000# COPY TESTSUITE FROM: parse v1.20.2 # SEE: https://github.com/r1chardj0n3s/parse parse_type-0.6.4/tests/parse_tests/test_bugs.py000066400000000000000000000057751467747561600220100ustar00rootroot00000000000000import pickle from datetime import datetime import parse def test_tz_compare_to_None(): utc = parse.FixedTzOffset(0, "UTC") assert utc is not None assert utc != "spam" def test_named_date_issue7(): r = parse.parse("on {date:ti}", "on 2012-09-17") assert r["date"] == datetime(2012, 9, 17, 0, 0, 0) # fix introduced regressions r = parse.parse("a {:ti} b", "a 1997-07-16T19:20 b") assert r[0] == datetime(1997, 7, 16, 19, 20, 0) r = parse.parse("a {:ti} b", "a 1997-07-16T19:20Z b") utc = parse.FixedTzOffset(0, "UTC") assert r[0] == datetime(1997, 7, 16, 19, 20, tzinfo=utc) r = parse.parse("a {date:ti} b", "a 1997-07-16T19:20Z b") assert r["date"] == datetime(1997, 7, 16, 19, 20, tzinfo=utc) def test_dotted_type_conversion_pull_8(): # test pull request 8 which fixes type conversion related to dotted # names being applied correctly r = parse.parse("{a.b:d}", "1") assert r["a.b"] == 1 r = parse.parse("{a_b:w} {a.b:d}", "1 2") assert r["a_b"] == "1" assert r["a.b"] == 2 def test_pm_overflow_issue16(): r = parse.parse("Meet at {:tg}", "Meet at 1/2/2011 12:45 PM") assert r[0] == datetime(2011, 2, 1, 12, 45) def test_pm_handling_issue57(): r = parse.parse("Meet at {:tg}", "Meet at 1/2/2011 12:15 PM") assert r[0] == datetime(2011, 2, 1, 12, 15) r = parse.parse("Meet at {:tg}", "Meet at 1/2/2011 12:15 AM") assert r[0] == 
datetime(2011, 2, 1, 0, 15) def test_user_type_with_group_count_issue60(): @parse.with_pattern(r"((\w+))", regex_group_count=2) def parse_word_and_covert_to_uppercase(text): return text.strip().upper() @parse.with_pattern(r"\d+") def parse_number(text): return int(text) # -- CASE: Use named (OK) type_map = {"Name": parse_word_and_covert_to_uppercase, "Number": parse_number} r = parse.parse( "Hello {name:Name} {number:Number}", "Hello Alice 42", extra_types=type_map ) assert r.named == {"name": "ALICE", "number": 42} # -- CASE: Use unnamed/fixed (problematic) r = parse.parse("Hello {:Name} {:Number}", "Hello Alice 42", extra_types=type_map) assert r[0] == "ALICE" assert r[1] == 42 def test_unmatched_brace_doesnt_match(): r = parse.parse("{who.txt", "hello") assert r is None def test_pickling_bug_110(): p = parse.compile("{a:d}") # prior to the fix, this would raise an AttributeError pickle.dumps(p) def test_unused_centered_alignment_bug(): r = parse.parse("{:^2S}", "foo") assert r[0] == "foo" r = parse.search("{:^2S}", "foo") assert r[0] == "foo" # specifically test for the case in issue #118 as well r = parse.parse("Column {:d}:{:^}", "Column 1: Timestep") assert r[0] == 1 assert r[1] == "Timestep" def test_unused_left_alignment_bug(): r = parse.parse("{:<2S}", "foo") assert r[0] == "foo" r = parse.search("{:<2S}", "foo") assert r[0] == "foo" def test_match_trailing_newline(): r = parse.parse("{}", "test\n") assert r[0] == "test\n" parse_type-0.6.4/tests/parse_tests/test_findall.py000066400000000000000000000011761467747561600224500ustar00rootroot00000000000000import parse def test_findall(): s = "".join( r.fixed[0] for r in parse.findall(">{}<", "

    some bold text

    ") ) assert s == "some bold text" def test_no_evaluate_result(): s = "".join( m.evaluate_result().fixed[0] for m in parse.findall( ">{}<", "

    some bold text

    ", evaluate_result=False ) ) assert s == "some bold text" def test_case_sensitivity(): l = [r.fixed[0] for r in parse.findall("x({})x", "X(hi)X")] assert l == ["hi"] l = [r.fixed[0] for r in parse.findall("x({})x", "X(hi)X", case_sensitive=True)] assert l == [] parse_type-0.6.4/tests/parse_tests/test_parse.py000066400000000000000000000614001467747561600221450ustar00rootroot00000000000000# coding: utf-8 import sys from datetime import date from datetime import datetime from datetime import time import pytest import parse def test_no_match(): # string does not match format assert parse.parse("{{hello}}", "hello") is None def test_nothing(): # do no actual parsing r = parse.parse("{{hello}}", "{hello}") assert r.fixed == () assert r.named == {} def test_no_evaluate_result(): # pull a fixed value out of string match = parse.parse("hello {}", "hello world", evaluate_result=False) r = match.evaluate_result() assert r.fixed == ("world",) def test_regular_expression(): # match an actual regular expression s = r"^(hello\s[wW]{}!+.*)$" e = s.replace("{}", "orld") r = parse.parse(s, e) assert r.fixed == ("orld",) e = s.replace("{}", ".*?") r = parse.parse(s, e) assert r.fixed == (".*?",) def test_question_mark(): # issue9: make sure a ? 
in the parse string is handled correctly r = parse.parse('"{}"?', '"teststr"?') assert r[0] == "teststr" def test_pipe(): # issue22: make sure a | in the parse string is handled correctly r = parse.parse("| {}", "| teststr") assert r[0] == "teststr" def test_unicode(): # issue29: make sure unicode is parsable r = parse.parse("{}", "t€ststr") assert r[0] == "t€ststr" def test_hexadecimal(): # issue42: make sure bare hexadecimal isn't matched as "digits" r = parse.parse("{:d}", "abcdef") assert r is None def test_fixed(): # pull a fixed value out of string r = parse.parse("hello {}", "hello world") assert r.fixed == ("world",) def test_left(): # pull left-aligned text out of string r = parse.parse("{:<} world", "hello world") assert r.fixed == ("hello",) def test_right(): # pull right-aligned text out of string r = parse.parse("hello {:>}", "hello world") assert r.fixed == ("world",) def test_center(): # pull center-aligned text out of string r = parse.parse("hello {:^} world", "hello there world") assert r.fixed == ("there",) def test_typed(): # pull a named, typed values out of string r = parse.parse("hello {:d} {:w}", "hello 12 people") assert r.fixed == (12, "people") r = parse.parse("hello {:w} {:w}", "hello 12 people") assert r.fixed == ("12", "people") def test_sign(): # sign is ignored r = parse.parse("Pi = {:.7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) r = parse.parse("Pi = {:+.7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) r = parse.parse("Pi = {:-.7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) r = parse.parse("Pi = {: .7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) def test_precision(): # pull a float out of a string r = parse.parse("Pi = {:.7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) r = parse.parse("Pi/10 = {:8.5f}", "Pi/10 = 0.31415") assert r.fixed == (0.31415,) # float may have not leading zero r = parse.parse("Pi/10 = {:8.5f}", "Pi/10 = .31415") assert r.fixed == (0.31415,) r = parse.parse("Pi/10 = 
{:8.5f}", "Pi/10 = -.31415") assert r.fixed == (-0.31415,) def test_custom_type(): # use a custom type r = parse.parse( "{:shouty} {:spam}", "hello world", {"shouty": lambda s: s.upper(), "spam": lambda s: "".join(reversed(s))}, ) assert r.fixed == ("HELLO", "dlrow") r = parse.parse("{:d}", "12", {"d": lambda s: int(s) * 2}) assert r.fixed == (24,) r = parse.parse("{:d}", "12") assert r.fixed == (12,) def test_typed_fail(): # pull a named, typed values out of string assert parse.parse("hello {:d} {:w}", "hello people 12") is None def test_named(): # pull a named value out of string r = parse.parse("hello {name}", "hello world") assert r.named == {"name": "world"} def test_named_repeated(): # test a name may be repeated r = parse.parse("{n} {n}", "x x") assert r.named == {"n": "x"} def test_named_repeated_type(): # test a name may be repeated with type conversion r = parse.parse("{n:d} {n:d}", "1 1") assert r.named == {"n": 1} def test_named_repeated_fail_value(): # test repeated name fails if value mismatches r = parse.parse("{n} {n}", "x y") assert r is None def test_named_repeated_type_fail_value(): # test repeated name with type conversion fails if value mismatches r = parse.parse("{n:d} {n:d}", "1 2") assert r is None def test_named_repeated_type_mismatch(): # test repeated name with mismatched type with pytest.raises(parse.RepeatedNameError): parse.compile("{n:d} {n:w}") def test_mixed(): # pull a fixed and named values out of string r = parse.parse("hello {} {name} {} {spam}", "hello world and other beings") assert r.fixed == ("world", "other") assert r.named == {"name": "and", "spam": "beings"} def test_named_typed(): # pull a named, typed values out of string r = parse.parse("hello {number:d} {things}", "hello 12 people") assert r.named == {"number": 12, "things": "people"} r = parse.parse("hello {number:w} {things}", "hello 12 people") assert r.named == {"number": "12", "things": "people"} def test_named_aligned_typed(): # pull a named, typed values out of 
string r = parse.parse("hello {number:d} {things}", "hello 12 people") assert r.named == {"number": 12, "things": "people"} r = parse.parse("hello {number:^d} {things}", "hello 12 people") assert r.named == {"number": 12, "things": "people"} def test_multiline(): r = parse.parse("hello\n{}\nworld", "hello\nthere\nworld") assert r.fixed[0] == "there" def test_spans(): # test the string sections our fields come from string = "hello world" r = parse.parse("hello {}", string) assert r.spans == {0: (6, 11)} start, end = r.spans[0] assert string[start:end] == r.fixed[0] string = "hello world" r = parse.parse("hello {:>}", string) assert r.spans == {0: (10, 15)} start, end = r.spans[0] assert string[start:end] == r.fixed[0] string = "hello 0x12 world" r = parse.parse("hello {val:x} world", string) assert r.spans == {"val": (6, 10)} start, end = r.spans["val"] assert string[start:end] == "0x%x" % r.named["val"] string = "hello world and other beings" r = parse.parse("hello {} {name} {} {spam}", string) assert r.spans == {0: (6, 11), "name": (12, 15), 1: (16, 21), "spam": (22, 28)} def test_numbers(): # pull a numbers out of a string def y(fmt, s, e, str_equals=False): p = parse.compile(fmt) r = p.parse(s) assert r is not None r = r.fixed[0] if str_equals: assert str(r) == str(e) else: assert r == e def n(fmt, s, e): assert parse.parse(fmt, s) is None y("a {:d} b", "a 0 b", 0) y("a {:d} b", "a 12 b", 12) y("a {:5d} b", "a 12 b", 12) y("a {:5d} b", "a -12 b", -12) y("a {:d} b", "a -12 b", -12) y("a {:d} b", "a +12 b", 12) y("a {:d} b", "a 12 b", 12) y("a {:d} b", "a 0b1000 b", 8) y("a {:d} b", "a 0o1000 b", 512) y("a {:d} b", "a 0x1000 b", 4096) y("a {:d} b", "a 0xabcdef b", 0xABCDEF) y("a {:%} b", "a 100% b", 1) y("a {:%} b", "a 50% b", 0.5) y("a {:%} b", "a 50.1% b", 0.501) y("a {:n} b", "a 100 b", 100) y("a {:n} b", "a 1,000 b", 1000) y("a {:n} b", "a 1.000 b", 1000) y("a {:n} b", "a -1,000 b", -1000) y("a {:n} b", "a 10,000 b", 10000) y("a {:n} b", "a 100,000 b", 100000) 
n("a {:n} b", "a 100,00 b", None) y("a {:n} b", "a 100.000 b", 100000) y("a {:n} b", "a 1.000.000 b", 1000000) y("a {:f} b", "a 12.0 b", 12.0) y("a {:f} b", "a -12.1 b", -12.1) y("a {:f} b", "a +12.1 b", 12.1) y("a {:f} b", "a .121 b", 0.121) y("a {:f} b", "a -.121 b", -0.121) n("a {:f} b", "a 12 b", None) y("a {:e} b", "a 1.0e10 b", 1.0e10) y("a {:e} b", "a .0e10 b", 0.0e10) y("a {:e} b", "a 1.0E10 b", 1.0e10) y("a {:e} b", "a 1.10000e10 b", 1.1e10) y("a {:e} b", "a 1.0e-10 b", 1.0e-10) y("a {:e} b", "a 1.0e+10 b", 1.0e10) # can't actually test this one on values 'cos nan != nan y("a {:e} b", "a nan b", float("nan"), str_equals=True) y("a {:e} b", "a NAN b", float("nan"), str_equals=True) y("a {:e} b", "a inf b", float("inf")) y("a {:e} b", "a +inf b", float("inf")) y("a {:e} b", "a -inf b", float("-inf")) y("a {:e} b", "a INF b", float("inf")) y("a {:e} b", "a +INF b", float("inf")) y("a {:e} b", "a -INF b", float("-inf")) y("a {:g} b", "a 1 b", 1) y("a {:g} b", "a 1e10 b", 1e10) y("a {:g} b", "a 1.0e10 b", 1.0e10) y("a {:g} b", "a 1.0E10 b", 1.0e10) y("a {:b} b", "a 1000 b", 8) y("a {:b} b", "a 0b1000 b", 8) y("a {:o} b", "a 12345670 b", int("12345670", 8)) y("a {:o} b", "a 0o12345670 b", int("12345670", 8)) y("a {:x} b", "a 1234567890abcdef b", 0x1234567890ABCDEF) y("a {:x} b", "a 1234567890ABCDEF b", 0x1234567890ABCDEF) y("a {:x} b", "a 0x1234567890abcdef b", 0x1234567890ABCDEF) y("a {:x} b", "a 0x1234567890ABCDEF b", 0x1234567890ABCDEF) y("a {:05d} b", "a 00001 b", 1) y("a {:05d} b", "a -00001 b", -1) y("a {:05d} b", "a +00001 b", 1) y("a {:02d} b", "a 10 b", 10) y("a {:=d} b", "a 000012 b", 12) y("a {:x=5d} b", "a xxx12 b", 12) y("a {:x=5d} b", "a -xxx12 b", -12) # Test that hex numbers that ambiguously start with 0b / 0B are parsed correctly # See issue #65 (https://github.com/r1chardj0n3s/parse/issues/65) y("a {:x} b", "a 0B b", 0xB) y("a {:x} b", "a 0B1 b", 0xB1) y("a {:x} b", "a 0b b", 0xB) y("a {:x} b", "a 0b1 b", 0xB1) # Test that number signs are 
understood correctly y("a {:d} b", "a -0o10 b", -8) y("a {:d} b", "a -0b1010 b", -10) y("a {:d} b", "a -0x1010 b", -0x1010) y("a {:o} b", "a -10 b", -8) y("a {:b} b", "a -1010 b", -10) y("a {:x} b", "a -1010 b", -0x1010) y("a {:d} b", "a +0o10 b", 8) y("a {:d} b", "a +0b1010 b", 10) y("a {:d} b", "a +0x1010 b", 0x1010) y("a {:o} b", "a +10 b", 8) y("a {:b} b", "a +1010 b", 10) y("a {:x} b", "a +1010 b", 0x1010) def test_two_datetimes(): r = parse.parse("a {:ti} {:ti} b", "a 1997-07-16 2012-08-01 b") assert len(r.fixed) == 2 assert r[0] == datetime(1997, 7, 16) assert r[1] == datetime(2012, 8, 1) def test_flexible_datetimes(): r = parse.parse("a {:%Y-%m-%d} b", "a 1997-07-16 b") assert len(r.fixed) == 1 assert r[0] == date(1997, 7, 16) r = parse.parse("a {:%Y-%b-%d} b", "a 1997-Feb-16 b") assert len(r.fixed) == 1 assert r[0] == date(1997, 2, 16) r = parse.parse("a {:%Y-%b-%d} {:d} b", "a 1997-Feb-16 8 b") assert len(r.fixed) == 2 assert r[0] == date(1997, 2, 16) r = parse.parse("a {my_date:%Y-%b-%d} {num:d} b", "a 1997-Feb-16 8 b") assert (r.named["my_date"]) == date(1997, 2, 16) assert (r.named["num"]) == 8 r = parse.parse("a {:%Y-%B-%d} b", "a 1997-February-16 b") assert r[0] == date(1997, 2, 16) r = parse.parse("a {:%Y%m%d} b", "a 19970716 b") assert r[0] == date(1997, 7, 16) def test_flexible_datetime_with_colon(): r = parse.parse("{dt:%Y-%m-%d %H:%M:%S}", "2023-11-21 13:23:27") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27) def test_datetime_with_various_subsecond_precision(): r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 13:23:27.123456") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 123456) r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 13:23:27.12345") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 123450) r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 13:23:27.1234") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 123400) r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 
13:23:27.123") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 123000) r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 13:23:27.0") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 0) @pytest.mark.skipif( sys.version_info[0] < 3, reason="Python 3+ required for timezone support" ) def test_flexible_datetime_with_timezone(): from datetime import timezone r = parse.parse("{dt:%Y-%m-%d %H:%M:%S %z}", "2023-11-21 13:23:27 +0000") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, tzinfo=timezone.utc) @pytest.mark.skipif( sys.version_info[0] < 3, reason="Python 3+ required for timezone support" ) def test_flexible_datetime_with_timezone_that_has_colons(): from datetime import timezone r = parse.parse("{dt:%Y-%m-%d %H:%M:%S %z}", "2023-11-21 13:23:27 +00:00:00") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, tzinfo=timezone.utc) def test_flexible_time(): r = parse.parse("a {time:%H:%M:%S} b", "a 13:23:27 b") assert r.named["time"] == time(13, 23, 27) def test_flexible_time_no_hour(): r = parse.parse("a {time:%M:%S} b", "a 23:27 b") assert r.named["time"] == time(0, 23, 27) def test_flexible_time_ms(): r = parse.parse("a {time:%M:%S:%f} b", "a 23:27:123456 b") assert r.named["time"] == time(0, 23, 27, 123456) def test_flexible_dates_single_digit(): r = parse.parse("{dt:%Y/%m/%d}", "2023/1/1") assert r.named["dt"] == date(2023, 1, 1) def test_flexible_dates_j(): r = parse.parse("{dt:%Y/%j}", "2023/9") assert r.named["dt"] == date(2023, 1, 9) r = parse.parse("{dt:%Y/%j}", "2023/009") assert r.named["dt"] == date(2023, 1, 9) def test_flexible_dates_year_current_year_inferred(): r = parse.parse("{dt:%j}", "9") assert r.named["dt"] == date(datetime.today().year, 1, 9) def test_datetimes(): def y(fmt, s, e, tz=None): p = parse.compile(fmt) r = p.parse(s) assert r is not None r = r.fixed[0] assert r == e assert tz is None or r.tzinfo == tz utc = parse.FixedTzOffset(0, "UTC") assert repr(utc) == "" aest = parse.FixedTzOffset(10 * 
60, "+1000") tz60 = parse.FixedTzOffset(60, "+01:00") # ISO 8660 variants # YYYY-MM-DD (eg 1997-07-16) y("a {:ti} b", "a 1997-07-16 b", datetime(1997, 7, 16)) # YYYY-MM-DDThh:mmTZD (eg 1997-07-16T19:20+01:00) y("a {:ti} b", "a 1997-07-16 19:20 b", datetime(1997, 7, 16, 19, 20, 0)) y("a {:ti} b", "a 1997-07-16T19:20 b", datetime(1997, 7, 16, 19, 20, 0)) y( "a {:ti} b", "a 1997-07-16T19:20Z b", datetime(1997, 7, 16, 19, 20, tzinfo=utc), ) y( "a {:ti} b", "a 1997-07-16T19:20+0100 b", datetime(1997, 7, 16, 19, 20, tzinfo=tz60), ) y( "a {:ti} b", "a 1997-07-16T19:20+01:00 b", datetime(1997, 7, 16, 19, 20, tzinfo=tz60), ) y( "a {:ti} b", "a 1997-07-16T19:20 +01:00 b", datetime(1997, 7, 16, 19, 20, tzinfo=tz60), ) # YYYY-MM-DDThh:mm:ssTZD (eg 1997-07-16T19:20:30+01:00) y("a {:ti} b", "a 1997-07-16 19:20:30 b", datetime(1997, 7, 16, 19, 20, 30)) y("a {:ti} b", "a 1997-07-16T19:20:30 b", datetime(1997, 7, 16, 19, 20, 30)) y( "a {:ti} b", "a 1997-07-16T19:20:30Z b", datetime(1997, 7, 16, 19, 20, 30, tzinfo=utc), ) y( "a {:ti} b", "a 1997-07-16T19:20:30+01:00 b", datetime(1997, 7, 16, 19, 20, 30, tzinfo=tz60), ) y( "a {:ti} b", "a 1997-07-16T19:20:30 +01:00 b", datetime(1997, 7, 16, 19, 20, 30, tzinfo=tz60), ) # YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00) y( "a {:ti} b", "a 1997-07-16 19:20:30.500000 b", datetime(1997, 7, 16, 19, 20, 30, 500000), ) y( "a {:ti} b", "a 1997-07-16T19:20:30.500000 b", datetime(1997, 7, 16, 19, 20, 30, 500000), ) y( "a {:ti} b", "a 1997-07-16T19:20:30.5Z b", datetime(1997, 7, 16, 19, 20, 30, 500000, tzinfo=utc), ) y( "a {:ti} b", "a 1997-07-16T19:20:30.5+01:00 b", datetime(1997, 7, 16, 19, 20, 30, 500000, tzinfo=tz60), ) aest_d = datetime(2011, 11, 21, 10, 21, 36, tzinfo=aest) dt = datetime(2011, 11, 21, 10, 21, 36) dt00 = datetime(2011, 11, 21, 10, 21) d = datetime(2011, 11, 21) # te RFC2822 e-mail format datetime y("a {:te} b", "a Mon, 21 Nov 2011 10:21:36 +1000 b", aest_d) y("a {:te} b", "a Mon, 21 Nov 2011 10:21:36 +10:00 b", 
aest_d) y("a {:te} b", "a 21 Nov 2011 10:21:36 +1000 b", aest_d) # tg global (day/month) format datetime y("a {:tg} b", "a 21/11/2011 10:21:36 AM +1000 b", aest_d) y("a {:tg} b", "a 21/11/2011 10:21:36 AM +10:00 b", aest_d) y("a {:tg} b", "a 21-11-2011 10:21:36 AM +1000 b", aest_d) y("a {:tg} b", "a 21/11/2011 10:21:36 +1000 b", aest_d) y("a {:tg} b", "a 21/11/2011 10:21:36 b", dt) y("a {:tg} b", "a 21/11/2011 10:21 b", dt00) y("a {:tg} b", "a 21-11-2011 b", d) y("a {:tg} b", "a 21-Nov-2011 10:21:36 AM +1000 b", aest_d) y("a {:tg} b", "a 21-November-2011 10:21:36 AM +1000 b", aest_d) # ta US (month/day) format datetime y("a {:ta} b", "a 11/21/2011 10:21:36 AM +1000 b", aest_d) y("a {:ta} b", "a 11/21/2011 10:21:36 AM +10:00 b", aest_d) y("a {:ta} b", "a 11-21-2011 10:21:36 AM +1000 b", aest_d) y("a {:ta} b", "a 11/21/2011 10:21:36 +1000 b", aest_d) y("a {:ta} b", "a 11/21/2011 10:21:36 b", dt) y("a {:ta} b", "a 11/21/2011 10:21 b", dt00) y("a {:ta} b", "a 11-21-2011 b", d) y("a {:ta} b", "a Nov-21-2011 10:21:36 AM +1000 b", aest_d) y("a {:ta} b", "a November-21-2011 10:21:36 AM +1000 b", aest_d) y("a {:ta} b", "a November-21-2011 b", d) # ts Linux System log format datetime y( "a {:ts} b", "a Nov 21 10:21:36 b", datetime(datetime.today().year, 11, 21, 10, 21, 36), ) y( "a {:ts} b", "a Nov 1 10:21:36 b", datetime(datetime.today().year, 11, 1, 10, 21, 36), ) y( "a {:ts} b", "a Nov 1 03:21:36 b", datetime(datetime.today().year, 11, 1, 3, 21, 36), ) # th HTTP log format date/time datetime y("a {:th} b", "a 21/Nov/2011:10:21:36 +1000 b", aest_d) y("a {:th} b", "a 21/Nov/2011:10:21:36 +10:00 b", aest_d) d = datetime(2011, 11, 21, 10, 21, 36) # tc ctime() format datetime y("a {:tc} b", "a Mon Nov 21 10:21:36 2011 b", d) t530 = parse.FixedTzOffset(-5 * 60 - 30, "-5:30") t830 = parse.FixedTzOffset(-8 * 60 - 30, "-8:30") # tt Time time y("a {:tt} b", "a 10:21:36 AM +1000 b", time(10, 21, 36, tzinfo=aest)) y("a {:tt} b", "a 10:21:36 AM +10:00 b", time(10, 21, 36, 
tzinfo=aest)) y("a {:tt} b", "a 10:21:36 AM b", time(10, 21, 36)) y("a {:tt} b", "a 10:21:36 PM b", time(22, 21, 36)) y("a {:tt} b", "a 10:21:36 b", time(10, 21, 36)) y("a {:tt} b", "a 10:21 b", time(10, 21)) y("a {:tt} b", "a 10:21:36 PM -5:30 b", time(22, 21, 36, tzinfo=t530)) y("a {:tt} b", "a 10:21:36 PM -530 b", time(22, 21, 36, tzinfo=t530)) y("a {:tt} b", "a 10:21:36 PM -05:30 b", time(22, 21, 36, tzinfo=t530)) y("a {:tt} b", "a 10:21:36 PM -0530 b", time(22, 21, 36, tzinfo=t530)) y("a {:tt} b", "a 10:21:36 PM -08:30 b", time(22, 21, 36, tzinfo=t830)) y("a {:tt} b", "a 10:21:36 PM -0830 b", time(22, 21, 36, tzinfo=t830)) def test_datetime_group_count(): # test we increment the group count correctly for datetimes r = parse.parse("{:ti} {}", "1972-01-01 spam") assert r.fixed[1] == "spam" r = parse.parse("{:tg} {}", "1-1-1972 spam") assert r.fixed[1] == "spam" r = parse.parse("{:ta} {}", "1-1-1972 spam") assert r.fixed[1] == "spam" r = parse.parse("{:th} {}", "21/Nov/2011:10:21:36 +1000 spam") assert r.fixed[1] == "spam" r = parse.parse("{:te} {}", "21 Nov 2011 10:21:36 +1000 spam") assert r.fixed[1] == "spam" r = parse.parse("{:tc} {}", "Mon Nov 21 10:21:36 2011 spam") assert r.fixed[1] == "spam" r = parse.parse("{:tt} {}", "10:21 spam") assert r.fixed[1] == "spam" def test_mixed_types(): # stress-test: pull one of everything out of a string r = parse.parse( """ letters: {:w} non-letters: {:W} whitespace: "{:s}" non-whitespace: \t{:S}\n digits: {:d} {:d} non-digits: {:D} numbers with thousands: {:n} fixed-point: {:f} floating-point: {:e} general numbers: {:g} {:g} binary: {:b} octal: {:o} hex: {:x} ISO 8601 e.g. {:ti} RFC2822 e.g. {:te} Global e.g. {:tg} US e.g. {:ta} ctime() e.g. {:tc} HTTP e.g. 
{:th} time: {:tt} final value: {} """, """ letters: abcdef_GHIJLK non-letters: !@#%$ *^% whitespace: " \t\n" non-whitespace: \tabc\n digits: 12345 0b1011011 non-digits: abcdef numbers with thousands: 1,000 fixed-point: 100.2345 floating-point: 1.1e-10 general numbers: 1 1.1 binary: 0b1000 octal: 0o1000 hex: 0x1000 ISO 8601 e.g. 1972-01-20T10:21:36Z RFC2822 e.g. Mon, 20 Jan 1972 10:21:36 +1000 Global e.g. 20/1/1972 10:21:36 AM +1:00 US e.g. 1/20/1972 10:21:36 PM +10:30 ctime() e.g. Sun Sep 16 01:03:52 1973 HTTP e.g. 21/Nov/2011:00:07:11 +0000 time: 10:21:36 PM -5:30 final value: spam """, ) assert r is not None assert r.fixed[22] == "spam" def test_mixed_type_variant(): r = parse.parse( """ letters: {:w} non-letters: {:W} whitespace: "{:s}" non-whitespace: \t{:S}\n digits: {:d} non-digits: {:D} numbers with thousands: {:n} fixed-point: {:f} floating-point: {:e} general numbers: {:g} {:g} binary: {:b} octal: {:o} hex: {:x} ISO 8601 e.g. {:ti} RFC2822 e.g. {:te} Global e.g. {:tg} US e.g. {:ta} ctime() e.g. {:tc} HTTP e.g. {:th} time: {:tt} final value: {} """, """ letters: abcdef_GHIJLK non-letters: !@#%$ *^% whitespace: " \t\n" non-whitespace: \tabc\n digits: 0xabcdef non-digits: abcdef numbers with thousands: 1.000.000 fixed-point: 0.00001 floating-point: NAN general numbers: 1.1e10 nan binary: 0B1000 octal: 0O1000 hex: 0X1000 ISO 8601 e.g. 1972-01-20T10:21:36Z RFC2822 e.g. Mon, 20 Jan 1972 10:21:36 +1000 Global e.g. 20/1/1972 10:21:36 AM +1:00 US e.g. 1/20/1972 10:21:36 PM +10:30 ctime() e.g. Sun Sep 16 01:03:52 1973 HTTP e.g. 21/Nov/2011:00:07:11 +0000 time: 10:21:36 PM -5:30 final value: spam """, ) assert r is not None assert r.fixed[21] == "spam" @pytest.mark.skipif(sys.version_info >= (3, 5), reason="Python 3.5 removed the limit of 100 named groups in a regular expression") def test_too_many_fields(): # Python 3.5 removed the limit of 100 named groups in a regular expression, # so only test for the exception if the limit exists. 
p = parse.compile("{:ti}" * 15) with pytest.raises(parse.TooManyFields): p.parse("") def test_letters(): res = parse.parse("{:l}", "") assert res is None res = parse.parse("{:l}", "sPaM") assert res.fixed == ("sPaM",) res = parse.parse("{:l}", "sP4M") assert res is None res = parse.parse("{:l}", "sP_M") assert res is None def test_strftime_strptime_roundtrip(): dt = datetime.now() fmt = "_".join([k for k in parse.dt_format_to_regex if k != "%z"]) s = dt.strftime(fmt) [res] = parse.parse("{:" + fmt + "}", s) assert res == dt def test_parser_format(): parser = parse.compile("hello {}") assert parser.format.format("world") == "hello world" with pytest.raises(AttributeError): parser.format = "hi {}" def test_hyphen_inside_field_name(): # https://github.com/r1chardj0n3s/parse/issues/86 # https://github.com/python-openapi/openapi-core/issues/672 template = "/local/sub/{user-id}/duration" assert parse.Parser(template).named_fields == ["user_id"] string = "https://dummy_server.com/local/sub/1647222638/duration" result = parse.search(template, string) assert result["user-id"] == "1647222638" def test_hyphen_inside_field_name_collision_handling(): template = "/foo/{user-id}/{user_id}/{user.id}/bar/" assert parse.Parser(template).named_fields == ["user_id", "user__id", "user___id"] string = "/foo/1/2/3/bar/" result = parse.search(template, string) assert result["user-id"] == "1" assert result["user_id"] == "2" assert result["user.id"] == "3" parse_type-0.6.4/tests/parse_tests/test_parsetype.py000066400000000000000000000153411467747561600230520ustar00rootroot00000000000000from decimal import Decimal import pytest import parse def assert_match(parser, text, param_name, expected): result = parser.parse(text) assert result[param_name] == expected def assert_mismatch(parser, text, param_name): result = parser.parse(text) assert result is None def assert_fixed_match(parser, text, expected): result = parser.parse(text) assert result.fixed == expected def 
assert_fixed_mismatch(parser, text): result = parser.parse(text) assert result is None def test_pattern_should_be_used(): def parse_number(text): return int(text) parse_number.pattern = r"\d+" parse_number.name = "Number" # For testing only. extra_types = {parse_number.name: parse_number} format = "Value is {number:Number} and..." parser = parse.Parser(format, extra_types) assert_match(parser, "Value is 42 and...", "number", 42) assert_match(parser, "Value is 00123 and...", "number", 123) assert_mismatch(parser, "Value is ALICE and...", "number") assert_mismatch(parser, "Value is -123 and...", "number") def test_pattern_should_be_used2(): def parse_yesno(text): return parse_yesno.mapping[text.lower()] parse_yesno.mapping = { "yes": True, "no": False, "on": True, "off": False, "true": True, "false": False, } parse_yesno.pattern = r"|".join(parse_yesno.mapping.keys()) parse_yesno.name = "YesNo" # For testing only. extra_types = {parse_yesno.name: parse_yesno} format = "Answer: {answer:YesNo}" parser = parse.Parser(format, extra_types) # -- ENSURE: Known enum values are correctly extracted. for value_name, value in parse_yesno.mapping.items(): text = "Answer: %s" % value_name assert_match(parser, text, "answer", value) # -- IGNORE-CASE: In parsing, calls type converter function !!! assert_match(parser, "Answer: YES", "answer", True) assert_mismatch(parser, "Answer: __YES__", "answer") def test_with_pattern(): ab_vals = {"a": 1, "b": 2} @parse.with_pattern(r"[ab]") def ab(text): return ab_vals[text] parser = parse.Parser("test {result:ab}", {"ab": ab}) assert_match(parser, "test a", "result", 1) assert_match(parser, "test b", "result", 2) assert_mismatch(parser, "test c", "result") def test_with_pattern_and_regex_group_count(): # -- SPECIAL-CASE: Regex-grouping is used in user-defined type # NOTE: Missing or wroung regex_group_counts cause problems # with parsing following params. 
@parse.with_pattern(r"(meter|kilometer)", regex_group_count=1) def parse_unit(text): return text.strip() @parse.with_pattern(r"\d+") def parse_number(text): return int(text) type_converters = {"Number": parse_number, "Unit": parse_unit} # -- CASE: Unnamed-params (affected) parser = parse.Parser("test {:Unit}-{:Number}", type_converters) assert_fixed_match(parser, "test meter-10", ("meter", 10)) assert_fixed_match(parser, "test kilometer-20", ("kilometer", 20)) assert_fixed_mismatch(parser, "test liter-30") # -- CASE: Named-params (uncritical; should not be affected) # REASON: Named-params have additional, own grouping. parser2 = parse.Parser("test {unit:Unit}-{value:Number}", type_converters) assert_match(parser2, "test meter-10", "unit", "meter") assert_match(parser2, "test meter-10", "value", 10) assert_match(parser2, "test kilometer-20", "unit", "kilometer") assert_match(parser2, "test kilometer-20", "value", 20) assert_mismatch(parser2, "test liter-30", "unit") def test_with_pattern_and_wrong_regex_group_count_raises_error(): # -- SPECIAL-CASE: # Regex-grouping is used in user-defined type, but wrong value is provided. @parse.with_pattern(r"(meter|kilometer)", regex_group_count=1) def parse_unit(text): return text.strip() @parse.with_pattern(r"\d+") def parse_number(text): return int(text) # -- CASE: Unnamed-params (affected) BAD_REGEX_GROUP_COUNTS_AND_ERRORS = [ (None, ValueError), (0, ValueError), (2, IndexError), ] for bad_regex_group_count, error_class in BAD_REGEX_GROUP_COUNTS_AND_ERRORS: parse_unit.regex_group_count = bad_regex_group_count # -- OVERRIDE-HERE type_converters = {"Number": parse_number, "Unit": parse_unit} parser = parse.Parser("test {:Unit}-{:Number}", type_converters) with pytest.raises(error_class): parser.parse("test meter-10") def test_with_pattern_and_regex_group_count_is_none(): # -- CORNER-CASE: Increase code-coverage. 
data_values = {"a": 1, "b": 2} @parse.with_pattern(r"[ab]") def parse_data(text): return data_values[text] parse_data.regex_group_count = None # ENFORCE: None # -- CASE: Unnamed-params parser = parse.Parser("test {:Data}", {"Data": parse_data}) assert_fixed_match(parser, "test a", (1,)) assert_fixed_match(parser, "test b", (2,)) assert_fixed_mismatch(parser, "test c") # -- CASE: Named-params parser2 = parse.Parser("test {value:Data}", {"Data": parse_data}) assert_match(parser2, "test a", "value", 1) assert_match(parser2, "test b", "value", 2) assert_mismatch(parser2, "test c", "value") def test_case_sensitivity(): r = parse.parse("SPAM {} SPAM", "spam spam spam") assert r[0] == "spam" assert parse.parse("SPAM {} SPAM", "spam spam spam", case_sensitive=True) is None def test_decimal_value(): value = Decimal("5.5") str_ = "test {}".format(value) parser = parse.Parser("test {:F}") assert parser.parse(str_)[0] == value def test_width_str(): res = parse.parse("{:.2}{:.2}", "look") assert res.fixed == ("lo", "ok") res = parse.parse("{:2}{:2}", "look") assert res.fixed == ("lo", "ok") res = parse.parse("{:4}{}", "look at that") assert res.fixed == ("look", " at that") def test_width_constraints(): res = parse.parse("{:4}", "looky") assert res.fixed == ("looky",) res = parse.parse("{:4.4}", "looky") assert res is None res = parse.parse("{:4.4}", "ook") assert res is None res = parse.parse("{:4}{:.4}", "look at that") assert res.fixed == ("look at ", "that") def test_width_multi_int(): res = parse.parse("{:02d}{:02d}", "0440") assert res.fixed == (4, 40) res = parse.parse("{:03d}{:d}", "04404") assert res.fixed == (44, 4) def test_width_empty_input(): res = parse.parse("{:.2}", "") assert res is None res = parse.parse("{:2}", "l") assert res is None res = parse.parse("{:2d}", "") assert res is None def test_int_convert_stateless_base(): parser = parse.Parser("{:d}") assert parser.parse("1234")[0] == 1234 assert parser.parse("0b1011")[0] == 0b1011 
parse_type-0.6.4/tests/parse_tests/test_pattern.py000066400000000000000000000056031467747561600225130ustar00rootroot00000000000000import pytest import parse def _test_expression(format, expression): assert parse.Parser(format)._expression == expression def test_braces(): # pull a simple string out of another string _test_expression("{{ }}", r"\{ \}") def test_fixed(): # pull a simple string out of another string _test_expression("{}", r"(.+?)") _test_expression("{} {}", r"(.+?) (.+?)") def test_named(): # pull a named string out of another string _test_expression("{name}", r"(?P.+?)") _test_expression("{name} {other}", r"(?P.+?) (?P.+?)") def test_named_typed(): # pull a named string out of another string _test_expression("{name:w}", r"(?P\w+)") _test_expression("{name:w} {other:w}", r"(?P\w+) (?P\w+)") def test_numbered(): _test_expression("{0}", r"(.+?)") _test_expression("{0} {1}", r"(.+?) (.+?)") _test_expression("{0:f} {1:f}", r"([-+ ]?\d*\.\d+) ([-+ ]?\d*\.\d+)") def test_bird(): # skip some trailing whitespace _test_expression("{:>}", r" *(.+?)") def test_format_variety(): def _(fmt, matches): d = parse.extract_format(fmt, {"spam": "spam"}) for k in matches: assert d.get(k) == matches[k] for t in "%obxegfdDwWsS": _(t, {"type": t}) _("10" + t, {"type": t, "width": "10"}) _("05d", {"type": "d", "width": "5", "zero": True}) _("<", {"align": "<"}) _(".<", {"align": "<", "fill": "."}) _(">", {"align": ">"}) _(".>", {"align": ">", "fill": "."}) _("^", {"align": "^"}) _(".^", {"align": "^", "fill": "."}) _("x=d", {"type": "d", "align": "=", "fill": "x"}) _("d", {"type": "d"}) _("ti", {"type": "ti"}) _("spam", {"type": "spam"}) _(".^010d", {"type": "d", "width": "10", "align": "^", "fill": ".", "zero": True}) _(".2f", {"type": "f", "precision": "2"}) _("10.2f", {"type": "f", "width": "10", "precision": "2"}) def test_dot_separated_fields(): # this should just work and provide the named value res = parse.parse("{hello.world}_{jojo.foo.baz}_{simple}", "a_b_c") assert 
res.named["hello.world"] == "a" assert res.named["jojo.foo.baz"] == "b" assert res.named["simple"] == "c" def test_dict_style_fields(): res = parse.parse("{hello[world]}_{hello[foo][baz]}_{simple}", "a_b_c") assert res.named["hello"]["world"] == "a" assert res.named["hello"]["foo"]["baz"] == "b" assert res.named["simple"] == "c" def test_dot_separated_fields_name_collisions(): # this should just work and provide the named value res = parse.parse("{a_.b}_{a__b}_{a._b}_{a___b}", "a_b_c_d") assert res.named["a_.b"] == "a" assert res.named["a__b"] == "b" assert res.named["a._b"] == "c" assert res.named["a___b"] == "d" def test_invalid_groupnames_are_handled_gracefully(): with pytest.raises(NotImplementedError): parse.parse("{hello['world']}", "doesn't work") parse_type-0.6.4/tests/parse_tests/test_result.py000066400000000000000000000014541467747561600223540ustar00rootroot00000000000000import pytest import parse def test_fixed_access(): r = parse.Result((1, 2), {}, None) assert r[0] == 1 assert r[1] == 2 with pytest.raises(IndexError): r[2] with pytest.raises(KeyError): r["spam"] def test_slice_access(): r = parse.Result((1, 2, 3, 4), {}, None) assert r[1:3] == (2, 3) assert r[-5:5] == (1, 2, 3, 4) assert r[:4:2] == (1, 3) assert r[::-2] == (4, 2) assert r[5:10] == () def test_named_access(): r = parse.Result((), {"spam": "ham"}, None) assert r["spam"] == "ham" with pytest.raises(KeyError): r["ham"] with pytest.raises(IndexError): r[0] def test_contains(): r = parse.Result(("cat",), {"spam": "ham"}, None) assert "spam" in r assert "cat" not in r assert "ham" not in r parse_type-0.6.4/tests/parse_tests/test_search.py000066400000000000000000000010121467747561600222710ustar00rootroot00000000000000import parse def test_basic(): r = parse.search("a {} c", " a b c ") assert r.fixed == ("b",) def test_multiline(): r = parse.search("age: {:d}\n", "name: Rufus\nage: 42\ncolor: red\n") assert r.fixed == (42,) def test_pos(): r = parse.search("a {} c", " a b c ", 2) assert r is 
None def test_no_evaluate_result(): match = parse.search( "age: {:d}\n", "name: Rufus\nage: 42\ncolor: red\n", evaluate_result=False ) r = match.evaluate_result() assert r.fixed == (42,) parse_type-0.6.4/tests/parse_tests_with_parse_type/000077500000000000000000000000001467747561600227075ustar00rootroot00000000000000parse_type-0.6.4/tests/parse_tests_with_parse_type/__init__.py000066400000000000000000000002071467747561600250170ustar00rootroot00000000000000# COPY TESTSUITE FROM: parse v1.20.2 # SEE: https://github.com/r1chardj0n3s/parse # NOTES: # * Apply testsuite to "parse_type.parse" parse_type-0.6.4/tests/parse_tests_with_parse_type/test_bugs.py000066400000000000000000000062511467747561600252640ustar00rootroot00000000000000# -- REPLACE: parse with parse_type.parse from __future__ import absolute_import, print_function from parse_type import parse # -- ORGINAL_SOURCE_STARTS_HERE: import pickle from datetime import datetime # DISABLED: import parse def test_tz_compare_to_None(): utc = parse.FixedTzOffset(0, "UTC") assert utc is not None assert utc != "spam" def test_named_date_issue7(): r = parse.parse("on {date:ti}", "on 2012-09-17") assert r["date"] == datetime(2012, 9, 17, 0, 0, 0) # fix introduced regressions r = parse.parse("a {:ti} b", "a 1997-07-16T19:20 b") assert r[0] == datetime(1997, 7, 16, 19, 20, 0) r = parse.parse("a {:ti} b", "a 1997-07-16T19:20Z b") utc = parse.FixedTzOffset(0, "UTC") assert r[0] == datetime(1997, 7, 16, 19, 20, tzinfo=utc) r = parse.parse("a {date:ti} b", "a 1997-07-16T19:20Z b") assert r["date"] == datetime(1997, 7, 16, 19, 20, tzinfo=utc) def test_dotted_type_conversion_pull_8(): # test pull request 8 which fixes type conversion related to dotted # names being applied correctly r = parse.parse("{a.b:d}", "1") assert r["a.b"] == 1 r = parse.parse("{a_b:w} {a.b:d}", "1 2") assert r["a_b"] == "1" assert r["a.b"] == 2 def test_pm_overflow_issue16(): r = parse.parse("Meet at {:tg}", "Meet at 1/2/2011 12:45 PM") assert r[0] == 
datetime(2011, 2, 1, 12, 45) def test_pm_handling_issue57(): r = parse.parse("Meet at {:tg}", "Meet at 1/2/2011 12:15 PM") assert r[0] == datetime(2011, 2, 1, 12, 15) r = parse.parse("Meet at {:tg}", "Meet at 1/2/2011 12:15 AM") assert r[0] == datetime(2011, 2, 1, 0, 15) def test_user_type_with_group_count_issue60(): @parse.with_pattern(r"((\w+))", regex_group_count=2) def parse_word_and_covert_to_uppercase(text): return text.strip().upper() @parse.with_pattern(r"\d+") def parse_number(text): return int(text) # -- CASE: Use named (OK) type_map = {"Name": parse_word_and_covert_to_uppercase, "Number": parse_number} r = parse.parse( "Hello {name:Name} {number:Number}", "Hello Alice 42", extra_types=type_map ) assert r.named == {"name": "ALICE", "number": 42} # -- CASE: Use unnamed/fixed (problematic) r = parse.parse("Hello {:Name} {:Number}", "Hello Alice 42", extra_types=type_map) assert r[0] == "ALICE" assert r[1] == 42 def test_unmatched_brace_doesnt_match(): r = parse.parse("{who.txt", "hello") assert r is None def test_pickling_bug_110(): p = parse.compile("{a:d}") # prior to the fix, this would raise an AttributeError pickle.dumps(p) def test_unused_centered_alignment_bug(): r = parse.parse("{:^2S}", "foo") assert r[0] == "foo" r = parse.search("{:^2S}", "foo") assert r[0] == "foo" # specifically test for the case in issue #118 as well r = parse.parse("Column {:d}:{:^}", "Column 1: Timestep") assert r[0] == 1 assert r[1] == "Timestep" def test_unused_left_alignment_bug(): r = parse.parse("{:<2S}", "foo") assert r[0] == "foo" r = parse.search("{:<2S}", "foo") assert r[0] == "foo" def test_match_trailing_newline(): r = parse.parse("{}", "test\n") assert r[0] == "test\n" parse_type-0.6.4/tests/parse_tests_with_parse_type/test_findall.py000066400000000000000000000014521467747561600257330ustar00rootroot00000000000000# -- REPLACE: parse with parse_type.parse from __future__ import absolute_import, print_function from parse_type import parse # -- 
ORIGINAL_SOURCE_STARTS_HERE: # DISABLED: import parse def test_findall(): s = "".join( r.fixed[0] for r in parse.findall(">{}<", "

    some bold text

    ") ) assert s == "some bold text" def test_no_evaluate_result(): s = "".join( m.evaluate_result().fixed[0] for m in parse.findall( ">{}<", "

    some bold text

    ", evaluate_result=False ) ) assert s == "some bold text" def test_case_sensitivity(): l = [r.fixed[0] for r in parse.findall("x({})x", "X(hi)X")] assert l == ["hi"] l = [r.fixed[0] for r in parse.findall("x({})x", "X(hi)X", case_sensitive=True)] assert l == [] parse_type-0.6.4/tests/parse_tests_with_parse_type/test_parse.py000066400000000000000000000616551467747561600254470ustar00rootroot00000000000000# coding: utf-8 # -- REPLACE: parse with parse_type.parse from __future__ import absolute_import, print_function from parse_type import parse # -- ORIGINAL_SOURCE_STARTS_HERE: import sys from datetime import date from datetime import datetime from datetime import time import pytest # DISABLED: import parse def test_no_match(): # string does not match format assert parse.parse("{{hello}}", "hello") is None def test_nothing(): # do no actual parsing r = parse.parse("{{hello}}", "{hello}") assert r.fixed == () assert r.named == {} def test_no_evaluate_result(): # pull a fixed value out of string match = parse.parse("hello {}", "hello world", evaluate_result=False) r = match.evaluate_result() assert r.fixed == ("world",) def test_regular_expression(): # match an actual regular expression s = r"^(hello\s[wW]{}!+.*)$" e = s.replace("{}", "orld") r = parse.parse(s, e) assert r.fixed == ("orld",) e = s.replace("{}", ".*?") r = parse.parse(s, e) assert r.fixed == (".*?",) def test_question_mark(): # issue9: make sure a ? 
in the parse string is handled correctly r = parse.parse('"{}"?', '"teststr"?') assert r[0] == "teststr" def test_pipe(): # issue22: make sure a | in the parse string is handled correctly r = parse.parse("| {}", "| teststr") assert r[0] == "teststr" def test_unicode(): # issue29: make sure unicode is parsable r = parse.parse("{}", "t€ststr") assert r[0] == "t€ststr" def test_hexadecimal(): # issue42: make sure bare hexadecimal isn't matched as "digits" r = parse.parse("{:d}", "abcdef") assert r is None def test_fixed(): # pull a fixed value out of string r = parse.parse("hello {}", "hello world") assert r.fixed == ("world",) def test_left(): # pull left-aligned text out of string r = parse.parse("{:<} world", "hello world") assert r.fixed == ("hello",) def test_right(): # pull right-aligned text out of string r = parse.parse("hello {:>}", "hello world") assert r.fixed == ("world",) def test_center(): # pull center-aligned text out of string r = parse.parse("hello {:^} world", "hello there world") assert r.fixed == ("there",) def test_typed(): # pull a named, typed values out of string r = parse.parse("hello {:d} {:w}", "hello 12 people") assert r.fixed == (12, "people") r = parse.parse("hello {:w} {:w}", "hello 12 people") assert r.fixed == ("12", "people") def test_sign(): # sign is ignored r = parse.parse("Pi = {:.7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) r = parse.parse("Pi = {:+.7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) r = parse.parse("Pi = {:-.7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) r = parse.parse("Pi = {: .7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) def test_precision(): # pull a float out of a string r = parse.parse("Pi = {:.7f}", "Pi = 3.1415926") assert r.fixed == (3.1415926,) r = parse.parse("Pi/10 = {:8.5f}", "Pi/10 = 0.31415") assert r.fixed == (0.31415,) # float may have not leading zero r = parse.parse("Pi/10 = {:8.5f}", "Pi/10 = .31415") assert r.fixed == (0.31415,) r = parse.parse("Pi/10 = 
{:8.5f}", "Pi/10 = -.31415") assert r.fixed == (-0.31415,) def test_custom_type(): # use a custom type r = parse.parse( "{:shouty} {:spam}", "hello world", {"shouty": lambda s: s.upper(), "spam": lambda s: "".join(reversed(s))}, ) assert r.fixed == ("HELLO", "dlrow") r = parse.parse("{:d}", "12", {"d": lambda s: int(s) * 2}) assert r.fixed == (24,) r = parse.parse("{:d}", "12") assert r.fixed == (12,) def test_typed_fail(): # pull a named, typed values out of string assert parse.parse("hello {:d} {:w}", "hello people 12") is None def test_named(): # pull a named value out of string r = parse.parse("hello {name}", "hello world") assert r.named == {"name": "world"} def test_named_repeated(): # test a name may be repeated r = parse.parse("{n} {n}", "x x") assert r.named == {"n": "x"} def test_named_repeated_type(): # test a name may be repeated with type conversion r = parse.parse("{n:d} {n:d}", "1 1") assert r.named == {"n": 1} def test_named_repeated_fail_value(): # test repeated name fails if value mismatches r = parse.parse("{n} {n}", "x y") assert r is None def test_named_repeated_type_fail_value(): # test repeated name with type conversion fails if value mismatches r = parse.parse("{n:d} {n:d}", "1 2") assert r is None def test_named_repeated_type_mismatch(): # test repeated name with mismatched type with pytest.raises(parse.RepeatedNameError): parse.compile("{n:d} {n:w}") def test_mixed(): # pull a fixed and named values out of string r = parse.parse("hello {} {name} {} {spam}", "hello world and other beings") assert r.fixed == ("world", "other") assert r.named == {"name": "and", "spam": "beings"} def test_named_typed(): # pull a named, typed values out of string r = parse.parse("hello {number:d} {things}", "hello 12 people") assert r.named == {"number": 12, "things": "people"} r = parse.parse("hello {number:w} {things}", "hello 12 people") assert r.named == {"number": "12", "things": "people"} def test_named_aligned_typed(): # pull a named, typed values out of 
string r = parse.parse("hello {number:d} {things}", "hello 12 people") assert r.named == {"number": 12, "things": "people"} r = parse.parse("hello {number:^d} {things}", "hello 12 people") assert r.named == {"number": 12, "things": "people"} def test_multiline(): r = parse.parse("hello\n{}\nworld", "hello\nthere\nworld") assert r.fixed[0] == "there" def test_spans(): # test the string sections our fields come from string = "hello world" r = parse.parse("hello {}", string) assert r.spans == {0: (6, 11)} start, end = r.spans[0] assert string[start:end] == r.fixed[0] string = "hello world" r = parse.parse("hello {:>}", string) assert r.spans == {0: (10, 15)} start, end = r.spans[0] assert string[start:end] == r.fixed[0] string = "hello 0x12 world" r = parse.parse("hello {val:x} world", string) assert r.spans == {"val": (6, 10)} start, end = r.spans["val"] assert string[start:end] == "0x%x" % r.named["val"] string = "hello world and other beings" r = parse.parse("hello {} {name} {} {spam}", string) assert r.spans == {0: (6, 11), "name": (12, 15), 1: (16, 21), "spam": (22, 28)} def test_numbers(): # pull a numbers out of a string def y(fmt, s, e, str_equals=False): p = parse.compile(fmt) r = p.parse(s) assert r is not None r = r.fixed[0] if str_equals: assert str(r) == str(e) else: assert r == e def n(fmt, s, e): assert parse.parse(fmt, s) is None y("a {:d} b", "a 0 b", 0) y("a {:d} b", "a 12 b", 12) y("a {:5d} b", "a 12 b", 12) y("a {:5d} b", "a -12 b", -12) y("a {:d} b", "a -12 b", -12) y("a {:d} b", "a +12 b", 12) y("a {:d} b", "a 12 b", 12) y("a {:d} b", "a 0b1000 b", 8) y("a {:d} b", "a 0o1000 b", 512) y("a {:d} b", "a 0x1000 b", 4096) y("a {:d} b", "a 0xabcdef b", 0xABCDEF) y("a {:%} b", "a 100% b", 1) y("a {:%} b", "a 50% b", 0.5) y("a {:%} b", "a 50.1% b", 0.501) y("a {:n} b", "a 100 b", 100) y("a {:n} b", "a 1,000 b", 1000) y("a {:n} b", "a 1.000 b", 1000) y("a {:n} b", "a -1,000 b", -1000) y("a {:n} b", "a 10,000 b", 10000) y("a {:n} b", "a 100,000 b", 100000) 
n("a {:n} b", "a 100,00 b", None) y("a {:n} b", "a 100.000 b", 100000) y("a {:n} b", "a 1.000.000 b", 1000000) y("a {:f} b", "a 12.0 b", 12.0) y("a {:f} b", "a -12.1 b", -12.1) y("a {:f} b", "a +12.1 b", 12.1) y("a {:f} b", "a .121 b", 0.121) y("a {:f} b", "a -.121 b", -0.121) n("a {:f} b", "a 12 b", None) y("a {:e} b", "a 1.0e10 b", 1.0e10) y("a {:e} b", "a .0e10 b", 0.0e10) y("a {:e} b", "a 1.0E10 b", 1.0e10) y("a {:e} b", "a 1.10000e10 b", 1.1e10) y("a {:e} b", "a 1.0e-10 b", 1.0e-10) y("a {:e} b", "a 1.0e+10 b", 1.0e10) # can't actually test this one on values 'cos nan != nan y("a {:e} b", "a nan b", float("nan"), str_equals=True) y("a {:e} b", "a NAN b", float("nan"), str_equals=True) y("a {:e} b", "a inf b", float("inf")) y("a {:e} b", "a +inf b", float("inf")) y("a {:e} b", "a -inf b", float("-inf")) y("a {:e} b", "a INF b", float("inf")) y("a {:e} b", "a +INF b", float("inf")) y("a {:e} b", "a -INF b", float("-inf")) y("a {:g} b", "a 1 b", 1) y("a {:g} b", "a 1e10 b", 1e10) y("a {:g} b", "a 1.0e10 b", 1.0e10) y("a {:g} b", "a 1.0E10 b", 1.0e10) y("a {:b} b", "a 1000 b", 8) y("a {:b} b", "a 0b1000 b", 8) y("a {:o} b", "a 12345670 b", int("12345670", 8)) y("a {:o} b", "a 0o12345670 b", int("12345670", 8)) y("a {:x} b", "a 1234567890abcdef b", 0x1234567890ABCDEF) y("a {:x} b", "a 1234567890ABCDEF b", 0x1234567890ABCDEF) y("a {:x} b", "a 0x1234567890abcdef b", 0x1234567890ABCDEF) y("a {:x} b", "a 0x1234567890ABCDEF b", 0x1234567890ABCDEF) y("a {:05d} b", "a 00001 b", 1) y("a {:05d} b", "a -00001 b", -1) y("a {:05d} b", "a +00001 b", 1) y("a {:02d} b", "a 10 b", 10) y("a {:=d} b", "a 000012 b", 12) y("a {:x=5d} b", "a xxx12 b", 12) y("a {:x=5d} b", "a -xxx12 b", -12) # Test that hex numbers that ambiguously start with 0b / 0B are parsed correctly # See issue #65 (https://github.com/r1chardj0n3s/parse/issues/65) y("a {:x} b", "a 0B b", 0xB) y("a {:x} b", "a 0B1 b", 0xB1) y("a {:x} b", "a 0b b", 0xB) y("a {:x} b", "a 0b1 b", 0xB1) # Test that number signs are 
understood correctly y("a {:d} b", "a -0o10 b", -8) y("a {:d} b", "a -0b1010 b", -10) y("a {:d} b", "a -0x1010 b", -0x1010) y("a {:o} b", "a -10 b", -8) y("a {:b} b", "a -1010 b", -10) y("a {:x} b", "a -1010 b", -0x1010) y("a {:d} b", "a +0o10 b", 8) y("a {:d} b", "a +0b1010 b", 10) y("a {:d} b", "a +0x1010 b", 0x1010) y("a {:o} b", "a +10 b", 8) y("a {:b} b", "a +1010 b", 10) y("a {:x} b", "a +1010 b", 0x1010) def test_two_datetimes(): r = parse.parse("a {:ti} {:ti} b", "a 1997-07-16 2012-08-01 b") assert len(r.fixed) == 2 assert r[0] == datetime(1997, 7, 16) assert r[1] == datetime(2012, 8, 1) def test_flexible_datetimes(): r = parse.parse("a {:%Y-%m-%d} b", "a 1997-07-16 b") assert len(r.fixed) == 1 assert r[0] == date(1997, 7, 16) r = parse.parse("a {:%Y-%b-%d} b", "a 1997-Feb-16 b") assert len(r.fixed) == 1 assert r[0] == date(1997, 2, 16) r = parse.parse("a {:%Y-%b-%d} {:d} b", "a 1997-Feb-16 8 b") assert len(r.fixed) == 2 assert r[0] == date(1997, 2, 16) r = parse.parse("a {my_date:%Y-%b-%d} {num:d} b", "a 1997-Feb-16 8 b") assert (r.named["my_date"]) == date(1997, 2, 16) assert (r.named["num"]) == 8 r = parse.parse("a {:%Y-%B-%d} b", "a 1997-February-16 b") assert r[0] == date(1997, 2, 16) r = parse.parse("a {:%Y%m%d} b", "a 19970716 b") assert r[0] == date(1997, 7, 16) def test_flexible_datetime_with_colon(): r = parse.parse("{dt:%Y-%m-%d %H:%M:%S}", "2023-11-21 13:23:27") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27) def test_datetime_with_various_subsecond_precision(): r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 13:23:27.123456") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 123456) r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 13:23:27.12345") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 123450) r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 13:23:27.1234") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 123400) r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 
13:23:27.123") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 123000) r = parse.parse("{dt:%Y-%m-%d %H:%M:%S.%f}", "2023-11-21 13:23:27.0") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, 0) @pytest.mark.skipif( sys.version_info[0] < 3, reason="Python 3+ required for timezone support" ) def test_flexible_datetime_with_timezone(): from datetime import timezone r = parse.parse("{dt:%Y-%m-%d %H:%M:%S %z}", "2023-11-21 13:23:27 +0000") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, tzinfo=timezone.utc) @pytest.mark.skipif( sys.version_info[0] < 3, reason="Python 3+ required for timezone support" ) def test_flexible_datetime_with_timezone_that_has_colons(): from datetime import timezone r = parse.parse("{dt:%Y-%m-%d %H:%M:%S %z}", "2023-11-21 13:23:27 +00:00:00") assert r.named["dt"] == datetime(2023, 11, 21, 13, 23, 27, tzinfo=timezone.utc) def test_flexible_time(): r = parse.parse("a {time:%H:%M:%S} b", "a 13:23:27 b") assert r.named["time"] == time(13, 23, 27) def test_flexible_time_no_hour(): r = parse.parse("a {time:%M:%S} b", "a 23:27 b") assert r.named["time"] == time(0, 23, 27) def test_flexible_time_ms(): r = parse.parse("a {time:%M:%S:%f} b", "a 23:27:123456 b") assert r.named["time"] == time(0, 23, 27, 123456) def test_flexible_dates_single_digit(): r = parse.parse("{dt:%Y/%m/%d}", "2023/1/1") assert r.named["dt"] == date(2023, 1, 1) def test_flexible_dates_j(): r = parse.parse("{dt:%Y/%j}", "2023/9") assert r.named["dt"] == date(2023, 1, 9) r = parse.parse("{dt:%Y/%j}", "2023/009") assert r.named["dt"] == date(2023, 1, 9) def test_flexible_dates_year_current_year_inferred(): r = parse.parse("{dt:%j}", "9") assert r.named["dt"] == date(datetime.today().year, 1, 9) def test_datetimes(): def y(fmt, s, e, tz=None): p = parse.compile(fmt) r = p.parse(s) assert r is not None r = r.fixed[0] assert r == e assert tz is None or r.tzinfo == tz utc = parse.FixedTzOffset(0, "UTC") assert repr(utc) == "" aest = parse.FixedTzOffset(10 * 
60, "+1000") tz60 = parse.FixedTzOffset(60, "+01:00") # ISO 8660 variants # YYYY-MM-DD (eg 1997-07-16) y("a {:ti} b", "a 1997-07-16 b", datetime(1997, 7, 16)) # YYYY-MM-DDThh:mmTZD (eg 1997-07-16T19:20+01:00) y("a {:ti} b", "a 1997-07-16 19:20 b", datetime(1997, 7, 16, 19, 20, 0)) y("a {:ti} b", "a 1997-07-16T19:20 b", datetime(1997, 7, 16, 19, 20, 0)) y( "a {:ti} b", "a 1997-07-16T19:20Z b", datetime(1997, 7, 16, 19, 20, tzinfo=utc), ) y( "a {:ti} b", "a 1997-07-16T19:20+0100 b", datetime(1997, 7, 16, 19, 20, tzinfo=tz60), ) y( "a {:ti} b", "a 1997-07-16T19:20+01:00 b", datetime(1997, 7, 16, 19, 20, tzinfo=tz60), ) y( "a {:ti} b", "a 1997-07-16T19:20 +01:00 b", datetime(1997, 7, 16, 19, 20, tzinfo=tz60), ) # YYYY-MM-DDThh:mm:ssTZD (eg 1997-07-16T19:20:30+01:00) y("a {:ti} b", "a 1997-07-16 19:20:30 b", datetime(1997, 7, 16, 19, 20, 30)) y("a {:ti} b", "a 1997-07-16T19:20:30 b", datetime(1997, 7, 16, 19, 20, 30)) y( "a {:ti} b", "a 1997-07-16T19:20:30Z b", datetime(1997, 7, 16, 19, 20, 30, tzinfo=utc), ) y( "a {:ti} b", "a 1997-07-16T19:20:30+01:00 b", datetime(1997, 7, 16, 19, 20, 30, tzinfo=tz60), ) y( "a {:ti} b", "a 1997-07-16T19:20:30 +01:00 b", datetime(1997, 7, 16, 19, 20, 30, tzinfo=tz60), ) # YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00) y( "a {:ti} b", "a 1997-07-16 19:20:30.500000 b", datetime(1997, 7, 16, 19, 20, 30, 500000), ) y( "a {:ti} b", "a 1997-07-16T19:20:30.500000 b", datetime(1997, 7, 16, 19, 20, 30, 500000), ) y( "a {:ti} b", "a 1997-07-16T19:20:30.5Z b", datetime(1997, 7, 16, 19, 20, 30, 500000, tzinfo=utc), ) y( "a {:ti} b", "a 1997-07-16T19:20:30.5+01:00 b", datetime(1997, 7, 16, 19, 20, 30, 500000, tzinfo=tz60), ) aest_d = datetime(2011, 11, 21, 10, 21, 36, tzinfo=aest) dt = datetime(2011, 11, 21, 10, 21, 36) dt00 = datetime(2011, 11, 21, 10, 21) d = datetime(2011, 11, 21) # te RFC2822 e-mail format datetime y("a {:te} b", "a Mon, 21 Nov 2011 10:21:36 +1000 b", aest_d) y("a {:te} b", "a Mon, 21 Nov 2011 10:21:36 +10:00 b", 
aest_d) y("a {:te} b", "a 21 Nov 2011 10:21:36 +1000 b", aest_d) # tg global (day/month) format datetime y("a {:tg} b", "a 21/11/2011 10:21:36 AM +1000 b", aest_d) y("a {:tg} b", "a 21/11/2011 10:21:36 AM +10:00 b", aest_d) y("a {:tg} b", "a 21-11-2011 10:21:36 AM +1000 b", aest_d) y("a {:tg} b", "a 21/11/2011 10:21:36 +1000 b", aest_d) y("a {:tg} b", "a 21/11/2011 10:21:36 b", dt) y("a {:tg} b", "a 21/11/2011 10:21 b", dt00) y("a {:tg} b", "a 21-11-2011 b", d) y("a {:tg} b", "a 21-Nov-2011 10:21:36 AM +1000 b", aest_d) y("a {:tg} b", "a 21-November-2011 10:21:36 AM +1000 b", aest_d) # ta US (month/day) format datetime y("a {:ta} b", "a 11/21/2011 10:21:36 AM +1000 b", aest_d) y("a {:ta} b", "a 11/21/2011 10:21:36 AM +10:00 b", aest_d) y("a {:ta} b", "a 11-21-2011 10:21:36 AM +1000 b", aest_d) y("a {:ta} b", "a 11/21/2011 10:21:36 +1000 b", aest_d) y("a {:ta} b", "a 11/21/2011 10:21:36 b", dt) y("a {:ta} b", "a 11/21/2011 10:21 b", dt00) y("a {:ta} b", "a 11-21-2011 b", d) y("a {:ta} b", "a Nov-21-2011 10:21:36 AM +1000 b", aest_d) y("a {:ta} b", "a November-21-2011 10:21:36 AM +1000 b", aest_d) y("a {:ta} b", "a November-21-2011 b", d) # ts Linux System log format datetime y( "a {:ts} b", "a Nov 21 10:21:36 b", datetime(datetime.today().year, 11, 21, 10, 21, 36), ) y( "a {:ts} b", "a Nov 1 10:21:36 b", datetime(datetime.today().year, 11, 1, 10, 21, 36), ) y( "a {:ts} b", "a Nov 1 03:21:36 b", datetime(datetime.today().year, 11, 1, 3, 21, 36), ) # th HTTP log format date/time datetime y("a {:th} b", "a 21/Nov/2011:10:21:36 +1000 b", aest_d) y("a {:th} b", "a 21/Nov/2011:10:21:36 +10:00 b", aest_d) d = datetime(2011, 11, 21, 10, 21, 36) # tc ctime() format datetime y("a {:tc} b", "a Mon Nov 21 10:21:36 2011 b", d) t530 = parse.FixedTzOffset(-5 * 60 - 30, "-5:30") t830 = parse.FixedTzOffset(-8 * 60 - 30, "-8:30") # tt Time time y("a {:tt} b", "a 10:21:36 AM +1000 b", time(10, 21, 36, tzinfo=aest)) y("a {:tt} b", "a 10:21:36 AM +10:00 b", time(10, 21, 36, 
tzinfo=aest)) y("a {:tt} b", "a 10:21:36 AM b", time(10, 21, 36)) y("a {:tt} b", "a 10:21:36 PM b", time(22, 21, 36)) y("a {:tt} b", "a 10:21:36 b", time(10, 21, 36)) y("a {:tt} b", "a 10:21 b", time(10, 21)) y("a {:tt} b", "a 10:21:36 PM -5:30 b", time(22, 21, 36, tzinfo=t530)) y("a {:tt} b", "a 10:21:36 PM -530 b", time(22, 21, 36, tzinfo=t530)) y("a {:tt} b", "a 10:21:36 PM -05:30 b", time(22, 21, 36, tzinfo=t530)) y("a {:tt} b", "a 10:21:36 PM -0530 b", time(22, 21, 36, tzinfo=t530)) y("a {:tt} b", "a 10:21:36 PM -08:30 b", time(22, 21, 36, tzinfo=t830)) y("a {:tt} b", "a 10:21:36 PM -0830 b", time(22, 21, 36, tzinfo=t830)) def test_datetime_group_count(): # test we increment the group count correctly for datetimes r = parse.parse("{:ti} {}", "1972-01-01 spam") assert r.fixed[1] == "spam" r = parse.parse("{:tg} {}", "1-1-1972 spam") assert r.fixed[1] == "spam" r = parse.parse("{:ta} {}", "1-1-1972 spam") assert r.fixed[1] == "spam" r = parse.parse("{:th} {}", "21/Nov/2011:10:21:36 +1000 spam") assert r.fixed[1] == "spam" r = parse.parse("{:te} {}", "21 Nov 2011 10:21:36 +1000 spam") assert r.fixed[1] == "spam" r = parse.parse("{:tc} {}", "Mon Nov 21 10:21:36 2011 spam") assert r.fixed[1] == "spam" r = parse.parse("{:tt} {}", "10:21 spam") assert r.fixed[1] == "spam" def test_mixed_types(): # stress-test: pull one of everything out of a string r = parse.parse( """ letters: {:w} non-letters: {:W} whitespace: "{:s}" non-whitespace: \t{:S}\n digits: {:d} {:d} non-digits: {:D} numbers with thousands: {:n} fixed-point: {:f} floating-point: {:e} general numbers: {:g} {:g} binary: {:b} octal: {:o} hex: {:x} ISO 8601 e.g. {:ti} RFC2822 e.g. {:te} Global e.g. {:tg} US e.g. {:ta} ctime() e.g. {:tc} HTTP e.g. 
{:th} time: {:tt} final value: {} """, """ letters: abcdef_GHIJLK non-letters: !@#%$ *^% whitespace: " \t\n" non-whitespace: \tabc\n digits: 12345 0b1011011 non-digits: abcdef numbers with thousands: 1,000 fixed-point: 100.2345 floating-point: 1.1e-10 general numbers: 1 1.1 binary: 0b1000 octal: 0o1000 hex: 0x1000 ISO 8601 e.g. 1972-01-20T10:21:36Z RFC2822 e.g. Mon, 20 Jan 1972 10:21:36 +1000 Global e.g. 20/1/1972 10:21:36 AM +1:00 US e.g. 1/20/1972 10:21:36 PM +10:30 ctime() e.g. Sun Sep 16 01:03:52 1973 HTTP e.g. 21/Nov/2011:00:07:11 +0000 time: 10:21:36 PM -5:30 final value: spam """, ) assert r is not None assert r.fixed[22] == "spam" def test_mixed_type_variant(): r = parse.parse( """ letters: {:w} non-letters: {:W} whitespace: "{:s}" non-whitespace: \t{:S}\n digits: {:d} non-digits: {:D} numbers with thousands: {:n} fixed-point: {:f} floating-point: {:e} general numbers: {:g} {:g} binary: {:b} octal: {:o} hex: {:x} ISO 8601 e.g. {:ti} RFC2822 e.g. {:te} Global e.g. {:tg} US e.g. {:ta} ctime() e.g. {:tc} HTTP e.g. {:th} time: {:tt} final value: {} """, """ letters: abcdef_GHIJLK non-letters: !@#%$ *^% whitespace: " \t\n" non-whitespace: \tabc\n digits: 0xabcdef non-digits: abcdef numbers with thousands: 1.000.000 fixed-point: 0.00001 floating-point: NAN general numbers: 1.1e10 nan binary: 0B1000 octal: 0O1000 hex: 0X1000 ISO 8601 e.g. 1972-01-20T10:21:36Z RFC2822 e.g. Mon, 20 Jan 1972 10:21:36 +1000 Global e.g. 20/1/1972 10:21:36 AM +1:00 US e.g. 1/20/1972 10:21:36 PM +10:30 ctime() e.g. Sun Sep 16 01:03:52 1973 HTTP e.g. 21/Nov/2011:00:07:11 +0000 time: 10:21:36 PM -5:30 final value: spam """, ) assert r is not None assert r.fixed[21] == "spam" @pytest.mark.skipif(sys.version_info >= (3, 5), reason="Python 3.5 removed the limit of 100 named groups in a regular expression") def test_too_many_fields(): # Python 3.5 removed the limit of 100 named groups in a regular expression, # so only test for the exception if the limit exists. 
p = parse.compile("{:ti}" * 15) with pytest.raises(parse.TooManyFields): p.parse("") def test_letters(): res = parse.parse("{:l}", "") assert res is None res = parse.parse("{:l}", "sPaM") assert res.fixed == ("sPaM",) res = parse.parse("{:l}", "sP4M") assert res is None res = parse.parse("{:l}", "sP_M") assert res is None def test_strftime_strptime_roundtrip(): dt = datetime.now() fmt = "_".join([k for k in parse.dt_format_to_regex if k != "%z"]) s = dt.strftime(fmt) [res] = parse.parse("{:" + fmt + "}", s) assert res == dt def test_parser_format(): parser = parse.compile("hello {}") assert parser.format.format("world") == "hello world" with pytest.raises(AttributeError): parser.format = "hi {}" def test_hyphen_inside_field_name(): # https://github.com/r1chardj0n3s/parse/issues/86 # https://github.com/python-openapi/openapi-core/issues/672 template = "/local/sub/{user-id}/duration" assert parse.Parser(template).named_fields == ["user_id"] string = "https://dummy_server.com/local/sub/1647222638/duration" result = parse.search(template, string) assert result["user-id"] == "1647222638" def test_hyphen_inside_field_name_collision_handling(): template = "/foo/{user-id}/{user_id}/{user.id}/bar/" assert parse.Parser(template).named_fields == ["user_id", "user__id", "user___id"] string = "/foo/1/2/3/bar/" result = parse.search(template, string) assert result["user-id"] == "1" assert result["user_id"] == "2" assert result["user.id"] == "3" parse_type-0.6.4/tests/parse_tests_with_parse_type/test_parsetype.py000066400000000000000000000156161467747561600263450ustar00rootroot00000000000000# -- REPLACE: parse with parse_type.parse from __future__ import absolute_import, print_function from parse_type import parse # -- ORIGINAL_SOURCE_STARTS_HERE: from decimal import Decimal import pytest # DISABLED: import parse def assert_match(parser, text, param_name, expected): result = parser.parse(text) assert result[param_name] == expected def assert_mismatch(parser, text, param_name): 
result = parser.parse(text) assert result is None def assert_fixed_match(parser, text, expected): result = parser.parse(text) assert result.fixed == expected def assert_fixed_mismatch(parser, text): result = parser.parse(text) assert result is None def test_pattern_should_be_used(): def parse_number(text): return int(text) parse_number.pattern = r"\d+" parse_number.name = "Number" # For testing only. extra_types = {parse_number.name: parse_number} format = "Value is {number:Number} and..." parser = parse.Parser(format, extra_types) assert_match(parser, "Value is 42 and...", "number", 42) assert_match(parser, "Value is 00123 and...", "number", 123) assert_mismatch(parser, "Value is ALICE and...", "number") assert_mismatch(parser, "Value is -123 and...", "number") def test_pattern_should_be_used2(): def parse_yesno(text): return parse_yesno.mapping[text.lower()] parse_yesno.mapping = { "yes": True, "no": False, "on": True, "off": False, "true": True, "false": False, } parse_yesno.pattern = r"|".join(parse_yesno.mapping.keys()) parse_yesno.name = "YesNo" # For testing only. extra_types = {parse_yesno.name: parse_yesno} format = "Answer: {answer:YesNo}" parser = parse.Parser(format, extra_types) # -- ENSURE: Known enum values are correctly extracted. for value_name, value in parse_yesno.mapping.items(): text = "Answer: %s" % value_name assert_match(parser, text, "answer", value) # -- IGNORE-CASE: In parsing, calls type converter function !!! 
assert_match(parser, "Answer: YES", "answer", True) assert_mismatch(parser, "Answer: __YES__", "answer") def test_with_pattern(): ab_vals = {"a": 1, "b": 2} @parse.with_pattern(r"[ab]") def ab(text): return ab_vals[text] parser = parse.Parser("test {result:ab}", {"ab": ab}) assert_match(parser, "test a", "result", 1) assert_match(parser, "test b", "result", 2) assert_mismatch(parser, "test c", "result") def test_with_pattern_and_regex_group_count(): # -- SPECIAL-CASE: Regex-grouping is used in user-defined type # NOTE: Missing or wroung regex_group_counts cause problems # with parsing following params. @parse.with_pattern(r"(meter|kilometer)", regex_group_count=1) def parse_unit(text): return text.strip() @parse.with_pattern(r"\d+") def parse_number(text): return int(text) type_converters = {"Number": parse_number, "Unit": parse_unit} # -- CASE: Unnamed-params (affected) parser = parse.Parser("test {:Unit}-{:Number}", type_converters) assert_fixed_match(parser, "test meter-10", ("meter", 10)) assert_fixed_match(parser, "test kilometer-20", ("kilometer", 20)) assert_fixed_mismatch(parser, "test liter-30") # -- CASE: Named-params (uncritical; should not be affected) # REASON: Named-params have additional, own grouping. parser2 = parse.Parser("test {unit:Unit}-{value:Number}", type_converters) assert_match(parser2, "test meter-10", "unit", "meter") assert_match(parser2, "test meter-10", "value", 10) assert_match(parser2, "test kilometer-20", "unit", "kilometer") assert_match(parser2, "test kilometer-20", "value", 20) assert_mismatch(parser2, "test liter-30", "unit") def test_with_pattern_and_wrong_regex_group_count_raises_error(): # -- SPECIAL-CASE: # Regex-grouping is used in user-defined type, but wrong value is provided. 
@parse.with_pattern(r"(meter|kilometer)", regex_group_count=1) def parse_unit(text): return text.strip() @parse.with_pattern(r"\d+") def parse_number(text): return int(text) # -- CASE: Unnamed-params (affected) BAD_REGEX_GROUP_COUNTS_AND_ERRORS = [ (None, ValueError), (0, ValueError), (2, IndexError), ] for bad_regex_group_count, error_class in BAD_REGEX_GROUP_COUNTS_AND_ERRORS: parse_unit.regex_group_count = bad_regex_group_count # -- OVERRIDE-HERE type_converters = {"Number": parse_number, "Unit": parse_unit} parser = parse.Parser("test {:Unit}-{:Number}", type_converters) with pytest.raises(error_class): parser.parse("test meter-10") def test_with_pattern_and_regex_group_count_is_none(): # -- CORNER-CASE: Increase code-coverage. data_values = {"a": 1, "b": 2} @parse.with_pattern(r"[ab]") def parse_data(text): return data_values[text] parse_data.regex_group_count = None # ENFORCE: None # -- CASE: Unnamed-params parser = parse.Parser("test {:Data}", {"Data": parse_data}) assert_fixed_match(parser, "test a", (1,)) assert_fixed_match(parser, "test b", (2,)) assert_fixed_mismatch(parser, "test c") # -- CASE: Named-params parser2 = parse.Parser("test {value:Data}", {"Data": parse_data}) assert_match(parser2, "test a", "value", 1) assert_match(parser2, "test b", "value", 2) assert_mismatch(parser2, "test c", "value") def test_case_sensitivity(): r = parse.parse("SPAM {} SPAM", "spam spam spam") assert r[0] == "spam" assert parse.parse("SPAM {} SPAM", "spam spam spam", case_sensitive=True) is None def test_decimal_value(): value = Decimal("5.5") str_ = "test {}".format(value) parser = parse.Parser("test {:F}") assert parser.parse(str_)[0] == value def test_width_str(): res = parse.parse("{:.2}{:.2}", "look") assert res.fixed == ("lo", "ok") res = parse.parse("{:2}{:2}", "look") assert res.fixed == ("lo", "ok") res = parse.parse("{:4}{}", "look at that") assert res.fixed == ("look", " at that") def test_width_constraints(): res = parse.parse("{:4}", "looky") assert 
res.fixed == ("looky",) res = parse.parse("{:4.4}", "looky") assert res is None res = parse.parse("{:4.4}", "ook") assert res is None res = parse.parse("{:4}{:.4}", "look at that") assert res.fixed == ("look at ", "that") def test_width_multi_int(): res = parse.parse("{:02d}{:02d}", "0440") assert res.fixed == (4, 40) res = parse.parse("{:03d}{:d}", "04404") assert res.fixed == (44, 4) def test_width_empty_input(): res = parse.parse("{:.2}", "") assert res is None res = parse.parse("{:2}", "l") assert res is None res = parse.parse("{:2d}", "") assert res is None def test_int_convert_stateless_base(): parser = parse.Parser("{:d}") assert parser.parse("1234")[0] == 1234 assert parser.parse("0b1011")[0] == 0b1011 parse_type-0.6.4/tests/parse_tests_with_parse_type/test_pattern.py000066400000000000000000000060601467747561600257770ustar00rootroot00000000000000# -- REPLACE: parse with parse_type.parse from __future__ import absolute_import, print_function from parse_type import parse # -- ORIGINAL_SOURCE_STARTS_HERE: import pytest # DISABLED: import parse def _test_expression(format, expression): assert parse.Parser(format)._expression == expression def test_braces(): # pull a simple string out of another string _test_expression("{{ }}", r"\{ \}") def test_fixed(): # pull a simple string out of another string _test_expression("{}", r"(.+?)") _test_expression("{} {}", r"(.+?) (.+?)") def test_named(): # pull a named string out of another string _test_expression("{name}", r"(?P.+?)") _test_expression("{name} {other}", r"(?P.+?) (?P.+?)") def test_named_typed(): # pull a named string out of another string _test_expression("{name:w}", r"(?P\w+)") _test_expression("{name:w} {other:w}", r"(?P\w+) (?P\w+)") def test_numbered(): _test_expression("{0}", r"(.+?)") _test_expression("{0} {1}", r"(.+?) 
(.+?)") _test_expression("{0:f} {1:f}", r"([-+ ]?\d*\.\d+) ([-+ ]?\d*\.\d+)") def test_bird(): # skip some trailing whitespace _test_expression("{:>}", r" *(.+?)") def test_format_variety(): def _(fmt, matches): d = parse.extract_format(fmt, {"spam": "spam"}) for k in matches: assert d.get(k) == matches[k] for t in "%obxegfdDwWsS": _(t, {"type": t}) _("10" + t, {"type": t, "width": "10"}) _("05d", {"type": "d", "width": "5", "zero": True}) _("<", {"align": "<"}) _(".<", {"align": "<", "fill": "."}) _(">", {"align": ">"}) _(".>", {"align": ">", "fill": "."}) _("^", {"align": "^"}) _(".^", {"align": "^", "fill": "."}) _("x=d", {"type": "d", "align": "=", "fill": "x"}) _("d", {"type": "d"}) _("ti", {"type": "ti"}) _("spam", {"type": "spam"}) _(".^010d", {"type": "d", "width": "10", "align": "^", "fill": ".", "zero": True}) _(".2f", {"type": "f", "precision": "2"}) _("10.2f", {"type": "f", "width": "10", "precision": "2"}) def test_dot_separated_fields(): # this should just work and provide the named value res = parse.parse("{hello.world}_{jojo.foo.baz}_{simple}", "a_b_c") assert res.named["hello.world"] == "a" assert res.named["jojo.foo.baz"] == "b" assert res.named["simple"] == "c" def test_dict_style_fields(): res = parse.parse("{hello[world]}_{hello[foo][baz]}_{simple}", "a_b_c") assert res.named["hello"]["world"] == "a" assert res.named["hello"]["foo"]["baz"] == "b" assert res.named["simple"] == "c" def test_dot_separated_fields_name_collisions(): # this should just work and provide the named value res = parse.parse("{a_.b}_{a__b}_{a._b}_{a___b}", "a_b_c_d") assert res.named["a_.b"] == "a" assert res.named["a__b"] == "b" assert res.named["a._b"] == "c" assert res.named["a___b"] == "d" def test_invalid_groupnames_are_handled_gracefully(): with pytest.raises(NotImplementedError): parse.parse("{hello['world']}", "doesn't work") 
parse_type-0.6.4/tests/parse_tests_with_parse_type/test_result.py000066400000000000000000000017311467747561600256400ustar00rootroot00000000000000# -- REPLACE: parse with parse_type.parse from __future__ import absolute_import, print_function from parse_type import parse # -- ORIGINAL_SOURCE_STARTS_HERE: import pytest # DISABLED: import parse def test_fixed_access(): r = parse.Result((1, 2), {}, None) assert r[0] == 1 assert r[1] == 2 with pytest.raises(IndexError): r[2] with pytest.raises(KeyError): r["spam"] def test_slice_access(): r = parse.Result((1, 2, 3, 4), {}, None) assert r[1:3] == (2, 3) assert r[-5:5] == (1, 2, 3, 4) assert r[:4:2] == (1, 3) assert r[::-2] == (4, 2) assert r[5:10] == () def test_named_access(): r = parse.Result((), {"spam": "ham"}, None) assert r["spam"] == "ham" with pytest.raises(KeyError): r["ham"] with pytest.raises(IndexError): r[0] def test_contains(): r = parse.Result(("cat",), {"spam": "ham"}, None) assert "spam" in r assert "cat" not in r assert "ham" not in r parse_type-0.6.4/tests/parse_tests_with_parse_type/test_search.py000066400000000000000000000012541467747561600255670ustar00rootroot00000000000000# -- REPLACE: parse with parse_type.parse from __future__ import absolute_import, print_function from parse_type import parse # -- ORIGINAL_SOURCE_STARTS_HERE: # import parse def test_basic(): r = parse.search("a {} c", " a b c ") assert r.fixed == ("b",) def test_multiline(): r = parse.search("age: {:d}\n", "name: Rufus\nage: 42\ncolor: red\n") assert r.fixed == (42,) def test_pos(): r = parse.search("a {} c", " a b c ", 2) assert r is None def test_no_evaluate_result(): match = parse.search( "age: {:d}\n", "name: Rufus\nage: 42\ncolor: red\n", evaluate_result=False ) r = match.evaluate_result() assert r.fixed == (42,) parse_type-0.6.4/tests/parse_type_test.py000077500000000000000000000122001467747561600206470ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import from parse_type import 
TypeBuilder from enum import Enum try: import unittest2 as unittest except ImportError: import unittest # ----------------------------------------------------------------------------- # TEST SUPPORT FOR: TypeBuilder Tests # ----------------------------------------------------------------------------- # -- PROOF-OF-CONCEPT DATATYPE: def parse_number(text): return int(text) parse_number.pattern = r"\d+" # Provide better regexp pattern than default. parse_number.name = "Number" # For testing only. # -- ENUM DATATYPE: parse_yesno = TypeBuilder.make_enum({ "yes": True, "no": False, "on": True, "off": False, "true": True, "false": False, }) parse_yesno.name = "YesNo" # For testing only. # -- ENUM CLASS: class Color(Enum): red = 1 green = 2 blue = 3 parse_color = TypeBuilder.make_enum(Color) parse_color.name = "Color" # -- CHOICE DATATYPE: parse_person_choice = TypeBuilder.make_choice(["Alice", "Bob", "Charly"]) parse_person_choice.name = "PersonChoice" # For testing only. # ----------------------------------------------------------------------------- # ABSTRACT TEST CASE: # ----------------------------------------------------------------------------- class TestCase(unittest.TestCase): # -- PYTHON VERSION BACKWARD-COMPATIBILTY: if not hasattr(unittest.TestCase, "assertIsNone"): def assertIsNone(self, obj, msg=None): self.assert_(obj is None, msg) def assertIsNotNone(self, obj, msg=None): self.assert_(obj is not None, msg) class ParseTypeTestCase(TestCase): """ Common test case base class for :mod:`parse_type` tests. """ def assert_match(self, parser, text, param_name, expected): """ Check that a parser can parse the provided text and extracts the expected value for a parameter. :param parser: Parser to use :param text: Text to parse :param param_name: Name of parameter :param expected: Expected value of parameter. :raise: AssertionError on failures. 
""" result = parser.parse(text) self.assertIsNotNone(result) self.assertEqual(result[param_name], expected) def assert_mismatch(self, parser, text, param_name=None): """ Check that a parser cannot extract the parameter from the provided text. A parse mismatch has occured. :param parser: Parser to use :param text: Text to parse :param param_name: Name of parameter :raise: AssertionError on failures. """ result = parser.parse(text) self.assertIsNone(result) def ensure_can_parse_all_enum_values(self, parser, type_converter, schema, name): # -- ENSURE: Known enum values are correctly extracted. for value_name, value in type_converter.mappings.items(): text = schema % value_name self.assert_match(parser, text, name, value) def ensure_can_parse_all_choices(self, parser, type_converter, schema, name): transform = getattr(type_converter, "transform", None) for choice_value in type_converter.choices: text = schema % choice_value expected_value = choice_value if transform: assert callable(transform) expected_value = transform(choice_value) self.assert_match(parser, text, name, expected_value) def ensure_can_parse_all_choices2(self, parser, type_converter, schema, name): transform = getattr(type_converter, "transform", None) for index, choice_value in enumerate(type_converter.choices): text = schema % choice_value if transform: assert callable(transform) expected_value = (index, transform(choice_value)) else: expected_value = (index, choice_value) self.assert_match(parser, text, name, expected_value) # Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do 
so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. parse_type-0.6.4/tests/test_builder.py000077500000000000000000000557121467747561600201410ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test suite for parse_type.py REQUIRES: parse >= 1.8.4 ('pattern' attribute support) """ from __future__ import absolute_import import re import unittest import parse from .parse_type_test import ParseTypeTestCase from .parse_type_test \ import parse_number, parse_yesno, parse_person_choice, parse_color, Color from parse_type import TypeBuilder, build_type_dict from enum import Enum # ----------------------------------------------------------------------------- # TEST CASE: TestTypeBuilder4Enum # ----------------------------------------------------------------------------- class TestTypeBuilder4Enum(ParseTypeTestCase): TYPE_CONVERTERS = [ parse_yesno ] def test_parse_enum_yesno(self): extra_types = build_type_dict([ parse_yesno ]) schema = "Answer: {answer:YesNo}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.ensure_can_parse_all_enum_values(parser, parse_yesno, "Answer: %s", "answer") # -- VALID: self.assert_match(parser, "Answer: yes", "answer", True) self.assert_match(parser, "Answer: no", "answer", False) # -- IGNORE-CASE: In parsing, calls type converter function !!! 
self.assert_match(parser, "Answer: YES", "answer", True) # -- PARSE MISMATCH: self.assert_mismatch(parser, "Answer: __YES__", "answer") self.assert_mismatch(parser, "Answer: yes ", "answer") self.assert_mismatch(parser, "Answer: yes ZZZ", "answer") def test_make_enum_with_dict(self): parse_nword = TypeBuilder.make_enum({"one": 1, "two": 2, "three": 3}) parse_nword.name = "NumberAsWord" extra_types = build_type_dict([ parse_nword ]) schema = "Answer: {number:NumberAsWord}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.ensure_can_parse_all_enum_values(parser, parse_nword, "Answer: %s", "number") # -- VALID: self.assert_match(parser, "Answer: one", "number", 1) self.assert_match(parser, "Answer: two", "number", 2) # -- IGNORE-CASE: In parsing, calls type converter function !!! self.assert_match(parser, "Answer: THREE", "number", 3) # -- PARSE MISMATCH: self.assert_mismatch(parser, "Answer: __one__", "number") self.assert_mismatch(parser, "Answer: one ", "number") self.assert_mismatch(parser, "Answer: one_", "number") self.assert_mismatch(parser, "Answer: one ZZZ", "number") def test_make_enum_with_enum_class(self): """ Use :meth:`parse_type.TypeBuilder.make_enum()` with enum34 classes. """ class Color(Enum): red = 1 green = 2 blue = 3 parse_color = TypeBuilder.make_enum(Color) parse_color.name = "Color" schema = "Answer: {color:Color}" parser = parse.Parser(schema, dict(Color=parse_color)) # -- PERFORM TESTS: self.ensure_can_parse_all_enum_values(parser, parse_color, "Answer: %s", "color") # -- VALID: self.assert_match(parser, "Answer: red", "color", Color.red) self.assert_match(parser, "Answer: green", "color", Color.green) self.assert_match(parser, "Answer: blue", "color", Color.blue) # -- IGNORE-CASE: In parsing, calls type converter function !!! 
self.assert_match(parser, "Answer: RED", "color", Color.red) # -- PARSE MISMATCH: self.assert_mismatch(parser, "Answer: __RED__", "color") self.assert_mismatch(parser, "Answer: red ", "color") self.assert_mismatch(parser, "Answer: redx", "color") self.assert_mismatch(parser, "Answer: redx ZZZ", "color") # ----------------------------------------------------------------------------- # TEST CASE: TestTypeBuilder4Choice # ----------------------------------------------------------------------------- class TestTypeBuilder4Choice(ParseTypeTestCase): def test_parse_choice_persons(self): extra_types = build_type_dict([ parse_person_choice ]) schema = "Answer: {answer:PersonChoice}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "Answer: Alice", "answer", "Alice") self.assert_match(parser, "Answer: Bob", "answer", "Bob") self.ensure_can_parse_all_choices(parser, parse_person_choice, "Answer: %s", "answer") # -- IGNORE-CASE: In parsing, calls type converter function !!! 
# SKIP-WART: self.assert_match(parser, "Answer: BOB", "answer", "BOB") # -- PARSE MISMATCH: self.assert_mismatch(parser, "Answer: __Alice__", "answer") self.assert_mismatch(parser, "Answer: Alice ", "answer") self.assert_mismatch(parser, "Answer: Alice ZZZ", "answer") def test_make_choice(self): parse_choice = TypeBuilder.make_choice(["one", "two", "three"]) parse_choice.name = "NumberWordChoice" extra_types = build_type_dict([ parse_choice ]) schema = "Answer: {answer:NumberWordChoice}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "Answer: one", "answer", "one") self.assert_match(parser, "Answer: two", "answer", "two") self.ensure_can_parse_all_choices(parser, parse_choice, "Answer: %s", "answer") # -- PARSE MISMATCH: self.assert_mismatch(parser, "Answer: __one__", "answer") self.assert_mismatch(parser, "Answer: one ", "answer") self.assert_mismatch(parser, "Answer: one ZZZ", "answer") def test_make_choice__anycase_accepted_case_sensitity(self): # -- NOTE: strict=False => Disable errors due to case-mismatch. parse_choice = TypeBuilder.make_choice(["one", "two", "three"], strict=False) schema = "Answer: {answer:NumberWordChoice}" parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice)) # -- PERFORM TESTS: # NOTE: Parser uses re.IGNORECASE flag => Any case accepted. self.assert_match(parser, "Answer: one", "answer", "one") self.assert_match(parser, "Answer: TWO", "answer", "TWO") self.assert_match(parser, "Answer: Three", "answer", "Three") def test_make_choice__samecase_match_or_error(self): # -- NOTE: strict=True => Enable errors due to case-mismatch. parse_choice = TypeBuilder.make_choice(["One", "TWO", "three"], strict=True) schema = "Answer: {answer:NumberWordChoice}" parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice)) # -- PERFORM TESTS: Case matches. # NOTE: Parser uses re.IGNORECASE flag => Any case accepted. 
self.assert_match(parser, "Answer: One", "answer", "One") self.assert_match(parser, "Answer: TWO", "answer", "TWO") self.assert_match(parser, "Answer: three", "answer", "three") # -- PERFORM TESTS: EXACT-CASE MISMATCH case_mismatch_input_data = ["one", "ONE", "Two", "two", "Three" ] for input_value in case_mismatch_input_data: input_text = "Answer: %s" % input_value with self.assertRaises(ValueError): parser.parse(input_text) def test_make_choice__anycase_accepted_lowercase_enforced(self): # -- NOTE: strict=True => Enable errors due to case-mismatch. parse_choice = TypeBuilder.make_choice(["one", "two", "three"], transform=lambda x: x.lower(), strict=True) schema = "Answer: {answer:NumberWordChoice}" parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice)) # -- PERFORM TESTS: # NOTE: Parser uses re.IGNORECASE flag # => Any case accepted, but result is in lower case. self.assert_match(parser, "Answer: one", "answer", "one") self.assert_match(parser, "Answer: TWO", "answer", "two") self.assert_match(parser, "Answer: Three", "answer", "three") def test_make_choice__with_transform(self): transform = lambda x: x.upper() parse_choice = TypeBuilder.make_choice(["ONE", "two", "Three"], transform) self.assertSequenceEqual(parse_choice.choices, ["ONE", "TWO", "THREE"]) schema = "Answer: {answer:NumberWordChoice}" parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice)) # -- PERFORM TESTS: self.assert_match(parser, "Answer: one", "answer", "ONE") self.assert_match(parser, "Answer: two", "answer", "TWO") self.ensure_can_parse_all_choices(parser, parse_choice, "Answer: %s", "answer") # -- PARSE MISMATCH: self.assert_mismatch(parser, "Answer: __one__", "answer") self.assert_mismatch(parser, "Answer: one ", "answer") self.assert_mismatch(parser, "Answer: one ZZZ", "answer") def test_make_choice2(self): # -- strict=False: Disable errors due to case mismatch. 
parse_choice2 = TypeBuilder.make_choice2(["zero", "one", "two"], strict=False) parse_choice2.name = "NumberWordChoice2" extra_types = build_type_dict([ parse_choice2 ]) schema = "Answer: {answer:NumberWordChoice2}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "Answer: zero", "answer", (0, "zero")) self.assert_match(parser, "Answer: one", "answer", (1, "one")) self.assert_match(parser, "Answer: two", "answer", (2, "two")) self.ensure_can_parse_all_choices2(parser, parse_choice2, "Answer: %s", "answer") # -- PARSE MISMATCH: self.assert_mismatch(parser, "Answer: __one__", "answer") self.assert_mismatch(parser, "Answer: one ", "answer") self.assert_mismatch(parser, "Answer: one ZZZ", "answer") def test_make_choice2__with_transform(self): transform = lambda x: x.lower() parse_choice2 = TypeBuilder.make_choice2(["ZERO", "one", "Two"], transform=transform) self.assertSequenceEqual(parse_choice2.choices, ["zero", "one", "two"]) schema = "Answer: {answer:NumberWordChoice}" parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice2)) # -- PERFORM TESTS: # NOTE: Parser uses re.IGNORECASE => Any case is accepted. self.assert_match(parser, "Answer: zERO", "answer", (0, "zero")) self.assert_match(parser, "Answer: ONE", "answer", (1, "one")) self.assert_match(parser, "Answer: Two", "answer", (2, "two")) def test_make_choice2__samecase_match_or_error(self): # -- NOTE: strict=True => Enable errors due to case-mismatch. parse_choice2 = TypeBuilder.make_choice2(["Zero", "one", "TWO"], strict=True) schema = "Answer: {answer:NumberWordChoice}" parser = parse.Parser(schema, dict(NumberWordChoice=parse_choice2)) # -- PERFORM TESTS: Case matches. # NOTE: Parser uses re.IGNORECASE flag => Any case accepted. 
self.assert_match(parser, "Answer: Zero", "answer", (0, "Zero")) self.assert_match(parser, "Answer: one", "answer", (1, "one")) self.assert_match(parser, "Answer: TWO", "answer", (2, "TWO")) # -- PERFORM TESTS: EXACT-CASE MISMATCH case_mismatch_input_data = ["zero", "ZERO", "One", "ONE", "two" ] for input_value in case_mismatch_input_data: input_text = "Answer: %s" % input_value with self.assertRaises(ValueError): parser.parse(input_text) # ----------------------------------------------------------------------------- # TEST CASE: TestTypeBuilder4Variant # ----------------------------------------------------------------------------- class TestTypeBuilder4Variant(ParseTypeTestCase): TYPE_CONVERTERS = [ parse_number, parse_yesno ] def check_parse_variant_number_or_yesno(self, parse_variant, with_ignorecase=True): schema = "Variant: {variant:YesNo_or_Number}" parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant)) # -- TYPE 1: YesNo self.assert_match(parser, "Variant: yes", "variant", True) self.assert_match(parser, "Variant: no", "variant", False) # -- IGNORECASE problem => re_opts if with_ignorecase: self.assert_match(parser, "Variant: YES", "variant", True) # -- TYPE 2: Number self.assert_match(parser, "Variant: 0", "variant", 0) self.assert_match(parser, "Variant: 1", "variant", 1) self.assert_match(parser, "Variant: 12", "variant", 12) self.assert_match(parser, "Variant: 42", "variant", 42) # -- PARSE MISMATCH: self.assert_mismatch(parser, "Variant: __YES__") self.assert_mismatch(parser, "Variant: yes ") self.assert_mismatch(parser, "Variant: yes ZZZ") self.assert_mismatch(parser, "Variant: -1") # -- PERFORM TESTS: self.ensure_can_parse_all_enum_values(parser, parse_yesno, "Variant: %s", "variant") def test_make_variant__uncompiled(self): type_converters = [parse_yesno, parse_number] parse_variant1 = TypeBuilder.make_variant(type_converters) self.check_parse_variant_number_or_yesno(parse_variant1) def test_make_variant__compiled(self): # -- REVERSED 
ORDER VARIANT: type_converters = [parse_number, parse_yesno] parse_variant2 = TypeBuilder.make_variant(type_converters, compiled=True) self.check_parse_variant_number_or_yesno(parse_variant2) def test_make_variant__with_re_opts_0(self): # -- SKIP: IGNORECASE checks which would raise an error in strict mode. type_converters = [parse_number, parse_yesno] parse_variant3 = TypeBuilder.make_variant(type_converters, re_opts=0) self.check_parse_variant_number_or_yesno(parse_variant3, with_ignorecase=False) def test_make_variant__with_re_opts_IGNORECASE(self): type_converters = [parse_number, parse_yesno] parse_variant3 = TypeBuilder.make_variant(type_converters, re_opts=re.IGNORECASE) self.check_parse_variant_number_or_yesno(parse_variant3) def test_make_variant__with_strict(self): # -- SKIP: IGNORECASE checks which would raise an error in strict mode. type_converters = [parse_number, parse_yesno] parse_variant = TypeBuilder.make_variant(type_converters, strict=True) self.check_parse_variant_number_or_yesno(parse_variant, with_ignorecase=False) def test_make_variant__with_strict_raises_error_on_case_mismatch(self): # -- NEEDS: # * re_opts=0 (IGNORECASE disabled) # * strict=True, allow that an error is raised type_converters = [parse_number, parse_yesno] parse_variant = TypeBuilder.make_variant(type_converters, strict=True, re_opts=0) schema = "Variant: {variant:YesNo_or_Number}" parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant)) self.assertRaises(AssertionError, parser.parse, "Variant: YES") def test_make_variant__without_strict_may_return_none_on_case_mismatch(self): # -- NEEDS: # * re_opts=0 (IGNORECASE disabled) # * strict=False, otherwise an error is raised type_converters = [parse_number, parse_yesno] parse_variant = TypeBuilder.make_variant(type_converters, re_opts=0, strict=False) schema = "Variant: {variant:YesNo_or_Number}" parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant)) result = parser.parse("Variant: No") 
self.assertNotEqual(result, None) self.assertEqual(result["variant"], None) def test_make_variant__with_strict_and_compiled_raises_error_on_case_mismatch(self): # XXX re_opts=0 seems to work differently. # -- NEEDS: # * re_opts=0 (IGNORECASE disabled) # * strict=True, allow that an error is raised type_converters = [parse_number, parse_yesno] # -- ENSURE: coverage for cornercase. parse_number.matcher = re.compile(parse_number.pattern) parse_variant = TypeBuilder.make_variant(type_converters, compiled=True, re_opts=0, strict=True) schema = "Variant: {variant:YesNo_or_Number}" parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant)) # XXX self.assertRaises(AssertionError, parser.parse, "Variant: YES") result = parser.parse("Variant: Yes") self.assertNotEqual(result, None) self.assertEqual(result["variant"], True) def test_make_variant__without_strict_and_compiled_may_return_none_on_case_mismatch(self): # XXX re_opts=0 seems to work differently. # -- NEEDS: # * re_opts=0 (IGNORECASE disabled) # * strict=False, otherwise an error is raised type_converters = [parse_number, parse_yesno] parse_variant = TypeBuilder.make_variant(type_converters, compiled=True, re_opts=0, strict=True) schema = "Variant: {variant:YesNo_or_Number}" parser = parse.Parser(schema, dict(YesNo_or_Number=parse_variant)) result = parser.parse("Variant: NO") self.assertNotEqual(result, None) self.assertEqual(result["variant"], False) def test_make_variant__with_color_or_person(self): type_converters = [parse_color, parse_person_choice] parse_variant2 = TypeBuilder.make_variant(type_converters) schema = "Variant2: {variant:Color_or_Person}" parser = parse.Parser(schema, dict(Color_or_Person=parse_variant2)) # -- TYPE 1: Color self.assert_match(parser, "Variant2: red", "variant", Color.red) self.assert_match(parser, "Variant2: blue", "variant", Color.blue) # -- TYPE 2: Person self.assert_match(parser, "Variant2: Alice", "variant", "Alice") self.assert_match(parser, "Variant2: Bob", "variant", 
"Bob") self.assert_match(parser, "Variant2: Charly", "variant", "Charly") # -- PARSE MISMATCH: self.assert_mismatch(parser, "Variant2: __Alice__") self.assert_mismatch(parser, "Variant2: Alice ") self.assert_mismatch(parser, "Variant2: Alice2") self.assert_mismatch(parser, "Variant2: red2") # -- PERFORM TESTS: self.ensure_can_parse_all_enum_values(parser, parse_color, "Variant2: %s", "variant") self.ensure_can_parse_all_choices(parser, parse_person_choice, "Variant2: %s", "variant") class TestParserWithManyTypedFields(ParseTypeTestCase): parse_variant1 = TypeBuilder.make_variant([parse_number, parse_yesno]) parse_variant1.name = "Number_or_YesNo" parse_variant2 = TypeBuilder.make_variant([parse_color, parse_person_choice]) parse_variant2.name = "Color_or_PersonChoice" TYPE_CONVERTERS = [ parse_number, parse_yesno, parse_color, parse_person_choice, parse_variant1, parse_variant2, ] def test_parse_with_many_named_fields(self): type_dict = build_type_dict(self.TYPE_CONVERTERS) schema = """\ Number: {number:Number} YesNo: {answer:YesNo} Color: {color:Color} Person: {person:PersonChoice} Variant1: {variant1:Number_or_YesNo} Variant2: {variant2:Color_or_PersonChoice} """ parser = parse.Parser(schema, type_dict) text = """\ Number: 12 YesNo: yes Color: red Person: Alice Variant1: 42 Variant2: Bob """ expected = dict( number=12, answer=True, color=Color.red, person="Alice", variant1=42, variant2="Bob" ) result = parser.parse(text) self.assertIsNotNone(result) self.assertEqual(result.named, expected) def test_parse_with_many_unnamed_fields(self): type_dict = build_type_dict(self.TYPE_CONVERTERS) schema = """\ Number: {:Number} YesNo: {:YesNo} Color: {:Color} Person: {:PersonChoice} """ # -- OMIT: XFAIL, due to group_index delta counting => Parser problem. 
# Variant2: {:Color_or_PersonChoice} # Variant1: {:Number_or_YesNo} parser = parse.Parser(schema, type_dict) text = """\ Number: 12 YesNo: yes Color: red Person: Alice """ # SKIP: Variant2: Bob # SKIP: Variant1: 42 expected = [ 12, True, Color.red, "Alice", ] # -- SKIP: "Bob", 42 ] result = parser.parse(text) self.assertIsNotNone(result) self.assertEqual(result.fixed, tuple(expected)) def test_parse_with_many_unnamed_fields_with_variants(self): type_dict = build_type_dict(self.TYPE_CONVERTERS) schema = """\ Number: {:Number} YesNo: {:YesNo} Color: {:Color} Person: {:PersonChoice} Variant2: {:Color_or_PersonChoice} Variant1: {:Number_or_YesNo} """ # -- OMIT: XFAIL, due to group_index delta counting => Parser problem. parser = parse.Parser(schema, type_dict) text = """\ Number: 12 YesNo: yes Color: red Person: Alice Variant2: Bob Variant1: 42 """ expected = [ 12, True, Color.red, "Alice", "Bob", 42 ] result = parser.parse(text) self.assertIsNotNone(result) self.assertEqual(result.fixed, tuple(expected)) # ----------------------------------------------------------------------------- # MAIN: # ----------------------------------------------------------------------------- if __name__ == '__main__': unittest.main() # Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. parse_type-0.6.4/tests/test_cardinality.py000077500000000000000000000565411467747561600210170ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test suite to test the :mod:`parse_type.cardinality` module. """ from __future__ import absolute_import from .parse_type_test import ParseTypeTestCase, parse_number from parse_type import Cardinality, TypeBuilder, build_type_dict from parse import Parser import parse import unittest # ----------------------------------------------------------------------------- # TEST CASE: TestCardinality # ----------------------------------------------------------------------------- class TestCardinality(ParseTypeTestCase): def test_enum_basics(self): assert Cardinality.optional is Cardinality.zero_or_one assert Cardinality.many0 is Cardinality.zero_or_more assert Cardinality.many is Cardinality.one_or_more def check_pattern_for_cardinality_one(self, pattern, new_pattern): expected_pattern = Cardinality.one.make_pattern(pattern) self.assertEqual(pattern, new_pattern) self.assertEqual(new_pattern, expected_pattern) def check_pattern_for_cardinality_zero_or_one(self, pattern, new_pattern): expected_pattern = Cardinality.zero_or_one.schema % pattern self.assertNotEqual(pattern, new_pattern) self.assertEqual(new_pattern, expected_pattern) def check_pattern_for_cardinality_zero_or_more(self, pattern, new_pattern): expected_pattern = Cardinality.zero_or_more.make_pattern(pattern) self.assertNotEqual(pattern, new_pattern) 
self.assertEqual(new_pattern, expected_pattern) def check_pattern_for_cardinality_one_or_more(self, pattern, new_pattern): expected_pattern = Cardinality.one_or_more.make_pattern(pattern) self.assertNotEqual(pattern, new_pattern) self.assertEqual(new_pattern, expected_pattern) def check_pattern_for_cardinality_optional(self, pattern, new_pattern): expected = Cardinality.optional.make_pattern(pattern) self.assertEqual(new_pattern, expected) self.check_pattern_for_cardinality_zero_or_one(pattern, new_pattern) def check_pattern_for_cardinality_many0(self, pattern, new_pattern): expected = Cardinality.many0.make_pattern(pattern) self.assertEqual(new_pattern, expected) self.check_pattern_for_cardinality_zero_or_more(pattern, new_pattern) def check_pattern_for_cardinality_many(self, pattern, new_pattern): expected = Cardinality.many.make_pattern(pattern) self.assertEqual(new_pattern, expected) self.check_pattern_for_cardinality_one_or_more(pattern, new_pattern) def test_make_pattern(self): data = [ (Cardinality.one, r"\d+", r"\d+"), (Cardinality.one, r"\w+", None), (Cardinality.zero_or_one, r"\w+", None), (Cardinality.one_or_more, r"\w+", None), (Cardinality.optional, "XXX", Cardinality.zero_or_one.make_pattern("XXX")), (Cardinality.many0, "XXX", Cardinality.zero_or_more.make_pattern("XXX")), (Cardinality.many, "XXX", Cardinality.one_or_more.make_pattern("XXX")), ] for cardinality, pattern, expected_pattern in data: if expected_pattern is None: expected_pattern = cardinality.make_pattern(pattern) new_pattern = cardinality.make_pattern(pattern) self.assertEqual(new_pattern, expected_pattern) name = cardinality.name checker = getattr(self, "check_pattern_for_cardinality_%s" % name) checker(pattern, new_pattern) def test_make_pattern_for_zero_or_one(self): patterns = [r"\d", r"\d+", r"\w+", r"XXX" ] expecteds = [r"(\d)?", r"(\d+)?", r"(\w+)?", r"(XXX)?" 
] for pattern, expected in zip(patterns, expecteds): new_pattern = Cardinality.zero_or_one.make_pattern(pattern) self.assertEqual(new_pattern, expected) self.check_pattern_for_cardinality_zero_or_one(pattern, new_pattern) def test_make_pattern_for_zero_or_more(self): pattern = "XXX" expected = r"(XXX)?(\s*,\s*(XXX))*" new_pattern = Cardinality.zero_or_more.make_pattern(pattern) self.assertEqual(new_pattern, expected) self.check_pattern_for_cardinality_zero_or_more(pattern, new_pattern) def test_make_pattern_for_one_or_more(self): pattern = "XXX" expected = r"(XXX)(\s*,\s*(XXX))*" new_pattern = Cardinality.one_or_more.make_pattern(pattern) self.assertEqual(new_pattern, expected) self.check_pattern_for_cardinality_one_or_more(pattern, new_pattern) def test_is_many(self): is_many_true_valueset = set( [Cardinality.zero_or_more, Cardinality.one_or_more]) for cardinality in Cardinality: expected = cardinality in is_many_true_valueset self.assertEqual(cardinality.is_many(), expected) # ----------------------------------------------------------------------------- # TEST CASE: CardinalityTypeBuilderTest # ----------------------------------------------------------------------------- class CardinalityTypeBuilderTest(ParseTypeTestCase): def check_parse_number_with_zero_or_one(self, parse_candidate, type_name="OptionalNumber"): schema = "Optional: {number:%s}" % type_name type_dict = { "Number": parse_number, type_name: parse_candidate, } parser = parse.Parser(schema, type_dict) # -- PERFORM TESTS: self.assert_match(parser, "Optional: ", "number", None) self.assert_match(parser, "Optional: 1", "number", 1) self.assert_match(parser, "Optional: 42", "number", 42) # -- PARSE MISMATCH: self.assert_mismatch(parser, "Optional: x", "number") # Not a Number. self.assert_mismatch(parser, "Optional: -1", "number") # Negative. self.assert_mismatch(parser, "Optional: a, b", "number") # List of ... 
def check_parse_number_with_optional(self, parse_candidate, type_name="OptionalNumber"): self.check_parse_number_with_zero_or_one(parse_candidate, type_name) def check_parse_number_with_zero_or_more(self, parse_candidate, type_name="Numbers0"): schema = "List: {numbers:%s}" % type_name type_dict = { type_name: parse_candidate, } parser = parse.Parser(schema, type_dict) # -- PERFORM TESTS: self.assert_match(parser, "List: ", "numbers", [ ]) self.assert_match(parser, "List: 1", "numbers", [ 1 ]) self.assert_match(parser, "List: 1, 2", "numbers", [ 1, 2 ]) self.assert_match(parser, "List: 1, 2, 3", "numbers", [ 1, 2, 3 ]) # -- PARSE MISMATCH: self.assert_mismatch(parser, "List: x", "numbers") # Not a Number. self.assert_mismatch(parser, "List: -1", "numbers") # Negative. self.assert_mismatch(parser, "List: 1,", "numbers") # Trailing sep. self.assert_mismatch(parser, "List: a, b", "numbers") # List of ... def check_parse_number_with_one_or_more(self, parse_candidate, type_name="Numbers"): schema = "List: {numbers:%s}" % type_name type_dict = { "Number": parse_number, type_name: parse_candidate, } parser = parse.Parser(schema, type_dict) # -- PERFORM TESTS: self.assert_match(parser, "List: 1", "numbers", [ 1 ]) self.assert_match(parser, "List: 1, 2", "numbers", [ 1, 2 ]) self.assert_match(parser, "List: 1, 2, 3", "numbers", [ 1, 2, 3 ]) # -- PARSE MISMATCH: self.assert_mismatch(parser, "List: ", "numbers") # Zero items. self.assert_mismatch(parser, "List: x", "numbers") # Not a Number. self.assert_mismatch(parser, "List: -1", "numbers") # Negative. self.assert_mismatch(parser, "List: 1,", "numbers") # Trailing sep. self.assert_mismatch(parser, "List: a, b", "numbers") # List of ... 
def check_parse_choice_with_optional(self, parse_candidate): # Choice (["red", "green", "blue"]) schema = "Optional: {color:OptionalChoiceColor}" parser = parse.Parser(schema, dict(OptionalChoiceColor=parse_candidate)) # -- PERFORM TESTS: self.assert_match(parser, "Optional: ", "color", None) self.assert_match(parser, "Optional: red", "color", "red") self.assert_match(parser, "Optional: green", "color", "green") self.assert_match(parser, "Optional: blue", "color", "blue") # -- PARSE MISMATCH: self.assert_mismatch(parser, "Optional: r", "color") # Not a Color. self.assert_mismatch(parser, "Optional: redx", "color") # Similar. self.assert_mismatch(parser, "Optional: red, blue", "color") # List of ... def check_parse_number_with_many(self, parse_candidate, type_name="Numbers"): self.check_parse_number_with_one_or_more(parse_candidate, type_name) def check_parse_number_with_many0(self, parse_candidate, type_name="Numbers0"): self.check_parse_number_with_zero_or_more(parse_candidate, type_name) # ----------------------------------------------------------------------------- # TEST CASE: TestTypeBuilder4Cardinality # ----------------------------------------------------------------------------- class TestTypeBuilder4Cardinality(CardinalityTypeBuilderTest): def test_with_zero_or_one_basics(self): parse_opt_number = TypeBuilder.with_zero_or_one(parse_number) self.assertEqual(parse_opt_number.pattern, r"(\d+)?") def test_with_zero_or_one__number(self): parse_opt_number = TypeBuilder.with_zero_or_one(parse_number) self.check_parse_number_with_zero_or_one(parse_opt_number) def test_with_optional__number(self): # -- ALIAS FOR: zero_or_one parse_opt_number = TypeBuilder.with_optional(parse_number) self.check_parse_number_with_optional(parse_opt_number) def test_with_optional__choice(self): # -- ALIAS FOR: zero_or_one parse_color = TypeBuilder.make_choice(["red", "green", "blue"]) parse_opt_color = TypeBuilder.with_optional(parse_color) 
self.check_parse_choice_with_optional(parse_opt_color) def test_with_zero_or_more_basics(self): parse_numbers = TypeBuilder.with_zero_or_more(parse_number) self.assertEqual(parse_numbers.pattern, r"(\d+)?(\s*,\s*(\d+))*") def test_with_zero_or_more__number(self): parse_numbers = TypeBuilder.with_zero_or_more(parse_number) self.check_parse_number_with_zero_or_more(parse_numbers) def test_with_zero_or_more__choice(self): parse_color = TypeBuilder.make_choice(["red", "green", "blue"]) parse_colors = TypeBuilder.with_zero_or_more(parse_color) parse_colors.name = "Colors0" extra_types = build_type_dict([ parse_colors ]) schema = "List: {colors:Colors0}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "List: ", "colors", [ ]) self.assert_match(parser, "List: green", "colors", [ "green" ]) self.assert_match(parser, "List: red, green", "colors", [ "red", "green" ]) # -- PARSE MISMATCH: self.assert_mismatch(parser, "List: x", "colors") # Not a Color. self.assert_mismatch(parser, "List: black", "colors") # Unknown self.assert_mismatch(parser, "List: red,", "colors") # Trailing sep. self.assert_mismatch(parser, "List: a, b", "colors") # List of ... 
def test_with_one_or_more_basics(self): parse_numbers = TypeBuilder.with_one_or_more(parse_number) self.assertEqual(parse_numbers.pattern, r"(\d+)(\s*,\s*(\d+))*") def test_with_one_or_more_basics_with_other_separator(self): parse_numbers2 = TypeBuilder.with_one_or_more(parse_number, listsep=';') self.assertEqual(parse_numbers2.pattern, r"(\d+)(\s*;\s*(\d+))*") parse_numbers2 = TypeBuilder.with_one_or_more(parse_number, listsep=':') self.assertEqual(parse_numbers2.pattern, r"(\d+)(\s*:\s*(\d+))*") def test_with_one_or_more(self): parse_numbers = TypeBuilder.with_one_or_more(parse_number) self.check_parse_number_with_one_or_more(parse_numbers) def test_with_many(self): # -- ALIAS FOR: one_or_more parse_numbers = TypeBuilder.with_many(parse_number) self.check_parse_number_with_many(parse_numbers) def test_with_many0(self): # -- ALIAS FOR: one_or_more parse_numbers = TypeBuilder.with_many0(parse_number) self.check_parse_number_with_many0(parse_numbers) def test_with_one_or_more_choice(self): parse_color = TypeBuilder.make_choice(["red", "green", "blue"]) parse_colors = TypeBuilder.with_one_or_more(parse_color) parse_colors.name = "Colors" extra_types = build_type_dict([ parse_colors ]) schema = "List: {colors:Colors}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "List: green", "colors", [ "green" ]) self.assert_match(parser, "List: red, green", "colors", [ "red", "green" ]) # -- PARSE MISMATCH: self.assert_mismatch(parser, "List: ", "colors") # Zero items. self.assert_mismatch(parser, "List: x", "colors") # Not a Color. self.assert_mismatch(parser, "List: black", "colors") # Unknown self.assert_mismatch(parser, "List: red,", "colors") # Trailing sep. self.assert_mismatch(parser, "List: a, b", "colors") # List of ... 
def test_with_one_or_more_enum(self): parse_color = TypeBuilder.make_enum({"red": 1, "green":2, "blue": 3}) parse_colors = TypeBuilder.with_one_or_more(parse_color) parse_colors.name = "Colors" extra_types = build_type_dict([ parse_colors ]) schema = "List: {colors:Colors}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "List: green", "colors", [ 2 ]) self.assert_match(parser, "List: red, green", "colors", [ 1, 2 ]) # -- PARSE MISMATCH: self.assert_mismatch(parser, "List: ", "colors") # Zero items. self.assert_mismatch(parser, "List: x", "colors") # Not a Color. self.assert_mismatch(parser, "List: black", "colors") # Unknown self.assert_mismatch(parser, "List: red,", "colors") # Trailing sep. self.assert_mismatch(parser, "List: a, b", "colors") # List of ... def test_with_one_or_more_with_other_separator(self): parse_numbers2 = TypeBuilder.with_one_or_more(parse_number, listsep=';') parse_numbers2.name = "Numbers2" extra_types = build_type_dict([ parse_numbers2 ]) schema = "List: {numbers:Numbers2}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "List: 1", "numbers", [ 1 ]) self.assert_match(parser, "List: 1; 2", "numbers", [ 1, 2 ]) self.assert_match(parser, "List: 1; 2; 3", "numbers", [ 1, 2, 3 ]) def test_with_cardinality_one(self): parse_number2 = TypeBuilder.with_cardinality(Cardinality.one, parse_number) assert parse_number2 is parse_number def test_with_cardinality_zero_or_one(self): parse_opt_number = TypeBuilder.with_cardinality( Cardinality.zero_or_one, parse_number) self.check_parse_number_with_zero_or_one(parse_opt_number) def test_with_cardinality_zero_or_more(self): parse_many0_numbers = TypeBuilder.with_cardinality( Cardinality.zero_or_more, parse_number) self.check_parse_number_with_zero_or_more(parse_many0_numbers) def test_with_cardinality_one_or_more(self): parse_many_numbers = TypeBuilder.with_cardinality( Cardinality.one_or_more, parse_number) 
self.check_parse_number_with_one_or_more(parse_many_numbers) def test_with_cardinality_optional(self): parse_opt_number = TypeBuilder.with_cardinality( Cardinality.optional, parse_number) self.check_parse_number_with_optional(parse_opt_number) def test_with_cardinality_many0(self): parse_many0_numbers = TypeBuilder.with_cardinality( Cardinality.many0, parse_number) self.check_parse_number_with_zero_or_more(parse_many0_numbers) def test_with_cardinality_many(self): parse_many_numbers = TypeBuilder.with_cardinality( Cardinality.many, parse_number) self.check_parse_number_with_many(parse_many_numbers) def test_parse_with_optional_and_named_fields(self): parse_opt_number = TypeBuilder.with_optional(parse_number) parse_opt_number.name = "Number?" type_dict = build_type_dict([parse_opt_number, parse_number]) schema = "Numbers: {number1:Number?} {number2:Number}" parser = parse.Parser(schema, type_dict) # -- CASE: Optional number is present result = parser.parse("Numbers: 34 12") expected = dict(number1=34, number2=12) self.assertIsNotNone(result) self.assertEqual(result.named, expected) # -- CASE: Optional number is missing result = parser.parse("Numbers: 12") expected = dict(number1=None, number2=12) self.assertIsNotNone(result) self.assertEqual(result.named, expected) def test_parse_with_optional_and_unnamed_fields(self): # -- ENSURE: Cardinality.optional.group_count is correct # REQUIRES: Parser := parse_type.Parser with group_count support parse_opt_number = TypeBuilder.with_optional(parse_number) parse_opt_number.name = "Number?" 
type_dict = build_type_dict([parse_opt_number, parse_number]) schema = "Numbers: {:Number?} {:Number}" parser = Parser(schema, type_dict) # -- CASE: Optional number is present result = parser.parse("Numbers: 34 12") expected = (34, 12) self.assertIsNotNone(result) self.assertEqual(result.fixed, tuple(expected)) # -- CASE: Optional number is missing result = parser.parse("Numbers: 12") expected = (None, 12) self.assertIsNotNone(result) self.assertEqual(result.fixed, tuple(expected)) def test_parse_with_many_and_unnamed_fields(self): # -- ENSURE: Cardinality.one_or_more.group_count is correct # REQUIRES: Parser := parse_type.Parser with group_count support parse_many_numbers = TypeBuilder.with_many(parse_number) parse_many_numbers.name = "Number+" type_dict = build_type_dict([parse_many_numbers, parse_number]) schema = "Numbers: {:Number+} {:Number}" parser = Parser(schema, type_dict) # -- CASE: result = parser.parse("Numbers: 1, 2, 3 42") expected = ([1, 2, 3], 42) self.assertIsNotNone(result) self.assertEqual(result.fixed, tuple(expected)) result = parser.parse("Numbers: 3 43") expected = ([ 3 ], 43) self.assertIsNotNone(result) self.assertEqual(result.fixed, tuple(expected)) def test_parse_with_many0_and_unnamed_fields(self): # -- ENSURE: Cardinality.zero_or_more.group_count is correct # REQUIRES: Parser := parse_type.Parser with group_count support parse_many0_numbers = TypeBuilder.with_many0(parse_number) parse_many0_numbers.name = "Number*" type_dict = build_type_dict([parse_many0_numbers, parse_number]) schema = "Numbers: {:Number*} {:Number}" parser = Parser(schema, type_dict) # -- CASE: Optional numbers are present result = parser.parse("Numbers: 1, 2, 3 42") expected = ([1, 2, 3], 42) self.assertIsNotNone(result) self.assertEqual(result.fixed, tuple(expected)) # -- CASE: Optional numbers are missing := EMPTY-LIST result = parser.parse("Numbers: 43") expected = ([ ], 43) self.assertIsNotNone(result) self.assertEqual(result.fixed, tuple(expected)) # class 
TestParserWithManyTypedFields(ParseTypeTestCase): #parse_variant1 = TypeBuilder.make_variant([parse_number, parse_yesno]) #parse_variant1.name = "Number_or_YesNo" #parse_variant2 = TypeBuilder.make_variant([parse_color, parse_person_choice]) #parse_variant2.name = "Color_or_PersonChoice" #TYPE_CONVERTERS = [ # parse_number, # parse_yesno, # parse_color, # parse_person_choice, # parse_variant1, # parse_variant2, #] # # def test_parse_with_many_named_fields(self): # type_dict = build_type_dict(self.TYPE_CONVERTERS) # schema = """\ #Number: {number:Number} #YesNo: {answer:YesNo} #Color: {color:Color} #Person: {person:PersonChoice} #Variant1: {variant1:Number_or_YesNo} #Variant2: {variant2:Color_or_PersonChoice} #""" # parser = parse.Parser(schema, type_dict) # # text = """\ #Number: 12 #YesNo: yes #Color: red #Person: Alice #Variant1: 42 #Variant2: Bob #""" # expected = dict( # number=12, # answer=True, # color=Color.red, # person="Alice", # variant1=42, # variant2="Bob" # ) # # result = parser.parse(text) # self.assertIsNotNone(result) # self.assertEqual(result.named, expected) # def test_parse_with_many_unnamed_fields(self): # type_dict = build_type_dict(self.TYPE_CONVERTERS) # schema = """\ #Number: {:Number} #YesNo: {:YesNo} #Color: {:Color} #Person: {:PersonChoice} #""" # # -- OMIT: XFAIL, due to group_index delta counting => Parser problem. 
# # Variant2: {:Color_or_PersonChoice} # # Variant1: {:Number_or_YesNo} # parser = parse.Parser(schema, type_dict) # # text = """\ #Number: 12 #YesNo: yes #Color: red #Person: Alice #""" # # SKIP: Variant2: Bob # # SKIP: Variant1: 42 # expected = [ 12, True, Color.red, "Alice", ] # -- SKIP: "Bob", 42 ] # # result = parser.parse(text) # self.assertIsNotNone(result) # self.assertEqual(result.fixed, tuple(expected)) # ----------------------------------------------------------------------------- # MAIN: # ----------------------------------------------------------------------------- if __name__ == '__main__': unittest.main() # Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. parse_type-0.6.4/tests/test_cardinality_field.py000077500000000000000000000376361467747561600221660ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test experiment for parse. 
Add cardinality format field after type: "... {person:Person?} ..." -- CARDINALITY: Zero or one, 0..1 (optional) "... {persons:Person*} ..." -- CARDINALITY: Zero or more, 0..N (many0) "... {persons:Person+} ..." -- CARDINALITY: One or more, 1..N (many) REQUIRES: parse >= 1.5.3.1 ('pattern' attribute support and further extensions) STATUS: IDEA, working prototype with patched parse module, but not accepted. """ from __future__ import absolute_import from .parse_type_test \ import TestCase, parse_number, unittest from .test_cardinality import CardinalityTypeBuilderTest from parse_type import Cardinality from parse_type.cardinality_field \ import CardinalityField, CardinalityFieldTypeBuilder, MissingTypeError # ------------------------------------------------------------------------- # TEST CASE: TestParseTypeWithCardinalityField # ------------------------------------------------------------------------- class TestCardinalityField(TestCase): VALID_TYPE_NAMES = ["Number?", "Number*", "Number+"] INVALID_TYPE_NAMES = ["?Invalid", "Inval*d", "In+valid"] def test_pattern_chars(self): for pattern_char in CardinalityField.pattern_chars: self.assertIn(pattern_char, CardinalityField.from_char_map) def test_to_from_char_map_symmetry(self): for cardinality, char in CardinalityField.to_char_map.items(): self.assertEqual(cardinality, CardinalityField.from_char_map[char]) for char, cardinality in CardinalityField.from_char_map.items(): self.assertEqual(char, CardinalityField.to_char_map[cardinality]) def test_matches_type_name(self): for type_name in self.VALID_TYPE_NAMES: self.assertTrue(CardinalityField.matches_type(type_name)) for type_name in self.INVALID_TYPE_NAMES: self.assertFalse(CardinalityField.matches_type(type_name)) def test_split_type__with_valid_special_names(self): actual = CardinalityField.split_type("Color?") self.assertEqual(actual, ("Color", Cardinality.optional)) self.assertEqual(actual, ("Color", Cardinality.zero_or_one)) actual = 
CardinalityField.split_type("Color+") self.assertEqual(actual, ("Color", Cardinality.many)) self.assertEqual(actual, ("Color", Cardinality.one_or_more)) actual = CardinalityField.split_type("Color*") self.assertEqual(actual, ("Color", Cardinality.many0)) self.assertEqual(actual, ("Color", Cardinality.zero_or_more)) def test_split_type__with_valid_special_names2(self): for type_name in self.VALID_TYPE_NAMES: self.assertTrue(CardinalityField.matches_type(type_name)) cardinality_char = type_name[-1] expected_basename = type_name[:-1] expected_cardinality = CardinalityField.from_char_map[cardinality_char] expected = (expected_basename, expected_cardinality) actual = CardinalityField.split_type(type_name) self.assertEqual(actual, expected) def test_split_type__with_cardinality_one(self): actual = CardinalityField.split_type("Color") self.assertEqual(actual, ("Color", Cardinality.one)) def test_split_type__with_invalid_names(self): for type_name in self.INVALID_TYPE_NAMES: expected = (type_name, Cardinality.one) actual = CardinalityField.split_type(type_name) self.assertEqual(actual, expected) self.assertFalse(CardinalityField.matches_type(type_name)) def test_make_type__with_cardinality_one(self): expected = "Number" type_name = CardinalityField.make_type("Number", Cardinality.one) self.assertEqual(type_name, expected) self.assertFalse(CardinalityField.matches_type(type_name)) def test_make_type__with_cardinality_optional(self): expected = "Number?" 
type_name = CardinalityField.make_type("Number", Cardinality.optional) self.assertEqual(type_name, expected) self.assertTrue(CardinalityField.matches_type(type_name)) type_name2 = CardinalityField.make_type("Number", Cardinality.zero_or_one) self.assertEqual(type_name2, expected) self.assertEqual(type_name2, type_name) def test_make_type__with_cardinality_many(self): expected = "Number+" type_name = CardinalityField.make_type("Number", Cardinality.many) self.assertEqual(type_name, expected) self.assertTrue(CardinalityField.matches_type(type_name)) type_name2 = CardinalityField.make_type("Number", Cardinality.one_or_more) self.assertEqual(type_name2, expected) self.assertEqual(type_name2, type_name) def test_make_type__with_cardinality_many0(self): expected = "Number*" type_name = CardinalityField.make_type("Number", Cardinality.many0) self.assertEqual(type_name, expected) self.assertTrue(CardinalityField.matches_type(type_name)) type_name2 = CardinalityField.make_type("Number", Cardinality.zero_or_more) self.assertEqual(type_name2, expected) self.assertEqual(type_name2, type_name) def test_split_type2make_type__symmetry_with_valid_names(self): for type_name in self.VALID_TYPE_NAMES: primary_name, cardinality = CardinalityField.split_type(type_name) type_name2 = CardinalityField.make_type(primary_name, cardinality) self.assertEqual(type_name, type_name2) def test_split_type2make_type__symmetry_with_cardinality_one(self): for type_name in self.INVALID_TYPE_NAMES: primary_name, cardinality = CardinalityField.split_type(type_name) type_name2 = CardinalityField.make_type(primary_name, cardinality) self.assertEqual(type_name, primary_name) self.assertEqual(type_name, type_name2) self.assertEqual(cardinality, Cardinality.one) # ------------------------------------------------------------------------- # TEST CASE: # ------------------------------------------------------------------------- class TestCardinalityFieldTypeBuilder(CardinalityTypeBuilderTest): 
INVALID_TYPE_DICT_DATA = [ (dict(), "empty type_dict"), (dict(NumberX=parse_number), "non-empty type_dict (wrong name)"), ] # -- UTILITY METHODS: def generate_type_variants(self,type_name): for pattern_char in CardinalityField.pattern_chars: special_name = "%s%s" % (type_name.strip(), pattern_char) self.assertTrue(CardinalityField.matches_type(special_name)) yield special_name # -- METHOD: CardinalityFieldTypeBuilder.create_type_variant() def test_create_type_variant__with_many_and_type_converter(self): type_builder = CardinalityFieldTypeBuilder parse_candidate = type_builder.create_type_variant("Number+", type_converter=parse_number) self.check_parse_number_with_many(parse_candidate, "Number+") def test_create_type_variant__with_optional_and_type_dict(self): type_builder = CardinalityFieldTypeBuilder parse_candidate = type_builder.create_type_variant("Number?", dict(Number=parse_number)) self.check_parse_number_with_optional(parse_candidate, "Number?") def test_create_type_variant__with_many_and_type_dict(self): type_builder = CardinalityFieldTypeBuilder parse_candidate = type_builder.create_type_variant("Number+", dict(Number=parse_number)) self.check_parse_number_with_many(parse_candidate, "Number+") def test_create_type_variant__with_many0_and_type_dict(self): type_builder = CardinalityFieldTypeBuilder parse_candidate = type_builder.create_type_variant("Number*", dict(Number=parse_number)) self.check_parse_number_with_many0(parse_candidate, "Number*") def test_create_type_variant__can_create_all_variants(self): type_builder = CardinalityFieldTypeBuilder for special_name in self.generate_type_variants("Number"): # -- CASE: type_converter parse_candidate = type_builder.create_type_variant(special_name, parse_number) self.assertTrue(callable(parse_candidate)) # -- CASE: type_dict parse_candidate = type_builder.create_type_variant(special_name, dict(Number=parse_number)) self.assertTrue(callable(parse_candidate)) def 
test_create_type_variant__raises_error_with_invalid_type_name(self): type_builder = CardinalityFieldTypeBuilder for invalid_type_name in TestCardinalityField.INVALID_TYPE_NAMES: with self.assertRaises(ValueError): type_builder.create_type_variant(invalid_type_name, parse_number) def test_create_type_variant__raises_error_with_missing_primary_type(self): type_builder = CardinalityFieldTypeBuilder for special_name in self.generate_type_variants("Number"): for type_dict, description in self.INVALID_TYPE_DICT_DATA: with self.assertRaises(MissingTypeError): type_builder.create_type_variant(special_name, type_dict) # -- METHOD: CardinalityFieldTypeBuilder.create_type_variants() def test_create_type_variants__all(self): type_builder = CardinalityFieldTypeBuilder special_names = ["Number?", "Number+", "Number*"] type_dict = dict(Number=parse_number) new_types = type_builder.create_type_variants(special_names, type_dict) self.assertSequenceEqual(set(new_types.keys()), set(special_names)) self.assertEqual(len(new_types), 3) parse_candidate = new_types["Number?"] self.check_parse_number_with_optional(parse_candidate, "Number?") parse_candidate = new_types["Number+"] self.check_parse_number_with_many(parse_candidate, "Number+") parse_candidate = new_types["Number*"] self.check_parse_number_with_many0(parse_candidate, "Number*") def test_create_type_variants__raises_error_with_invalid_type_name(self): type_builder = CardinalityFieldTypeBuilder for invalid_type_name in TestCardinalityField.INVALID_TYPE_NAMES: type_dict = dict(Number=parse_number) with self.assertRaises(ValueError): type_names = [invalid_type_name] type_builder.create_type_variants(type_names, type_dict) def test_create_missing_type_variants__raises_error_with_missing_primary_type(self): type_builder = CardinalityFieldTypeBuilder for special_name in self.generate_type_variants("Number"): for type_dict, description in self.INVALID_TYPE_DICT_DATA: self.assertNotIn("Number", type_dict) with 
self.assertRaises(MissingTypeError): names = [special_name] type_builder.create_type_variants(names, type_dict) # -- METHOD: CardinalityFieldTypeBuilder.create_missing_type_variants() def test_create_missing_type_variants__all_missing(self): type_builder = CardinalityFieldTypeBuilder missing_names = ["Number?", "Number+", "Number*"] new_types = type_builder.create_missing_type_variants(missing_names, dict(Number=parse_number)) self.assertSequenceEqual(set(new_types.keys()), set(missing_names)) self.assertEqual(len(new_types), 3) def test_create_missing_type_variants__none_missing(self): # -- PREPARE: Create all types and store them in the type_dict. type_builder = CardinalityFieldTypeBuilder type_names = ["Number?", "Number+", "Number*"] all_type_names = ["Number", "Number?", "Number+", "Number*"] type_dict = dict(Number=parse_number) new_types = type_builder.create_missing_type_variants(type_names, type_dict) type_dict.update(new_types) self.assertSequenceEqual(set(new_types.keys()), set(type_names)) self.assertSequenceEqual(set(type_dict.keys()), set(all_type_names)) # -- TEST: All special types are already stored in the type_dict. new_types2 = type_builder.create_missing_type_variants(type_names, type_dict) self.assertEqual(len(new_types2), 0) def test_create_missing_type_variants__some_missing(self): # -- PREPARE: Create some types and store them in the type_dict. type_builder = CardinalityFieldTypeBuilder special_names = ["Number?", "Number+", "Number*"] type_names1 = ["Number?", "Number*"] type_names2 = special_names type_dict = dict(Number=parse_number) new_types = type_builder.create_missing_type_variants(type_names1, type_dict) type_dict.update(new_types) self.assertSequenceEqual(set(new_types.keys()), set(type_names1)) self.assertSequenceEqual(set(type_dict.keys()), set(["Number", "Number?", "Number*"])) # -- TEST: All special types are already stored in the type_dict. 
new_types2 = type_builder.create_missing_type_variants(type_names2, type_dict) self.assertEqual(len(new_types2), 1) self.assertSequenceEqual(set(new_types2.keys()), set(["Number+"])) def test_create_type_variant__raises_error_with_invalid_type_name(self): type_builder = CardinalityFieldTypeBuilder for invalid_type_name in TestCardinalityField.INVALID_TYPE_NAMES: type_dict = dict(Number=parse_number) with self.assertRaises(ValueError): type_names = [invalid_type_name] type_builder.create_missing_type_variants(type_names, type_dict) def test_create_missing_type_variants__raises_error_with_missing_primary_type(self): type_builder = CardinalityFieldTypeBuilder for special_name in self.generate_type_variants("Number"): for type_dict, description in self.INVALID_TYPE_DICT_DATA: self.assertNotIn("Number", type_dict) with self.assertRaises(MissingTypeError): names = [special_name] type_builder.create_missing_type_variants(names, type_dict) # ----------------------------------------------------------------------------- # MAIN: # ----------------------------------------------------------------------------- if __name__ == '__main__': unittest.main() # Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. parse_type-0.6.4/tests/test_cardinality_field0.py000077500000000000000000000156221467747561600222350ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test experiment for parse. Add cardinality format field after type: "... {person:Person?} ..." -- CARDINALITY: Zero or one, 0..1 (optional) "... {persons:Person*} ..." -- CARDINALITY: Zero or more, 0..N (many0) "... {persons:Person+} ..." -- CARDINALITY: One or more, 1..N (many) REQUIRES: parse >= 1.5.3.1 ('pattern' attribute support and further extensions) STATUS: IDEA, working prototype with patched parse module, but not accepted. """ from __future__ import absolute_import from .parse_type_test import ParseTypeTestCase from parse_type import TypeBuilder, build_type_dict import parse import unittest ENABLED = False if ENABLED: # ------------------------------------------------------------------------- # TEST CASE: TestParseTypeWithCardinalityField # ------------------------------------------------------------------------- class TestParseTypeWithCardinalityField(ParseTypeTestCase): """ Test cardinality field part in parse type expressions, ala: "... {person:Person?} ..." -- OPTIONAL: cardinality is zero or one. "... {persons:Person*} ..." -- MANY0: cardinality is zero or more. "... {persons:Person+} ..." -- MANY: cardinality is one or more. NOTE: * TypeBuilder has a similar and slightly more flexible feature. * Cardinality field part works currently only for user-defined types. 
""" def test_without_cardinality_field(self): # -- IMPLCIT CARDINALITY: one # -- SETUP: parse_person = TypeBuilder.make_choice(["Alice", "Bob", "Charly"]) parse_person.name = "Person" # For testing only. extra_types = build_type_dict([ parse_person ]) schema = "One: {person:Person}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "One: Alice", "person", "Alice") self.assert_match(parser, "One: Bob", "person", "Bob") # -- PARSE MISMATCH: self.assert_mismatch(parser, "One: ", "person") # Missing. self.assert_mismatch(parser, "One: BAlice", "person") # Similar1. self.assert_mismatch(parser, "One: Boby", "person") # Similar2. self.assert_mismatch(parser, "One: a", "person") # INVALID ... def test_cardinality_field_with_zero_or_one(self): # -- SETUP: parse_person = TypeBuilder.make_choice(["Alice", "Bob", "Charly"]) parse_person.name = "Person" # For testing only. extra_types = build_type_dict([ parse_person ]) schema = "Optional: {person:Person?}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "Optional: ", "person", None) self.assert_match(parser, "Optional: Alice", "person", "Alice") self.assert_match(parser, "Optional: Bob", "person", "Bob") # -- PARSE MISMATCH: self.assert_mismatch(parser, "Optional: Anna", "person") # Similar1. self.assert_mismatch(parser, "Optional: Boby", "person") # Similar2. self.assert_mismatch(parser, "Optional: a", "person") # INVALID ... def test_cardinality_field_with_one_or_more(self): # -- SETUP: parse_person = TypeBuilder.make_choice(["Alice", "Bob", "Charly"]) parse_person.name = "Person" # For testing only. 
extra_types = build_type_dict([ parse_person ]) schema = "List: {persons:Person+}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "List: Alice", "persons", [ "Alice" ]) self.assert_match(parser, "List: Bob", "persons", [ "Bob" ]) self.assert_match(parser, "List: Bob, Alice", "persons", [ "Bob", "Alice" ]) # -- PARSE MISMATCH: self.assert_mismatch(parser, "List: ", "persons") # Zero items. self.assert_mismatch(parser, "List: BAlice", "persons") # Unknown1. self.assert_mismatch(parser, "List: Boby", "persons") # Unknown2. self.assert_mismatch(parser, "List: Alice,", "persons") # Trailing, self.assert_mismatch(parser, "List: a, b", "persons") # List of... def test_cardinality_field_with_zero_or_more(self): # -- SETUP: parse_person = TypeBuilder.make_choice(["Alice", "Bob", "Charly"]) parse_person.name = "Person" # For testing only. extra_types = build_type_dict([ parse_person ]) schema = "List: {persons:Person*}" parser = parse.Parser(schema, extra_types) # -- PERFORM TESTS: self.assert_match(parser, "List: ", "persons", [ ]) self.assert_match(parser, "List: Alice", "persons", [ "Alice" ]) self.assert_match(parser, "List: Bob", "persons", [ "Bob" ]) self.assert_match(parser, "List: Bob, Alice", "persons", [ "Bob", "Alice" ]) # -- PARSE MISMATCH: self.assert_mismatch(parser, "List:", "persons") # Too short. self.assert_mismatch(parser, "List: BAlice", "persons") # Unknown1. self.assert_mismatch(parser, "List: Boby", "persons") # Unknown2. self.assert_mismatch(parser, "List: Alice,", "persons") # Trailing, self.assert_mismatch(parser, "List: a, b", "persons") # List of... 
# ----------------------------------------------------------------------------- # MAIN: # ----------------------------------------------------------------------------- if __name__ == '__main__': unittest.main() # Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. parse_type-0.6.4/tests/test_cfparse.py000066400000000000000000000205451467747561600201270ustar00rootroot00000000000000# -*- coding: utf-8 -*- #!/usr/bin/env python # -*- coding: utf-8 -*- """ Test suite to test the :mod:`parse_type.cfparse` module. 
""" from __future__ import absolute_import from .parse_type_test import ParseTypeTestCase, parse_number, unittest from parse_type.cfparse import Parser from parse_type.cardinality_field \ import MissingTypeError, CardinalityFieldTypeBuilder # ----------------------------------------------------------------------------- # TEST CASE: # ----------------------------------------------------------------------------- class TestParser(ParseTypeTestCase): """ Test :class:`parse_type.cfparse.Parser`. Ensure that: * parser can parse fields with CardinalityField part even when these special type variants are not provided. * parser creates missing type converter variants for CardinalityFields as long as the primary type converter for cardinality=1 is provided. """ SPECIAL_FIELD_TYPES_DATA = [ ("{number1:Number?}", ["Number?"]), ("{number2:Number+}", ["Number+"]), ("{number3:Number*}", ["Number*"]), ("{number1:Number?} {number2:Number+} {number3:Number*}", ["Number?", "Number+", "Number*"]), ] def test_parser__can_parse_normal_fields(self): existing_types = dict(Number=parse_number) schema = "Number: {number:Number}" parser = Parser(schema, existing_types) self.assert_match(parser, "Number: 42", "number", 42) self.assert_match(parser, "Number: 123", "number", 123) self.assert_mismatch(parser, "Number: ") self.assert_mismatch(parser, "Number: XXX") self.assert_mismatch(parser, "Number: -123") def test_parser__can_parse_cardinality_field_optional(self): # -- CARDINALITY: 0..1 = zero_or_one = optional existing_types = dict(Number=parse_number) self.assertFalse("Number?" in existing_types) # -- ENSURE: Missing type variant is created. schema = "OptionalNumber: {number:Number?}" parser = Parser(schema, existing_types) self.assertTrue("Number?" in existing_types) # -- ENSURE: Newly created type variant is usable. 
self.assert_match(parser, "OptionalNumber: 42", "number", 42) self.assert_match(parser, "OptionalNumber: 123", "number", 123) self.assert_match(parser, "OptionalNumber: ", "number", None) self.assert_mismatch(parser, "OptionalNumber:") self.assert_mismatch(parser, "OptionalNumber: XXX") self.assert_mismatch(parser, "OptionalNumber: -123") def test_parser__can_parse_cardinality_field_many(self): # -- CARDINALITY: 1..* = one_or_more = many existing_types = dict(Number=parse_number) self.assertFalse("Number+" in existing_types) # -- ENSURE: Missing type variant is created. schema = "List: {numbers:Number+}" parser = Parser(schema, existing_types) self.assertTrue("Number+" in existing_types) # -- ENSURE: Newly created type variant is usable. self.assert_match(parser, "List: 42", "numbers", [42]) self.assert_match(parser, "List: 1, 2, 3", "numbers", [1, 2, 3]) self.assert_match(parser, "List: 4,5,6", "numbers", [4, 5, 6]) self.assert_mismatch(parser, "List: ") self.assert_mismatch(parser, "List:") self.assert_mismatch(parser, "List: XXX") self.assert_mismatch(parser, "List: -123") def test_parser__can_parse_cardinality_field_many_with_own_type_builder(self): # -- CARDINALITY: 1..* = one_or_more = many class MyCardinalityFieldTypeBuilder(CardinalityFieldTypeBuilder): listsep = ';' type_builder = MyCardinalityFieldTypeBuilder existing_types = dict(Number=parse_number) self.assertFalse("Number+" in existing_types) # -- ENSURE: Missing type variant is created. schema = "List: {numbers:Number+}" parser = Parser(schema, existing_types, type_builder=type_builder) self.assertTrue("Number+" in existing_types) # -- ENSURE: Newly created type variant is usable. # NOTE: Use other list separator. 
self.assert_match(parser, "List: 42", "numbers", [42]) self.assert_match(parser, "List: 1; 2; 3", "numbers", [1, 2, 3]) self.assert_match(parser, "List: 4;5;6", "numbers", [4, 5, 6]) self.assert_mismatch(parser, "List: ") self.assert_mismatch(parser, "List:") self.assert_mismatch(parser, "List: XXX") self.assert_mismatch(parser, "List: -123") def test_parser__can_parse_cardinality_field_many0(self): # -- CARDINALITY: 0..* = zero_or_more = many0 existing_types = dict(Number=parse_number) self.assertFalse("Number*" in existing_types) # -- ENSURE: Missing type variant is created. schema = "List0: {numbers:Number*}" parser = Parser(schema, existing_types) self.assertTrue("Number*" in existing_types) # -- ENSURE: Newly created type variant is usable. self.assert_match(parser, "List0: 42", "numbers", [42]) self.assert_match(parser, "List0: 1, 2, 3", "numbers", [1, 2, 3]) self.assert_match(parser, "List0: ", "numbers", []) self.assert_mismatch(parser, "List0:") self.assert_mismatch(parser, "List0: XXX") self.assert_mismatch(parser, "List0: -123") def test_create_missing_types__without_cardinality_fields_in_schema(self): schemas = ["{}", "{:Number}", "{number3}", "{number4:Number}", "XXX"] existing_types = {} for schema in schemas: new_types = Parser.create_missing_types(schema, existing_types) self.assertEqual(len(new_types), 0) self.assertEqual(new_types, {}) def test_create_missing_types__raises_error_if_primary_type_is_missing(self): # -- HINT: primary type is not provided in type_dict (existing_types) existing_types = {} for schema, missing_types in self.SPECIAL_FIELD_TYPES_DATA: with self.assertRaises(MissingTypeError): Parser.create_missing_types(schema, existing_types) def test_create_missing_types__if_special_types_are_missing(self): existing_types = dict(Number=parse_number) for schema, missing_types in self.SPECIAL_FIELD_TYPES_DATA: new_types = Parser.create_missing_types(schema, existing_types) self.assertSequenceEqual(set(new_types.keys()), set(missing_types)) 
def test_create_missing_types__if_special_types_exist(self): existing_types = dict(Number=parse_number) for schema, missing_types in self.SPECIAL_FIELD_TYPES_DATA: # -- FIRST STEP: Prepare new_types = Parser.create_missing_types(schema, existing_types) self.assertGreater(len(new_types), 0) # -- SECOND STEP: Now all needed special types should exist. existing_types2 = existing_types.copy() existing_types2.update(new_types) new_types2 = Parser.create_missing_types(schema, existing_types2) self.assertEqual(len(new_types2), 0) # ----------------------------------------------------------------------------- # MAIN: # ----------------------------------------------------------------------------- if __name__ == '__main__': unittest.main() # Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
parse_type-0.6.4/tests/test_parse_decorator.py000077500000000000000000000130701467747561600216560ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # pylint: disable=invalid-name, missing-docstring, too-few-public-methods """ Integrated into :mod:`parse` module. """ from __future__ import absolute_import import unittest import parse from parse_type import build_type_dict from .parse_type_test import ParseTypeTestCase # ----------------------------------------------------------------------------- # TEST CASE: TestParseTypeWithPatternDecorator # ----------------------------------------------------------------------------- class TestParseTypeWithPatternDecorator(ParseTypeTestCase): r""" Test the pattern decorator for type-converter (parse_type) functions. >>> def parse_number(text): ... return int(text) >>> parse_number.pattern = r"\d+" is equivalent to: >>> import parse >>> @parse.with_pattern(r"\d+") ... def parse_number(text): ... return int(text) >>> assert hasattr(parse_number, "pattern") >>> assert parse_number.pattern == r"\d+" """ def assert_decorated_with_pattern(self, func, expected_pattern): self.assertTrue(callable(func)) self.assertTrue(hasattr(func, "pattern")) self.assertEqual(func.pattern, expected_pattern) def assert_converter_call(self, func, text, expected_value): value = func(text) self.assertEqual(value, expected_value) # -- TESTS: def test_function_with_pattern_decorator(self): @parse.with_pattern(r"\d+") def parse_number(text): return int(text) self.assert_decorated_with_pattern(parse_number, r"\d+") self.assert_converter_call(parse_number, "123", 123) def test_classmethod_with_pattern_decorator(self): choice_pattern = r"Alice|Bob|Charly" class C(object): @classmethod @parse.with_pattern(choice_pattern) def parse_choice(cls, text): return text self.assert_decorated_with_pattern(C.parse_choice, choice_pattern) self.assert_converter_call(C.parse_choice, "Alice", "Alice") def test_staticmethod_with_pattern_decorator(self): 
choice_pattern = r"Alice|Bob|Charly" class S(object): @staticmethod @parse.with_pattern(choice_pattern) def parse_choice(text): return text self.assert_decorated_with_pattern(S.parse_choice, choice_pattern) self.assert_converter_call(S.parse_choice, "Bob", "Bob") def test_decorated_function_with_parser(self): # -- SETUP: @parse.with_pattern(r"\d+") def parse_number(text): return int(text) parse_number.name = "Number" #< For test automation. more_types = build_type_dict([parse_number]) schema = "Test: {number:Number}" parser = parse.Parser(schema, more_types) # -- PERFORM TESTS: # pylint: disable=bad-whitespace self.assert_match(parser, "Test: 1", "number", 1) self.assert_match(parser, "Test: 42", "number", 42) self.assert_match(parser, "Test: 123", "number", 123) # -- PARSE MISMATCH: self.assert_mismatch(parser, "Test: x", "number") # Not a Number. self.assert_mismatch(parser, "Test: -1", "number") # Negative. self.assert_mismatch(parser, "Test: a, b", "number") # List of ... def test_decorated_classmethod_with_parser(self): # -- SETUP: class C(object): @classmethod @parse.with_pattern(r"Alice|Bob|Charly") def parse_person(cls, text): return text more_types = {"Person": C.parse_person} schema = "Test: {person:Person}" parser = parse.Parser(schema, more_types) # -- PERFORM TESTS: # pylint: disable=bad-whitespace self.assert_match(parser, "Test: Alice", "person", "Alice") self.assert_match(parser, "Test: Bob", "person", "Bob") # -- PARSE MISMATCH: self.assert_mismatch(parser, "Test: ", "person") # Missing. self.assert_mismatch(parser, "Test: BAlice", "person") # Similar1. self.assert_mismatch(parser, "Test: Boby", "person") # Similar2. self.assert_mismatch(parser, "Test: a", "person") # INVALID ... 
# ----------------------------------------------------------------------------- # MAIN: # ----------------------------------------------------------------------------- if __name__ == '__main__': unittest.main() # Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. parse_type-0.6.4/tests/test_parse_number.py000066400000000000000000000034521467747561600211640ustar00rootroot00000000000000# -*- coding: UTF-8 -*- """ Additional unit tests for the :mod`parse` module. Related to auto-detection of number base (base=10, 2, 8, 16). 
""" from __future__ import absolute_import, print_function import pytest import parse parse_version = parse.__version__ print("USING: parse-%s" % parse_version) if parse_version in ("1.17.0", "1.16.0"): # -- REQUIRES: parse >= 1.18.0 -- WORKAROUND HERE print("USING: parse_type.parse (INSTEAD)") from parse_type import parse def assert_parse_number_with_format_d(text, expected): parser = parse.Parser("{value:d}") result = parser.parse(text) assert result.named == dict(value=expected) @pytest.mark.parametrize("text, expected", [ ("123", 123) ]) def test_parse_number_with_base10(text, expected): assert_parse_number_with_format_d(text, expected) @pytest.mark.parametrize("text, expected", [ ("0b0", 0), ("0b1011", 11), ]) def test_parse_number_with_base2(text, expected): assert_parse_number_with_format_d(text, expected) @pytest.mark.parametrize("text, expected", [ ("0o0", 0), ("0o10", 8), ("0o12", 10), ]) def test_parse_number_with_base8(text, expected): assert_parse_number_with_format_d(text, expected) @pytest.mark.parametrize("text, expected", [ ("0x0", 0), ("0x01", 1), ("0x12", 18), ]) def test_parse_number_with_base16(text, expected): assert_parse_number_with_format_d(text, expected) @pytest.mark.parametrize("text1, expected1, text2, expected2", [ ("0x12", 18, "12", 12) ]) def test_parse_number_twice(text1, expected1, text2, expected2): """ENSURE: Issue #121 int_convert memory effect is fixed.""" parser = parse.Parser("{:d}") result1 = parser.parse(text1) result2 = parser.parse(text2) assert result1.fixed[0] == expected1 assert result2.fixed[0] == expected2 parse_type-0.6.4/tests/test_parse_util.py000066400000000000000000000434631467747561600206570ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test suite to test the :mod:`parse_type.parse_util` module. 
""" from __future__ import absolute_import, print_function from .parse_type_test import TestCase, unittest from parse_type.parse_util \ import Field, FieldParser, FormatSpec, make_format_spec # ----------------------------------------------------------------------------- # TEST CASE: # ----------------------------------------------------------------------------- class TestField(TestCase): EMPTY_FORMAT_FIELDS = [ Field(), #< Empty field. Field("name"), #< Named field without format. Field("name", ""), #< Named field with format=empty-string. Field(format=""), #< Field with format=empty-string. ] NONEMPTY_FORMAT_FIELDS = [ Field(format="Number"), #< Typed field without name". Field("name", "Number"), #< Named and typed field". ] INVALID_FORMAT_FIELDS = [ Field(format="<"), #< Align without type. Field(format="_<"), #< Fill and align without type. Field(format="_<10"), #< Fill, align and width without type. Field(format="_<098"), #< Fill, align, zero and width without type. ] FIELDS = EMPTY_FORMAT_FIELDS + NONEMPTY_FORMAT_FIELDS + INVALID_FORMAT_FIELDS def test_is_typed__returns_true_for_nonempty_format(self): fields = self.NONEMPTY_FORMAT_FIELDS + self.INVALID_FORMAT_FIELDS for field in fields: self.assertTrue(field.has_format, "Field: %s" % field) def test_is_typed__returns_false_for_empty_format(self): fields = self.EMPTY_FORMAT_FIELDS for field in fields: self.assertFalse(field.has_format, "Field: %s" % field) def test_format_spec__returns_none_if_format_is_empty(self): for field in self.EMPTY_FORMAT_FIELDS: self.assertIsNone(field.format_spec, "Field: %s" % field) def test_format_spec__if_format_is_nonempty_and_valid(self): for field in self.NONEMPTY_FORMAT_FIELDS: self.assertIsNotNone(field.format_spec) self.assertIsInstance(field.format_spec, FormatSpec) def test_format_spec__raises_error_if_nonempty_format_is_invalid(self): for field in self.INVALID_FORMAT_FIELDS: with self.assertRaises(ValueError): field.format_spec def 
test_format_spec__is_lazy_evaluated(self): fields = [Field(), Field("name"), Field("name", "type"), Field(format="type")] for field in fields: self.assertIsNone(field._format_spec) if field.format: _ = field.format_spec.type self.assertIsNotNone(field.format_spec) else: self.assertIsNone(field.format_spec) def test_set_format_invalidates_format_spec(self): field = Field(format="Number") self.assertEqual(field.format, "Number") self.assertEqual(field.format_spec.type, "Number") self.assertEqual(field.format_spec.align, None) field.set_format("d", "=Number", "^Number+"] for format in formats: format_spec = Field.extract_format_spec(format) expected_align = format[0] expected_type = format[1:] expected_spec = make_format_spec(type=expected_type, align=expected_align) self.assertEqual(format_spec, expected_spec) self.assertValidFormatAlign(format_spec.align) def test_extract_format_spec__with_fill_align_and_type(self): # -- ALIGN_CHARS = "<>=^" formats = ["Xd", "0=Number", " ^Number+"] for format in formats: format_spec = Field.extract_format_spec(format) expected_fill = format[0] expected_align = format[1] expected_type = format[2:] expected_spec = make_format_spec(type=expected_type, align=expected_align, fill=expected_fill) self.assertEqual(format_spec, expected_spec) self.assertValidFormatAlign(format_spec.align) # -- ALIGN_CHARS = "<>=^" FORMAT_AND_FORMAT_SPEC_DATA = [ ("^010Number+", make_format_spec(type="Number+", width="10", zero=True, align="^", fill=None)), ("X<010Number+", make_format_spec(type="Number+", width="10", zero=True, align="<", fill="X")), ("_>0098Number?", make_format_spec(type="Number?", width="098", zero=True, align=">", fill="_")), ("*=129Number*", make_format_spec(type="Number*", width="129", zero=False, align="=", fill="*")), ("X129Number?", make_format_spec(type="X129Number?", width="", zero=False, align=None, fill=None)), (".3Number", make_format_spec(type="Number", width="", zero=False, align=None, fill=None, precision="3")), 
("6.2Number", make_format_spec(type="Number", width="6", zero=False, align=None, fill=None, precision="2")), ] def test_extract_format_spec__with_all(self): for format, expected_spec in self.FORMAT_AND_FORMAT_SPEC_DATA: format_spec = Field.extract_format_spec(format) self.assertEqual(format_spec, expected_spec) self.assertValidFormatWidth(format_spec.width) if format_spec.align is not None: self.assertValidFormatAlign(format_spec.align) def test_make_format(self): for expected_format, format_spec in self.FORMAT_AND_FORMAT_SPEC_DATA: format = Field.make_format(format_spec) self.assertEqual(format, expected_format) format_spec2 = Field.extract_format_spec(format) self.assertEqual(format_spec2, format_spec) # ----------------------------------------------------------------------------- # TEST CASE: # ----------------------------------------------------------------------------- class TestFieldParser(TestCase): INVALID_FIELDS = ["", "{", "}", "xxx", "name:type", ":type"] VALID_FIELD_DATA = [ ("{}", Field()), ("{name}", Field("name")), ("{:type}", Field(format="type")), ("{name:type}", Field("name", "type")) ] #def assertFieldEqual(self, actual, expected): # message = "FAILED: %s == %s" % (actual, expected) # self.assertIsInstance(actual, Field) # self.assertIsInstance(expected, Field) # self.assertEqual(actual, expected, message) # # self.assertEqual(actual.name, expected.name, message) # # self.assertEqual(actual.format, expected.format, message) def test_parse__raises_error_with_missing_or_partial_braces(self): for field_text in self.INVALID_FIELDS: with self.assertRaises(ValueError): FieldParser.parse(field_text) def test_parse__with_valid_fields(self): for field_text, expected_field in self.VALID_FIELD_DATA: field = FieldParser.parse(field_text) self.assertEqual(field, expected_field) def test_extract_fields__without_field(self): prefix = "XXX ___" suffix = "XXX {{escaped_field}} {{escaped_field:xxx_type}} XXX" field_texts = [prefix, suffix, prefix + suffix, suffix 
+ prefix] for field_text in field_texts: fields = list(FieldParser.extract_fields(field_text)) self.assertEqual(len(fields), 0) def test_extract_fields__with_one_field(self): prefix = "XXX ___" suffix = "XXX {{escaped_field}} {{escaped_field:xxx_type}} XXX" for field_text, expected_field in self.VALID_FIELD_DATA: fields = list(FieldParser.extract_fields(field_text)) self.assertEqual(len(fields), 1) self.assertSequenceEqual(fields, [expected_field]) field_text2 = prefix + field_text + suffix fields2 = list(FieldParser.extract_fields(field_text2)) self.assertEqual(len(fields2), 1) self.assertSequenceEqual(fields, fields2) def test_extract_fields__with_many_fields(self): MANY_FIELDS_DATA = [ ("{}xxx{name2}", [Field(), Field("name2")]), ("{name1}yyy{:type2}", [Field("name1"), Field(format="type2")]), ("{:type1}xxx{name2}{name3:type3}", [Field(format="type1"), Field("name2"), Field("name3", "type3")]), ] prefix = "XXX ___" suffix = "XXX {{escaped_field}} {{escaped_field:xxx_type}} XXX" for field_text, expected_fields in MANY_FIELDS_DATA: fields = list(FieldParser.extract_fields(field_text)) self.assertEqual(len(fields), len(expected_fields)) self.assertSequenceEqual(fields, expected_fields) field_text2 = prefix + field_text + suffix fields2 = list(FieldParser.extract_fields(field_text2)) self.assertEqual(len(fields2), len(expected_fields)) self.assertSequenceEqual(fields2, expected_fields) def test_extract_types(self): MANY_TYPES_DATA = [ ("{}xxx{name2}", []), ("{name1}yyy{:type2}", ["type2"]), ("{:type1}xxx{name2}{name3:type3}", ["type1", "type3"]), ] for field_text, expected_types in MANY_TYPES_DATA: type_names = list(FieldParser.extract_types(field_text)) self.assertEqual(len(type_names), len(expected_types)) self.assertSequenceEqual(type_names, expected_types) # ----------------------------------------------------------------------------- # MAIN: # ----------------------------------------------------------------------------- if __name__ == '__main__': 
unittest.main() # Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. parse_type-0.6.4/tox.ini000066400000000000000000000067611467747561600152500ustar00rootroot00000000000000# ============================================================================ # TOX CONFIGURATION: parse_type # ============================================================================ # DESCRIPTION: # Use tox to run tasks (tests, ...) in a clean virtual environment. # Tox is configured by default for online usage. # # Run tox, like: # # tox -e py27 # Runs tox with python 2.7 # tox -e py39 # Runs tox with python 3.9 # tox # Runs tox with all installed python versions. # tox --parallel # Runs tox in parallel mode w/ all envs. 
# # SEE ALSO: # * https://tox.readthedocs.io/en/latest/config.html # ============================================================================ # -- ONLINE USAGE: # PIP_INDEX_URL = https://pypi.org/simple [tox] minversion = 3.10.0 envlist = py312, py311, py310, py39, doctest, pypy3 skip_missing_interpreters = True isolated_build = True # DISABLED: sitepackages = False # ----------------------------------------------------------------------------- # TEST ENVIRONMENTS: # ----------------------------------------------------------------------------- # install_command = pip install -U {opts} {packages} [testenv] install_command = pip install -U {opts} {packages} changedir = {toxinidir} commands = pytest {posargs:tests} deps = -r py.requirements/basic.txt -r py.requirements/testing.txt setenv = PYTHONPATH={toxinidir} TOXRUN = yes PYSETUP_BOOTSTRAP = no # -- SPECIAL CASE: # RELATED: https://github.com/pypa/virtualenv/issues/2284 -- macOS 12 Monterey related # NOTES: # * pip-install seems to need "--user" option. # * Script(s) do not seem to be installed any more (actually to $HOME/User area). 
[testenv:py27] # DISABLED: install_command = pip install --user -U {opts} {packages} install_command = pip install -U {opts} {packages} changedir = {toxinidir} commands= python -m pytest {posargs:tests} deps= {[testenv]deps} passenv = PYTHONPATH = {toxinidir} # MAYBE: allowlist_externals = curl # -- VIRTUAL-ENVIRONMENT SETUP PROCEDURE: For python 2.7 # virtualenv -p python2.7 .venv_py27 # source .venv_py27 # scripts/ensurepip_python27.sh # python -m pip install -r py.requirements/basic.txt # python -m pip install -r py.requirements/testing.txt [testenv:doctest] basepython = python3 commands = pytest --doctest-modules -v parse_type setenv = PYTHONPATH={toxinidir} # ----------------------------------------------------------------------------- # MORE TEST ENVIRONMENTS: # ----------------------------------------------------------------------------- [testenv:coverage] basepython = python3 commands = pytest --cov=parse_type {posargs:tests} coverage combine coverage html coverage xml deps = {[testenv]deps} pytest-cov coverage>=4.0 setenv = PYTHONPATH={toxinidir} [testenv:install] basepython = python3 changedir = {envdir} commands = python ../../setup.py install -q {toxinidir}/bin/toxcmd.py copytree ../../tests . pytest {posargs:tests} deps = {[testenv]deps} setenv = PYTHONPATH={toxinidir} # ----------------------------------------------------------------------------- # SELDOM USED TEST ENVIRONMENTS: # ----------------------------------------------------------------------------- # -- ENSURE: README.rst is well-formed. # python setup.py --long-description | rst2html.py >output.html [testenv:check_setup] changedir = {toxinidir} commands= python setup.py --long-description > output.tmp rst2html.py output.tmp output.html deps = docutils