==> poetry-core-2.1.1/.gitattributes <==

poetry.lock linguist-generated=true
vendors/poetry.lock linguist-generated=true
poetry/core/_vendor/** linguist-generated=true
poetry/core/_vendor/vendor.txt linguist-generated=false

==> poetry-core-2.1.1/.github/ISSUE_TEMPLATE/config.yml <==

# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: false
contact_links:
  - name: '✏️ Poetry Issue Tracker'
    url: https://github.com/python-poetry/poetry/issues/new/choose
    about: |
      Submit your issues to the Poetry issue tracker. Bug reports and feature requests will be tracked there.
  - name: '💬 Discord Server'
    url: https://discordapp.com/invite/awxPgve
    about: |
      Chat with the community, ask questions and learn about best practices.

==> poetry-core-2.1.1/.github/PULL_REQUEST_TEMPLATE.md <==

Resolves: python-poetry#

- [ ] Added **tests** for changed code.
- [ ] Updated **documentation** for changed code.

==> poetry-core-2.1.1/.github/workflows/downstream.yaml <==

name: Poetry Downstream Tests

on:
  workflow_dispatch:
  pull_request: {}
  push:
    branches: [main]

permissions: {}

jobs:
  tests:
    name: ${{ matrix.ref }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        ref: ["main"]
      fail-fast: false
    defaults:
      run:
        shell: bash
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false
          path: poetry-core

      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false
          path: poetry
          repository: python-poetry/poetry
          ref: ${{ matrix.ref }}

      - name: Set up Python 3.10
        uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
        with:
          python-version: "3.10"

      - name: Get full python version
        id: full-python-version
        run: echo version=$(python -c "import sys; print('-'.join(str(v) for v in sys.version_info))") >> $GITHUB_OUTPUT

      - name: Set up Poetry
        run: |
          pip install poetry
          poetry config virtualenvs.in-project true

      - name: Set up cache
        uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
        id: cache
        with:
          path: ./poetry/.venv
          key: venv-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('**/poetry.lock') }}

      - name: Ensure cache is healthy
        if: steps.cache.outputs.cache-hit == 'true'
        working-directory: ./poetry
        run: timeout 10s poetry run pip --version >/dev/null 2>&1 || rm -rf .venv

      - name: Switch downstream to development poetry-core
        working-directory: ./poetry
        run: |
          # remove poetry-core from main group to avoid version conflicts
          # with a potential entry in the test group
          poetry remove poetry-core
          # add to test group to overwrite a potential entry in that group
          poetry add --lock --group test ../poetry-core

      - name: Install downstream dependencies
        working-directory: ./poetry
        run: |
          # force update of directory dependency in cached venv
          # (even if directory dependency with same version is already installed)
          poetry run pip uninstall -y poetry-core
          poetry install

      # TODO: mark run as success even when this fails and add comment to PR instead
      - name: Run downstream test suite
        working-directory: ./poetry
        run: poetry run pytest

==> poetry-core-2.1.1/.github/workflows/release.yaml <==

name: Release

on:
  release:
    types: [published]

permissions: {}

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - run: pipx run build

      - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
        with:
          name: distfiles
          path: dist/
          if-no-files-found: error

  upload-github:
    name: Upload (GitHub)
    runs-on: ubuntu-latest
    permissions:
      contents: write
    needs: build
    steps:
      # We need to be in a git repo for gh to work.
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
        with:
          name: distfiles
          path: dist/

      - run: gh release upload "${TAG_NAME}" dist/*.{tar.gz,whl}
        env:
          GH_TOKEN: ${{ github.token }}
          TAG_NAME: ${{ github.event.release.tag_name }}

  upload-pypi:
    name: Upload (PyPI)
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/project/poetry-core/
    permissions:
      id-token: write
    needs: build
    steps:
      - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
        with:
          name: distfiles
          path: dist/

      - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4
        with:
          print-hash: true

==> poetry-core-2.1.1/.github/workflows/tests.yaml <==

name: Tests

on:
  pull_request: {}
  push:

env:
  PYTHONWARNDEFAULTENCODING: 'true'

permissions: {}

jobs:
  tests:
    name: ${{ matrix.os }} / ${{ matrix.python-version }}
    runs-on: "${{ matrix.os }}-latest"
    strategy:
      matrix:
        os:
          - Ubuntu
          - MacOS
          - Windows
        python-version:
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
          - "3.13"
        include:
          - os: Ubuntu
            python-version: pypy-3.10
      fail-fast: false
    defaults:
      run:
        shell: bash
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
        with:
          python-version: ${{ matrix.python-version }}
          allow-prereleases: true

      - name: Get full Python version
        id: full-python-version
        run: echo version=$(python -c "import sys; print('-'.join(str(v) for v in sys.version_info))") >> $GITHUB_OUTPUT

      - name: Bootstrap poetry
        run: |
          curl -sSL https://install.python-poetry.org | python - -y

      - name: Update PATH
        if: ${{ matrix.os != 'Windows' }}
        run: echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Update Path for Windows
        if: ${{ matrix.os == 'Windows' }}
        run: echo "$APPDATA\Python\Scripts" >> $GITHUB_PATH

      - name: Configure poetry
        run: poetry config virtualenvs.in-project true

      - name: Set up cache
        uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
        id: cache
        with:
          path: .venv
          key: venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('**/poetry.lock') }}
      - name: Ensure cache is healthy
        if: steps.cache.outputs.cache-hit == 'true'
        run: |
          # `timeout` is not available on macOS, so we define a custom function.
          [ "$(command -v timeout)" ] || function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; }

          # Using `timeout` is a safeguard against the Poetry command hanging for some reason.
          timeout 10s poetry run pip --version || rm -rf .venv

      - name: Check lock file
        run: poetry check --lock

      - name: Install dependencies
        run: poetry install

      - name: Run tests
        run: poetry run python -m pytest -p no:sugar -q tests/

      - name: Run integration tests
        run: poetry run python -m pytest -p no:sugar --integration -q tests/integration

      - name: Run mypy
        run: poetry run mypy

==> poetry-core-2.1.1/.github/workflows/update-licenses.yaml <==

name: Update Licenses

on:
  workflow_dispatch:
  schedule:
    - cron: "0 0 1 * *" # run once a month

# we create the token we need later on
permissions: {}

jobs:
  update-licenses:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - name: Set up Python 3.10
        uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
        with:
          python-version: "3.10"

      - name: Bootstrap poetry
        run: |
          curl -sSL https://install.python-poetry.org | python - -y

      - name: Update PATH
        run: echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Configure poetry
        run: poetry config virtualenvs.in-project true

      - name: Install dependencies
        run: poetry install

      - name: Update licenses list
        run: |
          poetry run python src/poetry/core/spdx/helpers.py
          poetry run pre-commit run --all-files || :

      - name: Generate token
        id: generate_token
        uses: tibdex/github-app-token@v2
        with:
          app_id: ${{ secrets.POETRY_TOKEN_APP_ID }}
          private_key: ${{ secrets.POETRY_TOKEN_APP_KEY }}

      - name: Create PR if necessary
        uses: peter-evans/create-pull-request@v5
        with:
          token: ${{ steps.generate_token.outputs.token }}
          commit-message: "Automated licenses list update"
          author: "github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>"
          branch: "license-autoupdate"
          title: "Automated licenses list update"
          body: "Full log: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"

==> poetry-core-2.1.1/.gitignore <==

*.pyc

# Packages
*.egg
!/tests/**/*.egg
/*.egg-info
/tests/fixtures/**/*.egg-info
/dist/*
build
_build
.cache
*.so

# Installer logs
pip-log.txt

# Unit test / coverage reports
.coverage
.tox
.pytest_cache

.DS_Store
.idea/*
.python-version
.vscode/*

/test.py
/test_*.*

/setup.cfg
MANIFEST.in
/setup.py
/docs/site/*
/tests/fixtures/simple_project/setup.py
/tests/fixtures/project_with_extras/setup.py
.mypy_cache
.venv
/releases/*
pip-wheel-metadata
/poetry.toml

==> poetry-core-2.1.1/.pre-commit-config.yaml <==

exclude: |
  (?x)(
    ^tests/.*/fixtures/.*
    | ^src/poetry/core/_vendor
  )

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-merge-conflict
      - id: check-case-conflict
      - id: check-json
      - id: check-toml
        exclude: tests/fixtures/project_duplicate_dependency/pyproject.toml
      - id: check-yaml
      - id: pretty-format-json
        args:
          - --autofix
          - --no-ensure-ascii
          - --no-sort-keys
      - id: check-ast
      - id: debug-statements
      - id: check-docstring-first

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.9.6
    hooks:
      - id: ruff
      - id: ruff-format

  - repo: https://github.com/woodruffw/zizmor-pre-commit
    rev: v1.3.1
    hooks:
      - id: zizmor

==> poetry-core-2.1.1/CHANGELOG.md <==

# Change Log

## [2.1.1] - 2025-02-16

### Fixed

- Fix an issue where simplifying a `python_version` marker resulted in an invalid marker ([#838](https://github.com/python-poetry/poetry-core/pull/838)).

## [2.1.0] - 2025-02-15

### Added

- Pass a local version label to the build backend interface ([#814](https://github.com/python-poetry/poetry-core/pull/814)).
- Expose build-system dependencies via the `poetry` instance ([#319](https://github.com/python-poetry/poetry-core/pull/319)).
- Add a `has_upper_bound` method to `VersionConstraint` ([#833](https://github.com/python-poetry/poetry-core/pull/833)).

### Changed

- Improve performance of calculating intersections and unions of `extra` markers ([#818](https://github.com/python-poetry/poetry-core/pull/818)).
- Improve performance of calculating intersections and unions of complex markers ([#821](https://github.com/python-poetry/poetry-core/pull/821), [#832](https://github.com/python-poetry/poetry-core/pull/832)).
- Improve performance of marker operations by simplifying `python_version` markers ([#826](https://github.com/python-poetry/poetry-core/pull/826)).
- Improve performance by caching parsed requirements ([#828](https://github.com/python-poetry/poetry-core/pull/828)).
- Improve the error message when a referenced license file is missing ([#827](https://github.com/python-poetry/poetry-core/pull/827)).

### Fixed

- Fix an issue where inclusive ordering with post releases was inconsistent with PEP 440 ([#379](https://github.com/python-poetry/poetry-core/pull/379)).
- Fix an issue where invalid URI tokens in PEP 508 requirement strings were silently discarded ([#817](https://github.com/python-poetry/poetry-core/pull/817)).
- Fix an issue where wrong markers were calculated when removing parts covered by the project's python constraint ([#824](https://github.com/python-poetry/poetry-core/pull/824)).
- Fix an issue where optional dependencies that are not part of an extra were included in the wheel metadata ([#830](https://github.com/python-poetry/poetry-core/pull/830)).
- Fix an issue where the `__pycache__` directory and `*.pyc` files were included in sdists and wheels ([#835](https://github.com/python-poetry/poetry-core/pull/835)).

## [2.0.1] - 2025-01-11

### Changed

- Replace the deprecated core metadata field `Home-page` with `Project-URL: Homepage` ([#807](https://github.com/python-poetry/poetry-core/pull/807)).

### Fixed

- Fix an issue where includes from `tool.poetry.packages` without a specified `format` were not initialized with the default value, resulting in a `KeyError` ([#805](https://github.com/python-poetry/poetry-core/pull/805)).
- Fix an issue where some `project.urls` entries were not processed correctly, resulting in a `KeyError` ([#807](https://github.com/python-poetry/poetry-core/pull/807)).
- Fix an issue where dynamic `project.dependencies` via `tool.poetry.dependencies` were ignored if `project.optional-dependencies` were defined ([#811](https://github.com/python-poetry/poetry-core/pull/811)).

## [2.0.0] - 2025-01-04

### Added

- **Add support for the `project` section in the `pyproject.toml` file according to PEP 621** ([#708](https://github.com/python-poetry/poetry-core/pull/708), [#792](https://github.com/python-poetry/poetry-core/pull/792)).
- Add support for non-PEP 440 compliant versions in the `platform_release` marker ([#722](https://github.com/python-poetry/poetry-core/pull/722)).
- Add support for string comparisons with `in` / `not in` in generic constraints ([#722](https://github.com/python-poetry/poetry-core/pull/722)).
- Add support for script files that are generated by a build script ([#710](https://github.com/python-poetry/poetry-core/pull/710)).
- Add support for `SOURCE_DATE_EPOCH` when building packages ([#766](https://github.com/python-poetry/poetry-core/pull/766), [#781](https://github.com/python-poetry/poetry-core/pull/781)).

### Changed

- Drop support for Python 3.8 ([#798](https://github.com/python-poetry/poetry-core/pull/798)).
- Create `METADATA` files with version 2.3 instead of 2.2 ([#707](https://github.com/python-poetry/poetry-core/pull/707)).
- Normalize source vcs URLs ([#701](https://github.com/python-poetry/poetry-core/pull/701)).
- Make `allow-prereleases` a tri-state setting ([#783](https://github.com/python-poetry/poetry-core/pull/783)).
- Rename exceptions to have an `Error` suffix ([#767](https://github.com/python-poetry/poetry-core/pull/767)).
- Remove support for `x` in version constraints ([#770](https://github.com/python-poetry/poetry-core/pull/770)).
- Remove support for scripts with extras ([#708](https://github.com/python-poetry/poetry-core/pull/708)).
- Remove deprecated features and interfaces ([#702](https://github.com/python-poetry/poetry-core/pull/702), [#769](https://github.com/python-poetry/poetry-core/pull/769)).
- Deprecate `tool.poetry.dev-dependencies` in favor of `tool.poetry.group.dev.dependencies` ([#754](https://github.com/python-poetry/poetry-core/pull/754)).
- Deprecate `Package.python_marker` ([#446](https://github.com/python-poetry/poetry-core/pull/446)).
- Improve Cygwin git support under Windows ([#704](https://github.com/python-poetry/poetry-core/pull/704)).
- Improve the error message when the `pyproject.toml` file cannot be parsed ([#734](https://github.com/python-poetry/poetry-core/pull/734)).
- Improve handling of `readme` files ([#752](https://github.com/python-poetry/poetry-core/pull/752)).
- Improve error handling when the Python constraint is empty ([#761](https://github.com/python-poetry/poetry-core/pull/761)).
- Improve performance for creating a PEP 508 requirement from a dependency ([#779](https://github.com/python-poetry/poetry-core/pull/779)).
- Update list of supported licenses ([#706](https://github.com/python-poetry/poetry-core/pull/706), [#718](https://github.com/python-poetry/poetry-core/pull/718), [#727](https://github.com/python-poetry/poetry-core/pull/727), [#736](https://github.com/python-poetry/poetry-core/pull/736), [#746](https://github.com/python-poetry/poetry-core/pull/746), [#755](https://github.com/python-poetry/poetry-core/pull/755), [#764](https://github.com/python-poetry/poetry-core/pull/764), [#784](https://github.com/python-poetry/poetry-core/pull/784), [#787](https://github.com/python-poetry/poetry-core/pull/787), [#795](https://github.com/python-poetry/poetry-core/pull/795)).

### Fixed

- Fix an issue where the `platlib` directory of the wrong Python was used ([#726](https://github.com/python-poetry/poetry-core/pull/726)).
- Fix handling of generic constraints ([#732](https://github.com/python-poetry/poetry-core/pull/732)).
- Fix an issue where building a wheel in a nested output directory resulted in an error ([#762](https://github.com/python-poetry/poetry-core/pull/762)).
- Fix an issue where `+` was not allowed in git URL paths ([#765](https://github.com/python-poetry/poetry-core/pull/765)).
- Fix an issue where the temporary directory was not cleaned up on error ([#775](https://github.com/python-poetry/poetry-core/pull/775)).
- Fix an issue where the regular expression for author names was too restrictive ([#517](https://github.com/python-poetry/poetry-core/pull/517)).
- Fix an issue where basic auth http(s) credentials could not be parsed ([#791](https://github.com/python-poetry/poetry-core/pull/791)).

### Vendoring

- [`fastjsonschema==2.21.1`](https://github.com/horejsek/python-fastjsonschema/blob/master/CHANGELOG.txt)
- [`lark==1.2.2`](https://github.com/lark-parser/lark/releases/tag/1.2.2)
- [`packaging==24.2`](https://github.com/pypa/packaging/blob/main/CHANGELOG.rst)
- [`tomli==2.2.1`](https://github.com/hukkin/tomli/blob/master/CHANGELOG.md)

## [1.9.1] - 2024-10-13

### Added

- Add `3.13` to the list of available Python versions ([#747](https://github.com/python-poetry/poetry-core/pull/747)).

## [1.9.0] - 2024-02-02

### Added

- Add a `to` key in `tool.poetry.packages` to allow custom subpackage names ([#672](https://github.com/python-poetry/poetry-core/pull/672)).
- Add support for path dependencies that do not define a build system ([#675](https://github.com/python-poetry/poetry-core/pull/675)).
- Add a `tool.poetry.package-mode` key to support non-package mode ([#661](https://github.com/python-poetry/poetry-core/pull/661)).

### Changed

- Update list of supported licenses ([#659](https://github.com/python-poetry/poetry-core/pull/659), [#669](https://github.com/python-poetry/poetry-core/pull/669), [#678](https://github.com/python-poetry/poetry-core/pull/678), [#694](https://github.com/python-poetry/poetry-core/pull/694)).
- Improve support for PEP 691 JSON-based Simple API ([#664](https://github.com/python-poetry/poetry-core/pull/664)).
- Establish zipapp compatibility ([#670](https://github.com/python-poetry/poetry-core/pull/670)).
- Rework list of files included in build artifacts ([#666](https://github.com/python-poetry/poetry-core/pull/666)).
- Improve performance by treating collections in packages as immutable ([#663](https://github.com/python-poetry/poetry-core/pull/663)).
- Deprecate `poetry.core.masonry.builder` ([#682](https://github.com/python-poetry/poetry-core/pull/682)).
- Deprecate scripts that depend on extras ([#690](https://github.com/python-poetry/poetry-core/pull/690)).

### Fixed

- Fix an issue where insignificant errors were printed if the working directory is not inside a git repository ([#684](https://github.com/python-poetry/poetry-core/pull/684)).
- Fix an issue where the project's directory was not recognized as a git repository on Windows due to an encoding issue ([#685](https://github.com/python-poetry/poetry-core/pull/685)).

### Vendoring

- [`fastjsonschema==2.19.1`](https://github.com/horejsek/python-fastjsonschema/blob/master/CHANGELOG.txt)
- [`lark==1.1.9`](https://github.com/lark-parser/lark/releases/tag/1.1.9)

## [1.8.1] - 2023-10-31

### Fixed

- Fix an issue where git URLs starting with `git+` could not be parsed anymore ([#657](https://github.com/python-poetry/poetry-core/pull/657)).

## [1.8.0] - 2023-10-31

### Added

- Add `3.12` to the list of available Python versions ([#631](https://github.com/python-poetry/poetry-core/pull/631)).
- Add support for creating packages dynamically in the build script ([#629](https://github.com/python-poetry/poetry-core/pull/629)).

### Changed

- Improve marker logic for `extra` markers ([#636](https://github.com/python-poetry/poetry-core/pull/636)).
- Update list of supported licenses ([#635](https://github.com/python-poetry/poetry-core/pull/635), [#646](https://github.com/python-poetry/poetry-core/pull/646)).
- Deprecate `Dependency.transitive_python_versions` ([#648](https://github.com/python-poetry/poetry-core/pull/648)).
- Deprecate `Dependency.transitive_python_constraint` ([#649](https://github.com/python-poetry/poetry-core/pull/649)).

### Fixed

- Fix an issue where projects with extension modules were not installed in editable mode ([#633](https://github.com/python-poetry/poetry-core/pull/633)).
- Fix an issue where the wrong or no `lib` folder was added to the wheel ([#634](https://github.com/python-poetry/poetry-core/pull/634)).

### Vendoring

- Replace [`jsonschema`](https://github.com/python-jsonschema/jsonschema) with [`fastjsonschema`](https://github.com/horejsek/python-fastjsonschema).
- [`lark==1.1.8`](https://github.com/lark-parser/lark/releases/tag/1.1.8)
- [`packaging==23.2`](https://github.com/pypa/packaging/blob/main/CHANGELOG.rst)

## [1.7.0] - 2023-08-20

### Added

- Optionally use resolved references when converting a VCS dependency to a PEP 508 dependency specification ([#603](https://github.com/python-poetry/poetry-core/pull/603)).
- Improve performance of marker handling ([#609](https://github.com/python-poetry/poetry-core/pull/609)).

### Changed

- Drop support for Python 3.7 ([#566](https://github.com/python-poetry/poetry-core/pull/566)).
- Remove deprecated `poetry.core.constraints.generic` and `poetry.core.semver` ([#601](https://github.com/python-poetry/poetry-core/pull/601)).
- Allow `|` as a value separator in markers with the operators `in` and `not in` ([#608](https://github.com/python-poetry/poetry-core/pull/608)).
- Put pretty name (instead of normalized name) in metadata ([#620](https://github.com/python-poetry/poetry-core/pull/620)).
- Update list of supported licenses ([#623](https://github.com/python-poetry/poetry-core/pull/623)).

### Fixed

- Fix an issue where the encoding was not handled correctly when calling a subprocess ([#602](https://github.com/python-poetry/poetry-core/pull/602)).
- Fix an issue where caret constraints with additional whitespace could not be parsed ([#606](https://github.com/python-poetry/poetry-core/pull/606)).
- Fix an issue where PEP 508 dependency specifications with names starting with a digit could not be parsed ([#607](https://github.com/python-poetry/poetry-core/pull/607)).
- Fix an issue where Poetry considered an unrelated `.gitignore` file, resulting in an empty wheel ([#611](https://github.com/python-poetry/poetry-core/pull/611)).

### Vendoring

- [`lark==1.1.7`](https://github.com/lark-parser/lark/releases/tag/1.1.7)

## [1.6.1] - 2023-05-29

### Fixed

- Fix an endless recursion in marker handling ([#593](https://github.com/python-poetry/poetry-core/pull/593)).
- Fix an issue where the wheel tag was not built correctly under certain circumstances ([#591](https://github.com/python-poetry/poetry-core/pull/591)).
- Fix an issue where the tests included in the sdist failed due to missing files ([#589](https://github.com/python-poetry/poetry-core/pull/589)).

## [1.6.0] - 2023-05-14

### Added

- Improve error message for invalid markers ([#569](https://github.com/python-poetry/poetry-core/pull/569)).
- Increase robustness when deleting temporary directories on Windows ([#460](https://github.com/python-poetry/poetry-core/pull/460)).
- Add support for file dependencies with subdirectories ([#467](https://github.com/python-poetry/poetry-core/pull/467)).

### Changed

- Replace `tomlkit` with `tomli`, which changes the interface of some _internal_ classes ([#483](https://github.com/python-poetry/poetry-core/pull/483)).
- Deprecate `Package.category` ([#561](https://github.com/python-poetry/poetry-core/pull/561)).

### Fixed

- Fix a performance regression in marker handling ([#568](https://github.com/python-poetry/poetry-core/pull/568)).
- Fix an issue where wildcard version constraints were not handled correctly ([#402](https://github.com/python-poetry/poetry-core/pull/402)).
- Fix an issue where `poetry build` created duplicate Python classifiers if they were specified manually ([#578](https://github.com/python-poetry/poetry-core/pull/578)).
- Fix an issue where local versions were not handled correctly ([#579](https://github.com/python-poetry/poetry-core/pull/579)).

### Vendoring

- [`attrs==23.1.0`](https://github.com/python-attrs/attrs/blob/main/CHANGELOG.md)
- [`packaging==23.1`](https://github.com/pypa/packaging/blob/main/CHANGELOG.rst)
- [`tomli==2.0.1`](https://github.com/hukkin/tomli/blob/master/CHANGELOG.md)
- [`typing-extensions==4.5.0`](https://github.com/python/typing_extensions/blob/main/CHANGELOG.md)

## [1.5.2] - 2023-03-13

### Fixed

- Fix an issue where wheels built on Windows could contain duplicate entries in the RECORD file ([#555](https://github.com/python-poetry/poetry-core/pull/555)).

## [1.5.1] - 2023-02-20

### Changed

- Improve performance by caching parsed markers, constraints and versions ([#556](https://github.com/python-poetry/poetry-core/pull/556)).

## [1.5.0] - 2023-01-27

### Added

- Improve marker handling ([#528](https://github.com/python-poetry/poetry-core/pull/528), [#534](https://github.com/python-poetry/poetry-core/pull/534), [#530](https://github.com/python-poetry/poetry-core/pull/530), [#546](https://github.com/python-poetry/poetry-core/pull/546), [#547](https://github.com/python-poetry/poetry-core/pull/547)).
- Allow overriding the output directory when building dist files ([#527](https://github.com/python-poetry/poetry-core/pull/527)).
- Validate whether dependencies referenced in `extras` are defined in the main dependency group ([#542](https://github.com/python-poetry/poetry-core/pull/542)).
- Improve handling of generic constraints ([#515](https://github.com/python-poetry/poetry-core/pull/515)).

### Changed

- Deprecate the hash function of `FileDependency` ([#535](https://github.com/python-poetry/poetry-core/pull/535)).
- Do not set `allows_prereleases` implicitly anymore if the lower bound of a constraint is a pre-release ([#543](https://github.com/python-poetry/poetry-core/pull/543)).
- Poetry no longer generates a `setup.py` file in sdists by default ([#318](https://github.com/python-poetry/poetry-core/pull/318)).
- Remove the unused `platform` attribute from `Package` ([#548](https://github.com/python-poetry/poetry-core/pull/548)).
- Deprecate the `pretty_version` parameter when creating a `Package` ([#549](https://github.com/python-poetry/poetry-core/pull/549)).
- Validate path dependencies during use instead of during construction ([#520](https://github.com/python-poetry/poetry-core/pull/520)).

### Fixed

- Fix an issue where the PEP 517 `metadata_directory` was not respected when building an editable wheel ([#537](https://github.com/python-poetry/poetry-core/pull/537)).
- Fix an issue where trailing newlines were allowed in `tool.poetry.description` ([#505](https://github.com/python-poetry/poetry-core/pull/505)).
- Fix an issue where the name of the data folder in wheels was not normalized ([#532](https://github.com/python-poetry/poetry-core/pull/532)).
- Fix an issue where the order of entries in the RECORD file was not deterministic ([#545](https://github.com/python-poetry/poetry-core/pull/545)).
- Fix an issue where parsing of VCS URLs with escaped characters failed ([#524](https://github.com/python-poetry/poetry-core/pull/524)).
- Fix an issue where the subdirectory parameter of VCS URLs was not respected ([#518](https://github.com/python-poetry/poetry-core/pull/518)).
- Fix an issue where zero padding was not correctly handled in version comparisons ([#540](https://github.com/python-poetry/poetry-core/pull/540)).
- Fix an issue where sdist builds did not support multiple READMEs ([#486](https://github.com/python-poetry/poetry-core/pull/486)).

### Vendoring

- [`attrs==22.2.0`](https://github.com/python-attrs/attrs/blob/main/CHANGELOG.md)
- [`jsonschema==4.17.3`](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst)
- [`lark==1.1.5`](https://github.com/lark-parser/lark/releases/tag/1.1.5)
- [`packaging==23.0`](https://github.com/pypa/packaging/blob/main/CHANGELOG.rst)
- [`pyrsistent==0.19.3`](https://github.com/tobgu/pyrsistent/blob/master/CHANGES.txt)

## [1.4.0] - 2022-11-22

### Added

- The PEP 517 `metadata_directory` is now respected as an input to the `build_wheel` hook ([#487](https://github.com/python-poetry/poetry-core/pull/487)).

### Changed

- Sources are now considered more carefully when dealing with dependencies with environment markers ([#497](https://github.com/python-poetry/poetry-core/pull/497)).
- `EmptyConstraint` is now hashable ([#513](https://github.com/python-poetry/poetry-core/pull/513)).
- `ParseConstraintError` is now raised on version and constraint parsing errors, and includes information on the package that caused the error ([#514](https://github.com/python-poetry/poetry-core/pull/514)).

### Fixed

- Fix an issue where invalid PEP 508 requirements were generated due to a missing space before semicolons ([#510](https://github.com/python-poetry/poetry-core/pull/510)).
- Fix an issue where relative paths were encoded into package requirements, instead of a `file://` URL as required by PEP 508 ([#512](https://github.com/python-poetry/poetry-core/pull/512)).

### Vendoring

- [`jsonschema==4.17.0`](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst)
- [`lark==1.1.4`](https://github.com/lark-parser/lark/releases/tag/1.1.4)
- [`pyrsistent==0.19.2`](https://github.com/tobgu/pyrsistent/blob/master/CHANGES.txt)
- [`tomlkit==0.11.6`](https://github.com/sdispater/tomlkit/blob/master/CHANGELOG.md)
- [`typing-extensions==4.4.0`](https://github.com/python/typing_extensions/blob/main/CHANGELOG.md)

## [1.3.2] - 2022-10-07

### Fixed

- Fix an issue where the normalization was not applied to the path of an sdist built using a PEP 517 frontend ([#495](https://github.com/python-poetry/poetry-core/pull/495)).

## [1.3.1] - 2022-10-05

### Fixed

- Fix an issue where a typing-driven assertion could be false at runtime, causing a failure during `prepare_metadata_for_build_wheel` ([#492](https://github.com/python-poetry/poetry-core/pull/492)).

## [1.3.0] - 2022-10-05

### Added

- Add `3.11` to the list of available Python versions ([#477](https://github.com/python-poetry/poetry-core/pull/477)).

### Changed

- Deprecate `poetry.core.constraints.generic`, which is replaced by `poetry.core.packages.constraints` ([#482](https://github.com/python-poetry/poetry-core/pull/482)).
- Deprecate `poetry.core.semver`, which is replaced by `poetry.core.constraints.version` ([#482](https://github.com/python-poetry/poetry-core/pull/482)).

### Fixed

- Fix an issue where versions were escaped wrongly when building the wheel name ([#469](https://github.com/python-poetry/poetry-core/pull/469)).
- Fix an issue where caret constraints of pre-releases with a major version of 0 resulted in an empty version range ([#475](https://github.com/python-poetry/poetry-core/pull/475)).
- Fix an issue where the names of extras were not normalized according to PEP 685 ([#476](https://github.com/python-poetry/poetry-core/pull/476)).
- Fix an issue where sdist names were not normalized ([#484](https://github.com/python-poetry/poetry-core/pull/484)).

## [1.2.0] - 2022-09-13

### Added

- Added support for subdirectories in `url` dependencies ([#398](https://github.com/python-poetry/poetry-core/pull/398)).

### Changed

- When setting an invalid version constraint, an error is raised instead of silently setting "any version" ([#461](https://github.com/python-poetry/poetry-core/pull/461)).
- Allow more characters in author names ([#411](https://github.com/python-poetry/poetry-core/pull/411)).

### Fixed

- Fixed an issue where incorrect `Requires-Dist` information was generated when environment markers were used for optional packages ([#462](https://github.com/python-poetry/poetry-core/pull/462)).
- Fixed an issue where incorrect python constraints were parsed from environment markers ([#457](https://github.com/python-poetry/poetry-core/pull/457)).
- Fixed the hashing of markers and constraints ([#466](https://github.com/python-poetry/poetry-core/pull/466)).
- Fixed an issue where the PEP 508 name of directory dependencies used platform paths ([#463](https://github.com/python-poetry/poetry-core/pull/463)).

## [1.1.0] - 2022-08-31

- No functional changes.

## [1.1.0rc3] - 2022-08-26

### Fixed

- Fixed an issue where a malformed URL was passed to pip when installing from a git subdirectory ([#451](https://github.com/python-poetry/poetry-core/pull/451)).

## [1.1.0rc2] - 2022-08-26

### Changed

- Enabled setting `version` of `ProjectPackage` to support dynamically setting the project's package version (e.g. from a plugin) ([#447](https://github.com/python-poetry/poetry-core/pull/447)).

### Fixed

- Fixed an issue where the `authors` property was not detected ([#437](https://github.com/python-poetry/poetry-core/pull/437)).
- Fixed an issue where submodules of git dependencies were not checked out ([#439](https://github.com/python-poetry/poetry-core/pull/439)).
- Fixed an issue with Python constraints from markers ([#448](https://github.com/python-poetry/poetry-core/pull/448)).
- Fixed an issue where the latest version of a git dependency was selected instead of the locked one ([#449](https://github.com/python-poetry/poetry-core/pull/449)).

## [1.1.0rc1] - 2022-08-17

### Changed

- Replaced Poetry's helper method `canonicalize_name()` by `packaging.utils.canonicalize_name()` ([#418](https://github.com/python-poetry/poetry-core/pull/418)).
- Removed unused code ([#419](https://github.com/python-poetry/poetry-core/pull/419)).

### Fixed

- Fixed an issue with markers that resulted in incorrectly resolved extra dependencies ([#415](https://github.com/python-poetry/poetry-core/pull/415)).
- Fixed an issue where equal markers did not have the same hash ([#417](https://github.com/python-poetry/poetry-core/pull/417)).
- Fixed `allows_any()` for local versions ([#433](https://github.com/python-poetry/poetry-core/pull/433)).
- Fixed special cases of `next_major()`, `next_minor()`, etc. and deprecated ambiguous usage ([#434](https://github.com/python-poetry/poetry-core/pull/434)).
- Fixed an issue with Python constraints from markers ([#436](https://github.com/python-poetry/poetry-core/pull/436)).

## [1.1.0b3] - 2022-07-09

### Added

- Added support for valid PEP 517 projects with a build system other than poetry-core as directory dependencies ([#368](https://github.com/python-poetry/poetry-core/pull/368), [#377](https://github.com/python-poetry/poetry-core/pull/377)).
- Added support for yanked files and releases according to PEP 592 ([#400](https://github.com/python-poetry/poetry-core/pull/400)).

### Changed

- Relaxed schema validation to allow additional properties ([#369](https://github.com/python-poetry/poetry-core/pull/369)).
- Harmonized string representation of dependencies ([#393](https://github.com/python-poetry/poetry-core/pull/393)).
- Changed wheel name normalization to follow the most recent packaging specification ([#394](https://github.com/python-poetry/poetry-core/pull/394)).
- Changed the equality check of direct origin dependencies, so that constraints are not considered anymore ([#405](https://github.com/python-poetry/poetry-core/pull/405)).
- Deprecated `Dependency.set_constraint()` and replaced it by a `constraint` property for consistency ([#370](https://github.com/python-poetry/poetry-core/pull/370)).
- Removed `Package.requires_extras` ([#374](https://github.com/python-poetry/poetry-core/pull/374)).
- Improved marker handling ([#380](https://github.com/python-poetry/poetry-core/pull/380), [#383](https://github.com/python-poetry/poetry-core/pull/383), [#384](https://github.com/python-poetry/poetry-core/pull/384), [#390](https://github.com/python-poetry/poetry-core/pull/390), [#395](https://github.com/python-poetry/poetry-core/pull/395)).

### Fixed

- Fixed the hash method for `PackageSpecification`, `Package`, `Dependency` and their subclasses ([#370](https://github.com/python-poetry/poetry-core/pull/370)).
- Fixed merging of the markers `python_version` and `python_full_version` ([#382](https://github.com/python-poetry/poetry-core/pull/382), [#388](https://github.com/python-poetry/poetry-core/pull/388)).
- Fixed python version normalization ([#385](https://github.com/python-poetry/poetry-core/pull/385), [#407](https://github.com/python-poetry/poetry-core/pull/407)).
- Fixed an issue where version identifiers with a local version segment allowed non-local versions ([#396](https://github.com/python-poetry/poetry-core/pull/396)).
- Fixed an issue where version identifiers without a post release segment allowed post releases ([#396](https://github.com/python-poetry/poetry-core/pull/396)).
- Fixed script definitions that didn't work when extras were not explicitly defined ([#404](https://github.com/python-poetry/poetry-core/pull/404)).

## [1.1.0b2] - 2022-05-24

### Fixed

- Fixed a regression where `poetry-core` no longer handled improper Python version constraints from package metadata ([#371](https://github.com/python-poetry/poetry-core/pull/371)).
- Fixed a missing version bump in `poetry.core.__version__` ([#367](https://github.com/python-poetry/poetry-core/pull/367)).

### Improvements

- Wheels generated by `poetry-core` now correctly identify the `Generator` metadata as `poetry-core` instead of `poetry` ([#367](https://github.com/python-poetry/poetry-core/pull/367)).

## [1.1.0b1] - 2022-05-23

### Fixed

- Fixed an issue where canonicalizing package names led to infinite loops ([#328](https://github.com/python-poetry/poetry-core/pull/328)).
- Fixed an issue where versions weren't correctly normalized to PEP-440 ([#344](https://github.com/python-poetry/poetry-core/pull/344)).
- Fixed an issue with the union of multi markers if one marker is a subset of the other marker ([#352](https://github.com/python-poetry/poetry-core/pull/352)).
- Fixed an issue with markers which are not in disjunctive normal form (DNF) ([#347](https://github.com/python-poetry/poetry-core/pull/347)).
- Fixed an issue where stub-only partial namespace packages were not recognized as packages ([#221](https://github.com/python-poetry/poetry-core/pull/221)).
- Fixed an issue where PEP-508 url requirements with extras were not parsed correctly ([#345](https://github.com/python-poetry/poetry-core/pull/345)).
- Fixed an issue where PEP-508 strings with wildcard exclusion constraints were incorrectly exported ([#343](https://github.com/python-poetry/poetry-core/pull/343)).
- Allow hidden directories on Windows bare repos ([#341](https://github.com/python-poetry/poetry-core/pull/341)).
- Fixed an issue where dependencies with an epoch were parsed as empty ([#316](https://github.com/python-poetry/poetry-core/pull/316)).
- Fixed an issue where a package consisting of multiple packages wasn't built correctly ([#292](https://github.com/python-poetry/poetry-core/pull/292)).

### Added

- Added support for handling git urls with subdirectories ([#288](https://github.com/python-poetry/poetry-core/pull/288)).
- Added support for metadata files as described in PEP-658 for PEP-503 "simple" API repositories ([#333](https://github.com/python-poetry/poetry-core/pull/333)).

### Changed

- Renamed the dependency group of runtime dependencies from `default` to `main` ([#326](https://github.com/python-poetry/poetry-core/pull/326)).

### Improvements

- `poetry-core` is now completely type checked.
- Improved the SemVer constraint parsing ([#327](https://github.com/python-poetry/poetry-core/pull/327)).
- Improved the speed when cloning git repositories ([#290](https://github.com/python-poetry/poetry-core/pull/290)).

## [1.1.0a7] - 2022-03-05

### Fixed

- Fixed an issue when evaluating `in/not in` markers ([#188](https://github.com/python-poetry/poetry-core/pull/188)).
- Fixed an issue when parsing caret constraints with a leading zero ([#201](https://github.com/python-poetry/poetry-core/pull/201)).
- Respect format for explicitly included files when finding excluded files ([#228](https://github.com/python-poetry/poetry-core/pull/228)).
- Fixed an issue where only the last location was used when multiple packages should be included ([#108](https://github.com/python-poetry/poetry-core/pull/108)).
- Ensure that package `description` contains no new line ([#219](https://github.com/python-poetry/poetry-core/pull/219)).
- Fixed an issue where all default dependencies were removed instead of just the selected one ([#220](https://github.com/python-poetry/poetry-core/pull/220)).
- Ensure that authors and maintainers are normalized ([#276](https://github.com/python-poetry/poetry-core/pull/276)).

### Added

- Add support for most of the guaranteed hashes ([#207](https://github.com/python-poetry/poetry-core/pull/207)).
- Add support to declare multiple README files ([#248](https://github.com/python-poetry/poetry-core/pull/248)).
- Add support for git subdirectories ([#192](https://github.com/python-poetry/poetry-core/pull/192)).
- Add hooks according to PEP-660 for editable installs ([#182](https://github.com/python-poetry/poetry-core/pull/182)).
- Add support for version epochs ([#264](https://github.com/python-poetry/poetry-core/pull/264)).

### Changed

- Drop Python 3.6 support ([#263](https://github.com/python-poetry/poetry-core/pull/263)).
- Loosen the strictness when parsing version constraints to support invalid use of wildcards, e.g. `>=3.*` ([#186](https://github.com/python-poetry/poetry-core/pull/186)).
- No longer assume a default git branch name ([#192](https://github.com/python-poetry/poetry-core/pull/192)).
- Sort package names in extras to make them reproducible ([#280](https://github.com/python-poetry/poetry-core/pull/280)).

### Improvements

- Improve marker handling ([#208](https://github.com/python-poetry/poetry-core/pull/208), [#282](https://github.com/python-poetry/poetry-core/pull/282), [#283](https://github.com/python-poetry/poetry-core/pull/283), [#284](https://github.com/python-poetry/poetry-core/pull/284), [#286](https://github.com/python-poetry/poetry-core/pull/286), [#291](https://github.com/python-poetry/poetry-core/pull/291), [#293](https://github.com/python-poetry/poetry-core/pull/293), [#294](https://github.com/python-poetry/poetry-core/pull/294), [#297](https://github.com/python-poetry/poetry-core/pull/297)).

## [1.1.0a6] - 2021-07-30

### Added

- Added support for dependency groups. ([#183](https://github.com/python-poetry/poetry-core/pull/183))

## [1.1.0a5] - 2021-05-21

### Added

- Added support for script files in addition to standard entry points. ([#40](https://github.com/python-poetry/poetry-core/pull/40))

### Fixed

- Fixed an error in the way python markers with a precision >= 3 were handled. ([#178](https://github.com/python-poetry/poetry-core/pull/178))

## [1.1.0a4] - 2021-04-30

### Changed

- Files in source distributions now have a deterministic time to improve reproducibility. ([#142](https://github.com/python-poetry/poetry-core/pull/142))

### Fixed

- Fixed an error where leading zeros in the local build part of version specifications were discarded. ([#167](https://github.com/python-poetry/poetry-core/pull/167))
- Fixed the PEP 508 representation of file dependencies. ([#153](https://github.com/python-poetry/poetry-core/pull/153))
- Fixed the copy of `Package` instances which led to file hashes not being available. ([#159](https://github.com/python-poetry/poetry-core/pull/159))
- Fixed an error in the parsing of caret requirements with a pre-release lower bound. ([#171](https://github.com/python-poetry/poetry-core/pull/171))
- Fixed an error where some pre-release versions were not flagged as pre-releases. ([#170](https://github.com/python-poetry/poetry-core/pull/170))

## [1.1.0a3] - 2021-04-09

### Fixed

- Fixed dependency markers not being properly copied when changing the constraint ([#162](https://github.com/python-poetry/poetry-core/pull/162)).

## [1.1.0a2] - 2021-04-08

### Fixed

- Fixed performance regressions when parsing version constraints ([#152](https://github.com/python-poetry/poetry-core/pull/152)).
- Fixed how local build versions are handled and compared ([#157](https://github.com/python-poetry/poetry-core/pull/157), [#158](https://github.com/python-poetry/poetry-core/pull/158)).
- Fixed errors when parsing some environment markers ([#155](https://github.com/python-poetry/poetry-core/pull/155)).

## [1.1.0a1] - 2021-03-30

This version is the first to drop support for Python 2.7 and 3.5.

If you are still using these versions you should update the `requires` property of the `build-system` section to restrict the version of `poetry-core`:

```toml
[build-system]
requires = ["poetry-core<1.1.0"]
build-backend = "poetry.core.masonry.api"
```

### Changed

- Dropped support for Python 2.7 and 3.5 ([#131](https://github.com/python-poetry/poetry-core/pull/131)).
- Reorganized imports internally to improve performance ([#131](https://github.com/python-poetry/poetry-core/pull/131)).
- Directory dependencies are now in non-develop mode by default ([#98](https://github.com/python-poetry/poetry-core/pull/98)).
- Improved support for PEP 440 specific versions that do not abide by semantic versioning ([#140](https://github.com/python-poetry/poetry-core/pull/140)).

### Fixed

- Fixed the PEP 508 representation of path dependencies ([#141](https://github.com/python-poetry/poetry-core/pull/141)).

## [1.0.2] - 2021-02-05

### Fixed

- Fixed a missing import causing an error in Poetry ([#134](https://github.com/python-poetry/poetry-core/pull/134)).

## [1.0.1] - 2021-02-05

### Fixed

- Fixed the PEP 508 representation of dependencies without extras ([#102](https://github.com/python-poetry/poetry-core/pull/102)).
- Fixed an error where development dependencies were being resolved when invoking the PEP-517 backend ([#101](https://github.com/python-poetry/poetry-core/pull/101)).
- Fixed source distributions not being deterministic ([#105](https://github.com/python-poetry/poetry-core/pull/105)).
- Fixed an error where zip files were left open when building wheels ([#122](https://github.com/python-poetry/poetry-core/pull/122)).
- Fixed an error where explicitly included files were still not present in final distributions ([#124](https://github.com/python-poetry/poetry-core/pull/124)).
- Fixed wheel filename matching for recent architectures ([#125](https://github.com/python-poetry/poetry-core/pull/125), [#129](https://github.com/python-poetry/poetry-core/pull/129)).
- Fixed an error where the `&` character was not accepted in author names ([#120](https://github.com/python-poetry/poetry-core/pull/120)).
- Fixed the PEP-508 representation of some dependencies ([#103](https://github.com/python-poetry/poetry-core/pull/103)).
- Fixed the `Requires-Python` metadata generation ([#127](https://github.com/python-poetry/poetry-core/pull/127)).
- Fixed an error where pre-release versions were accepted in version constraints ([#128](https://github.com/python-poetry/poetry-core/pull/128)).

## [1.0.0] - 2020-09-30

No changes.

## [1.0.0rc3] - 2020-09-30

### Changed

- Removed the `intreehooks` build backend in favor of the `backend-path` mechanism ([#90](https://github.com/python-poetry/poetry-core/pull/90)).
- Directory dependencies will now always use a posix path for their representation ([#91](https://github.com/python-poetry/poetry-core/pull/91)).
- Dependency constraints can now be set directly via a proper setter (see the sketch below) ([#90](https://github.com/python-poetry/poetry-core/pull/90)).
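
For illustration, a minimal sketch of the setter mentioned above (assuming a recent poetry-core, where `Dependency` lives in `poetry.core.packages.dependency`; releases of this era exposed it under a slightly different path):

```python
from poetry.core.packages.dependency import Dependency

# The `constraint` property setter accepts a plain requirement string
# and parses it into a version constraint object (it replaces the
# later-deprecated set_constraint()).
dep = Dependency("requests", ">=2.0")
dep.constraint = ">=2.28,<3.0"
print(dep.to_pep_508())  # e.g. requests (>=2.28,<3.0)
```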

## [1.0.0rc2] - 2020-09-25

### Fixed

- Fixed `python_full_version` markers conversion to version constraints ([#86](https://github.com/python-poetry/core/pull/86)).

## [1.0.0rc1] - 2020-09-25

### Fixed

- Fixed Python constraint propagation when converting a package to a dependency ([#84](https://github.com/python-poetry/core/pull/84)).
- Fixed VCS ignored files being included in wheel distributions for projects using the `src` layout ([#81](https://github.com/python-poetry/core/pull/81)).

## [1.0.0b1] - 2020-09-18

### Added

- Added support for build executable for wheels ([#72](https://github.com/python-poetry/core/pull/72)).

### Changed

- Improved packages with sources equality comparison ([#53](https://github.com/python-poetry/core/pull/53)).
- Improved licenses handling and packaging in builders ([#57](https://github.com/python-poetry/core/pull/57)).
- Refactored packages and dependencies classes to improve comparison between bare packages and packages with extras ([#78](https://github.com/python-poetry/core/pull/78)).

### Fixed

- Fixed PEP-508 representation of URL dependencies ([#60](https://github.com/python-poetry/core/pull/60)).
- Fixed generated `RECORD` files in some cases by ensuring it's a valid CSV file ([#61](https://github.com/python-poetry/core/pull/61)).
- Fixed an error when parsing some version constraints if they contained wildcard elements ([#56](https://github.com/python-poetry/core/pull/56)).
- Fixed errors when using the `exclude` property ([#62](https://github.com/python-poetry/core/pull/62)).
- Fixed the way git revisions are retrieved ([#69](https://github.com/python-poetry/core/pull/69)).
- Fixed dependency constraint PEP-508 compatibility when generating metadata ([#79](https://github.com/python-poetry/core/pull/79)).
- Fixed potential errors on Python 3.5 when building with the `include` property set ([#75](https://github.com/python-poetry/core/pull/75)).

## [1.0.0a9] - 2020-07-24

### Added

- Added support for build scripts without `setup.py` generation ([#45](https://github.com/python-poetry/core/pull/45)).

### Changed

- Improved the parsing of requirements and environment markers ([#44](https://github.com/python-poetry/core/pull/44)).

### Fixed

- Fixed the default value used for the `build.generate-setup-file` settings ([#43](https://github.com/python-poetry/core/pull/43)).
- Fixed error messages when the authors specified in the pyproject.toml file are invalid ([#49](https://github.com/python-poetry/core/pull/49)).
- Fixed distributions build when using the PEP-517 backend for packages with includes ([#47](https://github.com/python-poetry/core/pull/47)).

## [1.0.0a8] - 2020-06-26

### Fixed

- Fixed errors in the way Python environment markers were parsed and generated ([#36](https://github.com/python-poetry/core/pull/36)).

## [1.0.0a7] - 2020-05-06

### Added

- Added support for format-specific includes via the `include` property ([#6](https://github.com/python-poetry/core/pull/6)).

### Changed

- Allow url dependencies in multiple constraints dependencies ([#32](https://github.com/python-poetry/core/pull/32)).

### Fixed

- Fixed PEP 508 representation and parsing of VCS dependencies ([#30](https://github.com/python-poetry/core/pull/30)).

## [1.0.0a6] - 2020-04-24

### Added

- Added support for markers inverse ([#21](https://github.com/python-poetry/core/pull/21)).
- Added support for specifying that `git` dependencies should be installed in develop mode ([#23](https://github.com/python-poetry/core/pull/23)).
- Added the ability to specify build settings from the Poetry main configuration file ([#26](https://github.com/python-poetry/core/pull/26)).
- Added the ability to disable the generation of the `setup.py` file when building ([#26](https://github.com/python-poetry/core/pull/26)).

### Changed

- Relaxed license restrictions to support custom licenses ([#5](https://github.com/python-poetry/core/pull/5)).
- Improved support for PEP-440 direct references ([#22](https://github.com/python-poetry/core/pull/22)).
- Improved dependency vendoring ([#25](https://github.com/python-poetry/core/pull/25)).

### Fixed

- Fixed the inability to make url dependencies optional ([#13](https://github.com/python-poetry/core/pull/13)).
- Fixed whitespaces in PEP-440 constraints causing an error ([#16](https://github.com/python-poetry/core/pull/16)).
- Fixed subpackage check when generating the `setup.py` file ([#17](https://github.com/python-poetry/core/pull/17)).
- Fixed PEP-517 issues for projects using build scripts ([#12](https://github.com/python-poetry/core/pull/12)).
- Fixed support for stub-only packages ([#28](https://github.com/python-poetry/core/pull/28)).

[Unreleased]: https://github.com/python-poetry/poetry-core/compare/2.1.1...main
[2.1.1]: https://github.com/python-poetry/poetry-core/releases/tag/2.1.1
[2.1.0]: https://github.com/python-poetry/poetry-core/releases/tag/2.1.0
[2.0.1]: https://github.com/python-poetry/poetry-core/releases/tag/2.0.1
[2.0.0]: https://github.com/python-poetry/poetry-core/releases/tag/2.0.0
[1.9.1]: https://github.com/python-poetry/poetry-core/releases/tag/1.9.1
[1.9.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.9.0
[1.8.1]: https://github.com/python-poetry/poetry-core/releases/tag/1.8.1
[1.8.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.8.0
[1.7.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.7.0
[1.6.1]: https://github.com/python-poetry/poetry-core/releases/tag/1.6.1
[1.6.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.6.0
[1.5.2]: https://github.com/python-poetry/poetry-core/releases/tag/1.5.2
[1.5.1]: https://github.com/python-poetry/poetry-core/releases/tag/1.5.1
[1.5.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.5.0
[1.4.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.4.0
[1.3.2]: https://github.com/python-poetry/poetry-core/releases/tag/1.3.2
[1.3.1]: https://github.com/python-poetry/poetry-core/releases/tag/1.3.1
[1.3.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.3.0
[1.2.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.2.0
[1.1.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0
[1.1.0rc3]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0rc3
[1.1.0rc2]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0rc2
[1.1.0rc1]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0rc1
[1.1.0b3]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0b3
[1.1.0b2]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0b2
[1.1.0b1]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0b1
[1.1.0a7]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a7
[1.1.0a6]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a6
[1.1.0a5]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a5
[1.1.0a4]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a4
[1.1.0a3]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a3
[1.1.0a2]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a2
[1.1.0a1]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a1
[1.0.2]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.2
[1.0.1]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.1
[1.0.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0
[1.0.0rc3]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0rc3
[1.0.0rc2]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0rc2
[1.0.0rc1]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0rc1
[1.0.0b1]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0b1
[1.0.0a9]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0a9
[1.0.0a8]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0a8
[1.0.0a7]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0a7
[1.0.0a6]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0a6

==> poetry-core-2.1.1/LICENSE <==

Copyright (c) 2020 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
poetry-core-2.1.1/Makefile000066400000000000000000000017141475444614500154440ustar00rootroot00000000000000SHELL := $(shell which bash) -e
MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
ROOT_DIR := $(patsubst %/,%,$(dir $(MAKEFILE_PATH)))
VENDOR_SRC := $(ROOT_DIR)/vendors
VENDOR_DIR := $(ROOT_DIR)/src/poetry/core/_vendor
VENDOR_TXT := $(VENDOR_DIR)/vendor.txt
POETRY_BIN ?= $(shell which poetry)

.PHONY: vendor/lock
vendor/lock: $(VENDOR_LOCK)
	# regenerate lock file
	@pushd $(VENDOR_SRC) && $(POETRY_BIN) lock

.PHONY: vendor/sync
vendor/sync:
	# regenerate vendor.txt file (exported from lockfile)
	@pushd $(VENDOR_SRC) && $(POETRY_BIN) export --without-hashes 2> /dev/null \
		| sort > $(VENDOR_TXT)
	# vendor packages
	@$(POETRY_BIN) run vendoring sync
	# strip out *.pyi stubs
	@find "$(VENDOR_DIR)" -type f -name "*.pyi" -exec rm {} \;

.PHONY: vendor/update
vendor/update: | vendor/lock vendor/sync
	@:
poetry-core-2.1.1/README.md000066400000000000000000000045621475444614500152660ustar00rootroot00000000000000# Poetry Core

[![Poetry](https://img.shields.io/endpoint?url=https://python-poetry.org/badge/v0.json)](https://python-poetry.org/)
[![PyPI version](https://img.shields.io/pypi/v/poetry-core)](https://pypi.org/project/poetry-core/)
[![Python Versions](https://img.shields.io/pypi/pyversions/poetry-core)](https://pypi.org/project/poetry-core/)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![](https://github.com/python-poetry/poetry-core/workflows/Tests/badge.svg)](https://github.com/python-poetry/poetry-core/actions?query=workflow%3ATests)

A [PEP 517](https://www.python.org/dev/peps/pep-0517/) build backend implementation developed for [Poetry](https://github.com/python-poetry/poetry). This project is intended to be a lightweight, fully compliant, self-contained package allowing PEP 517-compatible build frontends to build Poetry-managed projects.

## Usage

In most cases, the usage of this package is transparent to the end user, as it is used either by Poetry itself or by a PEP 517 frontend (e.g. `pip`).

In order to enable the use of `poetry-core` as your build backend, the following snippet must be present in your project's `pyproject.toml` file.

```toml
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
```

Once this is present, a PEP 517 frontend like `pip` can build and install your project from source without the need for Poetry or any of its dependencies (besides `poetry-core`).

```shell
# install to current environment
pip install /path/to/poetry/managed/project

# build a wheel package
pip wheel /path/to/poetry/managed/project
```

## Why is this required?

Prior to the release of version `1.1.0`, Poetry was a project management tool that included a PEP 517 build backend. This was inefficient and time-consuming when a PEP 517 build was required. For example, both `pip` and `tox` (with isolated builds) would install Poetry and all the dependencies it required. Most of these dependencies are not required when the objective is simply to build either a source or a binary distribution of your project.

In order to improve the above situation, `poetry-core` was created. Shared functionality pertaining to PEP 517 build backends, including reading `pyproject.toml` and building wheels and sdists, was implemented in this package. This makes PEP 517 builds extremely fast for Poetry-managed packages.
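Concretely, a PEP 517 frontend drives this backend through the standard hook functions exposed from `poetry.core.masonry.api`. As a minimal sketch only (assuming it is run from the root of a Poetry-managed project, in an environment where `poetry-core` is importable), the same hooks can be called directly:

```python
# A minimal sketch of what a PEP 517 frontend does with this backend.
# Assumptions: run from the root of a Poetry-managed project, with
# poetry-core installed in the current environment.
from pathlib import Path

from poetry.core.masonry.api import build_sdist, build_wheel

# Make sure the output directory exists before invoking the hooks.
Path("dist").mkdir(exist_ok=True)

# Each hook builds the artifact into the given directory and returns
# the basename of the file it created, as specified by PEP 517.
sdist_name = build_sdist("dist")
wheel_name = build_wheel("dist")
print(f"built {sdist_name} and {wheel_name}")
```

In normal use, a frontend such as `pip` or `build` performs these calls for you in an isolated build environment; invoking the hooks by hand is only useful to illustrate the contract the backend fulfils.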
poetry-core-2.1.1/poetry.lock000066400000000000000000002535541475444614500162130ustar00rootroot00000000000000# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. [[package]] name = "attrs" version = "25.1.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, ] [package.extras] benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "build" version = "1.2.2.post1" description = "A simple, correct Python build frontend" optional = false python-versions = ">=3.8" groups = ["test"] files = [ {file = "build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5"}, {file = "build-1.2.2.post1.tar.gz", hash = "sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7"}, ] [package.dependencies] colorama = {version = "*", markers = "os_name == \"nt\""} importlib-metadata = {version = ">=4.6", markers = "python_full_version < \"3.10.2\""} packaging = ">=19.1" pyproject_hooks = "*" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} [package.extras] docs = ["furo (>=2023.08.17)", "sphinx (>=7.0,<8.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)", "sphinx-issues (>=3.0.0)"] test = ["build[uv,virtualenv]", "filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2.12)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0)", "setuptools (>=56.0.0)", "setuptools (>=56.0.0)", "setuptools (>=67.8.0)", "wheel (>=0.36.0)"] typing = ["build[uv]", "importlib-metadata (>=5.1)", "mypy (>=1.9.0,<1.10.0)", "tomli", "typing-extensions (>=3.7.4.3)"] uv = ["uv (>=0.1.18)"] virtualenv = ["virtualenv (>=20.0.35)"] [[package]] name = "certifi" version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" groups = ["dev"] files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, ] [[package]] name = "cfgv" version = "3.4.0" description = "Validate configuration and produce human readable error messages." 
optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, ] [[package]] name = "charset-normalizer" version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" groups = ["dev"] files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, {file = 
"charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, {file = 
"charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] [[package]] name = "click" version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" groups = ["dev"] files = [ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, ] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" groups = ["dev", "test"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] markers = {dev = "platform_system == \"Windows\"", test = "sys_platform == \"win32\" or os_name == \"nt\""} [[package]] name = "coverage" version = "7.6.11" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" groups = ["test"] files = [ {file = "coverage-7.6.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eafea49da254a8289bed3fab960f808b322eda5577cb17a3733014928bbfbebd"}, {file = "coverage-7.6.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5a3f7cbbcb4ad95067a6525f83a6fc78d9cbc1e70f8abaeeaeaa72ef34f48fc3"}, {file = "coverage-7.6.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de6b079b39246a7da9a40cfa62d5766bd52b4b7a88cf5a82ec4c45bf6e152306"}, {file = "coverage-7.6.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:60d4ad09dfc8c36c4910685faafcb8044c84e4dae302e86c585b3e2e7778726c"}, {file = "coverage-7.6.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e433b6e3a834a43dae2889adc125f3fa4c66668df420d8e49bc4ee817dd7a70"}, {file = "coverage-7.6.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ac5d92e2cc121a13270697e4cb37e1eb4511ac01d23fe1b6c097facc3b46489e"}, {file = "coverage-7.6.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5128f3ba694c0a1bde55fc480090392c336236c3e1a10dad40dc1ab17c7675ff"}, {file = "coverage-7.6.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:397489c611b76302dfa1d9ea079e138dddc4af80fc6819d5f5119ec8ca6c0e47"}, {file = "coverage-7.6.11-cp310-cp310-win32.whl", hash = "sha256:c7719a5e1dc93883a6b319bc0374ecd46fb6091ed659f3fbe281ab991634b9b0"}, {file = "coverage-7.6.11-cp310-cp310-win_amd64.whl", hash = "sha256:c27df03730059118b8a923cfc8b84b7e9976742560af528242f201880879c1da"}, {file = "coverage-7.6.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:532fe139691af134aa8b54ed60dd3c806aa81312d93693bd2883c7b61592c840"}, {file = "coverage-7.6.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0b0f272901a5172090c0802053fbc503cdc3fa2612720d2669a98a7384a7bec"}, {file = "coverage-7.6.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4bda710139ea646890d1c000feb533caff86904a0e0638f85e967c28cb8eec50"}, {file = "coverage-7.6.11-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a165b09e7d5f685bf659063334a9a7b1a2d57b531753d3e04bd442b3cfe5845b"}, {file = "coverage-7.6.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ff136607689c1c87f43d24203b6d2055b42030f352d5176f9c8b204d4235ef27"}, {file = "coverage-7.6.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:050172741de03525290e67f0161ae5f7f387c88fca50d47fceb4724ceaa591d2"}, {file = "coverage-7.6.11-cp311-cp311-win32.whl", hash = "sha256:27700d859be68e4fb2e7bf774cf49933dcac6f81a9bc4c13bd41735b8d26a53b"}, {file = "coverage-7.6.11-cp311-cp311-win_amd64.whl", hash = "sha256:cd4839813b09ab1dd1be1bbc74f9a7787615f931f83952b6a9af1b2d3f708bf7"}, {file = 
"coverage-7.6.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:dbb1a822fd858d9853333a7c95d4e70dde9a79e65893138ce32c2ec6457d7a36"}, {file = "coverage-7.6.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:61c834cbb80946d6ebfddd9b393a4c46bec92fcc0fa069321fcb8049117f76ea"}, {file = "coverage-7.6.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a46d56e99a31d858d6912d31ffa4ede6a325c86af13139539beefca10a1234ce"}, {file = "coverage-7.6.11-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b48db06f53d1864fea6dbd855e6d51d41c0f06c212c3004511c0bdc6847b297"}, {file = "coverage-7.6.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b6ff5be3b1853e0862da9d349fe87f869f68e63a25f7c37ce1130b321140f963"}, {file = "coverage-7.6.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be05bde21d5e6eefbc3a6de6b9bee2b47894b8945342e8663192809c4d1f08ce"}, {file = "coverage-7.6.11-cp312-cp312-win32.whl", hash = "sha256:e3b746fa0ffc5b6b8856529de487da8b9aeb4fb394bb58de6502ef45f3434f12"}, {file = "coverage-7.6.11-cp312-cp312-win_amd64.whl", hash = "sha256:ac476e6d0128fb7919b3fae726de72b28b5c9644cb4b579e4a523d693187c551"}, {file = "coverage-7.6.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c86f4c7a6d1a54a24d804d9684d96e36a62d3ef7c0d7745ae2ea39e3e0293251"}, {file = "coverage-7.6.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7eb0504bb307401fd08bc5163a351df301438b3beb88a4fa044681295bbefc67"}, {file = "coverage-7.6.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca95d40900cf614e07f00cee8c2fad0371df03ca4d7a80161d84be2ec132b7a4"}, {file = "coverage-7.6.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db4b1a69976b1b02acda15937538a1d3fe10b185f9d99920b17a740a0a102e06"}, {file = "coverage-7.6.11-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf96beb05d004e4c51cd846fcdf9eee9eb2681518524b66b2e7610507944c2f"}, {file = "coverage-7.6.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:08e5fb93576a6b054d3d326242af5ef93daaac9bb52bc25f12ccbc3fa94227cd"}, {file = "coverage-7.6.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:25575cd5a7d2acc46b42711e8aff826027c0e4f80fb38028a74f31ac22aae69d"}, {file = "coverage-7.6.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8fa4fffd90ee92f62ff7404b4801b59e8ea8502e19c9bf2d3241ce745b52926c"}, {file = "coverage-7.6.11-cp313-cp313-win32.whl", hash = "sha256:0d03c9452d9d1ccfe5d3a5df0427705022a49b356ac212d529762eaea5ef97b4"}, {file = "coverage-7.6.11-cp313-cp313-win_amd64.whl", hash = "sha256:fd2fffc8ce8692ce540103dff26279d2af22d424516ddebe2d7e4d6dbb3816b2"}, {file = "coverage-7.6.11-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:5e7ac966ab110bd94ee844f2643f196d78fde1cd2450399116d3efdd706e19f5"}, {file = "coverage-7.6.11-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ba27a0375c5ef4d2a7712f829265102decd5ff78b96d342ac2fa555742c4f4f"}, {file = "coverage-7.6.11-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2778be4f574b39ec9dcd9e5e13644f770351ee0990a0ecd27e364aba95af89b"}, {file = "coverage-7.6.11-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5edc16712187139ab635a2e644cc41fc239bc6d245b16124045743130455c652"}, {file = 
"coverage-7.6.11-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df6ff122a0a10a30121d9f0cb3fbd03a6fe05861e4ec47adb9f25e9245aabc19"}, {file = "coverage-7.6.11-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ff562952f15eff27247a4c4b03e45ce8a82e3fb197de6a7c54080f9d4ba07845"}, {file = "coverage-7.6.11-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:4f21e3617f48d683f30cf2a6c8b739c838e600cb1454fe6b2eb486ac2bce8fbd"}, {file = "coverage-7.6.11-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6d60577673ba48d8ae8e362e61fd4ad1a640293ffe8991d11c86f195479100b7"}, {file = "coverage-7.6.11-cp313-cp313t-win32.whl", hash = "sha256:13100f98497086b359bf56fc035a762c674de8ef526daa389ac8932cb9bff1e0"}, {file = "coverage-7.6.11-cp313-cp313t-win_amd64.whl", hash = "sha256:2c81e53782043b323bd34c7de711ed9b4673414eb517eaf35af92185b873839c"}, {file = "coverage-7.6.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ff52b4e2ac0080c96e506819586c4b16cdbf46724bda90d308a7330a73cc8521"}, {file = "coverage-7.6.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f4679fcc9eb9004fdd1b00231ef1ec7167168071bebc4d66327e28c1979b4449"}, {file = "coverage-7.6.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:90de4e9ca4489e823138bd13098af9ac8028cc029f33f60098b5c08c675c7bda"}, {file = "coverage-7.6.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c96a142057d83ee993eaf71629ca3fb952cda8afa9a70af4132950c2bd3deb9"}, {file = "coverage-7.6.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:476f29a258b9cd153f2be5bf5f119d670d2806363595263917bddc167d6e5cce"}, {file = "coverage-7.6.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:09d03f48d9025b8a6a116cddcb6c7b8ce80e4fb4c31dd2e124a7c377036ad58e"}, {file = "coverage-7.6.11-cp39-cp39-win32.whl", hash = "sha256:bb35ae9f134fbd9cf7302a9654d5a1e597c974202678082dcc569eb39a8cde03"}, {file = "coverage-7.6.11-cp39-cp39-win_amd64.whl", hash = "sha256:f382004fa4c93c01016d9226b9d696a08c53f6818b7ad59b4e96cb67e863353a"}, {file = "coverage-7.6.11-pp39.pp310-none-any.whl", hash = "sha256:adc2d941c0381edfcf3897f94b9f41b1e504902fab78a04b1677f2f72afead4b"}, {file = "coverage-7.6.11-py3-none-any.whl", hash = "sha256:f0f334ae844675420164175bf32b04e18a81fe57ad8eb7e0cfd4689d681ffed7"}, {file = "coverage-7.6.11.tar.gz", hash = "sha256:e642e6a46a04e992ebfdabed79e46f478ec60e2c528e1e1a074d63800eda4286"}, ] [package.dependencies] tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] toml = ["tomli"] [[package]] name = "distlib" version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" groups = ["dev", "test"] files = [ {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, ] [[package]] name = "exceptiongroup" version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["test"] markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = 
"sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] test = ["pytest (>=6)"] [[package]] name = "filelock" version = "3.17.0" description = "A platform independent file lock." optional = false python-versions = ">=3.9" groups = ["dev", "test"] files = [ {file = "filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338"}, {file = "filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e"}, ] [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "identify" version = "2.6.7" description = "File identification library for Python" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ {file = "identify-2.6.7-py2.py3-none-any.whl", hash = "sha256:155931cb617a401807b09ecec6635d6c692d180090a1cedca8ef7d58ba5b6aa0"}, {file = "identify-2.6.7.tar.gz", hash = "sha256:3fa266b42eba321ee0b2bb0936a6a6b9e36a1351cbb69055b3082f4193035684"}, ] [package.extras] license = ["ukkonen"] [[package]] name = "idna" version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" groups = ["dev"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] [package.extras] all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] [[package]] name = "importlib-metadata" version = "8.6.1" description = "Read metadata from Python packages" optional = false python-versions = ">=3.9" groups = ["test"] markers = "python_full_version < \"3.10.2\"" files = [ {file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"}, {file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"}, ] [package.dependencies] zipp = ">=3.20" [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["pytest-mypy"] [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" groups = ["test"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] [[package]] name = "jsonschema" version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "jsonschema-4.23.0-py3-none-any.whl", hash = 
"sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, ] [package.dependencies] attrs = ">=22.2.0" jsonschema-specifications = ">=2023.03.6" referencing = ">=0.28.4" rpds-py = ">=0.7.1" [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] [[package]] name = "jsonschema-specifications" version = "2024.10.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, ] [package.dependencies] referencing = ">=0.31.0" [[package]] name = "markdown-it-py" version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, ] [package.dependencies] mdurl = ">=0.1,<1.0" [package.extras] benchmarking = ["psutil", "pytest", "pytest-benchmark"] code-style = ["pre-commit (>=3.0,<4.0)"] compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] linkify = ["linkify-it-py (>=1,<3)"] plugins = ["mdit-py-plugins"] profiling = ["gprof2dot"] rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" groups = ["dev"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] [[package]] name = "mypy" version = "1.15.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.9" groups = ["typing"] files = [ {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, {file = 
"mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, ] [package.dependencies] mypy_extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typing_extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] [[package]] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" groups = ["typing"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] [[package]] name = "nodeenv" version = "1.9.1" description = "Node.js virtual environment builder" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" groups = ["dev"] files = [ {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, ] [[package]] name = "packaging" version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" groups = ["dev", "test"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] name = "platformdirs" version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" groups = ["dev", "test"] files = [ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] type = ["mypy (>=1.11.2)"] [[package]] name = "pluggy" version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" groups = ["test"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] [[package]] name = "pre-commit" version = "4.1.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false python-versions = ">=3.9" groups = ["dev"] files = [ {file = "pre_commit-4.1.0-py2.py3-none-any.whl", hash = "sha256:d29e7cb346295bcc1cc75fc3e92e343495e3ea0196c9ec6ba53f49f10ab6ae7b"}, {file = "pre_commit-4.1.0.tar.gz", hash = "sha256:ae3f018575a588e30dfddfab9a05448bfbd6b73d78709617b5a2b853549716d4"}, ] [package.dependencies] cfgv = ">=2.0.0" identify = ">=1.0.0" nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" [[package]] name = "pygments" version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, ] [package.extras] windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pyproject-hooks" version = "1.2.0" description = "Wrappers to call pyproject.toml-based build backend hooks." 
optional = false python-versions = ">=3.7" groups = ["test"] files = [ {file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"}, {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"}, ] [[package]] name = "pytest" version = "8.3.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" groups = ["test"] files = [ {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, ] [package.dependencies] colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" pluggy = ">=1.5,<2" tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-cov" version = "6.0.0" description = "Pytest plugin for measuring coverage." optional = false python-versions = ">=3.9" groups = ["test"] files = [ {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"}, {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"}, ] [package.dependencies] coverage = {version = ">=7.5", extras = ["toml"]} pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-mock" version = "3.14.0" description = "Thin-wrapper around the mock package for easier use with pytest" optional = false python-versions = ">=3.8" groups = ["test"] files = [ {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, ] [package.dependencies] pytest = ">=6.2.5" [package.extras] dev = ["pre-commit", "pytest-asyncio", "tox"] [[package]] name = "pyyaml" version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] name = "referencing" version = "0.36.2" description = "JSON Referencing + Python" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"}, {file 
= "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"}, ] [package.dependencies] attrs = ">=22.2.0" rpds-py = ">=0.7.0" typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""} [[package]] name = "requests" version = "2.32.3" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" groups = ["dev"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rich" version = "13.9.4" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" groups = ["dev"] files = [ {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, ] [package.dependencies] markdown-it-py = ">=2.2.0" pygments = ">=2.13.0,<3.0.0" typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" version = "0.22.3" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ {file = "rpds_py-0.22.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6c7b99ca52c2c1752b544e310101b98a659b720b21db00e65edca34483259967"}, {file = "rpds_py-0.22.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be2eb3f2495ba669d2a985f9b426c1797b7d48d6963899276d22f23e33d47e37"}, {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70eb60b3ae9245ddea20f8a4190bd79c705a22f8028aaf8bbdebe4716c3fab24"}, {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4041711832360a9b75cfb11b25a6a97c8fb49c07b8bd43d0d02b45d0b499a4ff"}, {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64607d4cbf1b7e3c3c8a14948b99345eda0e161b852e122c6bb71aab6d1d798c"}, {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e69b0a0e2537f26d73b4e43ad7bc8c8efb39621639b4434b76a3de50c6966e"}, {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc27863442d388870c1809a87507727b799c8460573cfbb6dc0eeaef5a11b5ec"}, {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e79dd39f1e8c3504be0607e5fc6e86bb60fe3584bec8b782578c3b0fde8d932c"}, {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e0fa2d4ec53dc51cf7d3bb22e0aa0143966119f42a0c3e4998293a3dd2856b09"}, {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fda7cb070f442bf80b642cd56483b5548e43d366fe3f39b98e67cce780cded00"}, {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cff63a0272fcd259dcc3be1657b07c929c466b067ceb1c20060e8d10af56f5bf"}, {file = "rpds_py-0.22.3-cp310-cp310-win32.whl", hash = 
"sha256:9bd7228827ec7bb817089e2eb301d907c0d9827a9e558f22f762bb690b131652"}, {file = "rpds_py-0.22.3-cp310-cp310-win_amd64.whl", hash = "sha256:9beeb01d8c190d7581a4d59522cd3d4b6887040dcfc744af99aa59fef3e041a8"}, {file = "rpds_py-0.22.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d20cfb4e099748ea39e6f7b16c91ab057989712d31761d3300d43134e26e165f"}, {file = "rpds_py-0.22.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:68049202f67380ff9aa52f12e92b1c30115f32e6895cd7198fa2a7961621fc5a"}, {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb4f868f712b2dd4bcc538b0a0c1f63a2b1d584c925e69a224d759e7070a12d5"}, {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc51abd01f08117283c5ebf64844a35144a0843ff7b2983e0648e4d3d9f10dbb"}, {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f3cec041684de9a4684b1572fe28c7267410e02450f4561700ca5a3bc6695a2"}, {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ef9d9da710be50ff6809fed8f1963fecdfecc8b86656cadfca3bc24289414b0"}, {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59f4a79c19232a5774aee369a0c296712ad0e77f24e62cad53160312b1c1eaa1"}, {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a60bce91f81ddaac922a40bbb571a12c1070cb20ebd6d49c48e0b101d87300d"}, {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e89391e6d60251560f0a8f4bd32137b077a80d9b7dbe6d5cab1cd80d2746f648"}, {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e3fb866d9932a3d7d0c82da76d816996d1667c44891bd861a0f97ba27e84fc74"}, {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1352ae4f7c717ae8cba93421a63373e582d19d55d2ee2cbb184344c82d2ae55a"}, {file = "rpds_py-0.22.3-cp311-cp311-win32.whl", hash = "sha256:b0b4136a252cadfa1adb705bb81524eee47d9f6aab4f2ee4fa1e9d3cd4581f64"}, {file = "rpds_py-0.22.3-cp311-cp311-win_amd64.whl", hash = "sha256:8bd7c8cfc0b8247c8799080fbff54e0b9619e17cdfeb0478ba7295d43f635d7c"}, {file = "rpds_py-0.22.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:27e98004595899949bd7a7b34e91fa7c44d7a97c40fcaf1d874168bb652ec67e"}, {file = "rpds_py-0.22.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1978d0021e943aae58b9b0b196fb4895a25cc53d3956b8e35e0b7682eefb6d56"}, {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655ca44a831ecb238d124e0402d98f6212ac527a0ba6c55ca26f616604e60a45"}, {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:feea821ee2a9273771bae61194004ee2fc33f8ec7db08117ef9147d4bbcbca8e"}, {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22bebe05a9ffc70ebfa127efbc429bc26ec9e9b4ee4d15a740033efda515cf3d"}, {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af6e48651c4e0d2d166dc1b033b7042ea3f871504b6805ba5f4fe31581d8d38"}, {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ba3c290821343c192f7eae1d8fd5999ca2dc99994114643e2f2d3e6138b15"}, {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02fbb9c288ae08bcb34fb41d516d5eeb0455ac35b5512d03181d755d80810059"}, {file = 
"rpds_py-0.22.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f56a6b404f74ab372da986d240e2e002769a7d7102cc73eb238a4f72eec5284e"}, {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0a0461200769ab3b9ab7e513f6013b7a97fdeee41c29b9db343f3c5a8e2b9e61"}, {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8633e471c6207a039eff6aa116e35f69f3156b3989ea3e2d755f7bc41754a4a7"}, {file = "rpds_py-0.22.3-cp312-cp312-win32.whl", hash = "sha256:593eba61ba0c3baae5bc9be2f5232430453fb4432048de28399ca7376de9c627"}, {file = "rpds_py-0.22.3-cp312-cp312-win_amd64.whl", hash = "sha256:d115bffdd417c6d806ea9069237a4ae02f513b778e3789a359bc5856e0404cc4"}, {file = "rpds_py-0.22.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ea7433ce7e4bfc3a85654aeb6747babe3f66eaf9a1d0c1e7a4435bbdf27fea84"}, {file = "rpds_py-0.22.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6dd9412824c4ce1aca56c47b0991e65bebb7ac3f4edccfd3f156150c96a7bf25"}, {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20070c65396f7373f5df4005862fa162db5d25d56150bddd0b3e8214e8ef45b4"}, {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b09865a9abc0ddff4e50b5ef65467cd94176bf1e0004184eb915cbc10fc05c5"}, {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3453e8d41fe5f17d1f8e9c383a7473cd46a63661628ec58e07777c2fff7196dc"}, {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f5d36399a1b96e1a5fdc91e0522544580dbebeb1f77f27b2b0ab25559e103b8b"}, {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:009de23c9c9ee54bf11303a966edf4d9087cd43a6003672e6aa7def643d06518"}, {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1aef18820ef3e4587ebe8b3bc9ba6e55892a6d7b93bac6d29d9f631a3b4befbd"}, {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f60bd8423be1d9d833f230fdbccf8f57af322d96bcad6599e5a771b151398eb2"}, {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:62d9cfcf4948683a18a9aff0ab7e1474d407b7bab2ca03116109f8464698ab16"}, {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9253fc214112405f0afa7db88739294295f0e08466987f1d70e29930262b4c8f"}, {file = "rpds_py-0.22.3-cp313-cp313-win32.whl", hash = "sha256:fb0ba113b4983beac1a2eb16faffd76cb41e176bf58c4afe3e14b9c681f702de"}, {file = "rpds_py-0.22.3-cp313-cp313-win_amd64.whl", hash = "sha256:c58e2339def52ef6b71b8f36d13c3688ea23fa093353f3a4fee2556e62086ec9"}, {file = "rpds_py-0.22.3-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f82a116a1d03628a8ace4859556fb39fd1424c933341a08ea3ed6de1edb0283b"}, {file = "rpds_py-0.22.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3dfcbc95bd7992b16f3f7ba05af8a64ca694331bd24f9157b49dadeeb287493b"}, {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59259dc58e57b10e7e18ce02c311804c10c5a793e6568f8af4dead03264584d1"}, {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5725dd9cc02068996d4438d397e255dcb1df776b7ceea3b9cb972bdb11260a83"}, {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99b37292234e61325e7a5bb9689e55e48c3f5f603af88b1642666277a81f1fbd"}, {file = 
"rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27b1d3b3915a99208fee9ab092b8184c420f2905b7d7feb4aeb5e4a9c509b8a1"}, {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f612463ac081803f243ff13cccc648578e2279295048f2a8d5eb430af2bae6e3"}, {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f73d3fef726b3243a811121de45193c0ca75f6407fe66f3f4e183c983573e130"}, {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3f21f0495edea7fdbaaa87e633a8689cd285f8f4af5c869f27bc8074638ad69c"}, {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:1e9663daaf7a63ceccbbb8e3808fe90415b0757e2abddbfc2e06c857bf8c5e2b"}, {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a76e42402542b1fae59798fab64432b2d015ab9d0c8c47ba7addddbaf7952333"}, {file = "rpds_py-0.22.3-cp313-cp313t-win32.whl", hash = "sha256:69803198097467ee7282750acb507fba35ca22cc3b85f16cf45fb01cb9097730"}, {file = "rpds_py-0.22.3-cp313-cp313t-win_amd64.whl", hash = "sha256:f5cf2a0c2bdadf3791b5c205d55a37a54025c6e18a71c71f82bb536cf9a454bf"}, {file = "rpds_py-0.22.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:378753b4a4de2a7b34063d6f95ae81bfa7b15f2c1a04a9518e8644e81807ebea"}, {file = "rpds_py-0.22.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3445e07bf2e8ecfeef6ef67ac83de670358abf2996916039b16a218e3d95e97e"}, {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b2513ba235829860b13faa931f3b6846548021846ac808455301c23a101689d"}, {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eaf16ae9ae519a0e237a0f528fd9f0197b9bb70f40263ee57ae53c2b8d48aeb3"}, {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:583f6a1993ca3369e0f80ba99d796d8e6b1a3a2a442dd4e1a79e652116413091"}, {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4617e1915a539a0d9a9567795023de41a87106522ff83fbfaf1f6baf8e85437e"}, {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c150c7a61ed4a4f4955a96626574e9baf1adf772c2fb61ef6a5027e52803543"}, {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fa4331c200c2521512595253f5bb70858b90f750d39b8cbfd67465f8d1b596d"}, {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:214b7a953d73b5e87f0ebece4a32a5bd83c60a3ecc9d4ec8f1dca968a2d91e99"}, {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f47ad3d5f3258bd7058d2d506852217865afefe6153a36eb4b6928758041d831"}, {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f276b245347e6e36526cbd4a266a417796fc531ddf391e43574cf6466c492520"}, {file = "rpds_py-0.22.3-cp39-cp39-win32.whl", hash = "sha256:bbb232860e3d03d544bc03ac57855cd82ddf19c7a07651a7c0fdb95e9efea8b9"}, {file = "rpds_py-0.22.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfbc454a2880389dbb9b5b398e50d439e2e58669160f27b60e5eca11f68ae17c"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d48424e39c2611ee1b84ad0f44fb3b2b53d473e65de061e3f460fc0be5f1939d"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:24e8abb5878e250f2eb0d7859a8e561846f98910326d06c0d51381fed59357bd"}, {file = 
"rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b232061ca880db21fa14defe219840ad9b74b6158adb52ddf0e87bead9e8493"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac0a03221cdb5058ce0167ecc92a8c89e8d0decdc9e99a2ec23380793c4dcb96"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb0c341fa71df5a4595f9501df4ac5abfb5a09580081dffbd1ddd4654e6e9123"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf9db5488121b596dbfc6718c76092fda77b703c1f7533a226a5a9f65248f8ad"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8db6b5b2d4491ad5b6bdc2bc7c017eec108acbf4e6785f42a9eb0ba234f4c9"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b3d504047aba448d70cf6fa22e06cb09f7cbd761939fdd47604f5e007675c24e"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e61b02c3f7a1e0b75e20c3978f7135fd13cb6cf551bf4a6d29b999a88830a338"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:e35ba67d65d49080e8e5a1dd40101fccdd9798adb9b050ff670b7d74fa41c566"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:26fd7cac7dd51011a245f29a2cc6489c4608b5a8ce8d75661bb4a1066c52dfbe"}, {file = "rpds_py-0.22.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:177c7c0fce2855833819c98e43c262007f42ce86651ffbb84f37883308cb0e7d"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bb47271f60660803ad11f4c61b42242b8c1312a31c98c578f79ef9387bbde21c"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:70fb28128acbfd264eda9bf47015537ba3fe86e40d046eb2963d75024be4d055"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44d61b4b7d0c2c9ac019c314e52d7cbda0ae31078aabd0f22e583af3e0d79723"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f0e260eaf54380380ac3808aa4ebe2d8ca28b9087cf411649f96bad6900c728"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b25bc607423935079e05619d7de556c91fb6adeae9d5f80868dde3468657994b"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fb6116dfb8d1925cbdb52595560584db42a7f664617a1f7d7f6e32f138cdf37d"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a63cbdd98acef6570c62b92a1e43266f9e8b21e699c363c0fef13bd530799c11"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b8f60e1b739a74bab7e01fcbe3dddd4657ec685caa04681df9d562ef15b625f"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:2e8b55d8517a2fda8d95cb45d62a5a8bbf9dd0ad39c5b25c8833efea07b880ca"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:2de29005e11637e7a2361fa151f780ff8eb2543a0da1413bb951e9f14b699ef3"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:666ecce376999bf619756a24ce15bb14c5bfaf04bf00abc7e663ce17c3f34fe7"}, {file = "rpds_py-0.22.3-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:5246b14ca64a8675e0a7161f7af68fe3e910e6b90542b4bfb5439ba752191df6"}, {file = "rpds_py-0.22.3.tar.gz", hash = "sha256:e32fee8ab45d3c2db6da19a5323bc3362237c8b653c70194414b892fd06a080d"}, ] [[package]] name = "setuptools" version = "75.8.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" groups = ["test"] files = [ {file = "setuptools-75.8.0-py3-none-any.whl", hash = "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3"}, {file = "setuptools-75.8.0.tar.gz", hash = "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] core = ["importlib_metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] [[package]] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" groups = ["dev"] files = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] [[package]] name = "tomli" version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" groups = ["test", "typing"] files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, {file = 
"tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, {file = 
"tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] markers = {test = "python_full_version <= \"3.11.0a6\"", typing = "python_version < \"3.11\""} [[package]] name = "tomli-w" version = "1.2.0" description = "A lil' TOML writer" optional = false python-versions = ">=3.9" groups = ["test"] files = [ {file = "tomli_w-1.2.0-py3-none-any.whl", hash = "sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90"}, {file = "tomli_w-1.2.0.tar.gz", hash = "sha256:2dd14fac5a47c27be9cd4c976af5a12d87fb1f0b4512f81d69cce3b35ae25021"}, ] [[package]] name = "trove-classifiers" version = "2025.1.15.22" description = "Canonical source for classifiers on PyPI (pypi.org)." optional = false python-versions = "*" groups = ["test"] files = [ {file = "trove_classifiers-2025.1.15.22-py3-none-any.whl", hash = "sha256:5f19c789d4f17f501d36c94dbbf969fb3e8c2784d008e6f5164dd2c3d6a2b07c"}, {file = "trove_classifiers-2025.1.15.22.tar.gz", hash = "sha256:90af74358d3a01b3532bc7b3c88d8c6a094c2fd50a563d13d9576179326d7ed9"}, ] [[package]] name = "types-setuptools" version = "75.8.0.20250110" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" groups = ["typing"] files = [ {file = "types_setuptools-75.8.0.20250110-py3-none-any.whl", hash = "sha256:a9f12980bbf9bcdc23ecd80755789085bad6bfce4060c2275bc2b4ca9f2bc480"}, {file = "types_setuptools-75.8.0.20250110.tar.gz", hash = "sha256:96f7ec8bbd6e0a54ea180d66ad68ad7a1d7954e7281a710ea2de75e355545271"}, ] [[package]] name = "typing-extensions" version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" groups = ["dev", "typing"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] markers = {dev = "python_version < \"3.13\""} [[package]] name = "urllib3" version = "2.3.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" groups = ["dev"] files = [ {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "vendoring" version = "1.2.0" description = "A command line tool, to simplify vendoring pure Python dependencies." 
optional = false python-versions = "~= 3.8" groups = ["dev"] files = [ {file = "vendoring-1.2.0-py2.py3-none-any.whl", hash = "sha256:35b5fca683264e69e851a7580bb6a6f9848af024ffc8382ed5491bcfa55750c6"}, {file = "vendoring-1.2.0.tar.gz", hash = "sha256:6340a84bf542222c96f22ebc3cb87e4d86932dc04bc8d446e38285594702c00e"}, ] [package.dependencies] click = "*" jsonschema = "*" packaging = "*" requests = "*" rich = "*" toml = "*" [package.extras] doc = ["sphinx"] test = ["pytest", "pytest-cov", "pytest-mock"] [[package]] name = "virtualenv" version = "20.29.1" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" groups = ["dev", "test"] files = [ {file = "virtualenv-20.29.1-py3-none-any.whl", hash = "sha256:4e4cb403c0b0da39e13b46b1b2476e505cb0046b25f242bee80f62bf990b2779"}, {file = "virtualenv-20.29.1.tar.gz", hash = "sha256:b8b8970138d32fb606192cb97f6cd4bb644fa486be9308fb9b63f81091b5dc35"}, ] [package.dependencies] distlib = ">=0.3.7,<1" filelock = ">=3.12.2,<4" platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] [[package]] name = "zipp" version = "3.21.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" groups = ["test"] markers = "python_full_version < \"3.10.2\"" files = [ {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.9, <4.0" content-hash = "b85df75947dc8c45d16bd84ac4d75bb62321a6e9a2731ce68c1076718144b0ee" poetry-core-2.1.1/pyproject.toml000066400000000000000000000070651475444614500167250ustar00rootroot00000000000000[project] name = "poetry-core" version = "2.1.1" description = "Poetry PEP 517 Build Backend" authors = [ { name = "Sébastien Eustace", email = "sebastien@eustace.io" } ] maintainers = [ { name = "Arun Babu Neelicattu", email = "arun.neelicattu@gmail.com" }, { name = "Bjorn Neergaard", email = "bjorn@neersighted.com" }, { name = "Branch Vincent", email = "branchevincent@gmail.com" }, { name = "Randy Döring", email = "radoering.poetry@gmail.com" }, { name = "Steph Samson", email = "hello@stephsamson.com" }, { name = "finswimmer", email = "finswimmer77@gmail.com" }, { name = "Bartosz Sokorski", email = "b.sokorski@gmail.com" }, ] license = { text = "MIT" } requires-python = ">=3.9, <4.0" readme = "README.md" keywords = ["packaging", "dependency", "poetry"] dynamic = ["classifiers"] [project.urls] homepage = 
"https://github.com/python-poetry/poetry-core" repository = "https://github.com/python-poetry/poetry-core" "Bug Tracker" = "https://github.com/python-poetry/poetry/issues" [tool.poetry] requires-poetry = ">=2.0" packages = [ { include = "poetry", from = "src" }, ] include = [{ path = "tests", format = "sdist" }] classifiers = [ "Topic :: Software Development :: Build Tools", "Topic :: Software Development :: Libraries :: Python Modules", ] [tool.poetry.group.dev.dependencies] pre-commit = ">=2.15.0" vendoring = ">=1.0" [tool.poetry.group.test.dependencies] pytest = ">=7.1.2" pytest-cov = ">=3.0.0" pytest-mock = ">=3.10" build = ">=0.10.0" setuptools = ">=60" tomli-w = "^1.0.0" virtualenv = ">=20.21" trove-classifiers = ">=2022.5.19" [tool.poetry.group.typing.dependencies] mypy = ">=1.0" types-setuptools = ">=57.4.14" [tool.ruff] extend-exclude = [ "src/poetry/core/_vendor/*", "tests/**/fixtures/*", ] fix = true line-length = 88 src = ["src"] target-version = "py39" [tool.ruff.lint] extend-select = [ "B", # flake8-bugbear "C4", # flake8-comprehensions "ERA", # flake8-eradicate/eradicate "I", # isort "N", # pep8-naming "PIE", # flake8-pie "PGH", # pygrep "RUF", # ruff checks "SIM", # flake8-simplify "T20", # flake8-print "TCH", # flake8-type-checking "TID", # flake8-tidy-imports "UP", # pyupgrade "PTH", # flake8-use-pathlib ] ignore = [ "B904", # use 'raise ... from err' "B905", # use explicit 'strict=' parameter with 'zip()' ] extend-safe-fixes = [ "TCH", # move import from and to TYPE_CHECKING blocks ] unfixable = [ "ERA", # do not autoremove commented out code ] [tool.ruff.lint.flake8-tidy-imports] ban-relative-imports = "all" [tool.ruff.lint.isort] force-single-line = true lines-between-types = 1 lines-after-imports = 2 known-first-party = ["poetry.core"] known-third-party = ["poetry.core._vendor"] required-imports = ["from __future__ import annotations"] [tool.mypy] files = "src, tests" mypy_path = "src" namespace_packages = true explicit_package_bases = true show_error_codes = true strict = true enable_error_code = [ "ignore-without-code", "redundant-expr", "truthy-bool", ] exclude = [ "src/poetry/core/_vendor", "tests/fixtures", "tests/masonry/builders/fixtures", ] [[tool.mypy.overrides]] module = [ 'fastjsonschema.*', 'lark.*', 'virtualenv.*', ] ignore_missing_imports = true [tool.vendoring] destination = "src/poetry/core/_vendor/" requirements = "src/poetry/core/_vendor/vendor.txt" namespace = "" protected-files = ["vendor.txt"] patches-dir = "vendors/patches" [tool.vendoring.transformations] drop = [ "bin/", "*.so", "typing.*", "*/tests/", ] [build-system] requires = [] build-backend = "poetry.core.masonry.api" backend-path = ["src"] poetry-core-2.1.1/src/000077500000000000000000000000001475444614500145705ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/000077500000000000000000000000001475444614500161125ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/000077500000000000000000000000001475444614500170425ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/__init__.py000066400000000000000000000006121475444614500211520ustar00rootroot00000000000000from __future__ import annotations import sys from pathlib import Path # this cannot presently be replaced with importlib.metadata.version as when building # itself, poetry-core is not available as an installed distribution. 
__version__ = "2.1.1" __vendor_site__ = (Path(__file__).parent / "_vendor").as_posix() if __vendor_site__ not in sys.path: sys.path.insert(0, __vendor_site__) poetry-core-2.1.1/src/poetry/core/_vendor/000077500000000000000000000000001475444614500204765ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/000077500000000000000000000000001475444614500235065ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/LICENSE000066400000000000000000000027561475444614500245210ustar00rootroot00000000000000Copyright (c) 2018, Michal Horejsek All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the {organization} nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/__init__.py000066400000000000000000000241531475444614500256240ustar00rootroot00000000000000# ___ # \./ DANGER: This project implements some code generation # .--.O.--. techniques involving string concatenation. # \/ \/ If you look at it, you might die. # r""" Installation ************ .. code-block:: bash pip install fastjsonschema Only Python 3.3 and higher is supported. About ***** ``fastjsonschema`` implements validation of JSON documents against a JSON schema. The library implements JSON schema drafts 04, 06, and 07. The main purpose is to have a really fast implementation. See some numbers: * Probably the most popular, ``jsonschema``, can take up to 5 seconds for valid inputs and 1.2 seconds for invalid inputs. * Second most popular, ``json-spec``, is even worse with up to 7.2 and 1.7 seconds. * Last ``validictory``, now deprecated, is much better with 370 or 23 milliseconds, but it does not follow all standards, and it can still be slow for some purposes. With this library you can gain big improvements as ``fastjsonschema`` takes only about 25 milliseconds for valid inputs and 2 milliseconds for invalid ones. Pretty amazing, right? :-) Technically it works by generating the most stupid code on the fly, which is fast but is hard to write by hand. The best efficiency is achieved when a validator is compiled once and used many times, of course. It works similarly to regular expressions.
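A minimal sketch of that compile-once pattern (the schema here is
illustrative):

.. code-block:: python

    import fastjsonschema

    # Compile the validator once, like re.compile...
    validate = fastjsonschema.compile({'type': 'string'})

    # ...then reuse it many times.
    for value in ('a', 'b', 'c'):
        validate(value)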
But you can also generate the code to a file, which is even slightly faster. You can run the performance benchmarks on your computer or server with the included script: .. code-block:: bash $ make performance fast_compiled valid ==> 0.0993900 fast_compiled invalid ==> 0.0041089 fast_compiled_without_exc valid ==> 0.0465258 fast_compiled_without_exc invalid ==> 0.0023688 fast_file valid ==> 0.0989483 fast_file invalid ==> 0.0041104 fast_not_compiled valid ==> 11.9572681 fast_not_compiled invalid ==> 2.9512092 jsonschema valid ==> 5.2233240 jsonschema invalid ==> 1.3227916 jsonschema_compiled valid ==> 0.4447982 jsonschema_compiled invalid ==> 0.0231333 jsonspec valid ==> 4.1450569 jsonspec invalid ==> 1.0485777 validictory valid ==> 0.2730411 validictory invalid ==> 0.0183669 This library follows and implements `JSON schema draft-04, draft-06, and draft-07 `_. Sometimes it's not perfectly clear, so I recommend also checking out this `understanding JSON schema `_. Note that there are some differences compared to the JSON schema standard: * Regular expressions are full Python ones, not only what JSON schema allows. It's easier to allow everything, and also it's faster to compile without limits. So keep in mind that when you use a more advanced regular expression, it may not work with other libraries or in other languages. * Because Python regular expressions match a newline before a final dollar (``a$`` matches both ``a`` and ``a\\n``), ``\Z`` is used instead of ``$``, and all dollars in your regular expression are changed to ``\\Z`` as well. When you want to use a dollar as a regular character, you have to escape it (``\$``). * JSON schema says you can use the keyword ``default`` to provide default values. This implementation uses it and always returns the transformed input data. Usage ***** .. code-block:: python import fastjsonschema point_schema = { "type": "object", "properties": { "x": { "type": "number", }, "y": { "type": "number", }, }, "required": ["x", "y"], "additionalProperties": False, } point_validator = fastjsonschema.compile(point_schema) try: point_validator({"x": 1.0, "y": 2.0}) except fastjsonschema.JsonSchemaException as e: print(f"Data failed validation: {e}") API *** """ from functools import partial, update_wrapper from .draft04 import CodeGeneratorDraft04 from .draft06 import CodeGeneratorDraft06 from .draft07 import CodeGeneratorDraft07 from .exceptions import JsonSchemaException, JsonSchemaValueException, JsonSchemaDefinitionException from .ref_resolver import RefResolver from .version import VERSION __all__ = ( 'VERSION', 'JsonSchemaException', 'JsonSchemaValueException', 'JsonSchemaDefinitionException', 'validate', 'compile', 'compile_to_code', ) def validate(definition, data, handlers={}, formats={}, use_default=True, use_formats=True, detailed_exceptions=True): """ Validation function for lazy programmers or for use cases when you need to call validation only once, so you do not have to compile it first. Use it only when you do not care about performance (even though it will still be faster than alternative implementations). .. code-block:: python import fastjsonschema fastjsonschema.validate({'type': 'string'}, 'hello') # same as: compile({'type': 'string'})('hello') The preferred way is to use the :any:`compile` function. """ return compile(definition, handlers, formats, use_default, use_formats, detailed_exceptions)(data) #TODO: Change use_default to False when upgrading to version 3.
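# An illustrative sketch of the dollar handling described in the module
# docstring: a trailing ``$`` in a pattern is rewritten to ``\Z``, so unlike
# plain ``re`` the pattern does not accept a trailing newline.
#
#     validate_pattern = fastjsonschema.compile({'type': 'string', 'pattern': 'a$'})
#     validate_pattern('a')      # passes
#     validate_pattern('a\n')    # raises JsonSchemaValueException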
# pylint: disable=redefined-builtin,dangerous-default-value,exec-used def compile(definition, handlers={}, formats={}, use_default=True, use_formats=True, detailed_exceptions=True): """ Generates a validation function for validating data against the JSON schema passed in ``definition``. Example: .. code-block:: python import fastjsonschema validate = fastjsonschema.compile({'type': 'string'}) validate('hello') This implementation supports the keyword ``default`` (can be turned off by passing `use_default=False`): .. code-block:: python validate = fastjsonschema.compile({ 'type': 'object', 'properties': { 'a': {'type': 'number', 'default': 42}, }, }) data = validate({}) assert data == {'a': 42} Supported implementations are draft-04, draft-06 and draft-07. Which version should be used is determined by `$schema` in your ``definition``. When not specified, the latest implementation is used (draft-07). .. code-block:: python validate = fastjsonschema.compile({ '$schema': 'http://json-schema.org/draft-04/schema', 'type': 'number', }) In the parameter ``handlers`` you can pass a mapping from URI scheme to a function that should be used to retrieve remote schemas used in your ``definition``. Also, you can pass a mapping for custom formats. The key is the name of your format and the value can be a regular expression (which will be compiled) or a callback returning `bool` (or you can raise your own exception). .. code-block:: python validate = fastjsonschema.compile(definition, formats={ 'foo': r'foo|bar', 'bar': lambda value: value in ('foo', 'bar'), }) Note that formats are automatically used as assertions. This can be turned off by passing `use_formats=False`; when disabled, custom formats are disabled as well. (Added in 2.19.0.) If you don't need detailed exceptions, you can turn the details off and gain additional performance by passing `detailed_exceptions=False`. Exception :any:`JsonSchemaDefinitionException` is raised when generating the code fails (bad definition). Exception :any:`JsonSchemaValueException` is raised from the generated function when validation fails (the data do not follow the definition). """ resolver, code_generator = _factory(definition, handlers, formats, use_default, use_formats, detailed_exceptions) global_state = code_generator.global_state # Do not pass local state so it can recursively call itself. exec(code_generator.func_code, global_state) func = global_state[resolver.get_scope_name()] if formats: return update_wrapper(partial(func, custom_formats=formats), func) return func # pylint: disable=dangerous-default-value def compile_to_code(definition, handlers={}, formats={}, use_default=True, use_formats=True, detailed_exceptions=True): """ Generates validation code for validating data against the JSON schema passed in ``definition``. Example: .. code-block:: python import fastjsonschema code = fastjsonschema.compile_to_code({'type': 'string'}) with open('your_file.py', 'w') as f: f.write(code) You can also use it as a script: .. code-block:: bash echo "{'type': 'string'}" | python3 -m fastjsonschema > your_file.py python3 -m fastjsonschema "{'type': 'string'}" > your_file.py Exception :any:`JsonSchemaDefinitionException` is raised when generating the code fails (bad definition).
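The generated module can then be imported and used directly; a sketch under
the assumption that the root validator of an anonymous schema is exposed as
``validate`` (the file name is illustrative):

.. code-block:: python

    import your_file
    your_file.validate('hello')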
""" _, code_generator = _factory(definition, handlers, formats, use_default, use_formats, detailed_exceptions) return ( 'VERSION = "' + VERSION + '"\n' + code_generator.global_state_code + '\n' + code_generator.func_code ) def _factory(definition, handlers, formats={}, use_default=True, use_formats=True, detailed_exceptions=True): resolver = RefResolver.from_schema(definition, handlers=handlers, store={}) code_generator = _get_code_generator_class(definition)( definition, resolver=resolver, formats=formats, use_default=use_default, use_formats=use_formats, detailed_exceptions=detailed_exceptions, ) return resolver, code_generator def _get_code_generator_class(schema): # Schema in from draft-06 can be just the boolean value. if isinstance(schema, dict): schema_version = schema.get('$schema', '') if 'draft-04' in schema_version: return CodeGeneratorDraft04 if 'draft-06' in schema_version: return CodeGeneratorDraft06 return CodeGeneratorDraft07 poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/__main__.py000066400000000000000000000004701475444614500256010ustar00rootroot00000000000000import json import sys from . import compile_to_code def main(): if len(sys.argv) == 2: definition = sys.argv[1] else: definition = sys.stdin.read() definition = json.loads(definition) code = compile_to_code(definition) print(code) if __name__ == '__main__': main() poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/draft04.py000066400000000000000000000741301475444614500253310ustar00rootroot00000000000000import decimal import re from .exceptions import JsonSchemaDefinitionException from .generator import CodeGenerator, enforce_list JSON_TYPE_TO_PYTHON_TYPE = { 'null': 'NoneType', 'boolean': 'bool', 'number': 'int, float, Decimal', 'integer': 'int', 'string': 'str', 'array': 'list, tuple', 'object': 'dict', } DOLLAR_FINDER = re.compile(r"(? {maxLength}:'): self.exc('{name} must be shorter than or equal to {maxLength} characters', rule='maxLength') def generate_pattern(self): with self.l('if isinstance({variable}, str):'): pattern = self._definition['pattern'] safe_pattern = pattern.replace('\\', '\\\\').replace('"', '\\"') end_of_string_fixed_pattern = DOLLAR_FINDER.sub(r'\\Z', pattern) self._compile_regexps[pattern] = re.compile(end_of_string_fixed_pattern) with self.l('if not REGEX_PATTERNS[{}].search({variable}):', repr(pattern)): self.exc('{name} must match pattern {}', safe_pattern, rule='pattern') def generate_format(self): """ Means that value have to be in specified format. For example date, email or other. .. code-block:: python {'format': 'email'} Valid value for this definition is user@example.com but not @username """ if not self._use_formats: return with self.l('if isinstance({variable}, str):'): format_ = self._definition['format'] # Checking custom formats - user is allowed to override default formats. if format_ in self._custom_formats: custom_format = self._custom_formats[format_] if isinstance(custom_format, str): self._generate_format(format_, format_ + '_re_pattern', custom_format) else: with self.l('if not custom_formats["{}"]({variable}):', format_): self.exc('{name} must be {}', format_, rule='format') elif format_ in self.FORMAT_REGEXS: format_regex = self.FORMAT_REGEXS[format_] self._generate_format(format_, format_ + '_re_pattern', format_regex) # Format regex is used only in meta schemas. 
elif format_ == 'regex': self._extra_imports_lines = ['import re'] with self.l('try:', optimize=False): self.l('re.compile({variable})') with self.l('except Exception:'): self.exc('{name} must be a valid regex', rule='format') else: raise JsonSchemaDefinitionException('Unknown format: {}'.format(format_)) def _generate_format(self, format_name, regexp_name, regexp): if self._definition['format'] == format_name: if regexp_name not in self._compile_regexps: self._compile_regexps[regexp_name] = re.compile(regexp) with self.l('if not REGEX_PATTERNS["{}"].match({variable}):', regexp_name): self.exc('{name} must be {}', format_name, rule='format') def generate_minimum(self): with self.l('if isinstance({variable}, (int, float, Decimal)):'): if not isinstance(self._definition['minimum'], (int, float, decimal.Decimal)): raise JsonSchemaDefinitionException('minimum must be a number') if self._definition.get('exclusiveMinimum', False): with self.l('if {variable} <= {minimum}:'): self.exc('{name} must be bigger than {minimum}', rule='minimum') else: with self.l('if {variable} < {minimum}:'): self.exc('{name} must be bigger than or equal to {minimum}', rule='minimum') def generate_maximum(self): with self.l('if isinstance({variable}, (int, float, Decimal)):'): if not isinstance(self._definition['maximum'], (int, float, decimal.Decimal)): raise JsonSchemaDefinitionException('maximum must be a number') if self._definition.get('exclusiveMaximum', False): with self.l('if {variable} >= {maximum}:'): self.exc('{name} must be smaller than {maximum}', rule='maximum') else: with self.l('if {variable} > {maximum}:'): self.exc('{name} must be smaller than or equal to {maximum}', rule='maximum') def generate_multiple_of(self): with self.l('if isinstance({variable}, (int, float, Decimal)):'): if not isinstance(self._definition['multipleOf'], (int, float, decimal.Decimal)): raise JsonSchemaDefinitionException('multipleOf must be a number') # For a proper multiplication check of floats we need to use decimals, # because for example 19.01 / 0.01 = 1901.0000000000002. if isinstance(self._definition['multipleOf'], float): self.l('quotient = Decimal(repr({variable})) / Decimal(repr({multipleOf}))') else: self.l('quotient = {variable} / {multipleOf}') with self.l('if int(quotient) != quotient:'): self.exc('{name} must be multiple of {multipleOf}', rule='multipleOf') # For example, 1e308 / 0.123456789 with self.l('if {variable} / {multipleOf} == float("inf"):'): self.exc('infinity reached', rule='multipleOf') def generate_min_items(self): self.create_variable_is_list() with self.l('if {variable}_is_list:'): if not isinstance(self._definition['minItems'], (int, float)): raise JsonSchemaDefinitionException('minItems must be a number') self.create_variable_with_length() with self.l('if {variable}_len < {minItems}:'): self.exc('{name} must contain at least {minItems} items', rule='minItems') def generate_max_items(self): self.create_variable_is_list() with self.l('if {variable}_is_list:'): if not isinstance(self._definition['maxItems'], (int, float)): raise JsonSchemaDefinitionException('maxItems must be a number') self.create_variable_with_length() with self.l('if {variable}_len > {maxItems}:'): self.exc('{name} must contain less than or equal to {maxItems} items', rule='maxItems') def generate_unique_items(self): """ With Python 3.4, the module ``timeit`` recommended these solutions:
.. code-block:: python >>> timeit.timeit("len(x) > len(set(x))", "x=range(100)+range(100)", number=100000) 0.5839540958404541 >>> timeit.timeit("len({}.fromkeys(x)) == len(x)", "x=range(100)+range(100)", number=100000) 0.7094449996948242 >>> timeit.timeit("seen = set(); any(i in seen or seen.add(i) for i in x)", "x=range(100)+range(100)", number=100000) 2.0819358825683594 >>> timeit.timeit("np.unique(x).size == len(x)", "x=range(100)+range(100); import numpy as np", number=100000) 2.1439831256866455 """ unique_definition = self._definition['uniqueItems'] if not unique_definition: return self.create_variable_is_list() with self.l('if {variable}_is_list:'): self.l( 'def fn(var): ' 'return frozenset(dict((k, fn(v)) ' 'for k, v in var.items()).items()) ' 'if hasattr(var, "items") else tuple(fn(v) ' 'for v in var) ' 'if isinstance(var, (dict, list)) else str(var) ' 'if isinstance(var, bool) else var') self.create_variable_with_length() with self.l('if {variable}_len > len(set(fn({variable}_x) for {variable}_x in {variable})):'): self.exc('{name} must contain unique items', rule='uniqueItems') def generate_items(self): """ Means the array is valid only when all items are valid by this definition. .. code-block:: python { 'items': [ {'type': 'integer'}, {'type': 'string'}, ], } A valid array has an integer at the first position and a string at the second; further items are only checked when ``additionalItems`` is defined. Since draft-06 the definition can also be a boolean. True means nothing is checked; False means everything is invalid. """ items_definition = self._definition['items'] if items_definition is True: return self.create_variable_is_list() with self.l('if {variable}_is_list:'): self.create_variable_with_length() if items_definition is False: with self.l('if {variable}:'): self.exc('{name} must not be there', rule='items') elif isinstance(items_definition, list): for idx, item_definition in enumerate(items_definition): with self.l('if {variable}_len > {}:', idx): self.l('{variable}__{0} = {variable}[{0}]', idx) self.generate_func_code_block( item_definition, '{}__{}'.format(self._variable, idx), '{}[{}]'.format(self._variable_name, idx), ) if self._use_default and isinstance(item_definition, dict) and 'default' in item_definition: self.l('else: {variable}.append({})', repr(item_definition['default'])) if 'additionalItems' in self._definition: if self._definition['additionalItems'] is False: with self.l('if {variable}_len > {}:', len(items_definition)): self.exc('{name} must contain only specified items', rule='items') else: with self.l('for {variable}_x, {variable}_item in enumerate({variable}[{0}:], {0}):', len(items_definition)): count = self.generate_func_code_block( self._definition['additionalItems'], '{}_item'.format(self._variable), '{}[{{{}_x}}]'.format(self._variable_name, self._variable), ) if count == 0: self.l('pass') else: if items_definition: with self.l('for {variable}_x, {variable}_item in enumerate({variable}):'): count = self.generate_func_code_block( items_definition, '{}_item'.format(self._variable), '{}[{{{}_x}}]'.format(self._variable_name, self._variable), ) if count == 0: self.l('pass') def generate_min_properties(self): self.create_variable_is_dict() with self.l('if {variable}_is_dict:'): if not isinstance(self._definition['minProperties'], (int, float)): raise JsonSchemaDefinitionException('minProperties must be a number') self.create_variable_with_length() with self.l('if {variable}_len < {minProperties}:'): self.exc('{name} must contain at least {minProperties} properties', rule='minProperties') def generate_max_properties(self): self.create_variable_is_dict()
with self.l('if {variable}_is_dict:'): if not isinstance(self._definition['maxProperties'], (int, float)): raise JsonSchemaDefinitionException('maxProperties must be a number') self.create_variable_with_length() with self.l('if {variable}_len > {maxProperties}:'): self.exc('{name} must contain less than or equal to {maxProperties} properties', rule='maxProperties') def generate_required(self): self.create_variable_is_dict() with self.l('if {variable}_is_dict:'): if not isinstance(self._definition['required'], (list, tuple)): raise JsonSchemaDefinitionException('required must be an array') if len(self._definition['required']) != len(set(self._definition['required'])): raise JsonSchemaDefinitionException('required must contain unique elements') if not self._definition.get('additionalProperties', True): not_possible = [ prop for prop in self._definition['required'] if prop not in self._definition.get('properties', {}) and not any(re.search(regex, prop) for regex in self._definition.get('patternProperties', {})) ] if not_possible: raise JsonSchemaDefinitionException('{}: items {} are required but not allowed'.format(self._variable, not_possible)) self.l('{variable}__missing_keys = set({required}) - {variable}.keys()') with self.l('if {variable}__missing_keys:'): dynamic = 'str(sorted({variable}__missing_keys)) + " properties"' self.exc('{name} must contain ', self.e(self._definition['required']), rule='required', append_to_msg=dynamic) def generate_properties(self): """ Means an object with defined keys. .. code-block:: python { 'properties': { 'key': {'type': 'number'}, }, } A valid object contains a key called 'key' with any number as its value. """ self.create_variable_is_dict() with self.l('if {variable}_is_dict:'): self.create_variable_keys() for key, prop_definition in self._definition['properties'].items(): key_name = re.sub(r'($[^a-zA-Z]|[^a-zA-Z0-9])', '', key) if not isinstance(prop_definition, (dict, bool)): raise JsonSchemaDefinitionException('{}[{}] must be object'.format(self._variable, key_name)) with self.l('if "{}" in {variable}_keys:', self.e(key)): self.l('{variable}_keys.remove("{}")', self.e(key)) self.l('{variable}__{0} = {variable}["{1}"]', key_name, self.e(key)) self.generate_func_code_block( prop_definition, '{}__{}'.format(self._variable, key_name), '{}.{}'.format(self._variable_name, self.e(key)), clear_variables=True, ) if self._use_default and isinstance(prop_definition, dict) and 'default' in prop_definition: self.l('else: {variable}["{}"] = {}', self.e(key), repr(prop_definition['default'])) def generate_pattern_properties(self): """ Means an object whose keys are defined by patterns. .. code-block:: python { 'patternProperties': { '^x': {'type': 'number'}, }, } A valid object contains keys starting with an 'x' with any number as their values.
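For instance (an illustrative sketch, assuming the schema above was compiled
to ``validate``):

.. code-block:: python

    validate({'x-size': 1})    # valid: key matches '^x', value is a number
    validate({'x-size': 'a'})  # invalid: value for a '^x' key is not a number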
""" self.create_variable_is_dict() with self.l('if {variable}_is_dict:'): self.create_variable_keys() for pattern, definition in self._definition['patternProperties'].items(): self._compile_regexps[pattern] = re.compile(pattern) with self.l('for {variable}_key, {variable}_val in {variable}.items():'): for pattern, definition in self._definition['patternProperties'].items(): with self.l('if REGEX_PATTERNS[{}].search({variable}_key):', repr(pattern)): with self.l('if {variable}_key in {variable}_keys:'): self.l('{variable}_keys.remove({variable}_key)') self.generate_func_code_block( definition, '{}_val'.format(self._variable), '{}.{{{}_key}}'.format(self._variable_name, self._variable), clear_variables=True, ) def generate_additional_properties(self): """ Means object with keys with values defined by definition. .. code-block:: python { 'properties': { 'key': {'type': 'number'}, } 'additionalProperties': {'type': 'string'}, } Valid object is containing key called 'key' and it's value any number and any other key with any string. """ self.create_variable_is_dict() with self.l('if {variable}_is_dict:'): self.create_variable_keys() add_prop_definition = self._definition["additionalProperties"] if add_prop_definition is True or add_prop_definition == {}: return if add_prop_definition: properties_keys = list(self._definition.get("properties", {}).keys()) with self.l('for {variable}_key in {variable}_keys:'): with self.l('if {variable}_key not in {}:', properties_keys): self.l('{variable}_value = {variable}.get({variable}_key)') self.generate_func_code_block( add_prop_definition, '{}_value'.format(self._variable), '{}.{{{}_key}}'.format(self._variable_name, self._variable), ) else: with self.l('if {variable}_keys:'): self.exc('{name} must not contain "+str({variable}_keys)+" properties', rule='additionalProperties') def generate_dependencies(self): """ Means when object has property, it needs to have also other property. .. code-block:: python { 'dependencies': { 'bar': ['foo'], }, } Valid object is containing only foo, both bar and foo or none of them, but not object with only bar. Since draft 06 definition can be boolean or empty array. True and empty array means nothing, False means that key cannot be there at all. 
""" self.create_variable_is_dict() with self.l('if {variable}_is_dict:'): is_empty = True for key, values in self._definition["dependencies"].items(): if values == [] or values is True: continue is_empty = False with self.l('if "{}" in {variable}:', self.e(key)): if values is False: self.exc('{} in {name} must not be there', key, rule='dependencies') elif isinstance(values, list): for value in values: with self.l('if "{}" not in {variable}:', self.e(value)): self.exc('{name} missing dependency {} for {}', self.e(value), self.e(key), rule='dependencies') else: self.generate_func_code_block(values, self._variable, self._variable_name, clear_variables=True) if is_empty: self.l('pass') poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/draft06.py000066400000000000000000000173241475444614500253350ustar00rootroot00000000000000import decimal from .draft04 import CodeGeneratorDraft04, JSON_TYPE_TO_PYTHON_TYPE from .exceptions import JsonSchemaDefinitionException from .generator import enforce_list class CodeGeneratorDraft06(CodeGeneratorDraft04): FORMAT_REGEXS = dict(CodeGeneratorDraft04.FORMAT_REGEXS, **{ 'json-pointer': r'^(/(([^/~])|(~[01]))*)*\Z', 'uri-reference': r'^(\w+:(\/?\/?))?[^#\\\s]*(#[^\\\s]*)?\Z', 'uri-template': ( r'^(?:(?:[^\x00-\x20\"\'<>%\\^`{|}]|%[0-9a-f]{2})|' r'\{[+#./;?&=,!@|]?(?:[a-z0-9_]|%[0-9a-f]{2})+' r'(?::[1-9][0-9]{0,3}|\*)?(?:,(?:[a-z0-9_]|%[0-9a-f]{2})+' r'(?::[1-9][0-9]{0,3}|\*)?)*\})*\Z' ), }) def __init__(self, definition, resolver=None, formats={}, use_default=True, use_formats=True, detailed_exceptions=True): super().__init__(definition, resolver, formats, use_default, use_formats, detailed_exceptions) self._json_keywords_to_function.update(( ('exclusiveMinimum', self.generate_exclusive_minimum), ('exclusiveMaximum', self.generate_exclusive_maximum), ('propertyNames', self.generate_property_names), ('contains', self.generate_contains), ('const', self.generate_const), )) def _generate_func_code_block(self, definition): if isinstance(definition, bool): self.generate_boolean_schema() elif '$ref' in definition: # needed because ref overrides any sibling keywords self.generate_ref() else: self.run_generate_functions(definition) def generate_boolean_schema(self): """ Means that schema can be specified by boolean. True means everything is valid, False everything is invalid. """ if self._definition is True: self.l('pass') if self._definition is False: self.exc('{name} must not be there') def generate_type(self): """ Validation of type. Can be one type or list of types. Since draft 06 a float without fractional part is an integer. .. 
code-block:: python {'type': 'string'} {'type': ['string', 'number']} """ types = enforce_list(self._definition['type']) try: python_types = ', '.join(JSON_TYPE_TO_PYTHON_TYPE[t] for t in types) except KeyError as exc: raise JsonSchemaDefinitionException('Unknown type: {}'.format(exc)) extra = '' if 'integer' in types: extra += ' and not (isinstance({variable}, float) and {variable}.is_integer())'.format( variable=self._variable, ) if ('number' in types or 'integer' in types) and 'boolean' not in types: extra += ' or isinstance({variable}, bool)'.format(variable=self._variable) with self.l('if not isinstance({variable}, ({})){}:', python_types, extra): self.exc('{name} must be {}', ' or '.join(types), rule='type') def generate_exclusive_minimum(self): with self.l('if isinstance({variable}, (int, float, Decimal)):'): if not isinstance(self._definition['exclusiveMinimum'], (int, float, decimal.Decimal)): raise JsonSchemaDefinitionException('exclusiveMinimum must be an integer, a float or a decimal') with self.l('if {variable} <= {exclusiveMinimum}:'): self.exc('{name} must be bigger than {exclusiveMinimum}', rule='exclusiveMinimum') def generate_exclusive_maximum(self): with self.l('if isinstance({variable}, (int, float, Decimal)):'): if not isinstance(self._definition['exclusiveMaximum'], (int, float, decimal.Decimal)): raise JsonSchemaDefinitionException('exclusiveMaximum must be an integer, a float or a decimal') with self.l('if {variable} >= {exclusiveMaximum}:'): self.exc('{name} must be smaller than {exclusiveMaximum}', rule='exclusiveMaximum') def generate_property_names(self): """ Means that the keys of an object must follow this definition. .. code-block:: python { 'propertyNames': { 'maxLength': 3, }, } Valid keys of an object for this definition are e.g. foo and bar, but not foobar. """ property_names_definition = self._definition.get('propertyNames', {}) if property_names_definition is True: pass elif property_names_definition is False: self.create_variable_keys() with self.l('if {variable}_keys:'): self.exc('{name} must not be there', rule='propertyNames') else: self.create_variable_is_dict() with self.l('if {variable}_is_dict:'): self.create_variable_with_length() with self.l('if {variable}_len != 0:'): self.l('{variable}_property_names = True') with self.l('for {variable}_key in {variable}:'): with self.l('try:'): self.generate_func_code_block( property_names_definition, '{}_key'.format(self._variable), self._variable_name, clear_variables=True, ) with self.l('except JsonSchemaValueException:'): self.l('{variable}_property_names = False') with self.l('if not {variable}_property_names:'): self.exc('{name} must be named by propertyName definition', rule='propertyNames') def generate_contains(self): """ Means that an array must contain at least one item matching this definition. .. code-block:: python { 'contains': { 'type': 'number', }, } Valid arrays are those containing at least one number.
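A quick sketch, again assuming the public ``fastjsonschema.compile`` entry point:

.. code-block:: python

    import fastjsonschema

    validate = fastjsonschema.compile({'contains': {'type': 'number'}})
    validate(['a', 1])    # valid: at least one item is a number
    validate(['a', 'b'])  # raises JsonSchemaValueException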
""" self.create_variable_is_list() with self.l('if {variable}_is_list:'): contains_definition = self._definition['contains'] if contains_definition is False: self.exc('{name} is always invalid', rule='contains') elif contains_definition is True: with self.l('if not {variable}:'): self.exc('{name} must not be empty', rule='contains') else: self.l('{variable}_contains = False') with self.l('for {variable}_key in {variable}:'): with self.l('try:'): self.generate_func_code_block( contains_definition, '{}_key'.format(self._variable), self._variable_name, clear_variables=True, ) self.l('{variable}_contains = True') self.l('break') self.l('except JsonSchemaValueException: pass') with self.l('if not {variable}_contains:'): self.exc('{name} must contain one of contains definition', rule='contains') def generate_const(self): """ Means that value is valid when is equeal to const definition. .. code-block:: python { 'const': 42, } Only valid value is 42 in this example. """ const = self._definition['const'] if isinstance(const, str): const = '"{}"'.format(self.e(const)) with self.l('if {variable} != {}:', const): self.exc('{name} must be same as const definition: {definition_rule}', rule='const') poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/draft07.py000066400000000000000000000105411475444614500253300ustar00rootroot00000000000000from .draft06 import CodeGeneratorDraft06 class CodeGeneratorDraft07(CodeGeneratorDraft06): FORMAT_REGEXS = dict(CodeGeneratorDraft06.FORMAT_REGEXS, **{ 'date': r'^(?P\d{4})-(?P(0[1-9]|1[0-2]))-(?P(0[1-9]|[12]\d|3[01]))\Z', 'iri': r'^\w+:(\/?\/?)[^\s]+\Z', 'iri-reference': r'^(\w+:(\/?\/?))?[^#\\\s]*(#[^\\\s]*)?\Z', 'idn-email': r'^[^@]+@[^@]+\.[^@]+\Z', 'idn-hostname': r'^(?!-)(xn--)?[a-zA-Z0-9][a-zA-Z0-9-_]{0,61}[a-zA-Z0-9]{0,1}\.(?!-)(xn--)?([a-zA-Z0-9\-]{1,50}|[a-zA-Z0-9-]{1,30}\.[a-zA-Z]{2,})$', 'relative-json-pointer': r'^(?:0|[1-9][0-9]*)(?:#|(?:\/(?:[^~/]|~0|~1)*)*)\Z', #'regex': r'', 'time': ( r'^(?P\d{1,2}):(?P\d{1,2})' r'(?::(?P\d{1,2})(?:\.(?P\d{1,6}))?' r'([zZ]|[+-]\d\d:\d\d)?)?\Z' ), }) def __init__(self, definition, resolver=None, formats={}, use_default=True, use_formats=True, detailed_exceptions=True): super().__init__(definition, resolver, formats, use_default, use_formats, detailed_exceptions) # pylint: disable=duplicate-code self._json_keywords_to_function.update(( ('if', self.generate_if_then_else), ('contentEncoding', self.generate_content_encoding), ('contentMediaType', self.generate_content_media_type), )) def generate_if_then_else(self): """ Implementation of if-then-else. .. code-block:: python { 'if': { 'exclusiveMaximum': 0, }, 'then': { 'minimum': -10, }, 'else': { 'multipleOf': 2, }, } Valid values are any between -10 and 0 or any multiplication of two. """ with self.l('try:', optimize=False): self.generate_func_code_block( self._definition['if'], self._variable, self._variable_name, clear_variables=True ) with self.l('except JsonSchemaValueException:'): if 'else' in self._definition: self.generate_func_code_block( self._definition['else'], self._variable, self._variable_name, clear_variables=True ) else: self.l('pass') if 'then' in self._definition: with self.l('else:'): self.generate_func_code_block( self._definition['then'], self._variable, self._variable_name, clear_variables=True ) def generate_content_encoding(self): """ Means decoding value when it's encoded by base64. .. 
code-block:: python { 'contentEncoding': 'base64', } """ if self._definition['contentEncoding'] == 'base64': with self.l('if isinstance({variable}, str):'): with self.l('try:'): self.l('import base64') self.l('{variable} = base64.b64decode({variable})') with self.l('except Exception:'): self.exc('{name} must be encoded by base64') with self.l('if {variable} == "":'): self.exc('contentEncoding must be base64') def generate_content_media_type(self): """ Means the value is loaded as JSON when it's specified as JSON. .. code-block:: python { 'contentMediaType': 'application/json', } """ if self._definition['contentMediaType'] == 'application/json': with self.l('if isinstance({variable}, bytes):'): with self.l('try:'): self.l('{variable} = {variable}.decode("utf-8")') with self.l('except Exception:'): self.exc('{name} must be encoded by utf8') with self.l('if isinstance({variable}, str):'): with self.l('try:'): self.l('import json') self.l('{variable} = json.loads({variable})') with self.l('except Exception:'): self.exc('{name} must be valid JSON') poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/exceptions.py000066400000000000000000000031141475444614500262400ustar00rootroot00000000000000import re SPLIT_RE = re.compile(r'[\.\[\]]+') class JsonSchemaException(ValueError): """ Base exception of ``fastjsonschema`` library. """ class JsonSchemaValueException(JsonSchemaException): """ Exception raised by validation function. Available properties: * ``message`` containing human-readable information about what is wrong (e.g. ``data.property[index] must be smaller than or equal to 42``), * invalid ``value`` (e.g. ``60``), * ``name`` of a path in the data structure (e.g. ``data.property[index]``), * ``path`` as an array in the data structure (e.g. ``['data', 'property', 'index']``), * the whole ``definition`` which the ``value`` has to fulfil (e.g. ``{'type': 'number', 'maximum': 42}``), * ``rule`` which the ``value`` is breaking (e.g. ``maximum``) * and ``rule_definition`` (e.g. ``42``). .. versionchanged:: 2.14.0 Added all extra properties. """ def __init__(self, message, value=None, name=None, definition=None, rule=None): super().__init__(message) self.message = message self.value = value self.name = name self.definition = definition self.rule = rule @property def path(self): return [item for item in SPLIT_RE.split(self.name) if item != ''] @property def rule_definition(self): if not self.rule or not self.definition: return None return self.definition.get(self.rule) class JsonSchemaDefinitionException(JsonSchemaException): """ Exception raised by generator of validation function. """ poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/generator.py000066400000000000000000000314031475444614500260470ustar00rootroot00000000000000from collections import OrderedDict from decimal import Decimal import re from .exceptions import JsonSchemaValueException, JsonSchemaDefinitionException from .indent import indent from .ref_resolver import RefResolver def enforce_list(variable): if isinstance(variable, list): return variable return [variable] # pylint: disable=too-many-instance-attributes,too-many-public-methods class CodeGenerator: """ This class is not supposed to be used directly. Anything inside this class can be changed without notice. This class generates the code of a validation function from a JSON schema object, as a string. Example: ..
code-block:: python CodeGenerator(json_schema_definition).func_code """ INDENT = 4 # spaces def __init__(self, definition, resolver=None, detailed_exceptions=True): self._code = [] self._compile_regexps = {} self._custom_formats = {} self._detailed_exceptions = detailed_exceptions # Any extra library should be here to be imported only once. # Lines are imports to be printed in the file and objects # key-value pair to pass to compile function directly. self._extra_imports_lines = [ "from decimal import Decimal", ] self._extra_imports_objects = { "Decimal": Decimal, } self._variables = set() self._indent = 0 self._indent_last_line = None self._variable = None self._variable_name = None self._root_definition = definition self._definition = None # map schema URIs to validation function names for functions # that are not yet generated, but need to be generated self._needed_validation_functions = {} # validation function names that are already done self._validation_functions_done = set() if resolver is None: resolver = RefResolver.from_schema(definition, store={}) self._resolver = resolver # add main function to `self._needed_validation_functions` self._needed_validation_functions[self._resolver.get_uri()] = self._resolver.get_scope_name() self._json_keywords_to_function = OrderedDict() @property def func_code(self): """ Returns the generated code of the whole validation function as a string. """ self._generate_func_code() return '\n'.join(self._code) @property def global_state(self): """ Returns global variables for generating a function from ``func_code``. Includes compiled regular expressions and imports, so they do not have to be recreated every time the validation function is called. """ self._generate_func_code() return dict( **self._extra_imports_objects, REGEX_PATTERNS=self._compile_regexps, re=re, JsonSchemaValueException=JsonSchemaValueException, ) @property def global_state_code(self): """ Returns global variables for generating a function from ``func_code``, as code. Includes compiled regular expressions and imports. """ self._generate_func_code() if not self._compile_regexps: return '\n'.join(self._extra_imports_lines + [ 'from fastjsonschema import JsonSchemaValueException', '', '', ]) return '\n'.join(self._extra_imports_lines + [ 'import re', 'from fastjsonschema import JsonSchemaValueException', '', '', 'REGEX_PATTERNS = ' + serialize_regexes(self._compile_regexps), '', ]) def _generate_func_code(self): if not self._code: self.generate_func_code() def generate_func_code(self): """ Creates the base code of the validation function and calls helpers to create code for each definition. """ self.l('NoneType = type(None)') # Generate parts that are referenced and not yet generated while self._needed_validation_functions: # During generation of a validation function, it may be necessary to generate # new ones, which are added back to `_needed_validation_functions`. # Hence the use of a while loop instead of a for loop.
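# (Illustrative note: popitem() both fetches and removes an entry, so any
# refs discovered while generating one function are handled by a later
# iteration of this loop.)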
uri, name = self._needed_validation_functions.popitem() self.generate_validation_function(uri, name) def generate_validation_function(self, uri, name): """ Generate a validation function for the given uri with the given name. """ self._validation_functions_done.add(uri) self.l('') with self._resolver.resolving(uri) as definition: with self.l('def {}(data, custom_formats={{}}, name_prefix=None):', name): self.generate_func_code_block(definition, 'data', 'data', clear_variables=True) self.l('return data') def generate_func_code_block(self, definition, variable, variable_name, clear_variables=False): """ Creates validation rules for the current definition. Returns the number of validation rules generated as code. """ backup = self._definition, self._variable, self._variable_name self._definition, self._variable, self._variable_name = definition, variable, variable_name if clear_variables: backup_variables = self._variables self._variables = set() count = self._generate_func_code_block(definition) self._definition, self._variable, self._variable_name = backup if clear_variables: self._variables = backup_variables return count def _generate_func_code_block(self, definition): if not isinstance(definition, dict): raise JsonSchemaDefinitionException("definition must be an object") if '$ref' in definition: # needed because ref overrides any sibling keywords return self.generate_ref() else: return self.run_generate_functions(definition) def run_generate_functions(self, definition): """Returns the number of generate functions that were executed.""" count = 0 for key, func in self._json_keywords_to_function.items(): if key in definition: func() count += 1 return count def generate_ref(self): """ A ref can be a link to a remote or local definition. .. code-block:: python {'$ref': 'http://json-schema.org/draft-04/schema#'} { 'properties': { 'foo': {'type': 'integer'}, 'bar': {'$ref': '#/properties/foo'} } } """ with self._resolver.in_scope(self._definition['$ref']): name = self._resolver.get_scope_name() uri = self._resolver.get_uri() if uri not in self._validation_functions_done: self._needed_validation_functions[uri] = name # call validation function assert self._variable_name.startswith("data") path = self._variable_name[4:] name_arg = '(name_prefix or "data") + "{}"'.format(path) if '{' in name_arg: name_arg = name_arg + '.format(**locals())' self.l('{}({variable}, custom_formats, {name_arg})', name, name_arg=name_arg) # pylint: disable=invalid-name @indent def l(self, line, *args, **kwds): """ Short-cut for line. Used for inserting a line of code. It's formatted with the parameters ``variable``, ``variable_name`` (as ``name`` for short), all keys from the current JSON schema ``definition``, and also the passed arguments in ``args`` and named ``kwds``. .. code-block:: python self.l('if {variable} not in {enum}: raise JsonSchemaValueException("Wrong!")') When you want to indent a block, use it as a context manager. For example: .. code-block:: python with self.l('if {variable} not in {enum}:'): self.l('raise JsonSchemaValueException("Wrong!")') """ spaces = ' ' * self.INDENT * self._indent name = self._variable_name if name: # Add name_prefix to the name when it is being outputted.
assert name.startswith('data') name = '" + (name_prefix or "data") + "' + name[4:] if '{' in name: name = name + '".format(**locals()) + "' context = dict( self._definition if self._definition and self._definition is not True else {}, variable=self._variable, name=name, **kwds ) line = line.format(*args, **context) line = line.replace('\n', '\\n').replace('\r', '\\r') self._code.append(spaces + line) return line def e(self, string): """ Short-cut for escape. Used for inserting user values into a string message. .. code-block:: python self.l('raise JsonSchemaValueException("Variable: {}")', self.e(variable)) """ return str(string).replace('"', '\\"') def exc(self, msg, *args, append_to_msg=None, rule=None): """ Short-cut for creating exception-raising code. """ if not self._detailed_exceptions: self.l('raise JsonSchemaValueException("'+msg+'")', *args) return arg = '"'+msg+'"' if append_to_msg: arg += ' + (' + append_to_msg + ')' msg = 'raise JsonSchemaValueException('+arg+', value={variable}, name="{name}", definition={definition}, rule={rule})' definition = self._expand_refs(self._definition) definition_rule = self.e(definition.get(rule) if isinstance(definition, dict) else None) self.l(msg, *args, definition=repr(definition), rule=repr(rule), definition_rule=definition_rule) def _expand_refs(self, definition): if isinstance(definition, list): return [self._expand_refs(v) for v in definition] if not isinstance(definition, dict): return definition if "$ref" in definition and isinstance(definition["$ref"], str): with self._resolver.resolving(definition["$ref"]) as schema: return schema return {k: self._expand_refs(v) for k, v in definition.items()} def create_variable_with_length(self): """ Append code for creating a variable holding the length of the value (for example the length of a list or dictionary), named ``{variable}_len``. It can be called several times; the variable is only created if it does not exist yet. """ variable_name = '{}_len'.format(self._variable) if variable_name in self._variables: return self._variables.add(variable_name) self.l('{variable}_len = len({variable})') def create_variable_keys(self): """ Append code for creating a variable with the keys of the value (a dictionary), named ``{variable}_keys``. Similar to `create_variable_with_length`. """ variable_name = '{}_keys'.format(self._variable) if variable_name in self._variables: return self._variables.add(variable_name) self.l('{variable}_keys = set({variable}.keys())') def create_variable_is_list(self): """ Append code for creating a boolean variable named ``{variable}_is_list`` that records whether the value is a list. Similar to `create_variable_with_length`. """ variable_name = '{}_is_list'.format(self._variable) if variable_name in self._variables: return self._variables.add(variable_name) self.l('{variable}_is_list = isinstance({variable}, (list, tuple))') def create_variable_is_dict(self): """ Append code for creating a boolean variable named ``{variable}_is_dict`` that records whether the value is a dict. Similar to `create_variable_with_length`.
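For illustration, for the variable ``data`` this appends the generated line:

.. code-block:: python

    data_is_dict = isinstance(data, dict)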
""" variable_name = '{}_is_dict'.format(self._variable) if variable_name in self._variables: return self._variables.add(variable_name) self.l('{variable}_is_dict = isinstance({variable}, dict)') def serialize_regexes(patterns_dict): # Unfortunately using `pprint.pformat` is causing errors # specially with big regexes regex_patterns = ( repr(k) + ": " + repr_regex(v) for k, v in patterns_dict.items() ) return '{\n ' + ",\n ".join(regex_patterns) + "\n}" def repr_regex(regex): all_flags = ("A", "I", "DEBUG", "L", "M", "S", "X") flags = " | ".join(f"re.{f}" for f in all_flags if regex.flags & getattr(re, f)) flags = ", " + flags if flags else "" return "re.compile({!r}{})".format(regex.pattern, flags) poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/indent.py000066400000000000000000000016301475444614500253410ustar00rootroot00000000000000def indent(func): """ Decorator for allowing to use method as normal method or with context manager for auto-indenting code blocks. """ def wrapper(self, line, *args, optimize=True, **kwds): last_line = self._indent_last_line line = func(self, line, *args, **kwds) # When two blocks have the same condition (such as value has to be dict), # do the check only once and keep it under one block. if optimize and last_line == line: self._code.pop() self._indent_last_line = line return Indent(self, line) return wrapper class Indent: def __init__(self, instance, line): self.instance = instance self.line = line def __enter__(self): self.instance._indent += 1 def __exit__(self, type_, value, traceback): self.instance._indent -= 1 self.instance._indent_last_line = self.line poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/ref_resolver.py000066400000000000000000000127111475444614500265570ustar00rootroot00000000000000""" JSON Schema URI resolution scopes and dereferencing https://tools.ietf.org/id/draft-zyp-json-schema-04.html#rfc.section.7 Code adapted from https://github.com/Julian/jsonschema """ import contextlib import json import re from urllib import parse as urlparse from urllib.parse import unquote from .exceptions import JsonSchemaDefinitionException def get_id(schema): """ Originally ID was `id` and since v7 it's `$id`. """ return schema.get('$id', schema.get('id', '')) def resolve_path(schema, fragment): """ Return definition from path. Path is unescaped according https://tools.ietf.org/html/rfc6901 """ fragment = fragment.lstrip('/') parts = unquote(fragment).split('/') if fragment else [] for part in parts: part = part.replace('~1', '/').replace('~0', '~') if isinstance(schema, list): schema = schema[int(part)] elif part in schema: schema = schema[part] else: raise JsonSchemaDefinitionException('Unresolvable ref: {}'.format(part)) return schema def normalize(uri): return urlparse.urlsplit(uri).geturl() def resolve_remote(uri, handlers): """ Resolve a remote ``uri``. .. note:: urllib library is used to fetch requests from the remote ``uri`` if handlers does notdefine otherwise. """ scheme = urlparse.urlsplit(uri).scheme if scheme in handlers: result = handlers[scheme](uri) else: from urllib.request import urlopen req = urlopen(uri) encoding = req.info().get_content_charset() or 'utf-8' try: result = json.loads(req.read().decode(encoding),) except ValueError as exc: raise JsonSchemaDefinitionException('{} failed to decode: {}'.format(uri, exc)) finally: req.close() return result class RefResolver: """ Resolve JSON References. 
""" # pylint: disable=dangerous-default-value,too-many-arguments def __init__(self, base_uri, schema, store={}, cache=True, handlers={}): """ `base_uri` is URI of the referring document from the `schema`. `store` is an dictionary that will be used to cache the fetched schemas (if `cache=True`). Please notice that you can have caching problems when compiling schemas with colliding `$ref`. To force overwriting use `cache=False` or explicitly pass the `store` argument (with a brand new dictionary) """ self.base_uri = base_uri self.resolution_scope = base_uri self.schema = schema self.store = store self.cache = cache self.handlers = handlers self.walk(schema) @classmethod def from_schema(cls, schema, handlers={}, **kwargs): """ Construct a resolver from a JSON schema object. """ return cls( get_id(schema) if isinstance(schema, dict) else '', schema, handlers=handlers, **kwargs ) @contextlib.contextmanager def in_scope(self, scope: str): """ Context manager to handle current scope. """ old_scope = self.resolution_scope self.resolution_scope = urlparse.urljoin(old_scope, scope) try: yield finally: self.resolution_scope = old_scope @contextlib.contextmanager def resolving(self, ref: str): """ Context manager which resolves a JSON ``ref`` and enters the resolution scope of this ref. """ new_uri = urlparse.urljoin(self.resolution_scope, ref) uri, fragment = urlparse.urldefrag(new_uri) if uri and normalize(uri) in self.store: schema = self.store[normalize(uri)] elif not uri or uri == self.base_uri: schema = self.schema else: schema = resolve_remote(uri, self.handlers) if self.cache: self.store[normalize(uri)] = schema old_base_uri, old_schema = self.base_uri, self.schema self.base_uri, self.schema = uri, schema try: with self.in_scope(uri): yield resolve_path(schema, fragment) finally: self.base_uri, self.schema = old_base_uri, old_schema def get_uri(self): return normalize(self.resolution_scope) def get_scope_name(self): """ Get current scope and return it as a valid function name. 
""" name = 'validate_' + unquote(self.resolution_scope).replace('~1', '_').replace('~0', '_').replace('"', '') name = re.sub(r'($[^a-zA-Z]|[^a-zA-Z0-9])', '_', name) name = name.lower().rstrip('_') return name def walk(self, node: dict): """ Walk thru schema and dereferencing ``id`` and ``$ref`` instances """ if isinstance(node, bool): pass elif '$ref' in node and isinstance(node['$ref'], str): ref = node['$ref'] node['$ref'] = urlparse.urljoin(self.resolution_scope, ref) elif ('$id' in node or 'id' in node) and isinstance(get_id(node), str): with self.in_scope(get_id(node)): self.store[normalize(self.resolution_scope)] = node for _, item in node.items(): if isinstance(item, dict): self.walk(item) else: for _, item in node.items(): if isinstance(item, dict): self.walk(item) poetry-core-2.1.1/src/poetry/core/_vendor/fastjsonschema/version.py000066400000000000000000000000231475444614500255400ustar00rootroot00000000000000VERSION = '2.21.1' poetry-core-2.1.1/src/poetry/core/_vendor/lark/000077500000000000000000000000001475444614500214275ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/lark/LICENSE000066400000000000000000000020611475444614500224330ustar00rootroot00000000000000Copyright © 2017 Erez Shinan Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. poetry-core-2.1.1/src/poetry/core/_vendor/lark/__init__.py000066400000000000000000000013501475444614500235370ustar00rootroot00000000000000from .exceptions import ( GrammarError, LarkError, LexError, ParseError, UnexpectedCharacters, UnexpectedEOF, UnexpectedInput, UnexpectedToken, ) from .lark import Lark from .lexer import Token from .tree import ParseTree, Tree from .utils import logger from .visitors import Discard, Transformer, Transformer_NonRecursive, Visitor, v_args __version__: str = "1.2.2" __all__ = ( "GrammarError", "LarkError", "LexError", "ParseError", "UnexpectedCharacters", "UnexpectedEOF", "UnexpectedInput", "UnexpectedToken", "Lark", "Token", "ParseTree", "Tree", "logger", "Discard", "Transformer", "Transformer_NonRecursive", "Visitor", "v_args", ) poetry-core-2.1.1/src/poetry/core/_vendor/lark/__pyinstaller/000077500000000000000000000000001475444614500242735ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/lark/__pyinstaller/__init__.py000066400000000000000000000002661475444614500264100ustar00rootroot00000000000000# For usage of lark with PyInstaller. 
See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html import os def get_hook_dirs(): return [os.path.dirname(__file__)] poetry-core-2.1.1/src/poetry/core/_vendor/lark/__pyinstaller/hook-lark.py000066400000000000000000000011271475444614500265350ustar00rootroot00000000000000#----------------------------------------------------------------------------- # Copyright (c) 2017-2020, PyInstaller Development Team. # # Distributed under the terms of the GNU General Public License (version 2 # or later) with exception for distributing the bootloader. # # The full license is in the file COPYING.txt, distributed with this software. # # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) #----------------------------------------------------------------------------- from PyInstaller.utils.hooks import collect_data_files datas = collect_data_files('lark') poetry-core-2.1.1/src/poetry/core/_vendor/lark/ast_utils.py000066400000000000000000000041051475444614500240100ustar00rootroot00000000000000""" Module of utilities for transforming a lark.Tree into a custom Abstract Syntax Tree (AST defined in classes) """ import inspect, re import types from typing import Optional, Callable from lark import Transformer, v_args class Ast: """Abstract class Subclasses will be collected by `create_transformer()` """ pass class AsList: """Abstract class Subclasses will be instantiated with the parse results as a single list, instead of as arguments. """ class WithMeta: """Abstract class Subclasses will be instantiated with the Meta instance of the tree. (see ``v_args`` for more detail) """ pass def camel_to_snake(name): return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower() def create_transformer(ast_module: types.ModuleType, transformer: Optional[Transformer]=None, decorator_factory: Callable=v_args) -> Transformer: """Collects `Ast` subclasses from the given module, and creates a Lark transformer that builds the AST. For each class, we create a corresponding rule in the transformer, with a matching name. CamelCase names will be converted into snake_case. Example: "CodeBlock" -> "code_block". Classes starting with an underscore (`_`) will be skipped. Parameters: ast_module: A Python module containing all the subclasses of ``ast_utils.Ast`` transformer (Optional[Transformer]): An initial transformer. Its attributes may be overwritten. decorator_factory (Callable): An optional callable accepting two booleans, inline, and meta, and returning a decorator for the methods of ``transformer``. (default: ``v_args``).
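A short usage sketch (``my_ast_module`` and ``parse_tree`` are hypothetical placeholders):

.. code-block:: python

    import my_ast_module  # hypothetical: defines Ast subclasses such as CodeBlock
    transformer = create_transformer(my_ast_module)
    ast = transformer.transform(parse_tree)  # parse_tree comes from a Lark parser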
""" t = transformer or Transformer() for name, obj in inspect.getmembers(ast_module): if not name.startswith('_') and inspect.isclass(obj): if issubclass(obj, Ast): wrapper = decorator_factory(inline=not issubclass(obj, AsList), meta=issubclass(obj, WithMeta)) obj = wrapper(obj).__get__(t) setattr(t, camel_to_snake(name), obj) return t poetry-core-2.1.1/src/poetry/core/_vendor/lark/common.py000066400000000000000000000057001475444614500232730ustar00rootroot00000000000000from copy import deepcopy import sys from types import ModuleType from typing import Callable, Collection, Dict, Optional, TYPE_CHECKING, List if TYPE_CHECKING: from .lark import PostLex from .lexer import Lexer from .grammar import Rule from typing import Union, Type from typing import Literal if sys.version_info >= (3, 10): from typing import TypeAlias else: from typing_extensions import TypeAlias from .utils import Serialize from .lexer import TerminalDef, Token ###{standalone _ParserArgType: 'TypeAlias' = 'Literal["earley", "lalr", "cyk", "auto"]' _LexerArgType: 'TypeAlias' = 'Union[Literal["auto", "basic", "contextual", "dynamic", "dynamic_complete"], Type[Lexer]]' _LexerCallback = Callable[[Token], Token] ParserCallbacks = Dict[str, Callable] class LexerConf(Serialize): __serialize_fields__ = 'terminals', 'ignore', 'g_regex_flags', 'use_bytes', 'lexer_type' __serialize_namespace__ = TerminalDef, terminals: Collection[TerminalDef] re_module: ModuleType ignore: Collection[str] postlex: 'Optional[PostLex]' callbacks: Dict[str, _LexerCallback] g_regex_flags: int skip_validation: bool use_bytes: bool lexer_type: Optional[_LexerArgType] strict: bool def __init__(self, terminals: Collection[TerminalDef], re_module: ModuleType, ignore: Collection[str]=(), postlex: 'Optional[PostLex]'=None, callbacks: Optional[Dict[str, _LexerCallback]]=None, g_regex_flags: int=0, skip_validation: bool=False, use_bytes: bool=False, strict: bool=False): self.terminals = terminals self.terminals_by_name = {t.name: t for t in self.terminals} assert len(self.terminals) == len(self.terminals_by_name) self.ignore = ignore self.postlex = postlex self.callbacks = callbacks or {} self.g_regex_flags = g_regex_flags self.re_module = re_module self.skip_validation = skip_validation self.use_bytes = use_bytes self.strict = strict self.lexer_type = None def _deserialize(self): self.terminals_by_name = {t.name: t for t in self.terminals} def __deepcopy__(self, memo=None): return type(self)( deepcopy(self.terminals, memo), self.re_module, deepcopy(self.ignore, memo), deepcopy(self.postlex, memo), deepcopy(self.callbacks, memo), deepcopy(self.g_regex_flags, memo), deepcopy(self.skip_validation, memo), deepcopy(self.use_bytes, memo), ) class ParserConf(Serialize): __serialize_fields__ = 'rules', 'start', 'parser_type' rules: List['Rule'] callbacks: ParserCallbacks start: List[str] parser_type: _ParserArgType def __init__(self, rules: List['Rule'], callbacks: ParserCallbacks, start: List[str]): assert isinstance(start, list) self.rules = rules self.callbacks = callbacks self.start = start ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/exceptions.py000066400000000000000000000252731475444614500241730ustar00rootroot00000000000000from .utils import logger, NO_VALUE from typing import Mapping, Iterable, Callable, Union, TypeVar, Tuple, Any, List, Set, Optional, Collection, TYPE_CHECKING if TYPE_CHECKING: from .lexer import Token from .parsers.lalr_interactive_parser import InteractiveParser from .tree import Tree ###{standalone class LarkError(Exception): pass 
class ConfigurationError(LarkError, ValueError): pass def assert_config(value, options: Collection, msg='Got %r, expected one of %s'): if value not in options: raise ConfigurationError(msg % (value, options)) class GrammarError(LarkError): pass class ParseError(LarkError): pass class LexError(LarkError): pass T = TypeVar('T') class UnexpectedInput(LarkError): """UnexpectedInput Error. Used as a base class for the following exceptions: - ``UnexpectedCharacters``: The lexer encountered an unexpected string - ``UnexpectedToken``: The parser received an unexpected token - ``UnexpectedEOF``: The parser expected a token, but the input ended After catching one of these exceptions, you may call the following helper methods to create a nicer error message. """ line: int column: int pos_in_stream = None state: Any _terminals_by_name = None interactive_parser: 'InteractiveParser' def get_context(self, text: str, span: int=40) -> str: """Returns a pretty string pinpointing the error in the text, with span amount of context characters around it. Note: The parser doesn't hold a copy of the text it has to parse, so you have to provide it again. """ assert self.pos_in_stream is not None, self pos = self.pos_in_stream start = max(pos - span, 0) end = pos + span if not isinstance(text, bytes): before = text[start:pos].rsplit('\n', 1)[-1] after = text[pos:end].split('\n', 1)[0] return before + after + '\n' + ' ' * len(before.expandtabs()) + '^\n' else: before = text[start:pos].rsplit(b'\n', 1)[-1] after = text[pos:end].split(b'\n', 1)[0] return (before + after + b'\n' + b' ' * len(before.expandtabs()) + b'^\n').decode("ascii", "backslashreplace") def match_examples(self, parse_fn: 'Callable[[str], Tree]', examples: Union[Mapping[T, Iterable[str]], Iterable[Tuple[T, Iterable[str]]]], token_type_match_fallback: bool=False, use_accepts: bool=True ) -> Optional[T]: """Allows you to detect what's wrong in the input text by matching against example errors. Given a parser instance and a dictionary mapping labels to collections of malformed syntax examples, it'll return the label for the example that best matches the current error. The function will iterate the dictionary until it finds a matching error, and return the corresponding value. For an example usage, see `examples/error_reporting_lalr.py` Parameters: parse_fn: parse function (usually ``lark_instance.parse``) examples: dictionary mapping each label (the return value) to a collection of malformed example strings. use_accepts: Recommended to keep this as ``use_accepts=True``.
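A hedged sketch of typical usage (the labels and malformed examples are made up):

.. code-block:: python

    try:
        parser.parse(text)
    except UnexpectedInput as u:
        label = u.match_examples(parser.parse, {
            'unclosed parenthesis': ['f(1', '(a b'],
            'missing value': ['x = '],
        })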
""" assert self.state is not None, "Not supported for this exception" if isinstance(examples, Mapping): examples = examples.items() candidate = (None, False) for i, (label, example) in enumerate(examples): assert not isinstance(example, str), "Expecting a list" for j, malformed in enumerate(example): try: parse_fn(malformed) except UnexpectedInput as ut: if ut.state == self.state: if ( use_accepts and isinstance(self, UnexpectedToken) and isinstance(ut, UnexpectedToken) and ut.accepts != self.accepts ): logger.debug("Different accepts with same state[%d]: %s != %s at example [%s][%s]" % (self.state, self.accepts, ut.accepts, i, j)) continue if ( isinstance(self, (UnexpectedToken, UnexpectedEOF)) and isinstance(ut, (UnexpectedToken, UnexpectedEOF)) ): if ut.token == self.token: # Try exact match first logger.debug("Exact Match at example [%s][%s]" % (i, j)) return label if token_type_match_fallback: # Fallback to token types match if (ut.token.type == self.token.type) and not candidate[-1]: logger.debug("Token Type Fallback at example [%s][%s]" % (i, j)) candidate = label, True if candidate[0] is None: logger.debug("Same State match at example [%s][%s]" % (i, j)) candidate = label, False return candidate[0] def _format_expected(self, expected): if self._terminals_by_name: d = self._terminals_by_name expected = [d[t_name].user_repr() if t_name in d else t_name for t_name in expected] return "Expected one of: \n\t* %s\n" % '\n\t* '.join(expected) class UnexpectedEOF(ParseError, UnexpectedInput): """An exception that is raised by the parser, when the input ends while it still expects a token. """ expected: 'List[Token]' def __init__(self, expected, state=None, terminals_by_name=None): super(UnexpectedEOF, self).__init__() self.expected = expected self.state = state from .lexer import Token self.token = Token("", "") # , line=-1, column=-1, pos_in_stream=-1) self.pos_in_stream = -1 self.line = -1 self.column = -1 self._terminals_by_name = terminals_by_name def __str__(self): message = "Unexpected end-of-input. " message += self._format_expected(self.expected) return message class UnexpectedCharacters(LexError, UnexpectedInput): """An exception that is raised by the lexer, when it cannot match the next string of characters to any of its terminals. 
""" allowed: Set[str] considered_tokens: Set[Any] def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None, terminals_by_name=None, considered_rules=None): super(UnexpectedCharacters, self).__init__() # TODO considered_tokens and allowed can be figured out using state self.line = line self.column = column self.pos_in_stream = lex_pos self.state = state self._terminals_by_name = terminals_by_name self.allowed = allowed self.considered_tokens = considered_tokens self.considered_rules = considered_rules self.token_history = token_history if isinstance(seq, bytes): self.char = seq[lex_pos:lex_pos + 1].decode("ascii", "backslashreplace") else: self.char = seq[lex_pos] self._context = self.get_context(seq) def __str__(self): message = "No terminal matches '%s' in the current parser context, at line %d col %d" % (self.char, self.line, self.column) message += '\n\n' + self._context if self.allowed: message += self._format_expected(self.allowed) if self.token_history: message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in self.token_history) return message class UnexpectedToken(ParseError, UnexpectedInput): """An exception that is raised by the parser, when the token it received doesn't match any valid step forward. Parameters: token: The mismatched token expected: The set of expected tokens considered_rules: Which rules were considered, to deduce the expected tokens state: A value representing the parser state. Do not rely on its value or type. interactive_parser: An instance of ``InteractiveParser``, that is initialized to the point of failure, and can be used for debugging and error handling. Note: These parameters are available as attributes of the instance. """ expected: Set[str] considered_rules: Set[str] def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None): super(UnexpectedToken, self).__init__() # TODO considered_rules and expected can be figured out using state self.line = getattr(token, 'line', '?') self.column = getattr(token, 'column', '?') self.pos_in_stream = getattr(token, 'start_pos', None) self.state = state self.token = token self.expected = expected # XXX deprecate? 
`accepts` is better self._accepts = NO_VALUE self.considered_rules = considered_rules self.interactive_parser = interactive_parser self._terminals_by_name = terminals_by_name self.token_history = token_history @property def accepts(self) -> Set[str]: if self._accepts is NO_VALUE: self._accepts = self.interactive_parser and self.interactive_parser.accepts() return self._accepts def __str__(self): message = ("Unexpected token %r at line %s, column %s.\n%s" % (self.token, self.line, self.column, self._format_expected(self.accepts or self.expected))) if self.token_history: message += "Previous tokens: %r\n" % self.token_history return message class VisitError(LarkError): """VisitError is raised when visitors are interrupted by an exception. It provides the following attributes for inspection: Parameters: rule: the name of the visit rule that failed obj: the tree-node or token that was being processed orig_exc: the exception that caused it to fail Note: These parameters are available as attributes """ obj: 'Union[Tree, Token]' orig_exc: Exception def __init__(self, rule, obj, orig_exc): message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc) super(VisitError, self).__init__(message) self.rule = rule self.obj = obj self.orig_exc = orig_exc class MissingVariableError(LarkError): pass ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/grammar.py000066400000000000000000000071211475444614500234300ustar00rootroot00000000000000from typing import Optional, Tuple, ClassVar, Sequence from .utils import Serialize ###{standalone TOKEN_DEFAULT_PRIORITY = 0 class Symbol(Serialize): __slots__ = ('name',) name: str is_term: ClassVar[bool] = NotImplemented def __init__(self, name: str) -> None: self.name = name def __eq__(self, other): assert isinstance(other, Symbol), other return self.is_term == other.is_term and self.name == other.name def __ne__(self, other): return not (self == other) def __hash__(self): return hash(self.name) def __repr__(self): return '%s(%r)' % (type(self).__name__, self.name) fullrepr = property(__repr__) def renamed(self, f): return type(self)(f(self.name)) class Terminal(Symbol): __serialize_fields__ = 'name', 'filter_out' is_term: ClassVar[bool] = True def __init__(self, name, filter_out=False): self.name = name self.filter_out = filter_out @property def fullrepr(self): return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out) def renamed(self, f): return type(self)(f(self.name), self.filter_out) class NonTerminal(Symbol): __serialize_fields__ = 'name', is_term: ClassVar[bool] = False class RuleOptions(Serialize): __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices' keep_all_tokens: bool expand1: bool priority: Optional[int] template_source: Optional[str] empty_indices: Tuple[bool, ...]
def __init__(self, keep_all_tokens: bool=False, expand1: bool=False, priority: Optional[int]=None, template_source: Optional[str]=None, empty_indices: Tuple[bool, ...]=()) -> None: self.keep_all_tokens = keep_all_tokens self.expand1 = expand1 self.priority = priority self.template_source = template_source self.empty_indices = empty_indices def __repr__(self): return 'RuleOptions(%r, %r, %r, %r)' % ( self.keep_all_tokens, self.expand1, self.priority, self.template_source ) class Rule(Serialize): """ origin : a symbol expansion : a list of symbols order : index of this expansion amongst all rules of the same name """ __slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash') __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options' __serialize_namespace__ = Terminal, NonTerminal, RuleOptions origin: NonTerminal expansion: Sequence[Symbol] order: int alias: Optional[str] options: RuleOptions _hash: int def __init__(self, origin: NonTerminal, expansion: Sequence[Symbol], order: int=0, alias: Optional[str]=None, options: Optional[RuleOptions]=None): self.origin = origin self.expansion = expansion self.alias = alias self.order = order self.options = options or RuleOptions() self._hash = hash((self.origin, tuple(self.expansion))) def _deserialize(self): self._hash = hash((self.origin, tuple(self.expansion))) def __str__(self): return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion)) def __repr__(self): return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options) def __hash__(self): return self._hash def __eq__(self, other): if not isinstance(other, Rule): return False return self.origin == other.origin and self.expansion == other.expansion ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/grammars/000077500000000000000000000000001475444614500232405ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/lark/grammars/__init__.py000066400000000000000000000000001475444614500253370ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/lark/grammars/common.lark000066400000000000000000000016601475444614500254060ustar00rootroot00000000000000// Basic terminals for common use // // Numbers // DIGIT: "0".."9" HEXDIGIT: "a".."f"|"A".."F"|DIGIT INT: DIGIT+ SIGNED_INT: ["+"|"-"] INT DECIMAL: INT "." INT? | "." INT // float = /-?\d+(\.\d+)?([eE][+-]?\d+)?/ _EXP: ("e"|"E") SIGNED_INT FLOAT: INT _EXP | DECIMAL _EXP? SIGNED_FLOAT: ["+"|"-"] FLOAT NUMBER: FLOAT | INT SIGNED_NUMBER: ["+"|"-"] NUMBER // // Strings // _STRING_INNER: /.*?/ _STRING_ESC_INNER: _STRING_INNER /(? ignore | "%import" import_path ["->" name] -> import | "%import" import_path name_list -> multi_import | "%override" rule -> override_rule | "%declare" name+ -> declare !import_path: "."? name ("." name)* name_list: "(" name ("," name)* ")" ?expansions: alias (_VBAR alias)* ?alias: expansion ["->" RULE] ?expansion: expr* ?expr: atom [OP | "~" NUMBER [".." NUMBER]] ?atom: "(" expansions ")" | "[" expansions "]" -> maybe | value ?value: STRING ".." STRING -> literal_range | name | (REGEXP | STRING) -> literal | name "{" value ("," value)* "}" -> template_usage name: RULE | TOKEN _VBAR: _NL? "|" OP: /[+*]|[?](?![a-z])/ RULE: /!?[_?]?[a-z][_a-z0-9]*/ TOKEN: /_?[A-Z][_A-Z0-9]*/ STRING: _STRING "i"? 
REGEXP: /\/(?!\/)(\\\/|\\\\|[^\/])*?\/[imslux]*/ _NL: /(\r?\n)+\s*/ %import common.ESCAPED_STRING -> _STRING %import common.SIGNED_INT -> NUMBER %import common.WS_INLINE COMMENT: /\s*/ "//" /[^\n]/* | /\s*/ "#" /[^\n]/* %ignore WS_INLINE %ignore COMMENT poetry-core-2.1.1/src/poetry/core/_vendor/lark/grammars/python.lark000066400000000000000000000252771475444614500254470ustar00rootroot00000000000000// Python 3 grammar for Lark // This grammar should parse all python 3.x code successfully. // Adapted from: https://docs.python.org/3/reference/grammar.html // Start symbols for the grammar: // single_input is a single interactive statement; // file_input is a module or sequence of commands read from an input file; // eval_input is the input for the eval() functions. // NB: compound_stmt in single_input is followed by extra NEWLINE! // single_input: _NEWLINE | simple_stmt | compound_stmt _NEWLINE file_input: (_NEWLINE | stmt)* eval_input: testlist _NEWLINE* decorator: "@" dotted_name [ "(" [arguments] ")" ] _NEWLINE decorators: decorator+ decorated: decorators (classdef | funcdef | async_funcdef) async_funcdef: "async" funcdef funcdef: "def" name "(" [parameters] ")" ["->" test] ":" suite parameters: paramvalue ("," paramvalue)* ["," SLASH ("," paramvalue)*] ["," [starparams | kwparams]] | starparams | kwparams SLASH: "/" // Otherwise it will completely disappear and will be indistinguishable in the result starparams: (starparam | starguard) poststarparams starparam: "*" typedparam starguard: "*" poststarparams: ("," paramvalue)* ["," kwparams] kwparams: "**" typedparam ","? ?paramvalue: typedparam ("=" test)? ?typedparam: name (":" test)? lambdef: "lambda" [lambda_params] ":" test lambdef_nocond: "lambda" [lambda_params] ":" test_nocond lambda_params: lambda_paramvalue ("," lambda_paramvalue)* ["," [lambda_starparams | lambda_kwparams]] | lambda_starparams | lambda_kwparams ?lambda_paramvalue: name ("=" test)? lambda_starparams: "*" [name] ("," lambda_paramvalue)* ["," [lambda_kwparams]] lambda_kwparams: "**" name ","? ?stmt: simple_stmt | compound_stmt ?simple_stmt: small_stmt (";" small_stmt)* [";"] _NEWLINE ?small_stmt: (expr_stmt | assign_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt) expr_stmt: testlist_star_expr assign_stmt: annassign | augassign | assign annassign: testlist_star_expr ":" test ["=" test] assign: testlist_star_expr ("=" (yield_expr|testlist_star_expr))+ augassign: testlist_star_expr augassign_op (yield_expr|testlist) !augassign_op: "+=" | "-=" | "*=" | "@=" | "/=" | "%=" | "&=" | "|=" | "^=" | "<<=" | ">>=" | "**=" | "//=" ?testlist_star_expr: test_or_star_expr | test_or_star_expr ("," test_or_star_expr)+ ","? -> tuple | test_or_star_expr "," -> tuple // For normal and annotated assignments, additional restrictions enforced by the interpreter del_stmt: "del" exprlist pass_stmt: "pass" ?flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt break_stmt: "break" continue_stmt: "continue" return_stmt: "return" [testlist] yield_stmt: yield_expr raise_stmt: "raise" [test ["from" test]] import_stmt: import_name | import_from import_name: "import" dotted_as_names // note below: the ("." | "...") is necessary because "..." is tokenized as ELLIPSIS import_from: "from" (dots?
dotted_name | dots) "import" ("*" | "(" import_as_names ")" | import_as_names) !dots: "."+ import_as_name: name ["as" name] dotted_as_name: dotted_name ["as" name] import_as_names: import_as_name ("," import_as_name)* [","] dotted_as_names: dotted_as_name ("," dotted_as_name)* dotted_name: name ("." name)* global_stmt: "global" name ("," name)* nonlocal_stmt: "nonlocal" name ("," name)* assert_stmt: "assert" test ["," test] ?compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | match_stmt | with_stmt | funcdef | classdef | decorated | async_stmt async_stmt: "async" (funcdef | with_stmt | for_stmt) if_stmt: "if" test ":" suite elifs ["else" ":" suite] elifs: elif_* elif_: "elif" test ":" suite while_stmt: "while" test ":" suite ["else" ":" suite] for_stmt: "for" exprlist "in" testlist ":" suite ["else" ":" suite] try_stmt: "try" ":" suite except_clauses ["else" ":" suite] [finally] | "try" ":" suite finally -> try_finally finally: "finally" ":" suite except_clauses: except_clause+ except_clause: "except" [test ["as" name]] ":" suite // NB compile.c makes sure that the default except clause is last with_stmt: "with" with_items ":" suite with_items: with_item ("," with_item)* with_item: test ["as" name] match_stmt: "match" test ":" _NEWLINE _INDENT case+ _DEDENT case: "case" pattern ["if" test] ":" suite ?pattern: sequence_item_pattern "," _sequence_pattern -> sequence_pattern | as_pattern ?as_pattern: or_pattern ("as" NAME)? ?or_pattern: closed_pattern ("|" closed_pattern)* ?closed_pattern: literal_pattern | NAME -> capture_pattern | "_" -> any_pattern | attr_pattern | "(" as_pattern ")" | "[" _sequence_pattern "]" -> sequence_pattern | "(" (sequence_item_pattern "," _sequence_pattern)? ")" -> sequence_pattern | "{" (mapping_item_pattern ("," mapping_item_pattern)* ","?)?"}" -> mapping_pattern | "{" (mapping_item_pattern ("," mapping_item_pattern)* ",")? "**" NAME ","? "}" -> mapping_star_pattern | class_pattern literal_pattern: inner_literal_pattern ?inner_literal_pattern: "None" -> const_none | "True" -> const_true | "False" -> const_false | STRING -> string | number attr_pattern: NAME ("." NAME)+ -> value name_or_attr_pattern: NAME ("." NAME)* -> value mapping_item_pattern: (literal_pattern|attr_pattern) ":" as_pattern _sequence_pattern: (sequence_item_pattern ("," sequence_item_pattern)* ","?)? ?sequence_item_pattern: as_pattern | "*" NAME -> star_pattern class_pattern: name_or_attr_pattern "(" [arguments_pattern ","?] ")" arguments_pattern: pos_arg_pattern ["," keyws_arg_pattern] | keyws_arg_pattern -> no_pos_arguments pos_arg_pattern: as_pattern ("," as_pattern)* keyws_arg_pattern: keyw_arg_pattern ("," keyw_arg_pattern)* keyw_arg_pattern: NAME "=" as_pattern suite: simple_stmt | _NEWLINE _INDENT stmt+ _DEDENT ?test: or_test ("if" or_test "else" test)? | lambdef | assign_expr assign_expr: name ":=" test ?test_nocond: or_test | lambdef_nocond ?or_test: and_test ("or" and_test)* ?and_test: not_test_ ("and" not_test_)* ?not_test_: "not" not_test_ -> not_test | comparison ?comparison: expr (comp_op expr)* star_expr: "*" expr ?expr: or_expr ?or_expr: xor_expr ("|" xor_expr)* ?xor_expr: and_expr ("^" and_expr)* ?and_expr: shift_expr ("&" shift_expr)* ?shift_expr: arith_expr (_shift_op arith_expr)* ?arith_expr: term (_add_op term)* ?term: factor (_mul_op factor)* ?factor: _unary_op factor | power !_unary_op: "+"|"-"|"~" !_add_op: "+"|"-" !_shift_op: "<<"|">>" !_mul_op: "*"|"@"|"/"|"%"|"//" // <> isn't actually a valid comparison operator in Python. 
It's here for the // sake of a __future__ import described in PEP 401 (which really works :-) !comp_op: "<"|">"|"=="|">="|"<="|"<>"|"!="|"in"|"not" "in"|"is"|"is" "not" ?power: await_expr ("**" factor)? ?await_expr: AWAIT? atom_expr AWAIT: "await" ?atom_expr: atom_expr "(" [arguments] ")" -> funccall | atom_expr "[" subscriptlist "]" -> getitem | atom_expr "." name -> getattr | atom ?atom: "(" yield_expr ")" | "(" _tuple_inner? ")" -> tuple | "(" comprehension{test_or_star_expr} ")" -> tuple_comprehension | "[" _exprlist? "]" -> list | "[" comprehension{test_or_star_expr} "]" -> list_comprehension | "{" _dict_exprlist? "}" -> dict | "{" comprehension{key_value} "}" -> dict_comprehension | "{" _exprlist "}" -> set | "{" comprehension{test} "}" -> set_comprehension | name -> var | number | string_concat | "(" test ")" | "..." -> ellipsis | "None" -> const_none | "True" -> const_true | "False" -> const_false ?string_concat: string+ _tuple_inner: test_or_star_expr (("," test_or_star_expr)+ [","] | ",") ?test_or_star_expr: test | star_expr ?subscriptlist: subscript | subscript (("," subscript)+ [","] | ",") -> subscript_tuple ?subscript: test | ([test] ":" [test] [sliceop]) -> slice sliceop: ":" [test] ?exprlist: (expr|star_expr) | (expr|star_expr) (("," (expr|star_expr))+ [","]|",") ?testlist: test | testlist_tuple testlist_tuple: test (("," test)+ [","] | ",") _dict_exprlist: (key_value | "**" expr) ("," (key_value | "**" expr))* [","] key_value: test ":" test _exprlist: test_or_star_expr ("," test_or_star_expr)* [","] classdef: "class" name ["(" [arguments] ")"] ":" suite arguments: argvalue ("," argvalue)* ("," [ starargs | kwargs])? | starargs | kwargs | comprehension{test} starargs: stararg ("," stararg)* ("," argvalue)* ["," kwargs] stararg: "*" test kwargs: "**" test ("," argvalue)* ?argvalue: test ("=" test)? comprehension{comp_result}: comp_result comp_fors [comp_if] comp_fors: comp_for+ comp_for: [ASYNC] "for" exprlist "in" or_test ASYNC: "async" ?comp_if: "if" test_nocond // not used in grammar, but may appear in "node" passed from Parser to Compiler encoding_decl: name yield_expr: "yield" [testlist] | "yield" "from" test -> yield_from number: DEC_NUMBER | HEX_NUMBER | BIN_NUMBER | OCT_NUMBER | FLOAT_NUMBER | IMAG_NUMBER string: STRING | LONG_STRING // Other terminals _NEWLINE: ( /\r?\n[\t ]*/ | COMMENT )+ %ignore /[\t \f]+/ // WS %ignore /\\[\t \f]*\r?\n/ // LINE_CONT %ignore COMMENT %declare _INDENT _DEDENT // Python terminals !name: NAME | "match" | "case" NAME: /[^\W\d]\w*/ COMMENT: /#[^\n]*/ STRING: /([ubf]?r?|r[ubf])("(?!"").*?(? None: self.paren_level = 0 self.indent_level = [0] assert self.tab_len > 0 def handle_NL(self, token: Token) -> Iterator[Token]: if self.paren_level > 0: return yield token indent_str = token.rsplit('\n', 1)[1] # Tabs and spaces indent = indent_str.count(' ') + indent_str.count('\t') * self.tab_len if indent > self.indent_level[-1]: self.indent_level.append(indent) yield Token.new_borrow_pos(self.INDENT_type, indent_str, token) else: while indent < self.indent_level[-1]: self.indent_level.pop() yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token) if indent != self.indent_level[-1]: raise DedentError('Unexpected dedent to column %s. 
Expected dedent to %s' % (indent, self.indent_level[-1])) def _process(self, stream): for token in stream: if token.type == self.NL_type: yield from self.handle_NL(token) else: yield token if token.type in self.OPEN_PAREN_types: self.paren_level += 1 elif token.type in self.CLOSE_PAREN_types: self.paren_level -= 1 assert self.paren_level >= 0 while len(self.indent_level) > 1: self.indent_level.pop() yield Token(self.DEDENT_type, '') assert self.indent_level == [0], self.indent_level def process(self, stream): self.paren_level = 0 self.indent_level = [0] return self._process(stream) # XXX Hack for ContextualLexer. Maybe there's a more elegant solution? @property def always_accept(self): return (self.NL_type,) @property @abstractmethod def NL_type(self) -> str: "The name of the newline token" raise NotImplementedError() @property @abstractmethod def OPEN_PAREN_types(self) -> List[str]: "The names of the tokens that open a parenthesis" raise NotImplementedError() @property @abstractmethod def CLOSE_PAREN_types(self) -> List[str]: """The names of the tokens that close a parenthesis """ raise NotImplementedError() @property @abstractmethod def INDENT_type(self) -> str: """The name of the token that starts an indentation in the grammar. See also: %declare """ raise NotImplementedError() @property @abstractmethod def DEDENT_type(self) -> str: """The name of the token that end an indentation in the grammar. See also: %declare """ raise NotImplementedError() @property @abstractmethod def tab_len(self) -> int: """How many spaces does a tab equal""" raise NotImplementedError() class PythonIndenter(Indenter): """A postlexer that "injects" _INDENT/_DEDENT tokens based on indentation, according to the Python syntax. See also: the ``postlex`` option in `Lark`. """ NL_type = '_NEWLINE' OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE'] CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE'] INDENT_type = '_INDENT' DEDENT_type = '_DEDENT' tab_len = 8 ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/lark.py000066400000000000000000000670301475444614500227400ustar00rootroot00000000000000from abc import ABC, abstractmethod import getpass import sys, os, pickle import tempfile import types import re from typing import ( TypeVar, Type, List, Dict, Iterator, Callable, Union, Optional, Sequence, Tuple, Iterable, IO, Any, TYPE_CHECKING, Collection ) if TYPE_CHECKING: from .parsers.lalr_interactive_parser import InteractiveParser from .tree import ParseTree from .visitors import Transformer from typing import Literal from .parser_frontends import ParsingFrontend from .exceptions import ConfigurationError, assert_config, UnexpectedInput from .utils import Serialize, SerializeMemoizer, FS, logger from .load_grammar import load_grammar, FromPackageLoader, Grammar, verify_used_files, PackageResource, sha256_digest from .tree import Tree from .common import LexerConf, ParserConf, _ParserArgType, _LexerArgType from .lexer import Lexer, BasicLexer, TerminalDef, LexerThread, Token from .parse_tree_builder import ParseTreeBuilder from .parser_frontends import _validate_frontend_args, _get_lexer_callbacks, _deserialize_parsing_frontend, _construct_parsing_frontend from .grammar import Rule try: import regex _has_regex = True except ImportError: _has_regex = False ###{standalone class PostLex(ABC): @abstractmethod def process(self, stream: Iterator[Token]) -> Iterator[Token]: return stream always_accept: Iterable[str] = () class LarkOptions(Serialize): """Specifies the options for Lark """ start: List[str] debug: bool strict: bool 
transformer: 'Optional[Transformer]' propagate_positions: Union[bool, str] maybe_placeholders: bool cache: Union[bool, str] regex: bool g_regex_flags: int keep_all_tokens: bool tree_class: Optional[Callable[[str, List], Any]] parser: _ParserArgType lexer: _LexerArgType ambiguity: 'Literal["auto", "resolve", "explicit", "forest"]' postlex: Optional[PostLex] priority: 'Optional[Literal["auto", "normal", "invert"]]' lexer_callbacks: Dict[str, Callable[[Token], Token]] use_bytes: bool ordered_sets: bool edit_terminals: Optional[Callable[[TerminalDef], TerminalDef]] import_paths: 'List[Union[str, Callable[[Union[None, str, PackageResource], str], Tuple[str, str]]]]' source_path: Optional[str] OPTIONS_DOC = r""" **=== General Options ===** start The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start") debug Display debug information and extra warnings. Use only when debugging (Default: ``False``) When used with Earley, it generates a forest graph as "sppf.png", if 'dot' is installed. strict Throw an exception on any potential ambiguity, including shift/reduce conflicts, and regex collisions. transformer Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster) propagate_positions Propagates positional attributes into the 'meta' attribute of all tree branches. Sets attributes: (line, column, end_line, end_column, start_pos, end_pos, container_line, container_column, container_end_line, container_end_column) Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating. maybe_placeholders When ``True``, the ``[]`` operator returns ``None`` when not matched. When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all. (default= ``True``) cache Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now. - When ``False``, does nothing (default) - When ``True``, caches to a temporary file in the local directory - When given a string, caches to the path pointed by the string regex When True, uses the ``regex`` module instead of the stdlib ``re``. g_regex_flags Flags that are applied to all terminals (both regex and strings) keep_all_tokens Prevent the tree builder from automagically removing "punctuation" tokens (Default: ``False``) tree_class Lark will produce trees comprised of instances of this class instead of the default ``lark.Tree``. **=== Algorithm Options ===** parser Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley"). (there is also a "cyk" option for legacy) lexer Decides whether or not to use a lexer stage - "auto" (default): Choose for me based on the parser - "basic": Use a basic lexer - "contextual": Stronger lexer (only works with parser="lalr") - "dynamic": Flexible and powerful (only with parser="earley") - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible. ambiguity Decides how to handle ambiguity in the parse. Only relevant if parser="earley" - "resolve": The parser will automatically choose the simplest derivation (it chooses consistently: greedy for tokens, non-greedy for rules) - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest). - "forest": The parser will return the root of the shared packed parse forest. **=== Misc. / Domain Specific Options ===** postlex Lexer post-processing (Default: ``None``) Only works with the basic and contextual lexers. 
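Illustrative example (editor's sketch, not from the upstream docs): ``Lark(grammar, parser="lalr", postlex=PythonIndenter())``, where ``PythonIndenter`` is the indenter shipped in ``lark.indenter`` and ``grammar`` is a placeholder.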
priority How priorities should be evaluated - "auto", ``None``, "normal", "invert" (Default: "auto") lexer_callbacks Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution. use_bytes Accept an input of type ``bytes`` instead of ``str``. ordered_sets Should Earley use ordered-sets to achieve stable output (~10% slower than regular sets. Default: True) edit_terminals A callback for editing the terminals before parse. import_paths A List of either paths or loader functions to specify from where grammars are imported source_path Override the source of from where the grammar was loaded. Useful for relative imports and unconventional grammar loading **=== End of Options ===** """ if __doc__: __doc__ += OPTIONS_DOC # Adding a new option needs to be done in multiple places: # - In the dictionary below. This is the primary truth of which options `Lark.__init__` accepts # - In the docstring above. It is used both for the docstring of `LarkOptions` and `Lark`, and in readthedocs # - As an attribute of `LarkOptions` above # - Potentially in `_LOAD_ALLOWED_OPTIONS` below this class, when the option doesn't change how the grammar is loaded # - Potentially in `lark.tools.__init__`, if it makes sense, and it can easily be passed as a cmd argument _defaults: Dict[str, Any] = { 'debug': False, 'strict': False, 'keep_all_tokens': False, 'tree_class': None, 'cache': False, 'postlex': None, 'parser': 'earley', 'lexer': 'auto', 'transformer': None, 'start': 'start', 'priority': 'auto', 'ambiguity': 'auto', 'regex': False, 'propagate_positions': False, 'lexer_callbacks': {}, 'maybe_placeholders': True, 'edit_terminals': None, 'g_regex_flags': 0, 'use_bytes': False, 'ordered_sets': True, 'import_paths': [], 'source_path': None, '_plugins': {}, } def __init__(self, options_dict: Dict[str, Any]) -> None: o = dict(options_dict) options = {} for name, default in self._defaults.items(): if name in o: value = o.pop(name) if isinstance(default, bool) and name not in ('cache', 'use_bytes', 'propagate_positions'): value = bool(value) else: value = default options[name] = value if isinstance(options['start'], str): options['start'] = [options['start']] self.__dict__['options'] = options assert_config(self.parser, ('earley', 'lalr', 'cyk', None)) if self.parser == 'earley' and self.transformer: raise ConfigurationError('Cannot specify an embedded transformer when using the Earley algorithm. ' 'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)') if o: raise ConfigurationError("Unknown options: %s" % o.keys()) def __getattr__(self, name: str) -> Any: try: return self.__dict__['options'][name] except KeyError as e: raise AttributeError(e) def __setattr__(self, name: str, value: str) -> None: assert_config(name, self.options.keys(), "%r isn't a valid option. Expected one of: %s") self.options[name] = value def serialize(self, memo = None) -> Dict[str, Any]: return self.options @classmethod def deserialize(cls, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]]) -> "LarkOptions": return cls(data) # Options that can be passed to the Lark parser, even when it was loaded from cache/standalone. # These options are only used outside of `load_grammar`. 
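# (Editor's illustrative note on the set below: ``Lark.load(f, transformer=MyTransformer())``
# is accepted, because ``transformer`` does not affect how the grammar was compiled, while
# ``Lark.load(f, start='expr')`` raises ConfigurationError. ``MyTransformer`` is a
# hypothetical user-defined class, not part of this module.)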
_LOAD_ALLOWED_OPTIONS = {'postlex', 'transformer', 'lexer_callbacks', 'use_bytes', 'debug', 'g_regex_flags', 'regex', 'propagate_positions', 'tree_class', '_plugins'} _VALID_PRIORITY_OPTIONS = ('auto', 'normal', 'invert', None) _VALID_AMBIGUITY_OPTIONS = ('auto', 'resolve', 'explicit', 'forest') _T = TypeVar('_T', bound="Lark") class Lark(Serialize): """Main interface for the library. It's mostly a thin wrapper for the many different parsers, and for the tree constructor. Parameters: grammar: a string or file-object containing the grammar spec (using Lark's ebnf syntax) options: a dictionary controlling various aspects of Lark. Example: >>> Lark(r'''start: "foo" ''') Lark(...) """ source_path: str source_grammar: str grammar: 'Grammar' options: LarkOptions lexer: Lexer parser: 'ParsingFrontend' terminals: Collection[TerminalDef] def __init__(self, grammar: 'Union[Grammar, str, IO[str]]', **options) -> None: self.options = LarkOptions(options) re_module: types.ModuleType # Set regex or re module use_regex = self.options.regex if use_regex: if _has_regex: re_module = regex else: raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.') else: re_module = re # Some, but not all file-like objects have a 'name' attribute if self.options.source_path is None: try: self.source_path = grammar.name # type: ignore[union-attr] except AttributeError: self.source_path = '' else: self.source_path = self.options.source_path # Drain file-like objects to get their contents try: read = grammar.read # type: ignore[union-attr] except AttributeError: pass else: grammar = read() cache_fn = None cache_sha256 = None if isinstance(grammar, str): self.source_grammar = grammar if self.options.use_bytes: if not grammar.isascii(): raise ConfigurationError("Grammar must be ascii only, when use_bytes=True") if self.options.cache: if self.options.parser != 'lalr': raise ConfigurationError("cache only works with parser='lalr' for now") unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals', '_plugins') options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable) from . import __version__ s = grammar + options_str + __version__ + str(sys.version_info[:2]) cache_sha256 = sha256_digest(s) if isinstance(self.options.cache, str): cache_fn = self.options.cache else: if self.options.cache is not True: raise ConfigurationError("cache argument must be bool or str") try: username = getpass.getuser() except Exception: # The exception raised may be ImportError or OSError in # the future. For the cache, we don't care about the # specific reason - we just want a username. username = "unknown" cache_fn = tempfile.gettempdir() + "/.lark_cache_%s_%s_%s_%s.tmp" % (username, cache_sha256, *sys.version_info[:2]) old_options = self.options try: with FS.open(cache_fn, 'rb') as f: logger.debug('Loading grammar from cache: %s', cache_fn) # Remove options that aren't relevant for loading from cache for name in (set(options) - _LOAD_ALLOWED_OPTIONS): del options[name] file_sha256 = f.readline().rstrip(b'\n') cached_used_files = pickle.load(f) if file_sha256 == cache_sha256.encode('utf8') and verify_used_files(cached_used_files): cached_parser_data = pickle.load(f) self._load(cached_parser_data, **options) return except FileNotFoundError: # The cache file doesn't exist; parse and compose the grammar as normal pass except Exception: # We should probably narrow down which errors we catch here. logger.exception("Failed to load Lark from cache: %r.
We will try to carry on.", cache_fn) # In theory, the Lark instance might have been messed up by the call to `_load`. # In practice the only relevant thing that might have been overwritten should be `options` self.options = old_options # Parse the grammar file and compose the grammars self.grammar, used_files = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens) else: assert isinstance(grammar, Grammar) self.grammar = grammar if self.options.lexer == 'auto': if self.options.parser == 'lalr': self.options.lexer = 'contextual' elif self.options.parser == 'earley': if self.options.postlex is not None: logger.info("postlex can't be used with the dynamic lexer, so we use 'basic' instead. " "Consider using lalr with contextual instead of earley") self.options.lexer = 'basic' else: self.options.lexer = 'dynamic' elif self.options.parser == 'cyk': self.options.lexer = 'basic' else: assert False, self.options.parser lexer = self.options.lexer if isinstance(lexer, type): assert issubclass(lexer, Lexer) # XXX Is this really important? Maybe just ensure interface compliance else: assert_config(lexer, ('basic', 'contextual', 'dynamic', 'dynamic_complete')) if self.options.postlex is not None and 'dynamic' in lexer: raise ConfigurationError("Can't use postlex with a dynamic lexer. Use basic or contextual instead") if self.options.ambiguity == 'auto': if self.options.parser == 'earley': self.options.ambiguity = 'resolve' else: assert_config(self.options.parser, ('earley', 'cyk'), "%r doesn't support disambiguation. Use one of these parsers instead: %s") if self.options.priority == 'auto': self.options.priority = 'normal' if self.options.priority not in _VALID_PRIORITY_OPTIONS: raise ConfigurationError("invalid priority option: %r. Must be one of %r" % (self.options.priority, _VALID_PRIORITY_OPTIONS)) if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS: raise ConfigurationError("invalid ambiguity option: %r. Must be one of %r" % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS)) if self.options.parser is None: terminals_to_keep = '*' elif self.options.postlex is not None: terminals_to_keep = set(self.options.postlex.always_accept) else: terminals_to_keep = set() # Compile the EBNF grammar into BNF self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep) if self.options.edit_terminals: for t in self.terminals: self.options.edit_terminals(t) self._terminals_dict = {t.name: t for t in self.terminals} # If the user asked to invert the priorities, negate them all here. if self.options.priority == 'invert': for rule in self.rules: if rule.options.priority is not None: rule.options.priority = -rule.options.priority for term in self.terminals: term.priority = -term.priority # Else, if the user asked to disable priorities, strip them from the # rules and terminals. This allows the Earley parsers to skip an extra forest walk # for improved performance, if you don't need them (or didn't specify any). elif self.options.priority is None: for rule in self.rules: if rule.options.priority is not None: rule.options.priority = None for term in self.terminals: term.priority = 0 # TODO Deprecate lexer_callbacks? 
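# (Editor's illustrative sketch of the lexer_callbacks that get wired into LexerConf below;
# ``upcase_name`` and ``grammar_text`` are hypothetical names, not part of this module:
#
#     def upcase_name(tok):
#         return tok.update(value=tok.value.upper())  # Token.update returns a new Token
#
#     Lark(grammar_text, parser='lalr', lexer_callbacks={'NAME': upcase_name})
#
# Each callback receives the matched Token and must return a Token.)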
self.lexer_conf = LexerConf( self.terminals, re_module, self.ignore_tokens, self.options.postlex, self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes, strict=self.options.strict ) if self.options.parser: self.parser = self._build_parser() elif lexer: self.lexer = self._build_lexer() if cache_fn: logger.debug('Saving grammar to cache: %s', cache_fn) try: with FS.open(cache_fn, 'wb') as f: assert cache_sha256 is not None f.write(cache_sha256.encode('utf8') + b'\n') pickle.dump(used_files, f) self.save(f, _LOAD_ALLOWED_OPTIONS) except IOError as e: logger.exception("Failed to save Lark to cache: %r.", cache_fn, e) if __doc__: __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC __serialize_fields__ = 'parser', 'rules', 'options' def _build_lexer(self, dont_ignore: bool=False) -> BasicLexer: lexer_conf = self.lexer_conf if dont_ignore: from copy import copy lexer_conf = copy(lexer_conf) lexer_conf.ignore = () return BasicLexer(lexer_conf) def _prepare_callbacks(self) -> None: self._callbacks = {} # we don't need these callbacks if we aren't building a tree if self.options.ambiguity != 'forest': self._parse_tree_builder = ParseTreeBuilder( self.rules, self.options.tree_class or Tree, self.options.propagate_positions, self.options.parser != 'lalr' and self.options.ambiguity == 'explicit', self.options.maybe_placeholders ) self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer) self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals)) def _build_parser(self) -> "ParsingFrontend": self._prepare_callbacks() _validate_frontend_args(self.options.parser, self.options.lexer) parser_conf = ParserConf(self.rules, self._callbacks, self.options.start) return _construct_parsing_frontend( self.options.parser, self.options.lexer, self.lexer_conf, parser_conf, options=self.options ) def save(self, f, exclude_options: Collection[str] = ()) -> None: """Saves the instance into the given file object Useful for caching and multiprocessing. """ if self.options.parser != 'lalr': raise NotImplementedError("Lark.save() is only implemented for the LALR(1) parser.") data, m = self.memo_serialize([TerminalDef, Rule]) if exclude_options: data["options"] = {n: v for n, v in data["options"].items() if n not in exclude_options} pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL) @classmethod def load(cls: Type[_T], f) -> _T: """Loads an instance from the given file object Useful for caching and multiprocessing. 
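Example (editor's illustrative sketch; 'parser.bin' is a hypothetical file previously written by ``Lark.save()``): >>> with open('parser.bin', 'rb') as f: ... parser = Lark.load(f)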
""" inst = cls.__new__(cls) return inst._load(f) def _deserialize_lexer_conf(self, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]], options: LarkOptions) -> LexerConf: lexer_conf = LexerConf.deserialize(data['lexer_conf'], memo) lexer_conf.callbacks = options.lexer_callbacks or {} lexer_conf.re_module = regex if options.regex else re lexer_conf.use_bytes = options.use_bytes lexer_conf.g_regex_flags = options.g_regex_flags lexer_conf.skip_validation = True lexer_conf.postlex = options.postlex return lexer_conf def _load(self: _T, f: Any, **kwargs) -> _T: if isinstance(f, dict): d = f else: d = pickle.load(f) memo_json = d['memo'] data = d['data'] assert memo_json memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {}) options = dict(data['options']) if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults): raise ConfigurationError("Some options are not allowed when loading a Parser: {}" .format(set(kwargs) - _LOAD_ALLOWED_OPTIONS)) options.update(kwargs) self.options = LarkOptions.deserialize(options, memo) self.rules = [Rule.deserialize(r, memo) for r in data['rules']] self.source_path = '' _validate_frontend_args(self.options.parser, self.options.lexer) self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options) self.terminals = self.lexer_conf.terminals self._prepare_callbacks() self._terminals_dict = {t.name: t for t in self.terminals} self.parser = _deserialize_parsing_frontend( data['parser'], memo, self.lexer_conf, self._callbacks, self.options, # Not all, but multiple attributes are used ) return self @classmethod def _load_from_dict(cls, data, memo, **kwargs): inst = cls.__new__(cls) return inst._load({'data': data, 'memo': memo}, **kwargs) @classmethod def open(cls: Type[_T], grammar_filename: str, rel_to: Optional[str]=None, **options) -> _T: """Create an instance of Lark with the grammar given by its filename If ``rel_to`` is provided, the function will find the grammar filename in relation to it. Example: >>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr") Lark(...) """ if rel_to: basepath = os.path.dirname(rel_to) grammar_filename = os.path.join(basepath, grammar_filename) with open(grammar_filename, encoding='utf8') as f: return cls(f, **options) @classmethod def open_from_package(cls: Type[_T], package: str, grammar_path: str, search_paths: 'Sequence[str]'=[""], **options) -> _T: """Create an instance of Lark with the grammar loaded from within the package `package`. This allows grammar loading from zipapps. Imports in the grammar will use the `package` and `search_paths` provided, through `FromPackageLoader` Example: Lark.open_from_package(__name__, "example.lark", ("grammars",), parser=...) """ package_loader = FromPackageLoader(package, search_paths) full_path, text = package_loader(None, grammar_path) options.setdefault('source_path', full_path) options.setdefault('import_paths', []) options['import_paths'].append(package_loader) return cls(text, **options) def __repr__(self): return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer) def lex(self, text: str, dont_ignore: bool=False) -> Iterator[Token]: """Only lex (and postlex) the text, without parsing it. Only relevant when lexer='basic' When dont_ignore=True, the lexer will return all tokens, even those marked for %ignore. :raises UnexpectedCharacters: In case the lexer cannot find a suitable match. 
""" lexer: Lexer if not hasattr(self, 'lexer') or dont_ignore: lexer = self._build_lexer(dont_ignore) else: lexer = self.lexer lexer_thread = LexerThread.from_text(lexer, text) stream = lexer_thread.lex(None) if self.options.postlex: return self.options.postlex.process(stream) return stream def get_terminal(self, name: str) -> TerminalDef: """Get information about a terminal""" return self._terminals_dict[name] def parse_interactive(self, text: Optional[str]=None, start: Optional[str]=None) -> 'InteractiveParser': """Start an interactive parsing session. Parameters: text (str, optional): Text to be parsed. Required for ``resume_parse()``. start (str, optional): Start symbol Returns: A new InteractiveParser instance. See Also: ``Lark.parse()`` """ return self.parser.parse_interactive(text, start=start) def parse(self, text: str, start: Optional[str]=None, on_error: 'Optional[Callable[[UnexpectedInput], bool]]'=None) -> 'ParseTree': """Parse the given text, according to the options provided. Parameters: text (str): Text to be parsed. start (str, optional): Required if Lark was given multiple possible start symbols (using the start option). on_error (function, optional): if provided, will be called on UnexpectedToken error. Return true to resume parsing. LALR only. See examples/advanced/error_handling.py for an example of how to use on_error. Returns: If a transformer is supplied to ``__init__``, returns whatever is the result of the transformation. Otherwise, returns a Tree instance. :raises UnexpectedInput: On a parse error, one of these sub-exceptions will rise: ``UnexpectedCharacters``, ``UnexpectedToken``, or ``UnexpectedEOF``. For convenience, these sub-exceptions also inherit from ``ParserError`` and ``LexerError``. """ return self.parser.parse(text, start=start, on_error=on_error) ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/lexer.py000066400000000000000000000567451475444614500231410ustar00rootroot00000000000000# Lexer Implementation from abc import abstractmethod, ABC import re from contextlib import suppress from typing import ( TypeVar, Type, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any, ClassVar, TYPE_CHECKING, overload ) from types import ModuleType import warnings try: import interegular except ImportError: pass if TYPE_CHECKING: from .common import LexerConf from .parsers.lalr_parser_state import ParserState from .utils import classify, get_regexp_width, Serialize, logger from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken from .grammar import TOKEN_DEFAULT_PRIORITY ###{standalone from copy import copy try: # For the standalone parser, we need to make sure that has_interegular is False to avoid NameErrors later on has_interegular = bool(interegular) except NameError: has_interegular = False class Pattern(Serialize, ABC): "An abstraction over regular expressions." value: str flags: Collection[str] raw: Optional[str] type: ClassVar[str] def __init__(self, value: str, flags: Collection[str] = (), raw: Optional[str] = None) -> None: self.value = value self.flags = frozenset(flags) self.raw = raw def __repr__(self): return repr(self.to_regexp()) # Pattern Hashing assumes all subclasses have a different priority! 
def __hash__(self): return hash((type(self), self.value, self.flags)) def __eq__(self, other): return type(self) == type(other) and self.value == other.value and self.flags == other.flags @abstractmethod def to_regexp(self) -> str: raise NotImplementedError() @property @abstractmethod def min_width(self) -> int: raise NotImplementedError() @property @abstractmethod def max_width(self) -> int: raise NotImplementedError() def _get_flags(self, value): for f in self.flags: value = ('(?%s:%s)' % (f, value)) return value class PatternStr(Pattern): __serialize_fields__ = 'value', 'flags', 'raw' type: ClassVar[str] = "str" def to_regexp(self) -> str: return self._get_flags(re.escape(self.value)) @property def min_width(self) -> int: return len(self.value) @property def max_width(self) -> int: return len(self.value) class PatternRE(Pattern): __serialize_fields__ = 'value', 'flags', 'raw', '_width' type: ClassVar[str] = "re" def to_regexp(self) -> str: return self._get_flags(self.value) _width = None def _get_width(self): if self._width is None: self._width = get_regexp_width(self.to_regexp()) return self._width @property def min_width(self) -> int: return self._get_width()[0] @property def max_width(self) -> int: return self._get_width()[1] class TerminalDef(Serialize): "A definition of a terminal" __serialize_fields__ = 'name', 'pattern', 'priority' __serialize_namespace__ = PatternStr, PatternRE name: str pattern: Pattern priority: int def __init__(self, name: str, pattern: Pattern, priority: int = TOKEN_DEFAULT_PRIORITY) -> None: assert isinstance(pattern, Pattern), pattern self.name = name self.pattern = pattern self.priority = priority def __repr__(self): return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern) def user_repr(self) -> str: if self.name.startswith('__'): # We represent a generated terminal return self.pattern.raw or self.name else: return self.name _T = TypeVar('_T', bound="Token") class Token(str): """A string with meta-information, that is produced by the lexer. When parsing text, the resulting chunks of the input that haven't been discarded, will end up in the tree as Token instances. The Token class inherits from Python's ``str``, so normal string comparisons and operations will work as expected. Attributes: type: Name of the token (as specified in grammar) value: Value of the token (redundant, as ``token.value == token`` will always be true) start_pos: The index of the token in the text line: The line of the token in the text (starting with 1) column: The column of the token in the text (starting with 1) end_line: The line where the token ends end_column: The next column after the end of the token. For example, if the token is a single character with a column value of 4, end_column will be 5. end_pos: the index where the token ends (basically ``start_pos + len(token)``) """ __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos') __match_args__ = ('type', 'value') type: str start_pos: Optional[int] value: Any line: Optional[int] column: Optional[int] end_line: Optional[int] end_column: Optional[int] end_pos: Optional[int] @overload def __new__( cls, type: str, value: Any, start_pos: Optional[int] = None, line: Optional[int] = None, column: Optional[int] = None, end_line: Optional[int] = None, end_column: Optional[int] = None, end_pos: Optional[int] = None ) -> 'Token': ... 
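# (Editor's note: the second overload below mirrors the first, but with the deprecated
# `type_` keyword; __new__ maps `type_` to `type` and emits a DeprecationWarning.)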
@overload def __new__( cls, type_: str, value: Any, start_pos: Optional[int] = None, line: Optional[int] = None, column: Optional[int] = None, end_line: Optional[int] = None, end_column: Optional[int] = None, end_pos: Optional[int] = None ) -> 'Token': ... def __new__(cls, *args, **kwargs): if "type_" in kwargs: warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) if "type" in kwargs: raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") kwargs["type"] = kwargs.pop("type_") return cls._future_new(*args, **kwargs) @classmethod def _future_new(cls, type, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None): inst = super(Token, cls).__new__(cls, value) inst.type = type inst.start_pos = start_pos inst.value = value inst.line = line inst.column = column inst.end_line = end_line inst.end_column = end_column inst.end_pos = end_pos return inst @overload def update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token': ... @overload def update(self, type_: Optional[str] = None, value: Optional[Any] = None) -> 'Token': ... def update(self, *args, **kwargs): if "type_" in kwargs: warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) if "type" in kwargs: raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") kwargs["type"] = kwargs.pop("type_") return self._future_update(*args, **kwargs) def _future_update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token': return Token.new_borrow_pos( type if type is not None else self.type, value if value is not None else self.value, self ) @classmethod def new_borrow_pos(cls: Type[_T], type_: str, value: Any, borrow_t: 'Token') -> _T: return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos) def __reduce__(self): return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column)) def __repr__(self): return 'Token(%r, %r)' % (self.type, self.value) def __deepcopy__(self, memo): return Token(self.type, self.value, self.start_pos, self.line, self.column) def __eq__(self, other): if isinstance(other, Token) and self.type != other.type: return False return str.__eq__(self, other) __hash__ = str.__hash__ class LineCounter: "A utility class for keeping track of line & column information" __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char' def __init__(self, newline_char): self.newline_char = newline_char self.char_pos = 0 self.line = 1 self.column = 1 self.line_start_pos = 0 def __eq__(self, other): if not isinstance(other, LineCounter): return NotImplemented return self.char_pos == other.char_pos and self.newline_char == other.newline_char def feed(self, token: Token, test_newline=True): """Consume a token and calculate the new line & column. As an optional optimization, set test_newline=False if token doesn't contain a newline. 
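For example (editor's note): feeding the three-character token 'a\nb' to a fresh counter leaves it at line 2, column 2, char_pos 3.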
""" if test_newline: newlines = token.count(self.newline_char) if newlines: self.line += newlines self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1 self.char_pos += len(token) self.column = self.char_pos - self.line_start_pos + 1 class UnlessCallback: def __init__(self, scanner): self.scanner = scanner def __call__(self, t): res = self.scanner.match(t.value, 0) if res: _value, t.type = res return t class CallChain: def __init__(self, callback1, callback2, cond): self.callback1 = callback1 self.callback2 = callback2 self.cond = cond def __call__(self, t): t2 = self.callback1(t) return self.callback2(t) if self.cond(t2) else t2 def _get_match(re_, regexp, s, flags): m = re_.match(regexp, s, flags) if m: return m.group(0) def _create_unless(terminals, g_regex_flags, re_, use_bytes): tokens_by_type = classify(terminals, lambda t: type(t.pattern)) assert len(tokens_by_type) <= 2, tokens_by_type.keys() embedded_strs = set() callback = {} for retok in tokens_by_type.get(PatternRE, []): unless = [] for strtok in tokens_by_type.get(PatternStr, []): if strtok.priority != retok.priority: continue s = strtok.pattern.value if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags): unless.append(strtok) if strtok.pattern.flags <= retok.pattern.flags: embedded_strs.add(strtok) if unless: callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes)) new_terminals = [t for t in terminals if t not in embedded_strs] return new_terminals, callback class Scanner: def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): self.terminals = terminals self.g_regex_flags = g_regex_flags self.re_ = re_ self.use_bytes = use_bytes self.match_whole = match_whole self.allowed_types = {t.name for t in self.terminals} self._mres = self._build_mres(terminals, len(terminals)) def _build_mres(self, terminals, max_size): # Python sets an unreasonable group limit (currently 100) in its re module # Worse, the only way to know we reached it is by catching an AssertionError! # This function recursively tries less and less groups until it's successful. postfix = '$' if self.match_whole else '' mres = [] while terminals: pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size]) if self.use_bytes: pattern = pattern.encode('latin-1') try: mre = self.re_.compile(pattern, self.g_regex_flags) except AssertionError: # Yes, this is what Python provides us.. :/ return self._build_mres(terminals, max_size // 2) mres.append(mre) terminals = terminals[max_size:] return mres def match(self, text, pos): for mre in self._mres: m = mre.match(text, pos) if m: return m.group(0), m.lastgroup def _regexp_has_newline(r: str): r"""Expressions that may indicate newlines in a regexp: - newlines (\n) - escaped newline (\\n) - anything but ([^...]) - any-char (.) when the flag (?s) exists - spaces (\s) """ return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' 
in r) class LexerState: """Represents the current state of the lexer as it scans the text (Lexer objects are only instantiated per grammar, not per text) """ __slots__ = 'text', 'line_ctr', 'last_token' text: str line_ctr: LineCounter last_token: Optional[Token] def __init__(self, text: str, line_ctr: Optional[LineCounter]=None, last_token: Optional[Token]=None): self.text = text self.line_ctr = line_ctr or LineCounter(b'\n' if isinstance(text, bytes) else '\n') self.last_token = last_token def __eq__(self, other): if not isinstance(other, LexerState): return NotImplemented return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token def __copy__(self): return type(self)(self.text, copy(self.line_ctr), self.last_token) class LexerThread: """A thread that ties a lexer instance and a lexer state, to be used by the parser """ def __init__(self, lexer: 'Lexer', lexer_state: LexerState): self.lexer = lexer self.state = lexer_state @classmethod def from_text(cls, lexer: 'Lexer', text: str) -> 'LexerThread': return cls(lexer, LexerState(text)) def lex(self, parser_state): return self.lexer.lex(self.state, parser_state) def __copy__(self): return type(self)(self.lexer, copy(self.state)) _Token = Token _Callback = Callable[[Token], Token] class Lexer(ABC): """Lexer interface Method Signatures: lex(self, lexer_state, parser_state) -> Iterator[Token] """ @abstractmethod def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: return NotImplemented def make_lexer_state(self, text): "Deprecated" return LexerState(text) def _check_regex_collisions(terminal_to_regexp: Dict[TerminalDef, str], comparator, strict_mode, max_collisions_to_show=8): if not comparator: comparator = interegular.Comparator.from_regexes(terminal_to_regexp) # When in strict mode, we only ever try to provide one example, so taking # a long time for that should be fine max_time = 2 if strict_mode else 0.2 # We don't want to show too many collisions. if comparator.count_marked_pairs() >= max_collisions_to_show: return for group in classify(terminal_to_regexp, lambda t: t.priority).values(): for a, b in comparator.check(group, skip_marked=True): assert a.priority == b.priority # Mark this pair to not repeat warnings when multiple different BasicLexers see the same collision comparator.mark(a, b) # Notify the user message = f"Collision between Terminals {a.name} and {b.name}. " try: example = comparator.get_example_overlap(a, b, max_time).format_multiline() except ValueError: # Couldn't find an example within max_time steps. example = "No example could be found fast enough. However, the collision still exists" if strict_mode: raise LexError(f"{message}\n{example}") logger.warning("%s The lexer will choose between them arbitrarily.\n%s", message, example) if comparator.count_marked_pairs() >= max_collisions_to_show: logger.warning("Found 8 regex collisions, will not check for more.") return class AbstractBasicLexer(Lexer): terminals_by_name: Dict[str, TerminalDef] @abstractmethod def __init__(self, conf: 'LexerConf', comparator=None) -> None: ... @abstractmethod def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: ...
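# (Editor's note: implementations signal end-of-input by raising EOFError from
# next_token(); lex() below relies on this, ending the token stream via
# contextlib.suppress(EOFError).)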
def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]: with suppress(EOFError): while True: yield self.next_token(state, parser_state) class BasicLexer(AbstractBasicLexer): terminals: Collection[TerminalDef] ignore_types: FrozenSet[str] newline_types: FrozenSet[str] user_callbacks: Dict[str, _Callback] callback: Dict[str, _Callback] re: ModuleType def __init__(self, conf: 'LexerConf', comparator=None) -> None: terminals = list(conf.terminals) assert all(isinstance(t, TerminalDef) for t in terminals), terminals self.re = conf.re_module if not conf.skip_validation: # Sanitization terminal_to_regexp = {} for t in terminals: regexp = t.pattern.to_regexp() try: self.re.compile(regexp, conf.g_regex_flags) except self.re.error: raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern)) if t.pattern.min_width == 0: raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern)) if t.pattern.type == "re": terminal_to_regexp[t] = regexp if not (set(conf.ignore) <= {t.name for t in terminals}): raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals})) if has_interegular: _check_regex_collisions(terminal_to_regexp, comparator, conf.strict) elif conf.strict: raise LexError("interegular must be installed for strict mode. Use `pip install 'lark[interegular]'`.") # Init self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())) self.ignore_types = frozenset(conf.ignore) terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name)) self.terminals = terminals self.user_callbacks = conf.callbacks self.g_regex_flags = conf.g_regex_flags self.use_bytes = conf.use_bytes self.terminals_by_name = conf.terminals_by_name self._scanner = None def _build_scanner(self): terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes) assert all(self.callback.values()) for type_, f in self.user_callbacks.items(): if type_ in self.callback: # Already a callback there, probably UnlessCallback self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_) else: self.callback[type_] = f self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes) @property def scanner(self): if self._scanner is None: self._build_scanner() return self._scanner def match(self, text, pos): return self.scanner.match(text, pos) def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: line_ctr = lex_state.line_ctr while line_ctr.char_pos < len(lex_state.text): res = self.match(lex_state.text, line_ctr.char_pos) if not res: allowed = self.scanner.allowed_types - self.ignore_types if not allowed: allowed = {""} raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column, allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token], state=parser_state, terminals_by_name=self.terminals_by_name) value, type_ = res ignored = type_ in self.ignore_types t = None if not ignored or type_ in self.callback: t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) line_ctr.feed(value, type_ in self.newline_types) if t is not None: t.end_line = line_ctr.line t.end_column = line_ctr.column t.end_pos = line_ctr.char_pos if t.type in self.callback: t = self.callback[t.type](t) if not ignored: if not isinstance(t, Token): raise LexError("Callbacks must return a token (returned %r)" % t) lex_state.last_token = t 
return t # EOF raise EOFError(self) class ContextualLexer(Lexer): lexers: Dict[int, AbstractBasicLexer] root_lexer: AbstractBasicLexer BasicLexer: Type[AbstractBasicLexer] = BasicLexer def __init__(self, conf: 'LexerConf', states: Dict[int, Collection[str]], always_accept: Collection[str]=()) -> None: terminals = list(conf.terminals) terminals_by_name = conf.terminals_by_name trad_conf = copy(conf) trad_conf.terminals = terminals if has_interegular and not conf.skip_validation: comparator = interegular.Comparator.from_regexes({t: t.pattern.to_regexp() for t in terminals}) else: comparator = None lexer_by_tokens: Dict[FrozenSet[str], AbstractBasicLexer] = {} self.lexers = {} for state, accepts in states.items(): key = frozenset(accepts) try: lexer = lexer_by_tokens[key] except KeyError: accepts = set(accepts) | set(conf.ignore) | set(always_accept) lexer_conf = copy(trad_conf) lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name] lexer = self.BasicLexer(lexer_conf, comparator) lexer_by_tokens[key] = lexer self.lexers[state] = lexer assert trad_conf.terminals is terminals trad_conf.skip_validation = True # We don't need to verify all terminals again self.root_lexer = self.BasicLexer(trad_conf, comparator) def lex(self, lexer_state: LexerState, parser_state: 'ParserState') -> Iterator[Token]: try: while True: lexer = self.lexers[parser_state.position] yield lexer.next_token(lexer_state, parser_state) except EOFError: pass except UnexpectedCharacters as e: # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined, but not in the current context. # This tests the input against the global context, to provide a nicer error. try: last_token = lexer_state.last_token # Save last_token. Calling root_lexer.next_token will change this to the wrong token token = self.root_lexer.next_token(lexer_state, parser_state) raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name) except UnexpectedCharacters: raise e # Raise the original UnexpectedCharacters. The root lexer raises it with the wrong expected set. ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/load_grammar.py000066400000000000000000001512721475444614500244360ustar00rootroot00000000000000"""Parses and compiles Lark grammars into an internal representation. """ import hashlib import os.path import sys from collections import namedtuple from copy import copy, deepcopy import pkgutil from ast import literal_eval from contextlib import suppress from typing import List, Tuple, Union, Callable, Dict, Optional, Sequence, Generator from .utils import bfs, logger, classify_bool, is_id_continue, is_id_start, bfs_all_unique, small_factors, OrderedSet from .lexer import Token, TerminalDef, PatternStr, PatternRE, Pattern from .parse_tree_builder import ParseTreeBuilder from .parser_frontends import ParsingFrontend from .common import LexerConf, ParserConf from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol, TOKEN_DEFAULT_PRIORITY from .utils import classify, dedup_list from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken, ParseError, UnexpectedInput from .tree import Tree, SlottedTree as ST from .visitors import Transformer, Visitor, v_args, Transformer_InPlace, Transformer_NonRecursive inline_args = v_args(inline=True) IMPORT_PATHS = ['grammars'] EXT = '.lark' _RE_FLAGS = 'imslux' _EMPTY = Symbol('__empty__') _TERMINAL_NAMES = { '.' 
: 'DOT', ',' : 'COMMA', ':' : 'COLON', ';' : 'SEMICOLON', '+' : 'PLUS', '-' : 'MINUS', '*' : 'STAR', '/' : 'SLASH', '\\' : 'BACKSLASH', '|' : 'VBAR', '?' : 'QMARK', '!' : 'BANG', '@' : 'AT', '#' : 'HASH', '$' : 'DOLLAR', '%' : 'PERCENT', '^' : 'CIRCUMFLEX', '&' : 'AMPERSAND', '_' : 'UNDERSCORE', '<' : 'LESSTHAN', '>' : 'MORETHAN', '=' : 'EQUAL', '"' : 'DBLQUOTE', '\'' : 'QUOTE', '`' : 'BACKQUOTE', '~' : 'TILDE', '(' : 'LPAR', ')' : 'RPAR', '{' : 'LBRACE', '}' : 'RBRACE', '[' : 'LSQB', ']' : 'RSQB', '\n' : 'NEWLINE', '\r\n' : 'CRLF', '\t' : 'TAB', ' ' : 'SPACE', } # Grammar Parser TERMINALS = { '_LPAR': r'\(', '_RPAR': r'\)', '_LBRA': r'\[', '_RBRA': r'\]', '_LBRACE': r'\{', '_RBRACE': r'\}', 'OP': '[+*]|[?](?![a-z_])', '_COLON': ':', '_COMMA': ',', '_OR': r'\|', '_DOT': r'\.(?!\.)', '_DOTDOT': r'\.\.', 'TILDE': '~', 'RULE_MODIFIERS': '(!|![?]?|[?]!?)(?=[_a-z])', 'RULE': '_?[a-z][_a-z0-9]*', 'TERMINAL': '_?[A-Z][_A-Z0-9]*', 'STRING': r'"(\\"|\\\\|[^"\n])*?"i?', 'REGEXP': r'/(?!/)(\\/|\\\\|[^/])*?/[%s]*' % _RE_FLAGS, '_NL': r'(\r?\n)+\s*', '_NL_OR': r'(\r?\n)+\s*\|', 'WS': r'[ \t]+', 'COMMENT': r'\s*//[^\n]*|\s*#[^\n]*', 'BACKSLASH': r'\\[ ]*\n', '_TO': '->', '_IGNORE': r'%ignore', '_OVERRIDE': r'%override', '_DECLARE': r'%declare', '_EXTEND': r'%extend', '_IMPORT': r'%import', 'NUMBER': r'[+-]?\d+', } RULES = { 'start': ['_list'], '_list': ['_item', '_list _item'], '_item': ['rule', 'term', 'ignore', 'import', 'declare', 'override', 'extend', '_NL'], 'rule': ['rule_modifiers RULE template_params priority _COLON expansions _NL'], 'rule_modifiers': ['RULE_MODIFIERS', ''], 'priority': ['_DOT NUMBER', ''], 'template_params': ['_LBRACE _template_params _RBRACE', ''], '_template_params': ['RULE', '_template_params _COMMA RULE'], 'expansions': ['_expansions'], '_expansions': ['alias', '_expansions _OR alias', '_expansions _NL_OR alias'], '?alias': ['expansion _TO nonterminal', 'expansion'], 'expansion': ['_expansion'], '_expansion': ['', '_expansion expr'], '?expr': ['atom', 'atom OP', 'atom TILDE NUMBER', 'atom TILDE NUMBER _DOTDOT NUMBER', ], '?atom': ['_LPAR expansions _RPAR', 'maybe', 'value'], 'value': ['terminal', 'nonterminal', 'literal', 'range', 'template_usage'], 'terminal': ['TERMINAL'], 'nonterminal': ['RULE'], '?name': ['RULE', 'TERMINAL'], '?symbol': ['terminal', 'nonterminal'], 'maybe': ['_LBRA expansions _RBRA'], 'range': ['STRING _DOTDOT STRING'], 'template_usage': ['nonterminal _LBRACE _template_args _RBRACE'], '_template_args': ['value', '_template_args _COMMA value'], 'term': ['TERMINAL _COLON expansions _NL', 'TERMINAL _DOT NUMBER _COLON expansions _NL'], 'override': ['_OVERRIDE rule', '_OVERRIDE term'], 'extend': ['_EXTEND rule', '_EXTEND term'], 'ignore': ['_IGNORE expansions _NL'], 'declare': ['_DECLARE _declare_args _NL'], 'import': ['_IMPORT _import_path _NL', '_IMPORT _import_path _LPAR name_list _RPAR _NL', '_IMPORT _import_path _TO name _NL'], '_import_path': ['import_lib', 'import_rel'], 'import_lib': ['_import_args'], 'import_rel': ['_DOT _import_args'], '_import_args': ['name', '_import_args _DOT name'], 'name_list': ['_name_list'], '_name_list': ['name', '_name_list _COMMA name'], '_declare_args': ['symbol', '_declare_args symbol'], 'literal': ['REGEXP', 'STRING'], } # Value 5 keeps the number of states in the lalr parser somewhat minimal # It isn't optimal, but close to it. 
See PR #949 SMALL_FACTOR_THRESHOLD = 5 # The Threshold whether repeat via ~ are split up into different rules # 50 is chosen since it keeps the number of states low and therefore lalr analysis time low, # while not being to overaggressive and unnecessarily creating rules that might create shift/reduce conflicts. # (See PR #949) REPEAT_BREAK_THRESHOLD = 50 class FindRuleSize(Transformer): def __init__(self, keep_all_tokens: bool): self.keep_all_tokens = keep_all_tokens def _will_not_get_removed(self, sym: Symbol) -> bool: if isinstance(sym, NonTerminal): return not sym.name.startswith('_') if isinstance(sym, Terminal): return self.keep_all_tokens or not sym.filter_out if sym is _EMPTY: return False assert False, sym def _args_as_int(self, args: List[Union[int, Symbol]]) -> Generator[int, None, None]: for a in args: if isinstance(a, int): yield a elif isinstance(a, Symbol): yield 1 if self._will_not_get_removed(a) else 0 else: assert False def expansion(self, args) -> int: return sum(self._args_as_int(args)) def expansions(self, args) -> int: return max(self._args_as_int(args)) @inline_args class EBNF_to_BNF(Transformer_InPlace): def __init__(self): self.new_rules = [] self.rules_cache = {} self.prefix = 'anon' self.i = 0 self.rule_options = None def _name_rule(self, inner: str): new_name = '__%s_%s_%d' % (self.prefix, inner, self.i) self.i += 1 return new_name def _add_rule(self, key, name, expansions): t = NonTerminal(name) self.new_rules.append((name, expansions, self.rule_options)) self.rules_cache[key] = t return t def _add_recurse_rule(self, type_: str, expr: Tree): try: return self.rules_cache[expr] except KeyError: new_name = self._name_rule(type_) t = NonTerminal(new_name) tree = ST('expansions', [ ST('expansion', [expr]), ST('expansion', [t, expr]) ]) return self._add_rule(expr, new_name, tree) def _add_repeat_rule(self, a, b, target, atom): """Generate a rule that repeats target ``a`` times, and repeats atom ``b`` times. When called recursively (into target), it repeats atom for x(n) times, where: x(0) = 1 x(n) = a(n) * x(n-1) + b Example rule when a=3, b=4: new_rule: target target target atom atom atom atom """ key = (a, b, target, atom) try: return self.rules_cache[key] except KeyError: new_name = self._name_rule('repeat_a%d_b%d' % (a, b)) tree = ST('expansions', [ST('expansion', [target] * a + [atom] * b)]) return self._add_rule(key, new_name, tree) def _add_repeat_opt_rule(self, a, b, target, target_opt, atom): """Creates a rule that matches atom 0 to (a*n+b)-1 times. 
When target matches n times atom, and target_opt 0 to n-1 times target_opt, First we generate target * i followed by target_opt, for i from 0 to a-1 These match 0 to n*a - 1 times atom Then we generate target * a followed by atom * i, for i from 0 to b-1 These match n*a to n*a + b-1 times atom The created rule will not have any shift/reduce conflicts so that it can be used with lalr Example rule when a=3, b=4: new_rule: target_opt | target target_opt | target target target_opt | target target target | target target target atom | target target target atom atom | target target target atom atom atom """ key = (a, b, target, atom, "opt") try: return self.rules_cache[key] except KeyError: new_name = self._name_rule('repeat_a%d_b%d_opt' % (a, b)) tree = ST('expansions', [ ST('expansion', [target]*i + [target_opt]) for i in range(a) ] + [ ST('expansion', [target]*a + [atom]*i) for i in range(b) ]) return self._add_rule(key, new_name, tree) def _generate_repeats(self, rule: Tree, mn: int, mx: int): """Generates a rule tree that repeats ``rule`` exactly between ``mn`` to ``mx`` times. """ # For a small number of repeats, we can take the naive approach if mx < REPEAT_BREAK_THRESHOLD: return ST('expansions', [ST('expansion', [rule] * n) for n in range(mn, mx + 1)]) # For large repeat values, we break the repetition into sub-rules. # We treat ``rule~mn..mx`` as ``rule~mn rule~0..(diff=mx-mn)``. # We then use small_factors to split up mn and diff up into values [(a, b), ...] # This values are used with the help of _add_repeat_rule and _add_repeat_rule_opt # to generate a complete rule/expression that matches the corresponding number of repeats mn_target = rule for a, b in small_factors(mn, SMALL_FACTOR_THRESHOLD): mn_target = self._add_repeat_rule(a, b, mn_target, rule) if mx == mn: return mn_target diff = mx - mn + 1 # We add one because _add_repeat_opt_rule generates rules that match one less diff_factors = small_factors(diff, SMALL_FACTOR_THRESHOLD) diff_target = rule # Match rule 1 times diff_opt_target = ST('expansion', []) # match rule 0 times (e.g. up to 1 -1 times) for a, b in diff_factors[:-1]: diff_opt_target = self._add_repeat_opt_rule(a, b, diff_target, diff_opt_target, rule) diff_target = self._add_repeat_rule(a, b, diff_target, rule) a, b = diff_factors[-1] diff_opt_target = self._add_repeat_opt_rule(a, b, diff_target, diff_opt_target, rule) return ST('expansions', [ST('expansion', [mn_target] + [diff_opt_target])]) def expr(self, rule: Tree, op: Token, *args): if op.value == '?': empty = ST('expansion', []) return ST('expansions', [rule, empty]) elif op.value == '+': # a : b c+ d # --> # a : b _c d # _c : _c c | c; return self._add_recurse_rule('plus', rule) elif op.value == '*': # a : b c* d # --> # a : b _c? 
d # _c : _c c | c; new_name = self._add_recurse_rule('star', rule) return ST('expansions', [new_name, ST('expansion', [])]) elif op.value == '~': if len(args) == 1: mn = mx = int(args[0]) else: mn, mx = map(int, args) if mx < mn or mn < 0: raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (rule, mn, mx)) return self._generate_repeats(rule, mn, mx) assert False, op def maybe(self, rule: Tree): keep_all_tokens = self.rule_options and self.rule_options.keep_all_tokens rule_size = FindRuleSize(keep_all_tokens).transform(rule) empty = ST('expansion', [_EMPTY] * rule_size) return ST('expansions', [rule, empty]) class SimplifyRule_Visitor(Visitor): @staticmethod def _flatten(tree: Tree): while tree.expand_kids_by_data(tree.data): pass def expansion(self, tree: Tree): # rules_list unpacking # a : b (c|d) e # --> # a : b c e | b d e # # In AST terms: # expansion(b, expansions(c, d), e) # --> # expansions( expansion(b, c, e), expansion(b, d, e) ) self._flatten(tree) for i, child in enumerate(tree.children): if isinstance(child, Tree) and child.data == 'expansions': tree.data = 'expansions' tree.children = [self.visit(ST('expansion', [option if i == j else other for j, other in enumerate(tree.children)])) for option in dedup_list(child.children)] self._flatten(tree) break def alias(self, tree): rule, alias_name = tree.children if rule.data == 'expansions': aliases = [] for child in tree.children[0].children: aliases.append(ST('alias', [child, alias_name])) tree.data = 'expansions' tree.children = aliases def expansions(self, tree: Tree): self._flatten(tree) # Ensure all children are unique if len(set(tree.children)) != len(tree.children): tree.children = dedup_list(tree.children) # dedup is expensive, so try to minimize its use class RuleTreeToText(Transformer): def expansions(self, x): return x def expansion(self, symbols): return symbols, None def alias(self, x): (expansion, _alias), alias = x assert _alias is None, (alias, expansion, '-', _alias) # Double alias not allowed return expansion, alias.name class PrepareAnonTerminals(Transformer_InPlace): """Create a unique list of anonymous terminals. 
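
    (Illustrative example, not part of the original docstring: in a grammar such as
    ``pair: item "," item``, the "," becomes an anonymous terminal; the code below
    tries to name it COMMA via _TERMINAL_NAMES, a literal like "foo" becomes FOO,
    and patterns with no obvious name fall back to __ANON_0, __ANON_1, ...)
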
Attempt to give meaningful names to them when we add them""" def __init__(self, terminals): self.terminals = terminals self.term_set = {td.name for td in self.terminals} self.term_reverse = {td.pattern: td for td in terminals} self.i = 0 self.rule_options = None @inline_args def pattern(self, p): value = p.value if p in self.term_reverse and p.flags != self.term_reverse[p].pattern.flags: raise GrammarError(u'Conflicting flags for the same terminal: %s' % p) term_name = None if isinstance(p, PatternStr): try: # If already defined, use the user-defined terminal name term_name = self.term_reverse[p].name except KeyError: # Try to assign an indicative anon-terminal name try: term_name = _TERMINAL_NAMES[value] except KeyError: if value and is_id_continue(value) and is_id_start(value[0]) and value.upper() not in self.term_set: term_name = value.upper() if term_name in self.term_set: term_name = None elif isinstance(p, PatternRE): if p in self.term_reverse: # Kind of a weird placement.name term_name = self.term_reverse[p].name else: assert False, p if term_name is None: term_name = '__ANON_%d' % self.i self.i += 1 if term_name not in self.term_set: assert p not in self.term_reverse self.term_set.add(term_name) termdef = TerminalDef(term_name, p) self.term_reverse[p] = termdef self.terminals.append(termdef) filter_out = False if self.rule_options and self.rule_options.keep_all_tokens else isinstance(p, PatternStr) return Terminal(term_name, filter_out=filter_out) class _ReplaceSymbols(Transformer_InPlace): """Helper for ApplyTemplates""" def __init__(self): self.names = {} def value(self, c): if len(c) == 1 and isinstance(c[0], Symbol) and c[0].name in self.names: return self.names[c[0].name] return self.__default__('value', c, None) def template_usage(self, c): name = c[0].name if name in self.names: return self.__default__('template_usage', [self.names[name]] + c[1:], None) return self.__default__('template_usage', c, None) class ApplyTemplates(Transformer_InPlace): """Apply the templates, creating new rules that represent the used templates""" def __init__(self, rule_defs): self.rule_defs = rule_defs self.replacer = _ReplaceSymbols() self.created_templates = set() def template_usage(self, c): name = c[0].name args = c[1:] result_name = "%s{%s}" % (name, ",".join(a.name for a in args)) if result_name not in self.created_templates: self.created_templates.add(result_name) (_n, params, tree, options) ,= (t for t in self.rule_defs if t[0] == name) assert len(params) == len(args), args result_tree = deepcopy(tree) self.replacer.names = dict(zip(params, args)) self.replacer.transform(result_tree) self.rule_defs.append((result_name, [], result_tree, deepcopy(options))) return NonTerminal(result_name) def _rfind(s, choices): return max(s.rfind(c) for c in choices) def eval_escaping(s): w = '' i = iter(s) for n in i: w += n if n == '\\': try: n2 = next(i) except StopIteration: raise GrammarError("Literal ended unexpectedly (bad escaping): `%r`" % s) if n2 == '\\': w += '\\\\' elif n2 not in 'Uuxnftr': w += '\\' w += n2 w = w.replace('\\"', '"').replace("'", "\\'") to_eval = "u'''%s'''" % w try: s = literal_eval(to_eval) except SyntaxError as e: raise GrammarError(s, e) return s def _literal_to_pattern(literal): assert isinstance(literal, Token) v = literal.value flag_start = _rfind(v, '/"')+1 assert flag_start > 0 flags = v[flag_start:] assert all(f in _RE_FLAGS for f in flags), flags if literal.type == 'STRING' and '\n' in v: raise GrammarError('You cannot put newlines in string literals') if 
literal.type == 'REGEXP' and '\n' in v and 'x' not in flags: raise GrammarError('You can only use newlines in regular expressions ' 'with the `x` (verbose) flag') v = v[:flag_start] assert v[0] == v[-1] and v[0] in '"/' x = v[1:-1] s = eval_escaping(x) if s == "": raise GrammarError("Empty terminals are not allowed (%s)" % literal) if literal.type == 'STRING': s = s.replace('\\\\', '\\') return PatternStr(s, flags, raw=literal.value) elif literal.type == 'REGEXP': return PatternRE(s, flags, raw=literal.value) else: assert False, 'Invariant failed: literal.type not in ["STRING", "REGEXP"]' @inline_args class PrepareLiterals(Transformer_InPlace): def literal(self, literal): return ST('pattern', [_literal_to_pattern(literal)]) def range(self, start, end): assert start.type == end.type == 'STRING' start = start.value[1:-1] end = end.value[1:-1] assert len(eval_escaping(start)) == len(eval_escaping(end)) == 1 regexp = '[%s-%s]' % (start, end) return ST('pattern', [PatternRE(regexp)]) def _make_joined_pattern(regexp, flags_set) -> PatternRE: return PatternRE(regexp, ()) class TerminalTreeToPattern(Transformer_NonRecursive): def pattern(self, ps): p ,= ps return p def expansion(self, items: List[Pattern]) -> Pattern: if not items: return PatternStr('') if len(items) == 1: return items[0] pattern = ''.join(i.to_regexp() for i in items) return _make_joined_pattern(pattern, {i.flags for i in items}) def expansions(self, exps: List[Pattern]) -> Pattern: if len(exps) == 1: return exps[0] # Do a bit of sorting to make sure that the longest option is returned # (Python's re module otherwise prefers just 'l' when given (l|ll) and both could match) exps.sort(key=lambda x: (-x.max_width, -x.min_width, -len(x.value))) pattern = '(?:%s)' % ('|'.join(i.to_regexp() for i in exps)) return _make_joined_pattern(pattern, {i.flags for i in exps}) def expr(self, args) -> Pattern: inner: Pattern inner, op = args[:2] if op == '~': if len(args) == 3: op = "{%d}" % int(args[2]) else: mn, mx = map(int, args[2:]) if mx < mn: raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (inner, mn, mx)) op = "{%d,%d}" % (mn, mx) else: assert len(args) == 2 return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags) def maybe(self, expr): return self.expr(expr + ['?']) def alias(self, t): raise GrammarError("Aliasing not allowed in terminals (You used -> in the wrong place)") def value(self, v): return v[0] class ValidateSymbols(Transformer_InPlace): def value(self, v): v ,= v assert isinstance(v, (Tree, Symbol)) return v def nr_deepcopy_tree(t): """Deepcopy tree `t` without recursion""" return Transformer_NonRecursive(False).transform(t) class Grammar: term_defs: List[Tuple[str, Tuple[Tree, int]]] rule_defs: List[Tuple[str, Tuple[str, ...], Tree, RuleOptions]] ignore: List[str] def __init__(self, rule_defs: List[Tuple[str, Tuple[str, ...], Tree, RuleOptions]], term_defs: List[Tuple[str, Tuple[Tree, int]]], ignore: List[str]) -> None: self.term_defs = term_defs self.rule_defs = rule_defs self.ignore = ignore def compile(self, start, terminals_to_keep) -> Tuple[List[TerminalDef], List[Rule], List[str]]: # We change the trees in-place (to support huge grammars) # So deepcopy allows calling compile more than once. 
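        # Illustrative note (added, not from the original source): every transformer
        # below mutates these trees in-place, so a caller that compiles twice, e.g.
        #     grammar.compile(['start'], '*')
        #     grammar.compile(['start'], '*')
        # depends on the fresh copies made here to get identical results both times.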
term_defs = [(n, (nr_deepcopy_tree(t), p)) for n, (t, p) in self.term_defs] rule_defs = [(n, p, nr_deepcopy_tree(t), o) for n, p, t, o in self.rule_defs] # =================== # Compile Terminals # =================== # Convert terminal-trees to strings/regexps for name, (term_tree, priority) in term_defs: if term_tree is None: # Terminal added through %declare continue expansions = list(term_tree.find_data('expansion')) if len(expansions) == 1 and not expansions[0].children: raise GrammarError("Terminals cannot be empty (%s)" % name) transformer = PrepareLiterals() * TerminalTreeToPattern() terminals = [TerminalDef(name, transformer.transform(term_tree), priority) for name, (term_tree, priority) in term_defs if term_tree] # ================= # Compile Rules # ================= # 1. Pre-process terminals anon_tokens_transf = PrepareAnonTerminals(terminals) transformer = PrepareLiterals() * ValidateSymbols() * anon_tokens_transf # Adds to terminals # 2. Inline Templates transformer *= ApplyTemplates(rule_defs) # 3. Convert EBNF to BNF (and apply step 1 & 2) ebnf_to_bnf = EBNF_to_BNF() rules = [] i = 0 while i < len(rule_defs): # We have to do it like this because rule_defs might grow due to templates name, params, rule_tree, options = rule_defs[i] i += 1 if len(params) != 0: # Dont transform templates continue rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None ebnf_to_bnf.rule_options = rule_options ebnf_to_bnf.prefix = name anon_tokens_transf.rule_options = rule_options tree = transformer.transform(rule_tree) res: Tree = ebnf_to_bnf.transform(tree) rules.append((name, res, options)) rules += ebnf_to_bnf.new_rules assert len(rules) == len({name for name, _t, _o in rules}), "Whoops, name collision" # 4. Compile tree to Rule objects rule_tree_to_text = RuleTreeToText() simplify_rule = SimplifyRule_Visitor() compiled_rules: List[Rule] = [] for rule_content in rules: name, tree, options = rule_content simplify_rule.visit(tree) expansions = rule_tree_to_text.transform(tree) for i, (expansion, alias) in enumerate(expansions): if alias and name.startswith('_'): raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)"% (name, alias)) empty_indices = tuple(x==_EMPTY for x in expansion) if any(empty_indices): exp_options = copy(options) or RuleOptions() exp_options.empty_indices = empty_indices expansion = [x for x in expansion if x!=_EMPTY] else: exp_options = options for sym in expansion: assert isinstance(sym, Symbol) if sym.is_term and exp_options and exp_options.keep_all_tokens: assert isinstance(sym, Terminal) sym.filter_out = False rule = Rule(NonTerminal(name), expansion, i, alias, exp_options) compiled_rules.append(rule) # Remove duplicates of empty rules, throw error for non-empty duplicates if len(set(compiled_rules)) != len(compiled_rules): duplicates = classify(compiled_rules, lambda x: x) for dups in duplicates.values(): if len(dups) > 1: if dups[0].expansion: raise GrammarError("Rules defined twice: %s\n\n(Might happen due to colliding expansion of optionals: [] or ?)" % ''.join('\n * %s' % i for i in dups)) # Empty rule; assert all other attributes are equal assert len({(r.alias, r.order, r.options) for r in dups}) == len(dups) # Remove duplicates compiled_rules = list(OrderedSet(compiled_rules)) # Filter out unused rules while True: c = len(compiled_rules) used_rules = {s for r in compiled_rules for s in r.expansion if isinstance(s, NonTerminal) and s != r.origin} 
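            # Fixed-point pruning, illustrated (added note): with start=['a'] and
            # rules  a -> b,  b -> "x",  c -> "y",  the first pass keeps {a, b} and
            # drops c; the loop repeats until the rule count stops shrinking, so
            # rules reachable only from other unused rules are removed as well.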
used_rules |= {NonTerminal(s) for s in start} compiled_rules, unused = classify_bool(compiled_rules, lambda r: r.origin in used_rules) for r in unused: logger.debug("Unused rule: %s", r) if len(compiled_rules) == c: break # Filter out unused terminals if terminals_to_keep != '*': used_terms = {t.name for r in compiled_rules for t in r.expansion if isinstance(t, Terminal)} terminals, unused = classify_bool(terminals, lambda t: t.name in used_terms or t.name in self.ignore or t.name in terminals_to_keep) if unused: logger.debug("Unused terminals: %s", [t.name for t in unused]) return terminals, compiled_rules, self.ignore PackageResource = namedtuple('PackageResource', 'pkg_name path') class FromPackageLoader: """ Provides a simple way of creating custom import loaders that load from packages via ``pkgutil.get_data`` instead of using `open`. This allows them to be compatible even from within zip files. Relative imports are handled, so you can just freely use them. pkg_name: The name of the package. You can probably provide `__name__` most of the time search_paths: All the path that will be search on absolute imports. """ pkg_name: str search_paths: Sequence[str] def __init__(self, pkg_name: str, search_paths: Sequence[str]=("", )) -> None: self.pkg_name = pkg_name self.search_paths = search_paths def __repr__(self): return "%s(%r, %r)" % (type(self).__name__, self.pkg_name, self.search_paths) def __call__(self, base_path: Union[None, str, PackageResource], grammar_path: str) -> Tuple[PackageResource, str]: if base_path is None: to_try = self.search_paths else: # Check whether or not the importing grammar was loaded by this module. if not isinstance(base_path, PackageResource) or base_path.pkg_name != self.pkg_name: # Technically false, but FileNotFound doesn't exist in python2.7, and this message should never reach the end user anyway raise IOError() to_try = [base_path.path] err = None for path in to_try: full_path = os.path.join(path, grammar_path) try: text: Optional[bytes] = pkgutil.get_data(self.pkg_name, full_path) except IOError as e: err = e continue else: return PackageResource(self.pkg_name, full_path), (text.decode() if text else '') raise IOError('Cannot find grammar in given paths') from err stdlib_loader = FromPackageLoader('lark', IMPORT_PATHS) def resolve_term_references(term_dict): # TODO Solve with transitive closure (maybe) while True: changed = False for name, token_tree in term_dict.items(): if token_tree is None: # Terminal added through %declare continue for exp in token_tree.find_data('value'): item ,= exp.children if isinstance(item, NonTerminal): raise GrammarError("Rules aren't allowed inside terminals (%s in %s)" % (item, name)) elif isinstance(item, Terminal): try: term_value = term_dict[item.name] except KeyError: raise GrammarError("Terminal used but not defined: %s" % item.name) assert term_value is not None exp.children[0] = term_value changed = True else: assert isinstance(item, Tree) if not changed: break for name, term in term_dict.items(): if term: # Not just declared for child in term.children: ids = [id(x) for x in child.iter_subtrees()] if id(term) in ids: raise GrammarError("Recursion in terminal '%s' (recursion is only allowed in rules, not terminals)" % name) def symbol_from_strcase(s): assert isinstance(s, str) return Terminal(s, filter_out=s.startswith('_')) if s.isupper() else NonTerminal(s) @inline_args class PrepareGrammar(Transformer_InPlace): def terminal(self, name): return Terminal(str(name), filter_out=name.startswith('_')) def 
nonterminal(self, name): return NonTerminal(name.value) def _find_used_symbols(tree): assert tree.data == 'expansions' return {t.name for x in tree.find_data('expansion') for t in x.scan_values(lambda t: isinstance(t, Symbol))} def _get_parser(): try: return _get_parser.cache except AttributeError: terminals = [TerminalDef(name, PatternRE(value)) for name, value in TERMINALS.items()] rules = [(name.lstrip('?'), x, RuleOptions(expand1=name.startswith('?'))) for name, x in RULES.items()] rules = [Rule(NonTerminal(r), [symbol_from_strcase(s) for s in x.split()], i, None, o) for r, xs, o in rules for i, x in enumerate(xs)] callback = ParseTreeBuilder(rules, ST).create_callback() import re lexer_conf = LexerConf(terminals, re, ['WS', 'COMMENT', 'BACKSLASH']) parser_conf = ParserConf(rules, callback, ['start']) lexer_conf.lexer_type = 'basic' parser_conf.parser_type = 'lalr' _get_parser.cache = ParsingFrontend(lexer_conf, parser_conf, None) return _get_parser.cache GRAMMAR_ERRORS = [ ('Incorrect type of value', ['a: 1\n']), ('Unclosed parenthesis', ['a: (\n']), ('Unmatched closing parenthesis', ['a: )\n', 'a: [)\n', 'a: (]\n']), ('Expecting rule or terminal definition (missing colon)', ['a\n', 'A\n', 'a->\n', 'A->\n', 'a A\n']), ('Illegal name for rules or terminals', ['Aa:\n']), ('Alias expects lowercase name', ['a: -> "a"\n']), ('Unexpected colon', ['a::\n', 'a: b:\n', 'a: B:\n', 'a: "a":\n']), ('Misplaced operator', ['a: b??', 'a: b(?)', 'a:+\n', 'a:?\n', 'a:*\n', 'a:|*\n']), ('Expecting option ("|") or a new rule or terminal definition', ['a:a\n()\n']), ('Terminal names cannot contain dots', ['A.B\n']), ('Expecting rule or terminal definition', ['"a"\n']), ('%import expects a name', ['%import "a"\n']), ('%ignore expects a value', ['%ignore %import\n']), ] def _translate_parser_exception(parse, e): error = e.match_examples(parse, GRAMMAR_ERRORS, use_accepts=True) if error: return error elif 'STRING' in e.expected: return "Expecting a value" def _parse_grammar(text, name, start='start'): try: tree = _get_parser().parse(text + '\n', start) except UnexpectedCharacters as e: context = e.get_context(text) raise GrammarError("Unexpected input at line %d column %d in %s: \n\n%s" % (e.line, e.column, name, context)) except UnexpectedToken as e: context = e.get_context(text) error = _translate_parser_exception(_get_parser().parse, e) if error: raise GrammarError("%s, at line %s column %s\n\n%s" % (error, e.line, e.column, context)) raise return PrepareGrammar().transform(tree) def _error_repr(error): if isinstance(error, UnexpectedToken): error2 = _translate_parser_exception(_get_parser().parse, error) if error2: return error2 expected = ', '.join(error.accepts or error.expected) return "Unexpected token %r. 
Expected one of: {%s}" % (str(error.token), expected) else: return str(error) def _search_interactive_parser(interactive_parser, predicate): def expand(node): path, p = node for choice in p.choices(): t = Token(choice, '') try: new_p = p.feed_token(t) except ParseError: # Illegal pass else: yield path + (choice,), new_p for path, p in bfs_all_unique([((), interactive_parser)], expand): if predicate(p): return path, p def find_grammar_errors(text: str, start: str='start') -> List[Tuple[UnexpectedInput, str]]: errors = [] def on_error(e): errors.append((e, _error_repr(e))) # recover to a new line token_path, _ = _search_interactive_parser(e.interactive_parser.as_immutable(), lambda p: '_NL' in p.choices()) for token_type in token_path: e.interactive_parser.feed_token(Token(token_type, '')) e.interactive_parser.feed_token(Token('_NL', '\n')) return True _tree = _get_parser().parse(text + '\n', start, on_error=on_error) errors_by_line = classify(errors, lambda e: e[0].line) errors = [el[0] for el in errors_by_line.values()] # already sorted for e in errors: e[0].interactive_parser = None return errors def _get_mangle(prefix, aliases, base_mangle=None): def mangle(s): if s in aliases: s = aliases[s] else: if s[0] == '_': s = '_%s__%s' % (prefix, s[1:]) else: s = '%s__%s' % (prefix, s) if base_mangle is not None: s = base_mangle(s) return s return mangle def _mangle_definition_tree(exp, mangle): if mangle is None: return exp exp = deepcopy(exp) # TODO: is this needed? for t in exp.iter_subtrees(): for i, c in enumerate(t.children): if isinstance(c, Symbol): t.children[i] = c.renamed(mangle) return exp def _make_rule_tuple(modifiers_tree, name, params, priority_tree, expansions): if modifiers_tree.children: m ,= modifiers_tree.children expand1 = '?' in m if expand1 and name.startswith('_'): raise GrammarError("Inlined rules (_rule) cannot use the ?rule modifier.") keep_all_tokens = '!' in m else: keep_all_tokens = False expand1 = False if priority_tree.children: p ,= priority_tree.children priority = int(p) else: priority = None if params is not None: params = [t.value for t in params.children] # For the grammar parser return name, params, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority, template_source=(name if params else None)) class Definition: def __init__(self, is_term, tree, params=(), options=None): self.is_term = is_term self.tree = tree self.params = tuple(params) self.options = options class GrammarBuilder: global_keep_all_tokens: bool import_paths: List[Union[str, Callable]] used_files: Dict[str, str] _definitions: Dict[str, Definition] _ignore_names: List[str] def __init__(self, global_keep_all_tokens: bool=False, import_paths: Optional[List[Union[str, Callable]]]=None, used_files: Optional[Dict[str, str]]=None) -> None: self.global_keep_all_tokens = global_keep_all_tokens self.import_paths = import_paths or [] self.used_files = used_files or {} self._definitions: Dict[str, Definition] = {} self._ignore_names: List[str] = [] def _grammar_error(self, is_term, msg, *names): args = {} for i, name in enumerate(names, start=1): postfix = '' if i == 1 else str(i) args['name' + postfix] = name args['type' + postfix] = lowercase_type = ("rule", "terminal")[is_term] args['Type' + postfix] = lowercase_type.title() raise GrammarError(msg.format(**args)) def _check_options(self, is_term, options): if is_term: if options is None: options = 1 elif not isinstance(options, int): raise GrammarError("Terminal require a single int as 'options' (e.g. 
priority), got %s" % (type(options),)) else: if options is None: options = RuleOptions() elif not isinstance(options, RuleOptions): raise GrammarError("Rules require a RuleOptions instance as 'options'") if self.global_keep_all_tokens: options.keep_all_tokens = True return options def _define(self, name, is_term, exp, params=(), options=None, *, override=False): if name in self._definitions: if not override: self._grammar_error(is_term, "{Type} '{name}' defined more than once", name) elif override: self._grammar_error(is_term, "Cannot override a nonexisting {type} {name}", name) if name.startswith('__'): self._grammar_error(is_term, 'Names starting with double-underscore are reserved (Error at {name})', name) self._definitions[name] = Definition(is_term, exp, params, self._check_options(is_term, options)) def _extend(self, name, is_term, exp, params=(), options=None): if name not in self._definitions: self._grammar_error(is_term, "Can't extend {type} {name} as it wasn't defined before", name) d = self._definitions[name] if is_term != d.is_term: self._grammar_error(is_term, "Cannot extend {type} {name} - one is a terminal, while the other is not.", name) if tuple(params) != d.params: self._grammar_error(is_term, "Cannot extend {type} with different parameters: {name}", name) if d.tree is None: self._grammar_error(is_term, "Can't extend {type} {name} - it is abstract.", name) # TODO: think about what to do with 'options' base = d.tree assert isinstance(base, Tree) and base.data == 'expansions' base.children.insert(0, exp) def _ignore(self, exp_or_name): if isinstance(exp_or_name, str): self._ignore_names.append(exp_or_name) else: assert isinstance(exp_or_name, Tree) t = exp_or_name if t.data == 'expansions' and len(t.children) == 1: t2 ,= t.children if t2.data=='expansion' and len(t2.children) == 1: item ,= t2.children if item.data == 'value': item ,= item.children if isinstance(item, Terminal): # Keep terminal name, no need to create a new definition self._ignore_names.append(item.name) return name = '__IGNORE_%d'% len(self._ignore_names) self._ignore_names.append(name) self._definitions[name] = Definition(True, t, options=TOKEN_DEFAULT_PRIORITY) def _unpack_import(self, stmt, grammar_name): if len(stmt.children) > 1: path_node, arg1 = stmt.children else: path_node, = stmt.children arg1 = None if isinstance(arg1, Tree): # Multi import dotted_path = tuple(path_node.children) names = arg1.children aliases = dict(zip(names, names)) # Can't have aliased multi import, so all aliases will be the same as names else: # Single import dotted_path = tuple(path_node.children[:-1]) if not dotted_path: name ,= path_node.children raise GrammarError("Nothing was imported from grammar `%s`" % name) name = path_node.children[-1] # Get name from dotted path aliases = {name.value: (arg1 or name).value} # Aliases if exist if path_node.data == 'import_lib': # Import from library base_path = None else: # Relative import if grammar_name == '': # Import relative to script file path if grammar is coded in script try: base_file = os.path.abspath(sys.modules['__main__'].__file__) except AttributeError: base_file = None else: base_file = grammar_name # Import relative to grammar file path if external grammar file if base_file: if isinstance(base_file, PackageResource): base_path = PackageResource(base_file.pkg_name, os.path.split(base_file.path)[0]) else: base_path = os.path.split(base_file)[0] else: base_path = os.path.abspath(os.path.curdir) return dotted_path, base_path, aliases def _unpack_definition(self, tree, 
mangle): if tree.data == 'rule': name, params, exp, opts = _make_rule_tuple(*tree.children) is_term = False else: name = tree.children[0].value params = () # TODO terminal templates opts = int(tree.children[1]) if len(tree.children) == 3 else TOKEN_DEFAULT_PRIORITY # priority exp = tree.children[-1] is_term = True if mangle is not None: params = tuple(mangle(p) for p in params) name = mangle(name) exp = _mangle_definition_tree(exp, mangle) return name, is_term, exp, params, opts def load_grammar(self, grammar_text: str, grammar_name: str="", mangle: Optional[Callable[[str], str]]=None) -> None: tree = _parse_grammar(grammar_text, grammar_name) imports: Dict[Tuple[str, ...], Tuple[Optional[str], Dict[str, str]]] = {} for stmt in tree.children: if stmt.data == 'import': dotted_path, base_path, aliases = self._unpack_import(stmt, grammar_name) try: import_base_path, import_aliases = imports[dotted_path] assert base_path == import_base_path, 'Inconsistent base_path for %s.' % '.'.join(dotted_path) import_aliases.update(aliases) except KeyError: imports[dotted_path] = base_path, aliases for dotted_path, (base_path, aliases) in imports.items(): self.do_import(dotted_path, base_path, aliases, mangle) for stmt in tree.children: if stmt.data in ('term', 'rule'): self._define(*self._unpack_definition(stmt, mangle)) elif stmt.data == 'override': r ,= stmt.children self._define(*self._unpack_definition(r, mangle), override=True) elif stmt.data == 'extend': r ,= stmt.children self._extend(*self._unpack_definition(r, mangle)) elif stmt.data == 'ignore': # if mangle is not None, we shouldn't apply ignore, since we aren't in a toplevel grammar if mangle is None: self._ignore(*stmt.children) elif stmt.data == 'declare': for symbol in stmt.children: assert isinstance(symbol, Symbol), symbol is_term = isinstance(symbol, Terminal) if mangle is None: name = symbol.name else: name = mangle(symbol.name) self._define(name, is_term, None) elif stmt.data == 'import': pass else: assert False, stmt term_defs = { name: d.tree for name, d in self._definitions.items() if d.is_term } resolve_term_references(term_defs) def _remove_unused(self, used): def rule_dependencies(symbol): try: d = self._definitions[symbol] except KeyError: return [] if d.is_term: return [] return _find_used_symbols(d.tree) - set(d.params) _used = set(bfs(used, rule_dependencies)) self._definitions = {k: v for k, v in self._definitions.items() if k in _used} def do_import(self, dotted_path: Tuple[str, ...], base_path: Optional[str], aliases: Dict[str, str], base_mangle: Optional[Callable[[str], str]]=None) -> None: assert dotted_path mangle = _get_mangle('__'.join(dotted_path), aliases, base_mangle) grammar_path = os.path.join(*dotted_path) + EXT to_try = self.import_paths + ([base_path] if base_path is not None else []) + [stdlib_loader] for source in to_try: try: if callable(source): joined_path, text = source(base_path, grammar_path) else: joined_path = os.path.join(source, grammar_path) with open(joined_path, encoding='utf8') as f: text = f.read() except IOError: continue else: h = sha256_digest(text) if self.used_files.get(joined_path, h) != h: raise RuntimeError("Grammar file was changed during importing") self.used_files[joined_path] = h gb = GrammarBuilder(self.global_keep_all_tokens, self.import_paths, self.used_files) gb.load_grammar(text, joined_path, mangle) gb._remove_unused(map(mangle, aliases)) for name in gb._definitions: if name in self._definitions: raise GrammarError("Cannot import '%s' from '%s': Symbol already defined." 
% (name, grammar_path)) self._definitions.update(**gb._definitions) break else: # Search failed. Make Python throw a nice error. open(grammar_path, encoding='utf8') assert False, "Couldn't import grammar %s, but a corresponding file was found at a place where lark doesn't search for it" % (dotted_path,) def validate(self) -> None: for name, d in self._definitions.items(): params = d.params exp = d.tree for i, p in enumerate(params): if p in self._definitions: raise GrammarError("Template Parameter conflicts with rule %s (in template %s)" % (p, name)) if p in params[:i]: raise GrammarError("Duplicate Template Parameter %s (in template %s)" % (p, name)) if exp is None: # Remaining checks don't apply to abstract rules/terminals (created with %declare) continue for temp in exp.find_data('template_usage'): sym = temp.children[0].name args = temp.children[1:] if sym not in params: if sym not in self._definitions: self._grammar_error(d.is_term, "Template '%s' used but not defined (in {type} {name})" % sym, name) if len(args) != len(self._definitions[sym].params): expected, actual = len(self._definitions[sym].params), len(args) self._grammar_error(d.is_term, "Wrong number of template arguments used for {name} " "(expected %s, got %s) (in {type2} {name2})" % (expected, actual), sym, name) for sym in _find_used_symbols(exp): if sym not in self._definitions and sym not in params: self._grammar_error(d.is_term, "{Type} '{name}' used but not defined (in {type2} {name2})", sym, name) if not set(self._definitions).issuperset(self._ignore_names): raise GrammarError("Terminals %s were marked to ignore but were not defined!" % (set(self._ignore_names) - set(self._definitions))) def build(self) -> Grammar: self.validate() rule_defs = [] term_defs = [] for name, d in self._definitions.items(): (params, exp, options) = d.params, d.tree, d.options if d.is_term: assert len(params) == 0 term_defs.append((name, (exp, options))) else: rule_defs.append((name, params, exp, options)) # resolve_term_references(term_defs) return Grammar(rule_defs, term_defs, self._ignore_names) def verify_used_files(file_hashes): for path, old in file_hashes.items(): text = None if isinstance(path, str) and os.path.exists(path): with open(path, encoding='utf8') as f: text = f.read() elif isinstance(path, PackageResource): with suppress(IOError): text = pkgutil.get_data(*path).decode('utf-8') if text is None: # We don't know how to load the path. ignore it. continue current = sha256_digest(text) if old != current: logger.info("File %r changed, rebuilding Parser" % path) return False return True def list_grammar_imports(grammar, import_paths=[]): "Returns a list of paths to the lark grammars imported by the given grammar (recursively)" builder = GrammarBuilder(False, import_paths) builder.load_grammar(grammar, '') return list(builder.used_files.keys()) def load_grammar(grammar, source, import_paths, global_keep_all_tokens): builder = GrammarBuilder(global_keep_all_tokens, import_paths) builder.load_grammar(grammar, source) return builder.build(), builder.used_files def sha256_digest(s: str) -> str: """Get the sha256 digest of a string Supports the `usedforsecurity` argument for Python 3.9+ to allow running on a FIPS-enabled system. 
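
    Example (illustrative, not from the original docstring):
        sha256_digest("abc") == 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'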
""" if sys.version_info >= (3, 9): return hashlib.sha256(s.encode('utf8'), usedforsecurity=False).hexdigest() else: return hashlib.sha256(s.encode('utf8')).hexdigest() poetry-core-2.1.1/src/poetry/core/_vendor/lark/parse_tree_builder.py000066400000000000000000000341141475444614500256430ustar00rootroot00000000000000"""Provides functions for the automatic building and shaping of the parse-tree.""" from typing import List from .exceptions import GrammarError, ConfigurationError from .lexer import Token from .tree import Tree from .visitors import Transformer_InPlace from .visitors import _vargs_meta, _vargs_meta_inline ###{standalone from functools import partial, wraps from itertools import product class ExpandSingleChild: def __init__(self, node_builder): self.node_builder = node_builder def __call__(self, children): if len(children) == 1: return children[0] else: return self.node_builder(children) class PropagatePositions: def __init__(self, node_builder, node_filter=None): self.node_builder = node_builder self.node_filter = node_filter def __call__(self, children): res = self.node_builder(children) if isinstance(res, Tree): # Calculate positions while the tree is streaming, according to the rule: # - nodes start at the start of their first child's container, # and end at the end of their last child's container. # Containers are nodes that take up space in text, but have been inlined in the tree. res_meta = res.meta first_meta = self._pp_get_meta(children) if first_meta is not None: if not hasattr(res_meta, 'line'): # meta was already set, probably because the rule has been inlined (e.g. `?rule`) res_meta.line = getattr(first_meta, 'container_line', first_meta.line) res_meta.column = getattr(first_meta, 'container_column', first_meta.column) res_meta.start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) res_meta.empty = False res_meta.container_line = getattr(first_meta, 'container_line', first_meta.line) res_meta.container_column = getattr(first_meta, 'container_column', first_meta.column) res_meta.container_start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) last_meta = self._pp_get_meta(reversed(children)) if last_meta is not None: if not hasattr(res_meta, 'end_line'): res_meta.end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) res_meta.end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) res_meta.end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) res_meta.empty = False res_meta.container_end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) res_meta.container_end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) res_meta.container_end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) return res def _pp_get_meta(self, children): for c in children: if self.node_filter is not None and not self.node_filter(c): continue if isinstance(c, Tree): if not c.meta.empty: return c.meta elif isinstance(c, Token): return c elif hasattr(c, '__lark_meta__'): return c.__lark_meta__() def make_propagate_positions(option): if callable(option): return partial(PropagatePositions, node_filter=option) elif option is True: return PropagatePositions elif option is False: return None raise ConfigurationError('Invalid option for propagate_positions: %r' % option) class ChildFilter: def __init__(self, to_include, append_none, node_builder): self.node_builder = node_builder self.to_include = to_include self.append_none = append_none def 
__call__(self, children): filtered = [] for i, to_expand, add_none in self.to_include: if add_none: filtered += [None] * add_none if to_expand: filtered += children[i].children else: filtered.append(children[i]) if self.append_none: filtered += [None] * self.append_none return self.node_builder(filtered) class ChildFilterLALR(ChildFilter): """Optimized childfilter for LALR (assumes no duplication in parse tree, so it's safe to change it)""" def __call__(self, children): filtered = [] for i, to_expand, add_none in self.to_include: if add_none: filtered += [None] * add_none if to_expand: if filtered: filtered += children[i].children else: # Optimize for left-recursion filtered = children[i].children else: filtered.append(children[i]) if self.append_none: filtered += [None] * self.append_none return self.node_builder(filtered) class ChildFilterLALR_NoPlaceholders(ChildFilter): "Optimized childfilter for LALR (assumes no duplication in parse tree, so it's safe to change it)" def __init__(self, to_include, node_builder): self.node_builder = node_builder self.to_include = to_include def __call__(self, children): filtered = [] for i, to_expand in self.to_include: if to_expand: if filtered: filtered += children[i].children else: # Optimize for left-recursion filtered = children[i].children else: filtered.append(children[i]) return self.node_builder(filtered) def _should_expand(sym): return not sym.is_term and sym.name.startswith('_') def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices: List[bool]): # Prepare empty_indices as: How many Nones to insert at each index? if _empty_indices: assert _empty_indices.count(False) == len(expansion) s = ''.join(str(int(b)) for b in _empty_indices) empty_indices = [len(ones) for ones in s.split('0')] assert len(empty_indices) == len(expansion)+1, (empty_indices, len(expansion)) else: empty_indices = [0] * (len(expansion)+1) to_include = [] nones_to_add = 0 for i, sym in enumerate(expansion): nones_to_add += empty_indices[i] if keep_all_tokens or not (sym.is_term and sym.filter_out): to_include.append((i, _should_expand(sym), nones_to_add)) nones_to_add = 0 nones_to_add += empty_indices[len(expansion)] if _empty_indices or len(to_include) < len(expansion) or any(to_expand for i, to_expand,_ in to_include): if _empty_indices or ambiguous: return partial(ChildFilter if ambiguous else ChildFilterLALR, to_include, nones_to_add) else: # LALR without placeholders return partial(ChildFilterLALR_NoPlaceholders, [(i, x) for i,x,_ in to_include]) class AmbiguousExpander: """Deal with the case where we're expanding children ('_rule') into a parent but the children are ambiguous. i.e. (parent->_ambig->_expand_this_rule). In this case, make the parent itself ambiguous with as many copies as there are ambiguous children, and then copy the ambiguous children into the right parents in the right places, essentially shifting the ambiguity up the tree.""" def __init__(self, to_expand, tree_class, node_builder): self.node_builder = node_builder self.tree_class = tree_class self.to_expand = to_expand def __call__(self, children): def _is_ambig_tree(t): return hasattr(t, 'data') and t.data == '_ambig' # -- When we're repeatedly expanding ambiguities we can end up with nested ambiguities. # All children of an _ambig node should be a derivation of that ambig node, hence # it is safe to assume that if we see an _ambig node nested within an ambig node # it is safe to simply expand it into the parent _ambig node as an alternative derivation. 
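        # Illustrative sketch (added; not from the original source) of the shift
        # performed below, for a parent with one ambiguous, expandable child:
        #     parent                       _ambig
        #       _ambig                       parent(derivation1, ...)
        #         derivation1        =>      parent(derivation2, ...)
        #         derivation2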
ambiguous = [] for i, child in enumerate(children): if _is_ambig_tree(child): if i in self.to_expand: ambiguous.append(i) child.expand_kids_by_data('_ambig') if not ambiguous: return self.node_builder(children) expand = [child.children if i in ambiguous else (child,) for i, child in enumerate(children)] return self.tree_class('_ambig', [self.node_builder(list(f)) for f in product(*expand)]) def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens): to_expand = [i for i, sym in enumerate(expansion) if keep_all_tokens or ((not (sym.is_term and sym.filter_out)) and _should_expand(sym))] if to_expand: return partial(AmbiguousExpander, to_expand, tree_class) class AmbiguousIntermediateExpander: """ Propagate ambiguous intermediate nodes and their derivations up to the current rule. In general, converts rule _iambig _inter someChildren1 ... _inter someChildren2 ... someChildren3 ... to _ambig rule someChildren1 ... someChildren3 ... rule someChildren2 ... someChildren3 ... rule childrenFromNestedIambigs ... someChildren3 ... ... propagating up any nested '_iambig' nodes along the way. """ def __init__(self, tree_class, node_builder): self.node_builder = node_builder self.tree_class = tree_class def __call__(self, children): def _is_iambig_tree(child): return hasattr(child, 'data') and child.data == '_iambig' def _collapse_iambig(children): """ Recursively flatten the derivations of the parent of an '_iambig' node. Returns a list of '_inter' nodes guaranteed not to contain any nested '_iambig' nodes, or None if children does not contain an '_iambig' node. """ # Due to the structure of the SPPF, # an '_iambig' node can only appear as the first child if children and _is_iambig_tree(children[0]): iambig_node = children[0] result = [] for grandchild in iambig_node.children: collapsed = _collapse_iambig(grandchild.children) if collapsed: for child in collapsed: child.children += children[1:] result += collapsed else: new_tree = self.tree_class('_inter', grandchild.children + children[1:]) result.append(new_tree) return result collapsed = _collapse_iambig(children) if collapsed: processed_nodes = [self.node_builder(c.children) for c in collapsed] return self.tree_class('_ambig', processed_nodes) return self.node_builder(children) def inplace_transformer(func): @wraps(func) def f(children): # function name in a Transformer is a rule name. 
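        # e.g. for  def my_rule(self, tree): ...  on a Transformer_InPlace, this
        # wrapper builds Tree('my_rule', children) so the method receives a whole
        # tree instead of the bare children list (added illustrative note).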
tree = Tree(func.__name__, children) return func(tree) return f def apply_visit_wrapper(func, name, wrapper): if wrapper is _vargs_meta or wrapper is _vargs_meta_inline: raise NotImplementedError("Meta args not supported for internal transformer") @wraps(func) def f(children): return wrapper(func, name, children, None) return f class ParseTreeBuilder: def __init__(self, rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False): self.tree_class = tree_class self.propagate_positions = propagate_positions self.ambiguous = ambiguous self.maybe_placeholders = maybe_placeholders self.rule_builders = list(self._init_builders(rules)) def _init_builders(self, rules): propagate_positions = make_propagate_positions(self.propagate_positions) for rule in rules: options = rule.options keep_all_tokens = options.keep_all_tokens expand_single_child = options.expand1 wrapper_chain = list(filter(None, [ (expand_single_child and not rule.alias) and ExpandSingleChild, maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None), propagate_positions, self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens), self.ambiguous and partial(AmbiguousIntermediateExpander, self.tree_class) ])) yield rule, wrapper_chain def create_callback(self, transformer=None): callbacks = {} default_handler = getattr(transformer, '__default__', None) if default_handler: def default_callback(data, children): return default_handler(data, children, None) else: default_callback = self.tree_class for rule, wrapper_chain in self.rule_builders: user_callback_name = rule.alias or rule.options.template_source or rule.origin.name try: f = getattr(transformer, user_callback_name) wrapper = getattr(f, 'visit_wrapper', None) if wrapper is not None: f = apply_visit_wrapper(f, user_callback_name, wrapper) elif isinstance(transformer, Transformer_InPlace): f = inplace_transformer(f) except AttributeError: f = partial(default_callback, user_callback_name) for w in wrapper_chain: f = w(f) if rule in callbacks: raise GrammarError("Rule '%s' already exists" % (rule,)) callbacks[rule] = f return callbacks ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/parser_frontends.py000066400000000000000000000236651475444614500253730ustar00rootroot00000000000000from typing import Any, Callable, Dict, Optional, Collection, Union, TYPE_CHECKING from .exceptions import ConfigurationError, GrammarError, assert_config from .utils import get_regexp_width, Serialize from .lexer import LexerThread, BasicLexer, ContextualLexer, Lexer from .parsers import earley, xearley, cyk from .parsers.lalr_parser import LALR_Parser from .tree import Tree from .common import LexerConf, ParserConf, _ParserArgType, _LexerArgType if TYPE_CHECKING: from .parsers.lalr_analysis import ParseTableBase ###{standalone def _wrap_lexer(lexer_class): future_interface = getattr(lexer_class, '__future_interface__', False) if future_interface: return lexer_class else: class CustomLexerWrapper(Lexer): def __init__(self, lexer_conf): self.lexer = lexer_class(lexer_conf) def lex(self, lexer_state, parser_state): return self.lexer.lex(lexer_state.text) return CustomLexerWrapper def _deserialize_parsing_frontend(data, memo, lexer_conf, callbacks, options): parser_conf = ParserConf.deserialize(data['parser_conf'], memo) cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser parser = cls.deserialize(data['parser'], memo, callbacks, 
options.debug) parser_conf.callbacks = callbacks return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser) _parser_creators: 'Dict[str, Callable[[LexerConf, Any, Any], Any]]' = {} class ParsingFrontend(Serialize): __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser' lexer_conf: LexerConf parser_conf: ParserConf options: Any def __init__(self, lexer_conf: LexerConf, parser_conf: ParserConf, options, parser=None): self.parser_conf = parser_conf self.lexer_conf = lexer_conf self.options = options # Set-up parser if parser: # From cache self.parser = parser else: create_parser = _parser_creators.get(parser_conf.parser_type) assert create_parser is not None, "{} is not supported in standalone mode".format( parser_conf.parser_type ) self.parser = create_parser(lexer_conf, parser_conf, options) # Set-up lexer lexer_type = lexer_conf.lexer_type self.skip_lexer = False if lexer_type in ('dynamic', 'dynamic_complete'): assert lexer_conf.postlex is None self.skip_lexer = True return if isinstance(lexer_type, type): assert issubclass(lexer_type, Lexer) self.lexer = _wrap_lexer(lexer_type)(lexer_conf) elif isinstance(lexer_type, str): create_lexer = { 'basic': create_basic_lexer, 'contextual': create_contextual_lexer, }[lexer_type] self.lexer = create_lexer(lexer_conf, self.parser, lexer_conf.postlex, options) else: raise TypeError("Bad value for lexer_type: {lexer_type}") if lexer_conf.postlex: self.lexer = PostLexConnector(self.lexer, lexer_conf.postlex) def _verify_start(self, start=None): if start is None: start_decls = self.parser_conf.start if len(start_decls) > 1: raise ConfigurationError("Lark initialized with more than 1 possible start rule. Must specify which start rule to parse", start_decls) start ,= start_decls elif start not in self.parser_conf.start: raise ConfigurationError("Unknown start rule %s. Must be one of %r" % (start, self.parser_conf.start)) return start def _make_lexer_thread(self, text: str) -> Union[str, LexerThread]: cls = (self.options and self.options._plugins.get('LexerThread')) or LexerThread return text if self.skip_lexer else cls.from_text(self.lexer, text) def parse(self, text: str, start=None, on_error=None): chosen_start = self._verify_start(start) kw = {} if on_error is None else {'on_error': on_error} stream = self._make_lexer_thread(text) return self.parser.parse(stream, chosen_start, **kw) def parse_interactive(self, text: Optional[str]=None, start=None): # TODO BREAK - Change text from Optional[str] to text: str = ''. # Would break behavior of exhaust_lexer(), which currently raises TypeError, and after the change would just return [] chosen_start = self._verify_start(start) if self.parser_conf.parser_type != 'lalr': raise ConfigurationError("parse_interactive() currently only works with parser='lalr' ") stream = self._make_lexer_thread(text) # type: ignore[arg-type] return self.parser.parse_interactive(stream, chosen_start) def _validate_frontend_args(parser, lexer) -> None: assert_config(parser, ('lalr', 'earley', 'cyk')) if not isinstance(lexer, type): # not custom lexer? 
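    # Illustrative pairings from the table below (added note): parser='lalr'
    # accepts lexer='basic' or 'contextual', while e.g. parser='lalr' with
    # lexer='dynamic' fails the assert_config check that follows.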
expected = { 'lalr': ('basic', 'contextual'), 'earley': ('basic', 'dynamic', 'dynamic_complete'), 'cyk': ('basic', ), }[parser] assert_config(lexer, expected, 'Parser %r does not support lexer %%r, expected one of %%s' % parser) def _get_lexer_callbacks(transformer, terminals): result = {} for terminal in terminals: callback = getattr(transformer, terminal.name, None) if callback is not None: result[terminal.name] = callback return result class PostLexConnector: def __init__(self, lexer, postlexer): self.lexer = lexer self.postlexer = postlexer def lex(self, lexer_state, parser_state): i = self.lexer.lex(lexer_state, parser_state) return self.postlexer.process(i) def create_basic_lexer(lexer_conf, parser, postlex, options) -> BasicLexer: cls = (options and options._plugins.get('BasicLexer')) or BasicLexer return cls(lexer_conf) def create_contextual_lexer(lexer_conf: LexerConf, parser, postlex, options) -> ContextualLexer: cls = (options and options._plugins.get('ContextualLexer')) or ContextualLexer parse_table: ParseTableBase[int] = parser._parse_table states: Dict[int, Collection[str]] = {idx:list(t.keys()) for idx, t in parse_table.states.items()} always_accept: Collection[str] = postlex.always_accept if postlex else () return cls(lexer_conf, states, always_accept=always_accept) def create_lalr_parser(lexer_conf: LexerConf, parser_conf: ParserConf, options=None) -> LALR_Parser: debug = options.debug if options else False strict = options.strict if options else False cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser return cls(parser_conf, debug=debug, strict=strict) _parser_creators['lalr'] = create_lalr_parser ###} class EarleyRegexpMatcher: def __init__(self, lexer_conf): self.regexps = {} for t in lexer_conf.terminals: regexp = t.pattern.to_regexp() try: width = get_regexp_width(regexp)[0] except ValueError: raise GrammarError("Bad regexp in token %s: %s" % (t.name, regexp)) else: if width == 0: raise GrammarError("Dynamic Earley doesn't allow zero-width regexps", t) if lexer_conf.use_bytes: regexp = regexp.encode('utf-8') self.regexps[t.name] = lexer_conf.re_module.compile(regexp, lexer_conf.g_regex_flags) def match(self, term, text, index=0): return self.regexps[term.name].match(text, index) def create_earley_parser__dynamic(lexer_conf: LexerConf, parser_conf: ParserConf, **kw): if lexer_conf.callbacks: raise GrammarError("Earley's dynamic lexer doesn't support lexer_callbacks.") earley_matcher = EarleyRegexpMatcher(lexer_conf) return xearley.Parser(lexer_conf, parser_conf, earley_matcher.match, **kw) def _match_earley_basic(term, token): return term.name == token.type def create_earley_parser__basic(lexer_conf: LexerConf, parser_conf: ParserConf, **kw): return earley.Parser(lexer_conf, parser_conf, _match_earley_basic, **kw) def create_earley_parser(lexer_conf: LexerConf, parser_conf: ParserConf, options) -> earley.Parser: resolve_ambiguity = options.ambiguity == 'resolve' debug = options.debug if options else False tree_class = options.tree_class or Tree if options.ambiguity != 'forest' else None extra = {} if lexer_conf.lexer_type == 'dynamic': f = create_earley_parser__dynamic elif lexer_conf.lexer_type == 'dynamic_complete': extra['complete_lex'] = True f = create_earley_parser__dynamic else: f = create_earley_parser__basic return f(lexer_conf, parser_conf, resolve_ambiguity=resolve_ambiguity, debug=debug, tree_class=tree_class, ordered_sets=options.ordered_sets, **extra) class CYK_FrontEnd: def __init__(self, lexer_conf, parser_conf, options=None): 
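        # Descriptive note (added): this front end lexes the whole input up front,
        # parses it with the O(n^3) CYK algorithm over the CNF-converted grammar
        # (see to_cnf in parsers/cyk.py below), and then replays the parse-tree
        # callbacks in _transform()/_apply_callback().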
self.parser = cyk.Parser(parser_conf.rules) self.callbacks = parser_conf.callbacks def parse(self, lexer_thread, start): tokens = list(lexer_thread.lex(None)) tree = self.parser.parse(tokens, start) return self._transform(tree) def _transform(self, tree): subtrees = list(tree.iter_subtrees()) for subtree in subtrees: subtree.children = [self._apply_callback(c) if isinstance(c, Tree) else c for c in subtree.children] return self._apply_callback(tree) def _apply_callback(self, tree): return self.callbacks[tree.rule](tree.children) _parser_creators['earley'] = create_earley_parser _parser_creators['cyk'] = CYK_FrontEnd def _construct_parsing_frontend( parser_type: _ParserArgType, lexer_type: _LexerArgType, lexer_conf, parser_conf, options ): assert isinstance(lexer_conf, LexerConf) assert isinstance(parser_conf, ParserConf) parser_conf.parser_type = parser_type lexer_conf.lexer_type = lexer_type return ParsingFrontend(lexer_conf, parser_conf, options) poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/000077500000000000000000000000001475444614500231065ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/__init__.py000066400000000000000000000000001475444614500252050ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/cyk.py000066400000000000000000000276401475444614500242570ustar00rootroot00000000000000"""This module implements a CYK parser.""" # Author: https://github.com/ehudt (2018) # # Adapted by Erez from collections import defaultdict import itertools from ..exceptions import ParseError from ..lexer import Token from ..tree import Tree from ..grammar import Terminal as T, NonTerminal as NT, Symbol def match(t, s): assert isinstance(t, T) return t.name == s.type class Rule: """Context-free grammar rule.""" def __init__(self, lhs, rhs, weight, alias): super(Rule, self).__init__() assert isinstance(lhs, NT), lhs assert all(isinstance(x, NT) or isinstance(x, T) for x in rhs), rhs self.lhs = lhs self.rhs = rhs self.weight = weight self.alias = alias def __str__(self): return '%s -> %s' % (str(self.lhs), ' '.join(str(x) for x in self.rhs)) def __repr__(self): return str(self) def __hash__(self): return hash((self.lhs, tuple(self.rhs))) def __eq__(self, other): return self.lhs == other.lhs and self.rhs == other.rhs def __ne__(self, other): return not (self == other) class Grammar: """Context-free grammar.""" def __init__(self, rules): self.rules = frozenset(rules) def __eq__(self, other): return self.rules == other.rules def __str__(self): return '\n' + '\n'.join(sorted(repr(x) for x in self.rules)) + '\n' def __repr__(self): return str(self) # Parse tree data structures class RuleNode: """A node in the parse tree, which also contains the full rhs rule.""" def __init__(self, rule, children, weight=0): self.rule = rule self.children = children self.weight = weight def __repr__(self): return 'RuleNode(%s, [%s])' % (repr(self.rule.lhs), ', '.join(str(x) for x in self.children)) class Parser: """Parser wrapper.""" def __init__(self, rules): super(Parser, self).__init__() self.orig_rules = {rule: rule for rule in rules} rules = [self._to_rule(rule) for rule in rules] self.grammar = to_cnf(Grammar(rules)) def _to_rule(self, lark_rule): """Converts a lark rule, (lhs, rhs, callback, options), to a Rule.""" assert isinstance(lark_rule.origin, NT) assert all(isinstance(x, Symbol) for x in lark_rule.expansion) return Rule( lark_rule.origin, lark_rule.expansion, weight=lark_rule.options.priority if lark_rule.options.priority else 0, 
alias=lark_rule) def parse(self, tokenized, start): # pylint: disable=invalid-name """Parses input, which is a list of tokens.""" assert start start = NT(start) table, trees = _parse(tokenized, self.grammar) # Check if the parse succeeded. if all(r.lhs != start for r in table[(0, len(tokenized) - 1)]): raise ParseError('Parsing failed.') parse = trees[(0, len(tokenized) - 1)][start] return self._to_tree(revert_cnf(parse)) def _to_tree(self, rule_node): """Converts a RuleNode parse tree to a lark Tree.""" orig_rule = self.orig_rules[rule_node.rule.alias] children = [] for child in rule_node.children: if isinstance(child, RuleNode): children.append(self._to_tree(child)) else: assert isinstance(child.name, Token) children.append(child.name) t = Tree(orig_rule.origin, children) t.rule=orig_rule return t def print_parse(node, indent=0): if isinstance(node, RuleNode): print(' ' * (indent * 2) + str(node.rule.lhs)) for child in node.children: print_parse(child, indent + 1) else: print(' ' * (indent * 2) + str(node.s)) def _parse(s, g): """Parses sentence 's' using CNF grammar 'g'.""" # The CYK table. Indexed with a 2-tuple: (start pos, end pos) table = defaultdict(set) # Top-level structure is similar to the CYK table. Each cell is a dict from # rule name to the best (lightest) tree for that rule. trees = defaultdict(dict) # Populate base case with existing terminal production rules for i, w in enumerate(s): for terminal, rules in g.terminal_rules.items(): if match(terminal, w): for rule in rules: table[(i, i)].add(rule) if (rule.lhs not in trees[(i, i)] or rule.weight < trees[(i, i)][rule.lhs].weight): trees[(i, i)][rule.lhs] = RuleNode(rule, [T(w)], weight=rule.weight) # Iterate over lengths of sub-sentences for l in range(2, len(s) + 1): # Iterate over sub-sentences with the given length for i in range(len(s) - l + 1): # Choose partition of the sub-sentence in [1, l) for p in range(i + 1, i + l): span1 = (i, p - 1) span2 = (p, i + l - 1) for r1, r2 in itertools.product(table[span1], table[span2]): for rule in g.nonterminal_rules.get((r1.lhs, r2.lhs), []): table[(i, i + l - 1)].add(rule) r1_tree = trees[span1][r1.lhs] r2_tree = trees[span2][r2.lhs] rule_total_weight = rule.weight + r1_tree.weight + r2_tree.weight if (rule.lhs not in trees[(i, i + l - 1)] or rule_total_weight < trees[(i, i + l - 1)][rule.lhs].weight): trees[(i, i + l - 1)][rule.lhs] = RuleNode(rule, [r1_tree, r2_tree], weight=rule_total_weight) return table, trees # This section implements context-free grammar converter to Chomsky normal form. # It also implements a conversion of parse trees from its CNF to the original # grammar. # Overview: # Applies the following operations in this order: # * TERM: Eliminates non-solitary terminals from all rules # * BIN: Eliminates rules with more than 2 symbols on their right-hand-side. # * UNIT: Eliminates non-terminal unit rules # # The following grammar characteristics aren't featured: # * Start symbol appears on RHS # * Empty rules (epsilon rules) class CnfWrapper: """CNF wrapper for grammar. Validates that the input grammar is CNF and provides helper data structures. """ def __init__(self, grammar): super(CnfWrapper, self).__init__() self.grammar = grammar self.rules = grammar.rules self.terminal_rules = defaultdict(list) self.nonterminal_rules = defaultdict(list) for r in self.rules: # Validate that the grammar is CNF and populate auxiliary data structures. 
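            # CNF admits exactly two rule shapes (illustrative examples, added):
            #     A -> "x"    (a single terminal)     -> terminal_rules
            #     A -> B C    (two nonterminals)      -> nonterminal_rules
            # an rhs that isn't of length 1 or 2 raises a ParseError below, and
            # any other shape trips the final assert.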
assert isinstance(r.lhs, NT), r if len(r.rhs) not in [1, 2]: raise ParseError("CYK doesn't support empty rules") if len(r.rhs) == 1 and isinstance(r.rhs[0], T): self.terminal_rules[r.rhs[0]].append(r) elif len(r.rhs) == 2 and all(isinstance(x, NT) for x in r.rhs): self.nonterminal_rules[tuple(r.rhs)].append(r) else: assert False, r def __eq__(self, other): return self.grammar == other.grammar def __repr__(self): return repr(self.grammar) class UnitSkipRule(Rule): """A rule that records NTs that were skipped during transformation.""" def __init__(self, lhs, rhs, skipped_rules, weight, alias): super(UnitSkipRule, self).__init__(lhs, rhs, weight, alias) self.skipped_rules = skipped_rules def __eq__(self, other): return isinstance(other, type(self)) and self.skipped_rules == other.skipped_rules __hash__ = Rule.__hash__ def build_unit_skiprule(unit_rule, target_rule): skipped_rules = [] if isinstance(unit_rule, UnitSkipRule): skipped_rules += unit_rule.skipped_rules skipped_rules.append(target_rule) if isinstance(target_rule, UnitSkipRule): skipped_rules += target_rule.skipped_rules return UnitSkipRule(unit_rule.lhs, target_rule.rhs, skipped_rules, weight=unit_rule.weight + target_rule.weight, alias=unit_rule.alias) def get_any_nt_unit_rule(g): """Returns a non-terminal unit rule from 'g', or None if there is none.""" for rule in g.rules: if len(rule.rhs) == 1 and isinstance(rule.rhs[0], NT): return rule return None def _remove_unit_rule(g, rule): """Removes 'rule' from 'g' without changing the language produced by 'g'.""" new_rules = [x for x in g.rules if x != rule] refs = [x for x in g.rules if x.lhs == rule.rhs[0]] new_rules += [build_unit_skiprule(rule, ref) for ref in refs] return Grammar(new_rules) def _split(rule): """Splits a rule whose len(rhs) > 2 into shorter rules.""" rule_str = str(rule.lhs) + '__' + '_'.join(str(x) for x in rule.rhs) rule_name = '__SP_%s' % (rule_str) + '_%d' yield Rule(rule.lhs, [rule.rhs[0], NT(rule_name % 1)], weight=rule.weight, alias=rule.alias) for i in range(1, len(rule.rhs) - 2): yield Rule(NT(rule_name % i), [rule.rhs[i], NT(rule_name % (i + 1))], weight=0, alias='Split') yield Rule(NT(rule_name % (len(rule.rhs) - 2)), rule.rhs[-2:], weight=0, alias='Split') def _term(g): """Applies the TERM rule on 'g' (see top comment).""" all_t = {x for rule in g.rules for x in rule.rhs if isinstance(x, T)} t_rules = {t: Rule(NT('__T_%s' % str(t)), [t], weight=0, alias='Term') for t in all_t} new_rules = [] for rule in g.rules: if len(rule.rhs) > 1 and any(isinstance(x, T) for x in rule.rhs): new_rhs = [t_rules[x].lhs if isinstance(x, T) else x for x in rule.rhs] new_rules.append(Rule(rule.lhs, new_rhs, weight=rule.weight, alias=rule.alias)) new_rules.extend(v for k, v in t_rules.items() if k in rule.rhs) else: new_rules.append(rule) return Grammar(new_rules) def _bin(g): """Applies the BIN rule to 'g' (see top comment).""" new_rules = [] for rule in g.rules: if len(rule.rhs) > 2: new_rules += _split(rule) else: new_rules.append(rule) return Grammar(new_rules) def _unit(g): """Applies the UNIT rule to 'g' (see top comment).""" nt_unit_rule = get_any_nt_unit_rule(g) while nt_unit_rule: g = _remove_unit_rule(g, nt_unit_rule) nt_unit_rule = get_any_nt_unit_rule(g) return g def to_cnf(g): """Creates a CNF grammar from a general context-free grammar 'g'.""" g = _unit(_bin(_term(g))) return CnfWrapper(g) def unroll_unit_skiprule(lhs, orig_rhs, skipped_rules, children, weight, alias): if not skipped_rules: return RuleNode(Rule(lhs, orig_rhs, weight=weight, alias=alias), 
children, weight=weight) else: weight = weight - skipped_rules[0].weight return RuleNode( Rule(lhs, [skipped_rules[0].lhs], weight=weight, alias=alias), [ unroll_unit_skiprule(skipped_rules[0].lhs, orig_rhs, skipped_rules[1:], children, skipped_rules[0].weight, skipped_rules[0].alias) ], weight=weight) def revert_cnf(node): """Reverts a parse tree (RuleNode) to its original non-CNF form (Node).""" if isinstance(node, T): return node # Reverts TERM rule. if node.rule.lhs.name.startswith('__T_'): return node.children[0] else: children = [] for child in map(revert_cnf, node.children): # Reverts BIN rule. if isinstance(child, RuleNode) and child.rule.lhs.name.startswith('__SP_'): children += child.children else: children.append(child) # Reverts UNIT rule. if isinstance(node.rule, UnitSkipRule): return unroll_unit_skiprule(node.rule.lhs, node.rule.rhs, node.rule.skipped_rules, children, node.rule.weight, node.rule.alias) else: return RuleNode(node.rule, children) poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/earley.py000066400000000000000000000354151475444614500247510ustar00rootroot00000000000000"""This module implements an Earley parser. The core Earley algorithm used here is based on Elizabeth Scott's implementation, here: https://www.sciencedirect.com/science/article/pii/S1571066108001497 That is probably the best reference for understanding the algorithm here. The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format is explained here: https://lark-parser.readthedocs.io/en/latest/_static/sppf/sppf.html """ from typing import TYPE_CHECKING, Callable, Optional, List, Any from collections import deque from ..lexer import Token from ..tree import Tree from ..exceptions import UnexpectedEOF, UnexpectedToken from ..utils import logger, OrderedSet, dedup_list from .grammar_analysis import GrammarAnalyzer from ..grammar import NonTerminal from .earley_common import Item from .earley_forest import ForestSumVisitor, SymbolNode, StableSymbolNode, TokenNode, ForestToParseTree if TYPE_CHECKING: from ..common import LexerConf, ParserConf class Parser: lexer_conf: 'LexerConf' parser_conf: 'ParserConf' debug: bool def __init__(self, lexer_conf: 'LexerConf', parser_conf: 'ParserConf', term_matcher: Callable, resolve_ambiguity: bool=True, debug: bool=False, tree_class: Optional[Callable[[str, List], Any]]=Tree, ordered_sets: bool=True): analysis = GrammarAnalyzer(parser_conf) self.lexer_conf = lexer_conf self.parser_conf = parser_conf self.resolve_ambiguity = resolve_ambiguity self.debug = debug self.Tree = tree_class self.Set = OrderedSet if ordered_sets else set self.SymbolNode = StableSymbolNode if ordered_sets else SymbolNode self.FIRST = analysis.FIRST self.NULLABLE = analysis.NULLABLE self.callbacks = parser_conf.callbacks # TODO add typing info self.predictions = {} # type: ignore[var-annotated] ## These could be moved to the grammar analyzer. Pre-computing these is *much* faster than # the slow 'isupper' in is_terminal. self.TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if sym.is_term } self.NON_TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if not sym.is_term } self.forest_sum_visitor = None for rule in parser_conf.rules: if rule.origin not in self.predictions: self.predictions[rule.origin] = [x.rule for x in analysis.expand_rule(rule.origin)] ## Detect if any rules/terminals have priorities set. 
If the user specified priority = None, then
            # the priorities will be stripped from all rules/terminals before they reach us, allowing us to
            # skip the extra tree walk. We'll also skip this if the user just didn't specify priorities
            # on any rules/terminals.
            if self.forest_sum_visitor is None and rule.options.priority is not None:
                self.forest_sum_visitor = ForestSumVisitor

        # Check terminals for priorities
        # Ignore terminal priorities if the basic lexer is used
        if self.lexer_conf.lexer_type != 'basic' and self.forest_sum_visitor is None:
            for term in self.lexer_conf.terminals:
                if term.priority:
                    self.forest_sum_visitor = ForestSumVisitor
                    break

        self.term_matcher = term_matcher

    def predict_and_complete(self, i, to_scan, columns, transitives):
        """The core Earley Predictor and Completer.

        At each stage of the input, we handle any completed items (things
        that matched on the last cycle) and use those to predict what should
        come next in the input stream. The completions and any predicted
        non-terminals are recursively processed until we reach a set of items
        that expect a terminal next, which can be added to the scan list for
        the next scanner cycle."""
        # Held Completions (H in E. Scott's paper).
        node_cache = {}
        held_completions = {}

        column = columns[i]
        # R (items) = Ei (column.items)
        items = deque(column)
        while items:
            item = items.pop()    # remove an element, A say, from R

            ### The Earley completer
            if item.is_complete:   ### (item.s == string)
                if item.node is None:
                    label = (item.s, item.start, i)
                    item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label))
                    item.node.add_family(item.s, item.rule, item.start, None, None)

                # create_leo_transitives(item.rule.origin, item.start)

                ###R Joop Leo right recursion Completer
                if item.rule.origin in transitives[item.start]:
                    transitive = transitives[item.start][item.s]
                    if transitive.previous in transitives[transitive.column]:
                        root_transitive = transitives[transitive.column][transitive.previous]
                    else:
                        root_transitive = transitive

                    new_item = Item(transitive.rule, transitive.ptr, transitive.start)
                    label = (root_transitive.s, root_transitive.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label))
                    new_item.node.add_path(root_transitive, item.node)
                    if new_item.expect in self.TERMINALS:
                        # Add (B :: aC.B, h, y) to Q
                        to_scan.add(new_item)
                    elif new_item not in column:
                        # Add (B :: aC.B, h, y) to Ei and R
                        column.add(new_item)
                        items.append(new_item)
                ###R Regular Earley completer
                else:
                    # Empty has 0 length. If we complete an empty symbol in a particular
                    # parse step, we need to be able to use that same empty symbol to complete
                    # any predictions that result, that themselves require empty. Avoids
                    # infinite recursion on empty symbols.
                    # held_completions is 'H' in E. Scott's paper.
is_empty_item = item.start == i if is_empty_item: held_completions[item.rule.origin] = item.node originators = [originator for originator in columns[item.start] if originator.expect is not None and originator.expect == item.s] for originator in originators: new_item = originator.advance() label = (new_item.s, originator.start, i) new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) new_item.node.add_family(new_item.s, new_item.rule, i, originator.node, item.node) if new_item.expect in self.TERMINALS: # Add (B :: aC.B, h, y) to Q to_scan.add(new_item) elif new_item not in column: # Add (B :: aC.B, h, y) to Ei and R column.add(new_item) items.append(new_item) ### The Earley predictor elif item.expect in self.NON_TERMINALS: ### (item.s == lr0) new_items = [] for rule in self.predictions[item.expect]: new_item = Item(rule, 0, i) new_items.append(new_item) # Process any held completions (H). if item.expect in held_completions: new_item = item.advance() label = (new_item.s, item.start, i) new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) new_item.node.add_family(new_item.s, new_item.rule, new_item.start, item.node, held_completions[item.expect]) new_items.append(new_item) for new_item in new_items: if new_item.expect in self.TERMINALS: to_scan.add(new_item) elif new_item not in column: column.add(new_item) items.append(new_item) def _parse(self, lexer, columns, to_scan, start_symbol=None): def is_quasi_complete(item): if item.is_complete: return True quasi = item.advance() while not quasi.is_complete: if quasi.expect not in self.NULLABLE: return False if quasi.rule.origin == start_symbol and quasi.expect == start_symbol: return False quasi = quasi.advance() return True # def create_leo_transitives(origin, start): # ... # removed at commit 4c1cfb2faf24e8f8bff7112627a00b94d261b420 def scan(i, token, to_scan): """The core Earley Scanner. This is a custom implementation of the scanner that uses the Lark lexer to match tokens. The scan list is built by the Earley predictor, based on the previously completed tokens. 
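            (Concretely, the lexer is only asked to match the terminals currently
            expected by some Earley item, i.e. {item.expect for item in to_scan};
            if the token advances none of them, scan() raises UnexpectedToken.)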
This ensures that at each phase of the parse we have a custom lexer context, allowing for more complex ambiguities.""" next_to_scan = self.Set() next_set = self.Set() columns.append(next_set) transitives.append({}) node_cache = {} for item in self.Set(to_scan): if match(item.expect, token): new_item = item.advance() label = (new_item.s, new_item.start, i) # 'terminals' may not contain token.type when using %declare # Additionally, token is not always a Token # For example, it can be a Tree when using TreeMatcher term = terminals.get(token.type) if isinstance(token, Token) else None # Set the priority of the token node to 0 so that the # terminal priorities do not affect the Tree chosen by # ForestSumVisitor after the basic lexer has already # "used up" the terminal priorities token_node = TokenNode(token, term, priority=0) new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token_node) if new_item.expect in self.TERMINALS: # add (B ::= Aai+1.B, h, y) to Q' next_to_scan.add(new_item) else: # add (B ::= Aa+1.B, h, y) to Ei+1 next_set.add(new_item) if not next_set and not next_to_scan: expect = {i.expect.name for i in to_scan} raise UnexpectedToken(token, expect, considered_rules=set(to_scan), state=frozenset(i.s for i in to_scan)) return next_to_scan # Define parser functions match = self.term_matcher terminals = self.lexer_conf.terminals_by_name # Cache for nodes & tokens created in a particular parse step. transitives = [{}] ## The main Earley loop. # Run the Prediction/Completion cycle for any Items in the current Earley set. # Completions will be added to the SPPF tree, and predictions will be recursively # processed down to terminals/empty nodes to be added to the scanner for the next # step. expects = {i.expect for i in to_scan} i = 0 for token in lexer.lex(expects): self.predict_and_complete(i, to_scan, columns, transitives) to_scan = scan(i, token, to_scan) i += 1 expects.clear() expects |= {i.expect for i in to_scan} self.predict_and_complete(i, to_scan, columns, transitives) ## Column is now the final column in the parse. assert i == len(columns)-1 return to_scan def parse(self, lexer, start): assert start, start start_symbol = NonTerminal(start) columns = [self.Set()] to_scan = self.Set() # The scan buffer. 'Q' in E.Scott's paper. ## Predict for the start_symbol. # Add predicted items to the first Earley set (for the predictor) if they # result in a non-terminal, or the scanner if they result in a terminal. for rule in self.predictions[start_symbol]: item = Item(rule, 0, 0) if item.expect in self.TERMINALS: to_scan.add(item) else: columns[0].add(item) to_scan = self._parse(lexer, columns, to_scan, start_symbol) # If the parse was successful, the start # symbol should have been completed in the last step of the Earley cycle, and will be in # this column. Find the item for the start_symbol, which is the root of the SPPF tree. 
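        # (Added note: concretely, we keep the SPPF nodes of completed items for
        #  start_symbol that span the whole input -- item.start == 0 in the final
        #  column -- dropping duplicates while preserving their order.)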
        solutions = dedup_list(n.node for n in columns[-1] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0)
        if not solutions:
            expected_terminals = [t.expect.name for t in to_scan]
            raise UnexpectedEOF(expected_terminals, state=frozenset(i.s for i in to_scan))

        if self.debug:
            from .earley_forest import ForestToPyDotVisitor
            try:
                debug_walker = ForestToPyDotVisitor()
            except ImportError:
                logger.warning("Cannot find dependency 'pydot', will not generate sppf debug image")
            else:
                for i, s in enumerate(solutions):
                    debug_walker.visit(s, f"sppf{i}.png")

        if self.Tree is not None:
            # Perform our SPPF -> AST conversion
            # Disable the ForestToParseTree cache when ambiguity='resolve'
            # to prevent a tree construction bug. See issue #1283
            use_cache = not self.resolve_ambiguity
            transformer = ForestToParseTree(self.Tree, self.callbacks, self.forest_sum_visitor and self.forest_sum_visitor(), self.resolve_ambiguity, use_cache)
            solutions = [transformer.transform(s) for s in solutions]
            if len(solutions) > 1 and not self.resolve_ambiguity:
                t: Tree = self.Tree('_ambig', solutions)
                t.expand_kids_by_data('_ambig')  # solutions may themselves be _ambig nodes
                return t
            return solutions[0]

        # return the root of the SPPF
        # TODO return a list of solutions, or join them together somehow
        return solutions[0]
poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/earley_common.py000066400000000000000000000031241475444614500263110ustar00rootroot00000000000000"""This module implements useful building blocks for the Earley parser."""


class Item:
    "An Earley Item, the atom of the algorithm."

    __slots__ = ('s', 'rule', 'ptr', 'start', 'is_complete', 'expect', 'previous', 'node', '_hash')
    def __init__(self, rule, ptr, start):
        self.is_complete = len(rule.expansion) == ptr
        self.rule = rule    # rule
        self.ptr = ptr      # ptr
        self.start = start  # j
        self.node = None    # w
        if self.is_complete:
            self.s = rule.origin
            self.expect = None
            self.previous = rule.expansion[ptr - 1] if ptr > 0 and len(rule.expansion) else None
        else:
            self.s = (rule, ptr)
            self.expect = rule.expansion[ptr]
            self.previous = rule.expansion[ptr - 1] if ptr > 0 and len(rule.expansion) else None
        self._hash = hash((self.s, self.start, self.rule))

    def advance(self):
        return Item(self.rule, self.ptr + 1, self.start)

    def __eq__(self, other):
        return self is other or (self.s == other.s and self.start == other.start and self.rule == other.rule)

    def __hash__(self):
        return self._hash

    def __repr__(self):
        before = ( expansion.name for expansion in self.rule.expansion[:self.ptr] )
        after = ( expansion.name for expansion in self.rule.expansion[self.ptr:] )
        symbol = "{} ::= {}* {}".format(self.rule.origin.name, ' '.join(before), ' '.join(after))
        return '%s (%d)' % (symbol, self.start)

# class TransitiveItem(Item):
#   ... removed at commit 4c1cfb2faf24e8f8bff7112627a00b94d261b420
poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/earley_forest.py000066400000000000000000000751441475444614500263360ustar00rootroot00000000000000"""This module implements a Shared Packed Parse Forest (SPPF).

It is used as the primary output mechanism for the Earley parser
in order to store complex ambiguities.
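A hedged usage sketch (via the public ``lark`` API; the grammar is made up
here and the exact tree shape is not asserted): parsing with
``ambiguity='explicit'`` keeps the forest's ambiguities as ``_ambig`` nodes:

    from lark import Lark

    parser = Lark('''
        start: item
        item: "x" | sub
        sub: "x"
    ''', ambiguity='explicit')
    tree = parser.parse("x")   # both derivations of 'item' survive,
                               # wrapped in an _ambig subtree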
Full reference and more details are here:
https://web.archive.org/web/20190616123959/http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/
"""

from typing import Type, AbstractSet
from random import randint
from collections import deque
from operator import attrgetter
from importlib import import_module
from functools import partial

from ..parse_tree_builder import AmbiguousIntermediateExpander
from ..visitors import Discard
from ..utils import logger, OrderedSet
from ..tree import Tree


class ForestNode:
    pass

class SymbolNode(ForestNode):
    """
    A Symbol Node represents a symbol (or Intermediate LR0).

    Symbol nodes are keyed by the symbol (s). For intermediate nodes
    s will be an LR0, stored as a tuple of (rule, ptr). For completed symbol
    nodes, s will be a string representing the non-terminal origin (i.e.
    the left hand side of the rule).

    The children of a Symbol or Intermediate Node will always be Packed Nodes,
    with each Packed Node child representing a single derivation of a production.

    Hence a Symbol Node with a single child is unambiguous.

    Parameters:
        s: A Symbol, or a tuple of (rule, ptr) for an intermediate node.
        start: For dynamic lexers, the index of the start of the substring matched by this symbol (inclusive).
        end: For dynamic lexers, the index of the end of the substring matched by this symbol (exclusive).

    Properties:
        is_intermediate: True if this node is an intermediate node.
        priority: The priority of the node's symbol.
    """
    Set: Type[AbstractSet] = set  # Overridden by StableSymbolNode
    __slots__ = ('s', 'start', 'end', '_children', 'paths', 'paths_loaded', 'priority', 'is_intermediate')
    def __init__(self, s, start, end):
        self.s = s
        self.start = start
        self.end = end
        self._children = self.Set()
        self.paths = self.Set()
        self.paths_loaded = False

        ### We use inf here as it can be safely negated without resorting to conditionals,
        #   unlike None or float('NaN'), and sorts appropriately.
self.priority = float('-inf') self.is_intermediate = isinstance(s, tuple) def add_family(self, lr0, rule, start, left, right): self._children.add(PackedNode(self, lr0, rule, start, left, right)) def add_path(self, transitive, node): self.paths.add((transitive, node)) def load_paths(self): for transitive, node in self.paths: if transitive.next_titem is not None: vn = type(self)(transitive.next_titem.s, transitive.next_titem.start, self.end) vn.add_path(transitive.next_titem, node) self.add_family(transitive.reduction.rule.origin, transitive.reduction.rule, transitive.reduction.start, transitive.reduction.node, vn) else: self.add_family(transitive.reduction.rule.origin, transitive.reduction.rule, transitive.reduction.start, transitive.reduction.node, node) self.paths_loaded = True @property def is_ambiguous(self): """Returns True if this node is ambiguous.""" return len(self.children) > 1 @property def children(self): """Returns a list of this node's children sorted from greatest to least priority.""" if not self.paths_loaded: self.load_paths() return sorted(self._children, key=attrgetter('sort_key')) def __iter__(self): return iter(self._children) def __repr__(self): if self.is_intermediate: rule = self.s[0] ptr = self.s[1] before = ( expansion.name for expansion in rule.expansion[:ptr] ) after = ( expansion.name for expansion in rule.expansion[ptr:] ) symbol = "{} ::= {}* {}".format(rule.origin.name, ' '.join(before), ' '.join(after)) else: symbol = self.s.name return "({}, {}, {}, {})".format(symbol, self.start, self.end, self.priority) class StableSymbolNode(SymbolNode): "A version of SymbolNode that uses OrderedSet for output stability" Set = OrderedSet class PackedNode(ForestNode): """ A Packed Node represents a single derivation in a symbol node. Parameters: rule: The rule associated with this node. parent: The parent of this node. left: The left child of this node. ``None`` if one does not exist. right: The right child of this node. ``None`` if one does not exist. priority: The priority of this node. """ __slots__ = ('parent', 's', 'rule', 'start', 'left', 'right', 'priority', '_hash') def __init__(self, parent, s, rule, start, left, right): self.parent = parent self.s = s self.start = start self.rule = rule self.left = left self.right = right self.priority = float('-inf') self._hash = hash((self.left, self.right)) @property def is_empty(self): return self.left is None and self.right is None @property def sort_key(self): """ Used to sort PackedNode children of SymbolNodes. A SymbolNode has multiple PackedNodes if it matched ambiguously. Hence, we use the sort order to identify the order in which ambiguous children should be considered. 
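        For example, with the key (is_empty, -priority, rule.order) computed
        below, a non-empty derivation with priority 2 sorts before one with
        priority 1, and ties fall back to the rule's original order in the
        grammar.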
""" return self.is_empty, -self.priority, self.rule.order @property def children(self): """Returns a list of this node's children.""" return [x for x in [self.left, self.right] if x is not None] def __iter__(self): yield self.left yield self.right def __eq__(self, other): if not isinstance(other, PackedNode): return False return self is other or (self.left == other.left and self.right == other.right) def __hash__(self): return self._hash def __repr__(self): if isinstance(self.s, tuple): rule = self.s[0] ptr = self.s[1] before = ( expansion.name for expansion in rule.expansion[:ptr] ) after = ( expansion.name for expansion in rule.expansion[ptr:] ) symbol = "{} ::= {}* {}".format(rule.origin.name, ' '.join(before), ' '.join(after)) else: symbol = self.s.name return "({}, {}, {}, {})".format(symbol, self.start, self.priority, self.rule.order) class TokenNode(ForestNode): """ A Token Node represents a matched terminal and is always a leaf node. Parameters: token: The Token associated with this node. term: The TerminalDef matched by the token. priority: The priority of this node. """ __slots__ = ('token', 'term', 'priority', '_hash') def __init__(self, token, term, priority=None): self.token = token self.term = term if priority is not None: self.priority = priority else: self.priority = term.priority if term is not None else 0 self._hash = hash(token) def __eq__(self, other): if not isinstance(other, TokenNode): return False return self is other or (self.token == other.token) def __hash__(self): return self._hash def __repr__(self): return repr(self.token) class ForestVisitor: """ An abstract base class for building forest visitors. This class performs a controllable depth-first walk of an SPPF. The visitor will not enter cycles and will backtrack if one is encountered. Subclasses are notified of cycles through the ``on_cycle`` method. Behavior for visit events is defined by overriding the ``visit*node*`` functions. The walk is controlled by the return values of the ``visit*node_in`` methods. Returning a node(s) will schedule them to be visited. The visitor will begin to backtrack if no nodes are returned. Parameters: single_visit: If ``True``, non-Token nodes will only be visited once. """ def __init__(self, single_visit=False): self.single_visit = single_visit def visit_token_node(self, node): """Called when a ``Token`` is visited. ``Token`` nodes are always leaves.""" pass def visit_symbol_node_in(self, node): """Called when a symbol node is visited. Nodes that are returned will be scheduled to be visited. If ``visit_intermediate_node_in`` is not implemented, this function will be called for intermediate nodes as well.""" pass def visit_symbol_node_out(self, node): """Called after all nodes returned from a corresponding ``visit_symbol_node_in`` call have been visited. If ``visit_intermediate_node_out`` is not implemented, this function will be called for intermediate nodes as well.""" pass def visit_packed_node_in(self, node): """Called when a packed node is visited. Nodes that are returned will be scheduled to be visited. """ pass def visit_packed_node_out(self, node): """Called after all nodes returned from a corresponding ``visit_packed_node_in`` call have been visited.""" pass def on_cycle(self, node, path): """Called when a cycle is encountered. Parameters: node: The node that causes a cycle. path: The list of nodes being visited: nodes that have been entered but not exited. The first element is the root in a forest visit, and the last element is the node visited most recently. 
``path`` should be treated as read-only. """ pass def get_cycle_in_path(self, node, path): """A utility function for use in ``on_cycle`` to obtain a slice of ``path`` that only contains the nodes that make up the cycle.""" index = len(path) - 1 while id(path[index]) != id(node): index -= 1 return path[index:] def visit(self, root): # Visiting is a list of IDs of all symbol/intermediate nodes currently in # the stack. It serves two purposes: to detect when we 'recurse' in and out # of a symbol/intermediate so that we can process both up and down. Also, # since the SPPF can have cycles it allows us to detect if we're trying # to recurse into a node that's already on the stack (infinite recursion). visiting = set() # set of all nodes that have been visited visited = set() # a list of nodes that are currently being visited # used for the `on_cycle` callback path = [] # We do not use recursion here to walk the Forest due to the limited # stack size in python. Therefore input_stack is essentially our stack. input_stack = deque([root]) # It is much faster to cache these as locals since they are called # many times in large parses. vpno = getattr(self, 'visit_packed_node_out') vpni = getattr(self, 'visit_packed_node_in') vsno = getattr(self, 'visit_symbol_node_out') vsni = getattr(self, 'visit_symbol_node_in') vino = getattr(self, 'visit_intermediate_node_out', vsno) vini = getattr(self, 'visit_intermediate_node_in', vsni) vtn = getattr(self, 'visit_token_node') oc = getattr(self, 'on_cycle') while input_stack: current = next(reversed(input_stack)) try: next_node = next(current) except StopIteration: input_stack.pop() continue except TypeError: ### If the current object is not an iterator, pass through to Token/SymbolNode pass else: if next_node is None: continue if id(next_node) in visiting: oc(next_node, path) continue input_stack.append(next_node) continue if isinstance(current, TokenNode): vtn(current.token) input_stack.pop() continue current_id = id(current) if current_id in visiting: if isinstance(current, PackedNode): vpno(current) elif current.is_intermediate: vino(current) else: vsno(current) input_stack.pop() path.pop() visiting.remove(current_id) visited.add(current_id) elif self.single_visit and current_id in visited: input_stack.pop() else: visiting.add(current_id) path.append(current) if isinstance(current, PackedNode): next_node = vpni(current) elif current.is_intermediate: next_node = vini(current) else: next_node = vsni(current) if next_node is None: continue if not isinstance(next_node, ForestNode): next_node = iter(next_node) elif id(next_node) in visiting: oc(next_node, path) continue input_stack.append(next_node) class ForestTransformer(ForestVisitor): """The base class for a bottom-up forest transformation. Most users will want to use ``TreeForestTransformer`` instead as it has a friendlier interface and covers most use cases. Transformations are applied via inheritance and overriding of the ``transform*node`` methods. ``transform_token_node`` receives a ``Token`` as an argument. All other methods receive the node that is being transformed and a list of the results of the transformations of that node's children. The return value of these methods are the resulting transformations. If ``Discard`` is raised in a node's transformation, no data from that node will be passed to its parent's transformation. 
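    Example (an added sketch, not part of the upstream API): counting how
    many derivations an SPPF encodes by folding it bottom-up:

        from math import prod

        class DerivationCounter(ForestTransformer):
            def transform_token_node(self, node):
                return 1            # a matched token: exactly one way
            def transform_packed_node(self, node, data):
                return prod(data)   # one derivation: multiply its parts
            def transform_intermediate_node(self, node, data):
                return sum(data)    # alternative derivations add up
            def transform_symbol_node(self, node, data):
                return sum(data)    # alternative derivations add up

        # DerivationCounter().transform(root) then yields an int >= 1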
""" def __init__(self): super(ForestTransformer, self).__init__() # results of transformations self.data = dict() # used to track parent nodes self.node_stack = deque() def transform(self, root): """Perform a transformation on an SPPF.""" self.node_stack.append('result') self.data['result'] = [] self.visit(root) assert len(self.data['result']) <= 1 if self.data['result']: return self.data['result'][0] def transform_symbol_node(self, node, data): """Transform a symbol node.""" return node def transform_intermediate_node(self, node, data): """Transform an intermediate node.""" return node def transform_packed_node(self, node, data): """Transform a packed node.""" return node def transform_token_node(self, node): """Transform a ``Token``.""" return node def visit_symbol_node_in(self, node): self.node_stack.append(id(node)) self.data[id(node)] = [] return node.children def visit_packed_node_in(self, node): self.node_stack.append(id(node)) self.data[id(node)] = [] return node.children def visit_token_node(self, node): transformed = self.transform_token_node(node) if transformed is not Discard: self.data[self.node_stack[-1]].append(transformed) def _visit_node_out_helper(self, node, method): self.node_stack.pop() transformed = method(node, self.data[id(node)]) if transformed is not Discard: self.data[self.node_stack[-1]].append(transformed) del self.data[id(node)] def visit_symbol_node_out(self, node): self._visit_node_out_helper(node, self.transform_symbol_node) def visit_intermediate_node_out(self, node): self._visit_node_out_helper(node, self.transform_intermediate_node) def visit_packed_node_out(self, node): self._visit_node_out_helper(node, self.transform_packed_node) class ForestSumVisitor(ForestVisitor): """ A visitor for prioritizing ambiguous parts of the Forest. This visitor is used when support for explicit priorities on rules is requested (whether normal, or invert). It walks the forest (or subsets thereof) and cascades properties upwards from the leaves. It would be ideal to do this during parsing, however this would require processing each Earley item multiple times. That's a big performance drawback; so running a forest walk is the lesser of two evils: there can be significantly more Earley items created during parsing than there are SPPF nodes in the final tree. """ def __init__(self): super(ForestSumVisitor, self).__init__(single_visit=True) def visit_packed_node_in(self, node): yield node.left yield node.right def visit_symbol_node_in(self, node): return iter(node.children) def visit_packed_node_out(self, node): priority = node.rule.options.priority if not node.parent.is_intermediate and node.rule.options.priority else 0 priority += getattr(node.right, 'priority', 0) priority += getattr(node.left, 'priority', 0) node.priority = priority def visit_symbol_node_out(self, node): node.priority = max(child.priority for child in node.children) class PackedData(): """Used in transformationss of packed nodes to distinguish the data that comes from the left child and the right child. """ class _NoData(): pass NO_DATA = _NoData() def __init__(self, node, data): self.left = self.NO_DATA self.right = self.NO_DATA if data: if node.left is not None: self.left = data[0] if len(data) > 1: self.right = data[1] else: self.right = data[0] class ForestToParseTree(ForestTransformer): """Used by the earley parser when ambiguity equals 'resolve' or 'explicit'. Transforms an SPPF into an (ambiguous) parse tree. 
Parameters: tree_class: The tree class to use for construction callbacks: A dictionary of rules to functions that output a tree prioritizer: A ``ForestVisitor`` that manipulates the priorities of ForestNodes resolve_ambiguity: If True, ambiguities will be resolved based on priorities. Otherwise, `_ambig` nodes will be in the resulting tree. use_cache: If True, the results of packed node transformations will be cached. """ def __init__(self, tree_class=Tree, callbacks=dict(), prioritizer=ForestSumVisitor(), resolve_ambiguity=True, use_cache=True): super(ForestToParseTree, self).__init__() self.tree_class = tree_class self.callbacks = callbacks self.prioritizer = prioritizer self.resolve_ambiguity = resolve_ambiguity self._use_cache = use_cache self._cache = {} self._on_cycle_retreat = False self._cycle_node = None self._successful_visits = set() def visit(self, root): if self.prioritizer: self.prioritizer.visit(root) super(ForestToParseTree, self).visit(root) self._cache = {} def on_cycle(self, node, path): logger.debug("Cycle encountered in the SPPF at node: %s. " "As infinite ambiguities cannot be represented in a tree, " "this family of derivations will be discarded.", node) self._cycle_node = node self._on_cycle_retreat = True def _check_cycle(self, node): if self._on_cycle_retreat: if id(node) == id(self._cycle_node) or id(node) in self._successful_visits: self._cycle_node = None self._on_cycle_retreat = False else: return Discard def _collapse_ambig(self, children): new_children = [] for child in children: if hasattr(child, 'data') and child.data == '_ambig': new_children += child.children else: new_children.append(child) return new_children def _call_rule_func(self, node, data): # called when transforming children of symbol nodes # data is a list of trees or tokens that correspond to the # symbol's rule expansion return self.callbacks[node.rule](data) def _call_ambig_func(self, node, data): # called when transforming a symbol node # data is a list of trees where each tree's data is # equal to the name of the symbol or one of its aliases. 
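        # e.g. (hypothetical) a symbol 'item' with two surviving derivations
        # arrives here as data == [Tree('item', [...]), Tree('item', [...])]
        # and is wrapped below into Tree('_ambig', data); a single derivation
        # passes through unchanged.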
if len(data) > 1: return self.tree_class('_ambig', data) elif data: return data[0] return Discard def transform_symbol_node(self, node, data): if id(node) not in self._successful_visits: return Discard r = self._check_cycle(node) if r is Discard: return r self._successful_visits.remove(id(node)) data = self._collapse_ambig(data) return self._call_ambig_func(node, data) def transform_intermediate_node(self, node, data): if id(node) not in self._successful_visits: return Discard r = self._check_cycle(node) if r is Discard: return r self._successful_visits.remove(id(node)) if len(data) > 1: children = [self.tree_class('_inter', c) for c in data] return self.tree_class('_iambig', children) return data[0] def transform_packed_node(self, node, data): r = self._check_cycle(node) if r is Discard: return r if self.resolve_ambiguity and id(node.parent) in self._successful_visits: return Discard if self._use_cache and id(node) in self._cache: return self._cache[id(node)] children = [] assert len(data) <= 2 data = PackedData(node, data) if data.left is not PackedData.NO_DATA: if node.left.is_intermediate and isinstance(data.left, list): children += data.left else: children.append(data.left) if data.right is not PackedData.NO_DATA: children.append(data.right) transformed = children if node.parent.is_intermediate else self._call_rule_func(node, children) if self._use_cache: self._cache[id(node)] = transformed return transformed def visit_symbol_node_in(self, node): super(ForestToParseTree, self).visit_symbol_node_in(node) if self._on_cycle_retreat: return return node.children def visit_packed_node_in(self, node): self._on_cycle_retreat = False to_visit = super(ForestToParseTree, self).visit_packed_node_in(node) if not self.resolve_ambiguity or id(node.parent) not in self._successful_visits: if not self._use_cache or id(node) not in self._cache: return to_visit def visit_packed_node_out(self, node): super(ForestToParseTree, self).visit_packed_node_out(node) if not self._on_cycle_retreat: self._successful_visits.add(id(node.parent)) def handles_ambiguity(func): """Decorator for methods of subclasses of ``TreeForestTransformer``. Denotes that the method should receive a list of transformed derivations.""" func.handles_ambiguity = True return func class TreeForestTransformer(ForestToParseTree): """A ``ForestTransformer`` with a tree ``Transformer``-like interface. By default, it will construct a tree. Methods provided via inheritance are called based on the rule/symbol names of nodes in the forest. Methods that act on rules will receive a list of the results of the transformations of the rule's children. By default, trees and tokens. Methods that act on tokens will receive a token. Alternatively, methods that act on rules may be annotated with ``handles_ambiguity``. In this case, the function will receive a list of all the transformations of all the derivations of the rule. By default, a list of trees where each tree.data is equal to the rule name or one of its aliases. Non-tree transformations are made possible by override of ``__default__``, ``__default_token__``, and ``__default_ambig__``. Note: Tree shaping features such as inlined rules and token filtering are not built into the transformation. Positions are also not propagated. Parameters: tree_class: The tree class to use for construction prioritizer: A ``ForestVisitor`` that manipulates the priorities of nodes in the SPPF. resolve_ambiguity: If True, ambiguities will be resolved based on priorities. 
        use_cache (bool): If True, caches the results of some transformations,
                          potentially improving performance when ``resolve_ambiguity==False``.
                          Only use if you know what you are doing: i.e. all
                          transformation functions are pure and referentially transparent.
    """

    def __init__(self, tree_class=Tree, prioritizer=ForestSumVisitor(), resolve_ambiguity=True, use_cache=False):
        super(TreeForestTransformer, self).__init__(tree_class, dict(), prioritizer, resolve_ambiguity, use_cache)

    def __default__(self, name, data):
        """Default operation on tree (for override).

        Returns a tree with name with data as children.
        """
        return self.tree_class(name, data)

    def __default_ambig__(self, name, data):
        """Default operation on ambiguous rule (for override).

        Wraps data in an '_ambig' node if it contains more than one element.
        """
        if len(data) > 1:
            return self.tree_class('_ambig', data)
        elif data:
            return data[0]
        return Discard

    def __default_token__(self, node):
        """Default operation on ``Token`` (for override).

        Returns ``node``.
        """
        return node

    def transform_token_node(self, node):
        return getattr(self, node.type, self.__default_token__)(node)

    def _call_rule_func(self, node, data):
        name = node.rule.alias or node.rule.options.template_source or node.rule.origin.name
        user_func = getattr(self, name, self.__default__)
        if user_func == self.__default__ or hasattr(user_func, 'handles_ambiguity'):
            user_func = partial(self.__default__, name)
        if not self.resolve_ambiguity:
            wrapper = partial(AmbiguousIntermediateExpander, self.tree_class)
            user_func = wrapper(user_func)
        return user_func(data)

    def _call_ambig_func(self, node, data):
        name = node.s.name
        user_func = getattr(self, name, self.__default_ambig__)
        if user_func == self.__default_ambig__ or not hasattr(user_func, 'handles_ambiguity'):
            user_func = partial(self.__default_ambig__, name)
        return user_func(data)


class ForestToPyDotVisitor(ForestVisitor):
    """
    A Forest visitor which writes the SPPF to a PNG.

    The SPPF can get really large, really quickly because
    of the amount of meta-data it stores, so this is probably
    only useful for trivial trees and learning how the SPPF
    is structured.
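    Hedged usage sketch (assumes the optional 'pydot' dependency is
    installed and that 'root' is an SPPF SymbolNode, e.g. a solution
    kept by the Earley parser):

        ForestToPyDotVisitor().visit(root, "sppf.png")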
""" def __init__(self, rankdir="TB"): super(ForestToPyDotVisitor, self).__init__(single_visit=True) self.pydot = import_module('pydot') self.graph = self.pydot.Dot(graph_type='digraph', rankdir=rankdir) def visit(self, root, filename): super(ForestToPyDotVisitor, self).visit(root) try: self.graph.write_png(filename) except FileNotFoundError as e: logger.error("Could not write png: ", e) def visit_token_node(self, node): graph_node_id = str(id(node)) graph_node_label = "\"{}\"".format(node.value.replace('"', '\\"')) graph_node_color = 0x808080 graph_node_style = "\"filled,rounded\"" graph_node_shape = "diamond" graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor="#{:06x}".format(graph_node_color), shape=graph_node_shape, label=graph_node_label) self.graph.add_node(graph_node) def visit_packed_node_in(self, node): graph_node_id = str(id(node)) graph_node_label = repr(node) graph_node_color = 0x808080 graph_node_style = "filled" graph_node_shape = "diamond" graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor="#{:06x}".format(graph_node_color), shape=graph_node_shape, label=graph_node_label) self.graph.add_node(graph_node) yield node.left yield node.right def visit_packed_node_out(self, node): graph_node_id = str(id(node)) graph_node = self.graph.get_node(graph_node_id)[0] for child in [node.left, node.right]: if child is not None: child_graph_node_id = str(id(child.token if isinstance(child, TokenNode) else child)) child_graph_node = self.graph.get_node(child_graph_node_id)[0] self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node)) else: #### Try and be above the Python object ID range; probably impl. specific, but maybe this is okay. child_graph_node_id = str(randint(100000000000000000000000000000,123456789012345678901234567890)) child_graph_node_style = "invis" child_graph_node = self.pydot.Node(child_graph_node_id, style=child_graph_node_style, label="None") child_edge_style = "invis" self.graph.add_node(child_graph_node) self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node, style=child_edge_style)) def visit_symbol_node_in(self, node): graph_node_id = str(id(node)) graph_node_label = repr(node) graph_node_color = 0x808080 graph_node_style = "\"filled\"" if node.is_intermediate: graph_node_shape = "ellipse" else: graph_node_shape = "rectangle" graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor="#{:06x}".format(graph_node_color), shape=graph_node_shape, label=graph_node_label) self.graph.add_node(graph_node) return iter(node.children) def visit_symbol_node_out(self, node): graph_node_id = str(id(node)) graph_node = self.graph.get_node(graph_node_id)[0] for child in node.children: child_graph_node_id = str(id(child)) child_graph_node = self.graph.get_node(child_graph_node_id)[0] self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node)) poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/grammar_analysis.py000066400000000000000000000157451475444614500270250ustar00rootroot00000000000000"Provides for superficial grammar analysis." 
from collections import Counter, defaultdict
from typing import List, Dict, Iterator, FrozenSet, Set

from ..utils import bfs, fzset, classify, OrderedSet
from ..exceptions import GrammarError
from ..grammar import Rule, Terminal, NonTerminal, Symbol
from ..common import ParserConf


class RulePtr:
    __slots__ = ('rule', 'index')
    rule: Rule
    index: int

    def __init__(self, rule: Rule, index: int):
        assert isinstance(rule, Rule)
        assert index <= len(rule.expansion)
        self.rule = rule
        self.index = index

    def __repr__(self):
        before = [x.name for x in self.rule.expansion[:self.index]]
        after = [x.name for x in self.rule.expansion[self.index:]]
        return '<%s : %s * %s>' % (self.rule.origin.name, ' '.join(before), ' '.join(after))

    @property
    def next(self) -> Symbol:
        return self.rule.expansion[self.index]

    def advance(self, sym: Symbol) -> 'RulePtr':
        assert self.next == sym
        return RulePtr(self.rule, self.index+1)

    @property
    def is_satisfied(self) -> bool:
        return self.index == len(self.rule.expansion)

    def __eq__(self, other) -> bool:
        if not isinstance(other, RulePtr):
            return NotImplemented
        return self.rule == other.rule and self.index == other.index

    def __hash__(self) -> int:
        return hash((self.rule, self.index))


State = FrozenSet[RulePtr]

# state generation ensures no duplicate LR0ItemSets
class LR0ItemSet:
    __slots__ = ('kernel', 'closure', 'transitions', 'lookaheads')

    kernel: State
    closure: State
    transitions: Dict[Symbol, 'LR0ItemSet']
    lookaheads: Dict[Symbol, Set[Rule]]

    def __init__(self, kernel, closure):
        self.kernel = fzset(kernel)
        self.closure = fzset(closure)
        self.transitions = {}
        self.lookaheads = defaultdict(set)

    def __repr__(self):
        return '{%s | %s}' % (', '.join([repr(r) for r in self.kernel]), ', '.join([repr(r) for r in self.closure]))


def update_set(set1, set2):
    if not set2 or set1 > set2:
        return False

    copy = set(set1)
    set1 |= set2
    return set1 != copy

def calculate_sets(rules):
    """Calculate the FIRST, FOLLOW and NULLABLE sets for the given rules.

    Adapted from: http://lara.epfl.ch/w/cc09:algorithm_for_first_and_follow_sets"""
    symbols = {sym for rule in rules for sym in rule.expansion} | {rule.origin for rule in rules}

    # foreach grammar rule X ::= Y(1) ...
Y(k) # if k=0 or {Y(1),...,Y(k)} subset of NULLABLE then # NULLABLE = NULLABLE union {X} # for i = 1 to k # if i=1 or {Y(1),...,Y(i-1)} subset of NULLABLE then # FIRST(X) = FIRST(X) union FIRST(Y(i)) # for j = i+1 to k # if i=k or {Y(i+1),...Y(k)} subset of NULLABLE then # FOLLOW(Y(i)) = FOLLOW(Y(i)) union FOLLOW(X) # if i+1=j or {Y(i+1),...,Y(j-1)} subset of NULLABLE then # FOLLOW(Y(i)) = FOLLOW(Y(i)) union FIRST(Y(j)) # until none of NULLABLE,FIRST,FOLLOW changed in last iteration NULLABLE = set() FIRST = {} FOLLOW = {} for sym in symbols: FIRST[sym]={sym} if sym.is_term else set() FOLLOW[sym]=set() # Calculate NULLABLE and FIRST changed = True while changed: changed = False for rule in rules: if set(rule.expansion) <= NULLABLE: if update_set(NULLABLE, {rule.origin}): changed = True for i, sym in enumerate(rule.expansion): if set(rule.expansion[:i]) <= NULLABLE: if update_set(FIRST[rule.origin], FIRST[sym]): changed = True else: break # Calculate FOLLOW changed = True while changed: changed = False for rule in rules: for i, sym in enumerate(rule.expansion): if i==len(rule.expansion)-1 or set(rule.expansion[i+1:]) <= NULLABLE: if update_set(FOLLOW[sym], FOLLOW[rule.origin]): changed = True for j in range(i+1, len(rule.expansion)): if set(rule.expansion[i+1:j]) <= NULLABLE: if update_set(FOLLOW[sym], FIRST[rule.expansion[j]]): changed = True return FIRST, FOLLOW, NULLABLE class GrammarAnalyzer: def __init__(self, parser_conf: ParserConf, debug: bool=False, strict: bool=False): self.debug = debug self.strict = strict root_rules = {start: Rule(NonTerminal('$root_' + start), [NonTerminal(start), Terminal('$END')]) for start in parser_conf.start} rules = parser_conf.rules + list(root_rules.values()) self.rules_by_origin: Dict[NonTerminal, List[Rule]] = classify(rules, lambda r: r.origin) if len(rules) != len(set(rules)): duplicates = [item for item, count in Counter(rules).items() if count > 1] raise GrammarError("Rules defined twice: %s" % ', '.join(str(i) for i in duplicates)) for r in rules: for sym in r.expansion: if not (sym.is_term or sym in self.rules_by_origin): raise GrammarError("Using an undefined rule: %s" % sym) self.start_states = {start: self.expand_rule(root_rule.origin) for start, root_rule in root_rules.items()} self.end_states = {start: fzset({RulePtr(root_rule, len(root_rule.expansion))}) for start, root_rule in root_rules.items()} lr0_root_rules = {start: Rule(NonTerminal('$root_' + start), [NonTerminal(start)]) for start in parser_conf.start} lr0_rules = parser_conf.rules + list(lr0_root_rules.values()) assert(len(lr0_rules) == len(set(lr0_rules))) self.lr0_rules_by_origin = classify(lr0_rules, lambda r: r.origin) # cache RulePtr(r, 0) in r (no duplicate RulePtr objects) self.lr0_start_states = {start: LR0ItemSet([RulePtr(root_rule, 0)], self.expand_rule(root_rule.origin, self.lr0_rules_by_origin)) for start, root_rule in lr0_root_rules.items()} self.FIRST, self.FOLLOW, self.NULLABLE = calculate_sets(rules) def expand_rule(self, source_rule: NonTerminal, rules_by_origin=None) -> OrderedSet[RulePtr]: "Returns all init_ptrs accessible by rule (recursive)" if rules_by_origin is None: rules_by_origin = self.rules_by_origin init_ptrs = OrderedSet[RulePtr]() def _expand_rule(rule: NonTerminal) -> Iterator[NonTerminal]: assert not rule.is_term, rule for r in rules_by_origin[rule]: init_ptr = RulePtr(r, 0) init_ptrs.add(init_ptr) if r.expansion: # if not empty rule new_r = init_ptr.next if not new_r.is_term: assert isinstance(new_r, NonTerminal) yield new_r for _ in 
bfs([source_rule], _expand_rule): pass return init_ptrs poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/lalr_analysis.py000066400000000000000000000276571475444614500263360ustar00rootroot00000000000000"""This module builds a LALR(1) transition-table for lalr_parser.py For now, shift/reduce conflicts are automatically resolved as shifts. """ # Author: Erez Shinan (2017) # Email : erezshin@gmail.com from typing import Dict, Set, Iterator, Tuple, List, TypeVar, Generic from collections import defaultdict from ..utils import classify, classify_bool, bfs, fzset, Enumerator, logger from ..exceptions import GrammarError from .grammar_analysis import GrammarAnalyzer, Terminal, LR0ItemSet, RulePtr, State from ..grammar import Rule, Symbol from ..common import ParserConf ###{standalone class Action: def __init__(self, name): self.name = name def __str__(self): return self.name def __repr__(self): return str(self) Shift = Action('Shift') Reduce = Action('Reduce') StateT = TypeVar("StateT") class ParseTableBase(Generic[StateT]): states: Dict[StateT, Dict[str, Tuple]] start_states: Dict[str, StateT] end_states: Dict[str, StateT] def __init__(self, states, start_states, end_states): self.states = states self.start_states = start_states self.end_states = end_states def serialize(self, memo): tokens = Enumerator() states = { state: {tokens.get(token): ((1, arg.serialize(memo)) if action is Reduce else (0, arg)) for token, (action, arg) in actions.items()} for state, actions in self.states.items() } return { 'tokens': tokens.reversed(), 'states': states, 'start_states': self.start_states, 'end_states': self.end_states, } @classmethod def deserialize(cls, data, memo): tokens = data['tokens'] states = { state: {tokens[token]: ((Reduce, Rule.deserialize(arg, memo)) if action==1 else (Shift, arg)) for token, (action, arg) in actions.items()} for state, actions in data['states'].items() } return cls(states, data['start_states'], data['end_states']) class ParseTable(ParseTableBase['State']): """Parse-table whose key is State, i.e. set[RulePtr] Slower than IntParseTable, but useful for debugging """ pass class IntParseTable(ParseTableBase[int]): """Parse-table whose key is int. 
Best for performance.""" @classmethod def from_ParseTable(cls, parse_table: ParseTable): enum = list(parse_table.states) state_to_idx: Dict['State', int] = {s:i for i,s in enumerate(enum)} int_states = {} for s, la in parse_table.states.items(): la = {k:(v[0], state_to_idx[v[1]]) if v[0] is Shift else v for k,v in la.items()} int_states[ state_to_idx[s] ] = la start_states = {start:state_to_idx[s] for start, s in parse_table.start_states.items()} end_states = {start:state_to_idx[s] for start, s in parse_table.end_states.items()} return cls(int_states, start_states, end_states) ###} # digraph and traverse, see The Theory and Practice of Compiler Writing # computes F(x) = G(x) union (union { G(y) | x R y }) # X: nodes # R: relation (function mapping node -> list of nodes that satisfy the relation) # G: set valued function def digraph(X, R, G): F = {} S = [] N = dict.fromkeys(X, 0) for x in X: # this is always true for the first iteration, but N[x] may be updated in traverse below if N[x] == 0: traverse(x, S, N, X, R, G, F) return F # x: single node # S: stack # N: weights # X: nodes # R: relation (see above) # G: set valued function # F: set valued function we are computing (map of input -> output) def traverse(x, S, N, X, R, G, F): S.append(x) d = len(S) N[x] = d F[x] = G[x] for y in R[x]: if N[y] == 0: traverse(y, S, N, X, R, G, F) n_x = N[x] assert(n_x > 0) n_y = N[y] assert(n_y != 0) if (n_y > 0) and (n_y < n_x): N[x] = n_y F[x].update(F[y]) if N[x] == d: f_x = F[x] while True: z = S.pop() N[z] = -1 F[z] = f_x if z == x: break class LALR_Analyzer(GrammarAnalyzer): lr0_itemsets: Set[LR0ItemSet] nonterminal_transitions: List[Tuple[LR0ItemSet, Symbol]] lookback: Dict[Tuple[LR0ItemSet, Symbol], Set[Tuple[LR0ItemSet, Rule]]] includes: Dict[Tuple[LR0ItemSet, Symbol], Set[Tuple[LR0ItemSet, Symbol]]] reads: Dict[Tuple[LR0ItemSet, Symbol], Set[Tuple[LR0ItemSet, Symbol]]] directly_reads: Dict[Tuple[LR0ItemSet, Symbol], Set[Symbol]] def __init__(self, parser_conf: ParserConf, debug: bool=False, strict: bool=False): GrammarAnalyzer.__init__(self, parser_conf, debug, strict) self.nonterminal_transitions = [] self.directly_reads = defaultdict(set) self.reads = defaultdict(set) self.includes = defaultdict(set) self.lookback = defaultdict(set) def compute_lr0_states(self) -> None: self.lr0_itemsets = set() # map of kernels to LR0ItemSets cache: Dict['State', LR0ItemSet] = {} def step(state: LR0ItemSet) -> Iterator[LR0ItemSet]: _, unsat = classify_bool(state.closure, lambda rp: rp.is_satisfied) d = classify(unsat, lambda rp: rp.next) for sym, rps in d.items(): kernel = fzset({rp.advance(sym) for rp in rps}) new_state = cache.get(kernel, None) if new_state is None: closure = set(kernel) for rp in kernel: if not rp.is_satisfied and not rp.next.is_term: closure |= self.expand_rule(rp.next, self.lr0_rules_by_origin) new_state = LR0ItemSet(kernel, closure) cache[kernel] = new_state state.transitions[sym] = new_state yield new_state self.lr0_itemsets.add(state) for _ in bfs(self.lr0_start_states.values(), step): pass def compute_reads_relations(self): # handle start state for root in self.lr0_start_states.values(): assert(len(root.kernel) == 1) for rp in root.kernel: assert(rp.index == 0) self.directly_reads[(root, rp.next)] = set([ Terminal('$END') ]) for state in self.lr0_itemsets: seen = set() for rp in state.closure: if rp.is_satisfied: continue s = rp.next # if s is a not a nonterminal if s not in self.lr0_rules_by_origin: continue if s in seen: continue seen.add(s) nt = (state, s) 
self.nonterminal_transitions.append(nt) dr = self.directly_reads[nt] r = self.reads[nt] next_state = state.transitions[s] for rp2 in next_state.closure: if rp2.is_satisfied: continue s2 = rp2.next # if s2 is a terminal if s2 not in self.lr0_rules_by_origin: dr.add(s2) if s2 in self.NULLABLE: r.add((next_state, s2)) def compute_includes_lookback(self): for nt in self.nonterminal_transitions: state, nonterminal = nt includes = [] lookback = self.lookback[nt] for rp in state.closure: if rp.rule.origin != nonterminal: continue # traverse the states for rp(.rule) state2 = state for i in range(rp.index, len(rp.rule.expansion)): s = rp.rule.expansion[i] nt2 = (state2, s) state2 = state2.transitions[s] if nt2 not in self.reads: continue for j in range(i + 1, len(rp.rule.expansion)): if rp.rule.expansion[j] not in self.NULLABLE: break else: includes.append(nt2) # state2 is at the final state for rp.rule if rp.index == 0: for rp2 in state2.closure: if (rp2.rule == rp.rule) and rp2.is_satisfied: lookback.add((state2, rp2.rule)) for nt2 in includes: self.includes[nt2].add(nt) def compute_lookaheads(self): read_sets = digraph(self.nonterminal_transitions, self.reads, self.directly_reads) follow_sets = digraph(self.nonterminal_transitions, self.includes, read_sets) for nt, lookbacks in self.lookback.items(): for state, rule in lookbacks: for s in follow_sets[nt]: state.lookaheads[s].add(rule) def compute_lalr1_states(self) -> None: m: Dict[LR0ItemSet, Dict[str, Tuple]] = {} reduce_reduce = [] for itemset in self.lr0_itemsets: actions: Dict[Symbol, Tuple] = {la: (Shift, next_state.closure) for la, next_state in itemset.transitions.items()} for la, rules in itemset.lookaheads.items(): if len(rules) > 1: # Try to resolve conflict based on priority p = [(r.options.priority or 0, r) for r in rules] p.sort(key=lambda r: r[0], reverse=True) best, second_best = p[:2] if best[0] > second_best[0]: rules = {best[1]} else: reduce_reduce.append((itemset, la, rules)) continue rule ,= rules if la in actions: if self.strict: raise GrammarError(f"Shift/Reduce conflict for terminal {la.name}. 
[strict-mode]\n ") elif self.debug: logger.warning('Shift/Reduce conflict for terminal %s: (resolving as shift)', la.name) logger.warning(' * %s', rule) else: logger.debug('Shift/Reduce conflict for terminal %s: (resolving as shift)', la.name) logger.debug(' * %s', rule) else: actions[la] = (Reduce, rule) m[itemset] = { k.name: v for k, v in actions.items() } if reduce_reduce: msgs = [] for itemset, la, rules in reduce_reduce: msg = 'Reduce/Reduce collision in %s between the following rules: %s' % (la, ''.join([ '\n\t- ' + str(r) for r in rules ])) if self.debug: msg += '\n collision occurred in state: {%s\n }' % ''.join(['\n\t' + str(x) for x in itemset.closure]) msgs.append(msg) raise GrammarError('\n\n'.join(msgs)) states = { k.closure: v for k, v in m.items() } # compute end states end_states: Dict[str, 'State'] = {} for state in states: for rp in state: for start in self.lr0_start_states: if rp.rule.origin.name == ('$root_' + start) and rp.is_satisfied: assert start not in end_states end_states[start] = state start_states = { start: state.closure for start, state in self.lr0_start_states.items() } _parse_table = ParseTable(states, start_states, end_states) if self.debug: self.parse_table = _parse_table else: self.parse_table = IntParseTable.from_ParseTable(_parse_table) def compute_lalr(self): self.compute_lr0_states() self.compute_reads_relations() self.compute_includes_lookback() self.compute_lookaheads() self.compute_lalr1_states() poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/lalr_interactive_parser.py000066400000000000000000000131751475444614500303720ustar00rootroot00000000000000# This module provides a LALR interactive parser, which is used for debugging and error handling from typing import Iterator, List from copy import copy import warnings from lark.exceptions import UnexpectedToken from lark.lexer import Token, LexerThread from .lalr_parser_state import ParserState ###{standalone class InteractiveParser: """InteractiveParser gives you advanced control over parsing and error handling when parsing with LALR. For a simpler interface, see the ``on_error`` argument to ``Lark.parse()``. """ def __init__(self, parser, parser_state: ParserState, lexer_thread: LexerThread): self.parser = parser self.parser_state = parser_state self.lexer_thread = lexer_thread self.result = None @property def lexer_state(self) -> LexerThread: warnings.warn("lexer_state will be removed in subsequent releases. Use lexer_thread instead.", DeprecationWarning) return self.lexer_thread def feed_token(self, token: Token): """Feed the parser with a token, and advance it to the next state, as if it received it from the lexer. Note that ``token`` has to be an instance of ``Token``. """ return self.parser_state.feed_token(token, token.type == '$END') def iter_parse(self) -> Iterator[Token]: """Step through the different stages of the parse, by reading tokens from the lexer and feeding them to the parser, one per iteration. Returns an iterator of the tokens it encounters. When the parse is over, the resulting tree can be found in ``InteractiveParser.result``. """ for token in self.lexer_thread.lex(self.parser_state): yield token self.result = self.feed_token(token) def exhaust_lexer(self) -> List[Token]: """Try to feed the rest of the lexer state into the interactive parser. Note that this modifies the instance in place and does not feed an '$END' Token """ return list(self.iter_parse()) def feed_eof(self, last_token=None): """Feed a '$END' Token. 
Borrows from 'last_token' if given.""" eof = Token.new_borrow_pos('$END', '', last_token) if last_token is not None else self.lexer_thread._Token('$END', '', 0, 1, 1) return self.feed_token(eof) def __copy__(self): """Create a new interactive parser with a separate state. Calls to feed_token() won't affect the old instance, and vice-versa. """ return self.copy() def copy(self, deepcopy_values=True): return type(self)( self.parser, self.parser_state.copy(deepcopy_values=deepcopy_values), copy(self.lexer_thread), ) def __eq__(self, other): if not isinstance(other, InteractiveParser): return False return self.parser_state == other.parser_state and self.lexer_thread == other.lexer_thread def as_immutable(self): """Convert to an ``ImmutableInteractiveParser``.""" p = copy(self) return ImmutableInteractiveParser(p.parser, p.parser_state, p.lexer_thread) def pretty(self): """Print the output of ``choices()`` in a way that's easier to read.""" out = ["Parser choices:"] for k, v in self.choices().items(): out.append('\t- %s -> %r' % (k, v)) out.append('stack size: %s' % len(self.parser_state.state_stack)) return '\n'.join(out) def choices(self): """Returns a dictionary of token types, matched to their action in the parser. Only returns token types that are accepted by the current state. Updated by ``feed_token()``. """ return self.parser_state.parse_conf.parse_table.states[self.parser_state.position] def accepts(self): """Returns the set of possible tokens that will advance the parser into a new valid state.""" accepts = set() conf_no_callbacks = copy(self.parser_state.parse_conf) # We don't want to call callbacks here since those might have arbitrary side effects # and are unnecessarily slow. conf_no_callbacks.callbacks = {} for t in self.choices(): if t.isupper(): # is terminal? new_cursor = self.copy(deepcopy_values=False) new_cursor.parser_state.parse_conf = conf_no_callbacks try: new_cursor.feed_token(self.lexer_thread._Token(t, '')) except UnexpectedToken: pass else: accepts.add(t) return accepts def resume_parse(self): """Resume automated parsing from the current state. """ return self.parser.parse_from_state(self.parser_state, last_token=self.lexer_thread.state.last_token) class ImmutableInteractiveParser(InteractiveParser): """Same as ``InteractiveParser``, but operations create a new instance instead of changing it in-place. """ result = None def __hash__(self): return hash((self.parser_state, self.lexer_thread)) def feed_token(self, token): c = copy(self) c.result = InteractiveParser.feed_token(c, token) return c def exhaust_lexer(self): """Try to feed the rest of the lexer state into the parser. 
Note that this returns a new ImmutableInteractiveParser and does not feed an '$END' Token""" cursor = self.as_mutable() cursor.exhaust_lexer() return cursor.as_immutable() def as_mutable(self): """Convert to an ``InteractiveParser``.""" p = copy(self) return InteractiveParser(p.parser, p.parser_state, p.lexer_thread) ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/lalr_parser.py000066400000000000000000000107521475444614500257730ustar00rootroot00000000000000"""This module implements a LALR(1) Parser """ # Author: Erez Shinan (2017) # Email : erezshin@gmail.com from typing import Dict, Any, Optional from ..lexer import Token, LexerThread from ..utils import Serialize from ..common import ParserConf, ParserCallbacks from .lalr_analysis import LALR_Analyzer, IntParseTable, ParseTableBase from .lalr_interactive_parser import InteractiveParser from lark.exceptions import UnexpectedCharacters, UnexpectedInput, UnexpectedToken from .lalr_parser_state import ParserState, ParseConf ###{standalone class LALR_Parser(Serialize): def __init__(self, parser_conf: ParserConf, debug: bool=False, strict: bool=False): analysis = LALR_Analyzer(parser_conf, debug=debug, strict=strict) analysis.compute_lalr() callbacks = parser_conf.callbacks self._parse_table = analysis.parse_table self.parser_conf = parser_conf self.parser = _Parser(analysis.parse_table, callbacks, debug) @classmethod def deserialize(cls, data, memo, callbacks, debug=False): inst = cls.__new__(cls) inst._parse_table = IntParseTable.deserialize(data, memo) inst.parser = _Parser(inst._parse_table, callbacks, debug) return inst def serialize(self, memo: Any = None) -> Dict[str, Any]: return self._parse_table.serialize(memo) def parse_interactive(self, lexer: LexerThread, start: str): return self.parser.parse(lexer, start, start_interactive=True) def parse(self, lexer, start, on_error=None): try: return self.parser.parse(lexer, start) except UnexpectedInput as e: if on_error is None: raise while True: if isinstance(e, UnexpectedCharacters): s = e.interactive_parser.lexer_thread.state p = s.line_ctr.char_pos if not on_error(e): raise e if isinstance(e, UnexpectedCharacters): # If user didn't change the character position, then we should if p == s.line_ctr.char_pos: s.line_ctr.feed(s.text[p:p+1]) try: return e.interactive_parser.resume_parse() except UnexpectedToken as e2: if (isinstance(e, UnexpectedToken) and e.token.type == e2.token.type == '$END' and e.interactive_parser == e2.interactive_parser): # Prevent infinite loop raise e2 e = e2 except UnexpectedCharacters as e2: e = e2 class _Parser: parse_table: ParseTableBase callbacks: ParserCallbacks debug: bool def __init__(self, parse_table: ParseTableBase, callbacks: ParserCallbacks, debug: bool=False): self.parse_table = parse_table self.callbacks = callbacks self.debug = debug def parse(self, lexer: LexerThread, start: str, value_stack=None, state_stack=None, start_interactive=False): parse_conf = ParseConf(self.parse_table, self.callbacks, start) parser_state = ParserState(parse_conf, lexer, state_stack, value_stack) if start_interactive: return InteractiveParser(self, parser_state, parser_state.lexer) return self.parse_from_state(parser_state) def parse_from_state(self, state: ParserState, last_token: Optional[Token]=None): """Run the main LALR parser loop Parameters: state - the initial state. Changed in-place. last_token - Used only for line information in case of an empty lexer. 
""" try: token = last_token for token in state.lexer.lex(state): assert token is not None state.feed_token(token) end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1) return state.feed_token(end_token, True) except UnexpectedInput as e: try: e.interactive_parser = InteractiveParser(self, state, state.lexer) except NameError: pass raise e except Exception as e: if self.debug: print("") print("STATE STACK DUMP") print("----------------") for i, s in enumerate(state.state_stack): print('%d)' % i , s) print("") raise ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/lalr_parser_state.py000066400000000000000000000073211475444614500271710ustar00rootroot00000000000000from copy import deepcopy, copy from typing import Dict, Any, Generic, List from ..lexer import Token, LexerThread from ..common import ParserCallbacks from .lalr_analysis import Shift, ParseTableBase, StateT from lark.exceptions import UnexpectedToken ###{standalone class ParseConf(Generic[StateT]): __slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states' parse_table: ParseTableBase[StateT] callbacks: ParserCallbacks start: str start_state: StateT end_state: StateT states: Dict[StateT, Dict[str, tuple]] def __init__(self, parse_table: ParseTableBase[StateT], callbacks: ParserCallbacks, start: str): self.parse_table = parse_table self.start_state = self.parse_table.start_states[start] self.end_state = self.parse_table.end_states[start] self.states = self.parse_table.states self.callbacks = callbacks self.start = start class ParserState(Generic[StateT]): __slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack' parse_conf: ParseConf[StateT] lexer: LexerThread state_stack: List[StateT] value_stack: list def __init__(self, parse_conf: ParseConf[StateT], lexer: LexerThread, state_stack=None, value_stack=None): self.parse_conf = parse_conf self.lexer = lexer self.state_stack = state_stack or [self.parse_conf.start_state] self.value_stack = value_stack or [] @property def position(self) -> StateT: return self.state_stack[-1] # Necessary for match_examples() to work def __eq__(self, other) -> bool: if not isinstance(other, ParserState): return NotImplemented return len(self.state_stack) == len(other.state_stack) and self.position == other.position def __copy__(self): return self.copy() def copy(self, deepcopy_values=True) -> 'ParserState[StateT]': return type(self)( self.parse_conf, self.lexer, # XXX copy copy(self.state_stack), deepcopy(self.value_stack) if deepcopy_values else copy(self.value_stack), ) def feed_token(self, token: Token, is_end=False) -> Any: state_stack = self.state_stack value_stack = self.value_stack states = self.parse_conf.states end_state = self.parse_conf.end_state callbacks = self.parse_conf.callbacks while True: state = state_stack[-1] try: action, arg = states[state][token.type] except KeyError: expected = {s for s in states[state].keys() if s.isupper()} raise UnexpectedToken(token, expected, state=self, interactive_parser=None) assert arg != end_state if action is Shift: # shift once and return assert not is_end state_stack.append(arg) value_stack.append(token if token.type not in callbacks else callbacks[token.type](token)) return else: # reduce+shift as many times as necessary rule = arg size = len(rule.expansion) if size: s = value_stack[-size:] del state_stack[-size:] del value_stack[-size:] else: s = [] value = callbacks[rule](s) if callbacks else s _action, new_state = states[state_stack[-1]][rule.origin.name] assert 
_action is Shift state_stack.append(new_state) value_stack.append(value) if is_end and state_stack[-1] == end_state: return value_stack[-1] ###} poetry-core-2.1.1/src/poetry/core/_vendor/lark/parsers/xearley.py000066400000000000000000000172211475444614500251340ustar00rootroot00000000000000"""This module implements an Earley parser with a dynamic lexer The core Earley algorithm used here is based on Elizabeth Scott's implementation, here: https://www.sciencedirect.com/science/article/pii/S1571066108001497 That is probably the best reference for understanding the algorithm here. The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format is better documented here: http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/ Instead of running a lexer beforehand, or using a costly char-by-char method, this parser uses regular expressions by necessity, achieving high performance while maintaining all of Earley's power in parsing any CFG. """ from typing import TYPE_CHECKING, Callable, Optional, List, Any from collections import defaultdict from ..tree import Tree from ..exceptions import UnexpectedCharacters from ..lexer import Token from ..grammar import Terminal from .earley import Parser as BaseParser from .earley_forest import TokenNode if TYPE_CHECKING: from ..common import LexerConf, ParserConf class Parser(BaseParser): def __init__(self, lexer_conf: 'LexerConf', parser_conf: 'ParserConf', term_matcher: Callable, resolve_ambiguity: bool=True, complete_lex: bool=False, debug: bool=False, tree_class: Optional[Callable[[str, List], Any]]=Tree, ordered_sets: bool=True): BaseParser.__init__(self, lexer_conf, parser_conf, term_matcher, resolve_ambiguity, debug, tree_class, ordered_sets) self.ignore = [Terminal(t) for t in lexer_conf.ignore] self.complete_lex = complete_lex def _parse(self, stream, columns, to_scan, start_symbol=None): def scan(i, to_scan): """The core Earley Scanner. This is a custom implementation of the scanner that uses the Lark lexer to match tokens. The scan list is built by the Earley predictor, based on the previously completed tokens. This ensures that at each phase of the parse we have a custom lexer context, allowing for more complex ambiguities.""" node_cache = {} # 1) Loop the expectations and ask the lexer to match. # Since regexp is forward looking on the input stream, and we only # want to process tokens when we hit the point in the stream at which # they complete, we push all tokens into a buffer (delayed_matches), to # be held possibly for a later parse step when we reach the point in the # input stream at which they complete. for item in self.Set(to_scan): m = match(item.expect, stream, i) if m: t = Token(item.expect.name, m.group(0), i, text_line, text_column) delayed_matches[m.end()].append( (item, i, t) ) if self.complete_lex: s = m.group(0) for j in range(1, len(s)): m = match(item.expect, s[:-j]) if m: t = Token(item.expect.name, m.group(0), i, text_line, text_column) delayed_matches[i+m.end()].append( (item, i, t) ) # XXX The following 3 lines were commented out for causing a bug. See issue #768 # # Remove any items that successfully matched in this pass from the to_scan buffer. # # This ensures we don't carry over tokens that already matched, if we're ignoring below. # to_scan.remove(item) # 3) Process any ignores. This is typically used for e.g. whitespace. # We carry over any unmatched items from the to_scan buffer to be matched again after # the ignore.
This should allow us to use ignored symbols in non-terminals to implement # e.g. mandatory spacing. for x in self.ignore: m = match(x, stream, i) if m: # Carry over any items still in the scan buffer, to past the end of the ignored items. delayed_matches[m.end()].extend([(item, i, None) for item in to_scan ]) # If we're ignoring up to the end of the file, # carry over the start symbol if it already completed. delayed_matches[m.end()].extend([(item, i, None) for item in columns[i] if item.is_complete and item.s == start_symbol]) next_to_scan = self.Set() next_set = self.Set() columns.append(next_set) transitives.append({}) ## 4) Process Tokens from delayed_matches. # This is the core of the Earley scanner. Create an SPPF node for each Token, # and create the symbol node in the SPPF tree. Advance the item that completed, # and add the resulting new item to either the Earley set (for processing by the # completer/predictor) or the to_scan buffer for the next parse step. for item, start, token in delayed_matches[i+1]: if token is not None: token.end_line = text_line token.end_column = text_column + 1 token.end_pos = i + 1 new_item = item.advance() label = (new_item.s, new_item.start, i + 1) token_node = TokenNode(token, terminals[token.type]) new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, self.SymbolNode(*label)) new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token_node) else: new_item = item if new_item.expect in self.TERMINALS: # add (B ::= Aai+1.B, h, y) to Q' next_to_scan.add(new_item) else: # add (B ::= Aa+1.B, h, y) to Ei+1 next_set.add(new_item) del delayed_matches[i+1] # No longer needed, so unburden memory if not next_set and not delayed_matches and not next_to_scan: considered_rules = list(sorted(to_scan, key=lambda key: key.rule.origin.name)) raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan}, set(to_scan), state=frozenset(i.s for i in to_scan), considered_rules=considered_rules ) return next_to_scan delayed_matches = defaultdict(list) match = self.term_matcher terminals = self.lexer_conf.terminals_by_name # Cache for nodes & tokens created in a particular parse step. transitives = [{}] text_line = 1 text_column = 1 ## The main Earley loop. # Run the Prediction/Completion cycle for any Items in the current Earley set. # Completions will be added to the SPPF tree, and predictions will be recursively # processed down to terminals/empty nodes to be added to the scanner for the next # step. i = 0 for token in stream: self.predict_and_complete(i, to_scan, columns, transitives) to_scan = scan(i, to_scan) if token == '\n': text_line += 1 text_column = 1 else: text_column += 1 i += 1 self.predict_and_complete(i, to_scan, columns, transitives) ## Column is now the final column in the parse. assert i == len(columns)-1 return to_scan poetry-core-2.1.1/src/poetry/core/_vendor/lark/py.typed000066400000000000000000000000001475444614500231140ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/lark/reconstruct.py000066400000000000000000000072631475444614500243640ustar00rootroot00000000000000"""This is an experimental tool for reconstructing text from a shaped tree, based on a Lark grammar. 
""" from typing import Dict, Callable, Iterable, Optional from .lark import Lark from .tree import Tree, ParseTree from .visitors import Transformer_InPlace from .lexer import Token, PatternStr, TerminalDef from .grammar import Terminal, NonTerminal, Symbol from .tree_matcher import TreeMatcher, is_discarded_terminal from .utils import is_id_continue def is_iter_empty(i): try: _ = next(i) return False except StopIteration: return True class WriteTokensTransformer(Transformer_InPlace): "Inserts discarded tokens into their correct place, according to the rules of grammar" tokens: Dict[str, TerminalDef] term_subs: Dict[str, Callable[[Symbol], str]] def __init__(self, tokens: Dict[str, TerminalDef], term_subs: Dict[str, Callable[[Symbol], str]]) -> None: self.tokens = tokens self.term_subs = term_subs def __default__(self, data, children, meta): if not getattr(meta, 'match_tree', False): return Tree(data, children) iter_args = iter(children) to_write = [] for sym in meta.orig_expansion: if is_discarded_terminal(sym): try: v = self.term_subs[sym.name](sym) except KeyError: t = self.tokens[sym.name] if not isinstance(t.pattern, PatternStr): raise NotImplementedError("Reconstructing regexps not supported yet: %s" % t) v = t.pattern.value to_write.append(v) else: x = next(iter_args) if isinstance(x, list): to_write += x else: if isinstance(x, Token): assert Terminal(x.type) == sym, x else: assert NonTerminal(x.data) == sym, (sym, x) to_write.append(x) assert is_iter_empty(iter_args) return to_write class Reconstructor(TreeMatcher): """ A Reconstructor that will, given a full parse Tree, generate source code. Note: The reconstructor cannot generate values from regexps. If you need to produce discarded regexes, such as newlines, use `term_subs` and provide default values for them. 
Parameters: parser: a Lark instance term_subs: a dictionary of [Terminal name as str] to [output text as str] """ write_tokens: WriteTokensTransformer def __init__(self, parser: Lark, term_subs: Optional[Dict[str, Callable[[Symbol], str]]]=None) -> None: TreeMatcher.__init__(self, parser) self.write_tokens = WriteTokensTransformer({t.name:t for t in self.tokens}, term_subs or {}) def _reconstruct(self, tree): unreduced_tree = self.match_tree(tree, tree.data) res = self.write_tokens.transform(unreduced_tree) for item in res: if isinstance(item, Tree): # TODO use orig_expansion.rulename to support templates yield from self._reconstruct(item) else: yield item def reconstruct(self, tree: ParseTree, postproc: Optional[Callable[[Iterable[str]], Iterable[str]]]=None, insert_spaces: bool=True) -> str: x = self._reconstruct(tree) if postproc: x = postproc(x) y = [] prev_item = '' for item in x: if insert_spaces and prev_item and item and is_id_continue(prev_item[-1]) and is_id_continue(item[0]): y.append(' ') y.append(item) prev_item = item return ''.join(y) poetry-core-2.1.1/src/poetry/core/_vendor/lark/tools/000077500000000000000000000000001475444614500225675ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/lark/tools/__init__.py000066400000000000000000000046451475444614500247110ustar00rootroot00000000000000import sys from argparse import ArgumentParser, FileType from textwrap import indent from logging import DEBUG, INFO, WARN, ERROR from typing import Optional import warnings from lark import Lark, logger try: from interegular import logger as interegular_logger has_interegular = True except ImportError: has_interegular = False lalr_argparser = ArgumentParser(add_help=False, epilog='Look at the Lark documentation for more info on the options') flags = [ ('d', 'debug'), 'keep_all_tokens', 'regex', 'propagate_positions', 'maybe_placeholders', 'use_bytes' ] options = ['start', 'lexer'] lalr_argparser.add_argument('-v', '--verbose', action='count', default=0, help="Increase Logger output level, up to three times") lalr_argparser.add_argument('-s', '--start', action='append', default=[]) lalr_argparser.add_argument('-l', '--lexer', default='contextual', choices=('basic', 'contextual')) lalr_argparser.add_argument('-o', '--out', type=FileType('w', encoding='utf-8'), default=sys.stdout, help='the output file (default=stdout)') lalr_argparser.add_argument('grammar_file', type=FileType('r', encoding='utf-8'), help='A valid .lark file') for flag in flags: if isinstance(flag, tuple): options.append(flag[1]) lalr_argparser.add_argument('-' + flag[0], '--' + flag[1], action='store_true') elif isinstance(flag, str): options.append(flag) lalr_argparser.add_argument('--' + flag, action='store_true') else: raise NotImplementedError("flags must only contain strings or tuples of strings") def build_lalr(namespace): logger.setLevel((ERROR, WARN, INFO, DEBUG)[min(namespace.verbose, 3)]) if has_interegular: interegular_logger.setLevel(logger.getEffectiveLevel()) if len(namespace.start) == 0: namespace.start.append('start') kwargs = {n: getattr(namespace, n) for n in options} return Lark(namespace.grammar_file, parser='lalr', **kwargs), namespace.out def showwarning_as_comment(message, category, filename, lineno, file=None, line=None): # Based on warnings._showwarnmsg_impl text = warnings.formatwarning(message, category, filename, lineno, line) text = indent(text, '# ') if file is None: file = sys.stderr if file is None: return try: file.write(text) except OSError: pass def 
make_warnings_comments(): warnings.showwarning = showwarning_as_comment poetry-core-2.1.1/src/poetry/core/_vendor/lark/tools/nearley.py000066400000000000000000000141711475444614500246040ustar00rootroot00000000000000"Converts Nearley grammars to Lark" import os.path import sys import codecs import argparse from lark import Lark, Transformer, v_args nearley_grammar = r""" start: (ruledef|directive)+ directive: "@" NAME (STRING|NAME) | "@" JS -> js_code ruledef: NAME "->" expansions | NAME REGEXP "->" expansions -> macro expansions: expansion ("|" expansion)* expansion: expr+ js ?expr: item (":" /[+*?]/)? ?item: rule|string|regexp|null | "(" expansions ")" rule: NAME string: STRING regexp: REGEXP null: "null" JS: /{%.*?%}/s js: JS? NAME: /[a-zA-Z_$]\w*/ COMMENT: /#[^\n]*/ REGEXP: /\[.*?\]/ STRING: _STRING "i"? %import common.ESCAPED_STRING -> _STRING %import common.WS %ignore WS %ignore COMMENT """ nearley_grammar_parser = Lark(nearley_grammar, parser='earley', lexer='basic') def _get_rulename(name): name = {'_': '_ws_maybe', '__': '_ws'}.get(name, name) return 'n_' + name.replace('$', '__DOLLAR__').lower() @v_args(inline=True) class NearleyToLark(Transformer): def __init__(self): self._count = 0 self.extra_rules = {} self.extra_rules_rev = {} self.alias_js_code = {} def _new_function(self, code): name = 'alias_%d' % self._count self._count += 1 self.alias_js_code[name] = code return name def _extra_rule(self, rule): if rule in self.extra_rules_rev: return self.extra_rules_rev[rule] name = 'xrule_%d' % len(self.extra_rules) assert name not in self.extra_rules self.extra_rules[name] = rule self.extra_rules_rev[rule] = name return name def rule(self, name): return _get_rulename(name) def ruledef(self, name, exps): return '!%s: %s' % (_get_rulename(name), exps) def expr(self, item, op): rule = '(%s)%s' % (item, op) return self._extra_rule(rule) def regexp(self, r): return '/%s/' % r def null(self): return '' def string(self, s): return self._extra_rule(s) def expansion(self, *x): x, js = x[:-1], x[-1] if js.children: js_code ,= js.children js_code = js_code[2:-2] alias = '-> ' + self._new_function(js_code) else: alias = '' return ' '.join(x) + alias def expansions(self, *x): return '%s' % ('\n |'.join(x)) def start(self, *rules): return '\n'.join(filter(None, rules)) def _nearley_to_lark(g, builtin_path, n2l, js_code, folder_path, includes): rule_defs = [] tree = nearley_grammar_parser.parse(g) for statement in tree.children: if statement.data == 'directive': directive, arg = statement.children if directive in ('builtin', 'include'): folder = builtin_path if directive == 'builtin' else folder_path path = os.path.join(folder, arg[1:-1]) if path not in includes: includes.add(path) with codecs.open(path, encoding='utf8') as f: text = f.read() rule_defs += _nearley_to_lark(text, builtin_path, n2l, js_code, os.path.abspath(os.path.dirname(path)), includes) else: assert False, directive elif statement.data == 'js_code': code ,= statement.children code = code[2:-2] js_code.append(code) elif statement.data == 'macro': pass # TODO Add support for macros! 
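# A hypothetical command line for this converter (see get_arg_parser() and main() below);
# the file names and rule name are invented for illustration. The tool prints a
# self-contained Python module that exposes parse(text):
#
#     python -m lark.tools.nearley csv.ne main /path/to/nearley > csv_parser.py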
elif statement.data == 'ruledef': rule_defs.append(n2l.transform(statement)) else: raise Exception("Unknown statement: %s" % statement) return rule_defs def create_code_for_nearley_grammar(g, start, builtin_path, folder_path, es6=False): import js2py emit_code = [] def emit(x=None): if x: emit_code.append(x) emit_code.append('\n') js_code = ['function id(x) {return x[0];}'] n2l = NearleyToLark() rule_defs = _nearley_to_lark(g, builtin_path, n2l, js_code, folder_path, set()) lark_g = '\n'.join(rule_defs) lark_g += '\n'+'\n'.join('!%s: %s' % item for item in n2l.extra_rules.items()) emit('from lark import Lark, Transformer') emit() emit('grammar = ' + repr(lark_g)) emit() for alias, code in n2l.alias_js_code.items(): js_code.append('%s = (%s);' % (alias, code)) if es6: emit(js2py.translate_js6('\n'.join(js_code))) else: emit(js2py.translate_js('\n'.join(js_code))) emit('class TransformNearley(Transformer):') for alias in n2l.alias_js_code: emit(" %s = var.get('%s').to_python()" % (alias, alias)) emit(" __default__ = lambda self, n, c, m: c if c else None") emit() emit('parser = Lark(grammar, start="n_%s", maybe_placeholders=False)' % start) emit('def parse(text):') emit(' return TransformNearley().transform(parser.parse(text))') return ''.join(emit_code) def main(fn, start, nearley_lib, es6=False): with codecs.open(fn, encoding='utf8') as f: grammar = f.read() return create_code_for_nearley_grammar(grammar, start, os.path.join(nearley_lib, 'builtin'), os.path.abspath(os.path.dirname(fn)), es6=es6) def get_arg_parser(): parser = argparse.ArgumentParser(description='Reads a Nearley grammar (with js functions), and outputs an equivalent lark parser.') parser.add_argument('nearley_grammar', help='Path to the file containing the nearley grammar') parser.add_argument('start_rule', help='Rule within the nearley grammar to make the base rule') parser.add_argument('nearley_lib', help='Path to root directory of nearley codebase (used for including builtins)') parser.add_argument('--es6', help='Enable experimental ES6 support', action='store_true') return parser if __name__ == '__main__': parser = get_arg_parser() if len(sys.argv) == 1: parser.print_help(sys.stderr) sys.exit(1) args = parser.parse_args() print(main(fn=args.nearley_grammar, start=args.start_rule, nearley_lib=args.nearley_lib, es6=args.es6)) poetry-core-2.1.1/src/poetry/core/_vendor/lark/tools/serialize.py000066400000000000000000000017051475444614500251330ustar00rootroot00000000000000import sys import json from lark.grammar import Rule from lark.lexer import TerminalDef from lark.tools import lalr_argparser, build_lalr import argparse argparser = argparse.ArgumentParser(prog='python -m lark.tools.serialize', parents=[lalr_argparser], description="Lark Serialization Tool - Stores Lark's internal state & LALR analysis as a JSON file", epilog='Look at the Lark documentation for more info on the options') def serialize(lark_inst, outfile): data, memo = lark_inst.memo_serialize([TerminalDef, Rule]) outfile.write('{\n') outfile.write(' "data": %s,\n' % json.dumps(data)) outfile.write(' "memo": %s\n' % json.dumps(memo)) outfile.write('}\n') def main(): if len(sys.argv)==1: argparser.print_help(sys.stderr) sys.exit(1) ns = argparser.parse_args() serialize(*build_lalr(ns)) if __name__ == '__main__': main() poetry-core-2.1.1/src/poetry/core/_vendor/lark/tools/standalone.py000066400000000000000000000127731475444614500253030ustar00rootroot00000000000000###{standalone # # # Lark Stand-alone Generator Tool # ---------------------------------- # 
Generates a stand-alone LALR(1) parser # # Git: https://github.com/erezsh/lark # Author: Erez Shinan (erezshin@gmail.com) # # # >>> LICENSE # # This tool and its generated code use a separate license from Lark, # and are subject to the terms of the Mozilla Public License, v. 2.0. # If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. # # If you wish to purchase a commercial license for this tool and its # generated code, you may contact me via email or otherwise. # # If MPL2 is incompatible with your free or open-source project, # contact me and we'll work it out. # # from copy import deepcopy from abc import ABC, abstractmethod from types import ModuleType from typing import ( TypeVar, Generic, Type, Tuple, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any, Union, Iterable, IO, TYPE_CHECKING, overload, Sequence, Pattern as REPattern, ClassVar, Set, Mapping ) ###} import sys import token, tokenize import os from os import path from collections import defaultdict from functools import partial from argparse import ArgumentParser import lark from lark.tools import lalr_argparser, build_lalr, make_warnings_comments from lark.grammar import Rule from lark.lexer import TerminalDef _dir = path.dirname(__file__) _larkdir = path.join(_dir, path.pardir) EXTRACT_STANDALONE_FILES = [ 'tools/standalone.py', 'exceptions.py', 'utils.py', 'tree.py', 'visitors.py', 'grammar.py', 'lexer.py', 'common.py', 'parse_tree_builder.py', 'parsers/lalr_analysis.py', 'parsers/lalr_parser_state.py', 'parsers/lalr_parser.py', 'parsers/lalr_interactive_parser.py', 'parser_frontends.py', 'lark.py', 'indenter.py', ] def extract_sections(lines): section = None text = [] sections = defaultdict(list) for line in lines: if line.startswith('###'): if line[3] == '{': section = line[4:].strip() elif line[3] == '}': sections[section] += text section = None text = [] else: raise ValueError(line) elif section: text.append(line) return {name: ''.join(text) for name, text in sections.items()} def strip_docstrings(line_gen): """ Strip comments and docstrings from a file. 
Based on code from: https://stackoverflow.com/questions/1769332/script-to-remove-python-comments-docstrings """ res = [] prev_toktype = token.INDENT last_lineno = -1 last_col = 0 tokgen = tokenize.generate_tokens(line_gen) for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen: if slineno > last_lineno: last_col = 0 if scol > last_col: res.append(" " * (scol - last_col)) if toktype == token.STRING and prev_toktype == token.INDENT: # Docstring res.append("#--") elif toktype == tokenize.COMMENT: # Comment res.append("##\n") else: res.append(ttext) prev_toktype = toktype last_col = ecol last_lineno = elineno return ''.join(res) def gen_standalone(lark_inst, output=None, out=sys.stdout, compress=False): if output is None: output = partial(print, file=out) import pickle, zlib, base64 def compressed_output(obj): s = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL) c = zlib.compress(s) output(repr(base64.b64encode(c))) def output_decompress(name): output('%(name)s = pickle.loads(zlib.decompress(base64.b64decode(%(name)s)))' % locals()) output('# The file was automatically generated by Lark v%s' % lark.__version__) output('__version__ = "%s"' % lark.__version__) output() for i, pyfile in enumerate(EXTRACT_STANDALONE_FILES): with open(os.path.join(_larkdir, pyfile)) as f: code = extract_sections(f)['standalone'] if i: # if not this file code = strip_docstrings(partial(next, iter(code.splitlines(True)))) output(code) data, m = lark_inst.memo_serialize([TerminalDef, Rule]) output('import pickle, zlib, base64') if compress: output('DATA = (') compressed_output(data) output(')') output_decompress('DATA') output('MEMO = (') compressed_output(m) output(')') output_decompress('MEMO') else: output('DATA = (') output(data) output(')') output('MEMO = (') output(m) output(')') output('Shift = 0') output('Reduce = 1') output("def Lark_StandAlone(**kwargs):") output(" return Lark._load_from_dict(DATA, MEMO, **kwargs)") def main(): make_warnings_comments() parser = ArgumentParser(prog="prog='python -m lark.tools.standalone'", description="Lark Stand-alone Generator Tool", parents=[lalr_argparser], epilog='Look at the Lark documentation for more info on the options') parser.add_argument('-c', '--compress', action='store_true', default=0, help="Enable compression") if len(sys.argv) == 1: parser.print_help(sys.stderr) sys.exit(1) ns = parser.parse_args() lark_inst, out = build_lalr(ns) gen_standalone(lark_inst, out=out, compress=ns.compress) ns.out.close() ns.grammar_file.close() if __name__ == '__main__': main() poetry-core-2.1.1/src/poetry/core/_vendor/lark/tree.py000066400000000000000000000205121475444614500227400ustar00rootroot00000000000000import sys from copy import deepcopy from typing import List, Callable, Iterator, Union, Optional, Generic, TypeVar, TYPE_CHECKING if TYPE_CHECKING: from .lexer import TerminalDef, Token try: import rich except ImportError: pass from typing import Literal ###{standalone class Meta: empty: bool line: int column: int start_pos: int end_line: int end_column: int end_pos: int orig_expansion: 'List[TerminalDef]' match_tree: bool def __init__(self): self.empty = True _Leaf_T = TypeVar("_Leaf_T") Branch = Union[_Leaf_T, 'Tree[_Leaf_T]'] class Tree(Generic[_Leaf_T]): """The main tree class. Creates a new tree, and stores "data" and "children" in attributes of the same name. Trees can be hashed and compared. Parameters: data: The name of the rule or alias children: List of matched sub-rules and terminals meta: Line & Column numbers (if ``propagate_positions`` is enabled). 
meta attributes: (line, column, end_line, end_column, start_pos, end_pos, container_line, container_column, container_end_line, container_end_column) container_* attributes consider all symbols, including those that have been inlined in the tree. For example, in the rule 'a: _A B _C', the regular attributes will mark the start and end of B, but the container_* attributes will also include _A and _C in the range. However, rules that contain 'a' will consider it in full, including _A and _C for all attributes. """ data: str children: 'List[Branch[_Leaf_T]]' def __init__(self, data: str, children: 'List[Branch[_Leaf_T]]', meta: Optional[Meta]=None) -> None: self.data = data self.children = children self._meta = meta @property def meta(self) -> Meta: if self._meta is None: self._meta = Meta() return self._meta def __repr__(self): return 'Tree(%r, %r)' % (self.data, self.children) def _pretty_label(self): return self.data def _pretty(self, level, indent_str): yield f'{indent_str*level}{self._pretty_label()}' if len(self.children) == 1 and not isinstance(self.children[0], Tree): yield f'\t{self.children[0]}\n' else: yield '\n' for n in self.children: if isinstance(n, Tree): yield from n._pretty(level+1, indent_str) else: yield f'{indent_str*(level+1)}{n}\n' def pretty(self, indent_str: str=' ') -> str: """Returns an indented string representation of the tree. Great for debugging. """ return ''.join(self._pretty(0, indent_str)) def __rich__(self, parent:Optional['rich.tree.Tree']=None) -> 'rich.tree.Tree': """Returns a tree widget for the 'rich' library. Example: :: from rich import print from lark import Tree tree = Tree('root', ['node1', 'node2']) print(tree) """ return self._rich(parent) def _rich(self, parent): if parent: tree = parent.add(f'[bold]{self.data}[/bold]') else: import rich.tree tree = rich.tree.Tree(self.data) for c in self.children: if isinstance(c, Tree): c._rich(tree) else: tree.add(f'[green]{c}[/green]') return tree def __eq__(self, other): try: return self.data == other.data and self.children == other.children except AttributeError: return False def __ne__(self, other): return not (self == other) def __hash__(self) -> int: return hash((self.data, tuple(self.children))) def iter_subtrees(self) -> 'Iterator[Tree[_Leaf_T]]': """Depth-first iteration. Iterates over all the subtrees, never returning to the same node twice (Lark's parse-tree is actually a DAG). """ queue = [self] subtrees = dict() for subtree in queue: subtrees[id(subtree)] = subtree queue += [c for c in reversed(subtree.children) if isinstance(c, Tree) and id(c) not in subtrees] del queue return reversed(list(subtrees.values())) def iter_subtrees_topdown(self): """Breadth-first iteration. Iterates over all the subtrees, return nodes in order like pretty() does. """ stack = [self] stack_append = stack.append stack_pop = stack.pop while stack: node = stack_pop() if not isinstance(node, Tree): continue yield node for child in reversed(node.children): stack_append(child) def find_pred(self, pred: 'Callable[[Tree[_Leaf_T]], bool]') -> 'Iterator[Tree[_Leaf_T]]': """Returns all nodes of the tree that evaluate pred(node) as true.""" return filter(pred, self.iter_subtrees()) def find_data(self, data: str) -> 'Iterator[Tree[_Leaf_T]]': """Returns all nodes of the tree whose data equals the given data.""" return self.find_pred(lambda t: t.data == data) ###} def expand_kids_by_data(self, *data_values): """Expand (inline) children with any of the given data values. 
Returns True if anything changed""" changed = False for i in range(len(self.children)-1, -1, -1): child = self.children[i] if isinstance(child, Tree) and child.data in data_values: self.children[i:i+1] = child.children changed = True return changed def scan_values(self, pred: 'Callable[[Branch[_Leaf_T]], bool]') -> Iterator[_Leaf_T]: """Return all values in the tree that evaluate pred(value) as true. This can be used to find all the tokens in the tree. Example: >>> all_tokens = tree.scan_values(lambda v: isinstance(v, Token)) """ for c in self.children: if isinstance(c, Tree): for t in c.scan_values(pred): yield t else: if pred(c): yield c def __deepcopy__(self, memo): return type(self)(self.data, deepcopy(self.children, memo), meta=self._meta) def copy(self) -> 'Tree[_Leaf_T]': return type(self)(self.data, self.children) def set(self, data: str, children: 'List[Branch[_Leaf_T]]') -> None: self.data = data self.children = children ParseTree = Tree['Token'] class SlottedTree(Tree): __slots__ = 'data', 'children', 'rule', '_meta' def pydot__tree_to_png(tree: Tree, filename: str, rankdir: 'Literal["TB", "LR", "BT", "RL"]'="LR", **kwargs) -> None: graph = pydot__tree_to_graph(tree, rankdir, **kwargs) graph.write_png(filename) def pydot__tree_to_dot(tree: Tree, filename, rankdir="LR", **kwargs): graph = pydot__tree_to_graph(tree, rankdir, **kwargs) graph.write(filename) def pydot__tree_to_graph(tree: Tree, rankdir="LR", **kwargs): """Creates a colorful image that represents the tree (data+children, without meta) Possible values for `rankdir` are "TB", "LR", "BT", "RL", corresponding to directed graphs drawn from top to bottom, from left to right, from bottom to top, and from right to left, respectively. `kwargs` can be any graph attribute (e. g. `dpi=200`). For a list of possible attributes, see https://www.graphviz.org/doc/info/attrs.html. """ import pydot # type: ignore[import-not-found] graph = pydot.Dot(graph_type='digraph', rankdir=rankdir, **kwargs) i = [0] def new_leaf(leaf): node = pydot.Node(i[0], label=repr(leaf)) i[0] += 1 graph.add_node(node) return node def _to_pydot(subtree): color = hash(subtree.data) & 0xffffff color |= 0x808080 subnodes = [_to_pydot(child) if isinstance(child, Tree) else new_leaf(child) for child in subtree.children] node = pydot.Node(i[0], style="filled", fillcolor="#%x" % color, label=subtree.data) i[0] += 1 graph.add_node(node) for subnode in subnodes: graph.add_edge(pydot.Edge(node, subnode)) return node _to_pydot(tree) return graph poetry-core-2.1.1/src/poetry/core/_vendor/lark/tree_matcher.py000066400000000000000000000135631475444614500244530ustar00rootroot00000000000000"""Tree matcher based on Lark grammar""" import re from collections import defaultdict from . 
import Tree, Token from .common import ParserConf from .parsers import earley from .grammar import Rule, Terminal, NonTerminal def is_discarded_terminal(t): return t.is_term and t.filter_out class _MakeTreeMatch: def __init__(self, name, expansion): self.name = name self.expansion = expansion def __call__(self, args): t = Tree(self.name, args) t.meta.match_tree = True t.meta.orig_expansion = self.expansion return t def _best_from_group(seq, group_key, cmp_key): d = {} for item in seq: key = group_key(item) if key in d: v1 = cmp_key(item) v2 = cmp_key(d[key]) if v2 > v1: d[key] = item else: d[key] = item return list(d.values()) def _best_rules_from_group(rules): rules = _best_from_group(rules, lambda r: r, lambda r: -len(r.expansion)) rules.sort(key=lambda r: len(r.expansion)) return rules def _match(term, token): if isinstance(token, Tree): name, _args = parse_rulename(term.name) return token.data == name elif isinstance(token, Token): return term == Terminal(token.type) assert False, (term, token) def make_recons_rule(origin, expansion, old_expansion): return Rule(origin, expansion, alias=_MakeTreeMatch(origin.name, old_expansion)) def make_recons_rule_to_term(origin, term): return make_recons_rule(origin, [Terminal(term.name)], [term]) def parse_rulename(s): "Parse rule names that may contain a template syntax (like rule{a, b, ...})" name, args_str = re.match(r'(\w+)(?:{(.+)})?', s).groups() args = args_str and [a.strip() for a in args_str.split(',')] return name, args class ChildrenLexer: def __init__(self, children): self.children = children def lex(self, parser_state): return self.children class TreeMatcher: """Match the elements of a tree node, based on an ontology provided by a Lark grammar. Supports templates and inlined rules (`rule{a, b,..}` and `_rule`) Initialize with an instance of Lark. """ def __init__(self, parser): # XXX TODO calling compile twice returns different results! assert not parser.options.maybe_placeholders # XXX TODO: we just ignore the potential existence of a postlexer self.tokens, rules, _extra = parser.grammar.compile(parser.options.start, set()) self.rules_for_root = defaultdict(list) self.rules = list(self._build_recons_rules(rules)) self.rules.reverse() # Choose the best rule from each group of {rule => [rule.alias]}, since we only really need one derivation. 
self.rules = _best_rules_from_group(self.rules) self.parser = parser self._parser_cache = {} def _build_recons_rules(self, rules): "Convert tree-parsing/construction rules to tree-matching rules" expand1s = {r.origin for r in rules if r.options.expand1} aliases = defaultdict(list) for r in rules: if r.alias: aliases[r.origin].append(r.alias) rule_names = {r.origin for r in rules} nonterminals = {sym for sym in rule_names if sym.name.startswith('_') or sym in expand1s or sym in aliases} seen = set() for r in rules: recons_exp = [sym if sym in nonterminals else Terminal(sym.name) for sym in r.expansion if not is_discarded_terminal(sym)] # Skip self-recursive constructs if recons_exp == [r.origin] and r.alias is None: continue sym = NonTerminal(r.alias) if r.alias else r.origin rule = make_recons_rule(sym, recons_exp, r.expansion) if sym in expand1s and len(recons_exp) != 1: self.rules_for_root[sym.name].append(rule) if sym.name not in seen: yield make_recons_rule_to_term(sym, sym) seen.add(sym.name) else: if sym.name.startswith('_') or sym in expand1s: yield rule else: self.rules_for_root[sym.name].append(rule) for origin, rule_aliases in aliases.items(): for alias in rule_aliases: yield make_recons_rule_to_term(origin, NonTerminal(alias)) yield make_recons_rule_to_term(origin, origin) def match_tree(self, tree, rulename): """Match the elements of `tree` to the symbols of rule `rulename`. Parameters: tree (Tree): the tree node to match rulename (str): The expected full rule name (including template args) Returns: Tree: an unreduced tree that matches `rulename` Raises: UnexpectedToken: If no match was found. Note: It's the caller's responsibility to match the tree recursively. """ if rulename: # validate name, _args = parse_rulename(rulename) assert tree.data == name else: rulename = tree.data # TODO: ambiguity? try: parser = self._parser_cache[rulename] except KeyError: rules = self.rules + _best_rules_from_group(self.rules_for_root[rulename]) # TODO pass callbacks through dict, instead of alias? callbacks = {rule: rule.alias for rule in rules} conf = ParserConf(rules, callbacks, [rulename]) parser = earley.Parser(self.parser.lexer_conf, conf, _match, resolve_ambiguity=True) self._parser_cache[rulename] = parser # find a full derivation unreduced_tree = parser.parse(ChildrenLexer(tree.children), rulename) assert unreduced_tree.data == rulename return unreduced_tree poetry-core-2.1.1/src/poetry/core/_vendor/lark/tree_templates.py000066400000000000000000000135071475444614500250240ustar00rootroot00000000000000"""This module defines utilities for matching and translating tree templates. A tree template is a tree that contains nodes that are template variables. """ from typing import Union, Optional, Mapping, Dict, Tuple, Iterator from lark import Tree, Transformer from lark.exceptions import MissingVariableError Branch = Union[Tree[str], str] TreeOrCode = Union[Tree[str], str] MatchResult = Dict[str, Tree] _TEMPLATE_MARKER = '$' class TemplateConf: """Template Configuration Allows customization for different uses of Template. parse() must return a Tree instance. """ def __init__(self, parse=None): self._parse = parse def test_var(self, var: Union[Tree[str], str]) -> Optional[str]: """Given a tree node, if it is a template variable return its name. Otherwise, return None.
This method may be overridden for customization Parameters: var: Tree | str - The tree node to test """ if isinstance(var, str): return _get_template_name(var) if ( isinstance(var, Tree) and var.data == "var" and len(var.children) > 0 and isinstance(var.children[0], str) ): return _get_template_name(var.children[0]) return None def _get_tree(self, template: TreeOrCode) -> Tree[str]: if isinstance(template, str): assert self._parse template = self._parse(template) if not isinstance(template, Tree): raise TypeError("template parser must return a Tree instance") return template def __call__(self, template: Tree[str]) -> 'Template': return Template(template, conf=self) def _match_tree_template(self, template: TreeOrCode, tree: Branch) -> Optional[MatchResult]: """Returns dict of {var: match} if found a match, else None """ template_var = self.test_var(template) if template_var: if not isinstance(tree, Tree): raise TypeError(f"Template variables can only match Tree instances. Not {tree!r}") return {template_var: tree} if isinstance(template, str): if template == tree: return {} return None assert isinstance(template, Tree) and isinstance(tree, Tree), f"template={template} tree={tree}" if template.data == tree.data and len(template.children) == len(tree.children): res = {} for t1, t2 in zip(template.children, tree.children): matches = self._match_tree_template(t1, t2) if matches is None: return None res.update(matches) return res return None class _ReplaceVars(Transformer[str, Tree[str]]): def __init__(self, conf: TemplateConf, vars: Mapping[str, Tree[str]]) -> None: super().__init__() self._conf = conf self._vars = vars def __default__(self, data, children, meta) -> Tree[str]: tree = super().__default__(data, children, meta) var = self._conf.test_var(tree) if var: try: return self._vars[var] except KeyError: raise MissingVariableError(f"No mapping for template variable ({var})") return tree class Template: """Represents a tree template, tied to a specific configuration A tree template is a tree that contains nodes that are template variables. Those variables will match any tree. (future versions may support annotations on the variables, to allow more complex templates) """ def __init__(self, tree: Tree[str], conf: TemplateConf = TemplateConf()): self.conf = conf self.tree = conf._get_tree(tree) def match(self, tree: TreeOrCode) -> Optional[MatchResult]: """Match a tree template to a tree. A tree template without variables will only match ``tree`` if it is equal to the template. Parameters: tree (Tree): The tree to match to the template Returns: Optional[Dict[str, Tree]]: If match is found, returns a dictionary mapping template variable names to their matching tree nodes. If no match was found, returns None. """ tree = self.conf._get_tree(tree) return self.conf._match_tree_template(self.tree, tree) def search(self, tree: TreeOrCode) -> Iterator[Tuple[Tree[str], MatchResult]]: """Search for all occurrences of the tree template inside ``tree``. """ tree = self.conf._get_tree(tree) for subtree in tree.iter_subtrees(): res = self.match(subtree) if res: yield subtree, res def apply_vars(self, vars: Mapping[str, Tree[str]]) -> Tree[str]: """Apply vars to the template tree """ return _ReplaceVars(self.conf, vars).transform(self.tree) def translate(t1: Template, t2: Template, tree: TreeOrCode): """Search tree and translate each occurrence of t1 into t2. 
""" tree = t1.conf._get_tree(tree) # ensure it's a tree, parse if necessary and possible for subtree, vars in t1.search(tree): res = t2.apply_vars(vars) subtree.set(res.data, res.children) return tree class TemplateTranslator: """Utility class for translating a collection of patterns """ def __init__(self, translations: Mapping[Template, Template]): assert all(isinstance(k, Template) and isinstance(v, Template) for k, v in translations.items()) self.translations = translations def translate(self, tree: Tree[str]): for k, v in self.translations.items(): tree = translate(k, v, tree) return tree def _get_template_name(value: str) -> Optional[str]: return value.lstrip(_TEMPLATE_MARKER) if value.startswith(_TEMPLATE_MARKER) else None poetry-core-2.1.1/src/poetry/core/_vendor/lark/utils.py000066400000000000000000000260061475444614500231450ustar00rootroot00000000000000import unicodedata import os from itertools import product from collections import deque from typing import Callable, Iterator, List, Optional, Tuple, Type, TypeVar, Union, Dict, Any, Sequence, Iterable, AbstractSet ###{standalone import sys, re import logging logger: logging.Logger = logging.getLogger("lark") logger.addHandler(logging.StreamHandler()) # Set to highest level, since we have some warnings amongst the code # By default, we should not output any log messages logger.setLevel(logging.CRITICAL) NO_VALUE = object() T = TypeVar("T") def classify(seq: Iterable, key: Optional[Callable] = None, value: Optional[Callable] = None) -> Dict: d: Dict[Any, Any] = {} for item in seq: k = key(item) if (key is not None) else item v = value(item) if (value is not None) else item try: d[k].append(v) except KeyError: d[k] = [v] return d def _deserialize(data: Any, namespace: Dict[str, Any], memo: Dict) -> Any: if isinstance(data, dict): if '__type__' in data: # Object class_ = namespace[data['__type__']] return class_.deserialize(data, memo) elif '@' in data: return memo[data['@']] return {key:_deserialize(value, namespace, memo) for key, value in data.items()} elif isinstance(data, list): return [_deserialize(value, namespace, memo) for value in data] return data _T = TypeVar("_T", bound="Serialize") class Serialize: """Safe-ish serialization interface that doesn't rely on Pickle Attributes: __serialize_fields__ (List[str]): Fields (aka attributes) to serialize. __serialize_namespace__ (list): List of classes that deserialization is allowed to instantiate. Should include all field types that aren't builtin types. 
""" def memo_serialize(self, types_to_memoize: List) -> Any: memo = SerializeMemoizer(types_to_memoize) return self.serialize(memo), memo.serialize() def serialize(self, memo = None) -> Dict[str, Any]: if memo and memo.in_types(self): return {'@': memo.memoized.get(self)} fields = getattr(self, '__serialize_fields__') res = {f: _serialize(getattr(self, f), memo) for f in fields} res['__type__'] = type(self).__name__ if hasattr(self, '_serialize'): self._serialize(res, memo) return res @classmethod def deserialize(cls: Type[_T], data: Dict[str, Any], memo: Dict[int, Any]) -> _T: namespace = getattr(cls, '__serialize_namespace__', []) namespace = {c.__name__:c for c in namespace} fields = getattr(cls, '__serialize_fields__') if '@' in data: return memo[data['@']] inst = cls.__new__(cls) for f in fields: try: setattr(inst, f, _deserialize(data[f], namespace, memo)) except KeyError as e: raise KeyError("Cannot find key for class", cls, e) if hasattr(inst, '_deserialize'): inst._deserialize() return inst class SerializeMemoizer(Serialize): "A version of serialize that memoizes objects to reduce space" __serialize_fields__ = 'memoized', def __init__(self, types_to_memoize: List) -> None: self.types_to_memoize = tuple(types_to_memoize) self.memoized = Enumerator() def in_types(self, value: Serialize) -> bool: return isinstance(value, self.types_to_memoize) def serialize(self) -> Dict[int, Any]: # type: ignore[override] return _serialize(self.memoized.reversed(), None) @classmethod def deserialize(cls, data: Dict[int, Any], namespace: Dict[str, Any], memo: Dict[Any, Any]) -> Dict[int, Any]: # type: ignore[override] return _deserialize(data, namespace, memo) try: import regex _has_regex = True except ImportError: _has_regex = False if sys.version_info >= (3, 11): import re._parser as sre_parse import re._constants as sre_constants else: import sre_parse import sre_constants categ_pattern = re.compile(r'\\p{[A-Za-z_]+}') def get_regexp_width(expr: str) -> Union[Tuple[int, int], List[int]]: if _has_regex: # Since `sre_parse` cannot deal with Unicode categories of the form `\p{Mn}`, we replace these with # a simple letter, which makes no difference as we are only trying to get the possible lengths of the regex # match here below. regexp_final = re.sub(categ_pattern, 'A', expr) else: if re.search(categ_pattern, expr): raise ImportError('`regex` module must be installed in order to use Unicode categories.', expr) regexp_final = expr try: # Fixed in next version (past 0.960) of typeshed return [int(x) for x in sre_parse.parse(regexp_final).getwidth()] except sre_constants.error: if not _has_regex: raise ValueError(expr) else: # sre_parse does not support the new features in regex. 
To avoid failing completely in that case, # we manually test for the most important info (whether the empty string is matched) c = regex.compile(regexp_final) # Python 3.11.7 introduced sre_parse.MAXWIDTH that is used instead of MAXREPEAT # See lark-parser/lark#1376 and python/cpython#109859 MAXWIDTH = getattr(sre_parse, "MAXWIDTH", sre_constants.MAXREPEAT) if c.match('') is None: # MAXREPEAT is a non-picklable subclass of int, therefore needs to be converted to enable caching return 1, int(MAXWIDTH) else: return 0, int(MAXWIDTH) ###} _ID_START = 'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Mn', 'Mc', 'Pc' _ID_CONTINUE = _ID_START + ('Nd', 'Nl',) def _test_unicode_category(s: str, categories: Sequence[str]) -> bool: if len(s) != 1: return all(_test_unicode_category(char, categories) for char in s) return s == '_' or unicodedata.category(s) in categories def is_id_continue(s: str) -> bool: """ Checks if all characters in `s` are alphanumeric characters (Unicode standard, so diacritics, indian vowels, non-latin numbers, etc. all pass). Synonymous with a Python `ID_CONTINUE` identifier. See PEP 3131 for details. """ return _test_unicode_category(s, _ID_CONTINUE) def is_id_start(s: str) -> bool: """ Checks if all characters in `s` are alphabetic characters (Unicode standard, so diacritics, indian vowels, non-latin numbers, etc. all pass). Synonymous with a Python `ID_START` identifier. See PEP 3131 for details. """ return _test_unicode_category(s, _ID_START) def dedup_list(l: Sequence[T]) -> List[T]: """Given a list (l), removes duplicates from the list, preserving the original order of the list. Assumes that the list entries are hashable.""" return list(dict.fromkeys(l)) class Enumerator(Serialize): def __init__(self) -> None: self.enums: Dict[Any, int] = {} def get(self, item) -> int: if item not in self.enums: self.enums[item] = len(self.enums) return self.enums[item] def __len__(self): return len(self.enums) def reversed(self) -> Dict[int, Any]: r = {v: k for k, v in self.enums.items()} assert len(r) == len(self.enums) return r def combine_alternatives(lists): """ Accepts a list of alternatives, and enumerates all their possible concatenations.
Examples: >>> combine_alternatives([range(2), [4,5]]) [[0, 4], [0, 5], [1, 4], [1, 5]] >>> combine_alternatives(["abc", "xy", '$']) [['a', 'x', '$'], ['a', 'y', '$'], ['b', 'x', '$'], ['b', 'y', '$'], ['c', 'x', '$'], ['c', 'y', '$']] >>> combine_alternatives([]) [[]] """ if not lists: return [[]] assert all(l for l in lists), lists return list(product(*lists)) try: import atomicwrites _has_atomicwrites = True except ImportError: _has_atomicwrites = False class FS: exists = staticmethod(os.path.exists) @staticmethod def open(name, mode="r", **kwargs): if _has_atomicwrites and "w" in mode: return atomicwrites.atomic_write(name, mode=mode, overwrite=True, **kwargs) else: return open(name, mode, **kwargs) class fzset(frozenset): def __repr__(self): return '{%s}' % ', '.join(map(repr, self)) def classify_bool(seq: Iterable, pred: Callable) -> Any: false_elems = [] true_elems = [elem for elem in seq if pred(elem) or false_elems.append(elem)] # type: ignore[func-returns-value] return true_elems, false_elems def bfs(initial: Iterable, expand: Callable) -> Iterator: open_q = deque(list(initial)) visited = set(open_q) while open_q: node = open_q.popleft() yield node for next_node in expand(node): if next_node not in visited: visited.add(next_node) open_q.append(next_node) def bfs_all_unique(initial, expand): "bfs, but doesn't keep track of visited (aka seen), because there can be no repetitions" open_q = deque(list(initial)) while open_q: node = open_q.popleft() yield node open_q += expand(node) def _serialize(value: Any, memo: Optional[SerializeMemoizer]) -> Any: if isinstance(value, Serialize): return value.serialize(memo) elif isinstance(value, list): return [_serialize(elem, memo) for elem in value] elif isinstance(value, frozenset): return list(value) # TODO reversible? elif isinstance(value, dict): return {key:_serialize(elem, memo) for key, elem in value.items()} # assert value is None or isinstance(value, (int, float, str, tuple)), value return value def small_factors(n: int, max_factor: int) -> List[Tuple[int, int]]: """ Splits n up into smaller factors and summands <= max_factor. Returns a list of [(a, b), ...] so that the following code returns n: n = 1 for a, b in values: n = n * a + b Currently, we also keep a + b <= max_factor, but that might change """ assert n >= 0 assert max_factor > 2 if n <= max_factor: return [(n, 0)] for a in range(max_factor, 1, -1): r, b = divmod(n, a) if a + b <= max_factor: return small_factors(r, max_factor) + [(a, b)] assert False, "Failed to factorize %s" % n class OrderedSet(AbstractSet[T]): """A minimal OrderedSet implementation, using a dictionary. 
(relies on the dictionary being ordered) """ def __init__(self, items: Iterable[T] =()): self.d = dict.fromkeys(items) def __contains__(self, item: Any) -> bool: return item in self.d def add(self, item: T): self.d[item] = None def __iter__(self) -> Iterator[T]: return iter(self.d) def remove(self, item: T): del self.d[item] def __bool__(self): return bool(self.d) def __len__(self) -> int: return len(self.d) def __repr__(self): return f"{type(self).__name__}({', '.join(map(repr,self))})" poetry-core-2.1.1/src/poetry/core/_vendor/lark/visitors.py000066400000000000000000000516561475444614500237000ustar00rootroot00000000000000from typing import TypeVar, Tuple, List, Callable, Generic, Type, Union, Optional, Any, cast from abc import ABC from .utils import combine_alternatives from .tree import Tree, Branch from .exceptions import VisitError, GrammarError from .lexer import Token ###{standalone from functools import wraps, update_wrapper from inspect import getmembers, getmro _Return_T = TypeVar('_Return_T') _Return_V = TypeVar('_Return_V') _Leaf_T = TypeVar('_Leaf_T') _Leaf_U = TypeVar('_Leaf_U') _R = TypeVar('_R') _FUNC = Callable[..., _Return_T] _DECORATED = Union[_FUNC, type] class _DiscardType: """When the Discard value is returned from a transformer callback, that node is discarded and won't appear in the parent. Note: This feature is disabled when the transformer is provided to Lark using the ``transformer`` keyword (aka Tree-less LALR mode). Example: :: class T(Transformer): def ignore_tree(self, children): return Discard def IGNORE_TOKEN(self, token): return Discard """ def __repr__(self): return "lark.visitors.Discard" Discard = _DiscardType() # Transformers class _Decoratable: "Provides support for decorating methods with @v_args" @classmethod def _apply_v_args(cls, visit_wrapper): mro = getmro(cls) assert mro[0] is cls libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)} for name, value in getmembers(cls): # Make sure the function isn't inherited (unless it's overwritten) if name.startswith('_') or (name in libmembers and name not in cls.__dict__): continue if not callable(value): continue # Skip if v_args already applied (at the function level) if isinstance(cls.__dict__[name], _VArgsWrapper): continue setattr(cls, name, _VArgsWrapper(cls.__dict__[name], visit_wrapper)) return cls def __class_getitem__(cls, _): return cls class Transformer(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): """Transformers work bottom-up (or depth-first), starting with visiting the leaves and working their way up until ending at the root of the tree. For each node visited, the transformer will call the appropriate method (callbacks), according to the node's ``data``, and use the returned value to replace the node, thereby creating a new tree structure. Transformers can be used to implement map & reduce patterns. Because nodes are reduced from leaf to root, at any point the callbacks may assume the children have already been transformed (if applicable). If the transformer cannot find a method with the right name, it will instead call ``__default__``, which by default creates a copy of the node. To discard a node, return Discard (``lark.visitors.Discard``). ``Transformer`` can do anything ``Visitor`` can do, but because it reconstructs the tree, it is slightly less efficient. A transformer without methods essentially performs a non-memoized partial deepcopy. All these classes implement the transformer interface: - ``Transformer`` - Recursively transforms the tree. 
This is the one you probably want. - ``Transformer_InPlace`` - Non-recursive. Changes the tree in-place instead of returning new instances - ``Transformer_InPlaceRecursive`` - Recursive. Changes the tree in-place instead of returning new instances Parameters: visit_tokens (bool, optional): Should the transformer visit tokens in addition to rules. Setting this to ``False`` is slightly faster. Defaults to ``True``. (For processing ignored tokens, use the ``lexer_callbacks`` options) """ __visit_tokens__ = True # For backwards compatibility def __init__(self, visit_tokens: bool=True) -> None: self.__visit_tokens__ = visit_tokens def _call_userfunc(self, tree, new_children=None): # Assumes tree is already transformed children = new_children if new_children is not None else tree.children try: f = getattr(self, tree.data) except AttributeError: return self.__default__(tree.data, children, tree.meta) else: try: wrapper = getattr(f, 'visit_wrapper', None) if wrapper is not None: return f.visit_wrapper(f, tree.data, children, tree.meta) else: return f(children) except GrammarError: raise except Exception as e: raise VisitError(tree.data, tree, e) def _call_userfunc_token(self, token): try: f = getattr(self, token.type) except AttributeError: return self.__default_token__(token) else: try: return f(token) except GrammarError: raise except Exception as e: raise VisitError(token.type, token, e) def _transform_children(self, children): for c in children: if isinstance(c, Tree): res = self._transform_tree(c) elif self.__visit_tokens__ and isinstance(c, Token): res = self._call_userfunc_token(c) else: res = c if res is not Discard: yield res def _transform_tree(self, tree): children = list(self._transform_children(tree.children)) return self._call_userfunc(tree, children) def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: "Transform the given tree, and return the final result" res = list(self._transform_children([tree])) if not res: return None # type: ignore[return-value] assert len(res) == 1 return res[0] def __mul__( self: 'Transformer[_Leaf_T, Tree[_Leaf_U]]', other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V,]]' ) -> 'TransformerChain[_Leaf_T, _Return_V]': """Chain two transformers together, returning a new transformer. """ return TransformerChain(self, other) def __default__(self, data, children, meta): """Default function that is called if there is no attribute matching ``data`` Can be overridden. Defaults to creating a new copy of the tree node (i.e. ``return Tree(data, children, meta)``) """ return Tree(data, children, meta) def __default_token__(self, token): """Default function that is called if there is no attribute matching ``token.type`` Can be overridden. Defaults to returning the token as-is. """ return token def merge_transformers(base_transformer=None, **transformers_to_merge): """Merge a collection of transformers into the base_transformer, each into its own 'namespace'. When called, it will collect the methods from each transformer, and assign them to base_transformer, with their name prefixed with the given keyword, as ``prefix__methodname``. This function is especially useful for processing grammars that import other grammars, thereby creating some of their rules in a 'namespace'. (i.e with a consistent name prefix). In this case, the key for the transformer should match the name of the imported grammar. Parameters: base_transformer (Transformer, optional): The transformer that all other transformers will be added to. 
**transformers_to_merge: Keyword arguments, in the form of ``name_prefix = transformer``. Raises: AttributeError: In case of a name collision in the merged methods Example: :: class TBase(Transformer): def start(self, children): return children[0] + 'bar' class TImportedGrammar(Transformer): def foo(self, children): return "foo" composed_transformer = merge_transformers(TBase(), imported=TImportedGrammar()) t = Tree('start', [ Tree('imported__foo', []) ]) assert composed_transformer.transform(t) == 'foobar' """ if base_transformer is None: base_transformer = Transformer() for prefix, transformer in transformers_to_merge.items(): for method_name in dir(transformer): method = getattr(transformer, method_name) if not callable(method): continue if method_name.startswith("_") or method_name == "transform": continue prefixed_method = prefix + "__" + method_name if hasattr(base_transformer, prefixed_method): raise AttributeError("Cannot merge: method '%s' appears more than once" % prefixed_method) setattr(base_transformer, prefixed_method, method) return base_transformer class InlineTransformer(Transformer): # XXX Deprecated def _call_userfunc(self, tree, new_children=None): # Assumes tree is already transformed children = new_children if new_children is not None else tree.children try: f = getattr(self, tree.data) except AttributeError: return self.__default__(tree.data, children, tree.meta) else: return f(*children) class TransformerChain(Generic[_Leaf_T, _Return_T]): transformers: 'Tuple[Union[Transformer, TransformerChain], ...]' def __init__(self, *transformers: 'Union[Transformer, TransformerChain]') -> None: self.transformers = transformers def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: for t in self.transformers: tree = t.transform(tree) return cast(_Return_T, tree) def __mul__( self: 'TransformerChain[_Leaf_T, Tree[_Leaf_U]]', other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V]]' ) -> 'TransformerChain[_Leaf_T, _Return_V]': return TransformerChain(*self.transformers + (other,)) class Transformer_InPlace(Transformer[_Leaf_T, _Return_T]): """Same as Transformer, but non-recursive, and changes the tree in-place instead of returning new instances Useful for huge trees. Conservative in memory. """ def _transform_tree(self, tree): # Cancel recursion return self._call_userfunc(tree) def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: for subtree in tree.iter_subtrees(): subtree.children = list(self._transform_children(subtree.children)) return self._transform_tree(tree) class Transformer_NonRecursive(Transformer[_Leaf_T, _Return_T]): """Same as Transformer but non-recursive. Like Transformer, it doesn't change the original tree. Useful for huge trees. """ def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: # Tree to postfix rev_postfix = [] q: List[Branch[_Leaf_T]] = [tree] while q: t = q.pop() rev_postfix.append(t) if isinstance(t, Tree): q += t.children # Postfix to tree stack: List = [] for x in reversed(rev_postfix): if isinstance(x, Tree): size = len(x.children) if size: args = stack[-size:] del stack[-size:] else: args = [] res = self._call_userfunc(x, args) if res is not Discard: stack.append(res) elif self.__visit_tokens__ and isinstance(x, Token): res = self._call_userfunc_token(x) if res is not Discard: stack.append(res) else: stack.append(x) result, = stack # We should have only one tree remaining # There are no guarantees on the type of the value produced by calling a user func for a # child will produce. 
This means type system can't statically know that the final result is # _Return_T. As a result a cast is required. return cast(_Return_T, result) class Transformer_InPlaceRecursive(Transformer): "Same as Transformer, recursive, but changes the tree in-place instead of returning new instances" def _transform_tree(self, tree): tree.children = list(self._transform_children(tree.children)) return self._call_userfunc(tree) # Visitors class VisitorBase: def _call_userfunc(self, tree): return getattr(self, tree.data, self.__default__)(tree) def __default__(self, tree): """Default function that is called if there is no attribute matching ``tree.data`` Can be overridden. Defaults to doing nothing. """ return tree def __class_getitem__(cls, _): return cls class Visitor(VisitorBase, ABC, Generic[_Leaf_T]): """Tree visitor, non-recursive (can handle huge trees). Visiting a node calls its methods (provided by the user via inheritance) according to ``tree.data`` """ def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: "Visits the tree, starting with the leaves and finally the root (bottom-up)" for subtree in tree.iter_subtrees(): self._call_userfunc(subtree) return tree def visit_topdown(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: "Visit the tree, starting at the root, and ending at the leaves (top-down)" for subtree in tree.iter_subtrees_topdown(): self._call_userfunc(subtree) return tree class Visitor_Recursive(VisitorBase, Generic[_Leaf_T]): """Bottom-up visitor, recursive. Visiting a node calls its methods (provided by the user via inheritance) according to ``tree.data`` Slightly faster than the non-recursive version. """ def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: "Visits the tree, starting with the leaves and finally the root (bottom-up)" for child in tree.children: if isinstance(child, Tree): self.visit(child) self._call_userfunc(tree) return tree def visit_topdown(self,tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: "Visit the tree, starting at the root, and ending at the leaves (top-down)" self._call_userfunc(tree) for child in tree.children: if isinstance(child, Tree): self.visit_topdown(child) return tree class Interpreter(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): """Interpreter walks the tree starting at the root. Visits the tree, starting with the root and finally the leaves (top-down) For each tree node, it calls its methods (provided by user via inheritance) according to ``tree.data``. Unlike ``Transformer`` and ``Visitor``, the Interpreter doesn't automatically visit its sub-branches. The user has to explicitly call ``visit``, ``visit_children``, or use the ``@visit_children_decor``. This allows the user to implement branching and loops. """ def visit(self, tree: Tree[_Leaf_T]) -> _Return_T: # There are no guarantees on the type of the value produced by calling a user func for a # child will produce. So only annotate the public method and use an internal method when # visiting child trees. 
return self._visit_tree(tree) def _visit_tree(self, tree: Tree[_Leaf_T]): f = getattr(self, tree.data) wrapper = getattr(f, 'visit_wrapper', None) if wrapper is not None: return f.visit_wrapper(f, tree.data, tree.children, tree.meta) else: return f(tree) def visit_children(self, tree: Tree[_Leaf_T]) -> List: return [self._visit_tree(child) if isinstance(child, Tree) else child for child in tree.children] def __getattr__(self, name): return self.__default__ def __default__(self, tree): return self.visit_children(tree) _InterMethod = Callable[[Type[Interpreter], _Return_T], _R] def visit_children_decor(func: _InterMethod) -> _InterMethod: "See Interpreter" @wraps(func) def inner(cls, tree): values = cls.visit_children(tree) return func(cls, values) return inner # Decorators def _apply_v_args(obj, visit_wrapper): try: _apply = obj._apply_v_args except AttributeError: return _VArgsWrapper(obj, visit_wrapper) else: return _apply(visit_wrapper) class _VArgsWrapper: """ A wrapper around a Callable. It delegates `__call__` to the Callable. If the Callable has a `__get__`, that is also delegate and the resulting function is wrapped. Otherwise, we use the original function mirroring the behaviour without a __get__. We also have the visit_wrapper attribute to be used by Transformers. """ base_func: Callable def __init__(self, func: Callable, visit_wrapper: Callable[[Callable, str, list, Any], Any]): if isinstance(func, _VArgsWrapper): func = func.base_func self.base_func = func self.visit_wrapper = visit_wrapper update_wrapper(self, func) def __call__(self, *args, **kwargs): return self.base_func(*args, **kwargs) def __get__(self, instance, owner=None): try: # Use the __get__ attribute of the type instead of the instance # to fully mirror the behavior of getattr g = type(self.base_func).__get__ except AttributeError: return self else: return _VArgsWrapper(g(self.base_func, instance, owner), self.visit_wrapper) def __set_name__(self, owner, name): try: f = type(self.base_func).__set_name__ except AttributeError: return else: f(self.base_func, owner, name) def _vargs_inline(f, _data, children, _meta): return f(*children) def _vargs_meta_inline(f, _data, children, meta): return f(meta, *children) def _vargs_meta(f, _data, children, meta): return f(meta, children) def _vargs_tree(f, data, children, meta): return f(Tree(data, children, meta)) def v_args(inline: bool = False, meta: bool = False, tree: bool = False, wrapper: Optional[Callable] = None) -> Callable[[_DECORATED], _DECORATED]: """A convenience decorator factory for modifying the behavior of user-supplied visitor methods. By default, callback methods of transformers/visitors accept one argument - a list of the node's children. ``v_args`` can modify this behavior. When used on a transformer/visitor class definition, it applies to all the callback methods inside it. ``v_args`` can be applied to a single method, or to an entire class. When applied to both, the options given to the method take precedence. Parameters: inline (bool, optional): Children are provided as ``*args`` instead of a list argument (not recommended for very long lists). meta (bool, optional): Provides two arguments: ``meta`` and ``children`` (instead of just the latter) tree (bool, optional): Provides the entire tree as the argument, instead of the children. wrapper (function, optional): Provide a function to decorate all methods. 
Example: :: @v_args(inline=True) class SolveArith(Transformer): def add(self, left, right): return left + right @v_args(meta=True) def mul(self, meta, children): logger.info(f'mul at line {meta.line}') left, right = children return left * right class ReverseNotation(Transformer_InPlace): @v_args(tree=True) def tree_node(self, tree): tree.children = tree.children[::-1] """ if tree and (meta or inline): raise ValueError("Visitor functions cannot combine 'tree' with 'meta' or 'inline'.") func = None if meta: if inline: func = _vargs_meta_inline else: func = _vargs_meta elif inline: func = _vargs_inline elif tree: func = _vargs_tree if wrapper is not None: if func is not None: raise ValueError("Cannot use 'wrapper' along with 'tree', 'meta' or 'inline'.") func = wrapper def _visitor_args_dec(obj): return _apply_v_args(obj, func) return _visitor_args_dec ###} # --- Visitor Utilities --- class CollapseAmbiguities(Transformer): """ Transforms a tree that contains any number of _ambig nodes into a list of trees, each one containing an unambiguous tree. The length of the resulting list is the product of the length of all _ambig nodes. Warning: This may quickly explode for highly ambiguous trees. """ def _ambig(self, options): return sum(options, []) def __default__(self, data, children_lists, meta): return [Tree(data, children, meta) for children in combine_alternatives(children_lists)] def __default_token__(self, t): return [t] poetry-core-2.1.1/src/poetry/core/_vendor/packaging/000077500000000000000000000000001475444614500224225ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/packaging/LICENSE000066400000000000000000000003051475444614500234250ustar00rootroot00000000000000This software is made available under the terms of *either* of the licenses found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made under the terms of *both* these licenses. poetry-core-2.1.1/src/poetry/core/_vendor/packaging/LICENSE.APACHE000066400000000000000000000236761475444614500243650ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS poetry-core-2.1.1/src/poetry/core/_vendor/packaging/LICENSE.BSD000066400000000000000000000025001475444614500240330ustar00rootroot00000000000000Copyright (c) Donald Stufft and individual contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. poetry-core-2.1.1/src/poetry/core/_vendor/packaging/__init__.py000066400000000000000000000007561475444614500245430ustar00rootroot00000000000000# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
__title__ = "packaging" __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" __version__ = "24.2" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" __license__ = "BSD-2-Clause or Apache-2.0" __copyright__ = f"2014 {__author__}" poetry-core-2.1.1/src/poetry/core/_vendor/packaging/_elffile.py000066400000000000000000000063521475444614500245470ustar00rootroot00000000000000""" ELF file parser. This provides a class ``ELFFile`` that parses an ELF executable in a similar interface to ``ZipFile``. Only the read interface is implemented. Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html """ from __future__ import annotations import enum import os import struct from typing import IO class ELFInvalid(ValueError): pass class EIClass(enum.IntEnum): C32 = 1 C64 = 2 class EIData(enum.IntEnum): Lsb = 1 Msb = 2 class EMachine(enum.IntEnum): I386 = 3 S390 = 22 Arm = 40 X8664 = 62 AArc64 = 183 class ELFFile: """ Representation of an ELF executable. """ def __init__(self, f: IO[bytes]) -> None: self._f = f try: ident = self._read("16B") except struct.error as e: raise ELFInvalid("unable to parse identification") from e magic = bytes(ident[:4]) if magic != b"\x7fELF": raise ELFInvalid(f"invalid magic: {magic!r}") self.capacity = ident[4] # Format for program header (bitness). self.encoding = ident[5] # Data structure encoding (endianness). try: # e_fmt: Format for program header. # p_fmt: Format for section header. # p_idx: Indexes to find p_type, p_offset, and p_filesz. e_fmt, self._p_fmt, self._p_idx = { (1, 1): ("HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB. (2, 1): ("HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB. }[(self.capacity, self.encoding)] except KeyError as e: raise ELFInvalid( f"unrecognized capacity ({self.capacity}) or " f"encoding ({self.encoding})" ) from e try: ( _, self.machine, # Architecture type. _, _, self._e_phoff, # Offset of program header. _, self.flags, # Processor-specific flags. _, self._e_phentsize, # Size of section. self._e_phnum, # Number of sections. ) = self._read(e_fmt) except struct.error as e: raise ELFInvalid("unable to parse machine and section information") from e def _read(self, fmt: str) -> tuple[int, ...]: return struct.unpack(fmt, self._f.read(struct.calcsize(fmt))) @property def interpreter(self) -> str | None: """ The path recorded in the ``PT_INTERP`` section header. """ for index in range(self._e_phnum): self._f.seek(self._e_phoff + self._e_phentsize * index) try: data = self._read(self._p_fmt) except struct.error: continue if data[self._p_idx[0]] != 3: # Not PT_INTERP. continue self._f.seek(data[self._p_idx[1]]) return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0") return None poetry-core-2.1.1/src/poetry/core/_vendor/packaging/_manylinux.py000066400000000000000000000226141475444614500251640ustar00rootroot00000000000000from __future__ import annotations import collections import contextlib import functools import os import re import sys import warnings from typing import Generator, Iterator, NamedTuple, Sequence from ._elffile import EIClass, EIData, ELFFile, EMachine EF_ARM_ABIMASK = 0xFF000000 EF_ARM_ABI_VER5 = 0x05000000 EF_ARM_ABI_FLOAT_HARD = 0x00000400 # `os.PathLike` not a generic type until Python 3.9, so sticking with `str` # as the type for `path` until then. 
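# --- Illustrative sketch (editor's addition, not part of the vendored module):
# the ELFFile class defined in _elffile.py above parses an ELF header and
# exposes the machine type and the PT_INTERP interpreter path. A minimal,
# hedged usage example; it assumes the running interpreter is itself an ELF
# binary (true on most Linux systems), and catches ValueError because
# ELFInvalid (a ValueError subclass) is raised for non-ELF files:
if __name__ == "__main__":  # pragma: no cover
    try:
        with open(sys.executable, "rb") as fp:
            elf = ELFFile(fp)
            # machine is an integer matching the EMachine enum above;
            # interpreter is the dynamic loader path, or None if static.
            print("machine:", elf.machine, "interpreter:", elf.interpreter)
    except ValueError as exc:
        print("not a parseable ELF executable:", exc)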
@contextlib.contextmanager def _parse_elf(path: str) -> Generator[ELFFile | None, None, None]: try: with open(path, "rb") as f: yield ELFFile(f) except (OSError, TypeError, ValueError): yield None def _is_linux_armhf(executable: str) -> bool: # hard-float ABI can be detected from the ELF header of the running # process # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf with _parse_elf(executable) as f: return ( f is not None and f.capacity == EIClass.C32 and f.encoding == EIData.Lsb and f.machine == EMachine.Arm and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD ) def _is_linux_i686(executable: str) -> bool: with _parse_elf(executable) as f: return ( f is not None and f.capacity == EIClass.C32 and f.encoding == EIData.Lsb and f.machine == EMachine.I386 ) def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool: if "armv7l" in archs: return _is_linux_armhf(executable) if "i686" in archs: return _is_linux_i686(executable) allowed_archs = { "x86_64", "aarch64", "ppc64", "ppc64le", "s390x", "loongarch64", "riscv64", } return any(arch in allowed_archs for arch in archs) # If glibc ever changes its major version, we need to know what the last # minor version was, so we can build the complete list of all versions. # For now, guess what the highest minor version might be, assume it will # be 50 for testing. Once this actually happens, update the dictionary # with the actual value. _LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50) class _GLibCVersion(NamedTuple): major: int minor: int def _glibc_version_string_confstr() -> str | None: """ Primary implementation of glibc_version_string using os.confstr. """ # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely # to be broken or missing. This strategy is used in the standard library # platform module. # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 try: # Should be a string like "glibc 2.17". version_string: str | None = os.confstr("CS_GNU_LIBC_VERSION") assert version_string is not None _, version = version_string.rsplit() except (AssertionError, AttributeError, OSError, ValueError): # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... return None return version def _glibc_version_string_ctypes() -> str | None: """ Fallback implementation of glibc_version_string using ctypes. """ try: import ctypes except ImportError: return None # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen # manpage says, "If filename is NULL, then the returned handle is for the # main program". This way we can let the linker do the work to figure out # which libc our process is actually using. # # We must also handle the special case where the executable is not a # dynamically linked executable. This can occur when using musl libc, # for example. In this situation, dlopen() will error, leading to an # OSError. Interestingly, at least in the case of musl, there is no # errno set on the OSError. The single string argument used to construct # OSError comes from libc itself and is therefore not portable to # hard code here. In any case, failure to call dlopen() means we # can proceed, so we bail on our attempt. try: process_namespace = ctypes.CDLL(None) except OSError: return None try: gnu_get_libc_version = process_namespace.gnu_get_libc_version except AttributeError: # Symbol doesn't exist -> therefore, we are not linked to # glibc. 
return None # Call gnu_get_libc_version, which returns a string like "2.5" gnu_get_libc_version.restype = ctypes.c_char_p version_str: str = gnu_get_libc_version() # py2 / py3 compatibility: if not isinstance(version_str, str): version_str = version_str.decode("ascii") return version_str def _glibc_version_string() -> str | None: """Returns glibc version string, or None if not using glibc.""" return _glibc_version_string_confstr() or _glibc_version_string_ctypes() def _parse_glibc_version(version_str: str) -> tuple[int, int]: """Parse glibc version. We use a regexp instead of str.split because we want to discard any random junk that might come after the minor version -- this might happen in patched/forked versions of glibc (e.g. Linaro's version of glibc uses version strings like "2.20-2014.11"). See gh-3588. """ m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) if not m: warnings.warn( f"Expected glibc version with 2 components major.minor," f" got: {version_str}", RuntimeWarning, stacklevel=2, ) return -1, -1 return int(m.group("major")), int(m.group("minor")) @functools.lru_cache def _get_glibc_version() -> tuple[int, int]: version_str = _glibc_version_string() if version_str is None: return (-1, -1) return _parse_glibc_version(version_str) # From PEP 513, PEP 600 def _is_compatible(arch: str, version: _GLibCVersion) -> bool: sys_glibc = _get_glibc_version() if sys_glibc < version: return False # Check for presence of _manylinux module. try: import _manylinux except ImportError: return True if hasattr(_manylinux, "manylinux_compatible"): result = _manylinux.manylinux_compatible(version[0], version[1], arch) if result is not None: return bool(result) return True if version == _GLibCVersion(2, 5): if hasattr(_manylinux, "manylinux1_compatible"): return bool(_manylinux.manylinux1_compatible) if version == _GLibCVersion(2, 12): if hasattr(_manylinux, "manylinux2010_compatible"): return bool(_manylinux.manylinux2010_compatible) if version == _GLibCVersion(2, 17): if hasattr(_manylinux, "manylinux2014_compatible"): return bool(_manylinux.manylinux2014_compatible) return True _LEGACY_MANYLINUX_MAP = { # CentOS 7 w/ glibc 2.17 (PEP 599) (2, 17): "manylinux2014", # CentOS 6 w/ glibc 2.12 (PEP 571) (2, 12): "manylinux2010", # CentOS 5 w/ glibc 2.5 (PEP 513) (2, 5): "manylinux1", } def platform_tags(archs: Sequence[str]) -> Iterator[str]: """Generate manylinux tags compatible to the current platform. :param archs: Sequence of compatible architectures. The first one shall be the closest to the actual architecture and be the part of platform tag after the ``linux_`` prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a prerequisite for the current platform to be manylinux-compatible. :returns: An iterator of compatible manylinux tags. """ if not _have_compatible_abi(sys.executable, archs): return # Oldest glibc to be supported regardless of architecture is (2, 17). too_old_glibc2 = _GLibCVersion(2, 16) if set(archs) & {"x86_64", "i686"}: # On x86/i686 also oldest glibc to be supported is (2, 5). too_old_glibc2 = _GLibCVersion(2, 4) current_glibc = _GLibCVersion(*_get_glibc_version()) glibc_max_list = [current_glibc] # We can assume compatibility across glibc major versions. # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 # # Build a list of maximum glibc versions so that we can # output the canonical list of all glibc from current_glibc # down to too_old_glibc2, including all intermediary versions. 
for glibc_major in range(current_glibc.major - 1, 1, -1): glibc_minor = _LAST_GLIBC_MINOR[glibc_major] glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) for arch in archs: for glibc_max in glibc_max_list: if glibc_max.major == too_old_glibc2.major: min_minor = too_old_glibc2.minor else: # For other glibc major versions oldest supported is (x, 0). min_minor = -1 for glibc_minor in range(glibc_max.minor, min_minor, -1): glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) tag = "manylinux_{}_{}".format(*glibc_version) if _is_compatible(arch, glibc_version): yield f"{tag}_{arch}" # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. if glibc_version in _LEGACY_MANYLINUX_MAP: legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] if _is_compatible(arch, glibc_version): yield f"{legacy_tag}_{arch}" poetry-core-2.1.1/src/poetry/core/_vendor/packaging/_musllinux.py000066400000000000000000000052061475444614500251760ustar00rootroot00000000000000"""PEP 656 support. This module implements logic to detect if the currently running Python is linked against musl, and what musl version is used. """ from __future__ import annotations import functools import re import subprocess import sys from typing import Iterator, NamedTuple, Sequence from ._elffile import ELFFile class _MuslVersion(NamedTuple): major: int minor: int def _parse_musl_version(output: str) -> _MuslVersion | None: lines = [n for n in (n.strip() for n in output.splitlines()) if n] if len(lines) < 2 or lines[0][:4] != "musl": return None m = re.match(r"Version (\d+)\.(\d+)", lines[1]) if not m: return None return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) @functools.lru_cache def _get_musl_version(executable: str) -> _MuslVersion | None: """Detect currently-running musl runtime version. This is done by checking the specified executable's dynamic linking information, and invoking the loader to parse its output for a version string. If the loader is musl, the output would be something like:: musl libc (x86_64) Version 1.2.2 Dynamic Program Loader """ try: with open(executable, "rb") as f: ld = ELFFile(f).interpreter except (OSError, TypeError, ValueError): return None if ld is None or "musl" not in ld: return None proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True) return _parse_musl_version(proc.stderr) def platform_tags(archs: Sequence[str]) -> Iterator[str]: """Generate musllinux tags compatible to the current platform. :param archs: Sequence of compatible architectures. The first one shall be the closest to the actual architecture and be the part of platform tag after the ``linux_`` prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a prerequisite for the current platform to be musllinux-compatible. :returns: An iterator of compatible musllinux tags. """ sys_musl = _get_musl_version(sys.executable) if sys_musl is None: # Python not dynamically linked against musl. 
return for arch in archs: for minor in range(sys_musl.minor, -1, -1): yield f"musllinux_{sys_musl.major}_{minor}_{arch}" if __name__ == "__main__": # pragma: no cover import sysconfig plat = sysconfig.get_platform() assert plat.startswith("linux-"), "not linux" print("plat:", plat) print("musl:", _get_musl_version(sys.executable)) print("tags:", end=" ") for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): print(t, end="\n ") poetry-core-2.1.1/src/poetry/core/_vendor/packaging/_parser.py000066400000000000000000000237741475444614500244440ustar00rootroot00000000000000"""Handwritten parser of dependency specifiers. The docstring for each __parse_* function contains EBNF-inspired grammar representing the implementation. """ from __future__ import annotations import ast from typing import NamedTuple, Sequence, Tuple, Union from ._tokenizer import DEFAULT_RULES, Tokenizer class Node: def __init__(self, value: str) -> None: self.value = value def __str__(self) -> str: return self.value def __repr__(self) -> str: return f"<{self.__class__.__name__}('{self}')>" def serialize(self) -> str: raise NotImplementedError class Variable(Node): def serialize(self) -> str: return str(self) class Value(Node): def serialize(self) -> str: return f'"{self}"' class Op(Node): def serialize(self) -> str: return str(self) MarkerVar = Union[Variable, Value] MarkerItem = Tuple[MarkerVar, Op, MarkerVar] MarkerAtom = Union[MarkerItem, Sequence["MarkerAtom"]] MarkerList = Sequence[Union["MarkerList", MarkerAtom, str]] class ParsedRequirement(NamedTuple): name: str url: str extras: list[str] specifier: str marker: MarkerList | None # -------------------------------------------------------------------------------------- # Recursive descent parser for dependency specifier # -------------------------------------------------------------------------------------- def parse_requirement(source: str) -> ParsedRequirement: return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: """ requirement = WS? IDENTIFIER WS? extras WS? requirement_details """ tokenizer.consume("WS") name_token = tokenizer.expect( "IDENTIFIER", expected="package name at the start of dependency specifier" ) name = name_token.text tokenizer.consume("WS") extras = _parse_extras(tokenizer) tokenizer.consume("WS") url, specifier, marker = _parse_requirement_details(tokenizer) tokenizer.expect("END", expected="end of dependency specifier") return ParsedRequirement(name, url, extras, specifier, marker) def _parse_requirement_details( tokenizer: Tokenizer, ) -> tuple[str, str, MarkerList | None]: """ requirement_details = AT URL (WS requirement_marker?)? | specifier WS? (requirement_marker)? """ specifier = "" url = "" marker = None if tokenizer.check("AT"): tokenizer.read() tokenizer.consume("WS") url_start = tokenizer.position url = tokenizer.expect("URL", expected="URL after @").text if tokenizer.check("END", peek=True): return (url, specifier, marker) tokenizer.expect("WS", expected="whitespace after URL") # The input might end after whitespace. 
if tokenizer.check("END", peek=True): return (url, specifier, marker) marker = _parse_requirement_marker( tokenizer, span_start=url_start, after="URL and whitespace" ) else: specifier_start = tokenizer.position specifier = _parse_specifier(tokenizer) tokenizer.consume("WS") if tokenizer.check("END", peek=True): return (url, specifier, marker) marker = _parse_requirement_marker( tokenizer, span_start=specifier_start, after=( "version specifier" if specifier else "name and no valid version specifier" ), ) return (url, specifier, marker) def _parse_requirement_marker( tokenizer: Tokenizer, *, span_start: int, after: str ) -> MarkerList: """ requirement_marker = SEMICOLON marker WS? """ if not tokenizer.check("SEMICOLON"): tokenizer.raise_syntax_error( f"Expected end or semicolon (after {after})", span_start=span_start, ) tokenizer.read() marker = _parse_marker(tokenizer) tokenizer.consume("WS") return marker def _parse_extras(tokenizer: Tokenizer) -> list[str]: """ extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? """ if not tokenizer.check("LEFT_BRACKET", peek=True): return [] with tokenizer.enclosing_tokens( "LEFT_BRACKET", "RIGHT_BRACKET", around="extras", ): tokenizer.consume("WS") extras = _parse_extras_list(tokenizer) tokenizer.consume("WS") return extras def _parse_extras_list(tokenizer: Tokenizer) -> list[str]: """ extras_list = identifier (wsp* ',' wsp* identifier)* """ extras: list[str] = [] if not tokenizer.check("IDENTIFIER"): return extras extras.append(tokenizer.read().text) while True: tokenizer.consume("WS") if tokenizer.check("IDENTIFIER", peek=True): tokenizer.raise_syntax_error("Expected comma between extra names") elif not tokenizer.check("COMMA"): break tokenizer.read() tokenizer.consume("WS") extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") extras.append(extra_token.text) return extras def _parse_specifier(tokenizer: Tokenizer) -> str: """ specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS | WS? version_many WS? """ with tokenizer.enclosing_tokens( "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", around="version specifier", ): tokenizer.consume("WS") parsed_specifiers = _parse_version_many(tokenizer) tokenizer.consume("WS") return parsed_specifiers def _parse_version_many(tokenizer: Tokenizer) -> str: """ version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? 
""" parsed_specifiers = "" while tokenizer.check("SPECIFIER"): span_start = tokenizer.position parsed_specifiers += tokenizer.read().text if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): tokenizer.raise_syntax_error( ".* suffix can only be used with `==` or `!=` operators", span_start=span_start, span_end=tokenizer.position + 1, ) if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): tokenizer.raise_syntax_error( "Local version label can only be used with `==` or `!=` operators", span_start=span_start, span_end=tokenizer.position, ) tokenizer.consume("WS") if not tokenizer.check("COMMA"): break parsed_specifiers += tokenizer.read().text tokenizer.consume("WS") return parsed_specifiers # -------------------------------------------------------------------------------------- # Recursive descent parser for marker expression # -------------------------------------------------------------------------------------- def parse_marker(source: str) -> MarkerList: return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES)) def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList: retval = _parse_marker(tokenizer) tokenizer.expect("END", expected="end of marker expression") return retval def _parse_marker(tokenizer: Tokenizer) -> MarkerList: """ marker = marker_atom (BOOLOP marker_atom)+ """ expression = [_parse_marker_atom(tokenizer)] while tokenizer.check("BOOLOP"): token = tokenizer.read() expr_right = _parse_marker_atom(tokenizer) expression.extend((token.text, expr_right)) return expression def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: """ marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? | WS? marker_item WS? """ tokenizer.consume("WS") if tokenizer.check("LEFT_PARENTHESIS", peek=True): with tokenizer.enclosing_tokens( "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", around="marker expression", ): tokenizer.consume("WS") marker: MarkerAtom = _parse_marker(tokenizer) tokenizer.consume("WS") else: marker = _parse_marker_item(tokenizer) tokenizer.consume("WS") return marker def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: """ marker_item = WS? marker_var WS? marker_op WS? marker_var WS? 
""" tokenizer.consume("WS") marker_var_left = _parse_marker_var(tokenizer) tokenizer.consume("WS") marker_op = _parse_marker_op(tokenizer) tokenizer.consume("WS") marker_var_right = _parse_marker_var(tokenizer) tokenizer.consume("WS") return (marker_var_left, marker_op, marker_var_right) def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: """ marker_var = VARIABLE | QUOTED_STRING """ if tokenizer.check("VARIABLE"): return process_env_var(tokenizer.read().text.replace(".", "_")) elif tokenizer.check("QUOTED_STRING"): return process_python_str(tokenizer.read().text) else: tokenizer.raise_syntax_error( message="Expected a marker variable or quoted string" ) def process_env_var(env_var: str) -> Variable: if env_var in ("platform_python_implementation", "python_implementation"): return Variable("platform_python_implementation") else: return Variable(env_var) def process_python_str(python_str: str) -> Value: value = ast.literal_eval(python_str) return Value(str(value)) def _parse_marker_op(tokenizer: Tokenizer) -> Op: """ marker_op = IN | NOT IN | OP """ if tokenizer.check("IN"): tokenizer.read() return Op("in") elif tokenizer.check("NOT"): tokenizer.read() tokenizer.expect("WS", expected="whitespace after 'not'") tokenizer.expect("IN", expected="'in' after 'not'") return Op("not in") elif tokenizer.check("OP"): return Op(tokenizer.read().text) else: return tokenizer.raise_syntax_error( "Expected marker operator, one of " "<=, <, !=, ==, >=, >, ~=, ===, in, not in" ) poetry-core-2.1.1/src/poetry/core/_vendor/packaging/_structures.py000066400000000000000000000026271475444614500253650ustar00rootroot00000000000000# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
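# --- Illustrative sketch (editor's addition, not part of the vendored file):
# the recursive-descent parser in _parser.py above turns a PEP 508 dependency
# specifier into a ParsedRequirement named tuple (name, url, extras,
# specifier, marker). A minimal, hedged usage example; the top-level
# "packaging" import path is an assumption here, since this copy is vendored
# under poetry.core._vendor:
if __name__ == "__main__":  # pragma: no cover
    from packaging._parser import parse_requirement

    req = parse_requirement(
        'requests[security] >=2.8.1, ==2.8.* ; python_version < "3.13"'
    )
    # name='requests', extras=['security'], specifier='>=2.8.1,==2.8.*',
    # marker=[(Variable('python_version'), Op('<'), Value('3.13'))]
    print(req.name, req.extras, req.specifier, req.marker)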
class InfinityType: def __repr__(self) -> str: return "Infinity" def __hash__(self) -> int: return hash(repr(self)) def __lt__(self, other: object) -> bool: return False def __le__(self, other: object) -> bool: return False def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) def __gt__(self, other: object) -> bool: return True def __ge__(self, other: object) -> bool: return True def __neg__(self: object) -> "NegativeInfinityType": return NegativeInfinity Infinity = InfinityType() class NegativeInfinityType: def __repr__(self) -> str: return "-Infinity" def __hash__(self) -> int: return hash(repr(self)) def __lt__(self, other: object) -> bool: return True def __le__(self, other: object) -> bool: return True def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) def __gt__(self, other: object) -> bool: return False def __ge__(self, other: object) -> bool: return False def __neg__(self: object) -> InfinityType: return Infinity NegativeInfinity = NegativeInfinityType() poetry-core-2.1.1/src/poetry/core/_vendor/packaging/_tokenizer.py000066400000000000000000000122311475444614500251440ustar00rootroot00000000000000from __future__ import annotations import contextlib import re from dataclasses import dataclass from typing import Iterator, NoReturn from .specifiers import Specifier @dataclass class Token: name: str text: str position: int class ParserSyntaxError(Exception): """The provided source text could not be parsed correctly.""" def __init__( self, message: str, *, source: str, span: tuple[int, int], ) -> None: self.span = span self.message = message self.source = source super().__init__() def __str__(self) -> str: marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" return "\n ".join([self.message, self.source, marker]) DEFAULT_RULES: dict[str, str | re.Pattern[str]] = { "LEFT_PARENTHESIS": r"\(", "RIGHT_PARENTHESIS": r"\)", "LEFT_BRACKET": r"\[", "RIGHT_BRACKET": r"\]", "SEMICOLON": r";", "COMMA": r",", "QUOTED_STRING": re.compile( r""" ( ('[^']*') | ("[^"]*") ) """, re.VERBOSE, ), "OP": r"(===|==|~=|!=|<=|>=|<|>)", "BOOLOP": r"\b(or|and)\b", "IN": r"\bin\b", "NOT": r"\bnot\b", "VARIABLE": re.compile( r""" \b( python_version |python_full_version |os[._]name |sys[._]platform |platform_(release|system) |platform[._](version|machine|python_implementation) |python_implementation |implementation_(name|version) |extra )\b """, re.VERBOSE, ), "SPECIFIER": re.compile( Specifier._operator_regex_str + Specifier._version_regex_str, re.VERBOSE | re.IGNORECASE, ), "AT": r"\@", "URL": r"[^ \t]+", "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", "VERSION_PREFIX_TRAIL": r"\.\*", "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", "WS": r"[ \t]+", "END": r"$", } class Tokenizer: """Context-sensitive token parsing. Provides methods to examine the input stream to check whether the next token matches. """ def __init__( self, source: str, *, rules: dict[str, str | re.Pattern[str]], ) -> None: self.source = source self.rules: dict[str, re.Pattern[str]] = { name: re.compile(pattern) for name, pattern in rules.items() } self.next_token: Token | None = None self.position = 0 def consume(self, name: str) -> None: """Move beyond provided token name, if at current position.""" if self.check(name): self.read() def check(self, name: str, *, peek: bool = False) -> bool: """Check whether the next token has the provided name. By default, if the check succeeds, the token *must* be read before another check. 
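For example (illustrative addition for this document): after a successful `tokenizer.check("OP")`, calling `tokenizer.read()` returns and consumes the matched operator token.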
If `peek` is set to `True`, the token is not loaded and would need to be checked again. """ assert ( self.next_token is None ), f"Cannot check for {name!r}, already have {self.next_token!r}" assert name in self.rules, f"Unknown token name: {name!r}" expression = self.rules[name] match = expression.match(self.source, self.position) if match is None: return False if not peek: self.next_token = Token(name, match[0], self.position) return True def expect(self, name: str, *, expected: str) -> Token: """Expect a certain token name next, failing with a syntax error otherwise. On success, the token *is* read and returned. """ if not self.check(name): raise self.raise_syntax_error(f"Expected {expected}") return self.read() def read(self) -> Token: """Consume the next token and return it.""" token = self.next_token assert token is not None self.position += len(token.text) self.next_token = None return token def raise_syntax_error( self, message: str, *, span_start: int | None = None, span_end: int | None = None, ) -> NoReturn: """Raise ParserSyntaxError at the given position.""" span = ( self.position if span_start is None else span_start, self.position if span_end is None else span_end, ) raise ParserSyntaxError( message, source=self.source, span=span, ) @contextlib.contextmanager def enclosing_tokens( self, open_token: str, close_token: str, *, around: str ) -> Iterator[None]: if self.check(open_token): open_position = self.position self.read() else: open_position = None yield if open_position is None: return if not self.check(close_token): self.raise_syntax_error( f"Expected matching {close_token} for {open_token}, after {around}", span_start=open_position, ) self.read() poetry-core-2.1.1/src/poetry/core/_vendor/packaging/licenses/000077500000000000000000000000001475444614500242275ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/packaging/licenses/__init__.py000066400000000000000000000131231475444614500263400ustar00rootroot00000000000000####################################################################################### # # Adapted from: # https://github.com/pypa/hatch/blob/5352e44/backend/src/hatchling/licenses/parse.py # # MIT License # # Copyright (c) 2017-present Ofek Lev # # Permission is hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be included in all copies # or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A # PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF # CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE # OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # # With additional allowance of arbitrary `LicenseRef-` identifiers, not just # `LicenseRef-Public-Domain` and `LicenseRef-Proprietary`.
# ####################################################################################### from __future__ import annotations import re from typing import NewType, cast from packaging.licenses._spdx import EXCEPTIONS, LICENSES __all__ = [ "NormalizedLicenseExpression", "InvalidLicenseExpression", "canonicalize_license_expression", ] license_ref_allowed = re.compile("^[A-Za-z0-9.-]*$") NormalizedLicenseExpression = NewType("NormalizedLicenseExpression", str) class InvalidLicenseExpression(ValueError): """Raised when a license-expression string is invalid >>> canonicalize_license_expression("invalid") Traceback (most recent call last): ... packaging.licenses.InvalidLicenseExpression: Invalid license expression: 'invalid' """ def canonicalize_license_expression( raw_license_expression: str, ) -> NormalizedLicenseExpression: if not raw_license_expression: message = f"Invalid license expression: {raw_license_expression!r}" raise InvalidLicenseExpression(message) # Pad any parentheses so tokenization can be achieved by merely splitting on # whitespace. license_expression = raw_license_expression.replace("(", " ( ").replace(")", " ) ") licenseref_prefix = "LicenseRef-" license_refs = { ref.lower(): "LicenseRef-" + ref[len(licenseref_prefix) :] for ref in license_expression.split() if ref.lower().startswith(licenseref_prefix.lower()) } # Normalize to lower case so we can look up licenses/exceptions # and so boolean operators are Python-compatible. license_expression = license_expression.lower() tokens = license_expression.split() # Rather than implementing boolean logic, we create an expression that Python can # parse. Everything that is not involved with the grammar itself is treated as # `False` and the expression should evaluate as such. python_tokens = [] for token in tokens: if token not in {"or", "and", "with", "(", ")"}: python_tokens.append("False") elif token == "with": python_tokens.append("or") elif token == "(" and python_tokens and python_tokens[-1] not in {"or", "and"}: message = f"Invalid license expression: {raw_license_expression!r}" raise InvalidLicenseExpression(message) else: python_tokens.append(token) python_expression = " ".join(python_tokens) try: invalid = eval(python_expression, globals(), locals()) except Exception: invalid = True if invalid is not False: message = f"Invalid license expression: {raw_license_expression!r}" raise InvalidLicenseExpression(message) from None # Take a final pass to check for unknown licenses/exceptions. 
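# Illustrative example (added for this document): at this point an input such as
# "mit or apache-2.0 with classpath-exception-2.0" has passed validation, and the
# loop below maps each token onto its canonical SPDX id, producing
# "MIT OR Apache-2.0 WITH Classpath-exception-2.0".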
normalized_tokens = [] for token in tokens: if token in {"or", "and", "with", "(", ")"}: normalized_tokens.append(token.upper()) continue if normalized_tokens and normalized_tokens[-1] == "WITH": if token not in EXCEPTIONS: message = f"Unknown license exception: {token!r}" raise InvalidLicenseExpression(message) normalized_tokens.append(EXCEPTIONS[token]["id"]) else: if token.endswith("+"): final_token = token[:-1] suffix = "+" else: final_token = token suffix = "" if final_token.startswith("licenseref-"): if not license_ref_allowed.match(final_token): message = f"Invalid licenseref: {final_token!r}" raise InvalidLicenseExpression(message) normalized_tokens.append(license_refs[final_token] + suffix) else: if final_token not in LICENSES: message = f"Unknown license: {final_token!r}" raise InvalidLicenseExpression(message) normalized_tokens.append(LICENSES[final_token]["id"] + suffix) normalized_expression = " ".join(normalized_tokens) return cast( NormalizedLicenseExpression, normalized_expression.replace("( ", "(").replace(" )", ")"), ) poetry-core-2.1.1/src/poetry/core/_vendor/packaging/licenses/_spdx.py000066400000000000000000001364161475444614500257310ustar00rootroot00000000000000 from __future__ import annotations from typing import TypedDict class SPDXLicense(TypedDict): id: str deprecated: bool class SPDXException(TypedDict): id: str deprecated: bool VERSION = '3.25.0' LICENSES: dict[str, SPDXLicense] = { '0bsd': {'id': '0BSD', 'deprecated': False}, '3d-slicer-1.0': {'id': '3D-Slicer-1.0', 'deprecated': False}, 'aal': {'id': 'AAL', 'deprecated': False}, 'abstyles': {'id': 'Abstyles', 'deprecated': False}, 'adacore-doc': {'id': 'AdaCore-doc', 'deprecated': False}, 'adobe-2006': {'id': 'Adobe-2006', 'deprecated': False}, 'adobe-display-postscript': {'id': 'Adobe-Display-PostScript', 'deprecated': False}, 'adobe-glyph': {'id': 'Adobe-Glyph', 'deprecated': False}, 'adobe-utopia': {'id': 'Adobe-Utopia', 'deprecated': False}, 'adsl': {'id': 'ADSL', 'deprecated': False}, 'afl-1.1': {'id': 'AFL-1.1', 'deprecated': False}, 'afl-1.2': {'id': 'AFL-1.2', 'deprecated': False}, 'afl-2.0': {'id': 'AFL-2.0', 'deprecated': False}, 'afl-2.1': {'id': 'AFL-2.1', 'deprecated': False}, 'afl-3.0': {'id': 'AFL-3.0', 'deprecated': False}, 'afmparse': {'id': 'Afmparse', 'deprecated': False}, 'agpl-1.0': {'id': 'AGPL-1.0', 'deprecated': True}, 'agpl-1.0-only': {'id': 'AGPL-1.0-only', 'deprecated': False}, 'agpl-1.0-or-later': {'id': 'AGPL-1.0-or-later', 'deprecated': False}, 'agpl-3.0': {'id': 'AGPL-3.0', 'deprecated': True}, 'agpl-3.0-only': {'id': 'AGPL-3.0-only', 'deprecated': False}, 'agpl-3.0-or-later': {'id': 'AGPL-3.0-or-later', 'deprecated': False}, 'aladdin': {'id': 'Aladdin', 'deprecated': False}, 'amd-newlib': {'id': 'AMD-newlib', 'deprecated': False}, 'amdplpa': {'id': 'AMDPLPA', 'deprecated': False}, 'aml': {'id': 'AML', 'deprecated': False}, 'aml-glslang': {'id': 'AML-glslang', 'deprecated': False}, 'ampas': {'id': 'AMPAS', 'deprecated': False}, 'antlr-pd': {'id': 'ANTLR-PD', 'deprecated': False}, 'antlr-pd-fallback': {'id': 'ANTLR-PD-fallback', 'deprecated': False}, 'any-osi': {'id': 'any-OSI', 'deprecated': False}, 'apache-1.0': {'id': 'Apache-1.0', 'deprecated': False}, 'apache-1.1': {'id': 'Apache-1.1', 'deprecated': False}, 'apache-2.0': {'id': 'Apache-2.0', 'deprecated': False}, 'apafml': {'id': 'APAFML', 'deprecated': False}, 'apl-1.0': {'id': 'APL-1.0', 'deprecated': False}, 'app-s2p': {'id': 'App-s2p', 'deprecated': False}, 'apsl-1.0': {'id': 'APSL-1.0', 'deprecated': False}, 'apsl-1.1': 
{'id': 'APSL-1.1', 'deprecated': False}, 'apsl-1.2': {'id': 'APSL-1.2', 'deprecated': False}, 'apsl-2.0': {'id': 'APSL-2.0', 'deprecated': False}, 'arphic-1999': {'id': 'Arphic-1999', 'deprecated': False}, 'artistic-1.0': {'id': 'Artistic-1.0', 'deprecated': False}, 'artistic-1.0-cl8': {'id': 'Artistic-1.0-cl8', 'deprecated': False}, 'artistic-1.0-perl': {'id': 'Artistic-1.0-Perl', 'deprecated': False}, 'artistic-2.0': {'id': 'Artistic-2.0', 'deprecated': False}, 'aswf-digital-assets-1.0': {'id': 'ASWF-Digital-Assets-1.0', 'deprecated': False}, 'aswf-digital-assets-1.1': {'id': 'ASWF-Digital-Assets-1.1', 'deprecated': False}, 'baekmuk': {'id': 'Baekmuk', 'deprecated': False}, 'bahyph': {'id': 'Bahyph', 'deprecated': False}, 'barr': {'id': 'Barr', 'deprecated': False}, 'bcrypt-solar-designer': {'id': 'bcrypt-Solar-Designer', 'deprecated': False}, 'beerware': {'id': 'Beerware', 'deprecated': False}, 'bitstream-charter': {'id': 'Bitstream-Charter', 'deprecated': False}, 'bitstream-vera': {'id': 'Bitstream-Vera', 'deprecated': False}, 'bittorrent-1.0': {'id': 'BitTorrent-1.0', 'deprecated': False}, 'bittorrent-1.1': {'id': 'BitTorrent-1.1', 'deprecated': False}, 'blessing': {'id': 'blessing', 'deprecated': False}, 'blueoak-1.0.0': {'id': 'BlueOak-1.0.0', 'deprecated': False}, 'boehm-gc': {'id': 'Boehm-GC', 'deprecated': False}, 'borceux': {'id': 'Borceux', 'deprecated': False}, 'brian-gladman-2-clause': {'id': 'Brian-Gladman-2-Clause', 'deprecated': False}, 'brian-gladman-3-clause': {'id': 'Brian-Gladman-3-Clause', 'deprecated': False}, 'bsd-1-clause': {'id': 'BSD-1-Clause', 'deprecated': False}, 'bsd-2-clause': {'id': 'BSD-2-Clause', 'deprecated': False}, 'bsd-2-clause-darwin': {'id': 'BSD-2-Clause-Darwin', 'deprecated': False}, 'bsd-2-clause-first-lines': {'id': 'BSD-2-Clause-first-lines', 'deprecated': False}, 'bsd-2-clause-freebsd': {'id': 'BSD-2-Clause-FreeBSD', 'deprecated': True}, 'bsd-2-clause-netbsd': {'id': 'BSD-2-Clause-NetBSD', 'deprecated': True}, 'bsd-2-clause-patent': {'id': 'BSD-2-Clause-Patent', 'deprecated': False}, 'bsd-2-clause-views': {'id': 'BSD-2-Clause-Views', 'deprecated': False}, 'bsd-3-clause': {'id': 'BSD-3-Clause', 'deprecated': False}, 'bsd-3-clause-acpica': {'id': 'BSD-3-Clause-acpica', 'deprecated': False}, 'bsd-3-clause-attribution': {'id': 'BSD-3-Clause-Attribution', 'deprecated': False}, 'bsd-3-clause-clear': {'id': 'BSD-3-Clause-Clear', 'deprecated': False}, 'bsd-3-clause-flex': {'id': 'BSD-3-Clause-flex', 'deprecated': False}, 'bsd-3-clause-hp': {'id': 'BSD-3-Clause-HP', 'deprecated': False}, 'bsd-3-clause-lbnl': {'id': 'BSD-3-Clause-LBNL', 'deprecated': False}, 'bsd-3-clause-modification': {'id': 'BSD-3-Clause-Modification', 'deprecated': False}, 'bsd-3-clause-no-military-license': {'id': 'BSD-3-Clause-No-Military-License', 'deprecated': False}, 'bsd-3-clause-no-nuclear-license': {'id': 'BSD-3-Clause-No-Nuclear-License', 'deprecated': False}, 'bsd-3-clause-no-nuclear-license-2014': {'id': 'BSD-3-Clause-No-Nuclear-License-2014', 'deprecated': False}, 'bsd-3-clause-no-nuclear-warranty': {'id': 'BSD-3-Clause-No-Nuclear-Warranty', 'deprecated': False}, 'bsd-3-clause-open-mpi': {'id': 'BSD-3-Clause-Open-MPI', 'deprecated': False}, 'bsd-3-clause-sun': {'id': 'BSD-3-Clause-Sun', 'deprecated': False}, 'bsd-4-clause': {'id': 'BSD-4-Clause', 'deprecated': False}, 'bsd-4-clause-shortened': {'id': 'BSD-4-Clause-Shortened', 'deprecated': False}, 'bsd-4-clause-uc': {'id': 'BSD-4-Clause-UC', 'deprecated': False}, 'bsd-4.3reno': {'id': 'BSD-4.3RENO', 'deprecated': False}, 
'bsd-4.3tahoe': {'id': 'BSD-4.3TAHOE', 'deprecated': False}, 'bsd-advertising-acknowledgement': {'id': 'BSD-Advertising-Acknowledgement', 'deprecated': False}, 'bsd-attribution-hpnd-disclaimer': {'id': 'BSD-Attribution-HPND-disclaimer', 'deprecated': False}, 'bsd-inferno-nettverk': {'id': 'BSD-Inferno-Nettverk', 'deprecated': False}, 'bsd-protection': {'id': 'BSD-Protection', 'deprecated': False}, 'bsd-source-beginning-file': {'id': 'BSD-Source-beginning-file', 'deprecated': False}, 'bsd-source-code': {'id': 'BSD-Source-Code', 'deprecated': False}, 'bsd-systemics': {'id': 'BSD-Systemics', 'deprecated': False}, 'bsd-systemics-w3works': {'id': 'BSD-Systemics-W3Works', 'deprecated': False}, 'bsl-1.0': {'id': 'BSL-1.0', 'deprecated': False}, 'busl-1.1': {'id': 'BUSL-1.1', 'deprecated': False}, 'bzip2-1.0.5': {'id': 'bzip2-1.0.5', 'deprecated': True}, 'bzip2-1.0.6': {'id': 'bzip2-1.0.6', 'deprecated': False}, 'c-uda-1.0': {'id': 'C-UDA-1.0', 'deprecated': False}, 'cal-1.0': {'id': 'CAL-1.0', 'deprecated': False}, 'cal-1.0-combined-work-exception': {'id': 'CAL-1.0-Combined-Work-Exception', 'deprecated': False}, 'caldera': {'id': 'Caldera', 'deprecated': False}, 'caldera-no-preamble': {'id': 'Caldera-no-preamble', 'deprecated': False}, 'catharon': {'id': 'Catharon', 'deprecated': False}, 'catosl-1.1': {'id': 'CATOSL-1.1', 'deprecated': False}, 'cc-by-1.0': {'id': 'CC-BY-1.0', 'deprecated': False}, 'cc-by-2.0': {'id': 'CC-BY-2.0', 'deprecated': False}, 'cc-by-2.5': {'id': 'CC-BY-2.5', 'deprecated': False}, 'cc-by-2.5-au': {'id': 'CC-BY-2.5-AU', 'deprecated': False}, 'cc-by-3.0': {'id': 'CC-BY-3.0', 'deprecated': False}, 'cc-by-3.0-at': {'id': 'CC-BY-3.0-AT', 'deprecated': False}, 'cc-by-3.0-au': {'id': 'CC-BY-3.0-AU', 'deprecated': False}, 'cc-by-3.0-de': {'id': 'CC-BY-3.0-DE', 'deprecated': False}, 'cc-by-3.0-igo': {'id': 'CC-BY-3.0-IGO', 'deprecated': False}, 'cc-by-3.0-nl': {'id': 'CC-BY-3.0-NL', 'deprecated': False}, 'cc-by-3.0-us': {'id': 'CC-BY-3.0-US', 'deprecated': False}, 'cc-by-4.0': {'id': 'CC-BY-4.0', 'deprecated': False}, 'cc-by-nc-1.0': {'id': 'CC-BY-NC-1.0', 'deprecated': False}, 'cc-by-nc-2.0': {'id': 'CC-BY-NC-2.0', 'deprecated': False}, 'cc-by-nc-2.5': {'id': 'CC-BY-NC-2.5', 'deprecated': False}, 'cc-by-nc-3.0': {'id': 'CC-BY-NC-3.0', 'deprecated': False}, 'cc-by-nc-3.0-de': {'id': 'CC-BY-NC-3.0-DE', 'deprecated': False}, 'cc-by-nc-4.0': {'id': 'CC-BY-NC-4.0', 'deprecated': False}, 'cc-by-nc-nd-1.0': {'id': 'CC-BY-NC-ND-1.0', 'deprecated': False}, 'cc-by-nc-nd-2.0': {'id': 'CC-BY-NC-ND-2.0', 'deprecated': False}, 'cc-by-nc-nd-2.5': {'id': 'CC-BY-NC-ND-2.5', 'deprecated': False}, 'cc-by-nc-nd-3.0': {'id': 'CC-BY-NC-ND-3.0', 'deprecated': False}, 'cc-by-nc-nd-3.0-de': {'id': 'CC-BY-NC-ND-3.0-DE', 'deprecated': False}, 'cc-by-nc-nd-3.0-igo': {'id': 'CC-BY-NC-ND-3.0-IGO', 'deprecated': False}, 'cc-by-nc-nd-4.0': {'id': 'CC-BY-NC-ND-4.0', 'deprecated': False}, 'cc-by-nc-sa-1.0': {'id': 'CC-BY-NC-SA-1.0', 'deprecated': False}, 'cc-by-nc-sa-2.0': {'id': 'CC-BY-NC-SA-2.0', 'deprecated': False}, 'cc-by-nc-sa-2.0-de': {'id': 'CC-BY-NC-SA-2.0-DE', 'deprecated': False}, 'cc-by-nc-sa-2.0-fr': {'id': 'CC-BY-NC-SA-2.0-FR', 'deprecated': False}, 'cc-by-nc-sa-2.0-uk': {'id': 'CC-BY-NC-SA-2.0-UK', 'deprecated': False}, 'cc-by-nc-sa-2.5': {'id': 'CC-BY-NC-SA-2.5', 'deprecated': False}, 'cc-by-nc-sa-3.0': {'id': 'CC-BY-NC-SA-3.0', 'deprecated': False}, 'cc-by-nc-sa-3.0-de': {'id': 'CC-BY-NC-SA-3.0-DE', 'deprecated': False}, 'cc-by-nc-sa-3.0-igo': {'id': 'CC-BY-NC-SA-3.0-IGO', 'deprecated': False}, 
'cc-by-nc-sa-4.0': {'id': 'CC-BY-NC-SA-4.0', 'deprecated': False}, 'cc-by-nd-1.0': {'id': 'CC-BY-ND-1.0', 'deprecated': False}, 'cc-by-nd-2.0': {'id': 'CC-BY-ND-2.0', 'deprecated': False}, 'cc-by-nd-2.5': {'id': 'CC-BY-ND-2.5', 'deprecated': False}, 'cc-by-nd-3.0': {'id': 'CC-BY-ND-3.0', 'deprecated': False}, 'cc-by-nd-3.0-de': {'id': 'CC-BY-ND-3.0-DE', 'deprecated': False}, 'cc-by-nd-4.0': {'id': 'CC-BY-ND-4.0', 'deprecated': False}, 'cc-by-sa-1.0': {'id': 'CC-BY-SA-1.0', 'deprecated': False}, 'cc-by-sa-2.0': {'id': 'CC-BY-SA-2.0', 'deprecated': False}, 'cc-by-sa-2.0-uk': {'id': 'CC-BY-SA-2.0-UK', 'deprecated': False}, 'cc-by-sa-2.1-jp': {'id': 'CC-BY-SA-2.1-JP', 'deprecated': False}, 'cc-by-sa-2.5': {'id': 'CC-BY-SA-2.5', 'deprecated': False}, 'cc-by-sa-3.0': {'id': 'CC-BY-SA-3.0', 'deprecated': False}, 'cc-by-sa-3.0-at': {'id': 'CC-BY-SA-3.0-AT', 'deprecated': False}, 'cc-by-sa-3.0-de': {'id': 'CC-BY-SA-3.0-DE', 'deprecated': False}, 'cc-by-sa-3.0-igo': {'id': 'CC-BY-SA-3.0-IGO', 'deprecated': False}, 'cc-by-sa-4.0': {'id': 'CC-BY-SA-4.0', 'deprecated': False}, 'cc-pddc': {'id': 'CC-PDDC', 'deprecated': False}, 'cc0-1.0': {'id': 'CC0-1.0', 'deprecated': False}, 'cddl-1.0': {'id': 'CDDL-1.0', 'deprecated': False}, 'cddl-1.1': {'id': 'CDDL-1.1', 'deprecated': False}, 'cdl-1.0': {'id': 'CDL-1.0', 'deprecated': False}, 'cdla-permissive-1.0': {'id': 'CDLA-Permissive-1.0', 'deprecated': False}, 'cdla-permissive-2.0': {'id': 'CDLA-Permissive-2.0', 'deprecated': False}, 'cdla-sharing-1.0': {'id': 'CDLA-Sharing-1.0', 'deprecated': False}, 'cecill-1.0': {'id': 'CECILL-1.0', 'deprecated': False}, 'cecill-1.1': {'id': 'CECILL-1.1', 'deprecated': False}, 'cecill-2.0': {'id': 'CECILL-2.0', 'deprecated': False}, 'cecill-2.1': {'id': 'CECILL-2.1', 'deprecated': False}, 'cecill-b': {'id': 'CECILL-B', 'deprecated': False}, 'cecill-c': {'id': 'CECILL-C', 'deprecated': False}, 'cern-ohl-1.1': {'id': 'CERN-OHL-1.1', 'deprecated': False}, 'cern-ohl-1.2': {'id': 'CERN-OHL-1.2', 'deprecated': False}, 'cern-ohl-p-2.0': {'id': 'CERN-OHL-P-2.0', 'deprecated': False}, 'cern-ohl-s-2.0': {'id': 'CERN-OHL-S-2.0', 'deprecated': False}, 'cern-ohl-w-2.0': {'id': 'CERN-OHL-W-2.0', 'deprecated': False}, 'cfitsio': {'id': 'CFITSIO', 'deprecated': False}, 'check-cvs': {'id': 'check-cvs', 'deprecated': False}, 'checkmk': {'id': 'checkmk', 'deprecated': False}, 'clartistic': {'id': 'ClArtistic', 'deprecated': False}, 'clips': {'id': 'Clips', 'deprecated': False}, 'cmu-mach': {'id': 'CMU-Mach', 'deprecated': False}, 'cmu-mach-nodoc': {'id': 'CMU-Mach-nodoc', 'deprecated': False}, 'cnri-jython': {'id': 'CNRI-Jython', 'deprecated': False}, 'cnri-python': {'id': 'CNRI-Python', 'deprecated': False}, 'cnri-python-gpl-compatible': {'id': 'CNRI-Python-GPL-Compatible', 'deprecated': False}, 'coil-1.0': {'id': 'COIL-1.0', 'deprecated': False}, 'community-spec-1.0': {'id': 'Community-Spec-1.0', 'deprecated': False}, 'condor-1.1': {'id': 'Condor-1.1', 'deprecated': False}, 'copyleft-next-0.3.0': {'id': 'copyleft-next-0.3.0', 'deprecated': False}, 'copyleft-next-0.3.1': {'id': 'copyleft-next-0.3.1', 'deprecated': False}, 'cornell-lossless-jpeg': {'id': 'Cornell-Lossless-JPEG', 'deprecated': False}, 'cpal-1.0': {'id': 'CPAL-1.0', 'deprecated': False}, 'cpl-1.0': {'id': 'CPL-1.0', 'deprecated': False}, 'cpol-1.02': {'id': 'CPOL-1.02', 'deprecated': False}, 'cronyx': {'id': 'Cronyx', 'deprecated': False}, 'crossword': {'id': 'Crossword', 'deprecated': False}, 'crystalstacker': {'id': 'CrystalStacker', 'deprecated': False}, 'cua-opl-1.0': 
{'id': 'CUA-OPL-1.0', 'deprecated': False}, 'cube': {'id': 'Cube', 'deprecated': False}, 'curl': {'id': 'curl', 'deprecated': False}, 'cve-tou': {'id': 'cve-tou', 'deprecated': False}, 'd-fsl-1.0': {'id': 'D-FSL-1.0', 'deprecated': False}, 'dec-3-clause': {'id': 'DEC-3-Clause', 'deprecated': False}, 'diffmark': {'id': 'diffmark', 'deprecated': False}, 'dl-de-by-2.0': {'id': 'DL-DE-BY-2.0', 'deprecated': False}, 'dl-de-zero-2.0': {'id': 'DL-DE-ZERO-2.0', 'deprecated': False}, 'doc': {'id': 'DOC', 'deprecated': False}, 'docbook-schema': {'id': 'DocBook-Schema', 'deprecated': False}, 'docbook-xml': {'id': 'DocBook-XML', 'deprecated': False}, 'dotseqn': {'id': 'Dotseqn', 'deprecated': False}, 'drl-1.0': {'id': 'DRL-1.0', 'deprecated': False}, 'drl-1.1': {'id': 'DRL-1.1', 'deprecated': False}, 'dsdp': {'id': 'DSDP', 'deprecated': False}, 'dtoa': {'id': 'dtoa', 'deprecated': False}, 'dvipdfm': {'id': 'dvipdfm', 'deprecated': False}, 'ecl-1.0': {'id': 'ECL-1.0', 'deprecated': False}, 'ecl-2.0': {'id': 'ECL-2.0', 'deprecated': False}, 'ecos-2.0': {'id': 'eCos-2.0', 'deprecated': True}, 'efl-1.0': {'id': 'EFL-1.0', 'deprecated': False}, 'efl-2.0': {'id': 'EFL-2.0', 'deprecated': False}, 'egenix': {'id': 'eGenix', 'deprecated': False}, 'elastic-2.0': {'id': 'Elastic-2.0', 'deprecated': False}, 'entessa': {'id': 'Entessa', 'deprecated': False}, 'epics': {'id': 'EPICS', 'deprecated': False}, 'epl-1.0': {'id': 'EPL-1.0', 'deprecated': False}, 'epl-2.0': {'id': 'EPL-2.0', 'deprecated': False}, 'erlpl-1.1': {'id': 'ErlPL-1.1', 'deprecated': False}, 'etalab-2.0': {'id': 'etalab-2.0', 'deprecated': False}, 'eudatagrid': {'id': 'EUDatagrid', 'deprecated': False}, 'eupl-1.0': {'id': 'EUPL-1.0', 'deprecated': False}, 'eupl-1.1': {'id': 'EUPL-1.1', 'deprecated': False}, 'eupl-1.2': {'id': 'EUPL-1.2', 'deprecated': False}, 'eurosym': {'id': 'Eurosym', 'deprecated': False}, 'fair': {'id': 'Fair', 'deprecated': False}, 'fbm': {'id': 'FBM', 'deprecated': False}, 'fdk-aac': {'id': 'FDK-AAC', 'deprecated': False}, 'ferguson-twofish': {'id': 'Ferguson-Twofish', 'deprecated': False}, 'frameworx-1.0': {'id': 'Frameworx-1.0', 'deprecated': False}, 'freebsd-doc': {'id': 'FreeBSD-DOC', 'deprecated': False}, 'freeimage': {'id': 'FreeImage', 'deprecated': False}, 'fsfap': {'id': 'FSFAP', 'deprecated': False}, 'fsfap-no-warranty-disclaimer': {'id': 'FSFAP-no-warranty-disclaimer', 'deprecated': False}, 'fsful': {'id': 'FSFUL', 'deprecated': False}, 'fsfullr': {'id': 'FSFULLR', 'deprecated': False}, 'fsfullrwd': {'id': 'FSFULLRWD', 'deprecated': False}, 'ftl': {'id': 'FTL', 'deprecated': False}, 'furuseth': {'id': 'Furuseth', 'deprecated': False}, 'fwlw': {'id': 'fwlw', 'deprecated': False}, 'gcr-docs': {'id': 'GCR-docs', 'deprecated': False}, 'gd': {'id': 'GD', 'deprecated': False}, 'gfdl-1.1': {'id': 'GFDL-1.1', 'deprecated': True}, 'gfdl-1.1-invariants-only': {'id': 'GFDL-1.1-invariants-only', 'deprecated': False}, 'gfdl-1.1-invariants-or-later': {'id': 'GFDL-1.1-invariants-or-later', 'deprecated': False}, 'gfdl-1.1-no-invariants-only': {'id': 'GFDL-1.1-no-invariants-only', 'deprecated': False}, 'gfdl-1.1-no-invariants-or-later': {'id': 'GFDL-1.1-no-invariants-or-later', 'deprecated': False}, 'gfdl-1.1-only': {'id': 'GFDL-1.1-only', 'deprecated': False}, 'gfdl-1.1-or-later': {'id': 'GFDL-1.1-or-later', 'deprecated': False}, 'gfdl-1.2': {'id': 'GFDL-1.2', 'deprecated': True}, 'gfdl-1.2-invariants-only': {'id': 'GFDL-1.2-invariants-only', 'deprecated': False}, 'gfdl-1.2-invariants-or-later': {'id': 
'GFDL-1.2-invariants-or-later', 'deprecated': False}, 'gfdl-1.2-no-invariants-only': {'id': 'GFDL-1.2-no-invariants-only', 'deprecated': False}, 'gfdl-1.2-no-invariants-or-later': {'id': 'GFDL-1.2-no-invariants-or-later', 'deprecated': False}, 'gfdl-1.2-only': {'id': 'GFDL-1.2-only', 'deprecated': False}, 'gfdl-1.2-or-later': {'id': 'GFDL-1.2-or-later', 'deprecated': False}, 'gfdl-1.3': {'id': 'GFDL-1.3', 'deprecated': True}, 'gfdl-1.3-invariants-only': {'id': 'GFDL-1.3-invariants-only', 'deprecated': False}, 'gfdl-1.3-invariants-or-later': {'id': 'GFDL-1.3-invariants-or-later', 'deprecated': False}, 'gfdl-1.3-no-invariants-only': {'id': 'GFDL-1.3-no-invariants-only', 'deprecated': False}, 'gfdl-1.3-no-invariants-or-later': {'id': 'GFDL-1.3-no-invariants-or-later', 'deprecated': False}, 'gfdl-1.3-only': {'id': 'GFDL-1.3-only', 'deprecated': False}, 'gfdl-1.3-or-later': {'id': 'GFDL-1.3-or-later', 'deprecated': False}, 'giftware': {'id': 'Giftware', 'deprecated': False}, 'gl2ps': {'id': 'GL2PS', 'deprecated': False}, 'glide': {'id': 'Glide', 'deprecated': False}, 'glulxe': {'id': 'Glulxe', 'deprecated': False}, 'glwtpl': {'id': 'GLWTPL', 'deprecated': False}, 'gnuplot': {'id': 'gnuplot', 'deprecated': False}, 'gpl-1.0': {'id': 'GPL-1.0', 'deprecated': True}, 'gpl-1.0+': {'id': 'GPL-1.0+', 'deprecated': True}, 'gpl-1.0-only': {'id': 'GPL-1.0-only', 'deprecated': False}, 'gpl-1.0-or-later': {'id': 'GPL-1.0-or-later', 'deprecated': False}, 'gpl-2.0': {'id': 'GPL-2.0', 'deprecated': True}, 'gpl-2.0+': {'id': 'GPL-2.0+', 'deprecated': True}, 'gpl-2.0-only': {'id': 'GPL-2.0-only', 'deprecated': False}, 'gpl-2.0-or-later': {'id': 'GPL-2.0-or-later', 'deprecated': False}, 'gpl-2.0-with-autoconf-exception': {'id': 'GPL-2.0-with-autoconf-exception', 'deprecated': True}, 'gpl-2.0-with-bison-exception': {'id': 'GPL-2.0-with-bison-exception', 'deprecated': True}, 'gpl-2.0-with-classpath-exception': {'id': 'GPL-2.0-with-classpath-exception', 'deprecated': True}, 'gpl-2.0-with-font-exception': {'id': 'GPL-2.0-with-font-exception', 'deprecated': True}, 'gpl-2.0-with-gcc-exception': {'id': 'GPL-2.0-with-GCC-exception', 'deprecated': True}, 'gpl-3.0': {'id': 'GPL-3.0', 'deprecated': True}, 'gpl-3.0+': {'id': 'GPL-3.0+', 'deprecated': True}, 'gpl-3.0-only': {'id': 'GPL-3.0-only', 'deprecated': False}, 'gpl-3.0-or-later': {'id': 'GPL-3.0-or-later', 'deprecated': False}, 'gpl-3.0-with-autoconf-exception': {'id': 'GPL-3.0-with-autoconf-exception', 'deprecated': True}, 'gpl-3.0-with-gcc-exception': {'id': 'GPL-3.0-with-GCC-exception', 'deprecated': True}, 'graphics-gems': {'id': 'Graphics-Gems', 'deprecated': False}, 'gsoap-1.3b': {'id': 'gSOAP-1.3b', 'deprecated': False}, 'gtkbook': {'id': 'gtkbook', 'deprecated': False}, 'gutmann': {'id': 'Gutmann', 'deprecated': False}, 'haskellreport': {'id': 'HaskellReport', 'deprecated': False}, 'hdparm': {'id': 'hdparm', 'deprecated': False}, 'hidapi': {'id': 'HIDAPI', 'deprecated': False}, 'hippocratic-2.1': {'id': 'Hippocratic-2.1', 'deprecated': False}, 'hp-1986': {'id': 'HP-1986', 'deprecated': False}, 'hp-1989': {'id': 'HP-1989', 'deprecated': False}, 'hpnd': {'id': 'HPND', 'deprecated': False}, 'hpnd-dec': {'id': 'HPND-DEC', 'deprecated': False}, 'hpnd-doc': {'id': 'HPND-doc', 'deprecated': False}, 'hpnd-doc-sell': {'id': 'HPND-doc-sell', 'deprecated': False}, 'hpnd-export-us': {'id': 'HPND-export-US', 'deprecated': False}, 'hpnd-export-us-acknowledgement': {'id': 'HPND-export-US-acknowledgement', 'deprecated': False}, 'hpnd-export-us-modify': {'id': 
'HPND-export-US-modify', 'deprecated': False}, 'hpnd-export2-us': {'id': 'HPND-export2-US', 'deprecated': False}, 'hpnd-fenneberg-livingston': {'id': 'HPND-Fenneberg-Livingston', 'deprecated': False}, 'hpnd-inria-imag': {'id': 'HPND-INRIA-IMAG', 'deprecated': False}, 'hpnd-intel': {'id': 'HPND-Intel', 'deprecated': False}, 'hpnd-kevlin-henney': {'id': 'HPND-Kevlin-Henney', 'deprecated': False}, 'hpnd-markus-kuhn': {'id': 'HPND-Markus-Kuhn', 'deprecated': False}, 'hpnd-merchantability-variant': {'id': 'HPND-merchantability-variant', 'deprecated': False}, 'hpnd-mit-disclaimer': {'id': 'HPND-MIT-disclaimer', 'deprecated': False}, 'hpnd-netrek': {'id': 'HPND-Netrek', 'deprecated': False}, 'hpnd-pbmplus': {'id': 'HPND-Pbmplus', 'deprecated': False}, 'hpnd-sell-mit-disclaimer-xserver': {'id': 'HPND-sell-MIT-disclaimer-xserver', 'deprecated': False}, 'hpnd-sell-regexpr': {'id': 'HPND-sell-regexpr', 'deprecated': False}, 'hpnd-sell-variant': {'id': 'HPND-sell-variant', 'deprecated': False}, 'hpnd-sell-variant-mit-disclaimer': {'id': 'HPND-sell-variant-MIT-disclaimer', 'deprecated': False}, 'hpnd-sell-variant-mit-disclaimer-rev': {'id': 'HPND-sell-variant-MIT-disclaimer-rev', 'deprecated': False}, 'hpnd-uc': {'id': 'HPND-UC', 'deprecated': False}, 'hpnd-uc-export-us': {'id': 'HPND-UC-export-US', 'deprecated': False}, 'htmltidy': {'id': 'HTMLTIDY', 'deprecated': False}, 'ibm-pibs': {'id': 'IBM-pibs', 'deprecated': False}, 'icu': {'id': 'ICU', 'deprecated': False}, 'iec-code-components-eula': {'id': 'IEC-Code-Components-EULA', 'deprecated': False}, 'ijg': {'id': 'IJG', 'deprecated': False}, 'ijg-short': {'id': 'IJG-short', 'deprecated': False}, 'imagemagick': {'id': 'ImageMagick', 'deprecated': False}, 'imatix': {'id': 'iMatix', 'deprecated': False}, 'imlib2': {'id': 'Imlib2', 'deprecated': False}, 'info-zip': {'id': 'Info-ZIP', 'deprecated': False}, 'inner-net-2.0': {'id': 'Inner-Net-2.0', 'deprecated': False}, 'intel': {'id': 'Intel', 'deprecated': False}, 'intel-acpi': {'id': 'Intel-ACPI', 'deprecated': False}, 'interbase-1.0': {'id': 'Interbase-1.0', 'deprecated': False}, 'ipa': {'id': 'IPA', 'deprecated': False}, 'ipl-1.0': {'id': 'IPL-1.0', 'deprecated': False}, 'isc': {'id': 'ISC', 'deprecated': False}, 'isc-veillard': {'id': 'ISC-Veillard', 'deprecated': False}, 'jam': {'id': 'Jam', 'deprecated': False}, 'jasper-2.0': {'id': 'JasPer-2.0', 'deprecated': False}, 'jpl-image': {'id': 'JPL-image', 'deprecated': False}, 'jpnic': {'id': 'JPNIC', 'deprecated': False}, 'json': {'id': 'JSON', 'deprecated': False}, 'kastrup': {'id': 'Kastrup', 'deprecated': False}, 'kazlib': {'id': 'Kazlib', 'deprecated': False}, 'knuth-ctan': {'id': 'Knuth-CTAN', 'deprecated': False}, 'lal-1.2': {'id': 'LAL-1.2', 'deprecated': False}, 'lal-1.3': {'id': 'LAL-1.3', 'deprecated': False}, 'latex2e': {'id': 'Latex2e', 'deprecated': False}, 'latex2e-translated-notice': {'id': 'Latex2e-translated-notice', 'deprecated': False}, 'leptonica': {'id': 'Leptonica', 'deprecated': False}, 'lgpl-2.0': {'id': 'LGPL-2.0', 'deprecated': True}, 'lgpl-2.0+': {'id': 'LGPL-2.0+', 'deprecated': True}, 'lgpl-2.0-only': {'id': 'LGPL-2.0-only', 'deprecated': False}, 'lgpl-2.0-or-later': {'id': 'LGPL-2.0-or-later', 'deprecated': False}, 'lgpl-2.1': {'id': 'LGPL-2.1', 'deprecated': True}, 'lgpl-2.1+': {'id': 'LGPL-2.1+', 'deprecated': True}, 'lgpl-2.1-only': {'id': 'LGPL-2.1-only', 'deprecated': False}, 'lgpl-2.1-or-later': {'id': 'LGPL-2.1-or-later', 'deprecated': False}, 'lgpl-3.0': {'id': 'LGPL-3.0', 'deprecated': True}, 'lgpl-3.0+': {'id': 
'LGPL-3.0+', 'deprecated': True}, 'lgpl-3.0-only': {'id': 'LGPL-3.0-only', 'deprecated': False}, 'lgpl-3.0-or-later': {'id': 'LGPL-3.0-or-later', 'deprecated': False}, 'lgpllr': {'id': 'LGPLLR', 'deprecated': False}, 'libpng': {'id': 'Libpng', 'deprecated': False}, 'libpng-2.0': {'id': 'libpng-2.0', 'deprecated': False}, 'libselinux-1.0': {'id': 'libselinux-1.0', 'deprecated': False}, 'libtiff': {'id': 'libtiff', 'deprecated': False}, 'libutil-david-nugent': {'id': 'libutil-David-Nugent', 'deprecated': False}, 'liliq-p-1.1': {'id': 'LiLiQ-P-1.1', 'deprecated': False}, 'liliq-r-1.1': {'id': 'LiLiQ-R-1.1', 'deprecated': False}, 'liliq-rplus-1.1': {'id': 'LiLiQ-Rplus-1.1', 'deprecated': False}, 'linux-man-pages-1-para': {'id': 'Linux-man-pages-1-para', 'deprecated': False}, 'linux-man-pages-copyleft': {'id': 'Linux-man-pages-copyleft', 'deprecated': False}, 'linux-man-pages-copyleft-2-para': {'id': 'Linux-man-pages-copyleft-2-para', 'deprecated': False}, 'linux-man-pages-copyleft-var': {'id': 'Linux-man-pages-copyleft-var', 'deprecated': False}, 'linux-openib': {'id': 'Linux-OpenIB', 'deprecated': False}, 'loop': {'id': 'LOOP', 'deprecated': False}, 'lpd-document': {'id': 'LPD-document', 'deprecated': False}, 'lpl-1.0': {'id': 'LPL-1.0', 'deprecated': False}, 'lpl-1.02': {'id': 'LPL-1.02', 'deprecated': False}, 'lppl-1.0': {'id': 'LPPL-1.0', 'deprecated': False}, 'lppl-1.1': {'id': 'LPPL-1.1', 'deprecated': False}, 'lppl-1.2': {'id': 'LPPL-1.2', 'deprecated': False}, 'lppl-1.3a': {'id': 'LPPL-1.3a', 'deprecated': False}, 'lppl-1.3c': {'id': 'LPPL-1.3c', 'deprecated': False}, 'lsof': {'id': 'lsof', 'deprecated': False}, 'lucida-bitmap-fonts': {'id': 'Lucida-Bitmap-Fonts', 'deprecated': False}, 'lzma-sdk-9.11-to-9.20': {'id': 'LZMA-SDK-9.11-to-9.20', 'deprecated': False}, 'lzma-sdk-9.22': {'id': 'LZMA-SDK-9.22', 'deprecated': False}, 'mackerras-3-clause': {'id': 'Mackerras-3-Clause', 'deprecated': False}, 'mackerras-3-clause-acknowledgment': {'id': 'Mackerras-3-Clause-acknowledgment', 'deprecated': False}, 'magaz': {'id': 'magaz', 'deprecated': False}, 'mailprio': {'id': 'mailprio', 'deprecated': False}, 'makeindex': {'id': 'MakeIndex', 'deprecated': False}, 'martin-birgmeier': {'id': 'Martin-Birgmeier', 'deprecated': False}, 'mcphee-slideshow': {'id': 'McPhee-slideshow', 'deprecated': False}, 'metamail': {'id': 'metamail', 'deprecated': False}, 'minpack': {'id': 'Minpack', 'deprecated': False}, 'miros': {'id': 'MirOS', 'deprecated': False}, 'mit': {'id': 'MIT', 'deprecated': False}, 'mit-0': {'id': 'MIT-0', 'deprecated': False}, 'mit-advertising': {'id': 'MIT-advertising', 'deprecated': False}, 'mit-cmu': {'id': 'MIT-CMU', 'deprecated': False}, 'mit-enna': {'id': 'MIT-enna', 'deprecated': False}, 'mit-feh': {'id': 'MIT-feh', 'deprecated': False}, 'mit-festival': {'id': 'MIT-Festival', 'deprecated': False}, 'mit-khronos-old': {'id': 'MIT-Khronos-old', 'deprecated': False}, 'mit-modern-variant': {'id': 'MIT-Modern-Variant', 'deprecated': False}, 'mit-open-group': {'id': 'MIT-open-group', 'deprecated': False}, 'mit-testregex': {'id': 'MIT-testregex', 'deprecated': False}, 'mit-wu': {'id': 'MIT-Wu', 'deprecated': False}, 'mitnfa': {'id': 'MITNFA', 'deprecated': False}, 'mmixware': {'id': 'MMIXware', 'deprecated': False}, 'motosoto': {'id': 'Motosoto', 'deprecated': False}, 'mpeg-ssg': {'id': 'MPEG-SSG', 'deprecated': False}, 'mpi-permissive': {'id': 'mpi-permissive', 'deprecated': False}, 'mpich2': {'id': 'mpich2', 'deprecated': False}, 'mpl-1.0': {'id': 'MPL-1.0', 'deprecated': False}, 'mpl-1.1': 
{'id': 'MPL-1.1', 'deprecated': False}, 'mpl-2.0': {'id': 'MPL-2.0', 'deprecated': False}, 'mpl-2.0-no-copyleft-exception': {'id': 'MPL-2.0-no-copyleft-exception', 'deprecated': False}, 'mplus': {'id': 'mplus', 'deprecated': False}, 'ms-lpl': {'id': 'MS-LPL', 'deprecated': False}, 'ms-pl': {'id': 'MS-PL', 'deprecated': False}, 'ms-rl': {'id': 'MS-RL', 'deprecated': False}, 'mtll': {'id': 'MTLL', 'deprecated': False}, 'mulanpsl-1.0': {'id': 'MulanPSL-1.0', 'deprecated': False}, 'mulanpsl-2.0': {'id': 'MulanPSL-2.0', 'deprecated': False}, 'multics': {'id': 'Multics', 'deprecated': False}, 'mup': {'id': 'Mup', 'deprecated': False}, 'naist-2003': {'id': 'NAIST-2003', 'deprecated': False}, 'nasa-1.3': {'id': 'NASA-1.3', 'deprecated': False}, 'naumen': {'id': 'Naumen', 'deprecated': False}, 'nbpl-1.0': {'id': 'NBPL-1.0', 'deprecated': False}, 'ncbi-pd': {'id': 'NCBI-PD', 'deprecated': False}, 'ncgl-uk-2.0': {'id': 'NCGL-UK-2.0', 'deprecated': False}, 'ncl': {'id': 'NCL', 'deprecated': False}, 'ncsa': {'id': 'NCSA', 'deprecated': False}, 'net-snmp': {'id': 'Net-SNMP', 'deprecated': True}, 'netcdf': {'id': 'NetCDF', 'deprecated': False}, 'newsletr': {'id': 'Newsletr', 'deprecated': False}, 'ngpl': {'id': 'NGPL', 'deprecated': False}, 'nicta-1.0': {'id': 'NICTA-1.0', 'deprecated': False}, 'nist-pd': {'id': 'NIST-PD', 'deprecated': False}, 'nist-pd-fallback': {'id': 'NIST-PD-fallback', 'deprecated': False}, 'nist-software': {'id': 'NIST-Software', 'deprecated': False}, 'nlod-1.0': {'id': 'NLOD-1.0', 'deprecated': False}, 'nlod-2.0': {'id': 'NLOD-2.0', 'deprecated': False}, 'nlpl': {'id': 'NLPL', 'deprecated': False}, 'nokia': {'id': 'Nokia', 'deprecated': False}, 'nosl': {'id': 'NOSL', 'deprecated': False}, 'noweb': {'id': 'Noweb', 'deprecated': False}, 'npl-1.0': {'id': 'NPL-1.0', 'deprecated': False}, 'npl-1.1': {'id': 'NPL-1.1', 'deprecated': False}, 'nposl-3.0': {'id': 'NPOSL-3.0', 'deprecated': False}, 'nrl': {'id': 'NRL', 'deprecated': False}, 'ntp': {'id': 'NTP', 'deprecated': False}, 'ntp-0': {'id': 'NTP-0', 'deprecated': False}, 'nunit': {'id': 'Nunit', 'deprecated': True}, 'o-uda-1.0': {'id': 'O-UDA-1.0', 'deprecated': False}, 'oar': {'id': 'OAR', 'deprecated': False}, 'occt-pl': {'id': 'OCCT-PL', 'deprecated': False}, 'oclc-2.0': {'id': 'OCLC-2.0', 'deprecated': False}, 'odbl-1.0': {'id': 'ODbL-1.0', 'deprecated': False}, 'odc-by-1.0': {'id': 'ODC-By-1.0', 'deprecated': False}, 'offis': {'id': 'OFFIS', 'deprecated': False}, 'ofl-1.0': {'id': 'OFL-1.0', 'deprecated': False}, 'ofl-1.0-no-rfn': {'id': 'OFL-1.0-no-RFN', 'deprecated': False}, 'ofl-1.0-rfn': {'id': 'OFL-1.0-RFN', 'deprecated': False}, 'ofl-1.1': {'id': 'OFL-1.1', 'deprecated': False}, 'ofl-1.1-no-rfn': {'id': 'OFL-1.1-no-RFN', 'deprecated': False}, 'ofl-1.1-rfn': {'id': 'OFL-1.1-RFN', 'deprecated': False}, 'ogc-1.0': {'id': 'OGC-1.0', 'deprecated': False}, 'ogdl-taiwan-1.0': {'id': 'OGDL-Taiwan-1.0', 'deprecated': False}, 'ogl-canada-2.0': {'id': 'OGL-Canada-2.0', 'deprecated': False}, 'ogl-uk-1.0': {'id': 'OGL-UK-1.0', 'deprecated': False}, 'ogl-uk-2.0': {'id': 'OGL-UK-2.0', 'deprecated': False}, 'ogl-uk-3.0': {'id': 'OGL-UK-3.0', 'deprecated': False}, 'ogtsl': {'id': 'OGTSL', 'deprecated': False}, 'oldap-1.1': {'id': 'OLDAP-1.1', 'deprecated': False}, 'oldap-1.2': {'id': 'OLDAP-1.2', 'deprecated': False}, 'oldap-1.3': {'id': 'OLDAP-1.3', 'deprecated': False}, 'oldap-1.4': {'id': 'OLDAP-1.4', 'deprecated': False}, 'oldap-2.0': {'id': 'OLDAP-2.0', 'deprecated': False}, 'oldap-2.0.1': {'id': 'OLDAP-2.0.1', 'deprecated': False}, 
'oldap-2.1': {'id': 'OLDAP-2.1', 'deprecated': False}, 'oldap-2.2': {'id': 'OLDAP-2.2', 'deprecated': False}, 'oldap-2.2.1': {'id': 'OLDAP-2.2.1', 'deprecated': False}, 'oldap-2.2.2': {'id': 'OLDAP-2.2.2', 'deprecated': False}, 'oldap-2.3': {'id': 'OLDAP-2.3', 'deprecated': False}, 'oldap-2.4': {'id': 'OLDAP-2.4', 'deprecated': False}, 'oldap-2.5': {'id': 'OLDAP-2.5', 'deprecated': False}, 'oldap-2.6': {'id': 'OLDAP-2.6', 'deprecated': False}, 'oldap-2.7': {'id': 'OLDAP-2.7', 'deprecated': False}, 'oldap-2.8': {'id': 'OLDAP-2.8', 'deprecated': False}, 'olfl-1.3': {'id': 'OLFL-1.3', 'deprecated': False}, 'oml': {'id': 'OML', 'deprecated': False}, 'openpbs-2.3': {'id': 'OpenPBS-2.3', 'deprecated': False}, 'openssl': {'id': 'OpenSSL', 'deprecated': False}, 'openssl-standalone': {'id': 'OpenSSL-standalone', 'deprecated': False}, 'openvision': {'id': 'OpenVision', 'deprecated': False}, 'opl-1.0': {'id': 'OPL-1.0', 'deprecated': False}, 'opl-uk-3.0': {'id': 'OPL-UK-3.0', 'deprecated': False}, 'opubl-1.0': {'id': 'OPUBL-1.0', 'deprecated': False}, 'oset-pl-2.1': {'id': 'OSET-PL-2.1', 'deprecated': False}, 'osl-1.0': {'id': 'OSL-1.0', 'deprecated': False}, 'osl-1.1': {'id': 'OSL-1.1', 'deprecated': False}, 'osl-2.0': {'id': 'OSL-2.0', 'deprecated': False}, 'osl-2.1': {'id': 'OSL-2.1', 'deprecated': False}, 'osl-3.0': {'id': 'OSL-3.0', 'deprecated': False}, 'padl': {'id': 'PADL', 'deprecated': False}, 'parity-6.0.0': {'id': 'Parity-6.0.0', 'deprecated': False}, 'parity-7.0.0': {'id': 'Parity-7.0.0', 'deprecated': False}, 'pddl-1.0': {'id': 'PDDL-1.0', 'deprecated': False}, 'php-3.0': {'id': 'PHP-3.0', 'deprecated': False}, 'php-3.01': {'id': 'PHP-3.01', 'deprecated': False}, 'pixar': {'id': 'Pixar', 'deprecated': False}, 'pkgconf': {'id': 'pkgconf', 'deprecated': False}, 'plexus': {'id': 'Plexus', 'deprecated': False}, 'pnmstitch': {'id': 'pnmstitch', 'deprecated': False}, 'polyform-noncommercial-1.0.0': {'id': 'PolyForm-Noncommercial-1.0.0', 'deprecated': False}, 'polyform-small-business-1.0.0': {'id': 'PolyForm-Small-Business-1.0.0', 'deprecated': False}, 'postgresql': {'id': 'PostgreSQL', 'deprecated': False}, 'ppl': {'id': 'PPL', 'deprecated': False}, 'psf-2.0': {'id': 'PSF-2.0', 'deprecated': False}, 'psfrag': {'id': 'psfrag', 'deprecated': False}, 'psutils': {'id': 'psutils', 'deprecated': False}, 'python-2.0': {'id': 'Python-2.0', 'deprecated': False}, 'python-2.0.1': {'id': 'Python-2.0.1', 'deprecated': False}, 'python-ldap': {'id': 'python-ldap', 'deprecated': False}, 'qhull': {'id': 'Qhull', 'deprecated': False}, 'qpl-1.0': {'id': 'QPL-1.0', 'deprecated': False}, 'qpl-1.0-inria-2004': {'id': 'QPL-1.0-INRIA-2004', 'deprecated': False}, 'radvd': {'id': 'radvd', 'deprecated': False}, 'rdisc': {'id': 'Rdisc', 'deprecated': False}, 'rhecos-1.1': {'id': 'RHeCos-1.1', 'deprecated': False}, 'rpl-1.1': {'id': 'RPL-1.1', 'deprecated': False}, 'rpl-1.5': {'id': 'RPL-1.5', 'deprecated': False}, 'rpsl-1.0': {'id': 'RPSL-1.0', 'deprecated': False}, 'rsa-md': {'id': 'RSA-MD', 'deprecated': False}, 'rscpl': {'id': 'RSCPL', 'deprecated': False}, 'ruby': {'id': 'Ruby', 'deprecated': False}, 'ruby-pty': {'id': 'Ruby-pty', 'deprecated': False}, 'sax-pd': {'id': 'SAX-PD', 'deprecated': False}, 'sax-pd-2.0': {'id': 'SAX-PD-2.0', 'deprecated': False}, 'saxpath': {'id': 'Saxpath', 'deprecated': False}, 'scea': {'id': 'SCEA', 'deprecated': False}, 'schemereport': {'id': 'SchemeReport', 'deprecated': False}, 'sendmail': {'id': 'Sendmail', 'deprecated': False}, 'sendmail-8.23': {'id': 'Sendmail-8.23', 'deprecated': 
False}, 'sgi-b-1.0': {'id': 'SGI-B-1.0', 'deprecated': False}, 'sgi-b-1.1': {'id': 'SGI-B-1.1', 'deprecated': False}, 'sgi-b-2.0': {'id': 'SGI-B-2.0', 'deprecated': False}, 'sgi-opengl': {'id': 'SGI-OpenGL', 'deprecated': False}, 'sgp4': {'id': 'SGP4', 'deprecated': False}, 'shl-0.5': {'id': 'SHL-0.5', 'deprecated': False}, 'shl-0.51': {'id': 'SHL-0.51', 'deprecated': False}, 'simpl-2.0': {'id': 'SimPL-2.0', 'deprecated': False}, 'sissl': {'id': 'SISSL', 'deprecated': False}, 'sissl-1.2': {'id': 'SISSL-1.2', 'deprecated': False}, 'sl': {'id': 'SL', 'deprecated': False}, 'sleepycat': {'id': 'Sleepycat', 'deprecated': False}, 'smlnj': {'id': 'SMLNJ', 'deprecated': False}, 'smppl': {'id': 'SMPPL', 'deprecated': False}, 'snia': {'id': 'SNIA', 'deprecated': False}, 'snprintf': {'id': 'snprintf', 'deprecated': False}, 'softsurfer': {'id': 'softSurfer', 'deprecated': False}, 'soundex': {'id': 'Soundex', 'deprecated': False}, 'spencer-86': {'id': 'Spencer-86', 'deprecated': False}, 'spencer-94': {'id': 'Spencer-94', 'deprecated': False}, 'spencer-99': {'id': 'Spencer-99', 'deprecated': False}, 'spl-1.0': {'id': 'SPL-1.0', 'deprecated': False}, 'ssh-keyscan': {'id': 'ssh-keyscan', 'deprecated': False}, 'ssh-openssh': {'id': 'SSH-OpenSSH', 'deprecated': False}, 'ssh-short': {'id': 'SSH-short', 'deprecated': False}, 'ssleay-standalone': {'id': 'SSLeay-standalone', 'deprecated': False}, 'sspl-1.0': {'id': 'SSPL-1.0', 'deprecated': False}, 'standardml-nj': {'id': 'StandardML-NJ', 'deprecated': True}, 'sugarcrm-1.1.3': {'id': 'SugarCRM-1.1.3', 'deprecated': False}, 'sun-ppp': {'id': 'Sun-PPP', 'deprecated': False}, 'sun-ppp-2000': {'id': 'Sun-PPP-2000', 'deprecated': False}, 'sunpro': {'id': 'SunPro', 'deprecated': False}, 'swl': {'id': 'SWL', 'deprecated': False}, 'swrule': {'id': 'swrule', 'deprecated': False}, 'symlinks': {'id': 'Symlinks', 'deprecated': False}, 'tapr-ohl-1.0': {'id': 'TAPR-OHL-1.0', 'deprecated': False}, 'tcl': {'id': 'TCL', 'deprecated': False}, 'tcp-wrappers': {'id': 'TCP-wrappers', 'deprecated': False}, 'termreadkey': {'id': 'TermReadKey', 'deprecated': False}, 'tgppl-1.0': {'id': 'TGPPL-1.0', 'deprecated': False}, 'threeparttable': {'id': 'threeparttable', 'deprecated': False}, 'tmate': {'id': 'TMate', 'deprecated': False}, 'torque-1.1': {'id': 'TORQUE-1.1', 'deprecated': False}, 'tosl': {'id': 'TOSL', 'deprecated': False}, 'tpdl': {'id': 'TPDL', 'deprecated': False}, 'tpl-1.0': {'id': 'TPL-1.0', 'deprecated': False}, 'ttwl': {'id': 'TTWL', 'deprecated': False}, 'ttyp0': {'id': 'TTYP0', 'deprecated': False}, 'tu-berlin-1.0': {'id': 'TU-Berlin-1.0', 'deprecated': False}, 'tu-berlin-2.0': {'id': 'TU-Berlin-2.0', 'deprecated': False}, 'ubuntu-font-1.0': {'id': 'Ubuntu-font-1.0', 'deprecated': False}, 'ucar': {'id': 'UCAR', 'deprecated': False}, 'ucl-1.0': {'id': 'UCL-1.0', 'deprecated': False}, 'ulem': {'id': 'ulem', 'deprecated': False}, 'umich-merit': {'id': 'UMich-Merit', 'deprecated': False}, 'unicode-3.0': {'id': 'Unicode-3.0', 'deprecated': False}, 'unicode-dfs-2015': {'id': 'Unicode-DFS-2015', 'deprecated': False}, 'unicode-dfs-2016': {'id': 'Unicode-DFS-2016', 'deprecated': False}, 'unicode-tou': {'id': 'Unicode-TOU', 'deprecated': False}, 'unixcrypt': {'id': 'UnixCrypt', 'deprecated': False}, 'unlicense': {'id': 'Unlicense', 'deprecated': False}, 'upl-1.0': {'id': 'UPL-1.0', 'deprecated': False}, 'urt-rle': {'id': 'URT-RLE', 'deprecated': False}, 'vim': {'id': 'Vim', 'deprecated': False}, 'vostrom': {'id': 'VOSTROM', 'deprecated': False}, 'vsl-1.0': {'id': 'VSL-1.0', 
'deprecated': False}, 'w3c': {'id': 'W3C', 'deprecated': False}, 'w3c-19980720': {'id': 'W3C-19980720', 'deprecated': False}, 'w3c-20150513': {'id': 'W3C-20150513', 'deprecated': False}, 'w3m': {'id': 'w3m', 'deprecated': False}, 'watcom-1.0': {'id': 'Watcom-1.0', 'deprecated': False}, 'widget-workshop': {'id': 'Widget-Workshop', 'deprecated': False}, 'wsuipa': {'id': 'Wsuipa', 'deprecated': False}, 'wtfpl': {'id': 'WTFPL', 'deprecated': False}, 'wxwindows': {'id': 'wxWindows', 'deprecated': True}, 'x11': {'id': 'X11', 'deprecated': False}, 'x11-distribute-modifications-variant': {'id': 'X11-distribute-modifications-variant', 'deprecated': False}, 'x11-swapped': {'id': 'X11-swapped', 'deprecated': False}, 'xdebug-1.03': {'id': 'Xdebug-1.03', 'deprecated': False}, 'xerox': {'id': 'Xerox', 'deprecated': False}, 'xfig': {'id': 'Xfig', 'deprecated': False}, 'xfree86-1.1': {'id': 'XFree86-1.1', 'deprecated': False}, 'xinetd': {'id': 'xinetd', 'deprecated': False}, 'xkeyboard-config-zinoviev': {'id': 'xkeyboard-config-Zinoviev', 'deprecated': False}, 'xlock': {'id': 'xlock', 'deprecated': False}, 'xnet': {'id': 'Xnet', 'deprecated': False}, 'xpp': {'id': 'xpp', 'deprecated': False}, 'xskat': {'id': 'XSkat', 'deprecated': False}, 'xzoom': {'id': 'xzoom', 'deprecated': False}, 'ypl-1.0': {'id': 'YPL-1.0', 'deprecated': False}, 'ypl-1.1': {'id': 'YPL-1.1', 'deprecated': False}, 'zed': {'id': 'Zed', 'deprecated': False}, 'zeeff': {'id': 'Zeeff', 'deprecated': False}, 'zend-2.0': {'id': 'Zend-2.0', 'deprecated': False}, 'zimbra-1.3': {'id': 'Zimbra-1.3', 'deprecated': False}, 'zimbra-1.4': {'id': 'Zimbra-1.4', 'deprecated': False}, 'zlib': {'id': 'Zlib', 'deprecated': False}, 'zlib-acknowledgement': {'id': 'zlib-acknowledgement', 'deprecated': False}, 'zpl-1.1': {'id': 'ZPL-1.1', 'deprecated': False}, 'zpl-2.0': {'id': 'ZPL-2.0', 'deprecated': False}, 'zpl-2.1': {'id': 'ZPL-2.1', 'deprecated': False}, } EXCEPTIONS: dict[str, SPDXException] = { '389-exception': {'id': '389-exception', 'deprecated': False}, 'asterisk-exception': {'id': 'Asterisk-exception', 'deprecated': False}, 'asterisk-linking-protocols-exception': {'id': 'Asterisk-linking-protocols-exception', 'deprecated': False}, 'autoconf-exception-2.0': {'id': 'Autoconf-exception-2.0', 'deprecated': False}, 'autoconf-exception-3.0': {'id': 'Autoconf-exception-3.0', 'deprecated': False}, 'autoconf-exception-generic': {'id': 'Autoconf-exception-generic', 'deprecated': False}, 'autoconf-exception-generic-3.0': {'id': 'Autoconf-exception-generic-3.0', 'deprecated': False}, 'autoconf-exception-macro': {'id': 'Autoconf-exception-macro', 'deprecated': False}, 'bison-exception-1.24': {'id': 'Bison-exception-1.24', 'deprecated': False}, 'bison-exception-2.2': {'id': 'Bison-exception-2.2', 'deprecated': False}, 'bootloader-exception': {'id': 'Bootloader-exception', 'deprecated': False}, 'classpath-exception-2.0': {'id': 'Classpath-exception-2.0', 'deprecated': False}, 'clisp-exception-2.0': {'id': 'CLISP-exception-2.0', 'deprecated': False}, 'cryptsetup-openssl-exception': {'id': 'cryptsetup-OpenSSL-exception', 'deprecated': False}, 'digirule-foss-exception': {'id': 'DigiRule-FOSS-exception', 'deprecated': False}, 'ecos-exception-2.0': {'id': 'eCos-exception-2.0', 'deprecated': False}, 'erlang-otp-linking-exception': {'id': 'erlang-otp-linking-exception', 'deprecated': False}, 'fawkes-runtime-exception': {'id': 'Fawkes-Runtime-exception', 'deprecated': False}, 'fltk-exception': {'id': 'FLTK-exception', 'deprecated': False}, 'fmt-exception': {'id': 
'fmt-exception', 'deprecated': False}, 'font-exception-2.0': {'id': 'Font-exception-2.0', 'deprecated': False}, 'freertos-exception-2.0': {'id': 'freertos-exception-2.0', 'deprecated': False}, 'gcc-exception-2.0': {'id': 'GCC-exception-2.0', 'deprecated': False}, 'gcc-exception-2.0-note': {'id': 'GCC-exception-2.0-note', 'deprecated': False}, 'gcc-exception-3.1': {'id': 'GCC-exception-3.1', 'deprecated': False}, 'gmsh-exception': {'id': 'Gmsh-exception', 'deprecated': False}, 'gnat-exception': {'id': 'GNAT-exception', 'deprecated': False}, 'gnome-examples-exception': {'id': 'GNOME-examples-exception', 'deprecated': False}, 'gnu-compiler-exception': {'id': 'GNU-compiler-exception', 'deprecated': False}, 'gnu-javamail-exception': {'id': 'gnu-javamail-exception', 'deprecated': False}, 'gpl-3.0-interface-exception': {'id': 'GPL-3.0-interface-exception', 'deprecated': False}, 'gpl-3.0-linking-exception': {'id': 'GPL-3.0-linking-exception', 'deprecated': False}, 'gpl-3.0-linking-source-exception': {'id': 'GPL-3.0-linking-source-exception', 'deprecated': False}, 'gpl-cc-1.0': {'id': 'GPL-CC-1.0', 'deprecated': False}, 'gstreamer-exception-2005': {'id': 'GStreamer-exception-2005', 'deprecated': False}, 'gstreamer-exception-2008': {'id': 'GStreamer-exception-2008', 'deprecated': False}, 'i2p-gpl-java-exception': {'id': 'i2p-gpl-java-exception', 'deprecated': False}, 'kicad-libraries-exception': {'id': 'KiCad-libraries-exception', 'deprecated': False}, 'lgpl-3.0-linking-exception': {'id': 'LGPL-3.0-linking-exception', 'deprecated': False}, 'libpri-openh323-exception': {'id': 'libpri-OpenH323-exception', 'deprecated': False}, 'libtool-exception': {'id': 'Libtool-exception', 'deprecated': False}, 'linux-syscall-note': {'id': 'Linux-syscall-note', 'deprecated': False}, 'llgpl': {'id': 'LLGPL', 'deprecated': False}, 'llvm-exception': {'id': 'LLVM-exception', 'deprecated': False}, 'lzma-exception': {'id': 'LZMA-exception', 'deprecated': False}, 'mif-exception': {'id': 'mif-exception', 'deprecated': False}, 'nokia-qt-exception-1.1': {'id': 'Nokia-Qt-exception-1.1', 'deprecated': True}, 'ocaml-lgpl-linking-exception': {'id': 'OCaml-LGPL-linking-exception', 'deprecated': False}, 'occt-exception-1.0': {'id': 'OCCT-exception-1.0', 'deprecated': False}, 'openjdk-assembly-exception-1.0': {'id': 'OpenJDK-assembly-exception-1.0', 'deprecated': False}, 'openvpn-openssl-exception': {'id': 'openvpn-openssl-exception', 'deprecated': False}, 'pcre2-exception': {'id': 'PCRE2-exception', 'deprecated': False}, 'ps-or-pdf-font-exception-20170817': {'id': 'PS-or-PDF-font-exception-20170817', 'deprecated': False}, 'qpl-1.0-inria-2004-exception': {'id': 'QPL-1.0-INRIA-2004-exception', 'deprecated': False}, 'qt-gpl-exception-1.0': {'id': 'Qt-GPL-exception-1.0', 'deprecated': False}, 'qt-lgpl-exception-1.1': {'id': 'Qt-LGPL-exception-1.1', 'deprecated': False}, 'qwt-exception-1.0': {'id': 'Qwt-exception-1.0', 'deprecated': False}, 'romic-exception': {'id': 'romic-exception', 'deprecated': False}, 'rrdtool-floss-exception-2.0': {'id': 'RRDtool-FLOSS-exception-2.0', 'deprecated': False}, 'sane-exception': {'id': 'SANE-exception', 'deprecated': False}, 'shl-2.0': {'id': 'SHL-2.0', 'deprecated': False}, 'shl-2.1': {'id': 'SHL-2.1', 'deprecated': False}, 'stunnel-exception': {'id': 'stunnel-exception', 'deprecated': False}, 'swi-exception': {'id': 'SWI-exception', 'deprecated': False}, 'swift-exception': {'id': 'Swift-exception', 'deprecated': False}, 'texinfo-exception': {'id': 'Texinfo-exception', 'deprecated': False}, 
'u-boot-exception-2.0': {'id': 'u-boot-exception-2.0', 'deprecated': False}, 'ubdl-exception': {'id': 'UBDL-exception', 'deprecated': False}, 'universal-foss-exception-1.0': {'id': 'Universal-FOSS-exception-1.0', 'deprecated': False}, 'vsftpd-openssl-exception': {'id': 'vsftpd-openssl-exception', 'deprecated': False}, 'wxwindows-exception-3.1': {'id': 'WxWindows-exception-3.1', 'deprecated': False}, 'x11vnc-openssl-exception': {'id': 'x11vnc-openssl-exception', 'deprecated': False}, } poetry-core-2.1.1/src/poetry/core/_vendor/packaging/markers.py000066400000000000000000000245011475444614500244420ustar00rootroot00000000000000# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import annotations import operator import os import platform import sys from typing import Any, Callable, TypedDict, cast from ._parser import MarkerAtom, MarkerList, Op, Value, Variable from ._parser import parse_marker as _parse_marker from ._tokenizer import ParserSyntaxError from .specifiers import InvalidSpecifier, Specifier from .utils import canonicalize_name __all__ = [ "InvalidMarker", "Marker", "UndefinedComparison", "UndefinedEnvironmentName", "default_environment", ] Operator = Callable[[str, str], bool] class InvalidMarker(ValueError): """ An invalid marker was found, users should refer to PEP 508. """ class UndefinedComparison(ValueError): """ An invalid operation was attempted on a value that doesn't support it. """ class UndefinedEnvironmentName(ValueError): """ A name was attempted to be used that does not exist inside of the environment. """ class Environment(TypedDict): implementation_name: str """The implementation's identifier, e.g. ``'cpython'``.""" implementation_version: str """ The implementation's version, e.g. ``'3.13.0a2'`` for CPython 3.13.0a2, or ``'7.3.13'`` for PyPy3.10 v7.3.13. """ os_name: str """ The value of :py:data:`os.name`. The name of the operating system dependent module imported, e.g. ``'posix'``. """ platform_machine: str """ Returns the machine type, e.g. ``'i386'``. An empty string if the value cannot be determined. """ platform_release: str """ The system's release, e.g. ``'2.2.0'`` or ``'NT'``. An empty string if the value cannot be determined. """ platform_system: str """ The system/OS name, e.g. ``'Linux'``, ``'Windows'`` or ``'Java'``. An empty string if the value cannot be determined. """ platform_version: str """ The system's release version, e.g. ``'#3 on degas'``. An empty string if the value cannot be determined. """ python_full_version: str """ The Python version as string ``'major.minor.patchlevel'``. Note that unlike the Python :py:data:`sys.version`, this value will always include the patchlevel (it defaults to 0). """ platform_python_implementation: str """ A string identifying the Python implementation, e.g. ``'CPython'``. """ python_version: str """The Python version as string ``'major.minor'``.""" sys_platform: str """ This string contains a platform identifier that can be used to append platform-specific components to :py:data:`sys.path`, for instance. For Unix systems, except on Linux and AIX, this is the lowercased OS name as returned by ``uname -s`` with the first part of the version as returned by ``uname -r`` appended, e.g. ``'sunos5'`` or ``'freebsd8'``, at the time when Python was built. """ def _normalize_extra_values(results: Any) -> Any: """ Normalize extra values. 
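For example (illustrative addition for this document): a parsed marker extra == "Sphinx_RTD" is rewritten to compare against Value("sphinx-rtd"), per the PEP 503 name normalization applied by canonicalize_name().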
""" if isinstance(results[0], tuple): lhs, op, rhs = results[0] if isinstance(lhs, Variable) and lhs.value == "extra": normalized_extra = canonicalize_name(rhs.value) rhs = Value(normalized_extra) elif isinstance(rhs, Variable) and rhs.value == "extra": normalized_extra = canonicalize_name(lhs.value) lhs = Value(normalized_extra) results[0] = lhs, op, rhs return results def _format_marker( marker: list[str] | MarkerAtom | str, first: bool | None = True ) -> str: assert isinstance(marker, (list, tuple, str)) # Sometimes we have a structure like [[...]] which is a single item list # where the single item is itself it's own list. In that case we want skip # the rest of this function so that we don't get extraneous () on the # outside. if ( isinstance(marker, list) and len(marker) == 1 and isinstance(marker[0], (list, tuple)) ): return _format_marker(marker[0]) if isinstance(marker, list): inner = (_format_marker(m, first=False) for m in marker) if first: return " ".join(inner) else: return "(" + " ".join(inner) + ")" elif isinstance(marker, tuple): return " ".join([m.serialize() for m in marker]) else: return marker _operators: dict[str, Operator] = { "in": lambda lhs, rhs: lhs in rhs, "not in": lambda lhs, rhs: lhs not in rhs, "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def _eval_op(lhs: str, op: Op, rhs: str) -> bool: try: spec = Specifier("".join([op.serialize(), rhs])) except InvalidSpecifier: pass else: return spec.contains(lhs, prereleases=True) oper: Operator | None = _operators.get(op.serialize()) if oper is None: raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") return oper(lhs, rhs) def _normalize(*values: str, key: str) -> tuple[str, ...]: # PEP 685 – Comparison of extra names for optional distribution dependencies # https://peps.python.org/pep-0685/ # > When comparing extra names, tools MUST normalize the names being # > compared using the semantics outlined in PEP 503 for names if key == "extra": return tuple(canonicalize_name(v) for v in values) # other environment markers don't have such standards return values def _evaluate_markers(markers: MarkerList, environment: dict[str, str]) -> bool: groups: list[list[bool]] = [[]] for marker in markers: assert isinstance(marker, (list, tuple, str)) if isinstance(marker, list): groups[-1].append(_evaluate_markers(marker, environment)) elif isinstance(marker, tuple): lhs, op, rhs = marker if isinstance(lhs, Variable): environment_key = lhs.value lhs_value = environment[environment_key] rhs_value = rhs.value else: lhs_value = lhs.value environment_key = rhs.value rhs_value = environment[environment_key] lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) groups[-1].append(_eval_op(lhs_value, op, rhs_value)) else: assert marker in ["and", "or"] if marker == "or": groups.append([]) return any(all(item) for item in groups) def format_full_version(info: sys._version_info) -> str: version = f"{info.major}.{info.minor}.{info.micro}" kind = info.releaselevel if kind != "final": version += kind[0] + str(info.serial) return version def default_environment() -> Environment: iver = format_full_version(sys.implementation.version) implementation_name = sys.implementation.name return { "implementation_name": implementation_name, "implementation_version": iver, "os_name": os.name, "platform_machine": platform.machine(), "platform_release": platform.release(), "platform_system": platform.system(), "platform_version": platform.version(), 
"python_full_version": platform.python_version(), "platform_python_implementation": platform.python_implementation(), "python_version": ".".join(platform.python_version_tuple()[:2]), "sys_platform": sys.platform, } class Marker: def __init__(self, marker: str) -> None: # Note: We create a Marker object without calling this constructor in # packaging.requirements.Requirement. If any additional logic is # added here, make sure to mirror/adapt Requirement. try: self._markers = _normalize_extra_values(_parse_marker(marker)) # The attribute `_markers` can be described in terms of a recursive type: # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]] # # For example, the following expression: # python_version > "3.6" or (python_version == "3.6" and os_name == "unix") # # is parsed into: # [ # (, ')>, ), # 'and', # [ # (, , ), # 'or', # (, , ) # ] # ] except ParserSyntaxError as e: raise InvalidMarker(str(e)) from e def __str__(self) -> str: return _format_marker(self._markers) def __repr__(self) -> str: return f"" def __hash__(self) -> int: return hash((self.__class__.__name__, str(self))) def __eq__(self, other: Any) -> bool: if not isinstance(other, Marker): return NotImplemented return str(self) == str(other) def evaluate(self, environment: dict[str, str] | None = None) -> bool: """Evaluate a marker. Return the boolean from evaluating the given marker against the environment. environment is an optional argument to override all or part of the determined environment. The environment is determined from the current Python process. """ current_environment = cast("dict[str, str]", default_environment()) current_environment["extra"] = "" if environment is not None: current_environment.update(environment) # The API used to allow setting extra to None. We need to handle this # case for backwards compatibility. if current_environment["extra"] is None: current_environment["extra"] = "" return _evaluate_markers( self._markers, _repair_python_full_version(current_environment) ) def _repair_python_full_version(env: dict[str, str]) -> dict[str, str]: """ Work around platform.python_version() returning something that is not PEP 440 compliant for non-tagged Python builds. """ if env["python_full_version"].endswith("+"): env["python_full_version"] += "local" return env poetry-core-2.1.1/src/poetry/core/_vendor/packaging/metadata.py000066400000000000000000001037121475444614500245600ustar00rootroot00000000000000from __future__ import annotations import email.feedparser import email.header import email.message import email.parser import email.policy import pathlib import sys import typing from typing import ( Any, Callable, Generic, Literal, TypedDict, cast, ) from . import licenses, requirements, specifiers, utils from . import version as version_module from .licenses import NormalizedLicenseExpression T = typing.TypeVar("T") if sys.version_info >= (3, 11): # pragma: no cover ExceptionGroup = ExceptionGroup else: # pragma: no cover class ExceptionGroup(Exception): """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11. If :external:exc:`ExceptionGroup` is already defined by Python itself, that version is used instead. 
""" message: str exceptions: list[Exception] def __init__(self, message: str, exceptions: list[Exception]) -> None: self.message = message self.exceptions = exceptions def __repr__(self) -> str: return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})" class InvalidMetadata(ValueError): """A metadata field contains invalid data.""" field: str """The name of the field that contains invalid data.""" def __init__(self, field: str, message: str) -> None: self.field = field super().__init__(message) # The RawMetadata class attempts to make as few assumptions about the underlying # serialization formats as possible. The idea is that as long as a serialization # formats offer some very basic primitives in *some* way then we can support # serializing to and from that format. class RawMetadata(TypedDict, total=False): """A dictionary of raw core metadata. Each field in core metadata maps to a key of this dictionary (when data is provided). The key is lower-case and underscores are used instead of dashes compared to the equivalent core metadata field. Any core metadata field that can be specified multiple times or can hold multiple values in a single field have a key with a plural name. See :class:`Metadata` whose attributes match the keys of this dictionary. Core metadata fields that can be specified multiple times are stored as a list or dict depending on which is appropriate for the field. Any fields which hold multiple values in a single field are stored as a list. """ # Metadata 1.0 - PEP 241 metadata_version: str name: str version: str platforms: list[str] summary: str description: str keywords: list[str] home_page: str author: str author_email: str license: str # Metadata 1.1 - PEP 314 supported_platforms: list[str] download_url: str classifiers: list[str] requires: list[str] provides: list[str] obsoletes: list[str] # Metadata 1.2 - PEP 345 maintainer: str maintainer_email: str requires_dist: list[str] provides_dist: list[str] obsoletes_dist: list[str] requires_python: str requires_external: list[str] project_urls: dict[str, str] # Metadata 2.0 # PEP 426 attempted to completely revamp the metadata format # but got stuck without ever being able to build consensus on # it and ultimately ended up withdrawn. # # However, a number of tools had started emitting METADATA with # `2.0` Metadata-Version, so for historical reasons, this version # was skipped. # Metadata 2.1 - PEP 566 description_content_type: str provides_extra: list[str] # Metadata 2.2 - PEP 643 dynamic: list[str] # Metadata 2.3 - PEP 685 # No new fields were added in PEP 685, just some edge case were # tightened up to provide better interoptability. 
# Metadata 2.4 - PEP 639 license_expression: str license_files: list[str] _STRING_FIELDS = { "author", "author_email", "description", "description_content_type", "download_url", "home_page", "license", "license_expression", "maintainer", "maintainer_email", "metadata_version", "name", "requires_python", "summary", "version", } _LIST_FIELDS = { "classifiers", "dynamic", "license_files", "obsoletes", "obsoletes_dist", "platforms", "provides", "provides_dist", "provides_extra", "requires", "requires_dist", "requires_external", "supported_platforms", } _DICT_FIELDS = { "project_urls", } def _parse_keywords(data: str) -> list[str]: """Split a string of comma-separated keywords into a list of keywords.""" return [k.strip() for k in data.split(",")] def _parse_project_urls(data: list[str]) -> dict[str, str]: """Parse a list of label/URL string pairings separated by a comma.""" urls = {} for pair in data: # Our logic is slightly tricky here as we want to try and do # *something* reasonable with malformed data. # # The main thing that we have to worry about, is data that does # not have a ',' at all to split the label from the Value. There # isn't a singular right answer here, and we will fail validation # later on (if the caller is validating) so it doesn't *really* # matter, but since the missing value has to be an empty str # and our return value is dict[str, str], if we let the key # be the missing value, then they'd have multiple '' values that # overwrite each other in a accumulating dict. # # The other potentional issue is that it's possible to have the # same label multiple times in the metadata, with no solid "right" # answer with what to do in that case. As such, we'll do the only # thing we can, which is treat the field as unparseable and add it # to our list of unparsed fields. parts = [p.strip() for p in pair.split(",", 1)] parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items # TODO: The spec doesn't say anything about if the keys should be # considered case sensitive or not... logically they should # be case-preserving and case-insensitive, but doing that # would open up more cases where we might have duplicate # entries. label, url = parts if label in urls: # The label already exists in our set of urls, so this field # is unparseable, and we can just add the whole thing to our # unparseable data and stop processing it. raise KeyError("duplicate labels in project urls") urls[label] = url return urls def _get_payload(msg: email.message.Message, source: bytes | str) -> str: """Get the body of the message.""" # If our source is a str, then our caller has managed encodings for us, # and we don't need to deal with it. if isinstance(source, str): payload = msg.get_payload() assert isinstance(payload, str) return payload # If our source is a bytes, then we're managing the encoding and we need # to deal with it. else: bpayload = msg.get_payload(decode=True) assert isinstance(bpayload, bytes) try: return bpayload.decode("utf8", "strict") except UnicodeDecodeError as exc: raise ValueError("payload in an invalid encoding") from exc # The various parse_FORMAT functions here are intended to be as lenient as # possible in their parsing, while still returning a correctly typed # RawMetadata. # # To aid in this, we also generally want to do as little touching of the # data as possible, except where there are possibly some historic holdovers # that make valid data awkward to work with. 
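# --- Editor's aside (illustration only, using made-up inputs): the expected
# behavior of the two private helpers defined above. Keywords are split on
# commas and stripped; project URLs split on the first comma into label/URL.
assert _parse_keywords("packaging, metadata ,pep-639") == [
    "packaging", "metadata", "pep-639"
]
assert _parse_project_urls(["Homepage, https://example.org"]) == {
    "Homepage": "https://example.org"
}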
# # While this is a lower level, intermediate format than our ``Metadata`` # class, some light touch ups can make a massive difference in usability. # Map METADATA fields to RawMetadata. _EMAIL_TO_RAW_MAPPING = { "author": "author", "author-email": "author_email", "classifier": "classifiers", "description": "description", "description-content-type": "description_content_type", "download-url": "download_url", "dynamic": "dynamic", "home-page": "home_page", "keywords": "keywords", "license": "license", "license-expression": "license_expression", "license-file": "license_files", "maintainer": "maintainer", "maintainer-email": "maintainer_email", "metadata-version": "metadata_version", "name": "name", "obsoletes": "obsoletes", "obsoletes-dist": "obsoletes_dist", "platform": "platforms", "project-url": "project_urls", "provides": "provides", "provides-dist": "provides_dist", "provides-extra": "provides_extra", "requires": "requires", "requires-dist": "requires_dist", "requires-external": "requires_external", "requires-python": "requires_python", "summary": "summary", "supported-platform": "supported_platforms", "version": "version", } _RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()} def parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]: """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``). This function returns a two-item tuple of dicts. The first dict is of recognized fields from the core metadata specification. Fields that can be parsed and translated into Python's built-in types are converted appropriately. All other fields are left as-is. Fields that are allowed to appear multiple times are stored as lists. The second dict contains all other fields from the metadata. This includes any unrecognized fields. It also includes any fields which are expected to be parsed into a built-in type but were not formatted appropriately. Finally, any fields that are expected to appear only once but are repeated are included in this dict. """ raw: dict[str, str | list[str] | dict[str, str]] = {} unparsed: dict[str, list[str]] = {} if isinstance(data, str): parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data) else: parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data) # We have to wrap parsed.keys() in a set, because in the case of multiple # values for a key (a list), the key will appear multiple times in the # list of keys, but we're avoiding that by using get_all(). for name in frozenset(parsed.keys()): # Header names in RFC are case insensitive, so we'll normalize to all # lower case to make comparisons easier. name = name.lower() # We use get_all() here, even for fields that aren't multiple use, # because otherwise someone could have e.g. two Name fields, and we # would just silently ignore it rather than doing something about it. headers = parsed.get_all(name) or [] # The way the email module works when parsing bytes is that it # unconditionally decodes the bytes as ascii using the surrogateescape # handler. When you pull that data back out (such as with get_all() ), # it looks to see if the str has any surrogate escapes, and if it does # it wraps it in a Header object instead of returning the string. # # As such, we'll look for those Header objects, and fix up the encoding. value = [] # Flag if we have run into any issues processing the headers, thus # signalling that the data belongs in 'unparsed'. 
valid_encoding = True for h in headers: # It's unclear if this can return more types than just a Header or # a str, so we'll just assert here to make sure. assert isinstance(h, (email.header.Header, str)) # If it's a header object, we need to do our little dance to get # the real data out of it. In cases where there is invalid data # we're going to end up with mojibake, but there's no obvious, good # way around that without reimplementing parts of the Header object # ourselves. # # That should be fine since, if mojibacked happens, this key is # going into the unparsed dict anyways. if isinstance(h, email.header.Header): # The Header object stores it's data as chunks, and each chunk # can be independently encoded, so we'll need to check each # of them. chunks: list[tuple[bytes, str | None]] = [] for bin, encoding in email.header.decode_header(h): try: bin.decode("utf8", "strict") except UnicodeDecodeError: # Enable mojibake. encoding = "latin1" valid_encoding = False else: encoding = "utf8" chunks.append((bin, encoding)) # Turn our chunks back into a Header object, then let that # Header object do the right thing to turn them into a # string for us. value.append(str(email.header.make_header(chunks))) # This is already a string, so just add it. else: value.append(h) # We've processed all of our values to get them into a list of str, # but we may have mojibake data, in which case this is an unparsed # field. if not valid_encoding: unparsed[name] = value continue raw_name = _EMAIL_TO_RAW_MAPPING.get(name) if raw_name is None: # This is a bit of a weird situation, we've encountered a key that # we don't know what it means, so we don't know whether it's meant # to be a list or not. # # Since we can't really tell one way or another, we'll just leave it # as a list, even though it may be a single item list, because that's # what makes the most sense for email headers. unparsed[name] = value continue # If this is one of our string fields, then we'll check to see if our # value is a list of a single item. If it is then we'll assume that # it was emitted as a single string, and unwrap the str from inside # the list. # # If it's any other kind of data, then we haven't the faintest clue # what we should parse it as, and we have to just add it to our list # of unparsed stuff. if raw_name in _STRING_FIELDS and len(value) == 1: raw[raw_name] = value[0] # If this is one of our list of string fields, then we can just assign # the value, since email *only* has strings, and our get_all() call # above ensures that this is a list. elif raw_name in _LIST_FIELDS: raw[raw_name] = value # Special Case: Keywords # The keywords field is implemented in the metadata spec as a str, # but it conceptually is a list of strings, and is serialized using # ", ".join(keywords), so we'll do some light data massaging to turn # this into what it logically is. elif raw_name == "keywords" and len(value) == 1: raw[raw_name] = _parse_keywords(value[0]) # Special Case: Project-URL # The project urls is implemented in the metadata spec as a list of # specially-formatted strings that represent a key and a value, which # is fundamentally a mapping, however the email format doesn't support # mappings in a sane way, so it was crammed into a list of strings # instead. # # We will do a little light data massaging to turn this into a map as # it logically should be. 
elif raw_name == "project_urls": try: raw[raw_name] = _parse_project_urls(value) except KeyError: unparsed[name] = value # Nothing that we've done has managed to parse this, so it'll just # throw it in our unparseable data and move on. else: unparsed[name] = value # We need to support getting the Description from the message payload in # addition to getting it from the the headers. This does mean, though, there # is the possibility of it being set both ways, in which case we put both # in 'unparsed' since we don't know which is right. try: payload = _get_payload(parsed, data) except ValueError: unparsed.setdefault("description", []).append( parsed.get_payload(decode=isinstance(data, bytes)) # type: ignore[call-overload] ) else: if payload: # Check to see if we've already got a description, if so then both # it, and this body move to unparseable. if "description" in raw: description_header = cast(str, raw.pop("description")) unparsed.setdefault("description", []).extend( [description_header, payload] ) elif "description" in unparsed: unparsed["description"].append(payload) else: raw["description"] = payload # We need to cast our `raw` to a metadata, because a TypedDict only support # literal key names, but we're computing our key names on purpose, but the # way this function is implemented, our `TypedDict` can only have valid key # names. return cast(RawMetadata, raw), unparsed _NOT_FOUND = object() # Keep the two values in sync. _VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"] _MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"] _REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"]) class _Validator(Generic[T]): """Validate a metadata field. All _process_*() methods correspond to a core metadata field. The method is called with the field's raw value. If the raw value is valid it is returned in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field). If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause as appropriate). """ name: str raw_name: str added: _MetadataVersion def __init__( self, *, added: _MetadataVersion = "1.0", ) -> None: self.added = added def __set_name__(self, _owner: Metadata, name: str) -> None: self.name = name self.raw_name = _RAW_TO_EMAIL_MAPPING[name] def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T: # With Python 3.8, the caching can be replaced with functools.cached_property(). # No need to check the cache as attribute lookup will resolve into the # instance's __dict__ before __get__ is called. cache = instance.__dict__ value = instance._raw.get(self.name) # To make the _process_* methods easier, we'll check if the value is None # and if this field is NOT a required attribute, and if both of those # things are true, we'll skip the the converter. This will mean that the # converters never have to deal with the None union. 
if self.name in _REQUIRED_ATTRS or value is not None: try: converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") except AttributeError: pass else: value = converter(value) cache[self.name] = value try: del instance._raw[self.name] # type: ignore[misc] except KeyError: pass return cast(T, value) def _invalid_metadata( self, msg: str, cause: Exception | None = None ) -> InvalidMetadata: exc = InvalidMetadata( self.raw_name, msg.format_map({"field": repr(self.raw_name)}) ) exc.__cause__ = cause return exc def _process_metadata_version(self, value: str) -> _MetadataVersion: # Implicitly makes Metadata-Version required. if value not in _VALID_METADATA_VERSIONS: raise self._invalid_metadata(f"{value!r} is not a valid metadata version") return cast(_MetadataVersion, value) def _process_name(self, value: str) -> str: if not value: raise self._invalid_metadata("{field} is a required field") # Validate the name as a side-effect. try: utils.canonicalize_name(value, validate=True) except utils.InvalidName as exc: raise self._invalid_metadata( f"{value!r} is invalid for {{field}}", cause=exc ) from exc else: return value def _process_version(self, value: str) -> version_module.Version: if not value: raise self._invalid_metadata("{field} is a required field") try: return version_module.parse(value) except version_module.InvalidVersion as exc: raise self._invalid_metadata( f"{value!r} is invalid for {{field}}", cause=exc ) from exc def _process_summary(self, value: str) -> str: """Check the field contains no newlines.""" if "\n" in value: raise self._invalid_metadata("{field} must be a single line") return value def _process_description_content_type(self, value: str) -> str: content_types = {"text/plain", "text/x-rst", "text/markdown"} message = email.message.EmailMessage() message["content-type"] = value content_type, parameters = ( # Defaults to `text/plain` if parsing failed. message.get_content_type().lower(), message["content-type"].params, ) # Check if content-type is valid or defaulted to `text/plain` and thus was # not parseable. if content_type not in content_types or content_type not in value.lower(): raise self._invalid_metadata( f"{{field}} must be one of {list(content_types)}, not {value!r}" ) charset = parameters.get("charset", "UTF-8") if charset != "UTF-8": raise self._invalid_metadata( f"{{field}} can only specify the UTF-8 charset, not {list(charset)}" ) markdown_variants = {"GFM", "CommonMark"} variant = parameters.get("variant", "GFM") # Use an acceptable default. 
if content_type == "text/markdown" and variant not in markdown_variants: raise self._invalid_metadata( f"valid Markdown variants for {{field}} are {list(markdown_variants)}, " f"not {variant!r}", ) return value def _process_dynamic(self, value: list[str]) -> list[str]: for dynamic_field in map(str.lower, value): if dynamic_field in {"name", "version", "metadata-version"}: raise self._invalid_metadata( f"{dynamic_field!r} is not allowed as a dynamic field" ) elif dynamic_field not in _EMAIL_TO_RAW_MAPPING: raise self._invalid_metadata( f"{dynamic_field!r} is not a valid dynamic field" ) return list(map(str.lower, value)) def _process_provides_extra( self, value: list[str], ) -> list[utils.NormalizedName]: normalized_names = [] try: for name in value: normalized_names.append(utils.canonicalize_name(name, validate=True)) except utils.InvalidName as exc: raise self._invalid_metadata( f"{name!r} is invalid for {{field}}", cause=exc ) from exc else: return normalized_names def _process_requires_python(self, value: str) -> specifiers.SpecifierSet: try: return specifiers.SpecifierSet(value) except specifiers.InvalidSpecifier as exc: raise self._invalid_metadata( f"{value!r} is invalid for {{field}}", cause=exc ) from exc def _process_requires_dist( self, value: list[str], ) -> list[requirements.Requirement]: reqs = [] try: for req in value: reqs.append(requirements.Requirement(req)) except requirements.InvalidRequirement as exc: raise self._invalid_metadata( f"{req!r} is invalid for {{field}}", cause=exc ) from exc else: return reqs def _process_license_expression( self, value: str ) -> NormalizedLicenseExpression | None: try: return licenses.canonicalize_license_expression(value) except ValueError as exc: raise self._invalid_metadata( f"{value!r} is invalid for {{field}}", cause=exc ) from exc def _process_license_files(self, value: list[str]) -> list[str]: paths = [] for path in value: if ".." in path: raise self._invalid_metadata( f"{path!r} is invalid for {{field}}, " "parent directory indicators are not allowed" ) if "*" in path: raise self._invalid_metadata( f"{path!r} is invalid for {{field}}, paths must be resolved" ) if ( pathlib.PurePosixPath(path).is_absolute() or pathlib.PureWindowsPath(path).is_absolute() ): raise self._invalid_metadata( f"{path!r} is invalid for {{field}}, paths must be relative" ) if pathlib.PureWindowsPath(path).as_posix() != path: raise self._invalid_metadata( f"{path!r} is invalid for {{field}}, " "paths must use '/' delimiter" ) paths.append(path) return paths class Metadata: """Representation of distribution metadata. Compared to :class:`RawMetadata`, this class provides objects representing metadata fields instead of only using built-in types. Any invalid metadata will cause :exc:`InvalidMetadata` to be raised (with a :py:attr:`~BaseException.__cause__` attribute as appropriate). """ _raw: RawMetadata @classmethod def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata: """Create an instance from :class:`RawMetadata`. If *validate* is true, all metadata will be validated. All exceptions related to validation will be gathered and raised as an :class:`ExceptionGroup`. """ ins = cls() ins._raw = data.copy() # Mutations occur due to caching enriched values. 
if validate: exceptions: list[Exception] = [] try: metadata_version = ins.metadata_version metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) except InvalidMetadata as metadata_version_exc: exceptions.append(metadata_version_exc) metadata_version = None # Make sure to check for the fields that are present, the required # fields (so their absence can be reported). fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS # Remove fields that have already been checked. fields_to_check -= {"metadata_version"} for key in fields_to_check: try: if metadata_version: # Can't use getattr() as that triggers descriptor protocol which # will fail due to no value for the instance argument. try: field_metadata_version = cls.__dict__[key].added except KeyError: exc = InvalidMetadata(key, f"unrecognized field: {key!r}") exceptions.append(exc) continue field_age = _VALID_METADATA_VERSIONS.index( field_metadata_version ) if field_age > metadata_age: field = _RAW_TO_EMAIL_MAPPING[key] exc = InvalidMetadata( field, f"{field} introduced in metadata version " f"{field_metadata_version}, not {metadata_version}", ) exceptions.append(exc) continue getattr(ins, key) except InvalidMetadata as exc: exceptions.append(exc) if exceptions: raise ExceptionGroup("invalid metadata", exceptions) return ins @classmethod def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata: """Parse metadata from email headers. If *validate* is true, the metadata will be validated. All exceptions related to validation will be gathered and raised as an :class:`ExceptionGroup`. """ raw, unparsed = parse_email(data) if validate: exceptions: list[Exception] = [] for unparsed_key in unparsed: if unparsed_key in _EMAIL_TO_RAW_MAPPING: message = f"{unparsed_key!r} has invalid data" else: message = f"unrecognized field: {unparsed_key!r}" exceptions.append(InvalidMetadata(unparsed_key, message)) if exceptions: raise ExceptionGroup("unparsed", exceptions) try: return cls.from_raw(raw, validate=validate) except ExceptionGroup as exc_group: raise ExceptionGroup( "invalid or unparsed metadata", exc_group.exceptions ) from None metadata_version: _Validator[_MetadataVersion] = _Validator() """:external:ref:`core-metadata-metadata-version` (required; validated to be a valid metadata version)""" # `name` is not normalized/typed to NormalizedName so as to provide access to # the original/raw name. 
name: _Validator[str] = _Validator() """:external:ref:`core-metadata-name` (required; validated using :func:`~packaging.utils.canonicalize_name` and its *validate* parameter)""" version: _Validator[version_module.Version] = _Validator() """:external:ref:`core-metadata-version` (required)""" dynamic: _Validator[list[str] | None] = _Validator( added="2.2", ) """:external:ref:`core-metadata-dynamic` (validated against core metadata field names and lowercased)""" platforms: _Validator[list[str] | None] = _Validator() """:external:ref:`core-metadata-platform`""" supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1") """:external:ref:`core-metadata-supported-platform`""" summary: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" description: _Validator[str | None] = _Validator() # TODO 2.1: can be in body """:external:ref:`core-metadata-description`""" description_content_type: _Validator[str | None] = _Validator(added="2.1") """:external:ref:`core-metadata-description-content-type` (validated)""" keywords: _Validator[list[str] | None] = _Validator() """:external:ref:`core-metadata-keywords`""" home_page: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-home-page`""" download_url: _Validator[str | None] = _Validator(added="1.1") """:external:ref:`core-metadata-download-url`""" author: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-author`""" author_email: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-author-email`""" maintainer: _Validator[str | None] = _Validator(added="1.2") """:external:ref:`core-metadata-maintainer`""" maintainer_email: _Validator[str | None] = _Validator(added="1.2") """:external:ref:`core-metadata-maintainer-email`""" license: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-license`""" license_expression: _Validator[NormalizedLicenseExpression | None] = _Validator( added="2.4" ) """:external:ref:`core-metadata-license-expression`""" license_files: _Validator[list[str] | None] = _Validator(added="2.4") """:external:ref:`core-metadata-license-file`""" classifiers: _Validator[list[str] | None] = _Validator(added="1.1") """:external:ref:`core-metadata-classifier`""" requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator( added="1.2" ) """:external:ref:`core-metadata-requires-dist`""" requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator( added="1.2" ) """:external:ref:`core-metadata-requires-python`""" # Because `Requires-External` allows for non-PEP 440 version specifiers, we # don't do any processing on the values. requires_external: _Validator[list[str] | None] = _Validator(added="1.2") """:external:ref:`core-metadata-requires-external`""" project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2") """:external:ref:`core-metadata-project-url`""" # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation # regardless of metadata version. 
provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator( added="2.1", ) """:external:ref:`core-metadata-provides-extra`""" provides_dist: _Validator[list[str] | None] = _Validator(added="1.2") """:external:ref:`core-metadata-provides-dist`""" obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2") """:external:ref:`core-metadata-obsoletes-dist`""" requires: _Validator[list[str] | None] = _Validator(added="1.1") """``Requires`` (deprecated)""" provides: _Validator[list[str] | None] = _Validator(added="1.1") """``Provides`` (deprecated)""" obsoletes: _Validator[list[str] | None] = _Validator(added="1.1") """``Obsoletes`` (deprecated)""" poetry-core-2.1.1/src/poetry/core/_vendor/packaging/py.typed000066400000000000000000000000001475444614500241070ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/packaging/requirements.py000066400000000000000000000056031475444614500255230ustar00rootroot00000000000000# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import annotations from typing import Any, Iterator from ._parser import parse_requirement as _parse_requirement from ._tokenizer import ParserSyntaxError from .markers import Marker, _normalize_extra_values from .specifiers import SpecifierSet from .utils import canonicalize_name class InvalidRequirement(ValueError): """ An invalid requirement was found, users should refer to PEP 508. """ class Requirement: """Parse a requirement. Parse a given requirement string into its parts, such as name, specifier, URL, and extras. Raises InvalidRequirement on a badly-formed requirement string. """ # TODO: Can we test whether something is contained within a requirement? # If so how do we do that? Do we need to test against the _name_ of # the thing as well as the version? What about the markers? # TODO: Can we normalize the name and extra name? 
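# --- Editor's aside (illustration only): the typed `Metadata` layer from
# metadata.py above, fed a made-up minimal METADATA document. Validation
# failures would raise InvalidMetadata, gathered into an ExceptionGroup.
meta = Metadata.from_email("Metadata-Version: 2.1\nName: example-dist\nVersion: 1.0\n")
assert meta.metadata_version == "2.1"
assert str(meta.version) == "1.0"  # enriched into a packaging Version object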
def __init__(self, requirement_string: str) -> None: try: parsed = _parse_requirement(requirement_string) except ParserSyntaxError as e: raise InvalidRequirement(str(e)) from e self.name: str = parsed.name self.url: str | None = parsed.url or None self.extras: set[str] = set(parsed.extras or []) self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) self.marker: Marker | None = None if parsed.marker is not None: self.marker = Marker.__new__(Marker) self.marker._markers = _normalize_extra_values(parsed.marker)
def _iter_parts(self, name: str) -> Iterator[str]: yield name if self.extras: formatted_extras = ",".join(sorted(self.extras)) yield f"[{formatted_extras}]" if self.specifier: yield str(self.specifier) if self.url: yield f"@ {self.url}" if self.marker: yield " " if self.marker: yield f"; {self.marker}"
def __str__(self) -> str: return "".join(self._iter_parts(self.name))
def __repr__(self) -> str: return f"<Requirement('{self}')>"
def __hash__(self) -> int: return hash( ( self.__class__.__name__, *self._iter_parts(canonicalize_name(self.name)), ) )
def __eq__(self, other: Any) -> bool: if not isinstance(other, Requirement): return NotImplemented return ( canonicalize_name(self.name) == canonicalize_name(other.name) and self.extras == other.extras and self.specifier == other.specifier and self.url == other.url and self.marker == other.marker )
poetry-core-2.1.1/src/poetry/core/_vendor/packaging/specifiers.py000066400000000000000000001162121475444614500251330ustar00rootroot00000000000000# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. """ .. testsetup:: from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier from packaging.version import Version """ from __future__ import annotations import abc import itertools import re from typing import Callable, Iterable, Iterator, TypeVar, Union from .utils import canonicalize_version from .version import Version UnparsedVersion = Union[Version, str] UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) CallableOperator = Callable[[Version, str], bool]
def _coerce_version(version: UnparsedVersion) -> Version: if not isinstance(version, Version): version = Version(version) return version
class InvalidSpecifier(ValueError): """ Raised when attempting to create a :class:`Specifier` with a specifier string that is invalid. >>> Specifier("lolwat") Traceback (most recent call last): ... packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' """
class BaseSpecifier(metaclass=abc.ABCMeta): @abc.abstractmethod def __str__(self) -> str: """ Returns the str representation of this Specifier-like object. This should be representative of the Specifier itself. """ @abc.abstractmethod def __hash__(self) -> int: """ Returns a hash value for this Specifier-like object. """ @abc.abstractmethod def __eq__(self, other: object) -> bool: """ Returns a boolean representing whether or not the two Specifier-like objects are equal. :param other: The other object to check against. """ @property @abc.abstractmethod def prereleases(self) -> bool | None: """Whether or not pre-releases as a whole are allowed. This can be set to either ``True`` or ``False`` to explicitly enable or disable prereleases or it can be set to ``None`` (the default) to use default semantics. """ @prereleases.setter def prereleases(self, value: bool) -> None: """Setter for :attr:`prereleases`. :param value: The value to set.
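# --- Editor's aside (illustration only): parsing a PEP 508 string with the
# `Requirement` class from requirements.py above (the requirement itself is
# invented).
req = Requirement('example-dist[cli]>=1.0,<2.0 ; python_version >= "3.9"')
assert req.name == "example-dist"
assert req.extras == {"cli"}
assert str(req.specifier) == "<2.0,>=1.0"  # SpecifierSet sorts its members
assert req.marker is not None and str(req.marker) == 'python_version >= "3.9"'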
""" @abc.abstractmethod def contains(self, item: str, prereleases: bool | None = None) -> bool: """ Determines if the given item is contained within this specifier. """ @abc.abstractmethod def filter( self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None ) -> Iterator[UnparsedVersionVar]: """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. """ class Specifier(BaseSpecifier): """This class abstracts handling of version specifiers. .. tip:: It is generally not required to instantiate this manually. You should instead prefer to work with :class:`SpecifierSet` instead, which can parse comma-separated version specifiers (which is what package metadata contains). """ _operator_regex_str = r""" (?P(~=|==|!=|<=|>=|<|>|===)) """ _version_regex_str = r""" (?P (?: # The identity operators allow for an escape hatch that will # do an exact string match of the version you wish to install. # This will not be parsed by PEP 440 and we cannot determine # any semantic meaning from it. This operator is discouraged # but included entirely as an escape hatch. (?<====) # Only match for the identity operator \s* [^\s;)]* # The arbitrary version can be just about anything, # we match everything except for whitespace, a # semi-colon for marker support, and a closing paren # since versions can be enclosed in them. ) | (?: # The (non)equality operators allow for wild card and local # versions to be specified so we have to define these two # operators separately to enable that. (?<===|!=) # Only match for equals and not equals \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release # You cannot use a wild card and a pre-release, post-release, a dev or # local version together so group them with a | and make them optional. (?: \.\* # Wild card syntax of .* | (?: # pre release [-_\.]? (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local )? ) | (?: # The compatible operator requires at least two digits in the # release segment. (?<=~=) # Only match for the compatible operator \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? (alpha|beta|preview|pre|a|b|c|rc) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) | (?: # All other operators only allow a sub set of what the # (non)equality operators do. Specifically they do not allow # local versions to be specified nor do they allow the prefix # matching wild cards. (?=": "greater_than_equal", "<": "less_than", ">": "greater_than", "===": "arbitrary", } def __init__(self, spec: str = "", prereleases: bool | None = None) -> None: """Initialize a Specifier instance. :param spec: The string representation of a specifier which will be parsed and normalized before use. :param prereleases: This tells the specifier if it should accept prerelease versions if applicable or not. The default of ``None`` will autodetect it from the given specifiers. :raises InvalidSpecifier: If the given specifier is invalid (i.e. bad syntax). 
""" match = self._regex.search(spec) if not match: raise InvalidSpecifier(f"Invalid specifier: {spec!r}") self._spec: tuple[str, str] = ( match.group("operator").strip(), match.group("version").strip(), ) # Store whether or not this Specifier should accept prereleases self._prereleases = prereleases # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 @property # type: ignore[override] def prereleases(self) -> bool: # If there is an explicit prereleases set for this, then we'll just # blindly use that. if self._prereleases is not None: return self._prereleases # Look at all of our specifiers and determine if they are inclusive # operators, and if they are if they are including an explicit # prerelease. operator, version = self._spec if operator in ["==", ">=", "<=", "~=", "===", ">", "<"]: # The == specifier can include a trailing .*, if it does we # want to remove before parsing. if operator == "==" and version.endswith(".*"): version = version[:-2] # Parse the version, and if it is a pre-release than this # specifier allows pre-releases. if Version(version).is_prerelease: return True return False @prereleases.setter def prereleases(self, value: bool) -> None: self._prereleases = value @property def operator(self) -> str: """The operator of this specifier. >>> Specifier("==1.2.3").operator '==' """ return self._spec[0] @property def version(self) -> str: """The version of this specifier. >>> Specifier("==1.2.3").version '1.2.3' """ return self._spec[1] def __repr__(self) -> str: """A representation of the Specifier that shows all internal state. >>> Specifier('>=1.0.0') =1.0.0')> >>> Specifier('>=1.0.0', prereleases=False) =1.0.0', prereleases=False)> >>> Specifier('>=1.0.0', prereleases=True) =1.0.0', prereleases=True)> """ pre = ( f", prereleases={self.prereleases!r}" if self._prereleases is not None else "" ) return f"<{self.__class__.__name__}({str(self)!r}{pre})>" def __str__(self) -> str: """A string representation of the Specifier that can be round-tripped. >>> str(Specifier('>=1.0.0')) '>=1.0.0' >>> str(Specifier('>=1.0.0', prereleases=False)) '>=1.0.0' """ return "{}{}".format(*self._spec) @property def _canonical_spec(self) -> tuple[str, str]: canonical_version = canonicalize_version( self._spec[1], strip_trailing_zero=(self._spec[0] != "~="), ) return self._spec[0], canonical_version def __hash__(self) -> int: return hash(self._canonical_spec) def __eq__(self, other: object) -> bool: """Whether or not the two Specifier-like objects are equal. :param other: The other object to check against. The value of :attr:`prereleases` is ignored. >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") True >>> (Specifier("==1.2.3", prereleases=False) == ... Specifier("==1.2.3", prereleases=True)) True >>> Specifier("==1.2.3") == "==1.2.3" True >>> Specifier("==1.2.3") == Specifier("==1.2.4") False >>> Specifier("==1.2.3") == Specifier("~=1.2.3") False """ if isinstance(other, str): try: other = self.__class__(str(other)) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): return NotImplemented return self._canonical_spec == other._canonical_spec def _get_operator(self, op: str) -> CallableOperator: operator_callable: CallableOperator = getattr( self, f"_compare_{self._operators[op]}" ) return operator_callable def _compare_compatible(self, prospective: Version, spec: str) -> bool: # Compatible releases have an equivalent combination of >= and ==. That # is that ~=2.2 is equivalent to >=2.2,==2.*. 
This allows us to # implement this in terms of the other specifiers instead of # implementing it ourselves. The only thing we need to do is construct # the other specifiers. # We want everything but the last item in the version, but we want to # ignore suffix segments. prefix = _version_join( list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] ) # Add the prefix notation to the end of our string prefix += ".*" return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( prospective, prefix ) def _compare_equal(self, prospective: Version, spec: str) -> bool: # We need special logic to handle prefix matching if spec.endswith(".*"): # In the case of prefix matching we want to ignore local segment. normalized_prospective = canonicalize_version( prospective.public, strip_trailing_zero=False ) # Get the normalized version string ignoring the trailing .* normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) # Split the spec out by bangs and dots, and pretend that there is # an implicit dot in between a release segment and a pre-release segment. split_spec = _version_split(normalized_spec) # Split the prospective version out by bangs and dots, and pretend # that there is an implicit dot in between a release segment and # a pre-release segment. split_prospective = _version_split(normalized_prospective) # 0-pad the prospective version before shortening it to get the correct # shortened version. padded_prospective, _ = _pad_version(split_prospective, split_spec) # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. shortened_prospective = padded_prospective[: len(split_spec)] return shortened_prospective == split_spec else: # Convert our spec string into a Version spec_version = Version(spec) # If the specifier does not have a local segment, then we want to # act as if the prospective version also does not have a local # segment. if not spec_version.local: prospective = Version(prospective.public) return prospective == spec_version def _compare_not_equal(self, prospective: Version, spec: str) -> bool: return not self._compare_equal(prospective, spec) def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) <= Version(spec) def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from # the prospective version. return Version(prospective.public) >= Version(spec) def _compare_less_than(self, prospective: Version, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec_str) # Check to see if the prospective version is less than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. if not prospective < spec: return False # This special case is here so that, unless the specifier itself # includes is a pre-release version, that we do not accept pre-release # versions for the version mentioned in the specifier (e.g. <3.1 should # not match 3.1.dev0, but should match 3.0.dev0). 
if not spec.is_prerelease and prospective.is_prerelease: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # less than the spec version *and* it's not a pre-release of the same # version in the spec. return True def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec_str) # Check to see if the prospective version is greater than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. if not prospective > spec: return False # This special case is here so that, unless the specifier itself # includes is a post-release version, that we do not accept # post-release versions for the version mentioned in the specifier # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). if not spec.is_postrelease and prospective.is_postrelease: if Version(prospective.base_version) == Version(spec.base_version): return False # Ensure that we do not allow a local version of the version mentioned # in the specifier, which is technically greater than, to match. if prospective.local is not None: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # greater than the spec version *and* it's not a pre-release of the # same version in the spec. return True def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: return str(prospective).lower() == str(spec).lower() def __contains__(self, item: str | Version) -> bool: """Return whether or not the item is contained in this specifier. :param item: The item to check for. This is used for the ``in`` operator and behaves the same as :meth:`contains` with no ``prereleases`` argument passed. >>> "1.2.3" in Specifier(">=1.2.3") True >>> Version("1.2.3") in Specifier(">=1.2.3") True >>> "1.0.0" in Specifier(">=1.2.3") False >>> "1.3.0a1" in Specifier(">=1.2.3") False >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True) True """ return self.contains(item) def contains(self, item: UnparsedVersion, prereleases: bool | None = None) -> bool: """Return whether or not the item is contained in this specifier. :param item: The item to check for, which can be a version string or a :class:`Version` instance. :param prereleases: Whether or not to match prereleases with this Specifier. If set to ``None`` (the default), it uses :attr:`prereleases` to determine whether or not prereleases are allowed. >>> Specifier(">=1.2.3").contains("1.2.3") True >>> Specifier(">=1.2.3").contains(Version("1.2.3")) True >>> Specifier(">=1.2.3").contains("1.0.0") False >>> Specifier(">=1.2.3").contains("1.3.0a1") False >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1") True >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True) True """ # Determine if prereleases are to be allowed or not. if prereleases is None: prereleases = self.prereleases # Normalize item to a Version, this allows us to have a shortcut for # "2.0" in Specifier(">=2") normalized_item = _coerce_version(item) # Determine if we should be supporting prereleases in this specifier # or not, if we do not support prereleases than we can short circuit # logic if this version is a prereleases. 
if normalized_item.is_prerelease and not prereleases: return False # Actually do the comparison to determine if this item is contained # within this Specifier or not. operator_callable: CallableOperator = self._get_operator(self.operator) return operator_callable(normalized_item, self.version)
def filter( self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None ) -> Iterator[UnparsedVersionVar]: """Filter items in the given iterable, that match the specifier. :param iterable: An iterable that can contain version strings and :class:`Version` instances. The items in the iterable will be filtered according to the specifier. :param prereleases: Whether or not to allow prereleases in the returned iterator. If set to ``None`` (the default), it will intelligently decide whether to allow prereleases or not (based on the :attr:`prereleases` attribute, and whether the only versions matching are prereleases). This method is smarter than just ``filter(Specifier().contains, [...])`` because it implements the rule from :pep:`440` that a prerelease item SHOULD be accepted if no other versions match the given specifier. >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) ['1.3'] >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")])) ['1.2.3', '1.3', <Version('1.4')>] >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"])) ['1.5a1'] >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) ['1.3', '1.5a1'] >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) ['1.3', '1.5a1'] """ yielded = False found_prereleases = [] kw = {"prereleases": prereleases if prereleases is not None else True} # Attempt to iterate over all the values in the iterable and if any of # them match, yield them. for version in iterable: parsed_version = _coerce_version(version) if self.contains(parsed_version, **kw): # If our version is a prerelease, and we were not set to allow # prereleases, then we'll store it for later in case nothing # else matches this specifier. if parsed_version.is_prerelease and not ( prereleases or self.prereleases ): found_prereleases.append(version) # Either this is not a prerelease, or we should have been # accepting prereleases from the beginning. else: yielded = True yield version # Now that we've iterated over everything, determine if we've yielded # any values, and if we have not and we have any prereleases stored up # then we will go ahead and yield the prereleases. if not yielded and found_prereleases: for version in found_prereleases: yield version
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version: str) -> list[str]: """Split version into components. The split components are intended for version comparison. The logic does not attempt to retain the original version string, so joining the components back with :func:`_version_join` may not produce the original version string. """ result: list[str] = [] epoch, _, rest = version.rpartition("!") result.append(epoch or "0") for item in rest.split("."): match = _prefix_regex.search(item) if match: result.extend(match.groups()) else: result.append(item) return result
def _version_join(components: list[str]) -> str: """Join split version components into a version string. This function assumes the input came from :func:`_version_split`, where the first component must be the epoch (either empty or numeric), and all other components numeric.
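# --- Editor's aside (illustration only): `contains` and `filter` on the
# `Specifier` defined above, mirroring its docstrings.
spec = Specifier(">=1.2.3")
assert spec.contains("1.3.0")
assert not spec.contains("1.3.0a1")             # prereleases excluded by default
assert spec.contains("1.3.0a1", prereleases=True)
assert list(spec.filter(["1.2", "1.3", "1.5a1"])) == ["1.3"]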
""" epoch, *rest = components return f"{epoch}!{'.'.join(rest)}" def _is_not_suffix(segment: str) -> bool: return not any( segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") ) def _pad_version(left: list[str], right: list[str]) -> tuple[list[str], list[str]]: left_split, right_split = [], [] # Get the release segment of our versions left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) # Get the rest of our versions left_split.append(left[len(left_split[0]) :]) right_split.append(right[len(right_split[0]) :]) # Insert our padding left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) return ( list(itertools.chain.from_iterable(left_split)), list(itertools.chain.from_iterable(right_split)), ) class SpecifierSet(BaseSpecifier): """This class abstracts handling of a set of version specifiers. It can be passed a single specifier (``>=3.0``), a comma-separated list of specifiers (``>=3.0,!=3.1``), or no specifier at all. """ def __init__( self, specifiers: str | Iterable[Specifier] = "", prereleases: bool | None = None, ) -> None: """Initialize a SpecifierSet instance. :param specifiers: The string representation of a specifier or a comma-separated list of specifiers which will be parsed and normalized before use. May also be an iterable of ``Specifier`` instances, which will be used as is. :param prereleases: This tells the SpecifierSet if it should accept prerelease versions if applicable or not. The default of ``None`` will autodetect it from the given specifiers. :raises InvalidSpecifier: If the given ``specifiers`` are not parseable than this exception will be raised. """ if isinstance(specifiers, str): # Split on `,` to break each individual specifier into its own item, and # strip each item to remove leading/trailing whitespace. split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] # Make each individual specifier a Specifier and save in a frozen set # for later. self._specs = frozenset(map(Specifier, split_specifiers)) else: # Save the supplied specifiers in a frozen set. self._specs = frozenset(specifiers) # Store our prereleases value so we can use it later to determine if # we accept prereleases or not. self._prereleases = prereleases @property def prereleases(self) -> bool | None: # If we have been given an explicit prerelease modifier, then we'll # pass that through here. if self._prereleases is not None: return self._prereleases # If we don't have any specifiers, and we don't have a forced value, # then we'll just return None since we don't know if this should have # pre-releases or not. if not self._specs: return None # Otherwise we'll see if any of the given specifiers accept # prereleases, if any of them do we'll return True, otherwise False. return any(s.prereleases for s in self._specs) @prereleases.setter def prereleases(self, value: bool) -> None: self._prereleases = value def __repr__(self) -> str: """A representation of the specifier set that shows all internal state. Note that the ordering of the individual specifiers within the set may not match the input string. 
        >>> SpecifierSet('>=1.0.0,!=2.0.0')
        <SpecifierSet('!=2.0.0,>=1.0.0')>
        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
        """
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        return f"<SpecifierSet({str(self)!r}{pre})>"

    def __str__(self) -> str:
        """A string representation of the specifier set that can be round-tripped.

        Note that the ordering of the individual specifiers within the set may not
        match the input string.

        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
        '!=1.0.1,>=1.0.0'
        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
        '!=1.0.1,>=1.0.0'
        """
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self) -> int:
        return hash(self._specs)

    def __and__(self, other: SpecifierSet | str) -> SpecifierSet:
        """Return a SpecifierSet which is a combination of the two sets.

        :param other: The other object to combine with.

        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
        """
        if isinstance(other, str):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other: object) -> bool:
        """Whether or not the two SpecifierSet-like objects are equal.

        :param other: The other object to check against.

        The value of :attr:`prereleases` is ignored.

        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
        True
        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
        False
        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
        False
        """
        if isinstance(other, (str, Specifier)):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __len__(self) -> int:
        """Returns the number of specifiers in this specifier set."""
        return len(self._specs)

    def __iter__(self) -> Iterator[Specifier]:
        """
        Returns an iterator over all the underlying :class:`Specifier` instances
        in this specifier set.

        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
        """
        return iter(self._specs)

    def __contains__(self, item: UnparsedVersion) -> bool:
        """Return whether or not the item is contained in this specifier.

        :param item: The item to check for.

        This is used for the ``in`` operator and behaves the same as
        :meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1") True >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1") True >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1") False >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1") False >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True) True """ return self.contains(item) def contains( self, item: UnparsedVersion, prereleases: bool | None = None, installed: bool | None = None, ) -> bool: """Return whether or not the item is contained in this SpecifierSet. :param item: The item to check for, which can be a version string or a :class:`Version` instance. :param prereleases: Whether or not to match prereleases with this SpecifierSet. If set to ``None`` (the default), it uses :attr:`prereleases` to determine whether or not prereleases are allowed. >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3") True >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3")) True >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1") False >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1") False >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1") True >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True) True """ # Ensure that our item is a Version instance. if not isinstance(item, Version): item = Version(item) # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. if prereleases is None: prereleases = self.prereleases # We can determine if we're going to allow pre-releases by looking to # see if any of the underlying items supports them. If none of them do # and this item is a pre-release then we do not allow it and we can # short circuit that here. # Note: This means that 1.0.dev1 would not be contained in something # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 if not prereleases and item.is_prerelease: return False if installed and item.is_prerelease: item = Version(item.base_version) # We simply dispatch to the underlying specs here to make sure that the # given version is contained within all of them. # Note: This use of all() here means that an empty set of specifiers # will always return True, this is an explicit design decision. return all(s.contains(item, prereleases=prereleases) for s in self._specs) def filter( self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None ) -> Iterator[UnparsedVersionVar]: """Filter items in the given iterable, that match the specifiers in this set. :param iterable: An iterable that can contain version strings and :class:`Version` instances. The items in the iterable will be filtered according to the specifier. :param prereleases: Whether or not to allow prereleases in the returned iterator. If set to ``None`` (the default), it will be intelligently decide whether to allow prereleases or not (based on the :attr:`prereleases` attribute, and whether the only versions matching are prereleases). This method is smarter than just ``filter(SpecifierSet(...).contains, [...])`` because it implements the rule from :pep:`440` that a prerelease item SHOULD be accepted if no other versions match the given specifier. 
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) ['1.3'] >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")])) ['1.3', ] >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"])) [] >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) ['1.3', '1.5a1'] >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) ['1.3', '1.5a1'] An "empty" SpecifierSet will filter items based on the presence of prerelease versions in the set. >>> list(SpecifierSet("").filter(["1.3", "1.5a1"])) ['1.3'] >>> list(SpecifierSet("").filter(["1.5a1"])) ['1.5a1'] >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"])) ['1.3', '1.5a1'] >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True)) ['1.3', '1.5a1'] """ # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. if prereleases is None: prereleases = self.prereleases # If we have any specifiers, then we want to wrap our iterable in the # filter method for each one, this will act as a logical AND amongst # each specifier. if self._specs: for spec in self._specs: iterable = spec.filter(iterable, prereleases=bool(prereleases)) return iter(iterable) # If we do not have any specifiers, then we need to have a rough filter # which will filter out any pre-releases, unless there are no final # releases. else: filtered: list[UnparsedVersionVar] = [] found_prereleases: list[UnparsedVersionVar] = [] for item in iterable: parsed_version = _coerce_version(item) # Store any item which is a pre-release for later unless we've # already found a final version or we are accepting prereleases if parsed_version.is_prerelease and not prereleases: if not filtered: found_prereleases.append(item) else: filtered.append(item) # If we've found no items except for pre-releases, then we'll go # ahead and use the pre-releases if not filtered and found_prereleases and prereleases is None: return iter(found_prereleases) return iter(filtered) poetry-core-2.1.1/src/poetry/core/_vendor/packaging/tags.py000066400000000000000000000510261475444614500237360ustar00rootroot00000000000000# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import annotations import logging import platform import re import struct import subprocess import sys import sysconfig from importlib.machinery import EXTENSION_SUFFIXES from typing import ( Iterable, Iterator, Sequence, Tuple, cast, ) from . import _manylinux, _musllinux logger = logging.getLogger(__name__) PythonVersion = Sequence[int] AppleVersion = Tuple[int, int] INTERPRETER_SHORT_NAMES: dict[str, str] = { "python": "py", # Generic. "cpython": "cp", "pypy": "pp", "ironpython": "ip", "jython": "jy", } _32_BIT_INTERPRETER = struct.calcsize("P") == 4 class Tag: """ A representation of the tag triple for a wheel. Instances are considered immutable and thus are hashable. Equality checking is also supported. 
""" __slots__ = ["_abi", "_hash", "_interpreter", "_platform"] def __init__(self, interpreter: str, abi: str, platform: str) -> None: self._interpreter = interpreter.lower() self._abi = abi.lower() self._platform = platform.lower() # The __hash__ of every single element in a Set[Tag] will be evaluated each time # that a set calls its `.disjoint()` method, which may be called hundreds of # times when scanning a page of links for packages with tags matching that # Set[Tag]. Pre-computing the value here produces significant speedups for # downstream consumers. self._hash = hash((self._interpreter, self._abi, self._platform)) @property def interpreter(self) -> str: return self._interpreter @property def abi(self) -> str: return self._abi @property def platform(self) -> str: return self._platform def __eq__(self, other: object) -> bool: if not isinstance(other, Tag): return NotImplemented return ( (self._hash == other._hash) # Short-circuit ASAP for perf reasons. and (self._platform == other._platform) and (self._abi == other._abi) and (self._interpreter == other._interpreter) ) def __hash__(self) -> int: return self._hash def __str__(self) -> str: return f"{self._interpreter}-{self._abi}-{self._platform}" def __repr__(self) -> str: return f"<{self} @ {id(self)}>" def parse_tag(tag: str) -> frozenset[Tag]: """ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. Returning a set is required due to the possibility that the tag is a compressed tag set. """ tags = set() interpreters, abis, platforms = tag.split("-") for interpreter in interpreters.split("."): for abi in abis.split("."): for platform_ in platforms.split("."): tags.add(Tag(interpreter, abi, platform_)) return frozenset(tags) def _get_config_var(name: str, warn: bool = False) -> int | str | None: value: int | str | None = sysconfig.get_config_var(name) if value is None and warn: logger.debug( "Config variable '%s' is unset, Python ABI tag may be incorrect", name ) return value def _normalize_string(string: str) -> str: return string.replace(".", "_").replace("-", "_").replace(" ", "_") def _is_threaded_cpython(abis: list[str]) -> bool: """ Determine if the ABI corresponds to a threaded (`--disable-gil`) build. The threaded builds are indicated by a "t" in the abiflags. """ if len(abis) == 0: return False # expect e.g., cp313 m = re.match(r"cp\d+(.*)", abis[0]) if not m: return False abiflags = m.group(1) return "t" in abiflags def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool: """ Determine if the Python version supports abi3. PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`) builds do not support abi3. """ return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]: py_version = tuple(py_version) # To allow for version comparison. abis = [] version = _version_nodot(py_version[:2]) threading = debug = pymalloc = ucs4 = "" with_debug = _get_config_var("Py_DEBUG", warn) has_refcount = hasattr(sys, "gettotalrefcount") # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled # extension modules is the best option. 
    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
    if with_debug or (with_debug is None and (has_refcount or has_ext)):
        debug = "d"
    if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn):
        threading = "t"
    if py_version < (3, 8):
        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
        if with_pymalloc or with_pymalloc is None:
            pymalloc = "m"
        if py_version < (3, 3):
            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
            if unicode_size == 4 or (
                unicode_size is None and sys.maxunicode == 0x10FFFF
            ):
                ucs4 = "u"
    elif debug:
        # Debug builds can also load "normal" extension modules.
        # We can also assume no UCS-4 or pymalloc requirement.
        abis.append(f"cp{version}{threading}")
    abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}")
    return abis


def cpython_tags(
    python_version: PythonVersion | None = None,
    abis: Iterable[str] | None = None,
    platforms: Iterable[str] | None = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a CPython interpreter.

    The tags consist of:
    - cp<python_version>-<abi>-<platform>
    - cp<python_version>-abi3-<platform>
    - cp<python_version>-none-<platform>
    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.

    If python_version only specifies a major version then user-provided ABIs and
    the 'none' ABI tag will be used.

    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
    their normal position and not at the beginning.
    """
    if not python_version:
        python_version = sys.version_info[:2]

    interpreter = f"cp{_version_nodot(python_version[:2])}"

    if abis is None:
        if len(python_version) > 1:
            abis = _cpython_abis(python_version, warn)
        else:
            abis = []

    abis = list(abis)
    # 'abi3' and 'none' are explicitly handled later.
    for explicit_abi in ("abi3", "none"):
        try:
            abis.remove(explicit_abi)
        except ValueError:
            pass

    platforms = list(platforms or platform_tags())
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)

    threading = _is_threaded_cpython(abis)
    use_abi3 = _abi3_applies(python_version, threading)
    if use_abi3:
        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)

    if use_abi3:
        for minor_version in range(python_version[1] - 1, 1, -1):
            for platform_ in platforms:
                version = _version_nodot((python_version[0], minor_version))
                interpreter = f"cp{version}"
                yield Tag(interpreter, "abi3", platform_)


def _generic_abi() -> list[str]:
    """
    Return the ABI tag based on EXT_SUFFIX.
    """
    # The following are examples of `EXT_SUFFIX`.
    # We want to keep the parts which are related to the ABI and remove the
    # parts which are related to the platform:
    # - linux:   '.cpython-310-x86_64-linux-gnu.so' => cp310
    # - mac:     '.cpython-310-darwin.so'           => cp310
    # - win:     '.cp310-win_amd64.pyd'             => cp310
    # - win:     '.pyd'                             => cp37 (uses _cpython_abis())
    # - pypy:    '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
    # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
    #                                               => graalpy_38_native
    ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
    if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
        raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
    parts = ext_suffix.split(".")
    if len(parts) < 3:
        # CPython3.7 and earlier uses ".pyd" on Windows.
        return _cpython_abis(sys.version_info[:2])
    soabi = parts[1]
    if soabi.startswith("cpython"):
        # non-windows
        abi = "cp" + soabi.split("-")[1]
    elif soabi.startswith("cp"):
        # windows
        abi = soabi.split("-")[0]
    elif soabi.startswith("pypy"):
        abi = "-".join(soabi.split("-")[:2])
    elif soabi.startswith("graalpy"):
        abi = "-".join(soabi.split("-")[:3])
    elif soabi:
        # pyston, ironpython, others?
        abi = soabi
    else:
        return []
    return [_normalize_string(abi)]


def generic_tags(
    interpreter: str | None = None,
    abis: Iterable[str] | None = None,
    platforms: Iterable[str] | None = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a generic interpreter.

    The tags consist of:
    - <interpreter>-<abi>-<platform>

    The "none" ABI will be added if it was not explicitly provided.
    """
    if not interpreter:
        interp_name = interpreter_name()
        interp_version = interpreter_version(warn=warn)
        interpreter = "".join([interp_name, interp_version])
    if abis is None:
        abis = _generic_abi()
    else:
        abis = list(abis)
    platforms = list(platforms or platform_tags())
    if "none" not in abis:
        abis.append("none")
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)


def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
    """
    Yields Python versions in descending order.

    After the latest version, the major-only version will be yielded, and then
    all previous versions of that major version.
    """
    if len(py_version) > 1:
        yield f"py{_version_nodot(py_version[:2])}"
    yield f"py{py_version[0]}"
    if len(py_version) > 1:
        for minor in range(py_version[1] - 1, -1, -1):
            yield f"py{_version_nodot((py_version[0], minor))}"


def compatible_tags(
    python_version: PythonVersion | None = None,
    interpreter: str | None = None,
    platforms: Iterable[str] | None = None,
) -> Iterator[Tag]:
    """
    Yields the sequence of tags that are compatible with a specific version of Python.

    The tags consist of:
    - py*-none-<platform>
    - <interpreter>-none-any  # ... if `interpreter` is provided.
    - py*-none-any
    """
    if not python_version:
        python_version = sys.version_info[:2]
    platforms = list(platforms or platform_tags())
    for version in _py_interpreter_range(python_version):
        for platform_ in platforms:
            yield Tag(version, "none", platform_)
    if interpreter:
        yield Tag(interpreter, "none", "any")
    for version in _py_interpreter_range(python_version):
        yield Tag(version, "none", "any")


def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
    if not is_32bit:
        return arch

    if arch.startswith("ppc"):
        return "ppc"

    return "i386"


def _mac_binary_formats(version: AppleVersion, cpu_arch: str) -> list[str]:
    formats = [cpu_arch]
    if cpu_arch == "x86_64":
        if version < (10, 4):
            return []
        formats.extend(["intel", "fat64", "fat32"])
    elif cpu_arch == "i386":
        if version < (10, 4):
            return []
        formats.extend(["intel", "fat32", "fat"])
    elif cpu_arch == "ppc64":
        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
        if version > (10, 5) or version < (10, 4):
            return []
        formats.append("fat64")
    elif cpu_arch == "ppc":
        if version > (10, 6):
            return []
        formats.extend(["fat32", "fat"])

    if cpu_arch in {"arm64", "x86_64"}:
        formats.append("universal2")

    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
        formats.append("universal")

    return formats


def mac_platforms(
    version: AppleVersion | None = None, arch: str | None = None
) -> Iterator[str]:
    """
    Yields the platform tags for a macOS system.

    The `version` parameter is a two-item tuple specifying the macOS version to
    generate platform tags for. The `arch` parameter is the CPU architecture to
    generate platform tags for.
    Both parameters default to the appropriate value for the current system.
    """
    version_str, _, cpu_arch = platform.mac_ver()
    if version is None:
        version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))
        if version == (10, 16):
            # When built against an older macOS SDK, Python will report macOS 10.16
            # instead of the real version.
            version_str = subprocess.run(
                [
                    sys.executable,
                    "-sS",
                    "-c",
                    "import platform; print(platform.mac_ver()[0])",
                ],
                check=True,
                env={"SYSTEM_VERSION_COMPAT": "0"},
                stdout=subprocess.PIPE,
                text=True,
            ).stdout
            version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))
    else:
        version = version
    if arch is None:
        arch = _mac_arch(cpu_arch)
    else:
        arch = arch

    if (10, 0) <= version and version < (11, 0):
        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
        # "minor" version number. The major version was always 10.
        major_version = 10
        for minor_version in range(version[1], -1, -1):
            compat_version = major_version, minor_version
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield f"macosx_{major_version}_{minor_version}_{binary_format}"

    if version >= (11, 0):
        # Starting with Mac OS 11, each yearly release bumps the major version
        # number. The minor versions are now the midyear updates.
        minor_version = 0
        for major_version in range(version[0], 10, -1):
            compat_version = major_version, minor_version
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield f"macosx_{major_version}_{minor_version}_{binary_format}"

    if version >= (11, 0):
        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
        # releases exist.
        #
        # However, the "universal2" binary format can have a
        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
        # that version of macOS.
        major_version = 10
        if arch == "x86_64":
            for minor_version in range(16, 3, -1):
                compat_version = major_version, minor_version
                binary_formats = _mac_binary_formats(compat_version, arch)
                for binary_format in binary_formats:
                    yield f"macosx_{major_version}_{minor_version}_{binary_format}"
        else:
            for minor_version in range(16, 3, -1):
                compat_version = major_version, minor_version
                binary_format = "universal2"
                yield f"macosx_{major_version}_{minor_version}_{binary_format}"


def ios_platforms(
    version: AppleVersion | None = None, multiarch: str | None = None
) -> Iterator[str]:
    """
    Yields the platform tags for an iOS system.

    :param version: A two-item tuple specifying the iOS version to generate
        platform tags for. Defaults to the current iOS version.
    :param multiarch: The CPU architecture+ABI to generate platform tags for -
        (the value used by `sys.implementation._multiarch` e.g.,
        `arm64_iphoneos` or `x86_64_iphonesimulator`). Defaults to the current
        multiarch value.
    """
    if version is None:
        # if iOS is the current platform, ios_ver *must* be defined. However,
        # it won't exist for CPython versions before 3.13, which causes a mypy
        # error.
        _, release, _, _ = platform.ios_ver()  # type: ignore[attr-defined, unused-ignore]
        version = cast("AppleVersion", tuple(map(int, release.split(".")[:2])))

    if multiarch is None:
        multiarch = sys.implementation._multiarch
    multiarch = multiarch.replace("-", "_")

    ios_platform_template = "ios_{major}_{minor}_{multiarch}"

    # Consider any iOS major.minor version from the version requested, down to
    # 12.0. 12.0 is the first iOS version that is known to have enough features
    # to support CPython. Consider every possible minor release up to X.9. The
    # highest the minor has ever gone is 8 (14.8 and 15.8) but having some extra
    # candidates that won't ever match doesn't really hurt, and it saves us from
    # having to keep an explicit list of known iOS versions in the code. Return
    # the results in descending order of version number.

    # If the requested major version is less than 12, there won't be any matches.
    if version[0] < 12:
        return

    # Consider the actual X.Y version that was requested.
    yield ios_platform_template.format(
        major=version[0], minor=version[1], multiarch=multiarch
    )

    # Consider every minor version from X.0 to the minor version prior to the
    # version requested by the platform.
    for minor in range(version[1] - 1, -1, -1):
        yield ios_platform_template.format(
            major=version[0], minor=minor, multiarch=multiarch
        )

    for major in range(version[0] - 1, 11, -1):
        for minor in range(9, -1, -1):
            yield ios_platform_template.format(
                major=major, minor=minor, multiarch=multiarch
            )


def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
    linux = _normalize_string(sysconfig.get_platform())
    if not linux.startswith("linux_"):
        # we should never be here, just yield the sysconfig one and return
        yield linux
        return
    if is_32bit:
        if linux == "linux_x86_64":
            linux = "linux_i686"
        elif linux == "linux_aarch64":
            linux = "linux_armv8l"
    _, arch = linux.split("_", 1)
    archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch])
    yield from _manylinux.platform_tags(archs)
    yield from _musllinux.platform_tags(archs)
    for arch in archs:
        yield f"linux_{arch}"


def _generic_platforms() -> Iterator[str]:
    yield _normalize_string(sysconfig.get_platform())


def platform_tags() -> Iterator[str]:
    """
    Provides the platform tags for this installation.
    """
    if platform.system() == "Darwin":
        return mac_platforms()
    elif platform.system() == "iOS":
        return ios_platforms()
    elif platform.system() == "Linux":
        return _linux_platforms()
    else:
        return _generic_platforms()


def interpreter_name() -> str:
    """
    Returns the name of the running interpreter.

    Some implementations have a reserved, two-letter abbreviation which will
    be returned when appropriate.
    """
    name = sys.implementation.name
    return INTERPRETER_SHORT_NAMES.get(name) or name


def interpreter_version(*, warn: bool = False) -> str:
    """
    Returns the version of the running interpreter.
    """
    version = _get_config_var("py_version_nodot", warn=warn)
    if version:
        version = str(version)
    else:
        version = _version_nodot(sys.version_info[:2])
    return version


def _version_nodot(version: PythonVersion) -> str:
    return "".join(map(str, version))


def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
    """
    Returns the sequence of tag triples for the running interpreter.

    The order of the sequence corresponds to priority order for the
    interpreter, from most to least important.
    """
    interp_name = interpreter_name()
    if interp_name == "cp":
        yield from cpython_tags(warn=warn)
    else:
        yield from generic_tags()

    if interp_name == "pp":
        interp = "pp3"
    elif interp_name == "cp":
        interp = "cp" + interpreter_version(warn=warn)
    else:
        interp = None
    yield from compatible_tags(interpreter=interp)
poetry-core-2.1.1/src/poetry/core/_vendor/packaging/utils.py000066400000000000000000000116721475444614500241430ustar00rootroot00000000000000# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations

import functools
import re
from typing import NewType, Tuple, Union, cast

from .tags import Tag, parse_tag
from .version import InvalidVersion, Version, _TrimmedRelease

BuildTag = Union[Tuple[()], Tuple[int, str]]
NormalizedName = NewType("NormalizedName", str)


class InvalidName(ValueError):
    """
    An invalid distribution name; users should refer to the packaging user guide.
    """


class InvalidWheelFilename(ValueError):
    """
    An invalid wheel filename was found, users should refer to PEP 427.
    """


class InvalidSdistFilename(ValueError):
    """
    An invalid sdist filename was found, users should refer to the packaging
    user guide.
    """


# Core metadata spec for `Name`
_validate_regex = re.compile(
    r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
)
_canonicalize_regex = re.compile(r"[-_.]+")
_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")


def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:
    if validate and not _validate_regex.match(name):
        raise InvalidName(f"name is invalid: {name!r}")
    # This is taken from PEP 503.
    value = _canonicalize_regex.sub("-", name).lower()
    return cast(NormalizedName, value)


def is_normalized_name(name: str) -> bool:
    return _normalized_regex.match(name) is not None


@functools.singledispatch
def canonicalize_version(
    version: Version | str, *, strip_trailing_zero: bool = True
) -> str:
    """
    Return a canonical form of a version as a string.

    >>> canonicalize_version('1.0.1')
    '1.0.1'

    Per PEP 625, versions may have multiple canonical forms, differing
    only by trailing zeros.

    >>> canonicalize_version('1.0.0')
    '1'
    >>> canonicalize_version('1.0.0', strip_trailing_zero=False)
    '1.0.0'

    Invalid versions are returned unaltered.

    >>> canonicalize_version('foo bar baz')
    'foo bar baz'
    """
    return str(_TrimmedRelease(str(version)) if strip_trailing_zero else version)


@canonicalize_version.register
def _(version: str, *, strip_trailing_zero: bool = True) -> str:
    try:
        parsed = Version(version)
    except InvalidVersion:
        # Legacy versions cannot be normalized
        return version
    return canonicalize_version(parsed, strip_trailing_zero=strip_trailing_zero)


def parse_wheel_filename(
    filename: str,
) -> tuple[NormalizedName, Version, BuildTag, frozenset[Tag]]:
    if not filename.endswith(".whl"):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (extension must be '.whl'): {filename!r}"
        )

    filename = filename[:-4]
    dashes = filename.count("-")
    if dashes not in (4, 5):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (wrong number of parts): {filename!r}"
        )

    parts = filename.split("-", dashes - 2)
    name_part = parts[0]
    # See PEP 427 for the rules on escaping the project name.
if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: raise InvalidWheelFilename(f"Invalid project name: {filename!r}") name = canonicalize_name(name_part) try: version = Version(parts[1]) except InvalidVersion as e: raise InvalidWheelFilename( f"Invalid wheel filename (invalid version): {filename!r}" ) from e if dashes == 5: build_part = parts[2] build_match = _build_tag_regex.match(build_part) if build_match is None: raise InvalidWheelFilename( f"Invalid build number: {build_part} in {filename!r}" ) build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) else: build = () tags = parse_tag(parts[-1]) return (name, version, build, tags) def parse_sdist_filename(filename: str) -> tuple[NormalizedName, Version]: if filename.endswith(".tar.gz"): file_stem = filename[: -len(".tar.gz")] elif filename.endswith(".zip"): file_stem = filename[: -len(".zip")] else: raise InvalidSdistFilename( f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" f" {filename!r}" ) # We are requiring a PEP 440 version, which cannot contain dashes, # so we split on the last dash. name_part, sep, version_part = file_stem.rpartition("-") if not sep: raise InvalidSdistFilename(f"Invalid sdist filename: {filename!r}") name = canonicalize_name(name_part) try: version = Version(version_part) except InvalidVersion as e: raise InvalidSdistFilename( f"Invalid sdist filename (invalid version): {filename!r}" ) from e return (name, version) poetry-core-2.1.1/src/poetry/core/_vendor/packaging/version.py000066400000000000000000000404441475444614500244670ustar00rootroot00000000000000# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. """ .. testsetup:: from packaging.version import parse, Version """ from __future__ import annotations import itertools import re from typing import Any, Callable, NamedTuple, SupportsInt, Tuple, Union from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType __all__ = ["VERSION_PATTERN", "InvalidVersion", "Version", "parse"] LocalType = Tuple[Union[int, str], ...] CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]] CmpLocalType = Union[ NegativeInfinityType, Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...], ] CmpKey = Tuple[ int, Tuple[int, ...], CmpPrePostDevType, CmpPrePostDevType, CmpPrePostDevType, CmpLocalType, ] VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] class _Version(NamedTuple): epoch: int release: tuple[int, ...] dev: tuple[str, int] | None pre: tuple[str, int] | None post: tuple[str, int] | None local: LocalType | None def parse(version: str) -> Version: """Parse the given version string. >>> parse('1.0.dev1') :param version: The version string to parse. :raises InvalidVersion: When the version string is not a valid version. """ return Version(version) class InvalidVersion(ValueError): """Raised when a version string is not a valid version. >>> Version("invalid") Traceback (most recent call last): ... packaging.version.InvalidVersion: Invalid version: 'invalid' """ class _BaseVersion: _key: tuple[Any, ...] def __hash__(self) -> int: return hash(self._key) # Please keep the duplicated `isinstance` check # in the six comparisons hereunder # unless you find a way to avoid adding overhead function calls. 
    def __lt__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key < other._key

    def __le__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key <= other._key

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key == other._key

    def __ge__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key >= other._key

    def __gt__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key > other._key

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key != other._key


# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
_VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""

VERSION_PATTERN = _VERSION_PATTERN
"""
A string containing the regular expression used to match a valid version.

The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.

:meta hide-value:
"""


class Version(_BaseVersion):
    """This class abstracts handling of a project's versions.

    A :class:`Version` instance is comparison aware and can be compared and
    sorted using the standard Python interfaces.

    >>> v1 = Version("1.0a5")
    >>> v2 = Version("1.0")
    >>> v1
    <Version('1.0a5')>
    >>> v2
    <Version('1.0')>
    >>> v1 < v2
    True
    >>> v1 == v2
    False
    >>> v1 > v2
    False
    >>> v1 >= v2
    False
    >>> v1 <= v2
    True
    """

    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
    _key: CmpKey

    def __init__(self, version: str) -> None:
        """Initialize a Version object.

        :param version:
            The string representation of a version which will be parsed and normalized
            before use.
        :raises InvalidVersion:
            If the ``version`` does not conform to PEP 440 in any way then this
            exception will be raised.
        """

        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: {version!r}")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        """A representation of the Version that shows all internal state.

        >>> Version('1.0.0')
        <Version('1.0.0')>
        """
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        """A string representation of the version that can be round-tripped.

        >>> str(Version("1.0a5"))
        '1.0a5'
        """
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    @property
    def epoch(self) -> int:
        """The epoch of the version.

        >>> Version("2.0.0").epoch
        0
        >>> Version("1!2.0.0").epoch
        1
        """
        return self._version.epoch

    @property
    def release(self) -> tuple[int, ...]:
        """The components of the "release" segment of the version.

        >>> Version("1.2.3").release
        (1, 2, 3)
        >>> Version("2.0.0").release
        (2, 0, 0)
        >>> Version("1!2.0.0.post0").release
        (2, 0, 0)

        Includes trailing zeroes but not the epoch or any pre-release / development /
        post-release suffixes.
        """
        return self._version.release

    @property
    def pre(self) -> tuple[str, int] | None:
        """The pre-release segment of the version.

        >>> print(Version("1.2.3").pre)
        None
        >>> Version("1.2.3a1").pre
        ('a', 1)
        >>> Version("1.2.3b1").pre
        ('b', 1)
        >>> Version("1.2.3rc1").pre
        ('rc', 1)
        """
        return self._version.pre

    @property
    def post(self) -> int | None:
        """The post-release number of the version.

        >>> print(Version("1.2.3").post)
        None
        >>> Version("1.2.3.post1").post
        1
        """
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self) -> int | None:
        """The development number of the version.

        >>> print(Version("1.2.3").dev)
        None
        >>> Version("1.2.3.dev1").dev
        1
        """
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self) -> str | None:
        """The local version segment of the version.

        >>> print(Version("1.2.3").local)
        None
        >>> Version("1.2.3+abc").local
        'abc'
        """
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    @property
    def public(self) -> str:
        """The public portion of the version.

        >>> Version("1.2.3").public
        '1.2.3'
        >>> Version("1.2.3+abc").public
        '1.2.3'
        >>> Version("1!1.2.3dev1+abc").public
        '1!1.2.3.dev1'
        """
        return str(self).split("+", 1)[0]

    @property
    def base_version(self) -> str:
        """The "base version" of the version.

        >>> Version("1.2.3").base_version
        '1.2.3'
        >>> Version("1.2.3+abc").base_version
        '1.2.3'
        >>> Version("1!1.2.3dev1+abc").base_version
        '1!1.2.3'

        The "base version" is the public version of the project without any pre or post
        release markers.
        """
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        return "".join(parts)

    @property
    def is_prerelease(self) -> bool:
        """Whether this version is a pre-release.

        >>> Version("1.2.3").is_prerelease
        False
        >>> Version("1.2.3a1").is_prerelease
        True
        >>> Version("1.2.3b1").is_prerelease
        True
        >>> Version("1.2.3rc1").is_prerelease
        True
        >>> Version("1.2.3dev1").is_prerelease
        True
        """
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self) -> bool:
        """Whether this version is a post-release.

        >>> Version("1.2.3").is_postrelease
        False
        >>> Version("1.2.3.post1").is_postrelease
        True
        """
        return self.post is not None

    @property
    def is_devrelease(self) -> bool:
        """Whether this version is a development release.

        >>> Version("1.2.3").is_devrelease
        False
        >>> Version("1.2.3.dev1").is_devrelease
        True
        """
        return self.dev is not None

    @property
    def major(self) -> int:
        """The first item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").major
        1
        """
        return self.release[0] if len(self.release) >= 1 else 0

    @property
    def minor(self) -> int:
        """The second item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").minor
        2
        >>> Version("1").minor
        0
        """
        return self.release[1] if len(self.release) >= 2 else 0

    @property
    def micro(self) -> int:
        """The third item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").micro
        3
        >>> Version("1").micro
        0
        """
        return self.release[2] if len(self.release) >= 3 else 0


class _TrimmedRelease(Version):
    @property
    def release(self) -> tuple[int, ...]:
        """
        Release segment without any trailing zeros.

        >>> _TrimmedRelease('1.0.0').release
        (1,)
        >>> _TrimmedRelease('0.0').release
        (0,)
        """
        rel = super().release
        nonzeros = (index for index, val in enumerate(rel) if val)
        last_nonzero = max(nonzeros, default=0)
        return rel[: last_nonzero + 1]


def _parse_letter_version(
    letter: str | None, number: str | bytes | SupportsInt | None
) -> tuple[str, int] | None:
    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0

        # We normalize any letters to their lower case form
        letter = letter.lower()

        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"
        elif letter in ["rev", "r"]:
            letter = "post"

        return letter, int(number)

    assert not letter
    if number:
        # We assume if we are given a number, but we are not given a letter
        # then this is using the implicit post release syntax (e.g. 1.0-1)
        letter = "post"

        return letter, int(number)

    return None
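

# A sketch of what the normalization above produces (illustrative only):
def _example_parse_letter_version() -> None:
    # Alternate spellings collapse to canonical forms, with an implicit 0
    # when no number is attached.
    assert _parse_letter_version("alpha", None) == ("a", 0)
    assert _parse_letter_version("preview", "2") == ("rc", 2)
    assert _parse_letter_version("rev", "3") == ("post", 3)
    # The implicit post-release syntax (e.g. "1.0-1") has a number but no letter.
    assert _parse_letter_version(None, "1") == ("post", 1)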


_local_version_separators = re.compile(r"[\._-]")


def _parse_local_version(local: str | None) -> LocalType | None:
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
        )
    return None
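

# Quick illustration of the docstring above (names here are illustrative):
def _example_parse_local_version() -> None:
    assert _parse_local_version("abc.1.twelve") == ("abc", 1, "twelve")
    # The separators ".", "_" and "-" are treated interchangeably.
    assert _parse_local_version("ubuntu-1") == ("ubuntu", 1)
    assert _parse_local_version(None) is None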


def _cmpkey(
    epoch: int,
    release: tuple[int, ...],
    pre: tuple[str, int] | None,
    post: tuple[str, int] | None,
    dev: tuple[str, int] | None,
    local: LocalType | None,
) -> CmpKey:
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
    # zeros until we come to something non-zero, then take the rest, re-reverse
    # it back into the correct order, and make it a tuple to use as our sorting
    # key.
    _release = tuple(
        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        _pre: CmpPrePostDevType = NegativeInfinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        _pre = Infinity
    else:
        _pre = pre

    # Versions without a post segment should sort before those with one.
    if post is None:
        _post: CmpPrePostDevType = NegativeInfinity

    else:
        _post = post

    # Versions without a development segment should sort after those with one.
    if dev is None:
        _dev: CmpPrePostDevType = Infinity

    else:
        _dev = dev

    if local is None:
        # Versions without a local segment should sort before those with one.
        _local: CmpLocalType = NegativeInfinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        _local = tuple(
            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
        )

    return epoch, _release, _pre, _post, _dev, _local
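

# The comments in _cmpkey encode the PEP 440 ordering tricks; a small sketch
# showing their observable effect through Version (illustrative only):
def _example_cmpkey_ordering() -> None:
    # Dev releases sort before pre-releases of the same release segment,
    # which in turn sort before the final release.
    assert Version("1.0.dev0") < Version("1.0a0") < Version("1.0")
    # Versions without a local segment sort before those with one.
    assert Version("1.0") < Version("1.0+abc")
    # Within a local segment, alphanumeric parts sort before numeric parts.
    assert Version("1.0+abc") < Version("1.0+5")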
poetry-core-2.1.1/src/poetry/core/_vendor/tomli/000077500000000000000000000000001475444614500216225ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/_vendor/tomli/LICENSE000066400000000000000000000020601475444614500226250ustar00rootroot00000000000000MIT License

Copyright (c) 2021 Taneli Hukkinen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
poetry-core-2.1.1/src/poetry/core/_vendor/tomli/__init__.py000066400000000000000000000004721475444614500237360ustar00rootroot00000000000000# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.

__all__ = ("loads", "load", "TOMLDecodeError")
__version__ = "2.2.1"  # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT

from ._parser import TOMLDecodeError, load, loads
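

# A minimal usage sketch of the API re-exported above (the file name and TOML
# content are illustrative; load() requires a binary file object):
def _example_usage() -> None:
    doc = loads('[tool.example]\nanswer = 42\n')
    assert doc["tool"]["example"]["answer"] == 42

    with open("pyproject.toml", "rb") as f:  # text mode would raise TypeError
        data = load(f)
    assert isinstance(data, dict)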
poetry-core-2.1.1/src/poetry/core/_vendor/tomli/_parser.py000066400000000000000000000617671475444614500236500ustar00rootroot00000000000000# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.

from __future__ import annotations

from collections.abc import Iterable
import string
import sys
from types import MappingProxyType
from typing import IO, Any, Final, NamedTuple
import warnings

from ._re import (
    RE_DATETIME,
    RE_LOCALTIME,
    RE_NUMBER,
    match_to_datetime,
    match_to_localtime,
    match_to_number,
)
from ._types import Key, ParseFloat, Pos

# Inline tables/arrays are implemented using recursion. Pathologically
# nested documents cause pure Python to raise RecursionError (which is OK),
# but mypyc binary wheels will crash unrecoverably (not OK). According to
# mypyc docs this will be fixed in the future:
# https://mypyc.readthedocs.io/en/latest/differences_from_python.html#stack-overflows
# Before mypyc's fix is in, recursion needs to be limited by this library.
# Choosing `sys.getrecursionlimit()` as maximum inline table/array nesting
# level, as it allows more nesting than pure Python, but still seems a far
# lower number than where mypyc binaries crash.
MAX_INLINE_NESTING: Final = sys.getrecursionlimit()

ASCII_CTRL: Final = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))

# Neither of these sets include quotation mark or backslash. They are
# currently handled as separate cases in the parser functions.
ILLEGAL_BASIC_STR_CHARS: Final = ASCII_CTRL - frozenset("\t")
ILLEGAL_MULTILINE_BASIC_STR_CHARS: Final = ASCII_CTRL - frozenset("\t\n")

ILLEGAL_LITERAL_STR_CHARS: Final = ILLEGAL_BASIC_STR_CHARS
ILLEGAL_MULTILINE_LITERAL_STR_CHARS: Final = ILLEGAL_MULTILINE_BASIC_STR_CHARS

ILLEGAL_COMMENT_CHARS: Final = ILLEGAL_BASIC_STR_CHARS

TOML_WS: Final = frozenset(" \t")
TOML_WS_AND_NEWLINE: Final = TOML_WS | frozenset("\n")
BARE_KEY_CHARS: Final = frozenset(string.ascii_letters + string.digits + "-_")
KEY_INITIAL_CHARS: Final = BARE_KEY_CHARS | frozenset("\"'")
HEXDIGIT_CHARS: Final = frozenset(string.hexdigits)

BASIC_STR_ESCAPE_REPLACEMENTS: Final = MappingProxyType(
    {
        "\\b": "\u0008",  # backspace
        "\\t": "\u0009",  # tab
        "\\n": "\u000A",  # linefeed
        "\\f": "\u000C",  # form feed
        "\\r": "\u000D",  # carriage return
        '\\"': "\u0022",  # quote
        "\\\\": "\u005C",  # backslash
    }
)


class DEPRECATED_DEFAULT:
    """Sentinel to be used as default arg during deprecation
    period of TOMLDecodeError's free-form arguments."""


class TOMLDecodeError(ValueError):
    """An error raised if a document is not valid TOML.

    Adds the following attributes to ValueError:
    msg: The unformatted error message
    doc: The TOML document being parsed
    pos: The index of doc where parsing failed
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    """

    def __init__(
        self,
        msg: str | type[DEPRECATED_DEFAULT] = DEPRECATED_DEFAULT,
        doc: str | type[DEPRECATED_DEFAULT] = DEPRECATED_DEFAULT,
        pos: Pos | type[DEPRECATED_DEFAULT] = DEPRECATED_DEFAULT,
        *args: Any,
    ):
        if (
            args
            or not isinstance(msg, str)
            or not isinstance(doc, str)
            or not isinstance(pos, int)
        ):
            warnings.warn(
                "Free-form arguments for TOMLDecodeError are deprecated. "
                "Please set 'msg' (str), 'doc' (str) and 'pos' (int) arguments only.",
                DeprecationWarning,
                stacklevel=2,
            )
            if pos is not DEPRECATED_DEFAULT:
                args = pos, *args
            if doc is not DEPRECATED_DEFAULT:
                args = doc, *args
            if msg is not DEPRECATED_DEFAULT:
                args = msg, *args
            ValueError.__init__(self, *args)
            return

        lineno = doc.count("\n", 0, pos) + 1
        if lineno == 1:
            colno = pos + 1
        else:
            colno = pos - doc.rindex("\n", 0, pos)

        if pos >= len(doc):
            coord_repr = "end of document"
        else:
            coord_repr = f"line {lineno}, column {colno}"
        errmsg = f"{msg} (at {coord_repr})"
        ValueError.__init__(self, errmsg)

        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.lineno = lineno
        self.colno = colno
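

# Sketch of how a caller can use the attributes documented above to report a
# parse error location (the document text is illustrative):
def _example_error_reporting() -> None:
    try:
        loads("x = 1\ny =\n")
    except TOMLDecodeError as exc:
        # pos indexes into the original document; lineno/colno are 1-based.
        assert (exc.lineno, exc.colno) == (2, 4)
        assert exc.doc[exc.pos :].startswith("\n")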


def load(__fp: IO[bytes], *, parse_float: ParseFloat = float) -> dict[str, Any]:
    """Parse TOML from a binary file object."""
    b = __fp.read()
    try:
        s = b.decode()
    except AttributeError:
        raise TypeError(
            "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
        ) from None
    return loads(s, parse_float=parse_float)
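

# A sketch of the binary-mode requirement above, using in-memory file objects
# so the example is self-contained (the TOML content is illustrative):
def _example_load_requires_binary() -> None:
    import io

    # A bytes file object works; load() decodes it itself.
    assert load(io.BytesIO(b"a = 1")) == {"a": 1}
    # A text file object does not: str has no .decode(), so load() raises.
    try:
        load(io.StringIO("a = 1"))  # type: ignore[arg-type]
    except TypeError as exc:
        assert "binary mode" in str(exc)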


def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]:  # noqa: C901
    """Parse TOML from a string."""

    # The spec allows converting "\r\n" to "\n", even in string
    # literals. Let's do so to simplify parsing.
    try:
        src = __s.replace("\r\n", "\n")
    except (AttributeError, TypeError):
        raise TypeError(
            f"Expected str object, not '{type(__s).__qualname__}'"
        ) from None
    pos = 0
    out = Output(NestedDict(), Flags())
    header: Key = ()
    parse_float = make_safe_parse_float(parse_float)

    # Parse one statement at a time
    # (typically means one line in TOML source)
    while True:
        # 1. Skip line leading whitespace
        pos = skip_chars(src, pos, TOML_WS)

        # 2. Parse rules. Expect one of the following:
        #    - end of file
        #    - end of line
        #    - comment
        #    - key/value pair
        #    - append dict to list (and move to its namespace)
        #    - create dict (and move to its namespace)
        # Skip trailing whitespace when applicable.
        try:
            char = src[pos]
        except IndexError:
            break
        if char == "\n":
            pos += 1
            continue
        if char in KEY_INITIAL_CHARS:
            pos = key_value_rule(src, pos, out, header, parse_float)
            pos = skip_chars(src, pos, TOML_WS)
        elif char == "[":
            try:
                second_char: str | None = src[pos + 1]
            except IndexError:
                second_char = None
            out.flags.finalize_pending()
            if second_char == "[":
                pos, header = create_list_rule(src, pos, out)
            else:
                pos, header = create_dict_rule(src, pos, out)
            pos = skip_chars(src, pos, TOML_WS)
        elif char != "#":
            raise TOMLDecodeError("Invalid statement", src, pos)

        # 3. Skip comment
        pos = skip_comment(src, pos)

        # 4. Expect end of line or end of file
        try:
            char = src[pos]
        except IndexError:
            break
        if char != "\n":
            raise TOMLDecodeError(
                "Expected newline or end of document after a statement", src, pos
            )
        pos += 1

    return out.data.dict
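

# The parse_float hook above lets callers avoid binary floats entirely. A
# common choice is decimal.Decimal (one example of an acceptable callable):
def _example_parse_float() -> None:
    from decimal import Decimal

    doc = loads("pi = 3.14159", parse_float=Decimal)
    assert doc["pi"] == Decimal("3.14159")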


class Flags:
    """Flags that map to parsed keys/namespaces."""

    # Marks an immutable namespace (inline array or inline table).
    FROZEN: Final = 0
    # Marks a nest that has been explicitly created and can no longer
    # be opened using the "[table]" syntax.
    EXPLICIT_NEST: Final = 1

    def __init__(self) -> None:
        self._flags: dict[str, dict] = {}
        self._pending_flags: set[tuple[Key, int]] = set()

    def add_pending(self, key: Key, flag: int) -> None:
        self._pending_flags.add((key, flag))

    def finalize_pending(self) -> None:
        for key, flag in self._pending_flags:
            self.set(key, flag, recursive=False)
        self._pending_flags.clear()

    def unset_all(self, key: Key) -> None:
        cont = self._flags
        for k in key[:-1]:
            if k not in cont:
                return
            cont = cont[k]["nested"]
        cont.pop(key[-1], None)

    def set(self, key: Key, flag: int, *, recursive: bool) -> None:  # noqa: A003
        cont = self._flags
        key_parent, key_stem = key[:-1], key[-1]
        for k in key_parent:
            if k not in cont:
                cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
            cont = cont[k]["nested"]
        if key_stem not in cont:
            cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
        cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)

    def is_(self, key: Key, flag: int) -> bool:
        if not key:
            return False  # document root has no flags
        cont = self._flags
        for k in key[:-1]:
            if k not in cont:
                return False
            inner_cont = cont[k]
            if flag in inner_cont["recursive_flags"]:
                return True
            cont = inner_cont["nested"]
        key_stem = key[-1]
        if key_stem in cont:
            inner_cont = cont[key_stem]
            return flag in inner_cont["flags"] or flag in inner_cont["recursive_flags"]
        return False
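
# Flag semantics in practice (illustrative):
#
#     [a]          # sets EXPLICIT_NEST on ("a",); a second "[a]" is an error
#     x = {y = 1}  # sets FROZEN recursively on ("a", "x"), so a later
#                  # "[a.x.z]" fails with "Cannot declare ... twice"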


class NestedDict:
    def __init__(self) -> None:
        # The parsed content of the TOML document
        self.dict: dict[str, Any] = {}

    def get_or_create_nest(
        self,
        key: Key,
        *,
        access_lists: bool = True,
    ) -> dict:
        cont: Any = self.dict
        for k in key:
            if k not in cont:
                cont[k] = {}
            cont = cont[k]
            if access_lists and isinstance(cont, list):
                cont = cont[-1]
            if not isinstance(cont, dict):
                raise KeyError("There is no nest behind this key")
        return cont

    def append_nest_to_list(self, key: Key) -> None:
        cont = self.get_or_create_nest(key[:-1])
        last_key = key[-1]
        if last_key in cont:
            list_ = cont[last_key]
            if not isinstance(list_, list):
                raise KeyError("An object other than list found behind this key")
            list_.append({})
        else:
            cont[last_key] = [{}]
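
# Shape sketch (illustrative):
#
#     nd = NestedDict()
#     nd.get_or_create_nest(("a", "b"))     # nd.dict == {"a": {"b": {}}}
#     nd.append_nest_to_list(("a", "arr"))  # nd.dict["a"]["arr"] == [{}]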


class Output(NamedTuple):
    data: NestedDict
    flags: Flags


def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
    try:
        while src[pos] in chars:
            pos += 1
    except IndexError:
        pass
    return pos


def skip_until(
    src: str,
    pos: Pos,
    expect: str,
    *,
    error_on: frozenset[str],
    error_on_eof: bool,
) -> Pos:
    try:
        new_pos = src.index(expect, pos)
    except ValueError:
        new_pos = len(src)
        if error_on_eof:
            raise TOMLDecodeError(f"Expected {expect!r}", src, new_pos) from None

    if not error_on.isdisjoint(src[pos:new_pos]):
        while src[pos] not in error_on:
            pos += 1
        raise TOMLDecodeError(f"Found invalid character {src[pos]!r}", src, pos)
    return new_pos


def skip_comment(src: str, pos: Pos) -> Pos:
    try:
        char: str | None = src[pos]
    except IndexError:
        char = None
    if char == "#":
        return skip_until(
            src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
        )
    return pos


def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
    while True:
        pos_before_skip = pos
        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
        pos = skip_comment(src, pos)
        if pos == pos_before_skip:
            return pos


def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
    pos += 1  # Skip "["
    pos = skip_chars(src, pos, TOML_WS)
    pos, key = parse_key(src, pos)

    if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
        raise TOMLDecodeError(f"Cannot declare {key} twice", src, pos)
    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
    try:
        out.data.get_or_create_nest(key)
    except KeyError:
        raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None

    if not src.startswith("]", pos):
        raise TOMLDecodeError(
            "Expected ']' at the end of a table declaration", src, pos
        )
    return pos + 1, key


def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
    pos += 2  # Skip "[["
    pos = skip_chars(src, pos, TOML_WS)
    pos, key = parse_key(src, pos)

    if out.flags.is_(key, Flags.FROZEN):
        raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos)
    # Free the namespace now that it points to another empty list item...
    out.flags.unset_all(key)
    # ...but this key precisely is still prohibited from table declaration
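    # (i.e. this exact key still cannot be declared with "[table]" syntax)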
    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
    try:
        out.data.append_nest_to_list(key)
    except KeyError:
        raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None

    if not src.startswith("]]", pos):
        raise TOMLDecodeError(
            "Expected ']]' at the end of an array declaration", src, pos
        )
    return pos + 2, key
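
# Illustrative: applied to
#
#     [[fruit]]
#     name = "apple"
#     [[fruit]]
#     name = "banana"
#
# the rule above yields {"fruit": [{"name": "apple"}, {"name": "banana"}]},
# appending a fresh dict and re-pointing the header namespace each time.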


def key_value_rule(
    src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
) -> Pos:
    pos, key, value = parse_key_value_pair(src, pos, parse_float, nest_lvl=0)
    key_parent, key_stem = key[:-1], key[-1]
    abs_key_parent = header + key_parent

    relative_path_cont_keys = (header + key[:i] for i in range(1, len(key)))
    for cont_key in relative_path_cont_keys:
        # Check that dotted key syntax does not redefine an existing table
        if out.flags.is_(cont_key, Flags.EXPLICIT_NEST):
            raise TOMLDecodeError(f"Cannot redefine namespace {cont_key}", src, pos)
        # Containers in the relative path can't be opened with the table syntax or
        # dotted key/value syntax in following table sections.
        out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST)

    if out.flags.is_(abs_key_parent, Flags.FROZEN):
        raise TOMLDecodeError(
            f"Cannot mutate immutable namespace {abs_key_parent}", src, pos
        )

    try:
        nest = out.data.get_or_create_nest(abs_key_parent)
    except KeyError:
        raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None
    if key_stem in nest:
        raise TOMLDecodeError("Cannot overwrite a value", src, pos)
    # Mark inline table and array namespaces recursively immutable
    if isinstance(value, (dict, list)):
        out.flags.set(header + key, Flags.FROZEN, recursive=True)
    nest[key_stem] = value
    return pos
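
# The pending EXPLICIT_NEST flags set above are what reject dotted-key
# redefinition of tables, e.g. (illustrative):
#
#     [tbl]
#     a.b = 1   # marks ("tbl", "a") as pending EXPLICIT_NEST
#     [tbl.a]   # pending flags finalize on "[" -> "Cannot declare ... twice"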


def parse_key_value_pair(
    src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int
) -> tuple[Pos, Key, Any]:
    pos, key = parse_key(src, pos)
    try:
        char: str | None = src[pos]
    except IndexError:
        char = None
    if char != "=":
        raise TOMLDecodeError("Expected '=' after a key in a key/value pair", src, pos)
    pos += 1
    pos = skip_chars(src, pos, TOML_WS)
    pos, value = parse_value(src, pos, parse_float, nest_lvl)
    return pos, key, value


def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
    pos, key_part = parse_key_part(src, pos)
    key: Key = (key_part,)
    pos = skip_chars(src, pos, TOML_WS)
    while True:
        try:
            char: str | None = src[pos]
        except IndexError:
            char = None
        if char != ".":
            return pos, key
        pos += 1
        pos = skip_chars(src, pos, TOML_WS)
        pos, key_part = parse_key_part(src, pos)
        key += (key_part,)
        pos = skip_chars(src, pos, TOML_WS)


def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
    try:
        char: str | None = src[pos]
    except IndexError:
        char = None
    if char in BARE_KEY_CHARS:
        start_pos = pos
        pos = skip_chars(src, pos, BARE_KEY_CHARS)
        return pos, src[start_pos:pos]
    if char == "'":
        return parse_literal_str(src, pos)
    if char == '"':
        return parse_one_line_basic_str(src, pos)
    raise TOMLDecodeError("Invalid initial character for a key part", src, pos)


def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
    pos += 1
    return parse_basic_str(src, pos, multiline=False)


def parse_array(
    src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int
) -> tuple[Pos, list]:
    pos += 1
    array: list = []

    pos = skip_comments_and_array_ws(src, pos)
    if src.startswith("]", pos):
        return pos + 1, array
    while True:
        pos, val = parse_value(src, pos, parse_float, nest_lvl)
        array.append(val)
        pos = skip_comments_and_array_ws(src, pos)

        c = src[pos : pos + 1]
        if c == "]":
            return pos + 1, array
        if c != ",":
            raise TOMLDecodeError("Unclosed array", src, pos)
        pos += 1

        pos = skip_comments_and_array_ws(src, pos)
        if src.startswith("]", pos):
            return pos + 1, array


def parse_inline_table(
    src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int
) -> tuple[Pos, dict]:
    pos += 1
    nested_dict = NestedDict()
    flags = Flags()

    pos = skip_chars(src, pos, TOML_WS)
    if src.startswith("}", pos):
        return pos + 1, nested_dict.dict
    while True:
        pos, key, value = parse_key_value_pair(src, pos, parse_float, nest_lvl)
        key_parent, key_stem = key[:-1], key[-1]
        if flags.is_(key, Flags.FROZEN):
            raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos)
        try:
            nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
        except KeyError:
            raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None
        if key_stem in nest:
            raise TOMLDecodeError(f"Duplicate inline table key {key_stem!r}", src, pos)
        nest[key_stem] = value
        pos = skip_chars(src, pos, TOML_WS)
        c = src[pos : pos + 1]
        if c == "}":
            return pos + 1, nested_dict.dict
        if c != ",":
            raise TOMLDecodeError("Unclosed inline table", src, pos)
        if isinstance(value, (dict, list)):
            flags.set(key, Flags.FROZEN, recursive=True)
        pos += 1
        pos = skip_chars(src, pos, TOML_WS)


def parse_basic_str_escape(
    src: str, pos: Pos, *, multiline: bool = False
) -> tuple[Pos, str]:
    escape_id = src[pos : pos + 2]
    pos += 2
    if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
        # Skip whitespace until next non-whitespace character or end of
        # the doc. Error if non-whitespace is found before newline.
        if escape_id != "\\\n":
            pos = skip_chars(src, pos, TOML_WS)
            try:
                char = src[pos]
            except IndexError:
                return pos, ""
            if char != "\n":
                raise TOMLDecodeError("Unescaped '\\' in a string", src, pos)
            pos += 1
        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
        return pos, ""
    if escape_id == "\\u":
        return parse_hex_char(src, pos, 4)
    if escape_id == "\\U":
        return parse_hex_char(src, pos, 8)
    try:
        return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
    except KeyError:
        raise TOMLDecodeError("Unescaped '\\' in a string", src, pos) from None


def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
    return parse_basic_str_escape(src, pos, multiline=True)


def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
    hex_str = src[pos : pos + hex_len]
    if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
        raise TOMLDecodeError("Invalid hex value", src, pos)
    pos += hex_len
    hex_int = int(hex_str, 16)
    if not is_unicode_scalar_value(hex_int):
        raise TOMLDecodeError(
            "Escaped character is not a Unicode scalar value", src, pos
        )
    return pos, chr(hex_int)


def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
    pos += 1  # Skip starting apostrophe
    start_pos = pos
    pos = skip_until(
        src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
    )
    return pos + 1, src[start_pos:pos]  # Skip ending apostrophe


def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
    pos += 3
    if src.startswith("\n", pos):
        pos += 1

    if literal:
        delim = "'"
        end_pos = skip_until(
            src,
            pos,
            "'''",
            error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
            error_on_eof=True,
        )
        result = src[pos:end_pos]
        pos = end_pos + 3
    else:
        delim = '"'
        pos, result = parse_basic_str(src, pos, multiline=True)

    # Add at maximum two extra apostrophes/quotes if the end sequence
    # is 4 or 5 chars long instead of just 3.
    if not src.startswith(delim, pos):
        return pos, result
    pos += 1
    if not src.startswith(delim, pos):
        return pos, result + delim
    pos += 1
    return pos, result + (delim * 2)
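
# The trailing-delimiter handling above is what makes runs of four or five
# closing quotes valid, e.g. (illustrative):
#
#     a = """x""""    # -> 'x"'   (4 quotes: one belongs to the content)
#     b = """x"""""   # -> 'x""'  (5 quotes: two belong to the content)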


def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
    if multiline:
        error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
        parse_escapes = parse_basic_str_escape_multiline
    else:
        error_on = ILLEGAL_BASIC_STR_CHARS
        parse_escapes = parse_basic_str_escape
    result = ""
    start_pos = pos
    while True:
        try:
            char = src[pos]
        except IndexError:
            raise TOMLDecodeError("Unterminated string", src, pos) from None
        if char == '"':
            if not multiline:
                return pos + 1, result + src[start_pos:pos]
            if src.startswith('"""', pos):
                return pos + 3, result + src[start_pos:pos]
            pos += 1
            continue
        if char == "\\":
            result += src[start_pos:pos]
            pos, parsed_escape = parse_escapes(src, pos)
            result += parsed_escape
            start_pos = pos
            continue
        if char in error_on:
            raise TOMLDecodeError(f"Illegal character {char!r}", src, pos)
        pos += 1


def parse_value(  # noqa: C901
    src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int
) -> tuple[Pos, Any]:
    if nest_lvl > MAX_INLINE_NESTING:
        # Pure Python should have raised RecursionError already.
        # This ensures mypyc binaries eventually do the same.
        raise RecursionError(  # pragma: no cover
            "TOML inline arrays/tables are nested more than the allowed"
            f" {MAX_INLINE_NESTING} levels"
        )

    try:
        char: str | None = src[pos]
    except IndexError:
        char = None

    # IMPORTANT: order conditions based on speed of checking and likelihood

    # Basic strings
    if char == '"':
        if src.startswith('"""', pos):
            return parse_multiline_str(src, pos, literal=False)
        return parse_one_line_basic_str(src, pos)

    # Literal strings
    if char == "'":
        if src.startswith("'''", pos):
            return parse_multiline_str(src, pos, literal=True)
        return parse_literal_str(src, pos)

    # Booleans
    if char == "t":
        if src.startswith("true", pos):
            return pos + 4, True
    if char == "f":
        if src.startswith("false", pos):
            return pos + 5, False

    # Arrays
    if char == "[":
        return parse_array(src, pos, parse_float, nest_lvl + 1)

    # Inline tables
    if char == "{":
        return parse_inline_table(src, pos, parse_float, nest_lvl + 1)

    # Dates and times
    datetime_match = RE_DATETIME.match(src, pos)
    if datetime_match:
        try:
            datetime_obj = match_to_datetime(datetime_match)
        except ValueError as e:
            raise TOMLDecodeError("Invalid date or datetime", src, pos) from e
        return datetime_match.end(), datetime_obj
    localtime_match = RE_LOCALTIME.match(src, pos)
    if localtime_match:
        return localtime_match.end(), match_to_localtime(localtime_match)

    # Integers and "normal" floats.
    # The regex will greedily match any type starting with a decimal
    # char, so needs to be located after handling of dates and times.
    number_match = RE_NUMBER.match(src, pos)
    if number_match:
        return number_match.end(), match_to_number(number_match, parse_float)

    # Special floats
    first_three = src[pos : pos + 3]
    if first_three in {"inf", "nan"}:
        return pos + 3, parse_float(first_three)
    first_four = src[pos : pos + 4]
    if first_four in {"-inf", "+inf", "-nan", "+nan"}:
        return pos + 4, parse_float(first_four)

    raise TOMLDecodeError("Invalid value", src, pos)


def is_unicode_scalar_value(codepoint: int) -> bool:
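    # Unicode scalar values are all code points except the UTF-16 surrogate
    # range: 0xD7FF == 55295, 0xE000 == 57344, 0x10FFFF == 1114111.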
    return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)


def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
    """A decorator to make `parse_float` safe.

    `parse_float` must not return dicts or lists, because these types
    would be mixed with parsed TOML tables and arrays, thus confusing
    the parser. The returned decorated callable raises `ValueError`
    instead of returning illegal types.
    """
    # The default `float` callable never returns illegal types. Optimize it.
    if parse_float is float:
        return float

    def safe_parse_float(float_str: str) -> Any:
        float_value = parse_float(float_str)
        if isinstance(float_value, (dict, list)):
            raise ValueError("parse_float must not return dicts or lists")
        return float_value

    return safe_parse_float
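
# Typical use (illustrative): parse floats losslessly with decimal.Decimal.
#
#     from decimal import Decimal
#     loads("pi = 3.14159", parse_float=Decimal)  # {"pi": Decimal("3.14159")}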
poetry-core-2.1.1/src/poetry/core/_vendor/tomli/_re.py000066400000000000000000000061431475444614500227450ustar00rootroot00000000000000# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.

from __future__ import annotations

from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import lru_cache
import re
from typing import Any, Final

from ._types import ParseFloat

# E.g.
# - 00:32:00.999999
# - 00:32:00
_TIME_RE_STR: Final = (
    r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
)

RE_NUMBER: Final = re.compile(
    r"""
0
(?:
    x[0-9A-Fa-f](?:_?[0-9A-Fa-f])*   # hex
    |
    b[01](?:_?[01])*                 # bin
    |
    o[0-7](?:_?[0-7])*               # oct
)
|
[+-]?(?:0|[1-9](?:_?[0-9])*)         # dec, integer part
(?P<floatpart>
    (?:\.[0-9](?:_?[0-9])*)?         # optional fractional part
    (?:[eE][+-]?[0-9](?:_?[0-9])*)?  # optional exponent part
)
""",
    flags=re.VERBOSE,
)
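
# Illustrative RE_NUMBER matches: "0xDEAD_BEEF", "0b1_0", "0o7_55", "1_000",
# "-17", and "3.14", "5e+22", "1e1_0" (the last three capture "floatpart").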
RE_LOCALTIME: Final = re.compile(_TIME_RE_STR)
RE_DATETIME: Final = re.compile(
    rf"""
([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])  # date, e.g. 1988-10-27
(?:
    [Tt ]
    {_TIME_RE_STR}
    (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))?  # optional time offset
)?
""",
    flags=re.VERBOSE,
)


def match_to_datetime(match: re.Match) -> datetime | date:
    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.

    Raises ValueError if the match does not correspond to a valid date
    or datetime.
    """
    (
        year_str,
        month_str,
        day_str,
        hour_str,
        minute_str,
        sec_str,
        micros_str,
        zulu_time,
        offset_sign_str,
        offset_hour_str,
        offset_minute_str,
    ) = match.groups()
    year, month, day = int(year_str), int(month_str), int(day_str)
    if hour_str is None:
        return date(year, month, day)
    hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
    if offset_sign_str:
        tz: tzinfo | None = cached_tz(
            offset_hour_str, offset_minute_str, offset_sign_str
        )
    elif zulu_time:
        tz = timezone.utc
    else:  # local date-time
        tz = None
    return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
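
# Illustrative mappings for the branches above:
#
#     "1979-05-27"                -> date(1979, 5, 27)
#     "1979-05-27T07:32:00Z"      -> datetime(..., tzinfo=timezone.utc)
#     "1979-05-27 07:32:00+05:30" -> datetime(..., tzinfo=cached_tz(...))
#     "1979-05-27T07:32:00"       -> naive datetime (local date-time)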


# No need to limit cache size. This is only ever called on input
# that matched RE_DATETIME, so there is an implicit bound of
# 24 (hours) * 60 (minutes) * 2 (offset direction) = 2880.
@lru_cache(maxsize=None)
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
    sign = 1 if sign_str == "+" else -1
    return timezone(
        timedelta(
            hours=sign * int(hour_str),
            minutes=sign * int(minute_str),
        )
    )


def match_to_localtime(match: re.Match) -> time:
    hour_str, minute_str, sec_str, micros_str = match.groups()
    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
    return time(int(hour_str), int(minute_str), int(sec_str), micros)


def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
    if match.group("floatpart"):
        return parse_float(match.group())
    return int(match.group(), 0)
poetry-core-2.1.1/src/poetry/core/_vendor/tomli/_types.py000066400000000000000000000003761475444614500235050ustar00rootroot00000000000000# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.

from typing import Any, Callable, Tuple

# Type annotations
ParseFloat = Callable[[str], Any]
Key = Tuple[str, ...]
Pos = int
poetry-core-2.1.1/src/poetry/core/_vendor/tomli/py.typed000066400000000000000000000000321475444614500233140ustar00rootroot00000000000000# Marker file for PEP 561
poetry-core-2.1.1/src/poetry/core/_vendor/vendor.txt000066400000000000000000000004241475444614500225340ustar00rootroot00000000000000fastjsonschema==2.21.1 ; python_version >= "3.9" and python_version < "4.0"
lark==1.2.2 ; python_version >= "3.9" and python_version < "4.0"
packaging==24.2 ; python_version >= "3.9" and python_version < "4.0"
tomli==2.2.1 ; python_version >= "3.9" and python_version < "4.0"
poetry-core-2.1.1/src/poetry/core/constraints/000077500000000000000000000000001475444614500214115ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/constraints/__init__.py000066400000000000000000000000001475444614500235100ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/constraints/generic/000077500000000000000000000000001475444614500230255ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/constraints/generic/__init__.py000066400000000000000000000014671475444614500251460ustar00rootroot00000000000000from __future__ import annotations

from poetry.core.constraints.generic.any_constraint import AnyConstraint
from poetry.core.constraints.generic.base_constraint import BaseConstraint
from poetry.core.constraints.generic.constraint import Constraint
from poetry.core.constraints.generic.empty_constraint import EmptyConstraint
from poetry.core.constraints.generic.multi_constraint import MultiConstraint
from poetry.core.constraints.generic.parser import parse_constraint
from poetry.core.constraints.generic.parser import parse_extra_constraint
from poetry.core.constraints.generic.union_constraint import UnionConstraint


__all__ = (
    "AnyConstraint",
    "BaseConstraint",
    "Constraint",
    "EmptyConstraint",
    "MultiConstraint",
    "UnionConstraint",
    "parse_constraint",
    "parse_extra_constraint",
)
poetry-core-2.1.1/src/poetry/core/constraints/generic/any_constraint.py000066400000000000000000000023241475444614500264330ustar00rootroot00000000000000from __future__ import annotations

from poetry.core.constraints.generic.base_constraint import BaseConstraint
from poetry.core.constraints.generic.empty_constraint import EmptyConstraint


class AnyConstraint(BaseConstraint):
    def allows(self, other: BaseConstraint) -> bool:
        return True

    def allows_all(self, other: BaseConstraint) -> bool:
        return True

    def allows_any(self, other: BaseConstraint) -> bool:
        return True

    def invert(self) -> BaseConstraint:
        return EmptyConstraint()

    def difference(self, other: BaseConstraint) -> BaseConstraint:
        if other.is_any():
            return EmptyConstraint()

        raise ValueError("Unimplemented constraint difference")

    def intersect(self, other: BaseConstraint) -> BaseConstraint:
        return other

    def union(self, other: BaseConstraint) -> AnyConstraint:
        return AnyConstraint()

    def is_any(self) -> bool:
        return True

    def is_empty(self) -> bool:
        return False

    def __str__(self) -> str:
        return "*"

    def __eq__(self, other: object) -> bool:
        return isinstance(other, BaseConstraint) and other.is_any()

    def __hash__(self) -> int:
        return hash("any")
poetry-core-2.1.1/src/poetry/core/constraints/generic/base_constraint.py000066400000000000000000000021251475444614500265550ustar00rootroot00000000000000from __future__ import annotations


class BaseConstraint:
    def allows(self, other: BaseConstraint) -> bool:
        raise NotImplementedError

    def allows_all(self, other: BaseConstraint) -> bool:
        raise NotImplementedError

    def allows_any(self, other: BaseConstraint) -> bool:
        raise NotImplementedError

    def invert(self) -> BaseConstraint:
        raise NotImplementedError()

    def difference(self, other: BaseConstraint) -> BaseConstraint:
        raise NotImplementedError

    def intersect(self, other: BaseConstraint) -> BaseConstraint:
        raise NotImplementedError

    def union(self, other: BaseConstraint) -> BaseConstraint:
        raise NotImplementedError

    def is_any(self) -> bool:
        return False

    def is_empty(self) -> bool:
        return False

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self}>"

    def __str__(self) -> str:
        raise NotImplementedError

    def __hash__(self) -> int:
        raise NotImplementedError

    def __eq__(self, other: object) -> bool:
        raise NotImplementedError
poetry-core-2.1.1/src/poetry/core/constraints/generic/constraint.py000066400000000000000000000176161475444614500255760ustar00rootroot00000000000000from __future__ import annotations

import operator

from typing import Callable
from typing import ClassVar

from poetry.core.constraints.generic.any_constraint import AnyConstraint
from poetry.core.constraints.generic.base_constraint import BaseConstraint
from poetry.core.constraints.generic.empty_constraint import EmptyConstraint


OperatorType = Callable[[object, object], bool]


def contains(a: object, b: object, /) -> bool:
    return operator.contains(a, b)  # type: ignore[arg-type]


def not_contains(a: object, b: object, /) -> bool:
    return not contains(a, b)


class Constraint(BaseConstraint):
    OP_EQ = operator.eq
    OP_NE = operator.ne
    OP_IN = contains
    OP_NC = not_contains

    _trans_op_str: ClassVar[dict[str, OperatorType]] = {
        "=": OP_EQ,
        "==": OP_EQ,
        "!=": OP_NE,
        "in": OP_IN,
        "not in": OP_NC,
    }

    _trans_op_int: ClassVar[dict[OperatorType, str]] = {
        OP_EQ: "==",
        OP_NE: "!=",
        OP_IN: "in",
        OP_NC: "not in",
    }

    _trans_op_inv: ClassVar[dict[str, str]] = {
        "!=": "==",
        "==": "!=",
        "not in": "in",
        "in": "not in",
    }

    def __init__(self, value: str, operator: str = "==") -> None:
        if operator == "=":
            operator = "=="

        self._value = value
        self._operator = operator
        self._op = self._trans_op_str[operator]

    @property
    def value(self) -> str:
        return self._value

    @property
    def operator(self) -> str:
        return self._operator

    def allows(self, other: BaseConstraint) -> bool:
        if not isinstance(other, Constraint) or other.operator != "==":
            raise ValueError(
                f"Invalid argument for allows"
                f' ("other" must be a constraint with operator "=="): {other}'
            )

        if op := self._trans_op_str.get(self._operator):
            return op(other.value, self._value)

        return False

    def allows_all(self, other: BaseConstraint) -> bool:
        from poetry.core.constraints.generic import MultiConstraint
        from poetry.core.constraints.generic import UnionConstraint

        if isinstance(other, Constraint):
            if other.operator == "==":
                return self.allows(other)

            if other.operator == "in" and self._operator == "in":
                return self.value in other.value

            if other.operator == "not in":
                if self._operator == "not in":
                    return other.value in self.value
                if self._operator == "!=":
                    return self.value not in other.value

            return self == other

        if isinstance(other, MultiConstraint):
            return any(self.allows_all(c) for c in other.constraints)

        if isinstance(other, UnionConstraint):
            return all(self.allows_all(c) for c in other.constraints)

        return other.is_empty()

    def allows_any(self, other: BaseConstraint) -> bool:
        from poetry.core.constraints.generic import MultiConstraint
        from poetry.core.constraints.generic import UnionConstraint

        if self._operator == "==":
            return other.allows(self)

        if isinstance(other, Constraint):
            if other.operator == "==":
                return self.allows(other)

            if other.operator == "!=" and self._operator == "==":
                return self._value != other.value

            if other.operator == "not in" and self._operator == "in":
                return other.value not in self.value

            if other.operator == "in" and self._operator == "not in":
                return self.value not in other.value

            return True

        elif isinstance(other, MultiConstraint):
            return self._operator == "!="

        elif isinstance(other, UnionConstraint):
            return self._operator == "!=" and any(
                self.allows_any(c) for c in other.constraints
            )

        return other.is_any()

    def invert(self) -> Constraint:
        return self.__class__(self._value, self._trans_op_inv[self.operator])

    def difference(self, other: BaseConstraint) -> Constraint | EmptyConstraint:
        if other.allows(self):
            return EmptyConstraint()

        return self

    def intersect(self, other: BaseConstraint) -> BaseConstraint:
        from poetry.core.constraints.generic.multi_constraint import MultiConstraint

        if isinstance(other, Constraint):
            if other == self:
                return self

            if self.allows_all(other):
                return other

            if other.allows_all(self):
                return self

            if not self.allows_any(other) or not other.allows_any(self):
                return EmptyConstraint()

            return MultiConstraint(self, other)

        return other.intersect(self)

    def union(self, other: BaseConstraint) -> BaseConstraint:
        from poetry.core.constraints.generic.union_constraint import UnionConstraint

        if isinstance(other, Constraint):
            if other == self:
                return self

            if self.allows_all(other):
                return self

            if other.allows_all(self):
                return other

            ops = {self.operator, other.operator}
            if (
                (ops in ({"!="}, {"not in"}))
                or (
                    (
                        ops in ({"in", "!="}, {"in", "not in"})
                        and (self.operator == "in" and self.value in other.value)
                    )
                    or (other.operator == "in" and other.value in self.value)
                )
                or self.invert() == other
            ):
                return AnyConstraint()

            return UnionConstraint(self, other)

        # to preserve order (functionally not necessary)
        if isinstance(other, UnionConstraint):
            return UnionConstraint(self).union(other)

        return other.union(self)

    def is_any(self) -> bool:
        return False

    def is_empty(self) -> bool:
        return False

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            return False

        return (self.value, self.operator) == (other.value, other.operator)

    def __hash__(self) -> int:
        return hash((self._operator, self._value))

    def __str__(self) -> str:
        if self._operator in {"in", "not in"}:
            return f"'{self._value}' {self._operator}"
        op = self._operator if self._operator != "==" else ""
        return f"{op}{self._value}"


class ExtraConstraint(Constraint):
    def __init__(self, value: str, operator: str = "==") -> None:
        super().__init__(value, operator)
        # Do the check after calling the super constructor,
        # i.e. after the operator has been normalized.
        if self._operator not in {"==", "!="}:
            raise ValueError(
                'Only the operators "==" and "!=" are supported for extra constraints'
            )

    def intersect(self, other: BaseConstraint) -> BaseConstraint:
        from poetry.core.constraints.generic.multi_constraint import (
            ExtraMultiConstraint,
        )

        if isinstance(other, Constraint):
            if other == self:
                return self

            if self._value == other._value and self._operator != other.operator:
                return EmptyConstraint()

            return ExtraMultiConstraint(self, other)

        return super().intersect(other)

    def union(self, other: BaseConstraint) -> BaseConstraint:
        from poetry.core.constraints.generic.union_constraint import UnionConstraint

        if isinstance(other, Constraint):
            if other == self:
                return self

            if self._value == other._value and self._operator != other.operator:
                return AnyConstraint()

            return UnionConstraint(self, other)

        return super().union(other)
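
# Behavior sketch (illustrative):
#
#     Constraint("linux", "!=").allows(Constraint("win32"))  # True
#     Constraint("linux").intersect(Constraint("win32"))     # EmptyConstraint()
#     Constraint("linux").union(Constraint("linux", "!="))   # AnyConstraint()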
poetry-core-2.1.1/src/poetry/core/constraints/generic/empty_constraint.py000066400000000000000000000021771475444614500270100ustar00rootroot00000000000000from __future__ import annotations

from poetry.core.constraints.generic.base_constraint import BaseConstraint


class EmptyConstraint(BaseConstraint):
    pretty_string = None

    def is_empty(self) -> bool:
        return True

    def allows(self, other: BaseConstraint) -> bool:
        return False

    def allows_all(self, other: BaseConstraint) -> bool:
        return other.is_empty()

    def allows_any(self, other: BaseConstraint) -> bool:
        return False

    def invert(self) -> BaseConstraint:
        from poetry.core.constraints.generic.any_constraint import AnyConstraint

        return AnyConstraint()

    def intersect(self, other: BaseConstraint) -> BaseConstraint:
        return self

    def union(self, other: BaseConstraint) -> BaseConstraint:
        return other

    def difference(self, other: BaseConstraint) -> BaseConstraint:
        return self

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, BaseConstraint):
            return False

        return other.is_empty()

    def __hash__(self) -> int:
        return hash("empty")

    def __str__(self) -> str:
        return ""
poetry-core-2.1.1/src/poetry/core/constraints/generic/multi_constraint.py000066400000000000000000000130721475444614500270000ustar00rootroot00000000000000from __future__ import annotations

import itertools

from typing import TYPE_CHECKING

from poetry.core.constraints.generic import AnyConstraint
from poetry.core.constraints.generic import EmptyConstraint
from poetry.core.constraints.generic.base_constraint import BaseConstraint
from poetry.core.constraints.generic.constraint import Constraint


if TYPE_CHECKING:
    from poetry.core.constraints.generic import UnionConstraint


class MultiConstraint(BaseConstraint):
    OPERATORS: tuple[str, ...] = ("!=", "in", "not in")

    def __init__(self, *constraints: Constraint) -> None:
        if any(c.operator not in self.OPERATORS for c in constraints):
            raise ValueError(
                "A multi-constraint can only be comprised of negative constraints"
            )

        self._constraints = constraints

    @property
    def constraints(self) -> tuple[Constraint, ...]:
        return self._constraints

    def allows(self, other: BaseConstraint) -> bool:
        return all(constraint.allows(other) for constraint in self._constraints)

    def allows_all(self, other: BaseConstraint) -> bool:
        if isinstance(other, MultiConstraint):
            return all(c in other.constraints for c in self._constraints)

        return all(c.allows_all(other) for c in self._constraints)

    def allows_any(self, other: BaseConstraint) -> bool:
        from poetry.core.constraints.generic import UnionConstraint

        if isinstance(other, Constraint):
            if other.operator == "==":
                return self.allows(other)

            return other.operator == "!="

        if isinstance(other, UnionConstraint):
            return any(
                all(c1.allows_any(c2) for c1 in self.constraints)
                for c2 in other.constraints
            )

        return isinstance(other, MultiConstraint) or other.is_any()

    def invert(self) -> UnionConstraint:
        from poetry.core.constraints.generic import UnionConstraint

        return UnionConstraint(*(c.invert() for c in self._constraints))

    def intersect(self, other: BaseConstraint) -> BaseConstraint:
        if isinstance(other, MultiConstraint):
            ours = set(self.constraints)
            union = list(self.constraints) + [
                c for c in other.constraints if c not in ours
            ]
            return self.__class__(*union)

        if not isinstance(other, Constraint):
            return other.intersect(self)

        if other in self._constraints:
            return self

        if other.value in (c.value for c in self._constraints):
            # same value but different operator, e.g. '== "linux"' and '!= "linux"'
            return EmptyConstraint()

        if other.operator == "==" and "==" not in self.OPERATORS:
            return other

        return self.__class__(*self._constraints, other)

    def union(self, other: BaseConstraint) -> BaseConstraint:
        if isinstance(other, MultiConstraint):
            theirs = set(other.constraints)
            common = [c for c in self.constraints if c in theirs]
            return self.__class__(*common)

        if not isinstance(other, Constraint):
            return other.union(self)

        if other in self._constraints:
            return other

        if other.value not in (c.value for c in self._constraints):
            if other.operator == "!=":
                return AnyConstraint()

            return self

        constraints = [c for c in self._constraints if c.value != other.value]

        if len(constraints) == 1:
            return constraints[0]

        return self.__class__(*constraints)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            return False

        return self._constraints == other._constraints

    def __hash__(self) -> int:
        return hash(("multi", *self._constraints))

    def __str__(self) -> str:
        constraints = [str(constraint) for constraint in self._constraints]
        return ", ".join(constraints)


class ExtraMultiConstraint(MultiConstraint):
    # Since the extra marker can have multiple values at the same time,
    # "==extra1, ==extra2" is not empty!
    OPERATORS = ("==", "!=")

    def intersect(self, other: BaseConstraint) -> BaseConstraint:
        if isinstance(other, MultiConstraint):
            op_values = {}
            for op in self.OPERATORS:
                op_values[op] = {
                    c.value
                    for c in itertools.chain(self._constraints, other.constraints)
                    if c.operator == op
                }
            if op_values["=="] & op_values["!="]:
                return EmptyConstraint()

        return super().intersect(other)

    def union(self, other: BaseConstraint) -> BaseConstraint:
        from poetry.core.constraints.generic import UnionConstraint

        if isinstance(other, MultiConstraint):
            if set(other.constraints) == set(self._constraints):
                return self
            return UnionConstraint(self, other)

        if isinstance(other, Constraint):
            if other in self._constraints:
                return other

            if len(self._constraints) == 2 and other.value in (
                c.value for c in self._constraints
            ):
                # same value but different operator
                constraints: list[BaseConstraint] = [
                    *(c for c in self._constraints if c.value != other.value),
                    other,
                ]
            else:
                constraints = [self, other]

            return UnionConstraint(*constraints)

        return super().union(other)
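
# Illustrative: extras can hold several values at once, so equality constraints
# on different values intersect to a satisfiable multi-constraint:
#
#     ExtraConstraint("foo").intersect(ExtraConstraint("bar"))
#     # -> ExtraMultiConstraint (both extras active at the same time)
#     ExtraConstraint("foo").intersect(ExtraConstraint("foo", "!="))
#     # -> EmptyConstraint()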
poetry-core-2.1.1/src/poetry/core/constraints/generic/parser.py000066400000000000000000000056321475444614500247010ustar00rootroot00000000000000from __future__ import annotations

import functools
import re

from typing import TYPE_CHECKING

from poetry.core.constraints.generic.any_constraint import AnyConstraint
from poetry.core.constraints.generic.constraint import Constraint
from poetry.core.constraints.generic.constraint import ExtraConstraint
from poetry.core.constraints.generic.union_constraint import UnionConstraint
from poetry.core.constraints.version.exceptions import ParseConstraintError


if TYPE_CHECKING:
    from poetry.core.constraints.generic.base_constraint import BaseConstraint


BASIC_CONSTRAINT = re.compile(r"^(!?==?)?\s*([^\s]+?)\s*$")
STR_CMP_CONSTRAINT = re.compile(
    r"""(?ix)^ # case insensitive and verbose mode
    (?P['"]) # Single or double quotes
    (?P.+?) # The value itself inside quotes
    \1 # Closing single of double quote
    \s* # Space
    (?P(not\sin|in)) # Literal match of 'in' or 'not in'
    $"""
)


@functools.cache
def parse_constraint(constraints: str) -> BaseConstraint:
    return _parse_constraint(constraints, Constraint)


@functools.cache
def parse_extra_constraint(constraints: str) -> BaseConstraint:
    return _parse_constraint(constraints, ExtraConstraint)


def _parse_constraint(
    constraints: str, constraint_type: type[Constraint]
) -> BaseConstraint:
    if constraints == "*":
        return AnyConstraint()

    or_constraints = re.split(r"\s*\|\|?\s*", constraints.strip())
    or_groups = []
    for constraints in or_constraints:
        and_constraints = re.split(r"\s*,\s*", constraints)
        constraint_objects = []

        if len(and_constraints) > 1:
            for constraint in and_constraints:
                constraint_objects.append(
                    _parse_single_constraint(constraint, constraint_type)
                )
        else:
            constraint_objects.append(
                _parse_single_constraint(and_constraints[0], constraint_type)
            )

        if len(constraint_objects) == 1:
            constraint = constraint_objects[0]
        else:
            constraint = constraint_objects[0]
            for next_constraint in constraint_objects[1:]:
                constraint = constraint.intersect(next_constraint)

        or_groups.append(constraint)

    if len(or_groups) == 1:
        return or_groups[0]
    else:
        return UnionConstraint(*or_groups)


def _parse_single_constraint(
    constraint: str, constraint_type: type[Constraint]
) -> Constraint:
    # string comparator
    if m := STR_CMP_CONSTRAINT.match(constraint):
        op = m.group("op")
        value = m.group("value").strip()
        return constraint_type(value, op)

    # Basic comparator

    if m := BASIC_CONSTRAINT.match(constraint):
        op = m.group(1)
        if op is None:
            op = "=="

        version = m.group(2).strip()

        return constraint_type(version, op)

    raise ParseConstraintError(f"Could not parse version constraint: {constraint}")
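
# Parsing sketch (illustrative):
#
#     parse_constraint("*")                 # AnyConstraint()
#     parse_constraint("!=win32")           # Constraint("win32", "!=")
#     parse_constraint("!=win32, !=linux")  # MultiConstraint (AND)
#     parse_constraint("win32 || linux")    # UnionConstraint (OR)
#     parse_constraint("'nux' in")          # Constraint("nux", "in")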
poetry-core-2.1.1/src/poetry/core/constraints/generic/union_constraint.py000066400000000000000000000154411475444614500270000ustar00rootroot00000000000000from __future__ import annotations

import itertools

from poetry.core.constraints.generic import AnyConstraint
from poetry.core.constraints.generic.base_constraint import BaseConstraint
from poetry.core.constraints.generic.constraint import Constraint
from poetry.core.constraints.generic.constraint import ExtraConstraint
from poetry.core.constraints.generic.empty_constraint import EmptyConstraint
from poetry.core.constraints.generic.multi_constraint import ExtraMultiConstraint
from poetry.core.constraints.generic.multi_constraint import MultiConstraint


class UnionConstraint(BaseConstraint):
    def __init__(self, *constraints: BaseConstraint) -> None:
        self._constraints = constraints

    @property
    def constraints(self) -> tuple[BaseConstraint, ...]:
        return self._constraints

    def allows(
        self,
        other: BaseConstraint,
    ) -> bool:
        return any(constraint.allows(other) for constraint in self._constraints)

    def allows_any(self, other: BaseConstraint) -> bool:
        if isinstance(other, UnionConstraint):
            return any(
                c1.allows_any(c2)
                for c1 in self._constraints
                for c2 in other.constraints
            )

        return any(c.allows_any(other) for c in self._constraints)

    def allows_all(self, other: BaseConstraint) -> bool:
        if isinstance(other, UnionConstraint):
            return all(
                any(c1.allows_all(c2) for c1 in self._constraints)
                for c2 in other.constraints
            )

        return any(c.allows_all(other) for c in self._constraints)

    def invert(self) -> MultiConstraint:
        inverted_constraints = [c.invert() for c in self._constraints]
        if any(not isinstance(c, Constraint) for c in inverted_constraints):
            raise NotImplementedError(
                "Inversion of complex union constraints not implemented"
            )
        if any(isinstance(c, ExtraConstraint) for c in inverted_constraints):
            multi_type: type[MultiConstraint] = ExtraMultiConstraint
        else:
            multi_type = MultiConstraint
        return multi_type(*inverted_constraints)  # type: ignore[arg-type]

    def intersect(self, other: BaseConstraint) -> BaseConstraint:
        if other.is_any():
            return self

        if other.is_empty():
            return other

        if isinstance(other, UnionConstraint) and set(other.constraints) == set(
            self._constraints
        ):
            return self

        if isinstance(other, ExtraConstraint) and other in self._constraints:
            return other

        if isinstance(other, Constraint):
            # (A or B) and C => (A and C) or (B and C)
            # just a special case of UnionConstraint
            other = UnionConstraint(other)

        new_constraints = []
        if isinstance(other, UnionConstraint):
            # (A or B) and (C or D) => (A and C) or (A and D) or (B and C) or (B and D)
            for our_constraint in self._constraints:
                for their_constraint in other.constraints:
                    intersection = our_constraint.intersect(their_constraint)

                    if not (intersection.is_empty() or intersection in new_constraints):
                        new_constraints.append(intersection)

        else:
            assert isinstance(other, MultiConstraint)
            # (A or B) and (C and D) => (A and C and D) or (B and C and D)

            for our_constraint in self._constraints:
                intersection = our_constraint
                for their_constraint in other.constraints:
                    intersection = intersection.intersect(their_constraint)

                if not (intersection.is_empty() or intersection in new_constraints):
                    new_constraints.append(intersection)

        if not new_constraints:
            return EmptyConstraint()

        if len(new_constraints) == 1:
            return new_constraints[0]

        return UnionConstraint(*new_constraints)

    def union(self, other: BaseConstraint) -> BaseConstraint:
        if other.is_any():
            return other

        if other.is_empty():
            return self

        if other == self:
            return self

        if isinstance(other, Constraint):
            # (A or B) or C => A or B or C
            # just a special case of UnionConstraint
            other = UnionConstraint(other)

        new_constraints: list[BaseConstraint] = []
        if isinstance(other, UnionConstraint):
            # (A or B) or (C or D) => A or B or C or D
            our_new_constraints: list[BaseConstraint] = []
            their_new_constraints: list[BaseConstraint] = []
            merged_new_constraints: list[BaseConstraint] = []
            for their_constraint in other.constraints:
                for our_constraint in self._constraints:
                    union = our_constraint.union(their_constraint)
                    if union.is_any():
                        return AnyConstraint()
                    if isinstance(union, Constraint):
                        if union == our_constraint:
                            if union not in our_new_constraints:
                                our_new_constraints.append(union)
                        elif union == their_constraint:
                            if union not in their_new_constraints:
                                their_new_constraints.append(their_constraint)
                        elif union not in merged_new_constraints:
                            merged_new_constraints.append(union)
                    else:
                        if our_constraint not in our_new_constraints:
                            our_new_constraints.append(our_constraint)
                        if their_constraint not in their_new_constraints:
                            their_new_constraints.append(their_constraint)
            new_constraints = our_new_constraints
            for constraint in itertools.chain(
                their_new_constraints, merged_new_constraints
            ):
                if constraint not in new_constraints:
                    new_constraints.append(constraint)

        else:
            assert isinstance(other, MultiConstraint)
            # (A or B) or (C and D) => nothing to do

            new_constraints = [*self._constraints, other]

        if len(new_constraints) == 1:
            return new_constraints[0]

        return UnionConstraint(*new_constraints)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, UnionConstraint):
            return False

        return self._constraints == other._constraints

    def __hash__(self) -> int:
        return hash(("union", *self._constraints))

    def __str__(self) -> str:
        constraints = [str(constraint) for constraint in self._constraints]
        return " || ".join(constraints)
poetry-core-2.1.1/src/poetry/core/constraints/version/000077500000000000000000000000001475444614500230765ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/constraints/version/__init__.py000066400000000000000000000016721475444614500252150ustar00rootroot00000000000000from __future__ import annotations

from poetry.core.constraints.version.empty_constraint import EmptyConstraint
from poetry.core.constraints.version.parser import parse_constraint
from poetry.core.constraints.version.parser import parse_marker_version_constraint
from poetry.core.constraints.version.util import constraint_regions
from poetry.core.constraints.version.version import Version
from poetry.core.constraints.version.version_constraint import VersionConstraint
from poetry.core.constraints.version.version_range import VersionRange
from poetry.core.constraints.version.version_range_constraint import (
    VersionRangeConstraint,
)
from poetry.core.constraints.version.version_union import VersionUnion


__all__ = (
    "EmptyConstraint",
    "Version",
    "VersionConstraint",
    "VersionRange",
    "VersionRangeConstraint",
    "VersionUnion",
    "constraint_regions",
    "parse_constraint",
    "parse_marker_version_constraint",
)
poetry-core-2.1.1/src/poetry/core/constraints/version/empty_constraint.py000066400000000000000000000033441475444614500270560ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

from poetry.core.constraints.version.version_constraint import VersionConstraint


if TYPE_CHECKING:
    from poetry.core.constraints.version.version import Version
    from poetry.core.constraints.version.version_range_constraint import (
        VersionRangeConstraint,
    )


class EmptyConstraint(VersionConstraint):
    def is_empty(self) -> bool:
        return True

    def is_any(self) -> bool:
        return False

    def is_simple(self) -> bool:
        return True

    def has_upper_bound(self) -> bool:
        # Rationale:
        # 1. If no version can satisfy the constraint,
        #    this is like an upper bound of 0 (not included).
        # 2. The opposite of an empty constraint, which is *, has no upper bound
        #    and the two extremes often behave the other way around.
        return True

    def allows(self, version: Version) -> bool:
        return False

    def allows_all(self, other: VersionConstraint) -> bool:
        return other.is_empty()

    def allows_any(self, other: VersionConstraint) -> bool:
        return False

    def intersect(self, other: VersionConstraint) -> EmptyConstraint:
        return self

    def union(self, other: VersionConstraint) -> VersionConstraint:
        return other

    def difference(self, other: VersionConstraint) -> EmptyConstraint:
        return self

    def flatten(self) -> list[VersionRangeConstraint]:
        return []

    def __str__(self) -> str:
        return ""

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, VersionConstraint):
            return False

        return other.is_empty()

    def __hash__(self) -> int:
        return hash("empty")
poetry-core-2.1.1/src/poetry/core/constraints/version/exceptions.py000066400000000000000000000001261475444614500256300ustar00rootroot00000000000000from __future__ import annotations


class ParseConstraintError(ValueError):
    pass
poetry-core-2.1.1/src/poetry/core/constraints/version/parser.py000066400000000000000000000206431475444614500247510ustar00rootroot00000000000000from __future__ import annotations

import functools
import re

from typing import TYPE_CHECKING

from poetry.core.constraints.version.exceptions import ParseConstraintError
from poetry.core.version.exceptions import InvalidVersionError


if TYPE_CHECKING:
    from poetry.core.constraints.version.version import Version
    from poetry.core.constraints.version.version_constraint import VersionConstraint


@functools.cache
def parse_constraint(constraints: str) -> VersionConstraint:
    return _parse_constraint(constraints=constraints)


def parse_marker_version_constraint(
    constraints: str, *, pep440: bool = True
) -> VersionConstraint:
    return _parse_constraint(
        constraints=constraints, is_marker_constraint=True, pep440=pep440
    )


def _parse_constraint(
    constraints: str, *, is_marker_constraint: bool = False, pep440: bool = True
) -> VersionConstraint:
    if constraints == "*":
        from poetry.core.constraints.version.version_range import VersionRange

        return VersionRange()

    or_constraints = re.split(r"\s*\|\|?\s*", constraints.strip())
    or_groups = []
    for constraints in or_constraints:
        # allow trailing commas for robustness (even though it may not be
        # standard-compliant it seems to occur in some packages)
        constraints = constraints.rstrip(",").rstrip()
        and_constraints = re.split(
            r"(?< ,]) *(? 1:
            for constraint in and_constraints:
                constraint_objects.append(
                    parse_single_constraint(
                        constraint,
                        is_marker_constraint=is_marker_constraint,
                        pep440=pep440,
                    )
                )
        else:
            constraint_objects.append(
                parse_single_constraint(
                    and_constraints[0],
                    is_marker_constraint=is_marker_constraint,
                    pep440=pep440,
                )
            )

        if len(constraint_objects) == 1:
            constraint = constraint_objects[0]
        else:
            constraint = constraint_objects[0]
            for next_constraint in constraint_objects[1:]:
                constraint = constraint.intersect(next_constraint)

        or_groups.append(constraint)

    if len(or_groups) == 1:
        return or_groups[0]
    else:
        from poetry.core.constraints.version.version_union import VersionUnion

        return VersionUnion.of(*or_groups)
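
# Editor's note (illustrative sketch, not upstream code): "||" separates OR
# groups, while "," (or a bare space) joins AND constraints within a group.
#
#   >>> str(parse_constraint(">=2.7,<3.0 || >=3.4"))
#   '>=2.7,<3.0 || >=3.4'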


def parse_single_constraint(
    constraint: str, *, is_marker_constraint: bool = False, pep440: bool = True
) -> VersionConstraint:
    from poetry.core.constraints.version.patterns import BASIC_CONSTRAINT
    from poetry.core.constraints.version.patterns import BASIC_RELEASE_CONSTRAINT
    from poetry.core.constraints.version.patterns import CARET_CONSTRAINT
    from poetry.core.constraints.version.patterns import TILDE_CONSTRAINT
    from poetry.core.constraints.version.patterns import TILDE_PEP440_CONSTRAINT
    from poetry.core.constraints.version.patterns import X_CONSTRAINT
    from poetry.core.constraints.version.version import Version
    from poetry.core.constraints.version.version_range import VersionRange
    from poetry.core.constraints.version.version_union import VersionUnion

    m = re.match(r"(?i)^v?[xX*](\.[xX*])*$", constraint)
    if m:
        return VersionRange()

    # Tilde range
    m = TILDE_CONSTRAINT.match(constraint)
    if m:
        try:
            version = Version.parse(m.group("version"))
        except InvalidVersionError as e:
            raise ParseConstraintError(
                f"Could not parse version constraint: {constraint}"
            ) from e

        high = version.stable.next_minor()
        if version.release.precision == 1:
            high = version.stable.next_major()

        return VersionRange(version, high, include_min=True)

    # PEP 440 Tilde range (~=)
    m = TILDE_PEP440_CONSTRAINT.match(constraint)
    if m:
        try:
            version = Version.parse(m.group("version"))
        except InvalidVersionError as e:
            raise ParseConstraintError(
                f"Could not parse version constraint: {constraint}"
            ) from e

        if version.release.precision == 2:
            high = version.stable.next_major()
        else:
            high = version.stable.next_minor()

        return VersionRange(version, high, include_min=True)

    # Caret range
    m = CARET_CONSTRAINT.match(constraint)
    if m:
        try:
            version = Version.parse(m.group("version"))
        except InvalidVersionError as e:
            raise ParseConstraintError(
                f"Could not parse version constraint: {constraint}"
            ) from e

        return VersionRange(version, version.next_breaking(), include_min=True)

    # X Range
    m = X_CONSTRAINT.match(constraint)
    if m:
        op = m.group("op")

        try:
            return _make_x_constraint_range(
                version=Version.parse(m.group("version")),
                invert=op == "!=",
                is_marker_constraint=is_marker_constraint,
            )
        except ValueError:
            raise ValueError(f"Could not parse version constraint: {constraint}")

    # Basic comparator
    m = BASIC_CONSTRAINT.match(constraint)
    if m:
        op = m.group("op")
        version_string = m.group("version")

        if version_string == "dev":
            version_string = "0.0-dev"

        try:
            version = Version.parse(version_string)
        except InvalidVersionError as e:
            raise ParseConstraintError(
                f"Could not parse version constraint: {constraint}"
            ) from e

        if op == "<":
            return VersionRange(max=version)
        if op == "<=":
            return VersionRange(max=version, include_max=True)
        if op == ">":
            return VersionRange(min=version)
        if op == ">=":
            return VersionRange(min=version, include_min=True)

        if m.group("wildcard") is not None:
            return _make_x_constraint_range(
                version=version,
                invert=op == "!=",
                is_marker_constraint=is_marker_constraint,
            )

        if op == "!=":
            return VersionUnion(VersionRange(max=version), VersionRange(min=version))

        return version

    # The constraints below are reserved for comparing non-Python packages,
    # such as OS versions via `platform_release`.
    if not pep440 and (m := BASIC_RELEASE_CONSTRAINT.match(constraint)):
        op = m.group("op")
        release_string = m.group("release")
        build = m.group("build")

        try:
            version = Version(
                release=Version.parse(release_string).release,
                local=build,
            )
        except InvalidVersionError as e:
            raise ParseConstraintError(
                f"Could not parse version constraint: {constraint}"
            ) from e

        if op == "<":
            return VersionRange(max=version)
        if op == "<=":
            return VersionRange(max=version, include_max=True)
        if op == ">":
            return VersionRange(min=version)
        if op == ">=":
            return VersionRange(min=version, include_min=True)
        if op == "!=":
            return VersionUnion(VersionRange(max=version), VersionRange(min=version))
        return version

    raise ParseConstraintError(f"Could not parse version constraint: {constraint}")


def _make_x_constraint_range(
    version: Version, *, invert: bool = False, is_marker_constraint: bool = False
) -> VersionConstraint:
    from poetry.core.constraints.version.version_range import VersionRange

    if version.is_postrelease():
        _next = version.next_postrelease()
    elif version.is_stable():
        _next = version.next_stable()
    elif version.is_prerelease():
        _next = version.next_prerelease()
    elif version.is_devrelease():
        _next = version.next_devrelease()
    else:
        raise RuntimeError("version is neither stable nor a post-, pre- or dev-release")

    _min = version
    _max = _next

    if not is_marker_constraint:
        _min = _min.first_devrelease()
        if not _max.is_devrelease():
            _max = _max.first_devrelease()

    result = VersionRange(_min, _max, include_min=True)

    if invert:
        return VersionRange().difference(result)

    return result
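
# Editor's note (illustrative sketch, not upstream code): wildcard constraints
# expand to half-open ranges padded with dev releases and render back in
# wildcard form.
#
#   >>> from poetry.core.constraints.version.version import Version
#   >>> constraint = parse_single_constraint("==3.5.*")
#   >>> constraint
#   <VersionRange ==3.5.*>
#   >>> constraint.allows(Version.parse("3.5.2"))
#   True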
poetry-core-2.1.1/src/poetry/core/constraints/version/patterns.py000066400000000000000000000024071475444614500253130ustar00rootroot00000000000000from __future__ import annotations

import re

from packaging.version import VERSION_PATTERN


COMPLETE_VERSION = re.compile(VERSION_PATTERN, re.VERBOSE | re.IGNORECASE)

CARET_CONSTRAINT = re.compile(
    rf"^\^\s*(?P<version>{VERSION_PATTERN})$", re.VERBOSE | re.IGNORECASE
)
TILDE_CONSTRAINT = re.compile(
    rf"^~(?!=)\s*(?P<version>{VERSION_PATTERN})$", re.VERBOSE | re.IGNORECASE
)
TILDE_PEP440_CONSTRAINT = re.compile(
    rf"^~=\s*(?P<version>{VERSION_PATTERN})$", re.VERBOSE | re.IGNORECASE
)
X_CONSTRAINT = re.compile(
    r"^(?P<op>!=|==)?\s*v?(?P<version>(\d+)(?:\.(\d+))?(?:\.(\d+))?)(?:\.\*)+$"
)

# note that we also allow technically incorrect version patterns with an
# asterisk (e.g. 3.5.*) as this is supported by pip and appears in metadata
# within python packages
BASIC_CONSTRAINT = re.compile(
    rf"^(?P<op><>|!=|>=?|<=?|==?)?\s*(?P<version>{VERSION_PATTERN}|dev)(?P<wildcard>\.\*)?$",
    re.VERBOSE | re.IGNORECASE,
)

RELEASE_PATTERN = r"""
(?P<release>[0-9]+(?:\.[0-9]+)*)
(?:(\+|-)(?P<build>
    [0-9a-zA-Z-]+
    (?:\.[0-9a-zA-Z-]+)*
))?
"""

# pattern for non-Python versions such as OS versions in `platform_release`
BASIC_RELEASE_CONSTRAINT = re.compile(
    rf"^(?P<op><>|!=|>=?|<=?|==?)?\s*(?P<version>{RELEASE_PATTERN})$",
    re.VERBOSE | re.IGNORECASE,
)
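
# Editor's note (illustrative sketch, not upstream code): the named groups
# above ("op", "version", "release", "build", "wildcard") are the ones read
# back by parser.py.
#
#   >>> m = BASIC_CONSTRAINT.match(">=1.2.3")
#   >>> m.group("op"), m.group("version")
#   ('>=', '1.2.3')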
poetry-core-2.1.1/src/poetry/core/constraints/version/util.py000066400000000000000000000030071475444614500244250ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

from poetry.core.constraints.version.version_range import VersionRange


if TYPE_CHECKING:
    from poetry.core.constraints.version.version_constraint import VersionConstraint


def constraint_regions(constraints: list[VersionConstraint]) -> list[VersionRange]:
    """
    Transform a list of VersionConstraints into a list of VersionRanges that mark out
    the distinct regions of version-space.

    e.g. the input >=3.6 and >=2.7,<3.0.0 || >=3.4.0
    yields <2.7, >=2.7,<3.0.0, >=3.0.0,<3.4.0, >=3.4.0,<3.6, >=3.6.
    """
    flattened = []
    for constraint in constraints:
        flattened += constraint.flatten()

    mins = {
        (constraint.min, not constraint.include_min)
        for constraint in flattened
        if constraint.min is not None
    }
    maxs = {
        (constraint.max, constraint.include_max)
        for constraint in flattened
        if constraint.max is not None
    }

    edges = sorted(mins | maxs)
    if not edges:
        return [VersionRange(None, None)]

    start = edges[0]
    regions = [
        VersionRange(None, start[0], include_max=start[1]),
    ]

    for low, high in zip(edges, edges[1:]):
        version_range = VersionRange(
            low[0],
            high[0],
            include_min=not low[1],
            include_max=high[1],
        )
        regions.append(version_range)

    end = edges[-1]
    regions.append(
        VersionRange(end[0], None, include_min=not end[1]),
    )

    return regions
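
# Editor's note (illustrative sketch, not upstream code), mirroring the
# docstring example above:
#
#   >>> from poetry.core.constraints.version.parser import parse_constraint
#   >>> regions = constraint_regions(
#   ...     [parse_constraint(">=3.6"), parse_constraint(">=2.7,<3.0 || >=3.4")]
#   ... )
#   >>> [str(region) for region in regions]
#   ['<2.7', '>=2.7,<3.0', '>=3.0,<3.4', '>=3.4,<3.6', '>=3.6']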
poetry-core-2.1.1/src/poetry/core/constraints/version/version.py000066400000000000000000000117311475444614500251400ustar00rootroot00000000000000from __future__ import annotations

import dataclasses

from typing import TYPE_CHECKING

from poetry.core.constraints.version.empty_constraint import EmptyConstraint
from poetry.core.constraints.version.version_range_constraint import (
    VersionRangeConstraint,
)
from poetry.core.constraints.version.version_union import VersionUnion
from poetry.core.version.pep440 import Release
from poetry.core.version.pep440.version import PEP440Version


if TYPE_CHECKING:
    from poetry.core.constraints.version.version_constraint import VersionConstraint
    from poetry.core.version.pep440 import LocalSegmentType
    from poetry.core.version.pep440 import ReleaseTag


@dataclasses.dataclass(frozen=True)
class Version(PEP440Version, VersionRangeConstraint):
    """
    A version constraint representing a single version.
    """

    @property
    def precision(self) -> int:
        return self.release.precision

    @property
    def stable(self) -> Version:
        if self.is_stable():
            return self

        post = self.post if self.pre is None else None
        return Version(release=self.release, post=post, epoch=self.epoch)

    def next_breaking(self) -> Version:
        if self.major > 0 or self.minor is None:
            return self.stable.next_major()

        if self.minor > 0 or self.patch is None:
            return self.stable.next_minor()

        return self.stable.next_patch()

    @property
    def min(self) -> Version:
        return self

    @property
    def max(self) -> Version:
        return self

    @property
    def full_max(self) -> Version:
        return self

    @property
    def include_min(self) -> bool:
        return True

    @property
    def include_max(self) -> bool:
        return True

    def is_any(self) -> bool:
        return False

    def is_empty(self) -> bool:
        return False

    def is_simple(self) -> bool:
        return True

    def allows(self, version: Version | None) -> bool:
        if version is None:
            return False

        _this, _other = self, version

        # allow weak equality to allow `3.0.0+local.1` for `3.0.0`
        if not _this.is_local() and _other.is_local():
            _other = _other.without_local()

        return _this == _other

    def allows_all(self, other: VersionConstraint) -> bool:
        return other.is_empty() or (
            self.allows(other) if isinstance(other, self.__class__) else other == self
        )

    def allows_any(self, other: VersionConstraint) -> bool:
        intersection = self.intersect(other)
        return not intersection.is_empty()

    def intersect(self, other: VersionConstraint) -> VersionConstraint:
        if isinstance(other, Version):
            if self.allows(other):
                return other

            if other.allows(self):
                return self

            return EmptyConstraint()

        return other.intersect(self)

    def union(self, other: VersionConstraint) -> VersionConstraint:
        from poetry.core.constraints.version.version_range import VersionRange

        if other.allows(self):
            return other

        if isinstance(other, VersionRangeConstraint):
            if self.allows(other.min):
                return VersionRange(
                    other.min,
                    other.max,
                    include_min=True,
                    include_max=other.include_max,
                )

            if self.allows(other.max):
                return VersionRange(
                    other.min,
                    other.max,
                    include_min=other.include_min,
                    include_max=True,
                )

        return VersionUnion.of(self, other)

    def difference(self, other: VersionConstraint) -> Version | EmptyConstraint:
        if other.allows(self):
            return EmptyConstraint()

        return self

    def flatten(self) -> list[VersionRangeConstraint]:
        return [self]

    def __str__(self) -> str:
        return self.text

    def __eq__(self, other: object) -> bool:
        from poetry.core.constraints.version.version_range import VersionRange

        if isinstance(other, VersionRange):
            return (
                self == other.min
                and self == other.max
                and (other.include_min or other.include_max)
            )
        return super().__eq__(other)

    @classmethod
    def from_parts(
        cls,
        major: int,
        minor: int | None = None,
        patch: int | None = None,
        extra: int | tuple[int, ...] = (),
        pre: ReleaseTag | None = None,
        post: ReleaseTag | None = None,
        dev: ReleaseTag | None = None,
        local: LocalSegmentType = None,
        *,
        epoch: int = 0,
    ) -> Version:
        if isinstance(extra, int):
            extra = (extra,)
        return cls(
            release=Release(major=major, minor=minor, patch=patch, extra=extra),
            pre=pre,
            post=post,
            dev=dev,
            local=local,
            epoch=epoch,
        )
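
# Editor's note (illustrative sketch, not upstream code): a Version is also a
# degenerate range, and equality against local versions is "weak" in one
# direction only.
#
#   >>> v = Version.parse("3.0.0")
#   >>> v.allows(Version.parse("3.0.0+local.1"))
#   True
#   >>> Version.parse("3.0.0+local.1").allows(v)
#   False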
poetry-core-2.1.1/src/poetry/core/constraints/version/version_constraint.py000066400000000000000000000066471475444614500274160ustar00rootroot00000000000000from __future__ import annotations

from abc import abstractmethod
from typing import TYPE_CHECKING


if TYPE_CHECKING:
    from poetry.core.constraints.version.version import Version
    from poetry.core.constraints.version.version_range_constraint import (
        VersionRangeConstraint,
    )


class VersionConstraint:
    @abstractmethod
    def is_empty(self) -> bool:
        raise NotImplementedError

    @abstractmethod
    def is_any(self) -> bool:
        raise NotImplementedError

    @abstractmethod
    def is_simple(self) -> bool:
        raise NotImplementedError

    @abstractmethod
    def has_upper_bound(self) -> bool:
        raise NotImplementedError

    @abstractmethod
    def allows(self, version: Version) -> bool:
        raise NotImplementedError

    @abstractmethod
    def allows_all(self, other: VersionConstraint) -> bool:
        raise NotImplementedError

    @abstractmethod
    def allows_any(self, other: VersionConstraint) -> bool:
        raise NotImplementedError

    @abstractmethod
    def intersect(self, other: VersionConstraint) -> VersionConstraint:
        raise NotImplementedError

    @abstractmethod
    def union(self, other: VersionConstraint) -> VersionConstraint:
        raise NotImplementedError

    @abstractmethod
    def difference(self, other: VersionConstraint) -> VersionConstraint:
        raise NotImplementedError

    @abstractmethod
    def flatten(self) -> list[VersionRangeConstraint]:
        raise NotImplementedError

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self}>"

    def __str__(self) -> str:
        raise NotImplementedError

    def __hash__(self) -> int:
        raise NotImplementedError

    def __eq__(self, other: object) -> bool:
        raise NotImplementedError


def _is_wildcard_candidate(
    min_: Version, max_: Version, *, inverted: bool = False
) -> bool:
    if (
        min_.is_local()
        or max_.is_local()
        or min_.is_prerelease()
        or max_.is_prerelease()
        or min_.is_postrelease() is not max_.is_postrelease()
        or min_.first_devrelease() != min_
        or (max_.is_devrelease() and max_.first_devrelease() != max_)
    ):
        return False

    first = max_ if inverted else min_
    second = min_ if inverted else max_

    parts_first = list(first.parts)
    parts_second = list(second.parts)

    # remove trailing zeros from second
    while parts_second and parts_second[-1] == 0:
        del parts_second[-1]

    # fill up first with zeros
    parts_first += [0] * (len(parts_second) - len(parts_first))

    # all exceeding parts of first must be zero
    if set(parts_first[len(parts_second) :]) not in [set(), {0}]:
        return False

    parts_first = parts_first[: len(parts_second)]

    if first.is_postrelease():
        assert first.post is not None
        return parts_first == parts_second and first.post.next() == second.post

    return (
        parts_first[:-1] == parts_second[:-1]
        and parts_first[-1] + 1 == parts_second[-1]
    )


def _single_wildcard_range_string(first: Version, second: Version) -> str:
    if first.is_postrelease():
        base_version = str(first.without_devrelease())
    else:
        parts = list(second.parts)

        # remove trailing zeros from max
        while parts and parts[-1] == 0:
            del parts[-1]

        parts[-1] = parts[-1] - 1

        base_version = ".".join(str(part) for part in parts)

    return f"{base_version}.*"
poetry-core-2.1.1/src/poetry/core/constraints/version/version_range.py000066400000000000000000000365771475444614500263330ustar00rootroot00000000000000from __future__ import annotations

from contextlib import suppress
from functools import cached_property
from typing import TYPE_CHECKING

from poetry.core.constraints.version.empty_constraint import EmptyConstraint
from poetry.core.constraints.version.version_constraint import _is_wildcard_candidate
from poetry.core.constraints.version.version_constraint import (
    _single_wildcard_range_string,
)
from poetry.core.constraints.version.version_range_constraint import (
    VersionRangeConstraint,
)
from poetry.core.constraints.version.version_union import VersionUnion


if TYPE_CHECKING:
    from poetry.core.constraints.version.version import Version
    from poetry.core.constraints.version.version_constraint import VersionConstraint


class VersionRange(VersionRangeConstraint):
    def __init__(
        self,
        min: Version | None = None,
        max: Version | None = None,
        include_min: bool = False,
        include_max: bool = False,
    ) -> None:
        self._max = max
        self._min = min
        self._include_min = include_min
        self._include_max = include_max

    @property
    def min(self) -> Version | None:
        return self._min

    @property
    def max(self) -> Version | None:
        return self._max

    @property
    def include_min(self) -> bool:
        return self._include_min

    @property
    def include_max(self) -> bool:
        return self._include_max

    def is_empty(self) -> bool:
        return False

    def is_any(self) -> bool:
        return self._min is None and self._max is None

    def is_simple(self) -> bool:
        return self._min is None or self._max is None

    def allows(self, other: Version) -> bool:
        if self._min is not None:
            _this, _other = self.allowed_min, other

            assert _this is not None

            if (
                not self._include_min
                and not _this.is_postrelease()
                and _other.is_postrelease()
            ):
                # The exclusive ordered comparison >V MUST NOT allow a post-release
                # of the given version unless V itself is a post release.
                # https://peps.python.org/pep-0440/#exclusive-ordered-comparison
                # e.g. "2.0.post1" does not match ">2"
                _other = _other.without_postrelease()

            if not _this.is_local() and _other.is_local():
                # The exclusive ordered comparison >V MUST NOT match
                # a local version of the specified version.
                # https://peps.python.org/pep-0440/#exclusive-ordered-comparison
                # e.g. "2.0+local.version" does not match ">2"
                _other = other.without_local()

            if _other < _this:
                return False

            if not self._include_min and (_other == self._min or _other == _this):
                return False

        if self.max is not None:
            _this, _other = self.allowed_max, other

            assert _this is not None

            if not _this.is_local() and _other.is_local():
                # allow weak equality to allow `3.0.0+local.1` for `<=3.0.0`
                _other = _other.without_local()

            if _other > _this:
                return False

            if not self._include_max and (_other == self._max or _other == _this):
                return False

        return True
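
    # Editor's note (illustrative sketch, not upstream code), showing the
    # PEP 440 exclusions handled in allows() above:
    #
    #   >>> from poetry.core.constraints.version.version import Version
    #   >>> VersionRange(min=Version.parse("2")).allows(Version.parse("2.0.post1"))
    #   False
    #   >>> VersionRange(min=Version.parse("2")).allows(Version.parse("2.1"))
    #   True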

    def allows_all(self, other: VersionConstraint) -> bool:
        from poetry.core.constraints.version.version import Version

        if other.is_empty():
            return True

        if isinstance(other, Version):
            return self.allows(other)

        if isinstance(other, VersionUnion):
            return all(self.allows_all(constraint) for constraint in other.ranges)

        if isinstance(other, VersionRangeConstraint):
            return not other.allows_lower(self) and not other.allows_higher(self)

        raise ValueError(f"Unknown VersionConstraint type {other}.")

    def allows_any(self, other: VersionConstraint) -> bool:
        from poetry.core.constraints.version.version import Version

        if other.is_empty():
            return False

        if isinstance(other, Version):
            if self.allows(other):
                return True

            # Although `>=1.2.3+local` does not allow the exact version `1.2.3`,
            # both of those constraints do allow `1.2.3+local`.
            return (
                self.min is not None and self.min.is_local() and other.allows(self.min)
            )

        if isinstance(other, VersionUnion):
            return any(self.allows_any(constraint) for constraint in other.ranges)

        if isinstance(other, VersionRangeConstraint):
            return not (other.is_strictly_lower(self) or other.is_strictly_higher(self))

        raise ValueError(f"Unknown VersionConstraint type {other}.")

    def intersect(self, other: VersionConstraint) -> VersionConstraint:
        from poetry.core.constraints.version.version import Version

        if other.is_empty():
            return other

        if isinstance(other, VersionUnion):
            return other.intersect(self)

        if isinstance(other, Version):
            # A range and a Version just yields the version if it's in the range.
            if self.allows(other):
                return other

            # `>=1.2.3+local` intersects `1.2.3` to return `>=1.2.3+local,<1.2.4`.
            if self.min is not None and self.min.is_local() and other.allows(self.min):
                upper = other.stable.next_patch()
                return VersionRange(
                    min=self.min,
                    max=upper,
                    include_min=self.include_min,
                    include_max=False,
                )

            return EmptyConstraint()

        if not isinstance(other, VersionRangeConstraint):
            raise ValueError(f"Unknown VersionConstraint type {other}.")

        if self.allows_lower(other):
            if self.is_strictly_lower(other):
                return EmptyConstraint()

            intersect_min = other.min
            intersect_include_min = other.include_min
        else:
            if other.is_strictly_lower(self):
                return EmptyConstraint()

            intersect_min = self._min
            intersect_include_min = self._include_min

        if self.allows_higher(other):
            intersect_max = other.max
            intersect_include_max = other.include_max
        else:
            intersect_max = self._max
            intersect_include_max = self._include_max

        if intersect_min is None and intersect_max is None:
            return VersionRange()

        # If the range is just a single version.
        if intersect_min == intersect_max:
            # Because we already verified that the lower range isn't strictly
            # lower, there must be some overlap.
            assert intersect_include_min and intersect_include_max
            assert intersect_min is not None

            return intersect_min

        # If we got here, there is an actual range.
        return VersionRange(
            intersect_min, intersect_max, intersect_include_min, intersect_include_max
        )

    def union(self, other: VersionConstraint) -> VersionConstraint:
        from poetry.core.constraints.version.version import Version

        if isinstance(other, Version):
            if self.allows(other):
                return self

            if other == self.min:
                return VersionRange(
                    self.min, self.max, include_min=True, include_max=self.include_max
                )

            if other == self.max:
                return VersionRange(
                    self.min, self.max, include_min=self.include_min, include_max=True
                )

            return VersionUnion.of(self, other)

        if isinstance(other, VersionRangeConstraint):
            # If the two ranges don't overlap, we won't be able to create a single
            # VersionRange for both of them.
            edges_touch = (
                self.max == other.min and (self.include_max or other.include_min)
            ) or (self.min == other.max and (self.include_min or other.include_max))

            if not edges_touch and not self.allows_any(other):
                return VersionUnion.of(self, other)

            if self.allows_lower(other):
                union_min = self.min
                union_include_min = self.include_min
            else:
                union_min = other.min
                union_include_min = other.include_min

            if self.allows_higher(other):
                union_max = self.max
                union_include_max = self.include_max
            else:
                union_max = other.max
                union_include_max = other.include_max

            return VersionRange(
                union_min,
                union_max,
                include_min=union_include_min,
                include_max=union_include_max,
            )

        return VersionUnion.of(self, other)

    def difference(self, other: VersionConstraint) -> VersionConstraint:
        from poetry.core.constraints.version.version import Version

        if other.is_empty():
            return self

        if isinstance(other, Version):
            if not self.allows(other):
                return self

            if other == self.min:
                if not self.include_min:
                    return self

                return VersionRange(self.min, self.max, False, self.include_max)

            if other == self.max:
                if not self.include_max:
                    return self

                return VersionRange(self.min, self.max, self.include_min, False)

            return VersionUnion.of(
                VersionRange(self.min, other, self.include_min, False),
                VersionRange(other, self.max, False, self.include_max),
            )
        elif isinstance(other, VersionRangeConstraint):
            if not self.allows_any(other):
                return self

            before: VersionConstraint | None
            if not self.allows_lower(other):
                before = None
            elif self.min == other.min:
                before = self.min
            else:
                before = VersionRange(
                    self.min, other.min, self.include_min, not other.include_min
                )

            after: VersionConstraint | None
            if not self.allows_higher(other):
                after = None
            elif self.max == other.max:
                after = self.max
            else:
                after = VersionRange(
                    other.max, self.max, not other.include_max, self.include_max
                )

            if before is None and after is None:
                return EmptyConstraint()

            if before is None:
                assert after is not None
                return after

            if after is None:
                return before

            return VersionUnion.of(before, after)
        elif isinstance(other, VersionUnion):
            ranges: list[VersionRangeConstraint] = []
            current: VersionRangeConstraint = self

            for range in other.ranges:
                # Skip any ranges that are strictly lower than [current].
                if range.is_strictly_lower(current):
                    continue

                # If we reach a range strictly higher than [current], no more ranges
                # will be relevant so we can bail early.
                if range.is_strictly_higher(current):
                    break

                difference = current.difference(range)
                if difference.is_empty():
                    return EmptyConstraint()
                elif isinstance(difference, VersionUnion):
                    # If [range] split [current] in half, we only need to continue
                    # checking future ranges against the latter half.
                    ranges.append(difference.ranges[0])
                    current = difference.ranges[-1]
                else:
                    assert isinstance(difference, VersionRangeConstraint)
                    current = difference

            if not ranges:
                return current

            return VersionUnion.of(*([*ranges, current]))

        raise ValueError(f"Unknown VersionConstraint type {other}.")

    def flatten(self) -> list[VersionRangeConstraint]:
        return [self]

    @cached_property
    def _single_wildcard_range_string(self) -> str:
        if not self.is_single_wildcard_range:
            raise ValueError("Not a valid wildcard range")

        assert self.min is not None
        assert self.max is not None
        return f"=={_single_wildcard_range_string(self.min, self.max)}"

    @cached_property
    def is_single_wildcard_range(self) -> bool:
        # e.g.
        # - "1.*" equals ">=1.0.dev0, <2" (equivalent to ">=1.0.dev0, <2.0.dev0")
        # - "1.0.*" equals ">=1.0.dev0, <1.1"
        # - "1.2.*" equals ">=1.2.dev0, <1.3"
        if (
            self.min is None
            or self.max is None
            or not self.include_min
            or self.include_max
        ):
            return False

        return _is_wildcard_candidate(self.min, self.max)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, VersionRangeConstraint):
            return False

        return (
            self._min == other.min
            and self._max == other.max
            and self._include_min == other.include_min
            and self._include_max == other.include_max
        )

    def __lt__(self, other: VersionRangeConstraint) -> bool:
        return self._cmp(other) < 0

    def __le__(self, other: VersionRangeConstraint) -> bool:
        return self._cmp(other) <= 0

    def __gt__(self, other: VersionRangeConstraint) -> bool:
        return self._cmp(other) > 0

    def __ge__(self, other: VersionRangeConstraint) -> bool:
        return self._cmp(other) >= 0

    def _cmp(self, other: VersionRangeConstraint) -> int:
        if self.min is None:
            return self._compare_max(other) if other.min is None else -1
        elif other.min is None:
            return 1

        if self.min > other.min:
            return 1
        elif self.min < other.min:
            return -1

        if self.include_min != other.include_min:
            return -1 if self.include_min else 1

        return self._compare_max(other)

    def _compare_max(self, other: VersionRangeConstraint) -> int:
        if self.max is None:
            return 0 if other.max is None else 1
        elif other.max is None:
            return -1

        if self.max > other.max:
            return 1
        elif self.max < other.max:
            return -1

        if self.include_max != other.include_max:
            return 1 if self.include_max else -1

        return 0

    def __str__(self) -> str:
        with suppress(ValueError):
            return self._single_wildcard_range_string

        text = ""

        if self.min is not None:
            text += ">=" if self.include_min else ">"
            text += self.min.text

        if self.max is not None:
            if self.min is not None:
                text += ","

            op = "<=" if self.include_max else "<"
            text += f"{op}{self.max.text}"

        if self.min is None and self.max is None:
            return "*"

        return text

    def __hash__(self) -> int:
        return (
            hash(self.min)
            ^ hash(self.max)
            ^ hash(self.include_min)
            ^ hash(self.include_max)
        )
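
# Editor's note (illustrative sketch, not upstream code): union() merges ranges
# whose edges touch into a single contiguous range.
#
#   >>> from poetry.core.constraints.version.parser import parse_constraint
#   >>> str(parse_constraint(">=1.0,<2.0").union(parse_constraint(">=2.0,<3.0")))
#   '>=1.0,<3.0'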
poetry-core-2.1.1/src/poetry/core/constraints/version/version_range_constraint.py000066400000000000000000000072071475444614500305630ustar00rootroot00000000000000from __future__ import annotations

from abc import abstractmethod
from functools import cached_property
from typing import TYPE_CHECKING

from poetry.core.constraints.version.version_constraint import VersionConstraint


if TYPE_CHECKING:
    from poetry.core.constraints.version.version import Version


class VersionRangeConstraint(VersionConstraint):
    @property
    @abstractmethod
    def min(self) -> Version | None:
        raise NotImplementedError

    @property
    @abstractmethod
    def max(self) -> Version | None:
        raise NotImplementedError

    @property
    @abstractmethod
    def include_min(self) -> bool:
        raise NotImplementedError

    @property
    @abstractmethod
    def include_max(self) -> bool:
        raise NotImplementedError

    @property
    def allowed_min(self) -> Version | None:
        # That is a bit inaccurate because
        # 1) The exclusive ordered comparison >V MUST NOT allow a post-release
        #    of the given version unless V itself is a post release.
        # 2) The exclusive ordered comparison >V MUST NOT match
        #    a local version of the specified version.
        # https://peps.python.org/pep-0440/#exclusive-ordered-comparison
        # However, there is no specific min greater than the greatest post release
        # or greatest local version identifier. These cases have to be handled by
        # the callers of allowed_min.
        return self.min

    @cached_property
    def allowed_max(self) -> Version | None:
        if self.max is None:
            return None

        if self.include_max or self.max.is_unstable():
            return self.max

        if self.min == self.max and (self.include_min or self.include_max):
            # this is an equality range
            return self.max

        # The exclusive ordered comparison <V MUST NOT allow a pre-release
        # of the specified version unless the specified version is itself
        # a pre-release.
        # https://peps.python.org/pep-0440/#exclusive-ordered-comparison
        # e.g. "2.0rc1" and "2.0.dev1" do not match "<2"
        return self.max.first_devrelease()

    def has_upper_bound(self) -> bool:
        return self.max is not None

    def allows_lower(self, other: VersionRangeConstraint) -> bool:
        _this, _other = self.allowed_min, other.allowed_min

        if _this is None:
            return _other is not None

        if _other is None:
            return False

        if _this < _other:
            return True

        if _this > _other:
            return False

        return self.include_min and not other.include_min

    def allows_higher(self, other: VersionRangeConstraint) -> bool:
        _this, _other = self.allowed_max, other.allowed_max

        if _this is None:
            return _other is not None

        if _other is None:
            return False

        if _this < _other:
            return False

        if _this > _other:
            return True

        return self.include_max and not other.include_max

    def is_strictly_lower(self, other: VersionRangeConstraint) -> bool:
        _this, _other = self.allowed_max, other.allowed_min

        if _this is None or _other is None:
            return False

        if _this < _other:
            return True

        if _this > _other:
            return False

        return not (self.include_max and other.include_min)

    def is_strictly_higher(self, other: VersionRangeConstraint) -> bool:
        return other.is_strictly_lower(self)

    def is_adjacent_to(self, other: VersionRangeConstraint) -> bool:
        if self.max != other.min:
            return False

        return (self.include_max and not other.include_min) or (
            not self.include_max and other.include_min
        )
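
# Editor's note (illustrative sketch, not upstream code): two ranges are
# adjacent when they share an edge without overlapping, e.g. "<1.0" and ">=1.0".
#
#   >>> from poetry.core.constraints.version.version import Version
#   >>> from poetry.core.constraints.version.version_range import VersionRange
#   >>> lower = VersionRange(max=Version.parse("1.0"))
#   >>> upper = VersionRange(min=Version.parse("1.0"), include_min=True)
#   >>> lower.is_adjacent_to(upper)
#   True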
poetry-core-2.1.1/src/poetry/core/constraints/version/version_union.py000066400000000000000000000253041475444614500263510ustar00rootroot00000000000000from __future__ import annotations

import operator as op

from functools import cached_property
from functools import reduce
from typing import TYPE_CHECKING

from poetry.core.constraints.version.empty_constraint import EmptyConstraint
from poetry.core.constraints.version.version_constraint import VersionConstraint
from poetry.core.constraints.version.version_constraint import _is_wildcard_candidate
from poetry.core.constraints.version.version_constraint import (
    _single_wildcard_range_string,
)
from poetry.core.constraints.version.version_range_constraint import (
    VersionRangeConstraint,
)


if TYPE_CHECKING:
    from poetry.core.constraints.version.version import Version


class VersionUnion(VersionConstraint):
    """
    A version constraint representing a union of multiple disjoint version
    ranges.

    An instance of this will only be created if the version can't be represented
    as a non-compound value.
    """

    def __init__(self, *ranges: VersionRangeConstraint) -> None:
        self._ranges = list(ranges)

    @property
    def ranges(self) -> list[VersionRangeConstraint]:
        return self._ranges

    @classmethod
    def of(cls, *ranges: VersionConstraint) -> VersionConstraint:
        from poetry.core.constraints.version.version_range import VersionRange

        flattened: list[VersionRangeConstraint] = []
        for constraint in ranges:
            if constraint.is_empty():
                continue

            if isinstance(constraint, VersionUnion):
                flattened += constraint.ranges
                continue

            assert isinstance(constraint, VersionRangeConstraint)
            flattened.append(constraint)

        if not flattened:
            return EmptyConstraint()

        if any(constraint.is_any() for constraint in flattened):
            return VersionRange()

        # Only allow Versions and VersionRanges here so we can more easily reason
        # about everything in flattened. _EmptyVersions and VersionUnions are
        # filtered out above.
        for constraint in flattened:
            if not isinstance(constraint, VersionRangeConstraint):
                raise ValueError(f"Unknown VersionConstraint type {constraint}.")

        flattened.sort()  # type: ignore[call-overload]

        merged: list[VersionRangeConstraint] = []
        for constraint in flattened:
            # Merge this constraint with the previous one, but only if they touch.
            if not merged or (
                not merged[-1].allows_any(constraint)
                and not merged[-1].is_adjacent_to(constraint)
            ):
                merged.append(constraint)
            else:
                new_constraint = merged[-1].union(constraint)
                assert isinstance(new_constraint, VersionRangeConstraint)
                merged[-1] = new_constraint

        if len(merged) == 1:
            return merged[0]

        return VersionUnion(*merged)

    def is_empty(self) -> bool:
        return False

    def is_any(self) -> bool:
        return False

    def is_simple(self) -> bool:
        return self.excludes_single_version

    def has_upper_bound(self) -> bool:
        return all(constraint.has_upper_bound() for constraint in self._ranges)

    def allows(self, version: Version) -> bool:
        if self.excludes_single_version:
            # When the excluded version is local, special handling is required
            # to ensure that a constraint like "!=2.0+deadbeef" still allows
            # the provided version "2.0".

            excluded = self._excluded_single_version

            if excluded.is_local():
                return excluded != version

        return any(constraint.allows(version) for constraint in self._ranges)

    def allows_all(self, other: VersionConstraint) -> bool:
        our_ranges = iter(self._ranges)
        their_ranges = iter(other.flatten())

        our_current_range = next(our_ranges, None)
        their_current_range = next(their_ranges, None)

        while our_current_range and their_current_range:
            if our_current_range.allows_all(their_current_range):
                their_current_range = next(their_ranges, None)
            else:
                our_current_range = next(our_ranges, None)

        return their_current_range is None

    def allows_any(self, other: VersionConstraint) -> bool:
        our_ranges = iter(self._ranges)
        their_ranges = iter(other.flatten())

        our_current_range = next(our_ranges, None)
        their_current_range = next(their_ranges, None)

        while our_current_range and their_current_range:
            if our_current_range.allows_any(their_current_range):
                return True

            if their_current_range.allows_higher(our_current_range):
                our_current_range = next(our_ranges, None)
            else:
                their_current_range = next(their_ranges, None)

        return False

    def intersect(self, other: VersionConstraint) -> VersionConstraint:
        our_ranges = iter(self._ranges)
        their_ranges = iter(other.flatten())
        new_ranges = []

        our_current_range = next(our_ranges, None)
        their_current_range = next(their_ranges, None)

        while our_current_range and their_current_range:
            intersection = our_current_range.intersect(their_current_range)

            if not intersection.is_empty():
                new_ranges.append(intersection)

            if their_current_range.allows_higher(our_current_range):
                our_current_range = next(our_ranges, None)
            else:
                their_current_range = next(their_ranges, None)

        return VersionUnion.of(*new_ranges)

    def union(self, other: VersionConstraint) -> VersionConstraint:
        return VersionUnion.of(self, other)

    def difference(self, other: VersionConstraint) -> VersionConstraint:
        our_ranges = iter(self._ranges)
        their_ranges = iter(other.flatten())
        new_ranges: list[VersionConstraint] = []

        state = {
            "current": next(our_ranges, None),
            "their_range": next(their_ranges, None),
        }

        def their_next_range() -> bool:
            state["their_range"] = next(their_ranges, None)
            if state["their_range"]:
                return True

            assert state["current"] is not None
            new_ranges.append(state["current"])
            our_current = next(our_ranges, None)
            while our_current:
                new_ranges.append(our_current)
                our_current = next(our_ranges, None)

            return False

        def our_next_range(include_current: bool = True) -> bool:
            if include_current:
                assert state["current"] is not None
                new_ranges.append(state["current"])

            our_current = next(our_ranges, None)
            if not our_current:
                return False

            state["current"] = our_current

            return True

        while True:
            if state["their_range"] is None:
                break

            assert state["current"] is not None
            if state["their_range"].is_strictly_lower(state["current"]):
                if not their_next_range():
                    break

                continue

            if state["their_range"].is_strictly_higher(state["current"]):
                if not our_next_range():
                    break

                continue

            difference = state["current"].difference(state["their_range"])
            if isinstance(difference, VersionUnion):
                assert len(difference.ranges) == 2
                new_ranges.append(difference.ranges[0])
                state["current"] = difference.ranges[-1]

                if not their_next_range():
                    break
            elif difference.is_empty():
                if not our_next_range(False):
                    break
            else:
                assert isinstance(difference, VersionRangeConstraint)
                state["current"] = difference

                if state["current"].allows_higher(state["their_range"]):
                    if not their_next_range():
                        break
                elif not our_next_range():
                    break

        if not new_ranges:
            return EmptyConstraint()

        if len(new_ranges) == 1:
            return new_ranges[0]

        return VersionUnion.of(*new_ranges)

    def flatten(self) -> list[VersionRangeConstraint]:
        return self.ranges

    @cached_property
    def _exclude_single_wildcard_range_string(self) -> str:
        """
        Helper method to convert this instance into a wildcard range
        string.
        """
        if not self.excludes_single_wildcard_range:
            raise ValueError("Not a valid wildcard range")

        idx_order = (0, 1) if self._ranges[0].max else (1, 0)
        one = self._ranges[idx_order[0]]
        two = self._ranges[idx_order[1]]

        assert one.max is not None
        assert two.min is not None
        return f"!={_single_wildcard_range_string(one.max, two.min)}"

    @cached_property
    def excludes_single_wildcard_range(self) -> bool:
        if len(self._ranges) != 2:
            return False

        idx_order = (0, 1) if self._ranges[0].max else (1, 0)
        one = self._ranges[idx_order[0]]
        two = self._ranges[idx_order[1]]

        if (
            one.max is None
            or one.include_max
            or one.min is not None
            or two.min is None
            or not two.include_min
            or two.max is not None
        ):
            return False

        return _is_wildcard_candidate(two.min, one.max, inverted=True)

    @cached_property
    def excludes_single_version(self) -> bool:
        from poetry.core.constraints.version.version import Version

        return isinstance(self._inverted, Version)

    @cached_property
    def _excluded_single_version(self) -> Version:
        from poetry.core.constraints.version.version import Version

        excluded = self._inverted
        assert isinstance(excluded, Version)
        return excluded

    @cached_property
    def _inverted(self) -> VersionConstraint:
        from poetry.core.constraints.version.version_range import VersionRange

        return VersionRange().difference(self)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, VersionUnion):
            return False

        return self._ranges == other.ranges

    def __hash__(self) -> int:
        return reduce(op.xor, map(hash, self._ranges))

    def __str__(self) -> str:
        if self.excludes_single_version:
            return f"!={self._excluded_single_version}"

        try:
            return self._exclude_single_wildcard_range_string
        except ValueError:
            return " || ".join([str(r) for r in self._ranges])
poetry-core-2.1.1/src/poetry/core/exceptions/000077500000000000000000000000001475444614500212235ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/exceptions/__init__.py000066400000000000000000000001751475444614500233370ustar00rootroot00000000000000from __future__ import annotations

from poetry.core.exceptions.base import PoetryCoreError


__all__ = ("PoetryCoreError",)
poetry-core-2.1.1/src/poetry/core/exceptions/base.py000066400000000000000000000001201475444614500225000ustar00rootroot00000000000000from __future__ import annotations


class PoetryCoreError(Exception):
    pass
poetry-core-2.1.1/src/poetry/core/factory.py000066400000000000000000001017451475444614500210730ustar00rootroot00000000000000from __future__ import annotations

import logging

from collections import defaultdict
from collections.abc import Mapping
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Literal
from typing import Union
from typing import cast

from packaging.utils import canonicalize_name

from poetry.core.utils.helpers import combine_unicode
from poetry.core.utils.helpers import readme_content_type


if TYPE_CHECKING:
    from packaging.utils import NormalizedName

    from poetry.core.packages.dependency import Dependency
    from poetry.core.packages.dependency_group import DependencyGroup
    from poetry.core.packages.project_package import ProjectPackage
    from poetry.core.poetry import Poetry
    from poetry.core.pyproject.toml import PyProjectTOML
    from poetry.core.spdx.license import License

    DependencyConstraint = Union[str, Mapping[str, Any]]
    DependencyConfig = Mapping[
        str, Union[list[DependencyConstraint], DependencyConstraint]
    ]


logger = logging.getLogger(__name__)


class Factory:
    """
    Factory class to create various elements needed by Poetry.
    """

    def create_poetry(
        self, cwd: Path | None = None, with_groups: bool = True
    ) -> Poetry:
        from poetry.core.poetry import Poetry
        from poetry.core.pyproject.toml import PyProjectTOML

        poetry_file = self.locate(cwd)
        pyproject = PyProjectTOML(path=poetry_file)

        # Checking validity
        check_result = self.validate(pyproject.data)
        if check_result["errors"]:
            message = ""
            for error in check_result["errors"]:
                message += f"  - {error}\n"

            raise RuntimeError("The Poetry configuration is invalid:\n" + message)

        for warning in check_result["warnings"]:
            logger.warning(warning)

        # Load package
        # If name or version were missing in package mode, we would have already
        # raised an error, so we can safely assume they might only be missing
        # in non-package mode and use some dummy values in this case.
        project = pyproject.data.get("project", {})
        name = project.get("name") or pyproject.poetry_config.get(
            "name", "non-package-mode"
        )
        assert isinstance(name, str)
        version = project.get("version") or pyproject.poetry_config.get("version", "0")
        assert isinstance(version, str)
        package = self.get_package(name, version)
        self.configure_package(
            package, pyproject, poetry_file.parent, with_groups=with_groups
        )

        return Poetry(poetry_file, pyproject.poetry_config, package)

    @classmethod
    def get_package(cls, name: str, version: str) -> ProjectPackage:
        from poetry.core.packages.project_package import ProjectPackage

        return ProjectPackage(name, version)

    @classmethod
    def _add_package_group_dependencies(
        cls,
        package: ProjectPackage,
        group: str | DependencyGroup,
        dependencies: DependencyConfig,
    ) -> None:
        from poetry.core.packages.dependency_group import MAIN_GROUP

        if isinstance(group, str):
            if package.has_dependency_group(group):
                group = package.dependency_group(group)
            else:
                from poetry.core.packages.dependency_group import DependencyGroup

                group = DependencyGroup(group)

        for name, constraints in dependencies.items():
            _constraints = (
                constraints if isinstance(constraints, list) else [constraints]
            )
            for _constraint in _constraints:
                if name.lower() == "python":
                    if group.name == MAIN_GROUP and isinstance(_constraint, str):
                        package.python_versions = _constraint
                    continue

                group.add_poetry_dependency(
                    cls.create_dependency(
                        name,
                        _constraint,
                        groups=[group.name],
                        root_dir=package.root_dir,
                    )
                )

        package.add_dependency_group(group)

    @classmethod
    def configure_package(
        cls,
        package: ProjectPackage,
        pyproject: PyProjectTOML,
        root: Path,
        with_groups: bool = True,
    ) -> None:
        project = pyproject.data.get("project", {})
        tool_poetry = pyproject.poetry_config

        package.root_dir = root

        cls._configure_package_metadata(package, project, tool_poetry, root)
        cls._configure_entry_points(package, project, tool_poetry)
        cls._configure_package_dependencies(
            package, project, tool_poetry, with_groups=with_groups
        )
        cls._configure_package_poetry_specifics(package, tool_poetry)

    @classmethod
    def _configure_package_metadata(
        cls,
        package: ProjectPackage,
        project: dict[str, Any],
        tool_poetry: dict[str, Any],
        root: Path,
    ) -> None:
        from poetry.core.spdx.helpers import license_by_id

        for key in ("authors", "maintainers"):
            if entries := project.get(key):
                participants = []
                for entry in entries:
                    name, email = entry.get("name"), entry.get("email")
                    if name and email:
                        participants.append(combine_unicode(f"{name} <{email}>"))
                    elif name:
                        participants.append(combine_unicode(name))
                    else:
                        participants.append(combine_unicode(email))
            else:
                participants = [
                    combine_unicode(author) for author in tool_poetry.get(key, [])
                ]
            if key == "authors":
                package.authors = participants
            else:
                package.maintainers = participants

        package.description = project.get("description") or tool_poetry.get(
            "description", ""
        )
        if project_license := project.get("license"):
            if isinstance(project_license, str):
                raw_license = project_license
            else:
                raw_license = project_license.get("text", "")
                if not raw_license and (
                    license_file := cast(str, project_license.get("file", ""))
                ):
                    license_path = (root / license_file).absolute()
                    try:
                        raw_license = Path(license_path).read_text(encoding="utf-8")
                    except FileNotFoundError as e:
                        raise FileNotFoundError(
                            f"Poetry: license file '{license_path}' not found"
                        ) from e
        else:
            raw_license = tool_poetry.get("license", "")
        try:
            license_: License | None = license_by_id(raw_license)
        except ValueError:
            license_ = None
        package.license = license_

        package.requires_python = project.get("requires-python", "*")
        package.keywords = project.get("keywords") or tool_poetry.get("keywords", [])
        package.classifiers = (
            static_classifiers := project.get("classifiers")
        ) or tool_poetry.get("classifiers", [])
        package.dynamic_classifiers = not static_classifiers

        if urls := project.get("urls"):
            custom_urls = {}
            for name, url in urls.items():
                lower_name = name.lower()
                if lower_name == "homepage":
                    package.homepage = url
                elif lower_name == "repository":
                    package.repository_url = url
                elif lower_name == "documentation":
                    package.documentation_url = url
                else:
                    custom_urls[name] = url
            package.custom_urls = custom_urls
        else:
            package.homepage = tool_poetry.get("homepage")
            package.repository_url = tool_poetry.get("repository")
            package.documentation_url = tool_poetry.get("documentation")
            if "urls" in tool_poetry:
                package.custom_urls = tool_poetry["urls"]

        if readme := project.get("readme"):
            if isinstance(readme, str):
                package.readmes = (root / readme,)
            elif "file" in readme:
                package.readmes = (root / readme["file"],)
                package.readme_content_type = readme["content-type"]
            elif "text" in readme:
                package.readme_content = root / readme["text"]
                package.readme_content_type = readme["content-type"]
        elif custom_readme := tool_poetry.get("readme"):
            custom_readmes = (
                (custom_readme,) if isinstance(custom_readme, str) else custom_readme
            )
            package.readmes = tuple(root / r for r in custom_readmes if r)
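
    # Illustrative example (ours, not part of poetry-core): the accepted
    # [project.readme] forms and their effect:
    #
    #   readme = "README.md"                                    # -> package.readmes
    #   readme = {file = "README.md", content-type = "text/markdown"}
    #   readme = {text = "# My project", content-type = "text/markdown"}
    #
    # The "text" form stores the literal string in package.readme_content
    # instead of a path.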

    @classmethod
    def _configure_entry_points(
        cls,
        package: ProjectPackage,
        project: dict[str, Any],
        tool_poetry: dict[str, Any],
    ) -> None:
        entry_points: defaultdict[str, dict[str, str]] = defaultdict(dict)

        if scripts := project.get("scripts"):
            entry_points["console-scripts"] = scripts
        elif scripts := tool_poetry.get("scripts"):
            for name, specification in scripts.items():
                if isinstance(specification, str):
                    specification = {"reference": specification, "type": "console"}

                if specification.get("type") != "console":
                    continue

                reference = specification.get("reference")

                if reference:
                    entry_points["console-scripts"][name] = reference

        if scripts := project.get("gui-scripts"):
            entry_points["gui-scripts"] = scripts

        if other_scripts := project.get("entry-points"):
            for group_name, scripts in sorted(other_scripts.items()):
                if group_name in {"console-scripts", "gui-scripts"}:
                    raise ValueError(
                        f"Group '{group_name}' is reserved and cannot be used"
                        " as a custom entry-point group."
                    )
                entry_points[group_name] = scripts
        elif other_scripts := tool_poetry.get("plugins"):
            for group_name, scripts in sorted(other_scripts.items()):
                entry_points[group_name] = scripts

        package.entry_points = dict(entry_points)
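
    # Illustrative example (ours, not part of poetry-core): given
    #
    #   [project.scripts]
    #   mycli = "mypkg.cli:main"
    #
    #   [project.entry-points."pygments.styles"]
    #   mystyle = "mypkg.style:MyStyle"
    #
    # package.entry_points becomes
    #   {"console-scripts": {"mycli": "mypkg.cli:main"},
    #    "pygments.styles": {"mystyle": "mypkg.style:MyStyle"}}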

    @classmethod
    def _configure_package_dependencies(
        cls,
        package: ProjectPackage,
        project: dict[str, Any],
        tool_poetry: dict[str, Any],
        with_groups: bool = True,
    ) -> None:
        from poetry.core.packages.dependency import Dependency
        from poetry.core.packages.dependency_group import MAIN_GROUP
        from poetry.core.packages.dependency_group import DependencyGroup

        dependencies = project.get("dependencies", {})
        optional_dependencies = project.get("optional-dependencies", {})
        dynamic = project.get("dynamic", [])

        package_extras: dict[NormalizedName, list[Dependency]]
        if dependencies or optional_dependencies:
            group = DependencyGroup(
                MAIN_GROUP,
                mixed_dynamic=(
                    "dependencies" in dynamic or "optional-dependencies" in dynamic
                ),
            )
            package.add_dependency_group(group)

            for constraint in dependencies:
                group.add_dependency(
                    Dependency.create_from_pep_508(
                        constraint, relative_to=package.root_dir
                    )
                )
            package_extras = {}
            for extra_name, extra_dependencies in optional_dependencies.items():
                extra_name = canonicalize_name(extra_name)
                package_extras[extra_name] = []

                for dependency_constraint in extra_dependencies:
                    dependency = Dependency.create_from_pep_508(
                        dependency_constraint, relative_to=package.root_dir
                    )
                    dependency._optional = True
                    dependency._in_extras = [extra_name]

                    package_extras[extra_name].append(dependency)
                    group.add_dependency(dependency)

            package.extras = package_extras

        if "dependencies" in tool_poetry:
            cls._add_package_group_dependencies(
                package=package,
                group=MAIN_GROUP,
                dependencies=tool_poetry["dependencies"],
            )

        if with_groups and "group" in tool_poetry:
            for group_name, group_config in tool_poetry["group"].items():
                group = DependencyGroup(
                    group_name, optional=group_config.get("optional", False)
                )
                cls._add_package_group_dependencies(
                    package=package,
                    group=group,
                    dependencies=group_config["dependencies"],
                )

        if with_groups and "dev-dependencies" in tool_poetry:
            cls._add_package_group_dependencies(
                package=package,
                group="dev",
                dependencies=tool_poetry["dev-dependencies"],
            )

        # ignore extras in [tool.poetry] if dependencies or optional-dependencies
        # are declared in [project]
        if not dependencies and not optional_dependencies:
            package_extras = {}
            extras = tool_poetry.get("extras", {})
            for extra_name, requirements in extras.items():
                extra_name = canonicalize_name(extra_name)
                package_extras[extra_name] = []

                # Match each extra requirement against the declared main dependencies
                for req in requirements:
                    req = Dependency(req, "*")

                    for dep in package.requires:
                        if dep.name == req.name:
                            dep._in_extras = [*dep._in_extras, extra_name]
                            package_extras[extra_name].append(dep)

            package.extras = package_extras
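
    # Illustrative example (ours, not part of poetry-core): given
    #
    #   [project]
    #   dependencies = ["httpx>=0.27"]
    #
    #   [project.optional-dependencies]
    #   cli = ["click>=8.0"]
    #
    # the main group contains both httpx and click, click is marked optional
    # and tagged with the "cli" extra, and package.extras == {"cli": [<click>]}.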

    @classmethod
    def _prepare_formats(
        cls,
        items: list[dict[str, Any]],
        default_formats: list[Literal["sdist", "wheel"]],
    ) -> list[dict[str, Any]]:
        result = []
        for item in items:
            formats = item.get("format", default_formats)
            if not isinstance(formats, list):
                formats = [formats]

            result.append({**item, "format": formats})

        return result
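
    # Illustrative example (ours, not part of poetry-core):
    #
    #   _prepare_formats([{"path": "notes.txt"}], default_formats=["sdist"])
    #   -> [{"path": "notes.txt", "format": ["sdist"]}]
    #
    #   _prepare_formats([{"include": "pkg", "format": "wheel"}],
    #                    default_formats=["sdist", "wheel"])
    #   -> [{"include": "pkg", "format": ["wheel"]}]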

    @classmethod
    def _configure_package_poetry_specifics(
        cls, package: ProjectPackage, tool_poetry: dict[str, Any]
    ) -> None:
        if build := tool_poetry.get("build"):
            if not isinstance(build, dict):
                build = {"script": build}
            package.build_config = build or {}

        if includes := tool_poetry.get("include"):
            includes = [
                include if isinstance(include, dict) else {"path": include}
                for include in includes
            ]

            package.include = cls._prepare_formats(includes, default_formats=["sdist"])

        if exclude := tool_poetry.get("exclude"):
            package.exclude = exclude

        if packages := tool_poetry.get("packages"):
            package.packages = cls._prepare_formats(
                packages, default_formats=["sdist", "wheel"]
            )

    @classmethod
    def create_dependency(
        cls,
        name: str,
        constraint: DependencyConstraint,
        groups: list[str] | None = None,
        root_dir: Path | None = None,
    ) -> Dependency:
        from poetry.core.constraints.generic import (
            parse_constraint as parse_generic_constraint,
        )
        from poetry.core.constraints.version import (
            parse_constraint as parse_version_constraint,
        )
        from poetry.core.packages.dependency import Dependency
        from poetry.core.packages.dependency_group import MAIN_GROUP
        from poetry.core.packages.directory_dependency import DirectoryDependency
        from poetry.core.packages.file_dependency import FileDependency
        from poetry.core.packages.url_dependency import URLDependency
        from poetry.core.packages.utils.utils import create_nested_marker
        from poetry.core.packages.vcs_dependency import VCSDependency
        from poetry.core.version.markers import AnyMarker
        from poetry.core.version.markers import parse_marker

        if groups is None:
            groups = [MAIN_GROUP]

        if constraint is None:
            constraint = "*"

        if isinstance(constraint, Mapping):
            optional = constraint.get("optional", False)
            python_versions = constraint.get("python")
            platform = constraint.get("platform")
            markers = constraint.get("markers")
            allows_prereleases = constraint.get("allow-prereleases")

            dependency: Dependency
            if "git" in constraint:
                # VCS dependency
                dependency = VCSDependency(
                    name,
                    "git",
                    constraint["git"],
                    branch=constraint.get("branch", None),
                    tag=constraint.get("tag", None),
                    rev=constraint.get("rev", None),
                    directory=constraint.get("subdirectory", None),
                    groups=groups,
                    optional=optional,
                    develop=constraint.get("develop", False),
                    extras=constraint.get("extras", []),
                )
            elif "file" in constraint:
                file_path = Path(constraint["file"])

                dependency = FileDependency(
                    name,
                    file_path,
                    directory=constraint.get("subdirectory", None),
                    groups=groups,
                    base=root_dir,
                    extras=constraint.get("extras", []),
                )
            elif "path" in constraint:
                path = Path(constraint["path"])

                if root_dir:
                    is_file = root_dir.joinpath(path).is_file()
                else:
                    is_file = path.is_file()

                if is_file:
                    dependency = FileDependency(
                        name,
                        path,
                        directory=constraint.get("subdirectory", None),
                        groups=groups,
                        optional=optional,
                        base=root_dir,
                        extras=constraint.get("extras", []),
                    )
                else:
                    subdirectory = constraint.get("subdirectory", None)
                    if subdirectory:
                        path = path / subdirectory
                    dependency = DirectoryDependency(
                        name,
                        path,
                        groups=groups,
                        optional=optional,
                        base=root_dir,
                        develop=constraint.get("develop", False),
                        extras=constraint.get("extras", []),
                    )
            elif "url" in constraint:
                dependency = URLDependency(
                    name,
                    constraint["url"],
                    directory=constraint.get("subdirectory", None),
                    groups=groups,
                    optional=optional,
                    extras=constraint.get("extras", []),
                )
            else:
                version = constraint.get("version", "*")

                dependency = Dependency(
                    name,
                    version,
                    optional=optional,
                    groups=groups,
                    allows_prereleases=allows_prereleases,
                    extras=constraint.get("extras", []),
                )
                # Normally not valid, but required for enriching [project] dependencies
                dependency._develop = constraint.get("develop", False)

            marker = parse_marker(markers) if markers else AnyMarker()

            if python_versions:
                marker = marker.intersect(
                    parse_marker(
                        create_nested_marker(
                            "python_version", parse_version_constraint(python_versions)
                        )
                    )
                )

            if platform:
                marker = marker.intersect(
                    parse_marker(
                        create_nested_marker(
                            "sys_platform", parse_generic_constraint(platform)
                        )
                    )
                )

            if not marker.is_any():
                dependency.marker = marker

            dependency.source_name = constraint.get("source")
        else:
            dependency = Dependency(name, constraint, groups=groups)

        return dependency
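
    # Illustrative examples (ours, not part of poetry-core) of how constraints
    # map to dependency types:
    #
    #   create_dependency("requests", "^2.28")
    #   -> Dependency("requests", "^2.28")
    #
    #   create_dependency("mylib", {"git": "https://example.com/mylib.git", "branch": "main"})
    #   -> VCSDependency (URL is hypothetical)
    #
    #   create_dependency("numpy", {"version": "^1.26", "python": ">=3.10"})
    #   -> Dependency with marker python_version >= "3.10"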

    @classmethod
    def validate(
        cls, toml_data: dict[str, Any], strict: bool = False
    ) -> dict[str, list[str]]:
        """
        Checks the validity of a configuration
        """
        from poetry.core.json import validate_object

        result: dict[str, list[str]] = {"errors": [], "warnings": []}

        # Validate against schemas
        project = toml_data.get("project")
        if project is not None:
            project_validation_errors = [
                e.replace("data", "project")
                for e in validate_object(project, "project-schema")
            ]
            result["errors"] += project_validation_errors
        # With PEP 621 [tool.poetry] is not mandatory anymore. We still create and
        # validate it so that default values (e.g. for package-mode) are set.
        tool_poetry = toml_data.setdefault("tool", {}).setdefault("poetry", {})
        tool_poetry_validation_errors = [
            e.replace("data.", "tool.poetry.")
            for e in validate_object(tool_poetry, "poetry-schema")
        ]
        result["errors"] += tool_poetry_validation_errors

        # Check for required fields if package mode.
        # In non-package mode, there are no required fields.
        package_mode = tool_poetry.get("package-mode", True)
        if package_mode:
            for key in ("name", "version"):
                value = (project or {}).get(key) or tool_poetry.get(key)
                if not value:
                    result["errors"].append(
                        f"Either [project.{key}] or [tool.poetry.{key}]"
                        " is required in package mode."
                    )

        config = tool_poetry

        if "dev-dependencies" in config:
            result["warnings"].append(
                'The "poetry.dev-dependencies" section is deprecated'
                " and will be removed in a future version."
                ' Use "poetry.group.dev.dependencies" instead.'
            )

        if strict:
            # Validate relation between [project] and [tool.poetry]
            cls._validate_legacy_vs_project(toml_data, result)

            cls._validate_strict(config, result)

        return result
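
    # Illustrative usage (ours, not part of poetry-core; tomllib requires
    # Python 3.11+):
    #
    #   import tomllib
    #   with open("pyproject.toml", "rb") as f:
    #       data = tomllib.load(f)
    #   check = Factory.validate(data, strict=True)
    #   # check == {"errors": [...], "warnings": [...]}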

    @classmethod
    def _validate_legacy_vs_project(
        cls, toml_data: dict[str, Any], result: dict[str, list[str]]
    ) -> None:
        project = toml_data.get("project", {})
        dynamic = project.get("dynamic", [])
        tool_poetry = toml_data["tool"]["poetry"]

        redundant_fields = [
            # name, deprecated (if not dynamic), new name (or None if same as old)
            ("name", True, None),
            # version can be dynamically set via `build --local-version` or plugins
            ("version", False, None),
            ("description", True, None),
            # multiple readmes are not supported in [project.readme]
            ("readme", False, None),
            ("license", True, None),
            ("authors", True, None),
            ("maintainers", True, None),
            ("keywords", True, None),
            # classifiers are enriched dynamically by default
            ("classifiers", False, None),
            ("homepage", True, "urls"),
            ("repository", True, "urls"),
            ("documentation", True, "urls"),
            ("urls", True, "urls"),
            ("plugins", True, "entry-points"),
            ("extras", True, "optional-dependencies"),
        ]
        dynamic_information = {
            "version": (
                "If you want to set the version dynamically via"
                " `poetry build --local-version` or you are using a plugin, which"
                " sets the version dynamically, you should define the version in"
                " [tool.poetry] and add 'version' to [project.dynamic]."
            ),
            "readme": (
                "If you want to define multiple readmes, you should define them in"
                " [tool.poetry] and add 'readme' to [project.dynamic]."
            ),
            "classifiers": (
                "ATTENTION: Per default Poetry determines classifiers for supported"
                " Python versions and license automatically. If you define classifiers"
                " in [project], you disable the automatic enrichment. In other words,"
                " you have to define all classifiers manually."
                " If you want to use Poetry's automatic enrichment of classifiers,"
                " you should define them in [tool.poetry] and add 'classifiers'"
                " to [project.dynamic]."
            ),
        }
        assert {f[0] for f in redundant_fields if not f[1]} == set(dynamic_information)

        for name, deprecated, new_name in redundant_fields:
            new_name = new_name or name
            if name in tool_poetry:
                warning = ""
                if new_name in project:
                    warning = (
                        f"[project.{new_name}] and [tool.poetry.{name}] are both set."
                        " The latter will be ignored."
                    )
                elif deprecated:
                    warning = (
                        f"[tool.poetry.{name}] is deprecated."
                        f" Use [project.{new_name}] instead."
                    )
                elif new_name not in dynamic:
                    warning = (
                        f"[tool.poetry.{name}] is set but '{new_name}' is not in"
                        f" [project.dynamic]. If it is static use [project.{new_name}]."
                        f" If it is dynamic, add '{new_name}' to [project.dynamic]."
                    )
                if warning:
                    if additional_info := dynamic_information.get(name):
                        warning += f"\n{additional_info}"
                    result["warnings"].append(warning)

        # scripts are special because entry-points are deprecated
        # but files are not because there is no equivalent in [project]
        if scripts := tool_poetry.get("scripts"):
            for __, script in scripts.items():
                if not isinstance(script, dict) or script.get("type") != "file":
                    if "scripts" in project:
                        warning = (
                            "[project.scripts] is set and there are console scripts in"
                            " [tool.poetry.scripts]. The latter will be ignored."
                        )
                    else:
                        warning = (
                            "Defining console scripts in [tool.poetry.scripts] is"
                            " deprecated. Use [project.scripts] instead."
                            " ([tool.poetry.scripts] should only be used for scripts"
                            " of type 'file')."
                        )
                    result["warnings"].append(warning)
                    break

        # dependencies are special because we consider
        # [project.dependencies] as abstract dependencies for building
        # and [tool.poetry.dependencies] as the concrete dependencies for locking
        if (
            "dependencies" in tool_poetry
            and "project" in toml_data
            and "dependencies" not in project
            and "dependencies" not in project.get("dynamic", [])
        ):
            result["warnings"].append(
                "[tool.poetry.dependencies] is set but [project.dependencies] is not"
                " and 'dependencies' is not in [project.dynamic]."
                " You should either migrate [tool.poetry.depencencies] to"
                " [project.dependencies] (if you do not need Poetry-specific features)"
                " or add [project.dependencies] in addition to"
                " [tool.poetry.dependencies] or add 'dependencies' to"
                " [project.dynamic]."
            )

        # requires-python in [project] and python in [tool.poetry.dependencies] are
        # special because we consider requires-python as abstract python version
        # for building and python as concrete python version for locking
        if (
            "python" in tool_poetry.get("dependencies", {})
            and "project" in toml_data
            and "requires-python" not in project
            and "requires-python" not in project.get("dynamic", [])
        ):
            result["warnings"].append(
                "[tool.poetry.dependencies.python] is set but [project.requires-python]"
                " is not set and 'requires-python' is not in [project.dynamic]."
            )
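
    # Illustrative example (ours, not part of poetry-core): in strict mode,
    #
    #   [project]
    #   name = "mypkg"
    #   version = "1.0"
    #
    #   [tool.poetry.dependencies]
    #   requests = "^2.28"
    #
    # triggers the dependency warning above; adding
    # dynamic = ["dependencies"] to [project] (or a [project.dependencies]
    # list) silences it.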

    @classmethod
    def _validate_strict(
        cls, config: dict[str, Any], result: dict[str, list[str]]
    ) -> None:
        if "dependencies" in config:
            python_versions = config["dependencies"].get("python")
            if python_versions == "*":
                result["warnings"].append(
                    "A wildcard Python dependency is ambiguous. "
                    "Consider specifying a more explicit one."
                )

            for name, constraint in config["dependencies"].items():
                if not isinstance(constraint, dict):
                    continue

                if "allows-prereleases" in constraint:
                    result["warnings"].append(
                        f'The "{name}" dependency specifies '
                        'the "allows-prereleases" property, which is deprecated. '
                        'Use "allow-prereleases" instead.'
                    )

        if "extras" in config:
            for extra_name, requirements in config["extras"].items():
                extra_name = canonicalize_name(extra_name)

                for req in requirements:
                    req_name = canonicalize_name(req)
                    for dependency in config.get("dependencies", {}):
                        dep_name = canonicalize_name(dependency)
                        if req_name == dep_name:
                            break
                    else:
                        result["errors"].append(
                            f'Cannot find dependency "{req}" for extra '
                            f'"{extra_name}" in main dependencies.'
                        )

        # Checking for scripts with extras
        if "scripts" in config:
            scripts = config["scripts"]
            config_extras = config.get("extras", {})

            for name, script in scripts.items():
                if not isinstance(script, dict):
                    continue

                extras = script.get("extras", [])
                if extras:
                    result["warnings"].append(
                        f'The script "{name}" depends on an extra. Scripts'
                        " depending on extras are deprecated and support for them"
                        " will be removed in a future version of"
                        " poetry/poetry-core. See"
                        " https://packaging.python.org/en/latest/specifications/entry-points/#data-model"
                        " for details."
                    )
                for extra in extras:
                    if extra not in config_extras:
                        result["errors"].append(
                            f'The script "{name}" requires extra "{extra}"'
                            " which is not defined."
                        )

        # Checking types of all readme files (must match)
        if "readme" in config and not isinstance(config["readme"], str):
            readme_types = {readme_content_type(r) for r in config["readme"]}
            if len(readme_types) > 1:
                result["errors"].append(
                    "Declared README files must be of same type: found"
                    f" {', '.join(sorted(readme_types))}"
                )

    @classmethod
    def locate(cls, cwd: Path | None = None) -> Path:
        cwd = Path(cwd or Path.cwd())
        candidates = [cwd]
        candidates.extend(cwd.parents)

        for path in candidates:
            poetry_file = path / "pyproject.toml"

            if poetry_file.exists():
                return poetry_file

        raise RuntimeError(
            f"Poetry could not find a pyproject.toml file in {cwd} or its parents"
        )
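
    # Illustrative usage (ours, not part of poetry-core; paths are
    # hypothetical):
    #
    #   Factory.locate(Path("/home/user/project/src"))
    #   -> Path("/home/user/project/pyproject.toml"), assuming that is the
    #      first pyproject.toml found while walking up from src/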
poetry-core-2.1.1/src/poetry/core/json/000077500000000000000000000000001475444614500200135ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/json/__init__.py000066400000000000000000000013541475444614500221270ustar00rootroot00000000000000from __future__ import annotations

import json

from importlib.resources import files
from typing import Any

import fastjsonschema

from fastjsonschema.exceptions import JsonSchemaException


class ValidationError(ValueError):
    pass


def validate_object(obj: dict[str, Any], schema_name: str) -> list[str]:
    schema_file = files(__package__) / "schemas" / f"{schema_name}.json"

    if not schema_file.is_file():
        raise ValueError(f"Schema {schema_name} does not exist.")

    with schema_file.open(encoding="utf-8") as f:
        schema = json.load(f)

    validate = fastjsonschema.compile(schema)

    errors = []
    try:
        validate(obj)
    except JsonSchemaException as e:
        errors = [e.message]

    return errors
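
# Illustrative usage (ours, not part of poetry-core):
#
#   errors = validate_object({"name": "my-package"}, "project-schema")
#   # -> [] when the object is valid; otherwise a one-element list with the
#   #    first fastjsonschema error message.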
poetry-core-2.1.1/src/poetry/core/json/schemas/000077500000000000000000000000001475444614500214365ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/json/schemas/poetry-schema.json000066400000000000000000000472261475444614500251240ustar00rootroot00000000000000{
  "$schema": "http://json-schema.org/draft-04/schema#",
  "name": "Package",
  "type": "object",
  "additionalProperties": true,
  "properties": {
    "package-mode": {
      "type": "boolean",
      "default": true,
      "description": "Whether Poetry is operated in package mode or non-package mode."
    },
    "name": {
      "type": "string",
      "description": "Package name (legacy)."
    },
    "version": {
      "type": "string",
      "description": "Package version (legacy)."
    },
    "description": {
      "type": "string",
      "description": "Short package description (legacy).",
      "pattern": "\\A[^\\n]*\\Z"
    },
    "keywords": {
      "type": "array",
      "items": {
        "type": "string",
        "description": "A tag/keyword that this package relates to (legacy)."
      }
    },
    "homepage": {
      "type": "string",
      "description": "Homepage URL for the project (legacy).",
      "format": "uri"
    },
    "repository": {
      "type": "string",
      "description": "Repository URL for the project (legacy).",
      "format": "uri"
    },
    "documentation": {
      "type": "string",
      "description": "Documentation URL for the project (legacy).",
      "format": "uri"
    },
    "license": {
      "type": "string",
      "description": "License name (legacy)."
    },
    "authors": {
      "$ref": "#/definitions/authors",
      "description": "Authors (legacy)."
    },
    "maintainers": {
      "$ref": "#/definitions/maintainers",
      "description": "Maintainers (legacy)."
    },
    "readme": {
      "anyOf": [
        {
          "type": "string",
          "description": "The path to the README file (legacy)."
        },
        {
          "type": "array",
          "description": "A list of paths to the readme files.",
          "items": {
            "type": "string"
          }
        }
      ]
    },
    "classifiers": {
      "type": "array",
      "description": "A list of trove classifiers (legacy)."
    },
    "packages": {
      "type": "array",
      "description": "A list of packages to include in the final distribution.",
      "items": {
        "type": "object",
        "description": "Information about where the package resides.",
        "additionalProperties": false,
        "required": [
          "include"
        ],
        "properties": {
          "include": {
            "$ref": "#/definitions/include-path"
          },
          "from": {
            "type": "string",
            "description": "Where the source directory of the package resides."
          },
          "format": {
            "$ref": "#/definitions/package-formats"
          },
          "to": {
            "type": "string",
            "description": "Where the package should be installed in the final distribution."
          }
        }
      }
    },
    "include": {
      "type": "array",
      "description": "A list of files and folders to include.",
      "items": {
        "anyOf": [
          {
            "$ref": "#/definitions/include-path"
          },
          {
            "type": "object",
            "additionalProperties": false,
            "required": [
              "path"
            ],
            "properties": {
              "path": {
                "$ref": "#/definitions/include-path"
              },
              "format": {
                "$ref": "#/definitions/package-formats"
              }
            }
          }
        ]
      }
    },
    "exclude": {
      "type": "array",
      "description": "A list of files and folders to exclude."
    },
    "dependencies": {
      "type": "object",
      "description": "This is a hash of package name (keys) and version constraints (values) that are required to run this package.",
      "properties": {
        "python": {
          "type": "string",
          "description": "The Python versions the package is compatible with."
        }
      },
      "$ref": "#/definitions/dependencies",
      "additionalProperties": false
    },
    "dev-dependencies": {
      "type": "object",
      "description": "This is a hash of package name (keys) and version constraints (values) that this package requires for developing it (testing tools and such).",
      "$ref": "#/definitions/dependencies",
      "additionalProperties": false
    },
    "extras": {
      "type": "object",
      "description": "Extras (legacy).",
      "patternProperties": {
        "^[a-zA-Z-_.0-9]+$": {
          "type": "array",
          "items": {
            "type": "string",
            "pattern": "^[a-zA-Z-_.0-9]+$"
          }
        }
      }
    },
    "group": {
      "type": "object",
      "description": "This represents groups of dependencies",
      "patternProperties": {
        "^[a-zA-Z-_.0-9]+$": {
          "type": "object",
          "description": "This represents a single dependency group",
          "required": [
            "dependencies"
          ],
          "properties": {
            "optional": {
              "type": "boolean",
              "description": "Whether the dependency group is optional or not"
            },
            "dependencies": {
              "type": "object",
              "description": "The dependencies of this dependency group",
              "$ref": "#/definitions/dependencies",
              "additionalProperties": false
            }
          },
          "additionalProperties": false
        }
      }
    },
    "build": {
      "$ref": "#/definitions/build-section"
    },
    "scripts": {
      "type": "object",
      "description": "A hash of scripts to be installed.",
      "patternProperties": {
        "^[a-zA-Z-_.0-9]+$": {
          "oneOf": [
            {
              "$ref": "#/definitions/script-legacy"
            },
            {
              "$ref": "#/definitions/script-table"
            }
          ]
        }
      }
    },
    "plugins": {
      "type": "object",
      "description": "A hash of hashes representing plugins",
      "patternProperties": {
        "^[a-zA-Z-_.0-9]+$": {
          "type": "object",
          "patternProperties": {
            "^[a-zA-Z-_.0-9]+$": {
              "type": "string"
            }
          }
        }
      }
    },
    "urls": {
      "type": "object",
      "patternProperties": {
        "^.+$": {
          "type": "string",
          "description": "The full url of the custom url (Legacy)."
        }
      }
    }
  },
  "definitions": {
    "authors": {
      "type": "array",
      "description": "List of authors that contributed to the package. This is typically the main maintainers, not the full list (legacy).",
      "items": {
        "type": "string"
      }
    },
    "maintainers": {
      "type": "array",
      "description": "List of maintainers, other than the original author(s), that upkeep the package (legacy).",
      "items": {
        "type": "string"
      }
    },
    "include-path": {
      "type": "string",
      "description": "Path to file or directory to include."
    },
    "package-format": {
      "type": "string",
      "enum": [
        "sdist",
        "wheel"
      ],
      "description": "A Python packaging format."
    },
    "package-formats": {
      "oneOf": [
        {
          "$ref": "#/definitions/package-format"
        },
        {
          "type": "array",
          "items": {
            "$ref": "#/definitions/package-format"
          }
        }
      ],
      "description": "The format(s) for which the package must be included."
    },
    "dependencies": {
      "type": "object",
      "patternProperties": {
        "^[a-zA-Z-_.0-9]+$": {
          "oneOf": [
            {
              "$ref": "#/definitions/dependency"
            },
            {
              "$ref": "#/definitions/long-dependency"
            },
            {
              "$ref": "#/definitions/git-dependency"
            },
            {
              "$ref": "#/definitions/file-dependency"
            },
            {
              "$ref": "#/definitions/path-dependency"
            },
            {
              "$ref": "#/definitions/url-dependency"
            },
            {
              "$ref": "#/definitions/multiple-constraints-dependency"
            },
            {
              "$ref": "#/definitions/dependency-options"
            }
          ]
        }
      }
    },
    "dependency": {
      "type": "string",
      "description": "The constraint of the dependency."
    },
    "long-dependency": {
      "type": "object",
      "required": [
        "version"
      ],
      "additionalProperties": false,
      "properties": {
        "version": {
          "type": "string",
          "description": "The constraint of the dependency."
        },
        "python": {
          "type": "string",
          "description": "The python versions for which the dependency should be installed."
        },
        "platform": {
          "type": "string",
          "description": "The platform(s) for which the dependency should be installed."
        },
        "markers": {
          "type": "string",
          "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
        },
        "allow-prereleases": {
          "type": "boolean",
          "description": "Whether the dependency allows prereleases or not."
        },
        "allows-prereleases": {
          "type": "boolean",
          "description": "Whether the dependency allows prereleases or not."
        },
        "optional": {
          "type": "boolean",
          "description": "Whether the dependency is optional or not."
        },
        "extras": {
          "type": "array",
          "description": "The required extras for this dependency.",
          "items": {
            "type": "string"
          }
        },
        "source": {
          "type": "string",
          "description": "The exclusive source used to search for this dependency."
        }
      }
    },
    "git-dependency": {
      "type": "object",
      "required": [
        "git"
      ],
      "additionalProperties": false,
      "properties": {
        "git": {
          "type": "string",
          "description": "The url of the git repository."
        },
        "branch": {
          "type": "string",
          "description": "The branch to checkout."
        },
        "tag": {
          "type": "string",
          "description": "The tag to checkout."
        },
        "rev": {
          "type": "string",
          "description": "The revision to checkout."
        },
        "subdirectory": {
          "type": "string",
          "description": "The relative path to the directory where the package is located."
        },
        "python": {
          "type": "string",
          "description": "The python versions for which the dependency should be installed."
        },
        "platform": {
          "type": "string",
          "description": "The platform(s) for which the dependency should be installed."
        },
        "markers": {
          "type": "string",
          "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
        },
        "allow-prereleases": {
          "type": "boolean",
          "description": "Whether the dependency allows prereleases or not."
        },
        "allows-prereleases": {
          "type": "boolean",
          "description": "Whether the dependency allows prereleases or not."
        },
        "optional": {
          "type": "boolean",
          "description": "Whether the dependency is optional or not."
        },
        "extras": {
          "type": "array",
          "description": "The required extras for this dependency.",
          "items": {
            "type": "string"
          }
        },
        "develop": {
          "type": "boolean",
          "description": "Whether to install the dependency in development mode."
        }
      }
    },
    "file-dependency": {
      "type": "object",
      "required": [
        "file"
      ],
      "additionalProperties": false,
      "properties": {
        "file": {
          "type": "string",
          "description": "The path to the file."
        },
        "subdirectory": {
          "type": "string",
          "description": "The relative path to the directory where the package is located."
        },
        "python": {
          "type": "string",
          "description": "The python versions for which the dependency should be installed."
        },
        "platform": {
          "type": "string",
          "description": "The platform(s) for which the dependency should be installed."
        },
        "markers": {
          "type": "string",
          "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
        },
        "optional": {
          "type": "boolean",
          "description": "Whether the dependency is optional or not."
        },
        "extras": {
          "type": "array",
          "description": "The required extras for this dependency.",
          "items": {
            "type": "string"
          }
        }
      }
    },
    "path-dependency": {
      "type": "object",
      "required": [
        "path"
      ],
      "additionalProperties": false,
      "properties": {
        "path": {
          "type": "string",
          "description": "The path to the dependency."
        },
        "subdirectory": {
          "type": "string",
          "description": "The relative path to the directory where the package is located."
        },
        "python": {
          "type": "string",
          "description": "The python versions for which the dependency should be installed."
        },
        "platform": {
          "type": "string",
          "description": "The platform(s) for which the dependency should be installed."
        },
        "markers": {
          "type": "string",
          "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
        },
        "optional": {
          "type": "boolean",
          "description": "Whether the dependency is optional or not."
        },
        "extras": {
          "type": "array",
          "description": "The required extras for this dependency.",
          "items": {
            "type": "string"
          }
        },
        "develop": {
          "type": "boolean",
          "description": "Whether to install the dependency in development mode."
        }
      }
    },
    "url-dependency": {
      "type": "object",
      "required": [
        "url"
      ],
      "additionalProperties": false,
      "properties": {
        "url": {
          "type": "string",
          "description": "The url to the file."
        },
        "subdirectory": {
          "type": "string",
          "description": "The relative path to the directory where the package is located."
        },
        "python": {
          "type": "string",
          "description": "The python versions for which the dependency should be installed."
        },
        "platform": {
          "type": "string",
          "description": "The platform(s) for which the dependency should be installed."
        },
        "markers": {
          "type": "string",
          "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
        },
        "optional": {
          "type": "boolean",
          "description": "Whether the dependency is optional or not."
        },
        "extras": {
          "type": "array",
          "description": "The required extras for this dependency.",
          "items": {
            "type": "string"
          }
        }
      }
    },
    "dependency-options": {
      "type": "object",
      "additionalProperties": false,
      "properties": {
        "python": {
          "type": "string",
          "description": "The python versions for which the dependency should be installed."
        },
        "platform": {
          "type": "string",
          "description": "The platform(s) for which the dependency should be installed."
        },
        "markers": {
          "type": "string",
          "description": "The PEP 508 compliant environment markers for which the dependency should be installed."
        },
        "allow-prereleases": {
          "type": "boolean",
          "description": "Whether the dependency allows prereleases or not."
        },
        "source": {
          "type": "string",
          "description": "The exclusive source used to search for this dependency."
        },
        "develop": {
          "type": "boolean",
          "description": "Whether to install the dependency in development mode."
        }
      }
    },
    "multiple-constraints-dependency": {
      "type": "array",
      "minItems": 1,
      "items": {
        "oneOf": [
          {
            "$ref": "#/definitions/dependency"
          },
          {
            "$ref": "#/definitions/long-dependency"
          },
          {
            "$ref": "#/definitions/git-dependency"
          },
          {
            "$ref": "#/definitions/file-dependency"
          },
          {
            "$ref": "#/definitions/path-dependency"
          },
          {
            "$ref": "#/definitions/url-dependency"
          },
          {
            "$ref": "#/definitions/dependency-options"
          }
        ]
      }
    },
    "script-table": {
      "type": "object",
      "oneOf": [
        {
          "$ref": "#/definitions/extra-script-legacy"
        },
        {
          "$ref": "#/definitions/extra-scripts"
        }
      ]
    },
    "script-legacy": {
      "type": "string",
      "description": "A simple script pointing to a callable object."
    },
    "extra-scripts": {
      "type": "object",
      "description": "Either a console entry point or a script file that'll be included in the distribution package.",
      "additionalProperties": false,
      "properties": {
        "reference": {
          "type": "string",
          "description": "If type is file this is the relative path of the script file, if console it is the module name."
        },
        "type": {
          "description": "Value can be either file or console.",
          "type": "string",
          "enum": [
            "file",
            "console"
          ]
        },
        "extras": {
          "type": "array",
          "description": "The required extras for this script. Only applicable if type is console.",
          "items": {
            "type": "string"
          }
        }
      },
      "required": [
        "reference",
        "type"
      ]
    },
    "extra-script-legacy": {
      "type": "object",
      "description": "A script that should be installed only if extras are activated.",
      "additionalProperties": false,
      "properties": {
        "callable": {
          "$ref": "#/definitions/script-legacy",
          "description": "The entry point of the script. Deprecated in favour of reference."
        },
        "extras": {
          "type": "array",
          "description": "The required extras for this script.",
          "items": {
            "type": "string"
          }
        }
      }
    },
    "build-script": {
      "type": "string",
      "description": "The python script file used to build extensions."
    },
    "build-config": {
      "type": "object",
      "description": "Build specific configurations.",
      "additionalProperties": false,
      "properties": {
        "generate-setup-file": {
          "type": "boolean",
          "description": "Generate and include a setup.py file in sdist.",
          "default": false
        },
        "script": {
          "$ref": "#/definitions/build-script"
        }
      }
    },
    "build-section": {
      "oneOf": [
        {
          "$ref": "#/definitions/build-script"
        },
        {
          "$ref": "#/definitions/build-config"
        }
      ]
    }
  }
}
poetry-core-2.1.1/src/poetry/core/json/schemas/project-schema.json000066400000000000000000000166411475444614500252450ustar00rootroot00000000000000{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "name": "project",
  "type": "object",
  "additionalProperties": true,
  "required": [
    "name"
  ],
  "properties": {
    "name": {
      "title": "Project name",
      "type": "string",
      "pattern": "^([a-zA-Z\\d]|[a-zA-Z\\d][\\w.-]*[a-zA-Z\\d])$"
    },
    "version": {
      "title": "Project version",
      "type": "string",
      "pattern": "^v?((([0-9]+)!)?([0-9]+(\\.[0-9]+)*)([-_\\.]?(alpha|a|beta|b|preview|pre|c|rc)[-_\\.]?([0-9]+)?)?((-([0-9]+))|([-_\\.]?(post|rev|r)[-_\\.]?([0-9]+)?))?([-_\\.]?(dev)[-_\\.]?([0-9]+)?)?)(\\+([a-z0-9]+([-_\\.][a-z0-9]+)*))?$",
      "examples": [
        "42.0.1",
        "0.3.9rc7.post0.dev5"
      ]
    },
    "description": {
      "title": "Project summary description",
      "type": "string"
    },
    "readme": {
      "title": "Project full description",
      "description": "AKA the README",
      "oneOf": [
        {
          "title": "README file path",
          "type": "string"
        },
        {
          "type": "object",
          "required": [
            "content-type"
          ],
          "properties": {
            "content-type": {
              "title": "README text content-type",
              "description": "RFC 1341 compliant content-type (with optional charset, defaulting to UTF-8)",
              "type": "string"
            }
          },
          "oneOf": [
            {
              "additionalProperties": false,
              "required": [
                "file"
              ],
              "properties": {
                "content-type": true,
                "file": {
                  "title": "README file path",
                  "type": "string"
                }
              }
            },
            {
              "additionalProperties": false,
              "required": [
                "text"
              ],
              "properties": {
                "content-type": true,
                "text": {
                  "title": "README text",
                  "type": "string"
                }
              }
            }
          ]
        }
      ],
      "examples": [
        "README.md",
        {
          "file": "README.txt",
          "content-type": "text/plain"
        },
        {
          "text": "# Example project\n\nAn example project",
          "content-type": "text/markdown"
        }
      ]
    },
    "requires-python": {
      "title": "Python version compatibility",
      "type": "string",
      "examples": [
        ">= 3.7"
      ]
    },
    "license": {
      "title": "Project license",
      "oneOf": [
        {
          "type": "object",
          "additionalProperties": false,
          "required": [
            "file"
          ],
          "properties": {
            "file": {
              "title": "License file path",
              "type": "string"
            }
          }
        },
        {
          "type": "object",
          "additionalProperties": false,
          "required": [
            "text"
          ],
          "properties": {
            "text": {
              "title": "License text",
              "type": "string"
            }
          }
        },
        {
          "type": "string",
          "description": "A SPDX license identifier"
        }
      ],
      "examples": [
        {
          "text": "MIT"
        },
        {
          "file": "LICENSE"
        },
        "MIT",
        "LicenseRef-Proprietary"
      ]
    },
    "authors": {
      "title": "Project authors",
      "type": "array",
      "items": {
        "$ref": "#/definitions/projectAuthor"
      }
    },
    "maintainers": {
      "title": "Project maintainers",
      "type": "array",
      "items": {
        "$ref": "#/definitions/projectAuthor"
      }
    },
    "keywords": {
      "title": "Project keywords",
      "type": "array",
      "items": {
        "type": "string"
      }
    },
    "classifiers": {
      "title": "Applicable Trove classifiers",
      "type": "array",
      "items": {
        "type": "string"
      }
    },
    "urls": {
      "title": "Project URLs",
      "type": "object",
      "additionalProperties": {
        "type": "string",
        "format": "uri"
      },
      "examples": [
        {
          "homepage": "https://example.com/example-project"
        }
      ]
    },
    "scripts": {
      "title": "Console scripts",
      "type": "object",
      "additionalProperties": {
        "type": "string"
      },
      "examples": [
        {
          "mycmd": "package.module:object.function"
        }
      ]
    },
    "gui-scripts": {
      "title": "GUI scripts",
      "type": "object",
      "additionalProperties": {
        "type": "string"
      },
      "examples": [
        {
          "mycmd": "package.module:object.function"
        }
      ]
    },
    "entry-points": {
      "title": "Other entry-point groups",
      "type": "object",
      "additionalProperties": false,
      "patternProperties": {
        "^\\w+(\\.\\w+)*$": {
          "type": "object",
          "additionalProperties": {
            "type": "string"
          }
        }
      },
      "propertyNames": {
        "not": {
          "anyOf": [
            {
              "const": "console_scripts"
            },
            {
              "const": "gui_scripts"
            }
          ]
        }
      },
      "examples": [
        {
          "pygments.styles": {
            "monokai": "package.module:object.attribute"
          }
        }
      ]
    },
    "dependencies": {
      "title": "Project dependency requirements",
      "type": "array",
      "items": {
        "type": "string"
      },
      "examples": [
        [
          "attrs",
          "requests ~= 2.28"
        ]
      ]
    },
    "optional-dependencies": {
      "title": "Project extra dependency requirements",
      "description": "keys are extra names",
      "type": "object",
      "patternProperties": {
        "^([a-z\\d]|[a-z\\d]([a-z\\d-](?!--))*[a-z\\d])$": {
          "type": "array",
          "items": {
            "type": "string"
          }
        }
      },
      "examples": [
        {
          "typing": [
            "boto3-stubs",
            "typing-extensions ~= 4.1"
          ]
        }
      ]
    },
    "dynamic": {
      "title": "Dynamic metadata values",
      "type": "array",
      "items": {
        "type": "string",
        "enum": [
          "version",
          "description",
          "readme",
          "requires-python",
          "license",
          "authors",
          "maintainers",
          "keywords",
          "classifiers",
          "urls",
          "scripts",
          "gui-scripts",
          "entry-points",
          "dependencies",
          "optional-dependencies"
        ]
      },
      "examples": [
        [
          "version"
        ]
      ]
    }
  },
  "definitions": {
    "projectAuthor": {
      "type": "object",
      "additionalProperties": false,
      "anyOf": [
        {
          "required": [
            "name"
          ],
          "properties": {
            "name": true
          }
        },
        {
          "required": [
            "email"
          ],
          "properties": {
            "email": true
          }
        }
      ],
      "properties": {
        "name": {
          "title": "Author name",
          "type": "string"
        },
        "email": {
          "title": "Author email",
          "type": "string",
          "format": "email"
        }
      }
    }
  }
}
poetry-core-2.1.1/src/poetry/core/masonry/000077500000000000000000000000001475444614500205325ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/masonry/__init__.py000066400000000000000000000003771475444614500226520ustar00rootroot00000000000000"""
This module handles the packaging and publishing
of python projects.

A lot of the code used here has been taken from
`flit `__ and adapted
to work with the poetry codebase, so kudos to them for showing the way.
"""
poetry-core-2.1.1/src/poetry/core/masonry/api.py000066400000000000000000000054771475444614500216720ustar00rootroot00000000000000"""
PEP-517 compliant buildsystem API
"""

from __future__ import annotations

import logging

from pathlib import Path
from typing import Any

from poetry.core.factory import Factory
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.core.masonry.builders.wheel import WheelBuilder


log = logging.getLogger(__name__)


def get_requires_for_build_wheel(
    config_settings: dict[str, Any] | None = None,
) -> list[str]:
    """
    Returns an additional list of requirements for building, as PEP508 strings,
    above and beyond those specified in the pyproject.toml file.

    This implementation is optional. At the moment it only returns an empty list,
    which would be the same as if it were not defined, so this is just here for
    completeness and future implementation.
    """

    return []


# For now, we require all dependencies to build either a wheel or an sdist.
get_requires_for_build_sdist = get_requires_for_build_wheel


def prepare_metadata_for_build_wheel(
    metadata_directory: str, config_settings: dict[str, Any] | None = None
) -> str:
    poetry = Factory().create_poetry(Path().resolve(), with_groups=False)
    builder = WheelBuilder(poetry, config_settings=config_settings)
    metadata_path = Path(metadata_directory)
    dist_info = builder.prepare_metadata(metadata_path)
    return dist_info.name


def build_wheel(
    wheel_directory: str,
    config_settings: dict[str, Any] | None = None,
    metadata_directory: str | None = None,
) -> str:
    """Builds a wheel, places it in wheel_directory"""
    poetry = Factory().create_poetry(Path().resolve(), with_groups=False)
    metadata_path = None if metadata_directory is None else Path(metadata_directory)

    return WheelBuilder.make_in(
        poetry,
        Path(wheel_directory),
        metadata_directory=metadata_path,
        config_settings=config_settings,
    )


def build_sdist(
    sdist_directory: str, config_settings: dict[str, Any] | None = None
) -> str:
    """Builds an sdist, places it in sdist_directory"""
    poetry = Factory().create_poetry(Path().resolve(), with_groups=False)

    path = SdistBuilder(poetry, config_settings=config_settings).build(
        Path(sdist_directory)
    )

    return path.name


def build_editable(
    wheel_directory: str,
    config_settings: dict[str, Any] | None = None,
    metadata_directory: str | None = None,
) -> str:
    poetry = Factory().create_poetry(Path().resolve(), with_groups=False)
    metadata_path = None if metadata_directory is None else Path(metadata_directory)

    return WheelBuilder.make_in(
        poetry,
        Path(wheel_directory),
        metadata_directory=metadata_path,
        editable=True,
        config_settings=config_settings,
    )


get_requires_for_build_editable = get_requires_for_build_wheel
prepare_metadata_for_build_editable = prepare_metadata_for_build_wheel
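

# Illustrative usage sketch (not part of the published API): how a PEP 517
# frontend might drive these hooks directly. This assumes it is executed from
# a directory containing a Poetry-managed pyproject.toml; real frontends such
# as pip or build invoke the hooks in an isolated build environment instead.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as out:
        print("build requirements:", get_requires_for_build_wheel())
        print("sdist:", build_sdist(out))
        print("wheel:", build_wheel(out))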
poetry-core-2.1.1/src/poetry/core/masonry/builders/000077500000000000000000000000001475444614500223435ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/masonry/builders/__init__.py000066400000000000000000000000001475444614500244420ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/masonry/builders/builder.py000066400000000000000000000311211475444614500243410ustar00rootroot00000000000000from __future__ import annotations

import logging
import sys
import textwrap

from functools import cached_property
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any


if TYPE_CHECKING:
    from poetry.core.masonry.utils.module import Module
    from poetry.core.poetry import Poetry


METADATA_BASE = """\
Metadata-Version: 2.3
Name: {name}
Version: {version}
Summary: {summary}
"""

logger = logging.getLogger(__name__)


class Builder:
    format: str | None = None

    def __init__(
        self,
        poetry: Poetry,
        executable: Path | None = None,
        config_settings: dict[str, Any] | None = None,
    ) -> None:
        from poetry.core.masonry.metadata import Metadata

        if not poetry.is_package_mode:
            raise RuntimeError(
                "Building a package is not possible in non-package mode."
            )

        self._config_settings = config_settings or {}

        self._poetry = poetry
        self._apply_local_version_label()

        self._package = poetry.package
        self._path: Path = poetry.pyproject_path.parent
        self._excluded_files: set[str] | None = None
        self._executable = Path(executable or sys.executable)
        self._meta = Metadata.from_package(self._package)

    @cached_property
    def _module(self) -> Module:
        from poetry.core.masonry.utils.module import Module

        packages = [
            item
            for item in self._package.packages
            if not self.format or self.format in item["format"]
        ]
        includes = [
            item
            for item in self._package.include
            if not self.format or self.format in item["format"]
        ]

        return Module(
            self._package.name,
            self._path.as_posix(),
            packages=packages,
            includes=includes,
        )

    @property
    def executable(self) -> Path:
        return self._executable

    @property
    def default_target_dir(self) -> Path:
        return self._path / "dist"

    def _apply_local_version_label(self) -> None:
        """Apply local version label from config settings to the poetry package version if present."""
        if local_version_label := self._config_settings.get("local-version"):
            self._poetry.package.version = self._poetry.package.version.replace(
                local=local_version_label
            )

    def build(self, target_dir: Path | None) -> Path:
        raise NotImplementedError

    def find_excluded_files(self, fmt: str | None = None) -> set[str]:
        if self._excluded_files is None:
            from poetry.core.vcs import get_vcs

            # Checking VCS
            vcs = get_vcs(self._path)
            vcs_ignored_files = set(vcs.get_ignored_files()) if vcs else set()

            explicitly_excluded = set()
            for excluded_glob in self._package.exclude:
                for excluded in self._path.glob(str(excluded_glob)):
                    explicitly_excluded.add(
                        Path(excluded).relative_to(self._path).as_posix()
                    )

            explicitly_included = set()
            for inc in self._module.explicit_includes:
                if fmt and fmt not in inc.formats:
                    continue

                for included in inc.elements:
                    explicitly_included.add(included.relative_to(self._path).as_posix())

            ignored = (vcs_ignored_files | explicitly_excluded) - explicitly_included
            for ignored_file in ignored:
                logger.debug(f"Ignoring: {ignored_file}")

            self._excluded_files = ignored

        return self._excluded_files

    def is_excluded(self, filepath: str | Path) -> bool:
        exclude_path = Path(filepath)

        if "__pycache__" in exclude_path.parts or exclude_path.suffix == ".pyc":
            return True

        while True:
            if exclude_path.as_posix() in self.find_excluded_files(fmt=self.format):
                return True

            if len(exclude_path.parts) > 1:
                exclude_path = exclude_path.parent
            else:
                break

        return False

    def find_files_to_add(self, exclude_build: bool = True) -> set[BuildIncludeFile]:
        """
        Finds all files to add to the archive
        """
        from poetry.core.masonry.utils.package_include import PackageInclude

        to_add = set()

        for include in self._module.includes:
            include.refresh()
            formats = include.formats

            for file in include.elements:
                if "__pycache__" in file.parts:
                    # This is just a shortcut. It will be ignored later anyway.
                    continue

                if (
                    isinstance(include, PackageInclude)
                    and include.source
                    and self.format == "wheel"
                ):
                    source_root = include.base
                else:
                    source_root = self._path

                if (
                    isinstance(include, PackageInclude)
                    and include.target
                    and self.format == "wheel"
                ):
                    target_dir = include.target
                else:
                    target_dir = None

                if file.is_dir():
                    if self.format in formats:
                        for current_file in file.glob("**/*"):
                            include_file = BuildIncludeFile(
                                path=current_file,
                                project_root=self._path,
                                source_root=source_root,
                                target_dir=target_dir,
                            )

                            if not (
                                current_file.is_dir()
                                or self.is_excluded(
                                    include_file.relative_to_project_root()
                                )
                            ):
                                to_add.add(include_file)
                    continue

                include_file = BuildIncludeFile(
                    path=file,
                    project_root=self._path,
                    source_root=source_root,
                    target_dir=target_dir,
                )

                if self.is_excluded(
                    include_file.relative_to_project_root()
                ) and isinstance(include, PackageInclude):
                    continue

                logger.debug(f"Adding: {file}")
                to_add.add(include_file)

        # add build script if it is specified and explicitly required
        if self._package.build_script and not exclude_build:
            to_add.add(
                BuildIncludeFile(
                    path=self._package.build_script,
                    project_root=self._path,
                    source_root=self._path,
                )
            )

        return to_add

    def get_metadata_content(self) -> str:
        content = METADATA_BASE.format(
            name=self._meta.name,
            version=self._meta.version,
            summary=str(self._meta.summary),
        )

        if self._meta.license:
            license_field = "License: "
            # Indentation is not only for readability, but required
            # so that the line break is not treated as end of field.
            # The exact indentation does not matter,
            # but it is essential to also indent empty lines.
            escaped_license = textwrap.indent(
                self._meta.license, " " * len(license_field), lambda line: True
            ).strip()
            content += f"{license_field}{escaped_license}\n"

        if self._meta.keywords:
            content += f"Keywords: {self._meta.keywords}\n"

        if self._meta.author:
            content += f"Author: {self._meta.author}\n"

        if self._meta.author_email:
            content += f"Author-email: {self._meta.author_email}\n"

        if self._meta.maintainer:
            content += f"Maintainer: {self._meta.maintainer}\n"

        if self._meta.maintainer_email:
            content += f"Maintainer-email: {self._meta.maintainer_email}\n"

        if self._meta.requires_python:
            content += f"Requires-Python: {self._meta.requires_python}\n"

        for classifier in self._meta.classifiers:
            content += f"Classifier: {classifier}\n"

        for extra in sorted(self._meta.provides_extra):
            content += f"Provides-Extra: {extra}\n"

        for dep in sorted(self._meta.requires_dist):
            content += f"Requires-Dist: {dep}\n"

        for url in sorted(self._meta.project_urls, key=lambda u: u[0]):
            content += f"Project-URL: {url}\n"

        if self._meta.description_content_type:
            content += (
                f"Description-Content-Type: {self._meta.description_content_type}\n"
            )

        if self._meta.description is not None:
            content += f"\n{self._meta.description}\n"

        return content

    def convert_entry_points(self) -> dict[str, list[str]]:
        result: dict[str, list[str]] = {}

        for group_name, group in self._poetry.package.entry_points.items():
            if group_name == "console-scripts":
                group_name = "console_scripts"
            elif group_name == "gui-scripts":
                group_name = "gui_scripts"
            result[group_name] = sorted(
                f"{name} = {specification}" for name, specification in group.items()
            )

        return result

    def convert_script_files(self) -> list[Path]:
        script_files: list[Path] = []

        for name, specification in self._poetry.local_config.get("scripts", {}).items():
            if isinstance(specification, dict) and specification.get("type") == "file":
                source = specification["reference"]

                if Path(source).is_absolute():
                    raise RuntimeError(
                        f"{source} in {name} is an absolute path. Expected relative"
                        " path."
                    )

                abs_path = self._path / source

                if not self._package.build_script:
                    # scripts can be generated by build_script, in this case they do not exist here
                    if not abs_path.exists():
                        raise RuntimeError(
                            f"{abs_path} in script specification ({name}) is not found."
                        )
                    if not abs_path.is_file():
                        raise RuntimeError(
                            f"{abs_path} in script specification ({name}) is not a file."
                        )

                script_files.append(abs_path)

        return script_files

    def _get_legal_files(self) -> set[Path]:
        include_files_patterns = {"COPYING*", "LICEN[SC]E*", "AUTHORS*", "NOTICE*"}
        files: set[Path] = set()

        for pattern in include_files_patterns:
            files.update(self._path.glob(pattern))

        files.update(self._path.joinpath("LICENSES").glob("**/*"))
        return files


class BuildIncludeFile:
    def __init__(
        self,
        path: Path | str,
        project_root: Path | str,
        source_root: Path | str,
        target_dir: Path | str | None = None,
    ) -> None:
        """
        :param path: a path to the file to be included, either absolute or
            relative to source_root
        :param project_root: the full path of the project's root
        :param source_root: the full root path against which relative paths
            are resolved
        :param target_dir: the relative target root to resolve to
        """
        self.path = Path(path)
        self.project_root = Path(project_root).resolve()
        self.source_root = Path(source_root).resolve()
        self.target_dir = None if not target_dir else Path(target_dir)
        if not self.path.is_absolute():
            self.path = self.source_root / self.path

        self.path = self.path.resolve()

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, BuildIncludeFile):
            return False

        return self.path == other.path

    def __hash__(self) -> int:
        return hash(self.path)

    def __repr__(self) -> str:
        return str(self.path)

    def relative_to_project_root(self) -> Path:
        return self.path.relative_to(self.project_root)

    def relative_to_source_root(self) -> Path:
        return self.path.relative_to(self.source_root)

    def relative_to_target_root(self) -> Path:
        path = self.relative_to_source_root()
        if self.target_dir is not None:
            return self.target_dir / path
        return path
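

# Illustrative sketch (not part of the library): how BuildIncludeFile resolves
# a relative path against its roots. All paths below are hypothetical.
if __name__ == "__main__":
    demo = BuildIncludeFile(
        path="mypkg/data.txt",  # relative, so it is resolved under source_root
        project_root="/repo",
        source_root="/repo/src",
        target_dir="mypkg_data",
    )
    print(demo.relative_to_source_root())   # mypkg/data.txt
    print(demo.relative_to_target_root())   # mypkg_data/mypkg/data.txt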
poetry-core-2.1.1/src/poetry/core/masonry/builders/sdist.py000066400000000000000000000356721475444614500240600ustar00rootroot00000000000000from __future__ import annotations

import logging
import os
import re
import tarfile

from collections import defaultdict
from contextlib import contextmanager
from copy import copy
from functools import cached_property
from gzip import GzipFile
from io import BytesIO
from pathlib import Path
from posixpath import join as pjoin
from pprint import pformat
from typing import TYPE_CHECKING

from poetry.core.masonry.builders.builder import Builder
from poetry.core.masonry.builders.builder import BuildIncludeFile
from poetry.core.masonry.utils.helpers import distribution_name


if TYPE_CHECKING:
    from collections.abc import Iterator
    from tarfile import TarInfo

    from poetry.core.masonry.utils.package_include import PackageInclude
    from poetry.core.packages.dependency import Dependency
    from poetry.core.packages.project_package import ProjectPackage

SETUP = """\
# -*- coding: utf-8 -*-
from setuptools import setup

{before}
setup_kwargs = {{
    'name': {name!r},
    'version': {version!r},
    'description': {description!r},
    'long_description': {long_description!r},
    'author': {author!r},
    'author_email': {author_email!r},
    'maintainer': {maintainer!r},
    'maintainer_email': {maintainer_email!r},
    'url': {url!r},
    {extra}
}}
{after}

setup(**setup_kwargs)
"""

logger = logging.getLogger(__name__)


class SdistBuilder(Builder):
    format = "sdist"

    def build(
        self,
        target_dir: Path | None = None,
    ) -> Path:
        logger.info("Building sdist")
        target_dir = target_dir or self.default_target_dir

        if not target_dir.exists():
            target_dir.mkdir(parents=True)

        name = distribution_name(self._package.name)
        target = target_dir / f"{name}-{self._meta.version}.tar.gz"
        gz = GzipFile(target.as_posix(), mode="wb", mtime=self._archive_mtime)
        tar = tarfile.TarFile(
            target.as_posix(), mode="w", fileobj=gz, format=tarfile.PAX_FORMAT
        )

        try:
            tar_dir = f"{name}-{self._meta.version}"

            files_to_add = self.find_files_to_add(exclude_build=False)

            for file in sorted(files_to_add, key=lambda x: x.relative_to_source_root()):
                tar_info = tar.gettarinfo(
                    str(file.path),
                    arcname=pjoin(tar_dir, str(file.relative_to_source_root())),
                )
                tar_info = self.clean_tarinfo(tar_info)

                if tar_info.isreg():
                    with file.path.open("rb") as f:
                        tar.addfile(tar_info, f)
                else:
                    tar.addfile(tar_info)  # Symlinks and other non-regular entries have no payload

            if self._poetry.package.build_should_generate_setup():
                setup = self.build_setup()
                self.add_file_to_tar(tar, pjoin(tar_dir, "setup.py"), setup)

            pkg_info = self.build_pkg_info()
            self.add_file_to_tar(tar, pjoin(tar_dir, "PKG-INFO"), pkg_info)
        finally:
            tar.close()
            gz.close()

        logger.info(f"Built {target.name}")
        return target

    def add_file_to_tar(
        self, tar: tarfile.TarFile, file_name: str, content: bytes
    ) -> None:
        tar_info = tarfile.TarInfo(file_name)
        tar_info.size = len(content)
        tar_info = self.clean_tarinfo(tar_info)
        tar.addfile(tar_info, BytesIO(content))

    def build_setup(self) -> bytes:
        from poetry.core.masonry.utils.package_include import PackageInclude

        before, extra, after = [], [], []
        package_dir: dict[str, str] = {}

        # If we have a build script, use it
        if self._package.build_script:
            import_name = ".".join(
                Path(self._package.build_script).with_suffix("").parts
            )
            after += [f"from {import_name} import *", "build(setup_kwargs)"]

        modules = []
        packages = []
        package_data = {}
        for include in self._module.includes:
            if include.formats and "sdist" not in include.formats:
                continue

            if isinstance(include, PackageInclude):
                if include.is_package():
                    pkg_dir, _packages, _package_data = self.find_packages(include)

                    if pkg_dir is not None:
                        pkg_root = os.path.relpath(pkg_dir, str(self._path))
                        if "" in package_dir:
                            package_dir.update(
                                (p, (Path(pkg_root) / p.replace(".", "/")).as_posix())
                                for p in _packages
                            )
                        else:
                            package_dir[""] = pkg_root

                    packages += [p for p in _packages if p not in packages]
                    package_data.update(_package_data)
                else:
                    module = include.elements[0].relative_to(include.base).stem

                    if include.source is not None:
                        package_dir[""] = str(include.base.relative_to(self._path))

                    if module not in modules:
                        modules.append(module)

        if package_dir:
            before.append(f"package_dir = \\\n{pformat(package_dir)}\n")
            extra.append("'package_dir': package_dir,")

        if packages:
            before.append(f"packages = \\\n{pformat(sorted(packages))}\n")
            extra.append("'packages': packages,")

        if package_data:
            before.append(f"package_data = \\\n{pformat(package_data)}\n")
            extra.append("'package_data': package_data,")

        if modules:
            before.append(f"modules = \\\n{pformat(modules)}")
            extra.append("'py_modules': modules,")

        dependencies, extras = self.convert_dependencies(
            self._package, self._package.requires
        )
        if dependencies:
            before.append(f"install_requires = \\\n{pformat(sorted(dependencies))}\n")
            extra.append("'install_requires': install_requires,")

        if extras:
            before.append(f"extras_require = \\\n{pformat(extras)}\n")
            extra.append("'extras_require': extras_require,")

        entry_points = self.convert_entry_points()
        if entry_points:
            before.append(f"entry_points = \\\n{pformat(entry_points)}\n")
            extra.append("'entry_points': entry_points,")

        script_files = self.convert_script_files()
        if script_files:
            rel_paths = [str(p.relative_to(self._path)) for p in script_files]
            before.append(f"scripts = \\\n{pformat(rel_paths)}\n")
            extra.append("'scripts': scripts,")

        if self._meta.requires_python:
            extra.append(f"'python_requires': {self._meta.requires_python!r},")

        return SETUP.format(
            before="\n".join(before),
            name=str(self._meta.name),
            version=self._meta.version,
            description=str(self._meta.summary),
            long_description=str(self._meta.description),
            author=str(self._meta.author),
            author_email=str(self._meta.author_email),
            maintainer=str(self._meta.maintainer),
            maintainer_email=str(self._meta.maintainer_email),
            url=str(self._meta.home_page),
            extra="\n    ".join(extra),
            after="\n".join(after),
        ).encode()

    @contextmanager
    def setup_py(self) -> Iterator[Path]:
        setup = self._path / "setup.py"
        has_setup = setup.exists()

        if has_setup:
            logger.warning("A setup.py file already exists. Using it.")
        else:
            with setup.open("w", encoding="utf-8") as f:
                f.write(self.build_setup().decode())

        yield setup

        if not has_setup:
            setup.unlink()

    def build_pkg_info(self) -> bytes:
        return self.get_metadata_content().encode()

    def find_packages(
        self, include: PackageInclude
    ) -> tuple[str | None, list[str], dict[str, list[str]]]:
        """
        Discover subpackages and data.

        It also retrieves necessary files.
        """
        pkgdir = None
        if include.source is not None:
            pkgdir = str(include.base)

        base = str(include.elements[0].parent)

        pkg_name = include.package
        pkg_data: dict[str, list[str]] = defaultdict(list)
        # Undocumented setup() feature:
        # the empty string matches all package names
        pkg_data[""].append("*")
        packages = [pkg_name]
        subpkg_paths = set()

        def find_nearest_pkg(rel_path: str) -> tuple[str, str]:
            parts = Path(rel_path).parts
            for i in reversed(range(1, len(parts))):
                ancestor = "/".join(parts[:i])
                if ancestor in subpkg_paths:
                    pkg = ".".join([pkg_name, *parts[:i]])
                    return pkg, "/".join(parts[i:])

            # Relative to the top-level package
            return pkg_name, Path(rel_path).as_posix()

        for path, _dirnames, filenames in os.walk(base, topdown=True):
            if Path(path).name == "__pycache__":
                # This is just a shortcut. It will be ignored later anyway.
                continue

            from_top_level = os.path.relpath(path, base)
            if from_top_level == ".":
                continue

            is_subpkg = any(
                filename.endswith(".py") for filename in filenames
            ) and not all(
                self.is_excluded(Path(path, filename).relative_to(self._path))
                for filename in filenames
                if filename.endswith(".py")
            )
            if is_subpkg:
                subpkg_paths.add(from_top_level)
                parts = Path(from_top_level).parts
                packages.append(".".join([pkg_name, *parts]))
            else:
                pkg, from_nearest_pkg = find_nearest_pkg(from_top_level)

                data_elements = [
                    f.relative_to(self._path)
                    for f in Path(path).glob("*")
                    if not f.is_dir()
                ]

                data = [e for e in data_elements if not self.is_excluded(e)]
                if not data:
                    continue

                if len(data) == len(data_elements):
                    pkg_data[pkg].append(pjoin(from_nearest_pkg, "*"))
                else:
                    for d in data:
                        if d.is_dir():
                            continue

                        pkg_data[pkg].append(pjoin(from_nearest_pkg, d.name))

        # Sort values in pkg_data
        pkg_data = {k: sorted(v) for (k, v) in pkg_data.items() if v}

        return pkgdir, sorted(packages), pkg_data

    def find_files_to_add(self, exclude_build: bool = False) -> set[BuildIncludeFile]:
        to_add = super().find_files_to_add(exclude_build)

        # add any additional files
        additional_files: set[Path] = set()

        # add legal files
        additional_files.update(self._get_legal_files())

        # add script files
        additional_files.update(self.convert_script_files())

        # Include project files
        additional_files.add(Path("pyproject.toml"))

        # add readme files if specified
        additional_files.update(Path(r) for r in self._poetry.package.readmes)

        for additional_file in additional_files:
            file = BuildIncludeFile(
                path=additional_file, project_root=self._path, source_root=self._path
            )
            if file.path.exists():
                logger.debug(f"Adding: {file.relative_to_source_root()}")
                to_add.add(file)

        return to_add

    @classmethod
    def convert_dependencies(
        cls, package: ProjectPackage, dependencies: list[Dependency]
    ) -> tuple[list[str], dict[str, list[str]]]:
        main = []
        extras = defaultdict(list)
        req_regex = re.compile(r"^(.+) \((.+)\)$")

        for dependency in dependencies:
            if dependency.is_optional():
                for extra_name, reqs in package.extras.items():
                    for req in reqs:
                        if req.name == dependency.name:
                            requirement = dependency.to_pep_508(with_extras=False)
                            if ";" in requirement:
                                requirement, conditions = requirement.split(";")

                                requirement = requirement.strip()
                                if req_regex.match(requirement):
                                    requirement = req_regex.sub(
                                        "\\1\\2", requirement.strip()
                                    )

                                extras[extra_name + ":" + conditions.strip()].append(
                                    requirement
                                )

                                continue

                            requirement = requirement.strip()
                            if req_regex.match(requirement):
                                requirement = req_regex.sub(
                                    "\\1\\2", requirement.strip()
                                )
                            extras[extra_name].append(requirement)
                continue

            requirement = dependency.to_pep_508()
            if ";" in requirement:
                requirement, conditions = requirement.split(";")

                requirement = requirement.strip()
                if req_regex.match(requirement):
                    requirement = req_regex.sub("\\1\\2", requirement.strip())

                extras[":" + conditions.strip()].append(requirement)

                continue

            requirement = requirement.strip()
            if req_regex.match(requirement):
                requirement = req_regex.sub("\\1\\2", requirement.strip())

            main.append(requirement)

        return main, dict(extras)

    def clean_tarinfo(self, tar_info: TarInfo) -> TarInfo:
        """
        Clean metadata from a TarInfo object to make it more reproducible.

            - Set uid & gid to 0
            - Set uname and gname to ""
            - Normalise permissions to 644 or 755
            - Set mtime to a reproducible timestamp (SOURCE_DATE_EPOCH or 0)
        """
        from poetry.core.masonry.utils.helpers import normalize_file_permissions

        ti = copy(tar_info)
        ti.uid = 0
        ti.gid = 0
        ti.uname = ""
        ti.gname = ""
        ti.mtime = self._archive_mtime
        ti.mode = normalize_file_permissions(ti.mode)

        return ti

    @cached_property
    def _archive_mtime(self) -> int:
        if source_date_epoch := os.getenv("SOURCE_DATE_EPOCH"):
            try:
                return int(source_date_epoch)
            except ValueError:
                logger.warning(
                    "SOURCE_DATE_EPOCH environment variable is not an int,"
                    " using mtime=0"
                )
                return 0
        logger.debug("SOURCE_DATE_EPOCH environment variable is not set, using mtime=0")
        return 0
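

# Illustrative sketch (not part of the library): the requirement rewriting used
# by convert_dependencies, which strips the parentheses from PEP 508 strings of
# the form "name (constraint)" to produce the style setuptools expects.
if __name__ == "__main__":
    demo_regex = re.compile(r"^(.+) \((.+)\)$")
    print(demo_regex.sub("\\1\\2", "requests (>=2.28,<3.0)"))  # requests>=2.28,<3.0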
poetry-core-2.1.1/src/poetry/core/masonry/builders/wheel.py000066400000000000000000000457701475444614500240360ustar00rootroot00000000000000from __future__ import annotations

import contextlib
import csv
import hashlib
import logging
import os
import shutil
import stat
import subprocess
import sys
import sysconfig
import tempfile
import zipfile

from base64 import urlsafe_b64encode
from functools import cached_property
from io import StringIO
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import TextIO

import packaging.tags

from poetry.core import __version__
from poetry.core.constraints.version import parse_constraint
from poetry.core.masonry.builders.builder import Builder
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.core.masonry.utils.helpers import distribution_name
from poetry.core.masonry.utils.helpers import normalize_file_permissions
from poetry.core.masonry.utils.package_include import PackageInclude
from poetry.core.utils.helpers import temporary_directory


if TYPE_CHECKING:
    from collections.abc import Iterator

    from packaging.utils import NormalizedName

    from poetry.core.poetry import Poetry

    ZipInfoTimestamp = tuple[int, int, int, int, int, int]

wheel_file_template = """\
Wheel-Version: 1.0
Generator: poetry-core {version}
Root-Is-Purelib: {pure_lib}
Tag: {tag}
"""

logger = logging.getLogger(__name__)


class WheelBuilder(Builder):
    format = "wheel"

    def __init__(
        self,
        poetry: Poetry,
        original: Path | None = None,
        executable: Path | None = None,
        editable: bool = False,
        metadata_directory: Path | None = None,
        config_settings: dict[str, Any] | None = None,
    ) -> None:
        super().__init__(poetry, executable=executable, config_settings=config_settings)

        self._records: list[tuple[str, str, int]] = []
        self._original_path = self._path
        if original:
            self._original_path = original.parent
        self._editable = editable
        self._metadata_directory = metadata_directory

    @classmethod
    def make_in(
        cls,
        poetry: Poetry,
        directory: Path | None = None,
        original: Path | None = None,
        executable: Path | None = None,
        editable: bool = False,
        metadata_directory: Path | None = None,
        config_settings: dict[str, Any] | None = None,
    ) -> str:
        wb = WheelBuilder(
            poetry,
            original=original,
            executable=executable,
            editable=editable,
            metadata_directory=metadata_directory,
            config_settings=config_settings,
        )
        wb.build(target_dir=directory)

        return wb.wheel_filename

    @classmethod
    def make(cls, poetry: Poetry, executable: Path | None = None) -> None:
        """Build a wheel in the dist/ directory, and optionally upload it."""
        cls.make_in(poetry, executable=executable)

    def build(
        self,
        target_dir: Path | None = None,
    ) -> Path:
        logger.info("Building wheel")

        target_dir = target_dir or self.default_target_dir
        if not target_dir.exists():
            target_dir.mkdir(parents=True)

        fd, temp = tempfile.mkstemp(suffix=".whl")

        temp_path = Path(temp)
        st_mode = temp_path.stat().st_mode
        new_mode = normalize_file_permissions(st_mode)
        temp_path.chmod(new_mode)

        with (
            os.fdopen(fd, "w+b") as fd_file,
            zipfile.ZipFile(
                fd_file, mode="w", compression=zipfile.ZIP_DEFLATED
            ) as zip_file,
        ):
            if self._editable:
                self._build(zip_file)
                self._add_pth(zip_file)
            elif self._poetry.package.build_should_generate_setup():
                self._copy_module(zip_file)
                self._build(zip_file)
            else:
                self._build(zip_file)
                self._copy_module(zip_file)

            self._copy_file_scripts(zip_file)

            if self._metadata_directory is None:
                with temporary_directory() as temp_dir:
                    metadata_directory = self.prepare_metadata(Path(temp_dir))
                    self._copy_dist_info(zip_file, metadata_directory)
            else:
                self._copy_dist_info(zip_file, self._metadata_directory)

            self._write_record(zip_file)

        wheel_path = target_dir / self.wheel_filename
        if wheel_path.exists():
            wheel_path.unlink()
        shutil.move(str(temp_path), str(wheel_path))

        logger.info(f"Built {self.wheel_filename}")
        return wheel_path

    def _add_pth(self, wheel: zipfile.ZipFile) -> None:
        paths = set()
        for include in self._module.includes:
            if isinstance(include, PackageInclude) and (
                include.is_module() or include.is_package()
            ):
                paths.add(include.base.resolve().as_posix())

        content = ""
        for path in paths:
            content += path + os.linesep

        pth_file = Path(self._module.name).with_suffix(".pth")

        with self._write_to_zip(wheel, str(pth_file)) as f:
            f.write(content)

    def _build(self, wheel: zipfile.ZipFile) -> None:
        if self._package.build_script:
            if not self._poetry.package.build_should_generate_setup():
                # Since we have a build script but no setup.py generation is required,
                # we assume that the build script will build and copy the files
                # directly.
                # That way they will be picked up when adding files to the wheel.
                current_path = Path.cwd()
                try:
                    os.chdir(self._path)
                    self._run_build_script(self._package.build_script)
                finally:
                    os.chdir(current_path)
            else:
                with SdistBuilder(poetry=self._poetry).setup_py() as setup:
                    # We need to place ourselves in the temporary
                    # directory in order to build the package
                    current_path = Path.cwd()
                    try:
                        os.chdir(self._path)
                        self._run_build_command(setup)
                    finally:
                        os.chdir(current_path)

                    if self._editable:
                        # For an editable install, the extension modules will be built
                        # in-place - so there's no need to copy them into the zip
                        return

                    lib = self._get_build_lib_dir()
                    if lib is None:
                        # The result of building the extensions
                        # does not exist; this may be due to conditional
                        # builds, so we assume that it's okay
                        return

                    for pkg in sorted(lib.glob("**/*")):
                        if pkg.is_dir() or self.is_excluded(pkg):
                            continue

                        rel_path = pkg.relative_to(lib)

                        if rel_path.as_posix() in wheel.namelist():
                            continue

                        logger.debug(f"Adding: {rel_path}")

                        self._add_file(wheel, pkg, rel_path)

    def _get_build_purelib_dir(self) -> Path:
        return self._path / "build" / "lib"

    def _get_build_platlib_dir(self) -> Path:
        # Roughly equivalent to the naming convention used by distutils, see:
        # distutils.command.build.build.finalize_options
        if self.executable != Path(sys.executable):
            # poetry-core is not run in the build environment
            # -> this is probably not a PEP 517 build but a poetry build
            try:
                output = subprocess.check_output(
                    [
                        self.executable.as_posix(),
                        "-c",
                        """
import sysconfig
import sys
print(sysconfig.get_platform(), sys.implementation.cache_tag, sep='-')
""",
                    ],
                    stderr=subprocess.STDOUT,
                    text=True,
                    encoding="utf-8",
                )
            except subprocess.CalledProcessError as e:
                raise RuntimeError(
                    "Failed to get build_platlib_dir for python interpreter"
                    f" '{self.executable.as_posix()}':\n{e.output}"
                )
            plat_specifier = output.strip()
        else:
            plat_specifier = "-".join(
                (sysconfig.get_platform(), sys.implementation.cache_tag)
            )
        return self._path / "build" / f"lib.{plat_specifier}"

    def _get_build_lib_dir(self) -> Path | None:
        # Either the purelib or platlib path will have been used when building
        build_platlib = self._get_build_platlib_dir()
        build_purelib = self._get_build_purelib_dir()
        if build_platlib.exists():
            return build_platlib
        elif build_purelib.exists():
            return build_purelib
        return None

    def _copy_file_scripts(self, wheel: zipfile.ZipFile) -> None:
        file_scripts = self.convert_script_files()

        for abs_path in file_scripts:
            self._add_file(
                wheel,
                abs_path,
                Path(self.wheel_data_folder) / "scripts" / abs_path.name,
            )

    def _run_build_command(self, setup: Path) -> None:
        if self._editable:
            subprocess.check_call(
                [
                    self.executable.as_posix(),
                    str(setup),
                    "build_ext",
                    "--inplace",
                ]
            )
        subprocess.check_call(
            [
                self.executable.as_posix(),
                str(setup),
                "build",
                "-b",
                str(self._path / "build"),
                "--build-purelib",
                str(self._get_build_purelib_dir()),
                "--build-platlib",
                str(self._get_build_platlib_dir()),
            ]
        )

    def _run_build_script(self, build_script: str) -> None:
        logger.debug(f"Executing build script: {build_script}")
        subprocess.check_call([self.executable.as_posix(), build_script])

    def _copy_module(self, wheel: zipfile.ZipFile) -> None:
        to_add = self.find_files_to_add()

        # Walk the files and compress them,
        # sorting everything so the order is stable.
        for file in sorted(to_add, key=lambda x: x.path):
            self._add_file(wheel, file.path, file.relative_to_target_root())

    def prepare_metadata(self, metadata_directory: Path) -> Path:
        dist_info = metadata_directory / self.dist_info
        dist_info.mkdir(parents=True, exist_ok=True)

        if self._poetry.package.entry_points:
            with (dist_info / "entry_points.txt").open(
                "w", encoding="utf-8", newline="\n"
            ) as f:
                self._write_entry_points(f)

        with (dist_info / "WHEEL").open("w", encoding="utf-8", newline="\n") as f:
            self._write_wheel_file(f)

        with (dist_info / "METADATA").open("w", encoding="utf-8", newline="\n") as f:
            self._write_metadata_file(f)

        for legal_file in self._get_legal_files():
            if not legal_file.is_file():
                logger.debug(f"Skipping: {legal_file.as_posix()}")
                continue

            dest = dist_info / legal_file.relative_to(self._path)
            dest.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy(legal_file, dest)

        return dist_info

    def _write_record(self, wheel: zipfile.ZipFile) -> None:
        # Write a record of the files in the wheel
        with self._write_to_zip(wheel, self.dist_info + "/RECORD") as f:
            record = StringIO()

            csv_writer = csv.writer(
                record,
                delimiter=csv.excel.delimiter,
                quotechar=csv.excel.quotechar,
                lineterminator="\n",
            )
            for path, file_hash, size in self._records:
                csv_writer.writerow((path, f"sha256={file_hash}", size))

            # RECORD itself is recorded with no hash or size
            csv_writer.writerow((self.dist_info + "/RECORD", "", ""))

            f.write(record.getvalue())

    def _copy_dist_info(self, wheel: zipfile.ZipFile, source: Path) -> None:
        dist_info = Path(self.dist_info)
        for file in sorted(source.glob("**/*")):
            if not file.is_file():
                continue

            rel_path = file.relative_to(source)
            target = dist_info / rel_path
            self._add_file(wheel, file, target)

    @property
    def dist_info(self) -> str:
        return self.dist_info_name(self._package.name, self._meta.version)

    @property
    def wheel_data_folder(self) -> str:
        name = distribution_name(self._package.name)
        return f"{name}-{self._meta.version}.data"

    @property
    def wheel_filename(self) -> str:
        name = distribution_name(self._package.name)
        version = self._meta.version
        return f"{name}-{version}-{self.tag}.whl"

    def supports_python2(self) -> bool:
        return self._package.python_constraint.allows_any(
            parse_constraint(">=2.0.0 <3.0.0")
        )

    def dist_info_name(self, name: NormalizedName, version: str) -> str:
        escaped_name = distribution_name(name)
        return f"{escaped_name}-{version}.dist-info"

    def _get_sys_tags(self) -> list[str]:
        """Get sys_tags via subprocess.
        Required if poetry-core is not run inside the build environment.
        """
        try:
            output = subprocess.check_output(
                [
                    self.executable.as_posix(),
                    "-c",
                    f"""
import importlib.util
import sys

from pathlib import Path

spec = importlib.util.spec_from_file_location(
    "packaging", Path(r"{packaging.__file__}")
)

packaging = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = packaging

spec = importlib.util.spec_from_file_location(
    "packaging.tags", Path(r"{packaging.tags.__file__}")
)
packaging_tags = importlib.util.module_from_spec(spec)
spec.loader.exec_module(packaging_tags)
for t in packaging_tags.sys_tags():
    print(t.interpreter, t.abi, t.platform, sep="-")
""",
                ],
                stderr=subprocess.STDOUT,
                text=True,
                encoding="utf-8",
            )
        except subprocess.CalledProcessError as e:
            raise RuntimeError(
                "Failed to get sys_tags for python interpreter"
                f" '{self.executable.as_posix()}':\n{e.output}"
            )
        return output.strip().splitlines()

    @property
    def tag(self) -> str:
        if self._package.build_script:
            if self.executable != Path(sys.executable):
                # poetry-core is not run in the build environment
                # -> this is probably not a PEP 517 build but a poetry build
                return self._get_sys_tags()[0]
            sys_tag = next(packaging.tags.sys_tags())
            tag = (sys_tag.interpreter, sys_tag.abi, sys_tag.platform)
        else:
            platform = "any"
            impl = "py2.py3" if self.supports_python2() else "py3"
            tag = (impl, "none", platform)
        return "-".join(tag)

    def _add_file(
        self,
        wheel: zipfile.ZipFile,
        full_path: Path,
        rel_path: Path,
    ) -> None:
        # We always want to have /-separated paths in the zip file and in RECORD
        rel_path_name = rel_path.as_posix()
        zinfo = zipfile.ZipInfo(rel_path_name, self._zipfile_date_time)

        # Normalize permission bits to either 755 (executable) or 644
        st_mode = full_path.stat().st_mode
        new_mode = normalize_file_permissions(st_mode)
        zinfo.external_attr = (new_mode & 0xFFFF) << 16  # Unix attributes

        if stat.S_ISDIR(st_mode):
            zinfo.external_attr |= 0x10  # MS-DOS directory flag

        hashsum = hashlib.sha256()
        with full_path.open("rb") as src:
            while True:
                buf = src.read(1024 * 8)
                if not buf:
                    break
                hashsum.update(buf)

            src.seek(0)
            wheel.writestr(zinfo, src.read(), compress_type=zipfile.ZIP_DEFLATED)

        size = full_path.stat().st_size
        hash_digest = urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=")

        self._records.append((rel_path_name, hash_digest, size))

    @contextlib.contextmanager
    def _write_to_zip(
        self, wheel: zipfile.ZipFile, rel_path: str
    ) -> Iterator[StringIO]:
        sio = StringIO()
        yield sio

        date_time = self._zipfile_date_time
        zi = zipfile.ZipInfo(rel_path, date_time)
        zi.external_attr = (0o644 & 0xFFFF) << 16  # Unix attributes
        b = sio.getvalue().encode("utf-8")
        hashsum = hashlib.sha256(b)
        hash_digest = urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=")

        wheel.writestr(zi, b, compress_type=zipfile.ZIP_DEFLATED)
        self._records.append((rel_path, hash_digest, len(b)))

    @cached_property
    def _zipfile_date_time(self) -> ZipInfoTimestamp:
        import time

        # The default is a fixed timestamp rather than the current time, so
        # that building a wheel twice on the same computer can automatically
        # give you the exact same result.
        default = (2016, 1, 1, 0, 0, 0)
        try:
            _env_date = time.gmtime(int(os.environ["SOURCE_DATE_EPOCH"]))[:6]
        except ValueError:
            logger.warning(
                "SOURCE_DATE_EPOCH environment variable value"
                " is not an int, setting zipinfo date to default=%s",
                default,
            )
            return default
        except KeyError:
            logger.debug(
                "SOURCE_DATE_EPOCH environment variable not set,"
                " setting zipinfo date to default=%s",
                default,
            )
            return default
        else:
            if _env_date[0] < 1980:
                logger.warning(
                    "zipinfo date can't be earlier than 1980,"
                    " setting zipinfo date to default=%s",
                    default,
                )
                return default
            return _env_date

    def _write_entry_points(self, fp: TextIO) -> None:
        """
        Write entry_points.txt.
        """
        entry_points = self.convert_entry_points()

        for group_name in sorted(entry_points):
            fp.write(f"[{group_name}]\n")
            for ep in sorted(entry_points[group_name]):
                fp.write(ep.replace(" ", "") + "\n")

            fp.write("\n")

    def _write_wheel_file(self, fp: TextIO) -> None:
        fp.write(
            wheel_file_template.format(
                version=__version__,
                pure_lib="true" if self._package.build_script is None else "false",
                tag=self.tag,
            )
        )

    def _write_metadata_file(self, fp: TextIO) -> None:
        """
        Write out metadata in the 2.x format (email like)
        """
        fp.write(self.get_metadata_content())
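

# Illustrative sketch (not part of the library): the RECORD row format written
# above. Each entry pairs a path with its unpadded urlsafe-base64 sha256 digest
# and its size in bytes.
if __name__ == "__main__":
    payload = b"hello wheel\n"
    digest = urlsafe_b64encode(hashlib.sha256(payload).digest()).decode("ascii")
    print(f"demo/file.txt,sha256={digest.rstrip('=')},{len(payload)}")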
poetry-core-2.1.1/src/poetry/core/masonry/metadata.py000066400000000000000000000074721475444614500226760ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

from poetry.core.utils.helpers import readme_content_type


if TYPE_CHECKING:
    from packaging.utils import NormalizedName

    from poetry.core.packages.project_package import ProjectPackage


class Metadata:
    metadata_version = "2.3"
    # version 1.0
    name: str | None = None
    version: str
    platforms: tuple[str, ...] = ()
    supported_platforms: tuple[str, ...] = ()
    summary: str | None = None
    description: str | None = None
    keywords: str | None = None
    home_page: str | None = None
    download_url: str | None = None
    author: str | None = None
    author_email: str | None = None
    license: str | None = None
    # version 1.1
    classifiers: tuple[str, ...] = ()
    requires: tuple[str, ...] = ()
    provides: tuple[str, ...] = ()
    obsoletes: tuple[str, ...] = ()
    # version 1.2
    maintainer: str | None = None
    maintainer_email: str | None = None
    requires_python: str | None = None
    requires_external: tuple[str, ...] = ()
    requires_dist: list[str] = []  # noqa: RUF012
    provides_dist: tuple[str, ...] = ()
    obsoletes_dist: tuple[str, ...] = ()
    project_urls: tuple[str, ...] = ()

    # Version 2.1
    description_content_type: str | None = None
    provides_extra: list[NormalizedName] = []  # noqa: RUF012

    @classmethod
    def from_package(cls, package: ProjectPackage) -> Metadata:
        from poetry.core.version.helpers import format_python_constraint

        meta = cls()

        meta.name = package.pretty_name
        meta.version = package.version.to_string()
        meta.summary = package.description
        if package.readme_content:
            meta.description = package.readme_content
        elif package.readmes:
            descriptions = []
            for readme in package.readmes:
                try:
                    descriptions.append(readme.read_text(encoding="utf-8"))
                except FileNotFoundError as e:
                    raise FileNotFoundError(
                        f"Readme path `{readme}` does not exist."
                    ) from e
                except IsADirectoryError as e:
                    raise IsADirectoryError(
                        f"Readme path `{readme}` is a directory."
                    ) from e
                except PermissionError as e:
                    raise PermissionError(
                        f"Readme path `{readme}` is not readable."
                    ) from e
            meta.description = "\n".join(descriptions)

        meta.keywords = ",".join(package.keywords)
        meta.home_page = package.homepage or package.repository_url
        meta.author = package.author_name
        meta.author_email = package.author_email

        if package.license:
            meta.license = package.license.id

        meta.classifiers = tuple(package.all_classifiers)

        # Version 1.2
        meta.maintainer = package.maintainer_name
        meta.maintainer_email = package.maintainer_email

        # Requires python
        if package.requires_python != "*":
            meta.requires_python = package.requires_python
        elif package.python_versions != "*":
            meta.requires_python = format_python_constraint(package.python_constraint)

        meta.requires_dist = [
            d.to_pep_508()
            for d in package.requires
            if not d.is_optional() or d.in_extras
        ]

        # Version 2.1
        if package.readme_content_type:
            meta.description_content_type = package.readme_content_type
        elif package.readmes:
            meta.description_content_type = readme_content_type(package.readmes[0])

        meta.provides_extra = list(package.extras)

        meta.project_urls = tuple(
            f"{name}, {url}" for name, url in package.urls.items()
        )

        return meta
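

# Illustrative sketch (not part of the library): when a package does not set an
# explicit content type, Description-Content-Type is derived from the readme
# suffix via readme_content_type (e.g. ".md" -> "text/markdown").
if __name__ == "__main__":
    from pathlib import Path

    print(readme_content_type(Path("README.md")))   # text/markdown
    print(readme_content_type(Path("README.rst")))  # text/x-rst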
poetry-core-2.1.1/src/poetry/core/masonry/utils/000077500000000000000000000000001475444614500216725ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/masonry/utils/__init__.py000066400000000000000000000000001475444614500237710ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/masonry/utils/helpers.py000066400000000000000000000036551475444614500237170ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING
from typing import NewType
from typing import cast


if TYPE_CHECKING:
    from packaging.utils import NormalizedName


DistributionName = NewType("DistributionName", str)


def normalize_file_permissions(st_mode: int) -> int:
    """
    Normalizes the permission bits in the st_mode field from stat to 644/755

    Popular VCSs only track whether a file is executable or not. The exact
    permissions can vary on systems with different umasks. Normalising
    to 644 (non executable) or 755 (executable) makes builds more reproducible.
    """
    # Set 644 permissions, leaving higher bits of st_mode unchanged
    new_mode = (st_mode | 0o644) & ~0o133
    if st_mode & 0o100:
        new_mode |= 0o111  # Executable: 644 -> 755

    return new_mode


def distribution_name(name: NormalizedName) -> DistributionName:
    """
    A normalized name, but with "-" replaced by "_". This is used in various places:

    https://packaging.python.org/en/latest/specifications/binary-distribution-format/#escaping-and-unicode

    In distribution names ... This is equivalent to PEP 503 normalisation followed by
    replacing - with _.

    https://packaging.python.org/en/latest/specifications/source-distribution-format/#source-distribution-file-name

    ... {name} is normalised according to the same rules as for binary distributions

    https://packaging.python.org/en/latest/specifications/recording-installed-packages/#the-dist-info-directory

    This directory is named as {name}-{version}.dist-info, with name and version fields
    corresponding to Core metadata specifications. Both fields must be normalized
    (see PEP 503 and PEP 440 for the definition of normalization for each field
    respectively), and replace dash (-) characters with underscore (_) characters ...
    """
    distribution_name = name.replace("-", "_")
    return cast("DistributionName", distribution_name)
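
# Illustration (not part of the original module): combined with
# packaging.utils.canonicalize_name,
#     distribution_name(canonicalize_name("My.Package")) == "my_package"
# since PEP 503 normalization yields "my-package" before "-" becomes "_".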
poetry-core-2.1.1/src/poetry/core/masonry/utils/include.py
from __future__ import annotations

from typing import TYPE_CHECKING


if TYPE_CHECKING:
    from pathlib import Path


class Include:
    """
    Represents an "include" entry.

    It can be a glob string, a single file or a directory.

    This class will then detect the type of this include:

        - a package
        - a module
        - a file
        - a directory
    """

    def __init__(self, base: Path, include: str, formats: list[str]) -> None:
        self._base = base
        self._include = str(include)
        self._formats = formats

        self._elements: list[Path] = sorted(self._base.glob(str(self._include)))

    @property
    def base(self) -> Path:
        return self._base

    @property
    def elements(self) -> list[Path]:
        return self._elements

    @property
    def formats(self) -> list[str]:
        return self._formats

    def is_empty(self) -> bool:
        return len(self._elements) == 0

    def refresh(self) -> Include:
        self._elements = sorted(self._base.glob(self._include))

        return self
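
# Illustrative usage (hypothetical paths and patterns):
#     inc = Include(Path("/project"), "data/**/*.json", formats=["sdist"])
#     inc.is_empty()   # True if the glob matched nothing
#     inc.refresh()    # re-evaluates the glob after the file system changed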
poetry-core-2.1.1/src/poetry/core/masonry/utils/module.py
from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any


if TYPE_CHECKING:
    from collections.abc import Mapping
    from collections.abc import Sequence

    from poetry.core.masonry.utils.include import Include


class ModuleOrPackageNotFoundError(ValueError):
    pass


class Module:
    def __init__(
        self,
        name: str,
        directory: str = ".",
        packages: Sequence[Mapping[str, Any]] = (),
        includes: Sequence[Mapping[str, Any]] = (),
    ) -> None:
        from poetry.core.masonry.utils.include import Include
        from poetry.core.masonry.utils.package_include import PackageInclude
        from poetry.core.utils.helpers import module_name

        self._name = module_name(name)
        self._in_src = False
        self._is_package = False
        self._path = Path(directory)
        self._package_includes: list[PackageInclude] = []
        self._explicit_includes: list[Include] = []

        if not packages:
            # It must exist either as a .py file or a directory, but not both
            pkg_dir = Path(directory, self._name)
            py_file = Path(directory, self._name + ".py")
            default_package: dict[str, Any]
            if pkg_dir.is_dir() and py_file.is_file():
                raise ValueError(f"Both {pkg_dir} and {py_file} exist")
            elif pkg_dir.is_dir():
                default_package = {"include": str(pkg_dir.relative_to(self._path))}
            elif py_file.is_file():
                default_package = {"include": str(py_file.relative_to(self._path))}
            else:
                # Searching for a src module
                src = Path(directory, "src")
                src_pkg_dir = src / self._name
                src_py_file = src / (self._name + ".py")

                if src_pkg_dir.is_dir() and src_py_file.is_file():
                    raise ValueError(f"Both {pkg_dir} and {py_file} exist")
                elif src_pkg_dir.is_dir():
                    default_package = {
                        "include": str(src_pkg_dir.relative_to(src)),
                        "from": str(src.relative_to(self._path)),
                    }
                elif src_py_file.is_file():
                    default_package = {
                        "include": str(src_py_file.relative_to(src)),
                        "from": str(src.relative_to(self._path)),
                    }
                else:
                    raise ModuleOrPackageNotFoundError(
                        f"No file/folder found for package {name}"
                    )
            default_package["format"] = ["sdist", "wheel"]
            packages = [default_package]

        for package in packages:
            self._package_includes.append(
                PackageInclude(
                    self._path,
                    package["include"],
                    formats=package["format"],
                    source=package.get("from"),
                    target=package.get("to"),
                )
            )

        for include in includes:
            self._explicit_includes.append(
                Include(self._path, include["path"], formats=include["format"])
            )

    @property
    def name(self) -> str:
        return self._name

    @property
    def path(self) -> Path:
        return self._path

    @property
    def file(self) -> Path:
        if self._is_package:
            return self._path / "__init__.py"
        else:
            return self._path

    @property
    def includes(self) -> list[Include]:
        return [*self._package_includes, *self._explicit_includes]

    @property
    def explicit_includes(self) -> list[Include]:
        return self._explicit_includes

    def is_package(self) -> bool:
        return self._is_package

    def is_in_src(self) -> bool:
        return self._in_src
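
# Illustration of the default package detection above (hypothetical layout):
# Module("my_pkg") with src/my_pkg/ present resolves to the package include
#     {"include": "my_pkg", "from": "src", "format": ["sdist", "wheel"]}
# while a flat my_pkg.py next to pyproject.toml resolves to
#     {"include": "my_pkg.py", "format": ["sdist", "wheel"]}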
poetry-core-2.1.1/src/poetry/core/masonry/utils/package_include.py
from __future__ import annotations

from typing import TYPE_CHECKING

from poetry.core.masonry.utils.include import Include


if TYPE_CHECKING:
    from pathlib import Path


class PackageInclude(Include):
    def __init__(
        self,
        base: Path,
        include: str,
        formats: list[str],
        source: str | None = None,
        target: str | None = None,
    ) -> None:
        self._package: str
        self._is_package = False
        self._is_module = False
        self._source = source
        self._target = target

        if source is not None:
            base = base / source

        super().__init__(base, include, formats=formats)
        self.check_elements()

    @property
    def package(self) -> str:
        return self._package

    @property
    def source(self) -> str | None:
        return self._source

    @property
    def target(self) -> str | None:
        return self._target

    def is_package(self) -> bool:
        return self._is_package

    def is_module(self) -> bool:
        return self._is_module

    def refresh(self) -> PackageInclude:
        super().refresh()

        return self.check_elements()

    def is_stub_only(self) -> bool:
        # returns `True` if this is a PEP 561 stub-only package,
        # see [PEP 561](https://www.python.org/dev/peps/pep-0561/#stub-only-packages)
        return (self.package or "").endswith("-stubs") and all(
            el.suffix == ".pyi" or el.name == "py.typed"
            for el in self.elements
            if el.is_file()
        )

    def has_modules(self) -> bool:
        # Packages no longer need an __init__.py in Python 3, but there must be
        # at least one .py file for it to be considered a package
        return any(element.suffix == ".py" for element in self.elements)

    def check_elements(self) -> PackageInclude:
        if not self._elements:
            raise ValueError(
                f"{self._base / self._include} does not contain any element"
            )

        root = self._elements[0]
        if len(self._elements) > 1:
            # Probably glob
            self._is_package = True
            self._package = root.parent.name

            if not (self.is_stub_only() or self.has_modules()):
                raise ValueError(f"{root.name} is not a package.")

        elif root.is_dir():
            # If it's a directory, we include everything inside it
            self._package = root.name
            self._elements: list[Path] = sorted(root.glob("**/*"))

            if not (self.is_stub_only() or self.has_modules()):
                raise ValueError(f"{root.name} is not a package.")

            self._is_package = True
        else:
            self._package = root.stem
            self._is_module = True

        return self
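
# How check_elements classifies an include (illustrative, hypothetical paths):
#     PackageInclude(base, "my_pkg")        # directory with .py files -> is_package()
#     PackageInclude(base, "my_module.py")  # single file              -> is_module()
#     PackageInclude(base, "my_pkg/*")      # multi-element glob       -> package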
poetry-core-2.1.1/src/poetry/core/packages/
poetry-core-2.1.1/src/poetry/core/packages/__init__.py
poetry-core-2.1.1/src/poetry/core/packages/dependency.py
from __future__ import annotations

import os
import re

from contextlib import suppress
from pathlib import Path
from typing import TYPE_CHECKING
from typing import TypeVar

from packaging.utils import canonicalize_name

from poetry.core.constraints.generic import parse_constraint as parse_generic_constraint
from poetry.core.constraints.version import parse_constraint
from poetry.core.packages.dependency_group import MAIN_GROUP
from poetry.core.packages.specification import PackageSpecification
from poetry.core.packages.utils.utils import contains_group_without_marker
from poetry.core.packages.utils.utils import create_nested_marker
from poetry.core.packages.utils.utils import normalize_python_version_markers
from poetry.core.version.markers import parse_marker


if TYPE_CHECKING:
    from collections.abc import Iterable
    from collections.abc import Sequence

    from packaging.utils import NormalizedName

    from poetry.core.constraints.version import VersionConstraint
    from poetry.core.packages.directory_dependency import DirectoryDependency
    from poetry.core.packages.file_dependency import FileDependency
    from poetry.core.version.markers import BaseMarker

    T = TypeVar("T", bound="Dependency")


class Dependency(PackageSpecification):
    def __init__(
        self,
        name: str,
        constraint: str | VersionConstraint,
        optional: bool = False,
        groups: Iterable[str] | None = None,
        allows_prereleases: bool | None = None,
        extras: Iterable[str] | None = None,
        source_type: str | None = None,
        source_url: str | None = None,
        source_reference: str | None = None,
        source_resolved_reference: str | None = None,
        source_subdirectory: str | None = None,
    ) -> None:
        from poetry.core.version.markers import AnyMarker

        super().__init__(
            name,
            source_type=source_type,
            source_url=source_url,
            source_reference=source_reference,
            source_resolved_reference=source_resolved_reference,
            source_subdirectory=source_subdirectory,
            features=extras,
        )

        # Attributes must be immutable for clone() to be safe!
        # (For performance reasons, clone only creates a copy instead of a deep copy).

        self._constraint: VersionConstraint
        self._pretty_constraint: str
        self.constraint = constraint  # type: ignore[assignment]

        self._optional = optional

        if not groups:
            groups = [MAIN_GROUP]

        self._groups = frozenset(groups)
        self._allows_prereleases = allows_prereleases
        # "_develop" is only required for enriching [project] dependencies
        self._develop = False

        self._python_versions = "*"
        self._python_constraint = parse_constraint("*")
        self._transitive_marker: BaseMarker | None = None

        self._in_extras: Sequence[NormalizedName] = []

        self._activated = not self._optional

        self.is_root = False
        self._marker: BaseMarker = AnyMarker()
        self.source_name: str | None = None

    @property
    def name(self) -> NormalizedName:
        return self._name

    @property
    def constraint(self) -> VersionConstraint:
        return self._constraint

    @constraint.setter
    def constraint(self, constraint: str | VersionConstraint) -> None:
        if isinstance(constraint, str):
            self._constraint = parse_constraint(constraint)
        else:
            self._constraint = constraint

        self._pretty_constraint = str(constraint)

    @property
    def pretty_constraint(self) -> str:
        return self._pretty_constraint

    @property
    def pretty_name(self) -> str:
        return self._pretty_name

    @property
    def groups(self) -> frozenset[str]:
        return self._groups

    @property
    def python_versions(self) -> str:
        return self._python_versions

    @python_versions.setter
    def python_versions(self, value: str) -> None:
        self._python_versions = value
        self._python_constraint = parse_constraint(value)
        if not self._python_constraint.is_any():
            self._marker = self._marker.intersect(
                parse_marker(
                    create_nested_marker("python_version", self._python_constraint)
                )
            )

    @property
    def marker(self) -> BaseMarker:
        return self._marker

    @marker.setter
    def marker(self, marker: str | BaseMarker) -> None:
        from poetry.core.constraints.version import parse_constraint
        from poetry.core.packages.utils.utils import convert_markers
        from poetry.core.version.markers import BaseMarker
        from poetry.core.version.markers import parse_marker

        if not isinstance(marker, BaseMarker):
            marker = parse_marker(marker)

        self._marker = marker

        markers = convert_markers(marker)

        if "extra" in markers:
            # If we have extras, the dependency is optional
            self.deactivate()

            new_in_extras = []
            for or_ in markers["extra"]:
                for op, extra in or_:
                    if op == "==":
                        new_in_extras.append(canonicalize_name(extra))
                    elif op == "" and ("||" in extra or "," in extra):
                        sep = "||" if "||" in extra else ","
                        extra_values = [e.strip() for e in extra.split(sep)]
                        for _extra in extra_values:
                            if not _extra.startswith("!="):
                                new_in_extras.append(canonicalize_name(_extra))
            self._in_extras = [*self._in_extras, *new_in_extras]

        # Recalculate python versions.
        self._python_versions = "*"
        if not contains_group_without_marker(markers, "python_version"):
            python_version_markers = markers["python_version"]
            self._python_versions = normalize_python_version_markers(
                python_version_markers
            )

        self._python_constraint = parse_constraint(self._python_versions)
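        # Illustrative effect of this setter (hypothetical marker): assigning
        #     dep.marker = 'extra == "foo" and python_version >= "3.9"'
        # deactivates the dependency, records "foo" in `in_extras` and narrows
        # `python_versions` to ">=3.9".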

    @property
    def transitive_marker(self) -> BaseMarker:
        if self._transitive_marker is None:
            return self.marker

        return self._transitive_marker

    @transitive_marker.setter
    def transitive_marker(self, value: BaseMarker) -> None:
        self._transitive_marker = value

    @property
    def python_constraint(self) -> VersionConstraint:
        return self._python_constraint

    @property
    def extras(self) -> frozenset[NormalizedName]:
        # extras activated in a dependency is the same as features
        return self._features

    @property
    def in_extras(self) -> Sequence[NormalizedName]:
        return self._in_extras

    @property
    def base_pep_508_name(self) -> str:
        from poetry.core.constraints.version import Version
        from poetry.core.constraints.version import VersionUnion

        requirement = self.complete_pretty_name

        constraint = self.constraint
        if isinstance(constraint, VersionUnion):
            if (
                constraint.excludes_single_version
                or constraint.excludes_single_wildcard_range
            ):
                # This branch is short-circuit logic for special cases and
                # avoids having to split and parse the constraint again. It has
                # no functional difference from the logic in the else branch.
                requirement += f" ({constraint})"
            else:
                constraints = ",".join(
                    str(parse_constraint(c)) for c in self.pretty_constraint.split(",")
                )
                requirement += f" ({constraints})"
        elif isinstance(constraint, Version):
            requirement += f" (=={constraint.text})"
        elif not constraint.is_any():
            requirement += f" ({str(constraint).replace(' ', '')})"

        return requirement

    @property
    def base_pep_508_name_resolved(self) -> str:
        return self.base_pep_508_name

    def allows_prereleases(self) -> bool | None:
        """
        None (default): only use pre-release versions
                        if no stable version satisfies the constraint
        False: do not allow pre-release versions
               even if this means there is no solution
        True: do not distinguish between stable and pre-release versions
        """
        return self._allows_prereleases

    def is_optional(self) -> bool:
        return self._optional

    def is_activated(self) -> bool:
        return self._activated

    def is_vcs(self) -> bool:
        return False

    def is_file(self) -> bool:
        return False

    def is_directory(self) -> bool:
        return False

    def is_url(self) -> bool:
        return False

    def to_pep_508(self, with_extras: bool = True, *, resolved: bool = False) -> str:
        from poetry.core.packages.utils.utils import convert_markers

        if resolved:
            requirement = self.base_pep_508_name_resolved
        else:
            requirement = self.base_pep_508_name

        markers = []
        has_extras = False
        if not self.marker.is_any():
            marker = self.marker
            if not with_extras:
                marker = marker.without_extras()

            # we re-check for any marker here since the without extra marker might
            # return an any marker again
            if not (marker.is_empty() or marker.is_any()):
                markers.append(str(marker))

            has_extras = "extra" in convert_markers(marker)
        else:
            # Python marker
            if self.python_versions != "*":
                python_constraint = self.python_constraint

                markers.append(
                    create_nested_marker("python_version", python_constraint)
                )

        in_extras = " || ".join(self._in_extras)
        if in_extras and with_extras and not has_extras:
            markers.append(
                create_nested_marker("extra", parse_generic_constraint(in_extras))
            )

        if markers:
            if len(markers) > 1:
                marker_str = " and ".join(f"({m})" for m in markers)
            else:
                marker_str = markers[0]
            requirement += f" ; {marker_str}"

        return requirement
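    # Illustrative output (hypothetical dependency):
    #     Dependency("requests", ">=2.25", extras=["socks"]).to_pep_508()
    #     -> 'requests[socks] (>=2.25)'
    # with ' ; <marker>' appended when a non-trivial marker is set.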

    def activate(self) -> None:
        """
        Set the dependency as mandatory.
        """
        self._activated = True

    def deactivate(self) -> None:
        """
        Set the dependency as optional.
        """
        if not self._optional:
            self._optional = True

        self._activated = False

    def with_constraint(self: T, constraint: str | VersionConstraint) -> T:
        dependency = self.clone()
        dependency.constraint = constraint  # type: ignore[assignment]
        return dependency

    @classmethod
    def create_from_pep_508(
        cls, name: str, relative_to: Path | None = None
    ) -> Dependency:
        """
        Resolve a PEP-508 requirement string to a `Dependency` instance. If a
        `relative_to` path is specified, this is used as the base directory if the
        identified dependency is of file or directory type.
        """
        from poetry.core.packages.url_dependency import URLDependency
        from poetry.core.packages.utils.link import Link
        from poetry.core.packages.utils.utils import cached_is_dir
        from poetry.core.packages.utils.utils import is_archive_file
        from poetry.core.packages.utils.utils import is_python_project
        from poetry.core.packages.utils.utils import is_url
        from poetry.core.packages.utils.utils import path_to_url
        from poetry.core.packages.utils.utils import strip_extras
        from poetry.core.packages.utils.utils import url_to_path
        from poetry.core.packages.vcs_dependency import VCSDependency
        from poetry.core.utils.patterns import wheel_file_re
        from poetry.core.vcs.git import ParsedUrl
        from poetry.core.version.requirements import parse_requirement

        # Removing comments
        parts = name.split(" #", 1)
        name = parts[0].strip()
        if len(parts) > 1:
            rest = parts[1]
            if " ;" in rest:
                name += " ;" + rest.split(" ;", 1)[1]

        req = parse_requirement(name)

        name = req.name
        link = None

        if is_url(name):
            link = Link(name)
        elif req.url:
            link = Link(req.url)
        else:
            path_str = os.path.normpath(os.path.abspath(name))  # noqa: PTH100
            p, extras = strip_extras(path_str)
            if cached_is_dir(p) and (os.path.sep in name or name.startswith(".")):
                if not is_python_project(Path(name)):
                    raise ValueError(
                        f"Directory {name!r} is not installable. Not a Python project."
                    )
                link = Link(path_to_url(p))
            elif is_archive_file(p):
                link = Link(path_to_url(p))

        # it's a local file, dir, or url
        if link:
            is_file_uri = link.scheme == "file"
            is_relative_uri = is_file_uri and re.search(r"\.\./", link.url)

            # Handle relative file URLs
            if is_file_uri and is_relative_uri:
                path = Path(link.path)
                if relative_to:
                    path = relative_to / path
                link = Link(path_to_url(path))

            # wheel file
            version = None
            if link.is_wheel:
                m = wheel_file_re.match(link.filename)
                if not m:
                    raise ValueError(f"Invalid wheel name: {link.filename}")
                name = m.group("name")
                version = m.group("ver")

            dep: Dependency | None = None

            if link.scheme.startswith("git+"):
                url = ParsedUrl.parse(link.url)
                dep = VCSDependency(
                    name,
                    "git",
                    url.url,
                    rev=url.rev,
                    directory=url.subdirectory,
                    extras=req.extras,
                )
            elif link.scheme == "git":
                dep = VCSDependency(
                    name, "git", link.url_without_fragment, extras=req.extras
                )
            elif link.scheme in ("http", "https"):
                dep = URLDependency(
                    name,
                    link.url_without_fragment,
                    directory=link.subdirectory_fragment,
                    extras=req.extras,
                )
            elif is_file_uri:
                # handle RFC 8089 references
                path = url_to_path(req.url)
                dep = _make_file_or_dir_dep(
                    name=name,
                    path=path,
                    base=relative_to,
                    subdirectory=link.subdirectory_fragment,
                    extras=req.extras,
                )
            else:
                with suppress(ValueError):
                    # this is a local path not using the file URI scheme
                    dep = _make_file_or_dir_dep(
                        name=name,
                        path=Path(req.url),
                        base=relative_to,
                        extras=req.extras,
                    )

            if dep is None:
                dep = Dependency(name, version or "*", extras=req.extras)

            if version:
                dep._constraint = parse_constraint(version)
        else:
            constraint: VersionConstraint | str
            constraint = req.constraint if req.pretty_constraint else "*"
            dep = Dependency(name, constraint, extras=req.extras)

        if req.marker:
            dep.marker = req.marker

        return dep
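    # A few illustrative resolutions (hypothetical URLs, not exhaustive):
    #     create_from_pep_508("requests[socks]>=2.25")
    #         -> Dependency
    #     create_from_pep_508("pkg @ https://example.com/pkg-1.0.tar.gz")
    #         -> URLDependency
    #     create_from_pep_508("pkg @ git+https://example.com/org/pkg.git@main")
    #         -> VCSDependency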

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Dependency):
            return NotImplemented

        # "constraint" is implicitly given for direct origin dependencies and might not
        # be set yet ("*"). Thus, it shouldn't be used to determine if two direct origin
        # dependencies are equal.
        # Calling is_direct_origin() for one dependency is sufficient because
        # super().__eq__() returns False for different origins.
        return super().__eq__(other) and (
            self._constraint == other.constraint or self.is_direct_origin()
        )

    def __hash__(self) -> int:
        # don't include _constraint in hash because it is mutable!
        return super().__hash__()

    def __str__(self) -> str:
        if self.is_root:
            return self._pretty_name
        if self.is_direct_origin():
            # adding version since this information is especially useful in debug output
            parts = [p.strip() for p in self.base_pep_508_name.split("@", 1)]
            return f"{parts[0]} ({self._pretty_constraint}) @ {parts[1]}"
        return self.base_pep_508_name

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self}>"


def _make_file_or_dir_dep(
    name: str,
    path: Path,
    base: Path | None = None,
    subdirectory: str | None = None,
    extras: Iterable[str] | None = None,
) -> FileDependency | DirectoryDependency | None:
    """
    Helper function to create a file or directory dependency with the given arguments.

    If path is not a file or directory that exists, a guess is made based on the suffix
    of the given path. This is done to prevent path dependencies from being parsed as
    normal dependencies and allows for downstream error handling.

    See also: poetry#10068
    """
    from poetry.core.packages.directory_dependency import DirectoryDependency
    from poetry.core.packages.file_dependency import FileDependency

    _path = path

    if not path.is_absolute() and base:
        # a base path was specified, so we should respect that
        _path = Path(base) / path

    # we check if it is a file (if it exists) or rely on suffix to guess
    is_file = _path.is_file() if _path.exists() else path.suffix != ""

    if is_file:
        return FileDependency(
            name, path, base=base, directory=subdirectory, extras=extras
        )

    if subdirectory:
        path = path / subdirectory

    return DirectoryDependency(name, path, base=base, extras=extras)
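
# Illustrative guesses (hypothetical paths): a non-existent "./dist/pkg.whl" is
# treated as a FileDependency because of its ".whl" suffix, while a suffix-less
# "./pkgs/demo" falls through to DirectoryDependency.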
poetry-core-2.1.1/src/poetry/core/packages/dependency_group.py
from __future__ import annotations

from collections import defaultdict
from typing import TYPE_CHECKING

from poetry.core.version.markers import parse_marker


if TYPE_CHECKING:
    from poetry.core.packages.dependency import Dependency
    from poetry.core.version.markers import BaseMarker

MAIN_GROUP = "main"


class DependencyGroup:
    def __init__(
        self, name: str, *, optional: bool = False, mixed_dynamic: bool = False
    ) -> None:
        self._name: str = name
        self._optional: bool = optional
        self._mixed_dynamic = mixed_dynamic
        self._dependencies: list[Dependency] = []
        self._poetry_dependencies: list[Dependency] = []

    @property
    def name(self) -> str:
        return self._name

    @property
    def dependencies(self) -> list[Dependency]:
        if not self._dependencies:
            # legacy mode
            return self._poetry_dependencies
        if self._mixed_dynamic and self._poetry_dependencies:
            if all(dep.is_optional() for dep in self._dependencies):
                return [
                    *self._dependencies,
                    *(d for d in self._poetry_dependencies if not d.is_optional()),
                ]
            if all(not dep.is_optional() for dep in self._dependencies):
                return [
                    *self._dependencies,
                    *(d for d in self._poetry_dependencies if d.is_optional()),
                ]
        return self._dependencies

    @property
    def dependencies_for_locking(self) -> list[Dependency]:
        if not self._poetry_dependencies:
            return self._dependencies
        if not self._dependencies:
            return self._poetry_dependencies

        poetry_dependencies_by_name = defaultdict(list)
        for dep in self._poetry_dependencies:
            poetry_dependencies_by_name[dep.name].append(dep)

        dependencies = []
        for dep in self.dependencies:
            if dep.name in poetry_dependencies_by_name:
                enriched = False
                dep_marker = dep.marker
                if dep.in_extras:
                    dep_marker = dep.marker.intersect(
                        parse_marker(
                            " or ".join(
                                f"extra == '{extra}'" for extra in dep.in_extras
                            )
                        )
                    )
                for poetry_dep in poetry_dependencies_by_name[dep.name]:
                    marker = dep_marker.intersect(poetry_dep.marker)
                    if not marker.is_empty():
                        if marker == dep_marker:
                            marker = dep.marker
                        enriched = True
                        dependencies.append(_enrich_dependency(dep, poetry_dep, marker))
                if not enriched:
                    dependencies.append(dep)
            else:
                dependencies.append(dep)

        return dependencies
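    # Illustrative enrichment (hypothetical entries): a [project] dependency
    # 'foo>=1.0' combined with a matching [tool.poetry] entry
    # foo = { version = ">=1.0", source = "private" } yields a single locking
    # dependency that carries the source information from the Poetry entry.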

    def is_optional(self) -> bool:
        return self._optional

    def add_dependency(self, dependency: Dependency) -> None:
        if not self._dependencies and self._poetry_dependencies:
            self._poetry_dependencies.append(dependency)
        else:
            self._dependencies.append(dependency)

    def add_poetry_dependency(self, dependency: Dependency) -> None:
        self._poetry_dependencies.append(dependency)

    def remove_dependency(self, name: str) -> None:
        from packaging.utils import canonicalize_name

        name = canonicalize_name(name)

        dependencies = []
        for dependency in self.dependencies:
            if dependency.name == name:
                continue
            dependencies.append(dependency)
        self._dependencies = dependencies

        dependencies = []
        for dependency in self._poetry_dependencies:
            if dependency.name == name:
                continue
            dependencies.append(dependency)
        self._poetry_dependencies = dependencies

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, DependencyGroup):
            return NotImplemented

        return (
            self._name == other.name
            and set(self._dependencies) == set(other.dependencies)
            and set(self._poetry_dependencies) == set(other._poetry_dependencies)
        )

    def __repr__(self) -> str:
        cls = self.__class__.__name__
        return f"{cls}({self._name}, optional={self._optional})"


def _enrich_dependency(
    project_dependency: Dependency, poetry_dependency: Dependency, marker: BaseMarker
) -> Dependency:
    if (
        project_dependency.source_type is not None
        and poetry_dependency.source_type is not None
        and not poetry_dependency.is_same_source_as(project_dependency)
    ):
        raise ValueError(
            "Cannot enrich dependency with different sources: "
            f"{project_dependency} and {poetry_dependency}"
        )

    constraint = project_dependency.constraint.intersect(poetry_dependency.constraint)
    if constraint.is_empty():
        raise ValueError(
            "Cannot enrich dependency with incompatible constraints: "
            f"{project_dependency} and {poetry_dependency}"
        )

    if project_dependency.source_type is not None:
        from poetry.core.packages.directory_dependency import DirectoryDependency
        from poetry.core.packages.vcs_dependency import VCSDependency

        dependency = project_dependency.clone()
        if isinstance(project_dependency, (DirectoryDependency, VCSDependency)):
            dependency._develop = poetry_dependency._develop  # type: ignore[has-type]
    else:
        dependency = poetry_dependency.with_features(project_dependency.features)

    dependency.constraint = constraint
    dependency.marker = marker

    return dependency
poetry-core-2.1.1/src/poetry/core/packages/directory_dependency.py
from __future__ import annotations

import functools

from typing import TYPE_CHECKING

from poetry.core.packages.path_dependency import PathDependency
from poetry.core.packages.utils.utils import is_python_project
from poetry.core.pyproject.toml import PyProjectTOML


if TYPE_CHECKING:
    from collections.abc import Iterable
    from pathlib import Path


class DirectoryDependency(PathDependency):
    def __init__(
        self,
        name: str,
        path: Path,
        groups: Iterable[str] | None = None,
        optional: bool = False,
        base: Path | None = None,
        develop: bool = False,
        extras: Iterable[str] | None = None,
    ) -> None:
        super().__init__(
            name,
            path,
            source_type="directory",
            groups=groups,
            optional=optional,
            base=base,
            extras=extras,
        )
        # Attributes must be immutable for clone() to be safe!
        # (For performance reasons, clone only creates a copy instead of a deep copy).
        self._develop = develop

        # cache this function to avoid multiple IO reads and parsing
        self.supports_poetry = functools.lru_cache(maxsize=1)(self._supports_poetry)

    @property
    def develop(self) -> bool:
        return self._develop

    def _validate(self) -> str:
        message = super()._validate()
        if message:
            return message

        if self._full_path.is_file():
            return (
                f"{self._full_path} for {self.pretty_name} is a file,"
                " expected a directory"
            )
        if not is_python_project(self._full_path):
            return (
                f"Directory {self._full_path} for {self.pretty_name} does not seem"
                " to be a Python package"
            )
        return ""

    def _supports_poetry(self) -> bool:
        return PyProjectTOML(self._full_path / "pyproject.toml").is_poetry_project()
poetry-core-2.1.1/src/poetry/core/packages/file_dependency.py
from __future__ import annotations

from typing import TYPE_CHECKING

from poetry.core.packages.path_dependency import PathDependency


if TYPE_CHECKING:
    from collections.abc import Iterable
    from pathlib import Path


class FileDependency(PathDependency):
    def __init__(
        self,
        name: str,
        path: Path,
        *,
        directory: str | None = None,
        groups: Iterable[str] | None = None,
        optional: bool = False,
        base: Path | None = None,
        extras: Iterable[str] | None = None,
    ) -> None:
        super().__init__(
            name,
            path,
            source_type="file",
            groups=groups,
            optional=optional,
            base=base,
            subdirectory=directory,
            extras=extras,
        )
        # Attributes must be immutable for clone() to be safe!
        # (For performance reasons, clone only creates a copy instead of a deep copy).

    @property
    def directory(self) -> str | None:
        return self.source_subdirectory

    @property
    def base_pep_508_name(self) -> str:
        requirement = super().base_pep_508_name

        if self.directory:
            requirement += f"#subdirectory={self.directory}"

        return requirement

    def _validate(self) -> str:
        message = super()._validate()
        if message:
            return message

        if self._full_path.is_dir():
            return (
                f"{self._full_path} for {self.pretty_name} is a directory,"
                " expected a file"
            )
        return ""
poetry-core-2.1.1/src/poetry/core/packages/package.py
from __future__ import annotations

import warnings

from typing import TYPE_CHECKING
from typing import ClassVar
from typing import TypeVar

from poetry.core.constraints.version import parse_constraint
from poetry.core.constraints.version.exceptions import ParseConstraintError
from poetry.core.packages.dependency_group import MAIN_GROUP
from poetry.core.packages.specification import PackageSpecification
from poetry.core.utils.patterns import AUTHOR_REGEX
from poetry.core.version.exceptions import InvalidVersionError


if TYPE_CHECKING:
    from collections.abc import Collection
    from collections.abc import Iterable
    from collections.abc import Mapping
    from collections.abc import Sequence
    from pathlib import Path

    from packaging.utils import NormalizedName

    from poetry.core.constraints.version import Version
    from poetry.core.constraints.version import VersionConstraint
    from poetry.core.packages.dependency import Dependency
    from poetry.core.packages.dependency_group import DependencyGroup
    from poetry.core.spdx.license import License
    from poetry.core.version.markers import BaseMarker

    T = TypeVar("T", bound="Package")


class Package(PackageSpecification):
    AVAILABLE_PYTHONS: ClassVar[set[str]] = {
        "2",
        "2.7",
        "3",
        "3.4",
        "3.5",
        "3.6",
        "3.7",
        "3.8",
        "3.9",
        "3.10",
        "3.11",
        "3.12",
        "3.13",
    }

    def __init__(
        self,
        name: str,
        version: str | Version,
        source_type: str | None = None,
        source_url: str | None = None,
        source_reference: str | None = None,
        source_resolved_reference: str | None = None,
        source_subdirectory: str | None = None,
        features: Iterable[str] | None = None,
        develop: bool = False,
        yanked: str | bool = False,
    ) -> None:
        """
        Creates a new in memory package.
        """
        from poetry.core.version.markers import AnyMarker

        super().__init__(
            name,
            source_type=source_type,
            source_url=source_url,
            source_reference=source_reference,
            source_resolved_reference=source_resolved_reference,
            source_subdirectory=source_subdirectory,
            features=features,
        )

        # Attributes must be immutable for clone() to be safe!
        # (For performance reasons, clone only creates a copy instead of a deep copy).

        self._set_version(version)

        self.description = ""

        self.authors: Sequence[str] = []
        self.maintainers: Sequence[str] = []

        self.homepage: str | None = None
        self.repository_url: str | None = None
        self.documentation_url: str | None = None
        self.keywords: Sequence[str] = []
        self._license: License | None = None
        self.readmes: tuple[Path, ...] = ()
        self.readme_content_type: str | None = None
        self.readme_content: str | None = None

        self.extras: Mapping[NormalizedName, Sequence[Dependency]] = {}

        self._dependency_groups: Mapping[str, DependencyGroup] = {}

        self.files: Sequence[Mapping[str, str]] = []
        self.optional = False

        self.classifiers: Sequence[str] = []

        self._python_versions = "*"
        self._python_constraint = parse_constraint("*")

        self.marker: BaseMarker = AnyMarker()

        self.root_dir: Path | None = None

        self.develop = develop

        self._yanked = yanked

    @property
    def name(self) -> NormalizedName:
        return self._name

    @property
    def pretty_name(self) -> str:
        return self._pretty_name

    @property
    def version(self) -> Version:
        return self._version

    @property
    def pretty_version(self) -> str:
        return self._version.text

    @property
    def unique_name(self) -> str:
        if self.is_root():
            return self._name

        return self.complete_name + "-" + self._version.text

    @property
    def pretty_string(self) -> str:
        return self.pretty_name + " " + self.pretty_version

    @property
    def full_pretty_version(self) -> str:
        if self.source_type in ("file", "directory", "url"):
            return f"{self.pretty_version} {self.source_url}"

        if self.source_type not in ("hg", "git"):
            return self.pretty_version

        ref: str | None
        if self.source_resolved_reference and len(self.source_resolved_reference) == 40:
            ref = self.source_resolved_reference[0:7]
            return f"{self.pretty_version} {ref}"

        # if source reference is a sha1 hash -- truncate
        if self.source_reference and len(self.source_reference) == 40:
            return f"{self.pretty_version} {self.source_reference[0:7]}"

        ref = self._source_resolved_reference or self._source_reference
        return f"{self.pretty_version} {ref}"

    @property
    def author_name(self) -> str | None:
        return self._get_author()["name"]

    @property
    def author_email(self) -> str | None:
        return self._get_author()["email"]

    @property
    def maintainer_name(self) -> str | None:
        return self._get_maintainer()["name"]

    @property
    def maintainer_email(self) -> str | None:
        return self._get_maintainer()["email"]

    @property
    def requires(self) -> list[Dependency]:
        """
        Returns the main dependencies.
        """
        if not self._dependency_groups or MAIN_GROUP not in self._dependency_groups:
            return []

        return self._dependency_groups[MAIN_GROUP].dependencies

    @property
    def all_requires(self) -> list[Dependency]:
        """
        Returns the main dependencies and group dependencies
        enriched with Poetry-specific information for locking.
        """
        return [
            dependency
            for group in self._dependency_groups.values()
            for dependency in group.dependencies_for_locking
        ]

    def _set_version(self, version: str | Version) -> None:
        from poetry.core.constraints.version import Version

        if not isinstance(version, Version):
            try:
                version = Version.parse(version)
            except InvalidVersionError:
                raise InvalidVersionError(
                    f"Invalid version '{version}' on package {self.name}"
                )

        self._version = version

    def _get_author(self) -> dict[str, str | None]:
        if not self.authors:
            return {"name": None, "email": None}

        m = AUTHOR_REGEX.match(self.authors[0])

        if m is None:
            raise ValueError(
                "Invalid author string. Must be in the format: "
                "John Smith "
            )

        name = m.group("name")
        email = m.group("email")

        return {"name": name, "email": email}

    def _get_maintainer(self) -> dict[str, str | None]:
        if not self.maintainers:
            return {"name": None, "email": None}

        m = AUTHOR_REGEX.match(self.maintainers[0])

        if m is None:
            raise ValueError(
                "Invalid maintainer string. Must be in the format: "
                "John Smith "
            )

        name = m.group("name")
        email = m.group("email")

        return {"name": name, "email": email}

    @property
    def python_versions(self) -> str:
        return self._python_versions

    @python_versions.setter
    def python_versions(self, value: str) -> None:
        try:
            constraint = parse_constraint(value)
        except ParseConstraintError:
            raise ParseConstraintError(f"Invalid python versions '{value}' on {self}")

        if constraint.is_empty():
            raise ParseConstraintError(f"Python versions '{value}' on {self} is empty")

        self._python_versions = value
        self._python_constraint = constraint

    @property
    def python_constraint(self) -> VersionConstraint:
        return self._python_constraint

    @property
    def python_marker(self) -> BaseMarker:
        from poetry.core.packages.utils.utils import create_nested_marker
        from poetry.core.version.markers import parse_marker

        warnings.warn(
            "`python_marker` is deprecated and will be removed in a future release.",
            DeprecationWarning,
            stacklevel=2,
        )

        return parse_marker(
            create_nested_marker("python_version", self._python_constraint)
        )

    @property
    def license(self) -> License | None:
        return self._license

    @license.setter
    def license(self, value: str | License | None) -> None:
        from poetry.core.spdx.helpers import license_by_id
        from poetry.core.spdx.license import License

        if value is None or isinstance(value, License):
            self._license = value
        else:
            self._license = license_by_id(value)

    @property
    def all_classifiers(self) -> list[str]:
        from poetry.core.constraints.version import Version

        classifiers = list(self.classifiers)

        # Automatically set python classifiers
        if self.python_versions == "*":
            python_constraint = parse_constraint("~2.7 || ^3.4")
        else:
            python_constraint = self.python_constraint

        python_classifier_prefix = "Programming Language :: Python"
        python_classifiers = []

        # we sort python versions by sorting an int tuple of (major, minor) version
        # to ensure we sort 3.10 after 3.9
        for version in sorted(
            self.AVAILABLE_PYTHONS, key=lambda x: tuple(map(int, x.split(".")))
        ):
            if len(version) == 1:
                constraint = parse_constraint(version + ".*")
            else:
                constraint = Version.parse(version)

            if python_constraint.allows_any(constraint):
                classifier = f"{python_classifier_prefix} :: {version}"
                if classifier not in python_classifiers:
                    python_classifiers.append(classifier)

        # Automatically set license classifiers
        if self.license:
            classifiers.append(self.license.classifier)

        # Sort classifiers and insert python classifiers at the right location. We do
        # it like this so that 3.10 is sorted after 3.9.
        sorted_classifiers = []
        python_classifiers_inserted = False
        for classifier in sorted(set(classifiers) - set(python_classifiers)):
            if (
                not python_classifiers_inserted
                and classifier > python_classifier_prefix
            ):
                sorted_classifiers.extend(python_classifiers)
                python_classifiers_inserted = True
            sorted_classifiers.append(classifier)

        if not python_classifiers_inserted:
            sorted_classifiers.extend(python_classifiers)

        return sorted_classifiers
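    # Illustrative result: with python_versions = "^3.9", the automatic
    # classifiers are "Programming Language :: Python :: 3" plus ":: 3.9"
    # through ":: 3.13", merged in sorted order with any explicit classifiers
    # and the license classifier.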

    @property
    def urls(self) -> dict[str, str]:
        urls = {}

        if self.homepage:
            urls["Homepage"] = self.homepage

        if self.repository_url:
            urls["Repository"] = self.repository_url

        if self.documentation_url:
            urls["Documentation"] = self.documentation_url

        return urls

    @property
    def yanked(self) -> bool:
        return isinstance(self._yanked, str) or bool(self._yanked)

    @property
    def yanked_reason(self) -> str:
        if isinstance(self._yanked, str):
            return self._yanked
        return ""

    def is_prerelease(self) -> bool:
        return self._version.is_unstable()

    def is_root(self) -> bool:
        return False

    def dependency_group_names(self, include_optional: bool = False) -> set[str]:
        return {
            name
            for name, group in self._dependency_groups.items()
            if not group.is_optional() or include_optional
        }

    def add_dependency_group(self, group: DependencyGroup) -> None:
        groups = dict(self._dependency_groups)
        groups[group.name] = group
        self._dependency_groups = groups

    def has_dependency_group(self, name: str) -> bool:
        return name in self._dependency_groups

    def dependency_group(self, name: str) -> DependencyGroup:
        if not self.has_dependency_group(name):
            raise ValueError(f'The dependency group "{name}" does not exist.')

        return self._dependency_groups[name]

    def add_dependency(
        self,
        dependency: Dependency,
    ) -> Dependency:
        from poetry.core.packages.dependency_group import DependencyGroup

        for group_name in dependency.groups:
            if group_name not in self._dependency_groups:
                # Dynamically add the dependency group
                self.add_dependency_group(DependencyGroup(group_name))

            self._dependency_groups[group_name].add_dependency(dependency)

        return dependency

    def without_dependency_groups(self: T, groups: Collection[str]) -> T:
        """
        Returns a clone of the package with the given dependency groups excluded.
        """
        updated_groups = {
            group_name: group
            for group_name, group in self._dependency_groups.items()
            if group_name not in groups
        }

        package = self.clone()
        package._dependency_groups = updated_groups

        return package

    def without_optional_dependency_groups(self: T) -> T:
        """
        Returns a clone of the package without optional dependency groups.
        """
        updated_groups = {
            group_name: group
            for group_name, group in self._dependency_groups.items()
            if not group.is_optional()
        }
        package = self.clone()
        package._dependency_groups = updated_groups

        return package

    def with_dependency_groups(
        self: T, groups: Collection[str], only: bool = False
    ) -> T:
        """
        Returns a clone of the package with the given dependency groups opted in.

        Note that it will return all dependencies across all non-optional
        groups plus the given, optional, groups.

        If `only` is set to True, then only the given groups will be selected.
        """
        updated_groups = {
            group_name: group
            for group_name, group in self._dependency_groups.items()
            if group_name in groups or (not only and not group.is_optional())
        }
        package = self.clone()
        package._dependency_groups = updated_groups

        return package

    def to_dependency(self) -> Dependency:
        from pathlib import Path

        from poetry.core.packages.dependency import Dependency
        from poetry.core.packages.directory_dependency import DirectoryDependency
        from poetry.core.packages.file_dependency import FileDependency
        from poetry.core.packages.url_dependency import URLDependency
        from poetry.core.packages.vcs_dependency import VCSDependency

        dep: Dependency
        if self.source_type == "directory":
            assert self._source_url is not None
            dep = DirectoryDependency(
                self._name,
                Path(self._source_url),
                groups=list(self._dependency_groups.keys()),
                optional=self.optional,
                base=self.root_dir,
                develop=self.develop,
                extras=self.features,
            )
        elif self.source_type == "file":
            assert self._source_url is not None
            dep = FileDependency(
                self._name,
                Path(self._source_url),
                directory=self.source_subdirectory,
                groups=list(self._dependency_groups.keys()),
                optional=self.optional,
                base=self.root_dir,
                extras=self.features,
            )
        elif self.source_type == "url":
            assert self._source_url is not None
            dep = URLDependency(
                self._name,
                self._source_url,
                directory=self.source_subdirectory,
                groups=list(self._dependency_groups.keys()),
                optional=self.optional,
                extras=self.features,
            )
        elif self.source_type == "git":
            assert self._source_url is not None
            dep = VCSDependency(
                self._name,
                self.source_type,
                self._source_url,
                rev=self.source_reference,
                resolved_rev=self.source_resolved_reference,
                directory=self.source_subdirectory,
                groups=list(self._dependency_groups.keys()),
                optional=self.optional,
                develop=self.develop,
                extras=self.features,
            )
        else:
            dep = Dependency(self._name, self._version, extras=self.features)

        if not self.marker.is_any():
            dep.marker = self.marker

        if not self.python_constraint.is_any():
            dep.python_versions = self.python_versions

        if not self.is_direct_origin():
            return dep

        return dep.with_constraint(self._version)

    def satisfies(
        self, dependency: Dependency, ignore_source_type: bool = False
    ) -> bool:
        """
        Helper method to check if this package satisfies a given dependency.

        This is determined by assessing if this instance provides the package specified
        by the given dependency. Further, version and source types are checked.
        """
        if self.name != dependency.name:
            return False

        if not dependency.constraint.allows(self.version):
            return False

        if not (ignore_source_type or self.source_satisfies(dependency)):  # noqa: SIM103
            return False

        return True

    def source_satisfies(self, dependency: Dependency) -> bool:
        """Determine whether this package's source satisfies the given dependency."""
        if dependency.source_type is None:
            if dependency.source_name is None:
                # The dependency doesn't care about the source, so this package
                # certainly satisfies it.
                return True

            # The dependency specifies a source_name but not a type: it wants either
            # pypi or a legacy repository.
            #
            # - If this package has no source type then it's from pypi, so it
            #   matches if and only if that's what the dependency wants
            # - Else this package is a match if and only if it is from the desired
            #   repository
            if self.source_type is None:
                return dependency.source_name.lower() == "pypi"

            return (
                self.source_type == "legacy"
                and self.source_reference is not None
                and self.source_reference.lower() == dependency.source_name.lower()
            )

        # The dependency specifies a source: this package matches if and only if it is
        # from that source.
        return dependency.is_same_source_as(self)
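    # Illustrative checks (hypothetical values): a package with
    # source_type=None satisfies a dependency whose source_name is "PyPI",
    # while a "legacy" package matches only when its source_reference equals
    # the dependency's source_name (case-insensitively).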

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Package):
            return NotImplemented

        return super().__eq__(other) and self._version == other.version

    def __hash__(self) -> int:
        return super().__hash__() ^ hash(self._version)

    def __str__(self) -> str:
        return f"{self.complete_name} ({self.full_pretty_version})"

    def __repr__(self) -> str:
        args = [repr(self._name), repr(self._version.text)]

        if self._features:
            args.append(f"features={self._features!r}")

        if self._source_type:
            args.append(f"source_type={self._source_type!r}")
            args.append(f"source_url={self._source_url!r}")

            if self._source_reference:
                args.append(f"source_reference={self._source_reference!r}")

            if self._source_resolved_reference:
                args.append(
                    f"source_resolved_reference={self._source_resolved_reference!r}"
                )
            if self._source_subdirectory:
                args.append(f"source_subdirectory={self._source_subdirectory!r}")

        args_str = ", ".join(args)
        return f"Package({args_str})"
poetry-core-2.1.1/src/poetry/core/packages/path_dependency.py000066400000000000000000000050611475444614500243260ustar00rootroot00000000000000from __future__ import annotations

import logging

from abc import ABC
from abc import abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING

from poetry.core.packages.dependency import Dependency
from poetry.core.packages.utils.utils import path_to_url


if TYPE_CHECKING:
    from collections.abc import Iterable


logger = logging.getLogger(__name__)


class PathDependency(Dependency, ABC):
    @abstractmethod
    def __init__(
        self,
        name: str,
        path: Path,
        *,
        source_type: str,
        groups: Iterable[str] | None = None,
        optional: bool = False,
        base: Path | None = None,
        subdirectory: str | None = None,
        extras: Iterable[str] | None = None,
    ) -> None:
        # Attributes must be immutable for clone() to be safe!
        # (For performance reasons, clone only creates a copy instead of a deep copy).
        assert source_type in ("file", "directory")
        self._path = path
        self._base = base or Path.cwd()
        self._full_path = path

        if not self._path.is_absolute():
            self._full_path = self._base.joinpath(self._path).resolve()

        super().__init__(
            name,
            "*",
            groups=groups,
            optional=optional,
            allows_prereleases=True,
            source_type=source_type,
            source_url=self._full_path.as_posix(),
            source_subdirectory=subdirectory,
            extras=extras,
        )
        # cache validation result to avoid unnecessary file system access
        self._validation_error = self._validate()
        self.validate(raise_error=False)

    @property
    def path(self) -> Path:
        return self._path

    @property
    def full_path(self) -> Path:
        return self._full_path

    @property
    def base(self) -> Path:
        return self._base

    def is_file(self) -> bool:
        return self._source_type == "file"

    def is_directory(self) -> bool:
        return self._source_type == "directory"

    def validate(self, *, raise_error: bool) -> bool:
        if not self._validation_error:
            return True
        if raise_error:
            raise ValueError(self._validation_error)
        logger.warning(self._validation_error)
        return False

    @property
    def base_pep_508_name(self) -> str:
        return f"{self.complete_pretty_name} @ {path_to_url(self.full_path)}"

    def _validate(self) -> str:
        if not self._full_path.exists():
            return f"Path {self._full_path} for {self.pretty_name} does not exist"
        return ""
poetry-core-2.1.1/src/poetry/core/packages/project_package.py000066400000000000000000000073311475444614500243170ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING
from typing import Any

from poetry.core.constraints.version import parse_constraint


if TYPE_CHECKING:
    from collections.abc import Mapping
    from collections.abc import Sequence

    from poetry.core.constraints.version import Version
    from poetry.core.packages.dependency import Dependency

from poetry.core.packages.package import Package


class ProjectPackage(Package):
    def __init__(
        self,
        name: str,
        version: str | Version,
    ) -> None:
        super().__init__(name, version)

        # Attributes must be immutable for clone() to be safe!
        # (For performance reasons, clone only creates a copy instead of a deep copy).

        self.build_config: Mapping[str, Any] = {}
        self.packages: Sequence[Mapping[str, Any]] = []
        self.include: Sequence[Mapping[str, Any]] = []
        self.exclude: Sequence[Mapping[str, Any]] = []
        self.custom_urls: Mapping[str, str] = {}
        self._requires_python: str = "*"
        self.dynamic_classifiers = True

        self.entry_points: Mapping[str, dict[str, str]] = {}

        if self._python_versions == "*":
            self._python_constraint = parse_constraint("~2.7 || >=3.4")

    @property
    def build_script(self) -> str | None:
        return self.build_config.get("script")

    def is_root(self) -> bool:
        return True

    def to_dependency(self) -> Dependency:
        dependency = super().to_dependency()

        dependency.is_root = True

        return dependency

    @property
    def requires_python(self) -> str:
        return self._requires_python

    @requires_python.setter
    def requires_python(self, value: str) -> None:
        self._requires_python = value
        self.python_versions = value

    @property
    def python_versions(self) -> str:
        return self._python_versions

    @python_versions.setter
    def python_versions(self, value: str) -> None:
        self._python_versions = value

        if value == "*":
            if self._requires_python != "*":
                raise ValueError(
                    f'The Python constraint in [tool.poetry.dependencies] "{value}"'
                    ' is not a subset of "requires-python" in [project]'
                    f' "{self._requires_python}"'
                )
            value = "~2.7 || >=3.4"

        self._python_constraint = parse_constraint(value)
        if not parse_constraint(self._requires_python).allows_all(
            self._python_constraint
        ):
            raise ValueError(
                f'The Python constraint in [tool.poetry.dependencies] "{value}"'
                ' is not a subset of "requires-python" in [project]'
                f' "{self._requires_python}"'
            )

    @property
    def version(self) -> Version:
        # override version to make it settable
        return super().version

    @version.setter
    def version(self, value: str | Version) -> None:
        self._set_version(value)

    @property
    def all_classifiers(self) -> list[str]:
        if self.dynamic_classifiers:
            return super().all_classifiers

        return list(self.classifiers)

    @property
    def urls(self) -> dict[str, str]:
        urls = super().urls

        urls.update(self.custom_urls)

        return urls

    def __hash__(self) -> int:
        # The parent Package class's __hash__ incorporates the version because
        # a Package's version is immutable. But a ProjectPackage's version is
        # mutable. So call Package's parent hash function.
        return super(Package, self).__hash__()

    def build_should_generate_setup(self) -> bool:
        value: bool = self.build_config.get("generate-setup-file", False)
        return value
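
# A minimal sketch of how requires_python and python_versions interact; the
# project name and constraints are hypothetical.
if __name__ == "__main__":
    root = ProjectPackage("demo-project", "0.1.0")

    # Setting requires-python mirrors the value into python_versions:
    root.requires_python = ">=3.9"
    print(root.python_versions)  # >=3.9

    # Narrowing the constraint further is allowed...
    root.python_versions = ">=3.10"

    # ...but widening it beyond requires-python raises ValueError:
    try:
        root.python_versions = ">=3.8"
    except ValueError as e:
        print(e)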
poetry-core-2.1.1/src/poetry/core/packages/specification.py000066400000000000000000000157131475444614500240210ustar00rootroot00000000000000from __future__ import annotations

import copy

from typing import TYPE_CHECKING
from typing import TypeVar

from packaging.utils import canonicalize_name


if TYPE_CHECKING:
    from collections.abc import Iterable

    from packaging.utils import NormalizedName

    T = TypeVar("T", bound="PackageSpecification")


class PackageSpecification:
    def __init__(
        self,
        name: str,
        source_type: str | None = None,
        source_url: str | None = None,
        source_reference: str | None = None,
        source_resolved_reference: str | None = None,
        source_subdirectory: str | None = None,
        features: Iterable[str] | None = None,
    ) -> None:

        # Attributes must be immutable for clone() to be safe!
        # (For performance reasons, clone only creates a copy instead of a deep copy).

        self._pretty_name = name
        self._name = canonicalize_name(name)
        self._source_type = source_type
        self._source_url = self._normalize_source_url(source_type, source_url)
        self._source_reference = source_reference
        self._source_resolved_reference = source_resolved_reference
        self._source_subdirectory = source_subdirectory

        if not features:
            features = []

        self._features = frozenset(canonicalize_name(feature) for feature in features)

    @staticmethod
    def _normalize_source_url(
        source_type: str | None, source_url: str | None
    ) -> str | None:
        if source_type and source_url and source_type == "git":
            from poetry.core.vcs.git import ParsedUrl

            return ParsedUrl.parse(source_url).url

        return source_url

    @property
    def name(self) -> NormalizedName:
        return self._name

    @property
    def pretty_name(self) -> str:
        return self._pretty_name

    @property
    def complete_name(self) -> str:
        name: str = self._name

        if self._features:
            features = ",".join(sorted(self._features))
            name = f"{name}[{features}]"

        return name

    @property
    def complete_pretty_name(self) -> str:
        name = self._pretty_name

        if self._features:
            features = ",".join(sorted(self._features))
            name = f"{name}[{features}]"

        return name

    @property
    def source_type(self) -> str | None:
        return self._source_type

    @property
    def source_url(self) -> str | None:
        return self._source_url

    @property
    def source_reference(self) -> str | None:
        return self._source_reference

    @property
    def source_resolved_reference(self) -> str | None:
        return self._source_resolved_reference

    @property
    def source_subdirectory(self) -> str | None:
        return self._source_subdirectory

    @property
    def features(self) -> frozenset[NormalizedName]:
        return self._features

    def is_direct_origin(self) -> bool:
        return self._source_type in [
            "directory",
            "file",
            "url",
            "git",
        ]

    def provides(self, other: PackageSpecification) -> bool:
        """
        Helper method to determine if this package provides the given specification.

        This determination is made to be true, if the names are the same and this
        package provides all features required by the other specification.

        Source type checks are explicitly ignored here as this is not of interest.
        """
        return self.name == other.name and self.features.issuperset(other.features)

    def is_same_source_as(self, other: PackageSpecification) -> bool:
        if self._source_type != other.source_type:
            return False

        if not self._source_type:
            # both packages are of source type None
            # no need to check further
            return True

        if (
            self._source_url or other.source_url
        ) and self._source_url != other.source_url:
            return False

        if (
            self._source_subdirectory or other.source_subdirectory
        ) and self._source_subdirectory != other.source_subdirectory:
            return False

        # We check the resolved reference first:
        # if they match we assume equality regardless
        # of their source reference.
        # This is important when comparing a resolved branch VCS
        # dependency to a direct commit reference VCS dependency
        if (
            self._source_resolved_reference
            and other.source_resolved_reference
            and self._source_resolved_reference == other.source_resolved_reference
        ):
            return True

        if self._source_reference or other.source_reference:
            # special handling for packages with references
            if not (self._source_reference and other.source_reference):
                # case: one reference is defined and is non-empty, but other is not
                return False

            if not (
                self._source_reference == other.source_reference
                or self._source_reference.startswith(other.source_reference)
                or other.source_reference.startswith(self._source_reference)
            ):
                # case: both references defined, but one is not equal to or a short
                # representation of the other
                return False

            if (
                self._source_resolved_reference
                and other.source_resolved_reference
                and self._source_resolved_reference != other.source_resolved_reference
            ):
                return False

        return True

    def is_same_package_as(self, other: PackageSpecification) -> bool:
        if other.complete_name != self.complete_name:
            return False

        return self.is_same_source_as(other)

    def clone(self: T) -> T:
        return copy.copy(self)

    def with_features(self: T, features: Iterable[str]) -> T:
        package = self.clone()

        package._features = frozenset(
            canonicalize_name(feature) for feature in features
        )

        return package

    def without_features(self: T) -> T:
        return self.with_features([])

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, PackageSpecification):
            return NotImplemented
        return self.is_same_package_as(other)

    def __hash__(self) -> int:
        result = hash(self.complete_name)  # complete_name includes features

        if self._source_type:
            # Don't include _source_reference and _source_resolved_reference in hash
            # because two specs can be equal even if these attributes are not equal.
            # (They must still meet certain conditions. See is_same_source_as().)
            result ^= (
                hash(self._source_type)
                ^ hash(self._source_url)
                ^ hash(self._source_subdirectory)
            )

        return result

    def __str__(self) -> str:
        raise NotImplementedError
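
# A minimal sketch of name canonicalization, features, and provides(); the
# package and extra names are hypothetical.
if __name__ == "__main__":
    base = PackageSpecification("Demo-Pkg")
    with_extra = base.with_features(["postgres"])

    print(base.name)                 # demo-pkg (canonicalized)
    print(with_extra.complete_name)  # demo-pkg[postgres]

    # A spec carrying the extra provides the plain spec, but not vice versa:
    assert with_extra.provides(base)
    assert not base.provides(with_extra)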
poetry-core-2.1.1/src/poetry/core/packages/url_dependency.py000066400000000000000000000031201475444614500241660ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING
from urllib.parse import urlparse

from poetry.core.packages.dependency import Dependency


if TYPE_CHECKING:
    from collections.abc import Iterable


class URLDependency(Dependency):
    def __init__(
        self,
        name: str,
        url: str,
        *,
        directory: str | None = None,
        groups: Iterable[str] | None = None,
        optional: bool = False,
        extras: Iterable[str] | None = None,
    ) -> None:
        # Attributes must be immutable for clone() to be safe!
        # (For performance reasons, clone only creates a copy instead of a deep copy).
        self._url = url
        self._directory = directory

        parsed = urlparse(url)
        if not parsed.scheme or not parsed.netloc:
            raise ValueError(f"{url} does not seem like a valid url")

        super().__init__(
            name,
            "*",
            groups=groups,
            optional=optional,
            allows_prereleases=True,
            source_type="url",
            source_url=self._url,
            source_subdirectory=directory,
            extras=extras,
        )

    @property
    def url(self) -> str:
        return self._url

    @property
    def directory(self) -> str | None:
        return self._directory

    @property
    def base_pep_508_name(self) -> str:
        requirement = f"{self.complete_pretty_name} @ {self._url}"

        if self.directory:
            requirement += f"#subdirectory={self.directory}"

        return requirement

    def is_url(self) -> bool:
        return True
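
# A minimal usage sketch; the URL is hypothetical. URLs without a scheme or
# host raise ValueError in the constructor.
if __name__ == "__main__":
    dep = URLDependency(
        "demo-pkg",
        "https://example.com/demo_pkg-1.0-py3-none-any.whl",
    )
    print(dep.base_pep_508_name)
    # demo-pkg @ https://example.com/demo_pkg-1.0-py3-none-any.whl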
poetry-core-2.1.1/src/poetry/core/packages/utils/000077500000000000000000000000001475444614500217605ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/packages/utils/__init__.py000066400000000000000000000000001475444614500240570ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/packages/utils/link.py000066400000000000000000000154371475444614500233010ustar00rootroot00000000000000from __future__ import annotations

import posixpath
import re
import urllib.parse as urlparse

from functools import cached_property
from typing import TYPE_CHECKING

from poetry.core.packages.utils.utils import path_to_url
from poetry.core.packages.utils.utils import splitext


if TYPE_CHECKING:
    from collections.abc import Mapping


class Link:
    def __init__(
        self,
        url: str,
        *,
        requires_python: str | None = None,
        hashes: Mapping[str, str] | None = None,
        metadata: str | bool | dict[str, str] | None = None,
        yanked: str | bool = False,
    ) -> None:
        """
        Object representing a parsed link from https://pypi.python.org/simple/*

        url:
            url of the resource pointed to (href of the link)
        requires_python:
            String containing the `Requires-Python` metadata field, specified
            in PEP 345. This may be specified by a data-requires-python
            attribute in the HTML link tag, as described in PEP 503.
        hashes:
            A dictionary of hash names and associated hashes of the file.
            Only relevant for JSON-API (PEP 691).
        metadata:
            One of:
            - bool indicating that metadata is available
            - string of the syntax `<hash-name>=<hash-value>` representing the hash
              of the Core Metadata file according to PEP 658 (HTML).
            - dict with hash names and associated hashes of the Core Metadata file
              according to PEP 691 (JSON).
        yanked:
            False, if the data-yanked attribute is not present.
            A string, if the data-yanked attribute has a string value.
            True, if the data-yanked attribute is present but has no value.
            According to PEP 592.
        """

        # url can be a UNC windows share
        if url.startswith("\\\\"):
            url = path_to_url(url)

        self.url = url
        self.requires_python = requires_python if requires_python else None
        self._hashes = hashes

        if isinstance(metadata, str):
            metadata = {"true": True, "": False, "false": False}.get(
                metadata.strip().lower(), metadata
            )

        self._metadata = metadata
        self._yanked = yanked

    def __str__(self) -> str:
        if self.requires_python:
            rp = f" (requires-python:{self.requires_python})"
        else:
            rp = ""

        return f"{self.url}{rp}"

    def __repr__(self) -> str:
        return f""

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url == other.url

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url != other.url

    def __lt__(self, other: object) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url < other.url

    def __le__(self, other: object) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url <= other.url

    def __gt__(self, other: object) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url > other.url

    def __ge__(self, other: object) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url >= other.url

    def __hash__(self) -> int:
        return hash(self.url)

    @cached_property
    def filename(self) -> str:
        _, netloc, path, _, _ = urlparse.urlsplit(self.url)
        name = posixpath.basename(path.rstrip("/")) or netloc
        name = urlparse.unquote(name)

        return name

    @cached_property
    def scheme(self) -> str:
        return urlparse.urlsplit(self.url)[0]

    @cached_property
    def netloc(self) -> str:
        return urlparse.urlsplit(self.url)[1]

    @cached_property
    def path(self) -> str:
        return urlparse.unquote(urlparse.urlsplit(self.url)[2])

    def splitext(self) -> tuple[str, str]:
        return splitext(posixpath.basename(self.path.rstrip("/")))

    @cached_property
    def ext(self) -> str:
        return self.splitext()[1]

    @cached_property
    def url_without_fragment(self) -> str:
        scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
        return urlparse.urlunsplit((scheme, netloc, path, query, None))

    _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")

    @cached_property
    def egg_fragment(self) -> str | None:
        match = self._egg_fragment_re.search(self.url)
        if not match:
            return None
        return match.group(1)

    _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")

    @cached_property
    def subdirectory_fragment(self) -> str | None:
        match = self._subdirectory_fragment_re.search(self.url)
        if not match:
            return None
        return match.group(1)

    _hash_re = re.compile(r"(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)")

    @cached_property
    def has_metadata(self) -> bool:
        if self._metadata is None:
            return False
        return bool(self._metadata) and (self.is_wheel or self.is_sdist)

    @cached_property
    def metadata_url(self) -> str | None:
        if self.has_metadata:
            return f"{self.url_without_fragment.split('?', 1)[0]}.metadata"
        return None

    @cached_property
    def metadata_hashes(self) -> Mapping[str, str]:
        if self.has_metadata:
            if isinstance(self._metadata, dict):
                return self._metadata
            if isinstance(self._metadata, str):
                match = self._hash_re.search(self._metadata)
                if match:
                    return {match.group(1): match.group(2)}
        return {}

    @cached_property
    def hashes(self) -> Mapping[str, str]:
        if self._hashes:
            return self._hashes
        match = self._hash_re.search(self.url)
        if match:
            return {match.group(1): match.group(2)}
        return {}

    @cached_property
    def show_url(self) -> str:
        return posixpath.basename(self.url.split("#", 1)[0].split("?", 1)[0])

    @cached_property
    def is_wheel(self) -> bool:
        return self.ext == ".whl"

    @cached_property
    def is_wininst(self) -> bool:
        return self.ext == ".exe"

    @cached_property
    def is_egg(self) -> bool:
        return self.ext == ".egg"

    @cached_property
    def is_sdist(self) -> bool:
        return self.ext in {".tar.bz2", ".tar.gz", ".zip"}

    @cached_property
    def yanked(self) -> bool:
        return isinstance(self._yanked, str) or bool(self._yanked)

    @cached_property
    def yanked_reason(self) -> str:
        if isinstance(self._yanked, str):
            return self._yanked
        return ""
poetry-core-2.1.1/src/poetry/core/packages/utils/utils.py000066400000000000000000000326001475444614500234730ustar00rootroot00000000000000from __future__ import annotations

import functools
import re
import sys

from contextlib import suppress
from pathlib import Path
from typing import TYPE_CHECKING
from urllib.parse import unquote
from urllib.parse import urlsplit
from urllib.request import url2pathname

from poetry.core.constraints.version import Version
from poetry.core.constraints.version import VersionRange
from poetry.core.constraints.version import parse_marker_version_constraint
from poetry.core.version.markers import SingleMarker
from poetry.core.version.markers import SingleMarkerLike
from poetry.core.version.markers import dnf


if TYPE_CHECKING:
    from poetry.core.constraints.generic import BaseConstraint
    from poetry.core.constraints.version import VersionConstraint
    from poetry.core.version.markers import BaseMarker

    # Even though we've `from __future__ import annotations`, mypy doesn't seem to like
    # this as `dict[str, ...]`
    ConvertedMarkers = dict[str, list[list[tuple[str, str]]]]


BZ2_EXTENSIONS = (".tar.bz2", ".tbz")
XZ_EXTENSIONS = (".tar.xz", ".txz", ".tlz", ".tar.lz", ".tar.lzma")
ZIP_EXTENSIONS = (".zip", ".whl")
TAR_EXTENSIONS = (".tar.gz", ".tgz", ".tar")
ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS
SUPPORTED_EXTENSIONS: tuple[str, ...] = ZIP_EXTENSIONS + TAR_EXTENSIONS

with suppress(ImportError):
    import bz2  # noqa: F401

    SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS

with suppress(ImportError):
    # lzma is unavailable if Python was compiled without liblzma support
    import lzma  # noqa: F401

    SUPPORTED_EXTENSIONS += XZ_EXTENSIONS


def path_to_url(path: str | Path) -> str:
    """
    Convert a path to a file:// URL. The path is made absolute and its
    parts are percent-quoted.
    """
    return Path(path).absolute().as_uri()


def url_to_path(url: str) -> Path:
    """
    Convert an RFC8089 file URI to path.

    The logic used here is borrowed from pip
    https://github.com/pypa/pip/blob/4d1932fcdd1974c820ea60b3286984ebb0c3beaa/src/pip/_internal/utils/urls.py#L31
    """
    if not url.startswith("file:"):
        raise ValueError(f"{url} is not a valid file URI")

    _, netloc, path, _, _ = urlsplit(url)

    if not netloc or netloc == "localhost":
        # According to RFC 8089, same as empty authority.
        netloc = ""
    elif netloc not in {".", ".."} and sys.platform == "win32":
        # If we have a UNC path, prepend UNC share notation.
        netloc = "\\\\" + netloc
    else:
        raise ValueError(
            f"non-local file URIs are not supported on this platform: {url}"
        )

    return Path(url2pathname(netloc + unquote(path)))


def is_url(name: str) -> bool:
    if ":" not in name:
        return False
    scheme = name.split(":", 1)[0].lower()

    return scheme in [
        "http",
        "https",
        "file",
        "ftp",
        "ssh",
        "git",
        "hg",
        "bzr",
        "sftp",
        "svn",
        "ssh",
    ]


def strip_extras(path: str) -> tuple[Path, str | None]:
    m = re.match(r"^(.+)(\[[^\]]+\])$", path)
    extras = None
    if m:
        path_no_extras = m.group(1)
        extras = m.group(2)
    else:
        path_no_extras = path

    return Path(path_no_extras), extras


@functools.cache
def cached_is_dir(path: Path) -> bool:
    """A cached version of `Path.is_dir`."""
    return path.is_dir()


@functools.cache
def is_python_project(path: Path) -> bool:
    """Return true if the directory is a Python project"""
    if not cached_is_dir(path):
        return False

    setup_py = path / "setup.py"
    setup_cfg = path / "setup.cfg"
    setuptools_project = setup_py.exists() or setup_cfg.exists()

    pyproject = (path / "pyproject.toml").exists()

    return pyproject or setuptools_project


def is_archive_file(name: str | Path) -> bool:
    """Return True if `name` is a considered as an archive file."""
    ext = splitext(name)[1].lower()
    return ext in ARCHIVE_EXTENSIONS


def splitext(path: str | Path) -> tuple[str, str]:
    """Like pathlib.Path.stem and suffix, but take off .tar too"""
    if isinstance(path, str):
        path = Path(path)
    base, ext = path.stem, path.suffix
    if base.lower().endswith(".tar"):
        ext = f"{base[-4:]}{ext}"
        base = base[:-4]
    return base, ext


def convert_markers(marker: BaseMarker) -> ConvertedMarkers:
    from poetry.core.version.markers import MarkerUnion
    from poetry.core.version.markers import MultiMarker
    from poetry.core.version.markers import SingleMarker

    requirements: ConvertedMarkers = {}
    marker = dnf(marker)
    conjunctions = marker.markers if isinstance(marker, MarkerUnion) else [marker]
    group_count = len(conjunctions)

    def add_constraint(
        marker_name: str, constraint: tuple[str, str], group_index: int
    ) -> None:
        # python_full_version is equivalent to python_version
        # for Poetry so we merge them
        if marker_name == "python_full_version":
            marker_name = "python_version"
        if marker_name not in requirements:
            requirements[marker_name] = [[] for _ in range(group_count)]
        requirements[marker_name][group_index].append(constraint)

    for i, sub_marker in enumerate(conjunctions):
        if isinstance(sub_marker, MultiMarker):
            for m in sub_marker.markers:
                assert isinstance(m, SingleMarkerLike)
                if isinstance(m, SingleMarker):
                    add_constraint(m.name, (m.operator, m.value), i)
                else:
                    add_constraint(m.name, ("", str(m.constraint)), i)
        elif isinstance(sub_marker, SingleMarkerLike):
            if isinstance(sub_marker, SingleMarker):
                add_constraint(
                    sub_marker.name, (sub_marker.operator, sub_marker.value), i
                )
            else:
                add_constraint(sub_marker.name, ("", str(sub_marker.constraint)), i)

    for group_name in requirements:
        # remove duplicates
        seen = []
        for r in requirements[group_name]:
            if r not in seen:
                seen.append(r)
        requirements[group_name] = seen

    return requirements


def contains_group_without_marker(markers: ConvertedMarkers, marker_name: str) -> bool:
    return marker_name not in markers or [] in markers[marker_name]


def create_nested_marker(
    name: str,
    constraint: BaseConstraint | VersionConstraint,
) -> str:
    from poetry.core.constraints.generic import Constraint
    from poetry.core.constraints.generic import MultiConstraint
    from poetry.core.constraints.generic import UnionConstraint
    from poetry.core.constraints.version import VersionUnion

    if constraint.is_any():
        return ""

    if isinstance(constraint, (MultiConstraint, UnionConstraint)):
        multi_parts = []
        for c in constraint.constraints:
            multi = isinstance(c, (MultiConstraint, UnionConstraint))
            multi_parts.append((multi, create_nested_marker(name, c)))

        glue = " and "
        if isinstance(constraint, UnionConstraint):
            parts = [f"({part[1]})" if part[0] else part[1] for part in multi_parts]
            glue = " or "
        else:
            parts = [part[1] for part in multi_parts]

        marker = glue.join(parts)
    elif isinstance(constraint, Constraint):
        marker = f'{name} {constraint.operator} "{constraint.value}"'
    elif isinstance(constraint, VersionUnion):
        parts = [create_nested_marker(name, c) for c in constraint.ranges]
        glue = " or "
        parts = [f"({part})" for part in parts]
        marker = glue.join(parts)
    elif isinstance(constraint, Version):
        if name == "python_version" and constraint.precision >= 3:
            name = "python_full_version"

        marker = f'{name} == "{constraint.text}"'
    else:
        assert isinstance(constraint, VersionRange), (
            f"Unexpected constraint of type {type(constraint)}"
        )
        min_name = max_name = name

        parts = []

        # `python_version` is a special case: to keep the constructed marker equivalent
        # to the constraint we need to be careful with the precision.
        #
        # PEP 440 tells us that when we come to make the comparison the release
        # segment will be zero padded: eg "<= 3.10" is equivalent to "<= 3.10.0".
        #
        # But "python_version <= 3.10" is _not_ equivalent to "python_version <= 3.10.0"
        # - see normalize_python_version_markers.
        #
        # A similar issue arises for a constraint like "> 3.6".
        if constraint.min is not None:
            op = ">=" if constraint.include_min else ">"
            version = constraint.min
            if min_name == "python_version" and version.precision >= 3:
                min_name = "python_full_version"

            if (
                min_name == "python_version"
                and not constraint.include_min
                and version.precision < 3
            ):
                padding = ".0" * (3 - version.precision)
                part = f'python_full_version > "{version}{padding}"'
            else:
                part = f'{min_name} {op} "{version}"'

            parts.append(part)

        if constraint.max is not None:
            op = "<=" if constraint.include_max else "<"
            version = constraint.max
            if max_name == "python_version" and version.precision >= 3:
                max_name = "python_full_version"

            if (
                max_name == "python_version"
                and constraint.include_max
                and version.precision < 3
            ):
                padding = ".0" * (3 - version.precision)
                part = f'python_full_version <= "{version}{padding}"'
            else:
                part = f'{max_name} {op} "{version}"'

            parts.append(part)

        marker = " and ".join(parts)

    return marker


def get_python_constraint_from_marker(
    marker: BaseMarker,
) -> VersionConstraint:
    from poetry.core.constraints.version import EmptyConstraint
    from poetry.core.constraints.version import VersionRange

    python_marker = marker.only("python_version", "python_full_version")
    if python_marker.is_any():
        return VersionRange()

    if python_marker.is_empty():
        return EmptyConstraint()

    markers = convert_markers(marker)
    if contains_group_without_marker(markers, "python_version"):
        # groups are in disjunctive normal form (DNF),
        # an empty group means that python_version does not appear in this group,
        # which means that python_version is arbitrary for this group
        return VersionRange()

    python_version_markers = markers["python_version"]
    normalized = normalize_python_version_markers(python_version_markers)
    constraint = parse_marker_version_constraint(normalized)
    return constraint


def normalize_python_version_markers(  # NOSONAR
    disjunction: list[list[tuple[str, str]]],
) -> str:
    ors = []
    for or_ in disjunction:
        ands = []
        for op, version in or_:
            # Expand python version
            if op == "==" and "*" not in version and version.count(".") < 2:
                version = "~" + version
                op = ""

            elif op == "!=" and "*" not in version and version.count(".") < 2:
                version += ".*"

            elif op in ("<=", ">"):
                # Make adjustments on encountering versions with less than full
                # precision.
                #
                # Per PEP-508:
                # python_version <-> '.'.join(platform.python_version_tuple()[:2])
                #
                # So for two digits of precision we make the following adjustments:
                # - `python_version > "x.y"` requires version >= x.(y+1).anything
                # - `python_version <= "x.y"` requires version < x.(y+1).anything
                #
                # Treatment when we see a single digit of precision is less clear: is
                # that even a legitimate marker?
                #
                # Experiment suggests that pip behaviour is essentially to make a
                # lexicographical comparison, for example `python_version > "3"` is
                # satisfied by version 3.anything, whereas `python_version <= "3"` is
                # satisfied only by version 2.anything.
                #
                # We achieve the above by fiddling with the operator and version in the
                # marker.
                parsed_version = Version.parse(version)
                if parsed_version.precision < 3:
                    if op == "<=":
                        op = "<"
                    elif op == ">":
                        op = ">="

                if parsed_version.precision == 2:
                    version = parsed_version.next_minor().text

            elif op in ("in", "not in"):
                versions = []
                for v in SingleMarker.VALUE_SEPARATOR_RE.split(version):
                    split = v.split(".")
                    if len(split) in (1, 2):
                        split.append("*")
                        op_ = "" if op == "in" else "!="
                    else:
                        op_ = "==" if op == "in" else "!="

                    versions.append(op_ + ".".join(split))

                if versions:
                    glue = " || " if op == "in" else ", "
                    ands.append(glue.join(versions))

                continue

            ands.append(f"{op}{version}")

        ors.append(" ".join(ands))

    return " || ".join(ors)
poetry-core-2.1.1/src/poetry/core/packages/vcs_dependency.py000066400000000000000000000070111475444614500241620ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

from poetry.core.packages.dependency import Dependency


if TYPE_CHECKING:
    from collections.abc import Iterable


class VCSDependency(Dependency):
    """
    Represents a VCS dependency
    """

    def __init__(
        self,
        name: str,
        vcs: str,
        source: str,
        branch: str | None = None,
        tag: str | None = None,
        rev: str | None = None,
        resolved_rev: str | None = None,
        directory: str | None = None,
        groups: Iterable[str] | None = None,
        optional: bool = False,
        develop: bool = False,
        extras: Iterable[str] | None = None,
    ) -> None:
        # Attributes must be immutable for clone() to be safe!
        # (For performance reasons, clone only creates a copy instead of a deep copy).
        self._vcs = vcs

        self._branch = branch
        self._tag = tag
        self._rev = rev
        self._directory = directory

        super().__init__(
            name,
            "*",
            groups=groups,
            optional=optional,
            allows_prereleases=True,
            source_type=self._vcs.lower(),
            source_url=source,
            source_reference=branch or tag or rev or "HEAD",
            source_resolved_reference=resolved_rev,
            source_subdirectory=directory,
            extras=extras,
        )

        self._source = self.source_url or source
        self._develop = develop

    @property
    def vcs(self) -> str:
        return self._vcs

    @property
    def source(self) -> str:
        return self._source

    @property
    def branch(self) -> str | None:
        return self._branch

    @property
    def tag(self) -> str | None:
        return self._tag

    @property
    def rev(self) -> str | None:
        return self._rev

    @property
    def directory(self) -> str | None:
        return self._directory

    @property
    def develop(self) -> bool:
        return self._develop

    @property
    def reference(self) -> str:
        reference = self._branch or self._tag or self._rev or ""
        return reference

    @property
    def pretty_constraint(self) -> str:
        if self._branch:
            what = "branch"
            version = self._branch
        elif self._tag:
            what = "tag"
            version = self._tag
        elif self._rev:
            what = "rev"
            version = self._rev
        else:
            return ""

        return f"{what} {version}"

    def _base_pep_508_name(self, *, resolved: bool = False) -> str:
        from poetry.core.vcs import git

        requirement = self.complete_pretty_name

        parsed_url = git.ParsedUrl.parse(self._source)
        if parsed_url.protocol is not None:
            requirement += f" @ {self._vcs}+{self._source}"
        else:
            requirement += f" @ {self._vcs}+ssh://{parsed_url.format()}"

        if resolved and self.source_resolved_reference:
            requirement += f"@{self.source_resolved_reference}"
        elif self.reference:
            requirement += f"@{self.reference}"

        if self._directory:
            requirement += f"#subdirectory={self._directory}"

        return requirement

    @property
    def base_pep_508_name(self) -> str:
        requirement = self._base_pep_508_name()
        return requirement

    @property
    def base_pep_508_name_resolved(self) -> str:
        requirement = self._base_pep_508_name(resolved=True)
        return requirement

    def is_vcs(self) -> bool:
        return True
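
# A minimal sketch of VCSDependency and its PEP 508 form; the repository URL
# and branch are hypothetical.
if __name__ == "__main__":
    dep = VCSDependency(
        "demo-pkg",
        "git",
        "https://github.com/example/demo-pkg.git",
        branch="main",
    )
    print(dep.pretty_constraint)  # branch main
    print(dep.base_pep_508_name)
    # demo-pkg @ git+https://github.com/example/demo-pkg.git@main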
poetry-core-2.1.1/src/poetry/core/poetry.py000066400000000000000000000057411475444614500207450ustar00rootroot00000000000000from __future__ import annotations

import logging

from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any

from packaging.utils import canonicalize_name

from poetry.core.packages.dependency import Dependency
from poetry.core.packages.directory_dependency import DirectoryDependency
from poetry.core.packages.file_dependency import FileDependency
from poetry.core.pyproject.toml import PyProjectTOML


logger = logging.getLogger(__name__)


if TYPE_CHECKING:
    from poetry.core.packages.project_package import ProjectPackage


class Poetry:
    def __init__(
        self,
        file: Path,
        local_config: dict[str, Any],
        package: ProjectPackage,
        pyproject_type: type[PyProjectTOML] = PyProjectTOML,
    ) -> None:
        self._pyproject = pyproject_type(file)
        self._package = package
        self._local_config = local_config
        self._build_system_dependencies: list[Dependency] | None = None

    @property
    def pyproject(self) -> PyProjectTOML:
        return self._pyproject

    @property
    def pyproject_path(self) -> Path:
        return self._pyproject.path

    @property
    def package(self) -> ProjectPackage:
        return self._package

    @property
    def is_package_mode(self) -> bool:
        package_mode = self._local_config["package-mode"]
        assert isinstance(package_mode, bool)
        return package_mode

    @property
    def local_config(self) -> dict[str, Any]:
        return self._local_config

    def get_project_config(self, config: str, default: Any = None) -> Any:
        return self._local_config.get("config", {}).get(config, default)

    @property
    def build_system_dependencies(self) -> list[Dependency]:
        if self._build_system_dependencies is None:
            build_system = self.pyproject.build_system
            self._build_system_dependencies = []

            for requirement in build_system.requires:
                dependency = None
                try:
                    dependency = Dependency.create_from_pep_508(requirement)
                except ValueError:
                    # PEP 517 requires can be path if not PEP 508
                    path = Path(requirement)

                    if path.is_file():
                        dependency = FileDependency(
                            name=canonicalize_name(path.name), path=path
                        )
                    elif path.is_dir():
                        dependency = DirectoryDependency(
                            name=canonicalize_name(path.name), path=path
                        )

                # skip since we could not determine requirement
                if dependency:
                    self._build_system_dependencies.append(dependency)
                else:
                    logger.debug(
                        "Skipping build system dependency - could not determine requirement type: %s",
                        requirement,
                    )

        return self._build_system_dependencies
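
# A minimal sketch of build_system_dependencies, constructing Poetry directly
# (normally done via a factory). The path and config are hypothetical; if
# pyproject.toml does not exist, PyProjectTOML falls back to the poetry-core
# defaults, so a single "poetry-core" dependency is reported.
if __name__ == "__main__":
    from poetry.core.packages.project_package import ProjectPackage

    package = ProjectPackage("demo-project", "0.1.0")
    poetry = Poetry(Path("pyproject.toml"), {"package-mode": True}, package)
    print([dep.name for dep in poetry.build_system_dependencies])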
poetry-core-2.1.1/src/poetry/core/py.typed000066400000000000000000000000001475444614500205270ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/pyproject/000077500000000000000000000000001475444614500210615ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/pyproject/__init__.py000066400000000000000000000000001475444614500231600ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/pyproject/exceptions.py000066400000000000000000000002111475444614500236060ustar00rootroot00000000000000from __future__ import annotations

from poetry.core.exceptions import PoetryCoreError


class PyProjectError(PoetryCoreError):
    pass
poetry-core-2.1.1/src/poetry/core/pyproject/tables.py000066400000000000000000000035651475444614500227160ustar00rootroot00000000000000from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING


if TYPE_CHECKING:
    from poetry.core.packages.dependency import Dependency


# TODO: Convert to a dataclass; Python 2.7 and 3.5 support has already been dropped
class BuildSystem:
    def __init__(
        self, build_backend: str | None = None, requires: list[str] | None = None
    ) -> None:
        self.build_backend = (
            build_backend
            if build_backend is not None
            else "setuptools.build_meta:__legacy__"
        )
        self.requires = requires if requires is not None else ["setuptools", "wheel"]
        self._dependencies: list[Dependency] | None = None

    @property
    def dependencies(self) -> list[Dependency]:
        if self._dependencies is None:
            # avoid circular dependency when loading DirectoryDependency
            from poetry.core.packages.dependency import Dependency
            from poetry.core.packages.directory_dependency import DirectoryDependency
            from poetry.core.packages.file_dependency import FileDependency

            self._dependencies = []
            for requirement in self.requires:
                dependency = None
                try:
                    dependency = Dependency.create_from_pep_508(requirement)
                except ValueError:
                    # PEP 517 requires can be path if not PEP 508
                    path = Path(requirement)
                    if path.is_file():
                        dependency = FileDependency(name=path.name, path=path)
                    elif path.is_dir():
                        dependency = DirectoryDependency(name=path.name, path=path)

                if dependency is None:
                    # skip since we could not determine requirement
                    continue

                self._dependencies.append(dependency)

        return self._dependencies
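
# A minimal sketch of BuildSystem defaults: with no arguments it falls back to
# the legacy setuptools backend and its requirements.
if __name__ == "__main__":
    build_system = BuildSystem()
    print(build_system.build_backend)                   # setuptools.build_meta:__legacy__
    print([d.name for d in build_system.dependencies])  # ['setuptools', 'wheel']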
poetry-core-2.1.1/src/poetry/core/pyproject/toml.py000066400000000000000000000062541475444614500224150ustar00rootroot00000000000000from __future__ import annotations

from contextlib import suppress
from typing import TYPE_CHECKING
from typing import Any

from poetry.core.pyproject.tables import BuildSystem
from poetry.core.utils._compat import tomllib


if TYPE_CHECKING:
    from pathlib import Path


class PyProjectTOML:
    def __init__(self, path: Path) -> None:
        self._path = path
        self._data: dict[str, Any] | None = None
        self._build_system: BuildSystem | None = None

    @property
    def path(self) -> Path:
        return self._path

    @property
    def data(self) -> dict[str, Any]:
        if self._data is None:
            if not self.path.exists():
                self._data = {}
            else:
                try:
                    with self.path.open("rb") as f:
                        self._data = tomllib.load(f)
                except tomllib.TOMLDecodeError as e:
                    from poetry.core.pyproject.exceptions import PyProjectError

                    msg = (
                        f"{self._path.as_posix()} is not a valid TOML file.\n"
                        f"{e.__class__.__name__}: {e}"
                    )

                    if str(e).startswith("Cannot overwrite a value"):
                        msg += "\nThis is often caused by a duplicate entry."

                    raise PyProjectError(msg) from e

        return self._data

    @property
    def build_system(self) -> BuildSystem:
        if self._build_system is None:
            build_backend = None
            requires = None

            if not self.path.exists():
                build_backend = "poetry.core.masonry.api"
                requires = ["poetry-core"]

            container = self.data.get("build-system", {})
            self._build_system = BuildSystem(
                build_backend=container.get("build-backend", build_backend),
                requires=container.get("requires", requires),
            )

        return self._build_system

    @property
    def poetry_config(self) -> dict[str, Any]:
        try:
            tool = self.data["tool"]
            assert isinstance(tool, dict)
            config = tool["poetry"]
            assert isinstance(config, dict)
            return config
        except KeyError as e:
            from poetry.core.pyproject.exceptions import PyProjectError

            raise PyProjectError(
                f"[tool.poetry] section not found in {self._path.as_posix()}"
            ) from e

    def is_poetry_project(self) -> bool:
        from poetry.core.pyproject.exceptions import PyProjectError

        if self.path.exists():
            with suppress(PyProjectError):
                _ = self.poetry_config
                return True

            # Even if there is no [tool.poetry] section, a project can still be a
            # valid Poetry project if there is a name and a version in [project]
            # and there are no dynamic fields.
            with suppress(KeyError):
                project = self.data["project"]
                if (
                    project["name"]
                    and project["version"]
                    and not project.get("dynamic")
                ):
                    return True

        return False
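
# A minimal sketch of PyProjectTOML; the path is hypothetical. For a missing
# file, data is empty and the build system falls back to poetry-core defaults.
if __name__ == "__main__":
    from pathlib import Path

    pyproject = PyProjectTOML(Path("pyproject.toml"))
    print(pyproject.build_system.build_backend)
    print(pyproject.is_poetry_project())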
poetry-core-2.1.1/src/poetry/core/spdx/000077500000000000000000000000001475444614500200205ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/spdx/__init__.py000066400000000000000000000000001475444614500221170ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/spdx/data/000077500000000000000000000000001475444614500207315ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/spdx/data/licenses.json000066400000000000000000001611201475444614500234320ustar00rootroot00000000000000{
  "0BSD": [
    "BSD Zero Clause License",
    true,
    false
  ],
  "3D-Slicer-1.0": [
    "3D Slicer License v1.0",
    false,
    false
  ],
  "AAL": [
    "Attribution Assurance License",
    true,
    false
  ],
  "ADSL": [
    "Amazon Digital Services License",
    false,
    false
  ],
  "AFL-1.1": [
    "Academic Free License v1.1",
    true,
    false
  ],
  "AFL-1.2": [
    "Academic Free License v1.2",
    true,
    false
  ],
  "AFL-2.0": [
    "Academic Free License v2.0",
    true,
    false
  ],
  "AFL-2.1": [
    "Academic Free License v2.1",
    true,
    false
  ],
  "AFL-3.0": [
    "Academic Free License v3.0",
    true,
    false
  ],
  "AGPL-1.0": [
    "Affero General Public License v1.0",
    false,
    true
  ],
  "AGPL-1.0-only": [
    "Affero General Public License v1.0 only",
    false,
    false
  ],
  "AGPL-1.0-or-later": [
    "Affero General Public License v1.0 or later",
    false,
    false
  ],
  "AGPL-3.0": [
    "GNU Affero General Public License v3.0",
    true,
    true
  ],
  "AGPL-3.0-only": [
    "GNU Affero General Public License v3.0 only",
    true,
    false
  ],
  "AGPL-3.0-or-later": [
    "GNU Affero General Public License v3.0 or later",
    true,
    false
  ],
  "AMD-newlib": [
    "AMD newlib License",
    false,
    false
  ],
  "AMDPLPA": [
    "AMD's plpa_map.c License",
    false,
    false
  ],
  "AML": [
    "Apple MIT License",
    false,
    false
  ],
  "AML-glslang": [
    "AML glslang variant License",
    false,
    false
  ],
  "AMPAS": [
    "Academy of Motion Picture Arts and Sciences BSD",
    false,
    false
  ],
  "ANTLR-PD": [
    "ANTLR Software Rights Notice",
    false,
    false
  ],
  "ANTLR-PD-fallback": [
    "ANTLR Software Rights Notice with license fallback",
    false,
    false
  ],
  "APAFML": [
    "Adobe Postscript AFM License",
    false,
    false
  ],
  "APL-1.0": [
    "Adaptive Public License 1.0",
    true,
    false
  ],
  "APSL-1.0": [
    "Apple Public Source License 1.0",
    true,
    false
  ],
  "APSL-1.1": [
    "Apple Public Source License 1.1",
    true,
    false
  ],
  "APSL-1.2": [
    "Apple Public Source License 1.2",
    true,
    false
  ],
  "APSL-2.0": [
    "Apple Public Source License 2.0",
    true,
    false
  ],
  "ASWF-Digital-Assets-1.0": [
    "ASWF Digital Assets License version 1.0",
    false,
    false
  ],
  "ASWF-Digital-Assets-1.1": [
    "ASWF Digital Assets License 1.1",
    false,
    false
  ],
  "Abstyles": [
    "Abstyles License",
    false,
    false
  ],
  "AdaCore-doc": [
    "AdaCore Doc License",
    false,
    false
  ],
  "Adobe-2006": [
    "Adobe Systems Incorporated Source Code License Agreement",
    false,
    false
  ],
  "Adobe-Display-PostScript": [
    "Adobe Display PostScript License",
    false,
    false
  ],
  "Adobe-Glyph": [
    "Adobe Glyph List License",
    false,
    false
  ],
  "Adobe-Utopia": [
    "Adobe Utopia Font License",
    false,
    false
  ],
  "Afmparse": [
    "Afmparse License",
    false,
    false
  ],
  "Aladdin": [
    "Aladdin Free Public License",
    false,
    false
  ],
  "Apache-1.0": [
    "Apache License 1.0",
    false,
    false
  ],
  "Apache-1.1": [
    "Apache License 1.1",
    true,
    false
  ],
  "Apache-2.0": [
    "Apache License 2.0",
    true,
    false
  ],
  "App-s2p": [
    "App::s2p License",
    false,
    false
  ],
  "Arphic-1999": [
    "Arphic Public License",
    false,
    false
  ],
  "Artistic-1.0": [
    "Artistic License 1.0",
    true,
    false
  ],
  "Artistic-1.0-Perl": [
    "Artistic License 1.0 (Perl)",
    true,
    false
  ],
  "Artistic-1.0-cl8": [
    "Artistic License 1.0 w/clause 8",
    true,
    false
  ],
  "Artistic-2.0": [
    "Artistic License 2.0",
    true,
    false
  ],
  "BSD-1-Clause": [
    "BSD 1-Clause License",
    true,
    false
  ],
  "BSD-2-Clause": [
    "BSD 2-Clause \"Simplified\" License",
    true,
    false
  ],
  "BSD-2-Clause-Darwin": [
    "BSD 2-Clause - Ian Darwin variant",
    false,
    false
  ],
  "BSD-2-Clause-FreeBSD": [
    "BSD 2-Clause FreeBSD License",
    false,
    true
  ],
  "BSD-2-Clause-NetBSD": [
    "BSD 2-Clause NetBSD License",
    false,
    true
  ],
  "BSD-2-Clause-Patent": [
    "BSD-2-Clause Plus Patent License",
    true,
    false
  ],
  "BSD-2-Clause-Views": [
    "BSD 2-Clause with views sentence",
    false,
    false
  ],
  "BSD-2-Clause-first-lines": [
    "BSD 2-Clause - first lines requirement",
    false,
    false
  ],
  "BSD-3-Clause": [
    "BSD 3-Clause \"New\" or \"Revised\" License",
    true,
    false
  ],
  "BSD-3-Clause-Attribution": [
    "BSD with attribution",
    false,
    false
  ],
  "BSD-3-Clause-Clear": [
    "BSD 3-Clause Clear License",
    false,
    false
  ],
  "BSD-3-Clause-HP": [
    "Hewlett-Packard BSD variant license",
    false,
    false
  ],
  "BSD-3-Clause-LBNL": [
    "Lawrence Berkeley National Labs BSD variant license",
    true,
    false
  ],
  "BSD-3-Clause-Modification": [
    "BSD 3-Clause Modification",
    false,
    false
  ],
  "BSD-3-Clause-No-Military-License": [
    "BSD 3-Clause No Military License",
    false,
    false
  ],
  "BSD-3-Clause-No-Nuclear-License": [
    "BSD 3-Clause No Nuclear License",
    false,
    false
  ],
  "BSD-3-Clause-No-Nuclear-License-2014": [
    "BSD 3-Clause No Nuclear License 2014",
    false,
    false
  ],
  "BSD-3-Clause-No-Nuclear-Warranty": [
    "BSD 3-Clause No Nuclear Warranty",
    false,
    false
  ],
  "BSD-3-Clause-Open-MPI": [
    "BSD 3-Clause Open MPI variant",
    false,
    false
  ],
  "BSD-3-Clause-Sun": [
    "BSD 3-Clause Sun Microsystems",
    false,
    false
  ],
  "BSD-3-Clause-acpica": [
    "BSD 3-Clause acpica variant",
    false,
    false
  ],
  "BSD-3-Clause-flex": [
    "BSD 3-Clause Flex variant",
    false,
    false
  ],
  "BSD-4-Clause": [
    "BSD 4-Clause \"Original\" or \"Old\" License",
    false,
    false
  ],
  "BSD-4-Clause-Shortened": [
    "BSD 4 Clause Shortened",
    false,
    false
  ],
  "BSD-4-Clause-UC": [
    "BSD-4-Clause (University of California-Specific)",
    false,
    false
  ],
  "BSD-4.3RENO": [
    "BSD 4.3 RENO License",
    false,
    false
  ],
  "BSD-4.3TAHOE": [
    "BSD 4.3 TAHOE License",
    false,
    false
  ],
  "BSD-Advertising-Acknowledgement": [
    "BSD Advertising Acknowledgement License",
    false,
    false
  ],
  "BSD-Attribution-HPND-disclaimer": [
    "BSD with Attribution and HPND disclaimer",
    false,
    false
  ],
  "BSD-Inferno-Nettverk": [
    "BSD-Inferno-Nettverk",
    false,
    false
  ],
  "BSD-Protection": [
    "BSD Protection License",
    false,
    false
  ],
  "BSD-Source-Code": [
    "BSD Source Code Attribution",
    false,
    false
  ],
  "BSD-Source-beginning-file": [
    "BSD Source Code Attribution - beginning of file variant",
    false,
    false
  ],
  "BSD-Systemics": [
    "Systemics BSD variant license",
    false,
    false
  ],
  "BSD-Systemics-W3Works": [
    "Systemics W3Works BSD variant license",
    false,
    false
  ],
  "BSL-1.0": [
    "Boost Software License 1.0",
    true,
    false
  ],
  "BUSL-1.1": [
    "Business Source License 1.1",
    false,
    false
  ],
  "Baekmuk": [
    "Baekmuk License",
    false,
    false
  ],
  "Bahyph": [
    "Bahyph License",
    false,
    false
  ],
  "Barr": [
    "Barr License",
    false,
    false
  ],
  "Beerware": [
    "Beerware License",
    false,
    false
  ],
  "BitTorrent-1.0": [
    "BitTorrent Open Source License v1.0",
    false,
    false
  ],
  "BitTorrent-1.1": [
    "BitTorrent Open Source License v1.1",
    false,
    false
  ],
  "Bitstream-Charter": [
    "Bitstream Charter Font License",
    false,
    false
  ],
  "Bitstream-Vera": [
    "Bitstream Vera Font License",
    false,
    false
  ],
  "BlueOak-1.0.0": [
    "Blue Oak Model License 1.0.0",
    true,
    false
  ],
  "Boehm-GC": [
    "Boehm-Demers-Weiser GC License",
    false,
    false
  ],
  "Boehm-GC-without-fee": [
    "Boehm-Demers-Weiser GC License (without fee)",
    false,
    false
  ],
  "Borceux": [
    "Borceux license",
    false,
    false
  ],
  "Brian-Gladman-2-Clause": [
    "Brian Gladman 2-Clause License",
    false,
    false
  ],
  "Brian-Gladman-3-Clause": [
    "Brian Gladman 3-Clause License",
    false,
    false
  ],
  "C-UDA-1.0": [
    "Computational Use of Data Agreement v1.0",
    false,
    false
  ],
  "CAL-1.0": [
    "Cryptographic Autonomy License 1.0",
    true,
    false
  ],
  "CAL-1.0-Combined-Work-Exception": [
    "Cryptographic Autonomy License 1.0 (Combined Work Exception)",
    true,
    false
  ],
  "CATOSL-1.1": [
    "Computer Associates Trusted Open Source License 1.1",
    true,
    false
  ],
  "CC-BY-1.0": [
    "Creative Commons Attribution 1.0 Generic",
    false,
    false
  ],
  "CC-BY-2.0": [
    "Creative Commons Attribution 2.0 Generic",
    false,
    false
  ],
  "CC-BY-2.5": [
    "Creative Commons Attribution 2.5 Generic",
    false,
    false
  ],
  "CC-BY-2.5-AU": [
    "Creative Commons Attribution 2.5 Australia",
    false,
    false
  ],
  "CC-BY-3.0": [
    "Creative Commons Attribution 3.0 Unported",
    false,
    false
  ],
  "CC-BY-3.0-AT": [
    "Creative Commons Attribution 3.0 Austria",
    false,
    false
  ],
  "CC-BY-3.0-AU": [
    "Creative Commons Attribution 3.0 Australia",
    false,
    false
  ],
  "CC-BY-3.0-DE": [
    "Creative Commons Attribution 3.0 Germany",
    false,
    false
  ],
  "CC-BY-3.0-IGO": [
    "Creative Commons Attribution 3.0 IGO",
    false,
    false
  ],
  "CC-BY-3.0-NL": [
    "Creative Commons Attribution 3.0 Netherlands",
    false,
    false
  ],
  "CC-BY-3.0-US": [
    "Creative Commons Attribution 3.0 United States",
    false,
    false
  ],
  "CC-BY-4.0": [
    "Creative Commons Attribution 4.0 International",
    false,
    false
  ],
  "CC-BY-NC-1.0": [
    "Creative Commons Attribution Non Commercial 1.0 Generic",
    false,
    false
  ],
  "CC-BY-NC-2.0": [
    "Creative Commons Attribution Non Commercial 2.0 Generic",
    false,
    false
  ],
  "CC-BY-NC-2.5": [
    "Creative Commons Attribution Non Commercial 2.5 Generic",
    false,
    false
  ],
  "CC-BY-NC-3.0": [
    "Creative Commons Attribution Non Commercial 3.0 Unported",
    false,
    false
  ],
  "CC-BY-NC-3.0-DE": [
    "Creative Commons Attribution Non Commercial 3.0 Germany",
    false,
    false
  ],
  "CC-BY-NC-4.0": [
    "Creative Commons Attribution Non Commercial 4.0 International",
    false,
    false
  ],
  "CC-BY-NC-ND-1.0": [
    "Creative Commons Attribution Non Commercial No Derivatives 1.0 Generic",
    false,
    false
  ],
  "CC-BY-NC-ND-2.0": [
    "Creative Commons Attribution Non Commercial No Derivatives 2.0 Generic",
    false,
    false
  ],
  "CC-BY-NC-ND-2.5": [
    "Creative Commons Attribution Non Commercial No Derivatives 2.5 Generic",
    false,
    false
  ],
  "CC-BY-NC-ND-3.0": [
    "Creative Commons Attribution Non Commercial No Derivatives 3.0 Unported",
    false,
    false
  ],
  "CC-BY-NC-ND-3.0-DE": [
    "Creative Commons Attribution Non Commercial No Derivatives 3.0 Germany",
    false,
    false
  ],
  "CC-BY-NC-ND-3.0-IGO": [
    "Creative Commons Attribution Non Commercial No Derivatives 3.0 IGO",
    false,
    false
  ],
  "CC-BY-NC-ND-4.0": [
    "Creative Commons Attribution Non Commercial No Derivatives 4.0 International",
    false,
    false
  ],
  "CC-BY-NC-SA-1.0": [
    "Creative Commons Attribution Non Commercial Share Alike 1.0 Generic",
    false,
    false
  ],
  "CC-BY-NC-SA-2.0": [
    "Creative Commons Attribution Non Commercial Share Alike 2.0 Generic",
    false,
    false
  ],
  "CC-BY-NC-SA-2.0-DE": [
    "Creative Commons Attribution Non Commercial Share Alike 2.0 Germany",
    false,
    false
  ],
  "CC-BY-NC-SA-2.0-FR": [
    "Creative Commons Attribution-NonCommercial-ShareAlike 2.0 France",
    false,
    false
  ],
  "CC-BY-NC-SA-2.0-UK": [
    "Creative Commons Attribution Non Commercial Share Alike 2.0 England and Wales",
    false,
    false
  ],
  "CC-BY-NC-SA-2.5": [
    "Creative Commons Attribution Non Commercial Share Alike 2.5 Generic",
    false,
    false
  ],
  "CC-BY-NC-SA-3.0": [
    "Creative Commons Attribution Non Commercial Share Alike 3.0 Unported",
    false,
    false
  ],
  "CC-BY-NC-SA-3.0-DE": [
    "Creative Commons Attribution Non Commercial Share Alike 3.0 Germany",
    false,
    false
  ],
  "CC-BY-NC-SA-3.0-IGO": [
    "Creative Commons Attribution Non Commercial Share Alike 3.0 IGO",
    false,
    false
  ],
  "CC-BY-NC-SA-4.0": [
    "Creative Commons Attribution Non Commercial Share Alike 4.0 International",
    false,
    false
  ],
  "CC-BY-ND-1.0": [
    "Creative Commons Attribution No Derivatives 1.0 Generic",
    false,
    false
  ],
  "CC-BY-ND-2.0": [
    "Creative Commons Attribution No Derivatives 2.0 Generic",
    false,
    false
  ],
  "CC-BY-ND-2.5": [
    "Creative Commons Attribution No Derivatives 2.5 Generic",
    false,
    false
  ],
  "CC-BY-ND-3.0": [
    "Creative Commons Attribution No Derivatives 3.0 Unported",
    false,
    false
  ],
  "CC-BY-ND-3.0-DE": [
    "Creative Commons Attribution No Derivatives 3.0 Germany",
    false,
    false
  ],
  "CC-BY-ND-4.0": [
    "Creative Commons Attribution No Derivatives 4.0 International",
    false,
    false
  ],
  "CC-BY-SA-1.0": [
    "Creative Commons Attribution Share Alike 1.0 Generic",
    false,
    false
  ],
  "CC-BY-SA-2.0": [
    "Creative Commons Attribution Share Alike 2.0 Generic",
    false,
    false
  ],
  "CC-BY-SA-2.0-UK": [
    "Creative Commons Attribution Share Alike 2.0 England and Wales",
    false,
    false
  ],
  "CC-BY-SA-2.1-JP": [
    "Creative Commons Attribution Share Alike 2.1 Japan",
    false,
    false
  ],
  "CC-BY-SA-2.5": [
    "Creative Commons Attribution Share Alike 2.5 Generic",
    false,
    false
  ],
  "CC-BY-SA-3.0": [
    "Creative Commons Attribution Share Alike 3.0 Unported",
    false,
    false
  ],
  "CC-BY-SA-3.0-AT": [
    "Creative Commons Attribution Share Alike 3.0 Austria",
    false,
    false
  ],
  "CC-BY-SA-3.0-DE": [
    "Creative Commons Attribution Share Alike 3.0 Germany",
    false,
    false
  ],
  "CC-BY-SA-3.0-IGO": [
    "Creative Commons Attribution-ShareAlike 3.0 IGO",
    false,
    false
  ],
  "CC-BY-SA-4.0": [
    "Creative Commons Attribution Share Alike 4.0 International",
    false,
    false
  ],
  "CC-PDDC": [
    "Creative Commons Public Domain Dedication and Certification",
    false,
    false
  ],
  "CC-PDM-1.0": [
    "Creative    Commons Public Domain Mark 1.0 Universal",
    false,
    false
  ],
  "CC-SA-1.0": [
    "Creative Commons Share Alike 1.0 Generic",
    false,
    false
  ],
  "CC0-1.0": [
    "Creative Commons Zero v1.0 Universal",
    false,
    false
  ],
  "CDDL-1.0": [
    "Common Development and Distribution License 1.0",
    true,
    false
  ],
  "CDDL-1.1": [
    "Common Development and Distribution License 1.1",
    false,
    false
  ],
  "CDL-1.0": [
    "Common Documentation License 1.0",
    false,
    false
  ],
  "CDLA-Permissive-1.0": [
    "Community Data License Agreement Permissive 1.0",
    false,
    false
  ],
  "CDLA-Permissive-2.0": [
    "Community Data License Agreement Permissive 2.0",
    false,
    false
  ],
  "CDLA-Sharing-1.0": [
    "Community Data License Agreement Sharing 1.0",
    false,
    false
  ],
  "CECILL-1.0": [
    "CeCILL Free Software License Agreement v1.0",
    false,
    false
  ],
  "CECILL-1.1": [
    "CeCILL Free Software License Agreement v1.1",
    false,
    false
  ],
  "CECILL-2.0": [
    "CeCILL Free Software License Agreement v2.0",
    false,
    false
  ],
  "CECILL-2.1": [
    "CeCILL Free Software License Agreement v2.1",
    true,
    false
  ],
  "CECILL-B": [
    "CeCILL-B Free Software License Agreement",
    false,
    false
  ],
  "CECILL-C": [
    "CeCILL-C Free Software License Agreement",
    false,
    false
  ],
  "CERN-OHL-1.1": [
    "CERN Open Hardware Licence v1.1",
    false,
    false
  ],
  "CERN-OHL-1.2": [
    "CERN Open Hardware Licence v1.2",
    false,
    false
  ],
  "CERN-OHL-P-2.0": [
    "CERN Open Hardware Licence Version 2 - Permissive",
    true,
    false
  ],
  "CERN-OHL-S-2.0": [
    "CERN Open Hardware Licence Version 2 - Strongly Reciprocal",
    true,
    false
  ],
  "CERN-OHL-W-2.0": [
    "CERN Open Hardware Licence Version 2 - Weakly Reciprocal",
    true,
    false
  ],
  "CFITSIO": [
    "CFITSIO License",
    false,
    false
  ],
  "CMU-Mach": [
    "CMU Mach License",
    false,
    false
  ],
  "CMU-Mach-nodoc": [
    "CMU    Mach - no notices-in-documentation variant",
    false,
    false
  ],
  "CNRI-Jython": [
    "CNRI Jython License",
    false,
    false
  ],
  "CNRI-Python": [
    "CNRI Python License",
    true,
    false
  ],
  "CNRI-Python-GPL-Compatible": [
    "CNRI Python Open Source GPL Compatible License Agreement",
    false,
    false
  ],
  "COIL-1.0": [
    "Copyfree Open Innovation License",
    false,
    false
  ],
  "CPAL-1.0": [
    "Common Public Attribution License 1.0",
    true,
    false
  ],
  "CPL-1.0": [
    "Common Public License 1.0",
    true,
    false
  ],
  "CPOL-1.02": [
    "Code Project Open License 1.02",
    false,
    false
  ],
  "CUA-OPL-1.0": [
    "CUA Office Public License v1.0",
    true,
    false
  ],
  "Caldera": [
    "Caldera License",
    false,
    false
  ],
  "Caldera-no-preamble": [
    "Caldera License (without preamble)",
    false,
    false
  ],
  "Catharon": [
    "Catharon License",
    false,
    false
  ],
  "ClArtistic": [
    "Clarified Artistic License",
    false,
    false
  ],
  "Clips": [
    "Clips License",
    false,
    false
  ],
  "Community-Spec-1.0": [
    "Community Specification License 1.0",
    false,
    false
  ],
  "Condor-1.1": [
    "Condor Public License v1.1",
    false,
    false
  ],
  "Cornell-Lossless-JPEG": [
    "Cornell Lossless JPEG License",
    false,
    false
  ],
  "Cronyx": [
    "Cronyx License",
    false,
    false
  ],
  "Crossword": [
    "Crossword License",
    false,
    false
  ],
  "CrystalStacker": [
    "CrystalStacker License",
    false,
    false
  ],
  "Cube": [
    "Cube License",
    false,
    false
  ],
  "D-FSL-1.0": [
    "Deutsche Freie Software Lizenz",
    false,
    false
  ],
  "DEC-3-Clause": [
    "DEC 3-Clause License",
    false,
    false
  ],
  "DL-DE-BY-2.0": [
    "Data licence Germany – attribution – version 2.0",
    false,
    false
  ],
  "DL-DE-ZERO-2.0": [
    "Data licence Germany – zero – version 2.0",
    false,
    false
  ],
  "DOC": [
    "DOC License",
    false,
    false
  ],
  "DRL-1.0": [
    "Detection Rule License 1.0",
    false,
    false
  ],
  "DRL-1.1": [
    "Detection Rule License 1.1",
    false,
    false
  ],
  "DSDP": [
    "DSDP License",
    false,
    false
  ],
  "DocBook-Schema": [
    "DocBook Schema License",
    false,
    false
  ],
  "DocBook-Stylesheet": [
    "DocBook Stylesheet License",
    false,
    false
  ],
  "DocBook-XML": [
    "DocBook XML License",
    false,
    false
  ],
  "Dotseqn": [
    "Dotseqn License",
    false,
    false
  ],
  "ECL-1.0": [
    "Educational Community License v1.0",
    true,
    false
  ],
  "ECL-2.0": [
    "Educational Community License v2.0",
    true,
    false
  ],
  "EFL-1.0": [
    "Eiffel Forum License v1.0",
    true,
    false
  ],
  "EFL-2.0": [
    "Eiffel Forum License v2.0",
    true,
    false
  ],
  "EPICS": [
    "EPICS Open License",
    false,
    false
  ],
  "EPL-1.0": [
    "Eclipse Public License 1.0",
    true,
    false
  ],
  "EPL-2.0": [
    "Eclipse Public License 2.0",
    true,
    false
  ],
  "EUDatagrid": [
    "EU DataGrid Software License",
    true,
    false
  ],
  "EUPL-1.0": [
    "European Union Public License 1.0",
    false,
    false
  ],
  "EUPL-1.1": [
    "European Union Public License 1.1",
    true,
    false
  ],
  "EUPL-1.2": [
    "European Union Public License 1.2",
    true,
    false
  ],
  "Elastic-2.0": [
    "Elastic License 2.0",
    false,
    false
  ],
  "Entessa": [
    "Entessa Public License v1.0",
    true,
    false
  ],
  "ErlPL-1.1": [
    "Erlang Public License v1.1",
    false,
    false
  ],
  "Eurosym": [
    "Eurosym License",
    false,
    false
  ],
  "FBM": [
    "Fuzzy Bitmap License",
    false,
    false
  ],
  "FDK-AAC": [
    "Fraunhofer FDK AAC Codec Library",
    false,
    false
  ],
  "FSFAP": [
    "FSF All Permissive License",
    false,
    false
  ],
  "FSFAP-no-warranty-disclaimer": [
    "FSF All Permissive License (without Warranty)",
    false,
    false
  ],
  "FSFUL": [
    "FSF Unlimited License",
    false,
    false
  ],
  "FSFULLR": [
    "FSF Unlimited License (with License Retention)",
    false,
    false
  ],
  "FSFULLRWD": [
    "FSF Unlimited License (With License Retention and Warranty Disclaimer)",
    false,
    false
  ],
  "FTL": [
    "Freetype Project License",
    false,
    false
  ],
  "Fair": [
    "Fair License",
    true,
    false
  ],
  "Ferguson-Twofish": [
    "Ferguson Twofish License",
    false,
    false
  ],
  "Frameworx-1.0": [
    "Frameworx Open License 1.0",
    true,
    false
  ],
  "FreeBSD-DOC": [
    "FreeBSD Documentation License",
    false,
    false
  ],
  "FreeImage": [
    "FreeImage Public License v1.0",
    false,
    false
  ],
  "Furuseth": [
    "Furuseth License",
    false,
    false
  ],
  "GCR-docs": [
    "Gnome GCR Documentation License",
    false,
    false
  ],
  "GD": [
    "GD License",
    false,
    false
  ],
  "GFDL-1.1": [
    "GNU Free Documentation License v1.1",
    false,
    true
  ],
  "GFDL-1.1-invariants-only": [
    "GNU Free Documentation License v1.1 only - invariants",
    false,
    false
  ],
  "GFDL-1.1-invariants-or-later": [
    "GNU Free Documentation License v1.1 or later - invariants",
    false,
    false
  ],
  "GFDL-1.1-no-invariants-only": [
    "GNU Free Documentation License v1.1 only - no invariants",
    false,
    false
  ],
  "GFDL-1.1-no-invariants-or-later": [
    "GNU Free Documentation License v1.1 or later - no invariants",
    false,
    false
  ],
  "GFDL-1.1-only": [
    "GNU Free Documentation License v1.1 only",
    false,
    false
  ],
  "GFDL-1.1-or-later": [
    "GNU Free Documentation License v1.1 or later",
    false,
    false
  ],
  "GFDL-1.2": [
    "GNU Free Documentation License v1.2",
    false,
    true
  ],
  "GFDL-1.2-invariants-only": [
    "GNU Free Documentation License v1.2 only - invariants",
    false,
    false
  ],
  "GFDL-1.2-invariants-or-later": [
    "GNU Free Documentation License v1.2 or later - invariants",
    false,
    false
  ],
  "GFDL-1.2-no-invariants-only": [
    "GNU Free Documentation License v1.2 only - no invariants",
    false,
    false
  ],
  "GFDL-1.2-no-invariants-or-later": [
    "GNU Free Documentation License v1.2 or later - no invariants",
    false,
    false
  ],
  "GFDL-1.2-only": [
    "GNU Free Documentation License v1.2 only",
    false,
    false
  ],
  "GFDL-1.2-or-later": [
    "GNU Free Documentation License v1.2 or later",
    false,
    false
  ],
  "GFDL-1.3": [
    "GNU Free Documentation License v1.3",
    false,
    true
  ],
  "GFDL-1.3-invariants-only": [
    "GNU Free Documentation License v1.3 only - invariants",
    false,
    false
  ],
  "GFDL-1.3-invariants-or-later": [
    "GNU Free Documentation License v1.3 or later - invariants",
    false,
    false
  ],
  "GFDL-1.3-no-invariants-only": [
    "GNU Free Documentation License v1.3 only - no invariants",
    false,
    false
  ],
  "GFDL-1.3-no-invariants-or-later": [
    "GNU Free Documentation License v1.3 or later - no invariants",
    false,
    false
  ],
  "GFDL-1.3-only": [
    "GNU Free Documentation License v1.3 only",
    false,
    false
  ],
  "GFDL-1.3-or-later": [
    "GNU Free Documentation License v1.3 or later",
    false,
    false
  ],
  "GL2PS": [
    "GL2PS License",
    false,
    false
  ],
  "GLWTPL": [
    "Good Luck With That Public License",
    false,
    false
  ],
  "GPL-1.0": [
    "GNU General Public License v1.0 only",
    false,
    true
  ],
  "GPL-1.0+": [
    "GNU General Public License v1.0 or later",
    false,
    true
  ],
  "GPL-1.0-only": [
    "GNU General Public License v1.0 only",
    false,
    false
  ],
  "GPL-1.0-or-later": [
    "GNU General Public License v1.0 or later",
    false,
    false
  ],
  "GPL-2.0": [
    "GNU General Public License v2.0 only",
    true,
    true
  ],
  "GPL-2.0+": [
    "GNU General Public License v2.0 or later",
    true,
    true
  ],
  "GPL-2.0-only": [
    "GNU General Public License v2.0 only",
    true,
    false
  ],
  "GPL-2.0-or-later": [
    "GNU General Public License v2.0 or later",
    true,
    false
  ],
  "GPL-2.0-with-GCC-exception": [
    "GNU General Public License v2.0 w/GCC Runtime Library exception",
    false,
    true
  ],
  "GPL-2.0-with-autoconf-exception": [
    "GNU General Public License v2.0 w/Autoconf exception",
    false,
    true
  ],
  "GPL-2.0-with-bison-exception": [
    "GNU General Public License v2.0 w/Bison exception",
    false,
    true
  ],
  "GPL-2.0-with-classpath-exception": [
    "GNU General Public License v2.0 w/Classpath exception",
    false,
    true
  ],
  "GPL-2.0-with-font-exception": [
    "GNU General Public License v2.0 w/Font exception",
    false,
    true
  ],
  "GPL-3.0": [
    "GNU General Public License v3.0 only",
    true,
    true
  ],
  "GPL-3.0+": [
    "GNU General Public License v3.0 or later",
    true,
    true
  ],
  "GPL-3.0-only": [
    "GNU General Public License v3.0 only",
    true,
    false
  ],
  "GPL-3.0-or-later": [
    "GNU General Public License v3.0 or later",
    true,
    false
  ],
  "GPL-3.0-with-GCC-exception": [
    "GNU General Public License v3.0 w/GCC Runtime Library exception",
    true,
    true
  ],
  "GPL-3.0-with-autoconf-exception": [
    "GNU General Public License v3.0 w/Autoconf exception",
    false,
    true
  ],
  "Giftware": [
    "Giftware License",
    false,
    false
  ],
  "Glide": [
    "3dfx Glide License",
    false,
    false
  ],
  "Glulxe": [
    "Glulxe License",
    false,
    false
  ],
  "Graphics-Gems": [
    "Graphics Gems License",
    false,
    false
  ],
  "Gutmann": [
    "Gutmann License",
    false,
    false
  ],
  "HIDAPI": [
    "HIDAPI License",
    false,
    false
  ],
  "HP-1986": [
    "Hewlett-Packard 1986 License",
    false,
    false
  ],
  "HP-1989": [
    "Hewlett-Packard 1989 License",
    false,
    false
  ],
  "HPND": [
    "Historical Permission Notice and Disclaimer",
    true,
    false
  ],
  "HPND-DEC": [
    "Historical Permission Notice and Disclaimer - DEC variant",
    false,
    false
  ],
  "HPND-Fenneberg-Livingston": [
    "Historical Permission Notice and Disclaimer - Fenneberg-Livingston variant",
    false,
    false
  ],
  "HPND-INRIA-IMAG": [
    "Historical Permission Notice and Disclaimer    - INRIA-IMAG variant",
    false,
    false
  ],
  "HPND-Intel": [
    "Historical Permission Notice and Disclaimer - Intel variant",
    false,
    false
  ],
  "HPND-Kevlin-Henney": [
    "Historical Permission Notice and Disclaimer - Kevlin Henney variant",
    false,
    false
  ],
  "HPND-MIT-disclaimer": [
    "Historical Permission Notice and Disclaimer with MIT disclaimer",
    false,
    false
  ],
  "HPND-Markus-Kuhn": [
    "Historical Permission Notice and Disclaimer - Markus Kuhn variant",
    false,
    false
  ],
  "HPND-Netrek": [
    "Historical Permission Notice and Disclaimer - Netrek variant",
    false,
    false
  ],
  "HPND-Pbmplus": [
    "Historical Permission Notice and Disclaimer - Pbmplus variant",
    false,
    false
  ],
  "HPND-UC": [
    "Historical Permission Notice and Disclaimer - University of California variant",
    false,
    false
  ],
  "HPND-UC-export-US": [
    "Historical Permission Notice and Disclaimer - University of California, US export warning",
    false,
    false
  ],
  "HPND-doc": [
    "Historical Permission Notice and Disclaimer - documentation variant",
    false,
    false
  ],
  "HPND-doc-sell": [
    "Historical Permission Notice and Disclaimer - documentation sell variant",
    false,
    false
  ],
  "HPND-export-US": [
    "HPND with US Government export control warning",
    false,
    false
  ],
  "HPND-export-US-acknowledgement": [
    "HPND with US Government export control warning and acknowledgment",
    false,
    false
  ],
  "HPND-export-US-modify": [
    "HPND with US Government export control warning and modification rqmt",
    false,
    false
  ],
  "HPND-export2-US": [
    "HPND with US Government export control and 2 disclaimers",
    false,
    false
  ],
  "HPND-merchantability-variant": [
    "Historical Permission Notice and Disclaimer - merchantability variant",
    false,
    false
  ],
  "HPND-sell-MIT-disclaimer-xserver": [
    "Historical Permission Notice and Disclaimer - sell xserver variant with MIT disclaimer",
    false,
    false
  ],
  "HPND-sell-regexpr": [
    "Historical Permission Notice and Disclaimer - sell regexpr variant",
    false,
    false
  ],
  "HPND-sell-variant": [
    "Historical Permission Notice and Disclaimer - sell variant",
    false,
    false
  ],
  "HPND-sell-variant-MIT-disclaimer": [
    "HPND sell variant with MIT disclaimer",
    false,
    false
  ],
  "HPND-sell-variant-MIT-disclaimer-rev": [
    "HPND sell variant with MIT disclaimer - reverse",
    false,
    false
  ],
  "HTMLTIDY": [
    "HTML Tidy License",
    false,
    false
  ],
  "HaskellReport": [
    "Haskell Language Report License",
    false,
    false
  ],
  "Hippocratic-2.1": [
    "Hippocratic License 2.1",
    false,
    false
  ],
  "IBM-pibs": [
    "IBM PowerPC Initialization and Boot Software",
    false,
    false
  ],
  "ICU": [
    "ICU License",
    true,
    false
  ],
  "IEC-Code-Components-EULA": [
    "IEC    Code Components End-user licence agreement",
    false,
    false
  ],
  "IJG": [
    "Independent JPEG Group License",
    false,
    false
  ],
  "IJG-short": [
    "Independent JPEG Group License - short",
    false,
    false
  ],
  "IPA": [
    "IPA Font License",
    true,
    false
  ],
  "IPL-1.0": [
    "IBM Public License v1.0",
    true,
    false
  ],
  "ISC": [
    "ISC License",
    true,
    false
  ],
  "ISC-Veillard": [
    "ISC Veillard variant",
    false,
    false
  ],
  "ImageMagick": [
    "ImageMagick License",
    false,
    false
  ],
  "Imlib2": [
    "Imlib2 License",
    false,
    false
  ],
  "Info-ZIP": [
    "Info-ZIP License",
    false,
    false
  ],
  "Inner-Net-2.0": [
    "Inner Net License v2.0",
    false,
    false
  ],
  "InnoSetup": [
    "Inno Setup License",
    false,
    false
  ],
  "Intel": [
    "Intel Open Source License",
    true,
    false
  ],
  "Intel-ACPI": [
    "Intel ACPI Software License Agreement",
    false,
    false
  ],
  "Interbase-1.0": [
    "Interbase Public License v1.0",
    false,
    false
  ],
  "JPL-image": [
    "JPL Image Use Policy",
    false,
    false
  ],
  "JPNIC": [
    "Japan Network Information Center License",
    false,
    false
  ],
  "JSON": [
    "JSON License",
    false,
    false
  ],
  "Jam": [
    "Jam License",
    true,
    false
  ],
  "JasPer-2.0": [
    "JasPer License",
    false,
    false
  ],
  "Kastrup": [
    "Kastrup License",
    false,
    false
  ],
  "Kazlib": [
    "Kazlib License",
    false,
    false
  ],
  "Knuth-CTAN": [
    "Knuth CTAN License",
    false,
    false
  ],
  "LAL-1.2": [
    "Licence Art Libre 1.2",
    false,
    false
  ],
  "LAL-1.3": [
    "Licence Art Libre 1.3",
    false,
    false
  ],
  "LGPL-2.0": [
    "GNU Library General Public License v2 only",
    true,
    true
  ],
  "LGPL-2.0+": [
    "GNU Library General Public License v2 or later",
    true,
    true
  ],
  "LGPL-2.0-only": [
    "GNU Library General Public License v2 only",
    true,
    false
  ],
  "LGPL-2.0-or-later": [
    "GNU Library General Public License v2 or later",
    true,
    false
  ],
  "LGPL-2.1": [
    "GNU Lesser General Public License v2.1 only",
    true,
    true
  ],
  "LGPL-2.1+": [
    "GNU Lesser General Public License v2.1 or later",
    true,
    true
  ],
  "LGPL-2.1-only": [
    "GNU Lesser General Public License v2.1 only",
    true,
    false
  ],
  "LGPL-2.1-or-later": [
    "GNU Lesser General Public License v2.1 or later",
    true,
    false
  ],
  "LGPL-3.0": [
    "GNU Lesser General Public License v3.0 only",
    true,
    true
  ],
  "LGPL-3.0+": [
    "GNU Lesser General Public License v3.0 or later",
    true,
    true
  ],
  "LGPL-3.0-only": [
    "GNU Lesser General Public License v3.0 only",
    true,
    false
  ],
  "LGPL-3.0-or-later": [
    "GNU Lesser General Public License v3.0 or later",
    true,
    false
  ],
  "LGPLLR": [
    "Lesser General Public License For Linguistic Resources",
    false,
    false
  ],
  "LOOP": [
    "Common Lisp LOOP License",
    false,
    false
  ],
  "LPD-document": [
    "LPD Documentation License",
    false,
    false
  ],
  "LPL-1.0": [
    "Lucent Public License Version 1.0",
    true,
    false
  ],
  "LPL-1.02": [
    "Lucent Public License v1.02",
    true,
    false
  ],
  "LPPL-1.0": [
    "LaTeX Project Public License v1.0",
    false,
    false
  ],
  "LPPL-1.1": [
    "LaTeX Project Public License v1.1",
    false,
    false
  ],
  "LPPL-1.2": [
    "LaTeX Project Public License v1.2",
    false,
    false
  ],
  "LPPL-1.3a": [
    "LaTeX Project Public License v1.3a",
    false,
    false
  ],
  "LPPL-1.3c": [
    "LaTeX Project Public License v1.3c",
    true,
    false
  ],
  "LZMA-SDK-9.11-to-9.20": [
    "LZMA SDK License (versions 9.11 to 9.20)",
    false,
    false
  ],
  "LZMA-SDK-9.22": [
    "LZMA SDK License (versions 9.22 and beyond)",
    false,
    false
  ],
  "Latex2e": [
    "Latex2e License",
    false,
    false
  ],
  "Latex2e-translated-notice": [
    "Latex2e with translated notice permission",
    false,
    false
  ],
  "Leptonica": [
    "Leptonica License",
    false,
    false
  ],
  "LiLiQ-P-1.1": [
    "Licence Libre du Québec – Permissive version 1.1",
    true,
    false
  ],
  "LiLiQ-R-1.1": [
    "Licence Libre du Québec – Réciprocité version 1.1",
    true,
    false
  ],
  "LiLiQ-Rplus-1.1": [
    "Licence Libre du Québec – Réciprocité forte version 1.1",
    true,
    false
  ],
  "Libpng": [
    "libpng License",
    false,
    false
  ],
  "Linux-OpenIB": [
    "Linux Kernel Variant of OpenIB.org license",
    false,
    false
  ],
  "Linux-man-pages-1-para": [
    "Linux man-pages - 1 paragraph",
    false,
    false
  ],
  "Linux-man-pages-copyleft": [
    "Linux man-pages Copyleft",
    false,
    false
  ],
  "Linux-man-pages-copyleft-2-para": [
    "Linux man-pages Copyleft - 2 paragraphs",
    false,
    false
  ],
  "Linux-man-pages-copyleft-var": [
    "Linux man-pages Copyleft Variant",
    false,
    false
  ],
  "Lucida-Bitmap-Fonts": [
    "Lucida Bitmap Fonts License",
    false,
    false
  ],
  "MIPS": [
    "MIPS License",
    false,
    false
  ],
  "MIT": [
    "MIT License",
    true,
    false
  ],
  "MIT-0": [
    "MIT No Attribution",
    true,
    false
  ],
  "MIT-CMU": [
    "CMU License",
    false,
    false
  ],
  "MIT-Click": [
    "MIT Click License",
    false,
    false
  ],
  "MIT-Festival": [
    "MIT Festival Variant",
    false,
    false
  ],
  "MIT-Khronos-old": [
    "MIT Khronos - old variant",
    false,
    false
  ],
  "MIT-Modern-Variant": [
    "MIT License Modern Variant",
    true,
    false
  ],
  "MIT-Wu": [
    "MIT Tom Wu Variant",
    false,
    false
  ],
  "MIT-advertising": [
    "Enlightenment License (e16)",
    false,
    false
  ],
  "MIT-enna": [
    "enna License",
    false,
    false
  ],
  "MIT-feh": [
    "feh License",
    false,
    false
  ],
  "MIT-open-group": [
    "MIT Open Group variant",
    false,
    false
  ],
  "MIT-testregex": [
    "MIT testregex Variant",
    false,
    false
  ],
  "MITNFA": [
    "MIT +no-false-attribs license",
    false,
    false
  ],
  "MMIXware": [
    "MMIXware License",
    false,
    false
  ],
  "MPEG-SSG": [
    "MPEG Software Simulation",
    false,
    false
  ],
  "MPL-1.0": [
    "Mozilla Public License 1.0",
    true,
    false
  ],
  "MPL-1.1": [
    "Mozilla Public License 1.1",
    true,
    false
  ],
  "MPL-2.0": [
    "Mozilla Public License 2.0",
    true,
    false
  ],
  "MPL-2.0-no-copyleft-exception": [
    "Mozilla Public License 2.0 (no copyleft exception)",
    true,
    false
  ],
  "MS-LPL": [
    "Microsoft Limited Public License",
    false,
    false
  ],
  "MS-PL": [
    "Microsoft Public License",
    true,
    false
  ],
  "MS-RL": [
    "Microsoft Reciprocal License",
    true,
    false
  ],
  "MTLL": [
    "Matrix Template Library License",
    false,
    false
  ],
  "Mackerras-3-Clause": [
    "Mackerras 3-Clause License",
    false,
    false
  ],
  "Mackerras-3-Clause-acknowledgment": [
    "Mackerras 3-Clause - acknowledgment variant",
    false,
    false
  ],
  "MakeIndex": [
    "MakeIndex License",
    false,
    false
  ],
  "Martin-Birgmeier": [
    "Martin Birgmeier License",
    false,
    false
  ],
  "McPhee-slideshow": [
    "McPhee Slideshow License",
    false,
    false
  ],
  "Minpack": [
    "Minpack License",
    false,
    false
  ],
  "MirOS": [
    "The MirOS Licence",
    true,
    false
  ],
  "Motosoto": [
    "Motosoto License",
    true,
    false
  ],
  "MulanPSL-1.0": [
    "Mulan Permissive Software License, Version 1",
    false,
    false
  ],
  "MulanPSL-2.0": [
    "Mulan Permissive Software License, Version 2",
    true,
    false
  ],
  "Multics": [
    "Multics License",
    true,
    false
  ],
  "Mup": [
    "Mup License",
    false,
    false
  ],
  "NAIST-2003": [
    "Nara Institute of Science and Technology License (2003)",
    false,
    false
  ],
  "NASA-1.3": [
    "NASA Open Source Agreement 1.3",
    true,
    false
  ],
  "NBPL-1.0": [
    "Net Boolean Public License v1",
    false,
    false
  ],
  "NCBI-PD": [
    "NCBI Public Domain Notice",
    false,
    false
  ],
  "NCGL-UK-2.0": [
    "Non-Commercial Government Licence",
    false,
    false
  ],
  "NCL": [
    "NCL Source Code License",
    false,
    false
  ],
  "NCSA": [
    "University of Illinois/NCSA Open Source License",
    true,
    false
  ],
  "NGPL": [
    "Nethack General Public License",
    true,
    false
  ],
  "NICTA-1.0": [
    "NICTA Public Software License, Version 1.0",
    false,
    false
  ],
  "NIST-PD": [
    "NIST Public Domain Notice",
    false,
    false
  ],
  "NIST-PD-fallback": [
    "NIST Public Domain Notice with license fallback",
    false,
    false
  ],
  "NIST-Software": [
    "NIST Software License",
    false,
    false
  ],
  "NLOD-1.0": [
    "Norwegian Licence for Open Government Data (NLOD) 1.0",
    false,
    false
  ],
  "NLOD-2.0": [
    "Norwegian Licence for Open Government Data (NLOD) 2.0",
    false,
    false
  ],
  "NLPL": [
    "No Limit Public License",
    false,
    false
  ],
  "NOSL": [
    "Netizen Open Source License",
    false,
    false
  ],
  "NPL-1.0": [
    "Netscape Public License v1.0",
    false,
    false
  ],
  "NPL-1.1": [
    "Netscape Public License v1.1",
    false,
    false
  ],
  "NPOSL-3.0": [
    "Non-Profit Open Software License 3.0",
    true,
    false
  ],
  "NRL": [
    "NRL License",
    false,
    false
  ],
  "NTP": [
    "NTP License",
    true,
    false
  ],
  "NTP-0": [
    "NTP No Attribution",
    false,
    false
  ],
  "Naumen": [
    "Naumen Public License",
    true,
    false
  ],
  "Net-SNMP": [
    "Net-SNMP License",
    false,
    true
  ],
  "NetCDF": [
    "NetCDF license",
    false,
    false
  ],
  "Newsletr": [
    "Newsletr License",
    false,
    false
  ],
  "Nokia": [
    "Nokia Open Source License",
    true,
    false
  ],
  "Noweb": [
    "Noweb License",
    false,
    false
  ],
  "Nunit": [
    "Nunit License",
    false,
    true
  ],
  "O-UDA-1.0": [
    "Open Use of Data Agreement v1.0",
    false,
    false
  ],
  "OAR": [
    "OAR License",
    false,
    false
  ],
  "OCCT-PL": [
    "Open CASCADE Technology Public License",
    false,
    false
  ],
  "OCLC-2.0": [
    "OCLC Research Public License 2.0",
    true,
    false
  ],
  "ODC-By-1.0": [
    "Open Data Commons Attribution License v1.0",
    false,
    false
  ],
  "ODbL-1.0": [
    "Open Data Commons Open Database License v1.0",
    false,
    false
  ],
  "OFFIS": [
    "OFFIS License",
    false,
    false
  ],
  "OFL-1.0": [
    "SIL Open Font License 1.0",
    false,
    false
  ],
  "OFL-1.0-RFN": [
    "SIL Open Font License 1.0 with Reserved Font Name",
    false,
    false
  ],
  "OFL-1.0-no-RFN": [
    "SIL Open Font License 1.0 with no Reserved Font Name",
    false,
    false
  ],
  "OFL-1.1": [
    "SIL Open Font License 1.1",
    true,
    false
  ],
  "OFL-1.1-RFN": [
    "SIL Open Font License 1.1 with Reserved Font Name",
    true,
    false
  ],
  "OFL-1.1-no-RFN": [
    "SIL Open Font License 1.1 with no Reserved Font Name",
    true,
    false
  ],
  "OGC-1.0": [
    "OGC Software License, Version 1.0",
    false,
    false
  ],
  "OGDL-Taiwan-1.0": [
    "Taiwan Open Government Data License, version 1.0",
    false,
    false
  ],
  "OGL-Canada-2.0": [
    "Open Government Licence - Canada",
    false,
    false
  ],
  "OGL-UK-1.0": [
    "Open Government Licence v1.0",
    false,
    false
  ],
  "OGL-UK-2.0": [
    "Open Government Licence v2.0",
    false,
    false
  ],
  "OGL-UK-3.0": [
    "Open Government Licence v3.0",
    false,
    false
  ],
  "OGTSL": [
    "Open Group Test Suite License",
    true,
    false
  ],
  "OLDAP-1.1": [
    "Open LDAP Public License v1.1",
    false,
    false
  ],
  "OLDAP-1.2": [
    "Open LDAP Public License v1.2",
    false,
    false
  ],
  "OLDAP-1.3": [
    "Open LDAP Public License v1.3",
    false,
    false
  ],
  "OLDAP-1.4": [
    "Open LDAP Public License v1.4",
    false,
    false
  ],
  "OLDAP-2.0": [
    "Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)",
    false,
    false
  ],
  "OLDAP-2.0.1": [
    "Open LDAP Public License v2.0.1",
    false,
    false
  ],
  "OLDAP-2.1": [
    "Open LDAP Public License v2.1",
    false,
    false
  ],
  "OLDAP-2.2": [
    "Open LDAP Public License v2.2",
    false,
    false
  ],
  "OLDAP-2.2.1": [
    "Open LDAP Public License v2.2.1",
    false,
    false
  ],
  "OLDAP-2.2.2": [
    "Open LDAP Public License 2.2.2",
    false,
    false
  ],
  "OLDAP-2.3": [
    "Open LDAP Public License v2.3",
    false,
    false
  ],
  "OLDAP-2.4": [
    "Open LDAP Public License v2.4",
    false,
    false
  ],
  "OLDAP-2.5": [
    "Open LDAP Public License v2.5",
    false,
    false
  ],
  "OLDAP-2.6": [
    "Open LDAP Public License v2.6",
    false,
    false
  ],
  "OLDAP-2.7": [
    "Open LDAP Public License v2.7",
    false,
    false
  ],
  "OLDAP-2.8": [
    "Open LDAP Public License v2.8",
    true,
    false
  ],
  "OLFL-1.3": [
    "Open Logistics Foundation License Version 1.3",
    true,
    false
  ],
  "OML": [
    "Open Market License",
    false,
    false
  ],
  "OPL-1.0": [
    "Open Public License v1.0",
    false,
    false
  ],
  "OPL-UK-3.0": [
    "United    Kingdom Open Parliament Licence v3.0",
    false,
    false
  ],
  "OPUBL-1.0": [
    "Open Publication License v1.0",
    false,
    false
  ],
  "OSET-PL-2.1": [
    "OSET Public License version 2.1",
    true,
    false
  ],
  "OSL-1.0": [
    "Open Software License 1.0",
    true,
    false
  ],
  "OSL-1.1": [
    "Open Software License 1.1",
    false,
    false
  ],
  "OSL-2.0": [
    "Open Software License 2.0",
    true,
    false
  ],
  "OSL-2.1": [
    "Open Software License 2.1",
    true,
    false
  ],
  "OSL-3.0": [
    "Open Software License 3.0",
    true,
    false
  ],
  "OpenPBS-2.3": [
    "OpenPBS v2.3 Software License",
    false,
    false
  ],
  "OpenSSL": [
    "OpenSSL License",
    false,
    false
  ],
  "OpenSSL-standalone": [
    "OpenSSL License - standalone",
    false,
    false
  ],
  "OpenVision": [
    "OpenVision License",
    false,
    false
  ],
  "PADL": [
    "PADL License",
    false,
    false
  ],
  "PDDL-1.0": [
    "Open Data Commons Public Domain Dedication & License 1.0",
    false,
    false
  ],
  "PHP-3.0": [
    "PHP License v3.0",
    true,
    false
  ],
  "PHP-3.01": [
    "PHP License v3.01",
    true,
    false
  ],
  "PPL": [
    "Peer Production License",
    false,
    false
  ],
  "PSF-2.0": [
    "Python Software Foundation License 2.0",
    false,
    false
  ],
  "Parity-6.0.0": [
    "The Parity Public License 6.0.0",
    false,
    false
  ],
  "Parity-7.0.0": [
    "The Parity Public License 7.0.0",
    false,
    false
  ],
  "Pixar": [
    "Pixar License",
    false,
    false
  ],
  "Plexus": [
    "Plexus Classworlds License",
    false,
    false
  ],
  "PolyForm-Noncommercial-1.0.0": [
    "PolyForm Noncommercial License 1.0.0",
    false,
    false
  ],
  "PolyForm-Small-Business-1.0.0": [
    "PolyForm Small Business License 1.0.0",
    false,
    false
  ],
  "PostgreSQL": [
    "PostgreSQL License",
    true,
    false
  ],
  "Python-2.0": [
    "Python License 2.0",
    true,
    false
  ],
  "Python-2.0.1": [
    "Python License 2.0.1",
    false,
    false
  ],
  "QPL-1.0": [
    "Q Public License 1.0",
    true,
    false
  ],
  "QPL-1.0-INRIA-2004": [
    "Q Public License 1.0 - INRIA 2004 variant",
    false,
    false
  ],
  "Qhull": [
    "Qhull License",
    false,
    false
  ],
  "RHeCos-1.1": [
    "Red Hat eCos Public License v1.1",
    false,
    false
  ],
  "RPL-1.1": [
    "Reciprocal Public License 1.1",
    true,
    false
  ],
  "RPL-1.5": [
    "Reciprocal Public License 1.5",
    true,
    false
  ],
  "RPSL-1.0": [
    "RealNetworks Public Source License v1.0",
    true,
    false
  ],
  "RSA-MD": [
    "RSA Message-Digest License",
    false,
    false
  ],
  "RSCPL": [
    "Ricoh Source Code Public License",
    true,
    false
  ],
  "Rdisc": [
    "Rdisc License",
    false,
    false
  ],
  "Ruby": [
    "Ruby License",
    false,
    false
  ],
  "Ruby-pty": [
    "Ruby pty extension license",
    false,
    false
  ],
  "SAX-PD": [
    "Sax Public Domain Notice",
    false,
    false
  ],
  "SAX-PD-2.0": [
    "Sax Public Domain Notice 2.0",
    false,
    false
  ],
  "SCEA": [
    "SCEA Shared Source License",
    false,
    false
  ],
  "SGI-B-1.0": [
    "SGI Free Software License B v1.0",
    false,
    false
  ],
  "SGI-B-1.1": [
    "SGI Free Software License B v1.1",
    false,
    false
  ],
  "SGI-B-2.0": [
    "SGI Free Software License B v2.0",
    false,
    false
  ],
  "SGI-OpenGL": [
    "SGI OpenGL License",
    false,
    false
  ],
  "SGP4": [
    "SGP4 Permission Notice",
    false,
    false
  ],
  "SHL-0.5": [
    "Solderpad Hardware License v0.5",
    false,
    false
  ],
  "SHL-0.51": [
    "Solderpad Hardware License, Version 0.51",
    false,
    false
  ],
  "SISSL": [
    "Sun Industry Standards Source License v1.1",
    true,
    false
  ],
  "SISSL-1.2": [
    "Sun Industry Standards Source License v1.2",
    false,
    false
  ],
  "SL": [
    "SL License",
    false,
    false
  ],
  "SMAIL-GPL": [
    "SMAIL General Public License",
    false,
    false
  ],
  "SMLNJ": [
    "Standard ML of New Jersey License",
    false,
    false
  ],
  "SMPPL": [
    "Secure Messaging Protocol Public License",
    false,
    false
  ],
  "SNIA": [
    "SNIA Public License 1.1",
    false,
    false
  ],
  "SPL-1.0": [
    "Sun Public License v1.0",
    true,
    false
  ],
  "SSH-OpenSSH": [
    "SSH OpenSSH license",
    false,
    false
  ],
  "SSH-short": [
    "SSH short notice",
    false,
    false
  ],
  "SSLeay-standalone": [
    "SSLeay License - standalone",
    false,
    false
  ],
  "SSPL-1.0": [
    "Server Side Public License, v 1",
    false,
    false
  ],
  "SWL": [
    "Scheme Widget Library (SWL) Software License Agreement",
    false,
    false
  ],
  "Saxpath": [
    "Saxpath License",
    false,
    false
  ],
  "SchemeReport": [
    "Scheme Language Report License",
    false,
    false
  ],
  "Sendmail": [
    "Sendmail License",
    false,
    false
  ],
  "Sendmail-8.23": [
    "Sendmail License 8.23",
    false,
    false
  ],
  "Sendmail-Open-Source-1.1": [
    "Sendmail Open Source License v1.1",
    false,
    false
  ],
  "SimPL-2.0": [
    "Simple Public License 2.0",
    true,
    false
  ],
  "Sleepycat": [
    "Sleepycat License",
    true,
    false
  ],
  "Soundex": [
    "Soundex License",
    false,
    false
  ],
  "Spencer-86": [
    "Spencer License 86",
    false,
    false
  ],
  "Spencer-94": [
    "Spencer License 94",
    false,
    false
  ],
  "Spencer-99": [
    "Spencer License 99",
    false,
    false
  ],
  "StandardML-NJ": [
    "Standard ML of New Jersey License",
    false,
    true
  ],
  "SugarCRM-1.1.3": [
    "SugarCRM Public License v1.1.3",
    false,
    false
  ],
  "Sun-PPP": [
    "Sun PPP License",
    false,
    false
  ],
  "Sun-PPP-2000": [
    "Sun PPP License (2000)",
    false,
    false
  ],
  "SunPro": [
    "SunPro License",
    false,
    false
  ],
  "Symlinks": [
    "Symlinks License",
    false,
    false
  ],
  "TAPR-OHL-1.0": [
    "TAPR Open Hardware License v1.0",
    false,
    false
  ],
  "TCL": [
    "TCL/TK License",
    false,
    false
  ],
  "TCP-wrappers": [
    "TCP Wrappers License",
    false,
    false
  ],
  "TGPPL-1.0": [
    "Transitive Grace Period Public Licence 1.0",
    false,
    false
  ],
  "TMate": [
    "TMate Open Source License",
    false,
    false
  ],
  "TORQUE-1.1": [
    "TORQUE v2.5+ Software License v1.1",
    false,
    false
  ],
  "TOSL": [
    "Trusster Open Source License",
    false,
    false
  ],
  "TPDL": [
    "Time::ParseDate License",
    false,
    false
  ],
  "TPL-1.0": [
    "THOR Public License 1.0",
    false,
    false
  ],
  "TTWL": [
    "Text-Tabs+Wrap License",
    false,
    false
  ],
  "TTYP0": [
    "TTYP0 License",
    false,
    false
  ],
  "TU-Berlin-1.0": [
    "Technische Universitaet Berlin License 1.0",
    false,
    false
  ],
  "TU-Berlin-2.0": [
    "Technische Universitaet Berlin License 2.0",
    false,
    false
  ],
  "TermReadKey": [
    "TermReadKey License",
    false,
    false
  ],
  "ThirdEye": [
    "ThirdEye License",
    false,
    false
  ],
  "TrustedQSL": [
    "TrustedQSL License",
    false,
    false
  ],
  "UCAR": [
    "UCAR License",
    false,
    false
  ],
  "UCL-1.0": [
    "Upstream Compatibility License v1.0",
    true,
    false
  ],
  "UMich-Merit": [
    "Michigan/Merit Networks License",
    false,
    false
  ],
  "UPL-1.0": [
    "Universal Permissive License v1.0",
    true,
    false
  ],
  "URT-RLE": [
    "Utah Raster Toolkit Run Length Encoded License",
    false,
    false
  ],
  "Ubuntu-font-1.0": [
    "Ubuntu Font Licence v1.0",
    false,
    false
  ],
  "Unicode-3.0": [
    "Unicode License v3",
    true,
    false
  ],
  "Unicode-DFS-2015": [
    "Unicode License Agreement - Data Files and Software (2015)",
    false,
    false
  ],
  "Unicode-DFS-2016": [
    "Unicode License Agreement - Data Files and Software (2016)",
    true,
    false
  ],
  "Unicode-TOU": [
    "Unicode Terms of Use",
    false,
    false
  ],
  "UnixCrypt": [
    "UnixCrypt License",
    false,
    false
  ],
  "Unlicense": [
    "The Unlicense",
    true,
    false
  ],
  "VOSTROM": [
    "VOSTROM Public License for Open Source",
    false,
    false
  ],
  "VSL-1.0": [
    "Vovida Software License v1.0",
    true,
    false
  ],
  "Vim": [
    "Vim License",
    false,
    false
  ],
  "W3C": [
    "W3C Software Notice and License (2002-12-31)",
    true,
    false
  ],
  "W3C-19980720": [
    "W3C Software Notice and License (1998-07-20)",
    false,
    false
  ],
  "W3C-20150513": [
    "W3C Software Notice and Document License (2015-05-13)",
    true,
    false
  ],
  "WTFPL": [
    "Do What The F*ck You Want To Public License",
    false,
    false
  ],
  "Watcom-1.0": [
    "Sybase Open Watcom Public License 1.0",
    true,
    false
  ],
  "Widget-Workshop": [
    "Widget Workshop License",
    false,
    false
  ],
  "Wsuipa": [
    "Wsuipa License",
    false,
    false
  ],
  "X11": [
    "X11 License",
    false,
    false
  ],
  "X11-distribute-modifications-variant": [
    "X11 License Distribution Modification Variant",
    false,
    false
  ],
  "X11-swapped": [
    "X11 swapped final paragraphs",
    false,
    false
  ],
  "XFree86-1.1": [
    "XFree86 License 1.1",
    false,
    false
  ],
  "XSkat": [
    "XSkat License",
    false,
    false
  ],
  "Xdebug-1.03": [
    "Xdebug License v 1.03",
    false,
    false
  ],
  "Xerox": [
    "Xerox License",
    false,
    false
  ],
  "Xfig": [
    "Xfig License",
    false,
    false
  ],
  "Xnet": [
    "X.Net License",
    true,
    false
  ],
  "YPL-1.0": [
    "Yahoo! Public License v1.0",
    false,
    false
  ],
  "YPL-1.1": [
    "Yahoo! Public License v1.1",
    false,
    false
  ],
  "ZPL-1.1": [
    "Zope Public License 1.1",
    false,
    false
  ],
  "ZPL-2.0": [
    "Zope Public License 2.0",
    true,
    false
  ],
  "ZPL-2.1": [
    "Zope Public License 2.1",
    true,
    false
  ],
  "Zed": [
    "Zed License",
    false,
    false
  ],
  "Zeeff": [
    "Zeeff License",
    false,
    false
  ],
  "Zend-2.0": [
    "Zend License v2.0",
    false,
    false
  ],
  "Zimbra-1.3": [
    "Zimbra Public License v1.3",
    false,
    false
  ],
  "Zimbra-1.4": [
    "Zimbra Public License v1.4",
    false,
    false
  ],
  "Zlib": [
    "zlib License",
    true,
    false
  ],
  "any-OSI": [
    "Any OSI License",
    false,
    false
  ],
  "any-OSI-perl-modules": [
    "Any OSI License - Perl Modules",
    false,
    false
  ],
  "bcrypt-Solar-Designer": [
    "bcrypt Solar Designer License",
    false,
    false
  ],
  "blessing": [
    "SQLite Blessing",
    false,
    false
  ],
  "bzip2-1.0.5": [
    "bzip2 and libbzip2 License v1.0.5",
    false,
    true
  ],
  "bzip2-1.0.6": [
    "bzip2 and libbzip2 License v1.0.6",
    false,
    false
  ],
  "check-cvs": [
    "check-cvs License",
    false,
    false
  ],
  "checkmk": [
    "Checkmk License",
    false,
    false
  ],
  "copyleft-next-0.3.0": [
    "copyleft-next 0.3.0",
    false,
    false
  ],
  "copyleft-next-0.3.1": [
    "copyleft-next 0.3.1",
    false,
    false
  ],
  "curl": [
    "curl License",
    false,
    false
  ],
  "cve-tou": [
    "Common Vulnerability Enumeration ToU License",
    false,
    false
  ],
  "diffmark": [
    "diffmark license",
    false,
    false
  ],
  "dtoa": [
    "David M. Gay dtoa License",
    false,
    false
  ],
  "dvipdfm": [
    "dvipdfm License",
    false,
    false
  ],
  "eCos-2.0": [
    "eCos license version 2.0",
    false,
    true
  ],
  "eGenix": [
    "eGenix.com Public License 1.1.0",
    false,
    false
  ],
  "etalab-2.0": [
    "Etalab Open License 2.0",
    false,
    false
  ],
  "fwlw": [
    "fwlw License",
    false,
    false
  ],
  "gSOAP-1.3b": [
    "gSOAP Public License v1.3b",
    false,
    false
  ],
  "generic-xts": [
    "Generic XTS License",
    false,
    false
  ],
  "gnuplot": [
    "gnuplot License",
    false,
    false
  ],
  "gtkbook": [
    "gtkbook License",
    false,
    false
  ],
  "hdparm": [
    "hdparm License",
    false,
    false
  ],
  "iMatix": [
    "iMatix Standard Function Library Agreement",
    false,
    false
  ],
  "libpng-2.0": [
    "PNG Reference Library version 2",
    false,
    false
  ],
  "libselinux-1.0": [
    "libselinux public domain notice",
    false,
    false
  ],
  "libtiff": [
    "libtiff License",
    false,
    false
  ],
  "libutil-David-Nugent": [
    "libutil David Nugent License",
    false,
    false
  ],
  "lsof": [
    "lsof License",
    false,
    false
  ],
  "magaz": [
    "magaz License",
    false,
    false
  ],
  "mailprio": [
    "mailprio License",
    false,
    false
  ],
  "metamail": [
    "metamail License",
    false,
    false
  ],
  "mpi-permissive": [
    "mpi Permissive License",
    false,
    false
  ],
  "mpich2": [
    "mpich2 License",
    false,
    false
  ],
  "mplus": [
    "mplus Font License",
    false,
    false
  ],
  "pkgconf": [
    "pkgconf License",
    false,
    false
  ],
  "pnmstitch": [
    "pnmstitch License",
    false,
    false
  ],
  "psfrag": [
    "psfrag License",
    false,
    false
  ],
  "psutils": [
    "psutils License",
    false,
    false
  ],
  "python-ldap": [
    "Python ldap License",
    false,
    false
  ],
  "radvd": [
    "radvd License",
    false,
    false
  ],
  "snprintf": [
    "snprintf License",
    false,
    false
  ],
  "softSurfer": [
    "softSurfer License",
    false,
    false
  ],
  "ssh-keyscan": [
    "ssh-keyscan License",
    false,
    false
  ],
  "swrule": [
    "swrule License",
    false,
    false
  ],
  "threeparttable": [
    "threeparttable License",
    false,
    false
  ],
  "ulem": [
    "ulem License",
    false,
    false
  ],
  "w3m": [
    "w3m License",
    false,
    false
  ],
  "wwl": [
    "WWL License",
    false,
    false
  ],
  "wxWindows": [
    "wxWindows Library License",
    true,
    true
  ],
  "xinetd": [
    "xinetd License",
    false,
    false
  ],
  "xkeyboard-config-Zinoviev": [
    "xkeyboard-config Zinoviev License",
    false,
    false
  ],
  "xlock": [
    "xlock License",
    false,
    false
  ],
  "xpp": [
    "XPP License",
    false,
    false
  ],
  "xzoom": [
    "xzoom License",
    false,
    false
  ],
  "zlib-acknowledgement": [
    "zlib/libpng License with Acknowledgement",
    false,
    false
  ]
}
poetry-core-2.1.1/src/poetry/core/spdx/helpers.py
from __future__ import annotations

import functools
import json

from importlib.resources import files
from typing import TYPE_CHECKING

from poetry.core.spdx.license import License


if TYPE_CHECKING:
    from importlib.abc import Traversable


def _get_license_file() -> Traversable:
    return files(__package__) / "data" / "licenses.json"


def license_by_id(identifier: str) -> License:
    if not identifier:
        raise ValueError("A license identifier is required")

    licenses = _load_licenses()
    return licenses.get(
        identifier.lower(), License(identifier, identifier, False, False)
    )
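# Example lookups (a sketch based on the fallback above and the vendored data;
# "custom" stands in for any identifier absent from the SPDX list):
#   license_by_id("mit")    -> License("MIT", "MIT License", True, False)
#   license_by_id("custom") -> License("custom", "custom", False, False)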


@functools.lru_cache
def _load_licenses() -> dict[str, License]:
    licenses = {}
    licenses_file = _get_license_file()

    with licenses_file.open(encoding="utf-8") as f:
        data = json.load(f)

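    # Each entry maps an SPDX identifier to [full name, is_osi_approved,
    # is_deprecated], in the order consumed by the License constructor below.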
    for name, license_info in data.items():
        license = License(name, license_info[0], license_info[1], license_info[2])
        licenses[name.lower()] = license

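        # Index by lowercased full name as well; when two SPDX ids share a
        # full name (e.g. the deprecated "GPL-3.0" and "GPL-3.0-only"),
        # keep the non-deprecated entry.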
        full_name = license_info[0].lower()
        if full_name in licenses:
            existing_license = licenses[full_name]
            if not existing_license.is_deprecated:
                continue

        licenses[full_name] = license

    # Add a Proprietary license for non-standard licenses
    licenses["proprietary"] = License("Proprietary", "Proprietary", False, False)

    return licenses


if __name__ == "__main__":
    from poetry.core.spdx.updater import Updater

    updater = Updater()
    updater.dump()
poetry-core-2.1.1/src/poetry/core/spdx/license.py
from __future__ import annotations

from collections import namedtuple
from typing import ClassVar


class License(namedtuple("License", "id name is_osi_approved is_deprecated")):
    id: str
    name: str
    is_osi_approved: bool
    is_deprecated: bool

    CLASSIFIER_SUPPORTED: ClassVar[set[str]] = {
        # Not OSI Approved
        "Aladdin",
        "CC0-1.0",
        "CECILL-B",
        "CECILL-C",
        "NPL-1.0",
        "NPL-1.1",
        "AFPL",
        # OSI Approved
        "AFL-1.1",
        "AFL-1.2",
        "AFL-2.0",
        "AFL-2.1",
        "AFL-3.0",
        "Apache-1.1",
        "Apache-2.0",
        "APSL-1.1",
        "APSL-1.2",
        "APSL-2.0",
        "Artistic-1.0",
        "Artistic-2.0",
        "AAL",
        "AGPL-3.0",
        "AGPL-3.0-only",
        "AGPL-3.0-or-later",
        "BSL-1.0",
        "BSD-2-Clause",
        "BSD-3-Clause",
        "CDDL-1.0",
        "CECILL-2.1",
        "CPL-1.0",
        "EFL-1.0",
        "EFL-2.0",
        "EPL-1.0",
        "EPL-2.0",
        "EUPL-1.1",
        "EUPL-1.2",
        "GPL-2.0",
        "GPL-2.0+",
        "GPL-2.0-only",
        "GPL-2.0-or-later",
        "GPL-3.0",
        "GPL-3.0+",
        "GPL-3.0-only",
        "GPL-3.0-or-later",
        "LGPL-2.0",
        "LGPL-2.0+",
        "LGPL-2.0-only",
        "LGPL-2.0-or-later",
        "LGPL-3.0",
        "LGPL-3.0+",
        "LGPL-3.0-only",
        "LGPL-3.0-or-later",
        "MIT",
        "MPL-1.0",
        "MPL-1.1",
        "MPL-1.2",
        "Nokia",
        "W3C",
        "ZPL-1.0",
        "ZPL-2.0",
        "ZPL-2.1",
    }

    CLASSIFIER_NAMES: ClassVar[dict[str, str]] = {
        # Not OSI Approved
        "Aladdin": "Aladdin Free Public License (AFPL)",
        "AFPL": "Aladdin Free Public License (AFPL)",
        "CC0-1.0": "CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
        "CECILL-B": "CeCILL-B Free Software License Agreement (CECILL-B)",
        "CECILL-C": "CeCILL-C Free Software License Agreement (CECILL-C)",
        "NPL-1.0": "Netscape Public License (NPL)",
        "NPL-1.1": "Netscape Public License (NPL)",
        # OSI Approved
        "AFL-1.1": "Academic Free License (AFL)",
        "AFL-1.2": "Academic Free License (AFL)",
        "AFL-2.0": "Academic Free License (AFL)",
        "AFL-2.1": "Academic Free License (AFL)",
        "AFL-3.0": "Academic Free License (AFL)",
        "Apache-1.1": "Apache Software License",
        "Apache-2.0": "Apache Software License",
        "APSL-1.1": "Apple Public Source License",
        "APSL-1.2": "Apple Public Source License",
        "APSL-2.0": "Apple Public Source License",
        "Artistic-1.0": "Artistic License",
        "Artistic-2.0": "Artistic License",
        "AAL": "Attribution Assurance License",
        "AGPL-3.0": "GNU Affero General Public License v3",
        "AGPL-3.0-only": "GNU Affero General Public License v3",
        "AGPL-3.0-or-later": "GNU Affero General Public License v3 or later (AGPLv3+)",
        "BSL-1.0": "Boost Software License 1.0 (BSL-1.0)",
        "BSD-2-Clause": "BSD License",
        "BSD-3-Clause": "BSD License",
        "CDDL-1.0": "Common Development and Distribution License 1.0 (CDDL-1.0)",
        "CECILL-2.1": "CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)",
        "CPL-1.0": "Common Public License",
        "EPL-1.0": "Eclipse Public License 1.0 (EPL-1.0)",
        "EPL-2.0": "Eclipse Public License 2.0 (EPL-2.0)",
        "EFL-1.0": "Eiffel Forum License",
        "EFL-2.0": "Eiffel Forum License",
        "EUPL-1.1": "European Union Public Licence 1.1 (EUPL 1.1)",
        "EUPL-1.2": "European Union Public Licence 1.2 (EUPL 1.2)",
        "GPL-2.0": "GNU General Public License v2 (GPLv2)",
        "GPL-2.0-only": "GNU General Public License v2 (GPLv2)",
        "GPL-2.0+": "GNU General Public License v2 or later (GPLv2+)",
        "GPL-2.0-or-later": "GNU General Public License v2 or later (GPLv2+)",
        "GPL-3.0": "GNU General Public License v3 (GPLv3)",
        "GPL-3.0-only": "GNU General Public License v3 (GPLv3)",
        "GPL-3.0+": "GNU General Public License v3 or later (GPLv3+)",
        "GPL-3.0-or-later": "GNU General Public License v3 or later (GPLv3+)",
        "LGPL-2.0": "GNU Lesser General Public License v2 (LGPLv2)",
        "LGPL-2.0-only": "GNU Lesser General Public License v2 (LGPLv2)",
        "LGPL-2.0+": "GNU Lesser General Public License v2 or later (LGPLv2+)",
        "LGPL-2.0-or-later": "GNU Lesser General Public License v2 or later (LGPLv2+)",
        "LGPL-3.0": "GNU Lesser General Public License v3 (LGPLv3)",
        "LGPL-3.0-only": "GNU Lesser General Public License v3 (LGPLv3)",
        "LGPL-3.0+": "GNU Lesser General Public License v3 or later (LGPLv3+)",
        "LGPL-3.0-or-later": "GNU Lesser General Public License v3 or later (LGPLv3+)",
        "MPL-1.0": "Mozilla Public License 1.0 (MPL)",
        "MPL-1.1": "Mozilla Public License 1.1 (MPL 1.1)",
        "MPL-2.0": "Mozilla Public License 2.0 (MPL 2.0)",
        "W3C": "W3C License",
        "ZPL-1.1": "Zope Public License",
        "ZPL-2.0": "Zope Public License",
        "ZPL-2.1": "Zope Public License",
    }

    @property
    def classifier(self) -> str:
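        # e.g. (illustrative): License("MIT", "MIT License", True, False).classifier
        # evaluates to "License :: OSI Approved :: MIT License".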
        parts = ["License"]

        if self.is_osi_approved:
            parts.append("OSI Approved")

        name = self.classifier_name
        if name is not None:
            parts.append(name)

        return " :: ".join(parts)

    @property
    def classifier_name(self) -> str | None:
        if self.id not in self.CLASSIFIER_SUPPORTED:
            if self.is_osi_approved:
                return None

            return "Other/Proprietary License"

        if self.id in self.CLASSIFIER_NAMES:
            return self.CLASSIFIER_NAMES[self.id]

        return self.name
poetry-core-2.1.1/src/poetry/core/spdx/updater.py000066400000000000000000000021221475444614500220330ustar00rootroot00000000000000from __future__ import annotations

import json

from pathlib import Path
from typing import Any
from urllib.request import urlopen


class Updater:
    BASE_URL = "https://raw.githubusercontent.com/spdx/license-list-data/master/json/"

    def __init__(self, base_url: str = BASE_URL) -> None:
        self._base_url = base_url

    def dump(self, file: Path | None = None) -> None:
        if file is None:
            file = Path(__file__).parent / "data" / "licenses.json"

        licenses_url = self._base_url + "licenses.json"

        with file.open("w", encoding="utf-8") as f:
            f.write(
                json.dumps(self.get_licenses(licenses_url), indent=2, sort_keys=True)
            )

    def get_licenses(self, url: str) -> dict[str, Any]:
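        # The result maps each SPDX id to [name, is_osi_approved, is_deprecated],
        # e.g. (illustrative): {"MIT": ["MIT License", True, False], ...}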
        licenses = {}
        with urlopen(url) as r:
            data = json.loads(r.read().decode())

        for info in data["licenses"]:
            licenses[info["licenseId"]] = [
                info["name"],
                info["isOsiApproved"],
                info["isDeprecatedLicenseId"],
            ]

        return licenses
poetry-core-2.1.1/src/poetry/core/utils/000077500000000000000000000000001475444614500202025ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/utils/__init__.py000066400000000000000000000000001475444614500223010ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/utils/_compat.py000066400000000000000000000003451475444614500222000ustar00rootroot00000000000000from __future__ import annotations

import sys


WINDOWS = sys.platform == "win32"


if sys.version_info < (3, 11):
    # compatibility for python <3.11
    import tomli as tomllib
else:
    import tomllib

__all__ = ["tomllib"]
poetry-core-2.1.1/src/poetry/core/utils/helpers.py000066400000000000000000000063431475444614500222240ustar00rootroot00000000000000from __future__ import annotations

import shutil
import stat
import sys
import tempfile
import time
import unicodedata

from contextlib import contextmanager
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any

from packaging.utils import canonicalize_name


if TYPE_CHECKING:
    from collections.abc import Iterator


def combine_unicode(string: str) -> str:
    return unicodedata.normalize("NFC", string)


def module_name(name: str) -> str:
    return canonicalize_name(name).replace("-", "_")


@contextmanager
def temporary_directory(*args: Any, **kwargs: Any) -> Iterator[Path]:
    if sys.version_info >= (3, 10):
        # mypy reports an error if ignore_cleanup_errors is
        # specified literally in the call
        kwargs["ignore_cleanup_errors"] = True
        with tempfile.TemporaryDirectory(*args, **kwargs) as name:
            yield Path(name)
    else:
        name = tempfile.mkdtemp(*args, **kwargs)
        try:
            yield Path(name)
        finally:
            robust_rmtree(name)


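# Illustrative sketch (assumed example, not from this file): a requires.txt body
#
#     requests
#
#     [security:python_version < "3.0"]
#     pyOpenSSL
#
# is flattened by parse_requires() into PEP 508-style lines:
#
#     ['requests', 'pyOpenSSL ; python_version < "3.0" and extra == "security"']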
def parse_requires(requires: str) -> list[str]:
    lines = requires.split("\n")

    requires_dist = []
    current_marker = None
    for line in lines:
        line = line.strip()
        if not line:
            continue

        if line.startswith("["):
            # extras or conditional dependencies
            marker = line.lstrip("[").rstrip("]")
            if ":" not in marker:
                extra, marker = marker, ""
            else:
                extra, marker = marker.split(":")

            if extra:
                if marker:
                    marker = f'{marker} and extra == "{extra}"'
                else:
                    marker = f'extra == "{extra}"'

            if marker:
                current_marker = marker

            continue

        if current_marker:
            line = f"{line} ; {current_marker}"

        requires_dist.append(line)

    return requires_dist


def _on_rm_error(func: Any, path: str | Path, exc_info: Any) -> None:
    path = Path(path)
    if not path.exists():
        return

    path.chmod(stat.S_IWRITE)
    func(path)


def robust_rmtree(path: str | Path, max_timeout: float = 1) -> None:
    """
    Robustly tries to delete paths.
    Retries several times if an OSError occurs.
    If the final attempt fails, the exception is propagated
    to the caller.
    """
    path = Path(path)  # make sure this is a Path object, not str
    timeout = 0.001
    while timeout < max_timeout:
        try:
            # both os.unlink and shutil.rmtree can throw exceptions on Windows
            # if the files are in use when called
            if path.is_symlink():
                path.unlink()
            else:
                shutil.rmtree(path)
            return  # Only hits this on success
        except OSError:
            # Increase the timeout and try again
            time.sleep(timeout)
            timeout *= 2

    # Final attempt, pass any Exceptions up to caller.
    shutil.rmtree(path, onerror=_on_rm_error)


def readme_content_type(path: str | Path) -> str:
    suffix = Path(path).suffix
    if suffix == ".rst":
        return "text/x-rst"
    elif suffix in (".md", ".markdown"):
        return "text/markdown"
    else:
        return "text/plain"
poetry-core-2.1.1/src/poetry/core/utils/patterns.py000066400000000000000000000005101475444614500224100ustar00rootroot00000000000000from __future__ import annotations

import re


AUTHOR_REGEX = re.compile(r"(?u)^(?P<name>[^<>]+)(?: <(?P<email>.+?)>)?$")
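# e.g. (illustrative): "Jane Doe <jane@example.com>" yields
# name="Jane Doe" and email="jane@example.com".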

wheel_file_re = re.compile(
    r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
        ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
        \.whl|\.dist-info)$""",
    re.VERBOSE,
)
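# e.g. (illustrative): "poetry_core-2.1.1-py3-none-any.whl" yields
# name="poetry_core", ver="2.1.1", pyver="py3", abi="none", plat="any".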
poetry-core-2.1.1/src/poetry/core/vcs/000077500000000000000000000000001475444614500176355ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/vcs/__init__.py000066400000000000000000000020651475444614500217510ustar00rootroot00000000000000from __future__ import annotations

import subprocess

from typing import TYPE_CHECKING

from poetry.core.vcs.git import Git


if TYPE_CHECKING:
    from pathlib import Path


def get_vcs(directory: Path) -> Git | None:
    directory = directory.resolve(strict=True)
    vcs: Git | None

    try:
        from poetry.core.vcs.git import executable

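        # `git check-ignore .` exits with 0 when the directory itself is
        # git-ignored; in that case we treat it as not under version control.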
        check_ignore = subprocess.run(
            [executable(), "check-ignore", "."],
            stderr=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            cwd=directory,
        ).returncode

        if check_ignore == 0:
            vcs = None
        else:
            rel_path_to_git_dir = subprocess.check_output(
                [executable(), "rev-parse", "--show-cdup"],
                stderr=subprocess.STDOUT,
                text=True,
                encoding="utf-8",
                cwd=directory,
            ).strip()

            vcs = Git((directory / rel_path_to_git_dir).resolve())

    except (subprocess.CalledProcessError, OSError, RuntimeError):
        vcs = None

    return vcs
poetry-core-2.1.1/src/poetry/core/vcs/git.py000066400000000000000000000200551475444614500207740ustar00rootroot00000000000000from __future__ import annotations

import re
import subprocess

from collections import namedtuple
from pathlib import Path
from typing import Any

from poetry.core.utils._compat import WINDOWS


PROTOCOL = r"\w+"
# https://url.spec.whatwg.org/#forbidden-host-code-point
URL_RESTRICTED = r"[^/\?#:@<>\[\]\|]"
USER = rf"{URL_RESTRICTED}+"
USER_AUTH_HTTP = rf"((?P<user>{USER})(:(?P<password>{URL_RESTRICTED}*))?)"
RESOURCE = r"[a-zA-Z0-9_.-]+"
PORT = r"\d+"
PATH = r"[%\w~.\-\+/\\\$]+"
NAME = r"[%\w~.\-]+"
REV = r"[^@#]+?"
SUBDIR = r"[\w\-/\\]+"
PATTERN_SUFFIX = (
    r"(?:"
    rf"#(?:egg=.+?&subdirectory=|subdirectory=)(?P<subdirectory>{SUBDIR})"
    r"|"
    r"#egg=?.+"
    r"|"
    rf"[@#](?P<rev>{REV})(?:[&#](?:(?:egg=.+?&subdirectory=|subdirectory=)(?P<rev_subdirectory>{SUBDIR})|egg=.+?))?"
    r")?"
    r"$"
)

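# Ordered from most specific to most permissive; e.g. (illustrative):
#   git+ssh://git@github.com/org/repo.git  -> first pattern (explicit protocol)
#   https://github.com/org/repo.git        -> second pattern (http/https)
#   git@github.com:org/repo.git            -> fourth pattern (scp-like syntax)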
PATTERNS = [
    re.compile(
        r"^(git\+)?"
        r"(?P<protocol>git|ssh|rsync|file)://"
        rf"(?:(?P<user>{USER})@)?"
        rf"(?P<resource>{RESOURCE})?"
        rf"(:(?P<port>{PORT}))?"
        rf"(?P<pathname>[:/\\]({PATH}[/\\])?"
        rf"((?P<name>{NAME}?)(\.git|[/\\])?)?)"
        rf"{PATTERN_SUFFIX}"
    ),
    re.compile(
        r"^(git\+)?"
        r"(?P<protocol>https?)://"
        rf"(?:{USER_AUTH_HTTP}@)?"
        rf"(?P<resource>{RESOURCE})?"
        rf"(:(?P<port>{PORT}))?"
        rf"(?P<pathname>[:/\\]({PATH}[/\\])?"
        rf"((?P<name>{NAME}?)(\.git|[/\\])?)?)"
        rf"{PATTERN_SUFFIX}"
    ),
    re.compile(
        r"(git\+)?"
        rf"((?P<protocol>{PROTOCOL})://)"
        rf"(?:(?P<user>{USER})@)?"
        rf"(?P<resource>{RESOURCE}:?)"
        rf"(:(?P<port>{PORT}))?"
        rf"(?P<pathname>({PATH})"
        rf"(?P<name>{NAME})(\.git|/)?)"
        rf"{PATTERN_SUFFIX}"
    ),
    re.compile(
        rf"^(?:(?P<user>{USER})@)?"
        rf"(?P<resource>{RESOURCE})"
        rf"(:(?P<port>{PORT}))?"
        rf"(?P<pathname>([:/]{PATH}/)"
        rf"(?P<name>{NAME})(\.git|/)?)"
        rf"{PATTERN_SUFFIX}"
    ),
    re.compile(
        rf"((?P<user>{USER})@)?"
        rf"(?P<resource>{RESOURCE})"
        r"[:/]{1,2}"
        rf"(?P<pathname>({PATH})"
        rf"(?P<name>{NAME})(\.git|/)?)"
        rf"{PATTERN_SUFFIX}"
    ),
]


class GitError(RuntimeError):
    pass


class ParsedUrl:
    def __init__(
        self,
        protocol: str | None,
        resource: str | None,
        pathname: str | None,
        user: str | None,
        port: str | None,
        name: str | None,
        rev: str | None,
        subdirectory: str | None = None,
    ) -> None:
        self.protocol = protocol
        self.resource = resource
        self.pathname = pathname
        self.user = user
        self.port = port
        self.name = name
        self.rev = rev
        self.subdirectory = subdirectory

    @classmethod
    def parse(cls, url: str) -> ParsedUrl:
        for pattern in PATTERNS:
            m = pattern.match(url)
            if m:
                groups = m.groupdict()
                return ParsedUrl(
                    groups.get("protocol", "ssh"),
                    groups.get("resource"),
                    groups.get("pathname"),
                    groups.get("user"),
                    groups.get("port"),
                    groups.get("name"),
                    groups.get("rev"),
                    groups.get("rev_subdirectory") or groups.get("subdirectory"),
                )

        raise ValueError(f'Invalid git url "{url}"')

    @property
    def url(self) -> str:
        protocol = f"{self.protocol}://" if self.protocol else ""
        user = f"{self.user}@" if self.user else ""
        port = f":{self.port}" if self.port else ""
        path = "/" + (self.pathname or "").lstrip(":/")
        return f"{protocol}{user}{self.resource}{port}{path}"

    def format(self) -> str:
        return self.url

    def __str__(self) -> str:
        return self.format()


GitUrl = namedtuple("GitUrl", ["url", "revision", "subdirectory"])


_executable: str | None = None


def executable() -> str:
    global _executable

    if _executable is not None:
        return _executable

    if WINDOWS:
        # Finding git via where.exe
        where = "%WINDIR%\\System32\\where.exe"
        paths = subprocess.check_output(
            [where, "git"], shell=True, encoding="oem"
        ).split("\n")
        for path in paths:
            if not path:
                continue

            _path = Path(path.strip())
            try:
                _path.relative_to(Path.cwd())
            except ValueError:
                _executable = str(_path)

                break
    else:
        _executable = "git"

    if _executable is None:
        raise RuntimeError("Unable to find a valid git executable")

    return _executable


def _reset_executable() -> None:
    global _executable

    _executable = None


class GitConfig:
    def __init__(self, requires_git_presence: bool = False) -> None:
        self._config = {}

        try:
            config_list = subprocess.check_output(
                [executable(), "config", "-l"], stderr=subprocess.STDOUT
            ).decode()

            m = re.findall("(?ms)^([^=]+)=(.*?)$", config_list)
            if m:
                for group in m:
                    self._config[group[0]] = group[1]
        except (subprocess.CalledProcessError, OSError):
            if requires_git_presence:
                raise

    def get(self, key: Any, default: Any | None = None) -> Any:
        return self._config.get(key, default)

    def __getitem__(self, item: Any) -> Any:
        return self._config[item]


class Git:
    def __init__(self, work_dir: Path | None = None) -> None:
        self._config = GitConfig(requires_git_presence=True)
        self._work_dir = work_dir

    @classmethod
    def normalize_url(cls, url: str) -> GitUrl:
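        # Illustrative (assumed example): normalize_url(
        #     "git+https://github.com/org/repo.git@v1.0"
        # ) -> GitUrl("https://github.com/org/repo.git", "v1.0", None)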
        parsed = ParsedUrl.parse(url)

        formatted = re.sub(r"^git\+", "", url)
        if parsed.rev:
            formatted = re.sub(rf"[#@]{parsed.rev}(?=[#&]?)(?!\=)", "", formatted)

        if parsed.subdirectory:
            formatted = re.sub(
                rf"[#&]subdirectory={parsed.subdirectory}$", "", formatted
            )

        altered = parsed.format() != formatted

        if altered:
            if re.match(r"^git\+https?", url) and re.match(
                r"^/?:[^0-9]", parsed.pathname or ""
            ):
                normalized = re.sub(r"git\+(.*:[^:]+):(.*)", "\\1/\\2", url)
            elif re.match(r"^git\+file", url):
                normalized = re.sub(r"git\+", "", url)
            else:
                normalized = re.sub(r"^(?:git\+)?ssh://", "", url)
        else:
            normalized = parsed.format()

        return GitUrl(
            re.sub(r"#[^#]*$", "", normalized), parsed.rev, parsed.subdirectory
        )

    @property
    def config(self) -> GitConfig:
        return self._config

    @property
    def version(self) -> tuple[int, int, int]:
        output = self.run("version")
        version = re.search(r"(\d+)\.(\d+)\.(\d+)", output)
        if not version:
            return (0, 0, 0)
        return int(version.group(1)), int(version.group(2)), int(version.group(3))

    def get_ignored_files(self, folder: Path | None = None) -> list[str]:
        args = []
        if folder is None and self._work_dir:
            folder = self._work_dir

        if folder:
            args += [
                "--git-dir",
                (folder / ".git").as_posix(),
                "--work-tree",
                folder.as_posix(),
            ]

        args += ["ls-files", "--others", "-i", "--exclude-standard"]
        output = self.run(*args)

        return output.strip().split("\n")

    def run(self, *args: Any, **kwargs: Any) -> str:
        folder = kwargs.pop("folder", None)
        if folder:
            args = (
                "--git-dir",
                (folder / ".git").as_posix(),
                "--work-tree",
                folder.as_posix(),
                *args,
            )

        return (
            subprocess.check_output(
                [executable(), *list(args)], stderr=subprocess.STDOUT
            )
            .decode()
            .strip()
        )
poetry-core-2.1.1/src/poetry/core/version/000077500000000000000000000000001475444614500205275ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/version/__init__.py000066400000000000000000000000001475444614500226260ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/version/exceptions.py000066400000000000000000000001251475444614500232600ustar00rootroot00000000000000from __future__ import annotations


class InvalidVersionError(ValueError):
    pass
poetry-core-2.1.1/src/poetry/core/version/grammars/000077500000000000000000000000001475444614500223405ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/version/grammars/__init__.py000066400000000000000000000003261475444614500244520ustar00rootroot00000000000000from __future__ import annotations

from pathlib import Path


GRAMMAR_DIR = Path(__file__).parent

GRAMMAR_PEP_508_CONSTRAINTS = GRAMMAR_DIR / "pep508.lark"

GRAMMAR_PEP_508_MARKERS = GRAMMAR_DIR / "markers.lark"
poetry-core-2.1.1/src/poetry/core/version/grammars/markers.lark000066400000000000000000000017141475444614500246620ustar00rootroot00000000000000start: marker
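// Illustrative inputs accepted by this grammar (assumed examples):
//   python_version >= "3.8" and sys_platform == "linux"
//   "arm" in platform_machine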

marker: _atom (BOOL_OP _atom)*
_atom: item | (L_PAREN marker R_PAREN)
item: (MARKER_NAME MARKER_OP _marker_value) | (_marker_value MARKER_OP MARKER_NAME)
_marker_value: SINGLE_QUOTED_STRING | ESCAPED_STRING

MARKER_NAME: "implementation_version"
    | "platform_python_implementation"
    | "implementation_name"
    | "python_full_version"
    | "platform_release"
    | "platform_version"
    | "platform_machine"
    | "platform_system"
    | "python_version"
    | "sys_platform"
    | "os_name"
    | "os.name"
    | "sys.platform"
    | "platform.version"
    | "platform.machine"
    | "platform.python_implementation"
    | "python_implementation"
    | "extra"
MARKER_OP: "===" | "==" | ">=" | "<=" | ">" | "<" | "!=" | "~=" | "not in" | "in"
SINGLE_QUOTED_STRING: /'([^'])*'/
QUOTED_STRING: /"([^"])*"/
MARKER_VALUE: /(.+?)/
BOOL_OP: "and" | "or"
L_PAREN: "("
R_PAREN: ")"

%import common.WS_INLINE
%import common.ESCAPED_STRING
%ignore WS_INLINE
poetry-core-2.1.1/src/poetry/core/version/grammars/pep508.lark000066400000000000000000000025711475444614500242410ustar00rootroot00000000000000start: _requirement

_requirement: _full_name (_MARKER_SEPARATOR marker_spec)?
_full_name: NAME _extras? (version_specification | _url)?
_extras: _L_BRACKET _extra? _R_BRACKET
_extra: EXTRA (_COMMA EXTRA)*
version_specification: (_version_many | _L_PAREN _version_many _R_PAREN)
_version_many: _single_version (_COMMA _single_version)*
_single_version: LEGACY_VERSION_CONSTRAINT
_url: _AT URI
marker_spec: marker

NAME: /[a-zA-Z0-9][a-zA-Z0-9-_.]*/
FULL_NAME: NAME
EXTRA: NAME
LEGACY_VERSION_CONSTRAINT: /(~=|==|!=|<=|>=|<|>|===)((?:(?<====)\s*[^\s]*)|(?:(?<===|!=)\s*v?(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*(?:[-_.]?(a|b|c|rc|alpha|beta|pre|preview)[-_.]?[0-9]*)?(?:(?:-[0-9]+)|(?:[-_.]?(post|rev|r)[-_.]?[0-9]*))?(?:(?:[-_.]?dev[-_.]?[0-9]*)?(?:\+[a-z0-9]+(?:[-_.][a-z0-9]+)*)?|\.\*)?)|(?:(?<=~=)\s*v?(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)+(?:[-_.]?(a|b|c|rc|alpha|beta|pre|preview)[-_.]?[0-9]*)?(?:(?:-[0-9]+)|(?:[-_.]?(post|rev|r)[-_.]?[0-9]*))?(?:[-_.]?dev[-_.]?[0-9]*)?)|(?:(?<=<=|>=)\s*[^,;\s)]*)|(?:(?<=<|>)\s*[^,;\s)]*))/i
URI: /[^ ]+/
_MARKER_SEPARATOR: ";"
_L_PAREN: "("
_R_PAREN: ")"
_L_BRACKET: "["
_R_BRACKET: "]"
_COMMA: ","
_AT: "@"

%import .markers.marker
%import common.WS_INLINE
%ignore WS_INLINE
poetry-core-2.1.1/src/poetry/core/version/helpers.py000066400000000000000000000031351475444614500225450ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

from poetry.core.constraints.version import Version
from poetry.core.constraints.version import VersionUnion
from poetry.core.constraints.version import parse_constraint


if TYPE_CHECKING:
    from poetry.core.constraints.version import VersionConstraint

PYTHON_VERSION = [
    "2.7.*",
    "3.0.*",
    "3.1.*",
    "3.2.*",
    "3.3.*",
    "3.4.*",
    "3.5.*",
    "3.6.*",
    "3.7.*",
    "3.8.*",
    "3.9.*",
    "3.10.*",
    "3.11.*",
    "3.12.*",
    "3.13.*",
]


def format_python_constraint(constraint: VersionConstraint) -> str:
    """
    Transform a disjunctive Python version constraint into an equivalent
    constraint string expressed via exclusions (e.g. "!=3.0.*").
    """
    if isinstance(constraint, Version):
        if constraint.precision >= 3:
            return f"=={constraint}"

        # Transform 3.6 or 3
        if constraint.precision == 2:
            # 3.6
            constraint = parse_constraint(f"~{constraint.major}.{constraint.minor}")
        else:
            constraint = parse_constraint(f"^{constraint.major}.0")

    if not isinstance(constraint, VersionUnion):
        return str(constraint)

    formatted = []
    accepted = []

    for version in PYTHON_VERSION:
        version_constraint = parse_constraint(version)
        matches = constraint.allows_any(version_constraint)
        if not matches:
            formatted.append("!=" + version)
        else:
            accepted.append(version)

    # Checking lower bound
    low = accepted[0]

    formatted.insert(0, ">=" + ".".join(low.split(".")[:2]))

    return ", ".join(formatted)
poetry-core-2.1.1/src/poetry/core/version/markers.py000066400000000000000000001263141475444614500225540ustar00rootroot00000000000000from __future__ import annotations

import functools
import itertools
import re
import threading

from abc import ABC
from abc import abstractmethod
from collections import defaultdict
from typing import TYPE_CHECKING
from typing import Any
from typing import ClassVar
from typing import Generic
from typing import TypeVar
from typing import Union

from packaging.utils import canonicalize_name

from poetry.core.constraints.generic import BaseConstraint
from poetry.core.constraints.generic import Constraint
from poetry.core.constraints.generic import MultiConstraint
from poetry.core.constraints.generic import UnionConstraint
from poetry.core.constraints.generic.parser import STR_CMP_CONSTRAINT
from poetry.core.constraints.version import VersionConstraint
from poetry.core.constraints.version import VersionRange
from poetry.core.constraints.version import VersionUnion
from poetry.core.constraints.version.exceptions import ParseConstraintError
from poetry.core.version.grammars import GRAMMAR_PEP_508_MARKERS
from poetry.core.version.parser import Parser


if TYPE_CHECKING:
    from collections.abc import Callable
    from collections.abc import Iterable
    from collections.abc import Mapping

    from lark import Tree


class InvalidMarkerError(ValueError):
    """
    An invalid marker was found; users should refer to PEP 508.
    """


class UndefinedComparisonError(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """


class UndefinedEnvironmentNameError(ValueError):
    """
    A name was used that does not exist in the
    environment.
    """


ALIASES = {
    "os.name": "os_name",
    "sys.platform": "sys_platform",
    "platform.version": "platform_version",
    "platform.machine": "platform_machine",
    "platform.python_implementation": "platform_python_implementation",
    "python_implementation": "platform_python_implementation",
}

PYTHON_VERSION_MARKERS = {"python_version", "python_full_version"}

# Parser: PEP 508 Environment Markers
_parser = Parser(GRAMMAR_PEP_508_MARKERS, "lalr")


class BaseMarker(ABC):
    @property
    def complexity(self) -> tuple[int, int]:
        """
        first element: number of single markers, where each SingleMarkerLike
                       counts as its actual number of constraints
        second element: number of single markers, where each SingleMarkerLike counts as 1
        """
        return 1, 1

    @abstractmethod
    def intersect(self, other: BaseMarker) -> BaseMarker:
        raise NotImplementedError

    @abstractmethod
    def union(self, other: BaseMarker) -> BaseMarker:
        raise NotImplementedError

    def is_any(self) -> bool:
        return False

    def is_empty(self) -> bool:
        return False

    @abstractmethod
    def validate(self, environment: Mapping[str, Any] | None) -> bool:
        raise NotImplementedError

    @abstractmethod
    def without_extras(self) -> BaseMarker:
        raise NotImplementedError

    @abstractmethod
    def exclude(self, marker_name: str) -> BaseMarker:
        raise NotImplementedError

    @abstractmethod
    def only(self, *marker_names: str) -> BaseMarker:
        raise NotImplementedError

    @abstractmethod
    def reduce_by_python_constraint(
        self, python_constraint: VersionConstraint
    ) -> BaseMarker:
        raise NotImplementedError

    @abstractmethod
    def invert(self) -> BaseMarker:
        raise NotImplementedError

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self}>"

    @abstractmethod
    def __hash__(self) -> int:
        raise NotImplementedError

    @abstractmethod
    def __eq__(self, other: object) -> bool:
        raise NotImplementedError


class AnyMarker(BaseMarker):
    def intersect(self, other: BaseMarker) -> BaseMarker:
        return other

    def union(self, other: BaseMarker) -> BaseMarker:
        return self

    def is_any(self) -> bool:
        return True

    def validate(self, environment: Mapping[str, Any] | None) -> bool:
        return True

    def without_extras(self) -> BaseMarker:
        return self

    def exclude(self, marker_name: str) -> BaseMarker:
        return self

    def only(self, *marker_names: str) -> BaseMarker:
        return self

    def reduce_by_python_constraint(
        self, python_constraint: VersionConstraint
    ) -> BaseMarker:
        return self

    def invert(self) -> EmptyMarker:
        return EmptyMarker()

    def __str__(self) -> str:
        return ""

    def __repr__(self) -> str:
        return ""

    def __hash__(self) -> int:
        return hash("any")

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, BaseMarker):
            return NotImplemented

        return isinstance(other, AnyMarker)


class EmptyMarker(BaseMarker):
    def intersect(self, other: BaseMarker) -> BaseMarker:
        return self

    def union(self, other: BaseMarker) -> BaseMarker:
        return other

    def is_empty(self) -> bool:
        return True

    def validate(self, environment: Mapping[str, Any] | None) -> bool:
        return False

    def without_extras(self) -> BaseMarker:
        return self

    def exclude(self, marker_name: str) -> EmptyMarker:
        return self

    def only(self, *marker_names: str) -> BaseMarker:
        return self

    def reduce_by_python_constraint(
        self, python_constraint: VersionConstraint
    ) -> BaseMarker:
        return self

    def invert(self) -> AnyMarker:
        return AnyMarker()

    def __str__(self) -> str:
        return ""

    def __repr__(self) -> str:
        return ""

    def __hash__(self) -> int:
        return hash("empty")

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, BaseMarker):
            return NotImplemented

        return isinstance(other, EmptyMarker)


SingleMarkerConstraint = TypeVar(
    "SingleMarkerConstraint", bound=Union[BaseConstraint, VersionConstraint]
)


class SingleMarkerLike(BaseMarker, ABC, Generic[SingleMarkerConstraint]):
    def __init__(self, name: str, constraint: SingleMarkerConstraint) -> None:
        from poetry.core.constraints.generic import (
            parse_constraint as parse_generic_constraint,
        )
        from poetry.core.constraints.generic import parse_extra_constraint
        from poetry.core.constraints.version import parse_marker_version_constraint

        self._name = ALIASES.get(name, name)
        self._constraint = constraint
        self._parser: Callable[[str], BaseConstraint | VersionConstraint]
        if isinstance(constraint, VersionConstraint):
            self._parser = functools.partial(
                parse_marker_version_constraint, pep440=name != "platform_release"
            )
        elif name == "extra":
            self._parser = parse_extra_constraint
        else:
            self._parser = parse_generic_constraint

    @property
    def name(self) -> str:
        return self._name

    @property
    def constraint(self) -> SingleMarkerConstraint:
        return self._constraint

    @property
    def _key(self) -> tuple[object, ...]:
        return self._name, self._constraint

    def validate(self, environment: Mapping[str, Any] | None) -> bool:
        if environment is None:
            return True

        if self._name not in environment:
            return True

        # "extra" is special because it can have multiple values at the same time.
        # "extra == 'a'" will be true if "a" is one of the active extras.
        # "extra != 'a'" will be true if "a" is not one of the active extras.
        # Further, extra names are normalized for comparison.
        if self._name == "extra":
            extras = environment["extra"]
            if isinstance(extras, str):
                extras = {extras}
            extras = {canonicalize_name(extra) for extra in extras}
            assert isinstance(self._constraint, Constraint)
            normalized_value = canonicalize_name(self._constraint.value)
            if self._constraint.operator == "==":
                return normalized_value in extras
            assert self._constraint.operator == "!="
            return normalized_value not in extras

        # The type of constraint returned by the parser matches our constraint: either
        # both are BaseConstraint or both are VersionConstraint. But it's hard for mypy
        # to know that.
        constraint = self._parser(environment[self._name])
        return self._constraint.allows(constraint)  # type: ignore[arg-type]

    def without_extras(self) -> BaseMarker:
        return self.exclude("extra")

    def exclude(self, marker_name: str) -> BaseMarker:
        if self.name == marker_name:
            return AnyMarker()

        return self

    def only(self, *marker_names: str) -> BaseMarker:
        if self.name not in marker_names:
            return AnyMarker()

        return self

    def reduce_by_python_constraint(
        self, python_constraint: VersionConstraint
    ) -> BaseMarker:
        return self

    def intersect(self, other: BaseMarker) -> BaseMarker:
        if isinstance(other, SingleMarkerLike):
            merged = _merge_single_markers(self, other, MultiMarker)
            if merged is not None:
                return merged

            return MultiMarker(self, other)

        return other.intersect(self)

    def union(self, other: BaseMarker) -> BaseMarker:
        if isinstance(other, SingleMarkerLike):
            merged = _merge_single_markers(self, other, MarkerUnion)
            if merged is not None:
                return merged

            return MarkerUnion(self, other)

        return other.union(self)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SingleMarkerLike):
            return NotImplemented

        return self._key == other._key

    def __hash__(self) -> int:
        return hash(self._key)


class SingleMarker(SingleMarkerLike[Union[BaseConstraint, VersionConstraint]]):
    _CONSTRAINT_RE_PATTERN_1 = re.compile(
        r"(?i)^(?P<op>~=|!=|>=?|<=?|==?=?|not in|in)?\s*(?P<value>.+)$"
    )
    _CONSTRAINT_RE_PATTERN_2 = STR_CMP_CONSTRAINT

    VALUE_SEPARATOR_RE = re.compile("[ ,|]+")
    _VERSION_LIKE_MARKER_NAME: ClassVar[set[str]] = {
        "python_version",
        "python_full_version",
        "platform_release",
    }

    def __init__(
        self,
        name: str,
        constraint: str | BaseConstraint | VersionConstraint,
        swapped_name_value: bool = False,
    ) -> None:
        from poetry.core.constraints.generic import (
            parse_constraint as parse_generic_constraint,
        )
        from poetry.core.constraints.generic import parse_extra_constraint
        from poetry.core.constraints.version import parse_marker_version_constraint

        parsed_constraint: BaseConstraint | VersionConstraint
        parser: Callable[[str], BaseConstraint | VersionConstraint]
        original_constraint_string = constraint_string = str(constraint)
        self._swapped_name_value: bool = swapped_name_value

        if swapped_name_value:
            pattern = self._CONSTRAINT_RE_PATTERN_2
        else:
            pattern = self._CONSTRAINT_RE_PATTERN_1

        m = pattern.match(constraint_string)
        if m is None:
            raise InvalidMarkerError(
                f"Invalid marker for '{name}': {constraint_string}"
            )

        self._operator = m.group("op")
        if self._operator is None:
            self._operator = "=="

        self._value = m.group("value")
        parser = parse_extra_constraint if name == "extra" else parse_generic_constraint

        if swapped_name_value and name not in PYTHON_VERSION_MARKERS:
            # Something like `"tegra" in platform_release`
            # or `"arm" not in platform_version`.
            pass
        elif name in self._VERSION_LIKE_MARKER_NAME:
            parser = functools.partial(
                parse_marker_version_constraint, pep440=name != "platform_release"
            )

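            # e.g. (illustrative): 'python_version in "2.7 3.6"' becomes the
            # version constraint "2.7.* || 3.6.*" below.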
            if self._operator in {"in", "not in"}:
                versions = []
                for v in self.VALUE_SEPARATOR_RE.split(self._value):
                    split = v.split(".")
                    if len(split) in (1, 2):
                        split.append("*")
                        op = "" if self._operator == "in" else "!="
                    else:
                        op = "==" if self._operator == "in" else "!="

                    versions.append(op + ".".join(split))

                glue = ", "
                if self._operator == "in":
                    glue = " || "

                constraint_string = glue.join(versions)
            elif name == "python_full_version" and not swapped_name_value:
                # fix precision of python_full_version marker
                precision = self._value.count(".") + 1
                if precision < 3:
                    suffix = ".0" * (3 - precision)
                    self._value += suffix
                    constraint_string += suffix
        else:
            # if we have a in/not in operator we split the constraint
            # into a union/multi-constraint of single constraint
            if self._operator in {"in", "not in"}:
                op, glue = ("==", " || ") if self._operator == "in" else ("!=", ", ")
                values = self.VALUE_SEPARATOR_RE.split(self._value)
                constraint_string = glue.join(f"{op} {value}" for value in values)

        try:
            parsed_constraint = parser(constraint_string)
        except ParseConstraintError as e:
            raise InvalidMarkerError(
                f"Invalid marker for '{name}': {original_constraint_string}"
            ) from e

        super().__init__(name, parsed_constraint)

    @property
    def operator(self) -> str:
        return self._operator

    @property
    def value(self) -> str:
        return self._value

    @property
    def _key(self) -> tuple[object, ...]:
        return self._name, self._operator, self._value

    def reduce_by_python_constraint(
        self, python_constraint: VersionConstraint
    ) -> BaseMarker:
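        # e.g. (illustrative): 'python_version >= "3.6"' reduced by a project
        # python constraint of ">=3.8,<4.0" becomes AnyMarker (always true).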
        if self.name in PYTHON_VERSION_MARKERS:
            from poetry.core.packages.utils.utils import (
                get_python_constraint_from_marker,
            )

            assert isinstance(self._constraint, VersionConstraint)
            constraint = get_python_constraint_from_marker(self)
            if constraint.allows_all(python_constraint):
                return AnyMarker()
            elif not constraint.allows_any(python_constraint):
                return EmptyMarker()

        return self

    def invert(self) -> BaseMarker:
        if self._operator in ("===", "=="):
            operator = "!="
        elif self._operator == "!=":
            operator = "=="
        elif self._operator == ">":
            operator = "<="
        elif self._operator == ">=":
            operator = "<"
        elif self._operator == "<":
            operator = ">="
        elif self._operator == "<=":
            operator = ">"
        elif self._operator == "in":
            operator = "not in"
        elif self._operator == "not in":
            operator = "in"
        elif self._operator == "~=":
            # This one is more tricky to handle
            # since it's technically a multi marker
            # so the inverse will be a union of inverse
            from poetry.core.constraints.version import VersionRangeConstraint

            if not isinstance(self._constraint, VersionRangeConstraint):
                # The constraint must be a version range, otherwise
                # it's an internal error
                raise RuntimeError(
                    "The '~=' operator should only represent version ranges"
                )

            min_ = self._constraint.min
            min_operator = ">=" if self._constraint.include_min else ">"
            max_ = self._constraint.max
            max_operator = "<=" if self._constraint.include_max else "<"

            return MultiMarker(
                SingleMarker(self._name, f"{min_operator} {min_}"),
                SingleMarker(self._name, f"{max_operator} {max_}"),
            ).invert()
        else:
            # We should never go there
            raise RuntimeError(f"Invalid marker operator '{self._operator}'")

        if self._swapped_name_value:
            constraint = f'"{self._value}" {operator} {self._name}'
        else:
            constraint = f'{self._name} {operator} "{self._value}"'
        return parse_marker(constraint)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SingleMarker):
            return NotImplemented

        return self._key == other._key

    def __hash__(self) -> int:
        return hash(self._key)

    def __str__(self) -> str:
        if self._swapped_name_value:
            return f'"{self._value}" {self._operator} {self._name}'
        return f'{self._name} {self._operator} "{self._value}"'


class AtomicMultiMarker(SingleMarkerLike[MultiConstraint]):
    def __init__(self, name: str, constraint: MultiConstraint) -> None:
        assert all(
            c.operator in ({"==", "!="} if name == "extra" else {"!="})
            for c in constraint.constraints
        )
        super().__init__(name, constraint)

    @property
    def complexity(self) -> tuple[int, int]:
        return len(self._constraint.constraints), 1

    def validate(self, environment: Mapping[str, Any] | None) -> bool:
        if self._name == "extra":
            return self.expand().validate(environment)
        return super().validate(environment)

    def invert(self) -> BaseMarker:
        return AtomicMarkerUnion(self._name, self._constraint.invert())

    def expand(self) -> MultiMarker:
        return MultiMarker(
            *(SingleMarker(self._name, c) for c in self._constraint.constraints)
        )

    def __str__(self) -> str:
        return " and ".join(
            f'{self._name} {c.operator} "{c.value}"'
            for c in self._constraint.constraints
        )


class AtomicMarkerUnion(SingleMarkerLike[UnionConstraint]):
    def __init__(self, name: str, constraint: UnionConstraint) -> None:
        assert all(
            isinstance(c, Constraint)
            and c.operator in ({"==", "!="} if name == "extra" else {"=="})
            for c in constraint.constraints
        )
        super().__init__(name, constraint)

    @property
    def complexity(self) -> tuple[int, int]:
        return len(self._constraint.constraints), 1

    def validate(self, environment: Mapping[str, Any] | None) -> bool:
        if self._name == "extra":
            return self.expand().validate(environment)
        return super().validate(environment)

    def invert(self) -> BaseMarker:
        return AtomicMultiMarker(self._name, self._constraint.invert())

    def expand(self) -> MarkerUnion:
        return MarkerUnion(
            *(SingleMarker(self._name, c) for c in self._constraint.constraints)
        )

    def __str__(self) -> str:
        # In __init__ we've made sure that we have a UnionConstraint that
        # contains only elements of type Constraint (instead of BaseConstraint)
        # but mypy can't see that.
        return " or ".join(
            f'{self._name} {c.operator} "{c.value}"'  # type: ignore[attr-defined]
            for c in self._constraint.constraints
        )


def _flatten_markers(
    markers: Iterable[BaseMarker],
    flatten_class: type[MarkerUnion | MultiMarker],
) -> list[BaseMarker]:
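    # e.g. (illustrative): flattening MultiMarker(MultiMarker(A, B), C) with
    # flatten_class=MultiMarker yields [A, B, C].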
    flattened = []

    for marker in markers:
        if isinstance(marker, flatten_class):
            for _marker in _flatten_markers(
                marker.markers,  # type: ignore[attr-defined]
                flatten_class,
            ):
                if _marker not in flattened:
                    flattened.append(_marker)

        elif marker not in flattened:
            flattened.append(marker)

    return flattened


class MultiMarker(BaseMarker):
    def __init__(self, *markers: BaseMarker) -> None:
        self._markers = tuple(_flatten_markers(markers, MultiMarker))

    @property
    def markers(self) -> tuple[BaseMarker, ...]:
        return self._markers

    @property
    def complexity(self) -> tuple[int, int]:
        return tuple(sum(c) for c in zip(*(m.complexity for m in self._markers)))

    @classmethod
    def of(cls, *markers: BaseMarker) -> BaseMarker:
        new_markers = _flatten_markers(markers, MultiMarker)
        old_markers: list[BaseMarker] = []

        while old_markers != new_markers:
            old_markers = new_markers
            new_markers = []
            for marker in old_markers:
                if marker in new_markers:
                    continue

                if marker.is_any():
                    continue

                intersected = False
                for i, mark in enumerate(new_markers):
                    # If we have a SingleMarker then with any luck after intersection
                    # it'll become another SingleMarker.
                    if isinstance(mark, SingleMarkerLike):
                        new_marker = mark.intersect(marker)
                        if new_marker.is_empty():
                            return EmptyMarker()

                        if isinstance(new_marker, SingleMarkerLike):
                            new_markers[i] = new_marker
                            intersected = True
                            break

                    # If we have a MarkerUnion then we can look for the simplifications
                    # implemented in intersect_simplify().
                    elif isinstance(mark, MarkerUnion):
                        intersection = mark.intersect_simplify(marker)
                        if intersection is not None:
                            new_markers[i] = intersection
                            intersected = True
                            break

                if intersected:
                    # flatten again because intersect_simplify may return a multi
                    new_markers = _flatten_markers(new_markers, MultiMarker)
                    continue

                new_markers.append(marker)

        if any(m.is_empty() for m in new_markers):
            return EmptyMarker()

        if not new_markers:
            return AnyMarker()

        if len(new_markers) == 1:
            return new_markers[0]

        return MultiMarker(*new_markers)

    def intersect(self, other: BaseMarker) -> BaseMarker:
        return intersection(self, other)

    def union(self, other: BaseMarker) -> BaseMarker:
        return union(self, other)

    def union_simplify(self, other: BaseMarker) -> BaseMarker | None:
        """
        Finds a couple of easy simplifications for union on MultiMarkers:

            - union with any marker that appears as part of the multi is just that
              marker

            - union between two multimarkers where one is contained by the other is just
              the larger of the two

            - union between two multimarkers where there are some common markers
              and the union of unique markers is a single marker
        """
        if other in self._markers:
            return other

        if isinstance(other, MultiMarker):
            our_markers = set(self.markers)
            their_markers = set(other.markers)

            if our_markers.issubset(their_markers):
                return self

            if their_markers.issubset(our_markers):
                return other

            shared_markers = our_markers.intersection(their_markers)
            if not shared_markers:
                return None

            unique_markers = our_markers - their_markers
            other_unique_markers = their_markers - our_markers
            unique_union = MultiMarker(*unique_markers).union(
                MultiMarker(*other_unique_markers)
            )
            if isinstance(unique_union, (SingleMarkerLike, AnyMarker)):
                # Use list instead of set for deterministic order.
                common_markers = [
                    marker for marker in self.markers if marker in shared_markers
                ]
                return unique_union.intersect(MultiMarker(*common_markers))

        return None

    def validate(self, environment: Mapping[str, Any] | None) -> bool:
        return all(m.validate(environment) for m in self._markers)

    def without_extras(self) -> BaseMarker:
        return self.exclude("extra")

    def exclude(self, marker_name: str) -> BaseMarker:
        new_markers = []

        for m in self._markers:
            if isinstance(m, SingleMarkerLike) and m.name == marker_name:
                # The marker is not relevant since it must be excluded
                continue

            marker = m.exclude(marker_name)

            if not marker.is_empty():
                new_markers.append(marker)

        return intersection(*new_markers)

    def only(self, *marker_names: str) -> BaseMarker:
        return self.of(*(m.only(*marker_names) for m in self._markers))

    def reduce_by_python_constraint(
        self, python_constraint: VersionConstraint
    ) -> BaseMarker:
        return self.of(
            *(m.reduce_by_python_constraint(python_constraint) for m in self._markers)
        )

    def invert(self) -> BaseMarker:
        markers = [marker.invert() for marker in self._markers]

        return MarkerUnion(*markers)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, MultiMarker):
            return False

        return self._markers == other.markers

    def __hash__(self) -> int:
        return hash(("multi", *self._markers))

    def __str__(self) -> str:
        elements = []
        for m in self._markers:
            if isinstance(m, (SingleMarker, MultiMarker, AtomicMultiMarker)):
                elements.append(str(m))
            else:
                elements.append(f"({m})")

        return " and ".join(elements)


class MarkerUnion(BaseMarker):
    def __init__(self, *markers: BaseMarker) -> None:
        self._markers = tuple(_flatten_markers(markers, MarkerUnion))

    @property
    def markers(self) -> tuple[BaseMarker, ...]:
        return self._markers

    @property
    def complexity(self) -> tuple[int, int]:
        return tuple(sum(c) for c in zip(*(m.complexity for m in self._markers)))

    @classmethod
    def of(cls, *markers: BaseMarker) -> BaseMarker:
        new_markers = _flatten_markers(markers, MarkerUnion)
        old_markers: list[BaseMarker] = []

        while old_markers != new_markers:
            old_markers = new_markers
            new_markers = []
            for marker in old_markers:
                if marker in new_markers:
                    continue

                if marker.is_empty():
                    continue

                included = False
                for i, mark in enumerate(new_markers):
                    # If we have a SingleMarker then with any luck after union it'll
                    # become another SingleMarker.
                    if isinstance(mark, SingleMarkerLike):
                        new_marker = mark.union(marker)
                        if new_marker.is_any():
                            return AnyMarker()

                        if isinstance(new_marker, SingleMarkerLike):
                            new_markers[i] = new_marker
                            included = True
                            break

                    # If we have a MultiMarker then we can look for the simplifications
                    # implemented in union_simplify().
                    elif isinstance(mark, MultiMarker):
                        union = mark.union_simplify(marker)
                        if union is not None:
                            new_markers[i] = union
                            included = True
                            break

                if included:
                    # flatten again because union_simplify may return a union
                    new_markers = _flatten_markers(new_markers, MarkerUnion)
                    continue

                new_markers.append(marker)

        if any(m.is_any() for m in new_markers):
            return AnyMarker()

        if not new_markers:
            return EmptyMarker()

        if len(new_markers) == 1:
            return new_markers[0]

        return MarkerUnion(*new_markers)

    def intersect(self, other: BaseMarker) -> BaseMarker:
        return intersection(self, other)

    def union(self, other: BaseMarker) -> BaseMarker:
        return union(self, other)

    def intersect_simplify(self, other: BaseMarker) -> BaseMarker | None:
        """
        Finds a couple of easy simplifications for intersection on MarkerUnions:

            - intersection with any marker that appears as part of the union is just
              that marker

            - intersection between two markerunions where one is contained by the other
              is just the smaller of the two

            - intersection between two markerunions where there are some common markers
              and the intersection of unique markers is a single marker
        """
        if other in self._markers:
            return other

        if isinstance(other, MarkerUnion):
            our_markers = set(self.markers)
            their_markers = set(other.markers)

            if our_markers.issubset(their_markers):
                return self

            if their_markers.issubset(our_markers):
                return other

            shared_markers = our_markers.intersection(their_markers)
            if not shared_markers:
                return None

            unique_markers = our_markers - their_markers
            other_unique_markers = their_markers - our_markers
            unique_intersection = MarkerUnion(*unique_markers).intersect(
                MarkerUnion(*other_unique_markers)
            )
            if isinstance(unique_intersection, (SingleMarkerLike, EmptyMarker)):
                # Use list instead of set for deterministic order.
                common_markers = [
                    marker for marker in self.markers if marker in shared_markers
                ]
                return unique_intersection.union(MarkerUnion(*common_markers))

        return None

    def validate(self, environment: Mapping[str, Any] | None) -> bool:
        return any(m.validate(environment) for m in self._markers)

    def without_extras(self) -> BaseMarker:
        return self.exclude("extra")

    def exclude(self, marker_name: str) -> BaseMarker:
        new_markers = []

        for m in self._markers:
            if isinstance(m, SingleMarkerLike) and m.name == marker_name:
                # The marker is not relevant since it must be excluded
                continue

            marker = m.exclude(marker_name)
            new_markers.append(marker)

        if not new_markers:
            # All markers were the excluded marker.
            return AnyMarker()

        return union(*new_markers)

    def only(self, *marker_names: str) -> BaseMarker:
        return self.of(*(m.only(*marker_names) for m in self._markers))

    def reduce_by_python_constraint(
        self, python_constraint: VersionConstraint
    ) -> BaseMarker:
        from poetry.core.packages.utils.utils import get_python_constraint_from_marker

        markers: Iterable[BaseMarker] = self._markers
        if isinstance(python_constraint, (VersionRange, VersionUnion)):
            python_only_markers = []
            other_markers = []
            for m in self._markers:
                if m == m.only(*PYTHON_VERSION_MARKERS):
                    python_only_markers.append(m)
                else:
                    other_markers.append(m)
            if get_python_constraint_from_marker(
                self.of(*python_only_markers)
            ).allows_all(python_constraint):
                if not other_markers:
                    return AnyMarker()
                markers = other_markers

        return self.of(
            *(m.reduce_by_python_constraint(python_constraint) for m in markers)
        )

    def invert(self) -> BaseMarker:
        markers = [marker.invert() for marker in self._markers]
        return MultiMarker(*markers)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, MarkerUnion):
            return False

        return self._markers == other.markers

    def __hash__(self) -> int:
        return hash(("union", *self._markers))

    def __str__(self) -> str:
        return " or ".join(str(m) for m in self._markers)


@functools.cache
def parse_marker(marker: str) -> BaseMarker:
    if marker == "":
        return EmptyMarker()

    if not marker or marker == "*":
        return AnyMarker()

    parsed = _parser.parse(marker)

    markers = _compact_markers(parsed.children)

    return markers


def _compact_markers(
    tree_elements: Tree, tree_prefix: str = "", top_level: bool = True
) -> BaseMarker:
    from lark import Token

    # groups is a disjunction of conjunctions
    # eg [[A, B], [C, D]] represents "(A and B) or (C and D)"
    groups: list[list[BaseMarker]] = [[]]

    for token in tree_elements:
        if isinstance(token, Token):
            if token.type == f"{tree_prefix}BOOL_OP" and token.value == "or":
                groups.append([])

            continue

        if token.data == "marker":
            sub_marker = _compact_markers(
                token.children, tree_prefix=tree_prefix, top_level=False
            )
            groups[-1].append(sub_marker)

        elif token.data == f"{tree_prefix}item":
            name, op, value = token.children
            swapped_name_value = value.type == f"{tree_prefix}MARKER_NAME"
            stringed_value = name.type in {
                f"{tree_prefix}ESCAPED_STRING",
                f"{tree_prefix}SINGLE_QUOTED_STRING",
            }
            if swapped_name_value:
                name, value = value, name

            value = value[1:-1]

            sub_marker = SingleMarker(
                str(name),
                f'"{value}" {op}' if stringed_value else f"{op}{value}",
                swapped_name_value=swapped_name_value,
            )
            groups[-1].append(sub_marker)

        elif token.data == f"{tree_prefix}BOOL_OP" and token.children[0] == "or":
            groups.append([])

    # Combine the groups.
    sub_markers = [
        group[0] if len(group) == 1 else MultiMarker(*group) for group in groups
    ]

    # This function calls itself recursively. In the inner calls we don't perform any
    # simplification, instead doing it all only when we have the complete marker.
    if not top_level:
        return MarkerUnion(*sub_markers)

    return union(*sub_markers)


@functools.cache
def cnf(marker: BaseMarker) -> BaseMarker:
    """Transforms the marker into CNF (conjunctive normal form)."""
    if isinstance(marker, MarkerUnion):
        cnf_markers = [cnf(m) for m in marker.markers]
        sub_marker_lists = [
            m.markers if isinstance(m, MultiMarker) else [m] for m in cnf_markers
        ]
        return MultiMarker.of(
            *[MarkerUnion.of(*c) for c in itertools.product(*sub_marker_lists)]
        )

    if isinstance(marker, MultiMarker):
        return MultiMarker.of(*[cnf(m) for m in marker.markers])

    return marker


@functools.cache
def dnf(marker: BaseMarker) -> BaseMarker:
    """Transforms the marker into DNF (disjunctive normal form)."""
    if isinstance(marker, MultiMarker):
        dnf_markers = [dnf(m) for m in marker.markers]
        sub_marker_lists = [
            m.markers if isinstance(m, MarkerUnion) else [m] for m in dnf_markers
        ]
        return MarkerUnion.of(
            *[MultiMarker.of(*c) for c in itertools.product(*sub_marker_lists)]
        )

    if isinstance(marker, MarkerUnion):
        return MarkerUnion.of(*[dnf(m) for m in marker.markers])

    return marker


def detect_recursion(func: Callable[..., BaseMarker]) -> Callable[..., BaseMarker]:
    """Decorator to detect recursions in `intersection` and `union` early."""
    func.call_args = defaultdict(list)  # type: ignore[attr-defined]

    def decorated(*markers: BaseMarker) -> BaseMarker:
        thread_id = threading.get_ident()
        call_args = func.call_args[thread_id]  # type: ignore[attr-defined]
        if markers in call_args:
            raise RecursionError
        call_args.append(markers)
        try:
            result = func(*markers)
        finally:
            call_args.pop()
        return result

    return decorated


@detect_recursion
def intersection(*markers: BaseMarker) -> BaseMarker:
    # Sometimes normalization makes the marker more complicated instead of
    # simpler -> choose the candidate with the least complexity
    unnormalized: BaseMarker = MultiMarker(*markers)
    while (
        isinstance(unnormalized, (MultiMarker, MarkerUnion))
        and len(unnormalized.markers) == 1
    ):
        unnormalized = unnormalized.markers[0]

    disjunction = dnf(unnormalized)
    if not isinstance(disjunction, MarkerUnion):
        return disjunction

    try:
        conjunction = cnf(disjunction)
        if not isinstance(conjunction, MultiMarker):
            return conjunction
    except RecursionError:
        candidates = [disjunction, unnormalized]
    else:
        candidates = [disjunction, conjunction, unnormalized]

    return min(*candidates, key=lambda x: x.complexity)


@detect_recursion
def union(*markers: BaseMarker) -> BaseMarker:
    # Sometimes normalization makes the marker more complicated instead of
    # simpler -> choose the candidate with the least complexity
    unnormalized: BaseMarker = MarkerUnion(*markers)
    while (
        isinstance(unnormalized, (MultiMarker, MarkerUnion))
        and len(unnormalized.markers) == 1
    ):
        unnormalized = unnormalized.markers[0]

    conjunction = cnf(unnormalized)
    if not isinstance(conjunction, MultiMarker):
        return conjunction

    try:
        disjunction = dnf(conjunction)
        if not isinstance(disjunction, MarkerUnion):
            return disjunction
    except RecursionError:
        candidates = [conjunction, unnormalized]
    else:
        candidates = [disjunction, conjunction, unnormalized]

    return min(*candidates, key=lambda x: x.complexity)


@functools.cache
def _merge_single_markers(
    marker1: SingleMarkerLike[SingleMarkerConstraint],
    marker2: SingleMarkerLike[SingleMarkerConstraint],
    merge_class: type[MultiMarker | MarkerUnion],
) -> BaseMarker | None:
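    # Returns None when the two markers cannot be merged into a single,
    # simpler marker; callers then keep both markers side by side.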
    if {marker1.name, marker2.name} == PYTHON_VERSION_MARKERS:
        assert isinstance(marker1, SingleMarker)
        assert isinstance(marker2, SingleMarker)
        return _merge_python_version_single_markers(marker1, marker2, merge_class)

    if marker1.name != marker2.name:
        return None

    if merge_class == MultiMarker:
        merge_method = marker1.constraint.intersect
    else:
        merge_method = marker1.constraint.union
    # Markers with the same name have the same constraint type,
    # but mypy can't see that.
    result_constraint = merge_method(marker2.constraint)  # type: ignore[arg-type]

    result_marker: BaseMarker | None = None
    if result_constraint.is_empty():
        result_marker = EmptyMarker()
    elif result_constraint.is_any():
        result_marker = AnyMarker()
    elif result_constraint == marker1.constraint:
        result_marker = marker1
    elif result_constraint == marker2.constraint:
        result_marker = marker2
    elif isinstance(result_constraint, Constraint) or (
        isinstance(result_constraint, VersionConstraint)
        and result_constraint.is_simple()
    ):
        result_marker = SingleMarker(marker1.name, result_constraint)
    elif isinstance(result_constraint, UnionConstraint) and all(
        isinstance(c, Constraint)
        and c.operator in ({"==", "!="} if marker1.name == "extra" else {"=="})
        for c in result_constraint.constraints
    ):
        result_marker = AtomicMarkerUnion(marker1.name, result_constraint)
    elif isinstance(result_constraint, MultiConstraint) and all(
        c.operator in ({"==", "!="} if marker1.name == "extra" else {"!="})
        for c in result_constraint.constraints
    ):
        result_marker = AtomicMultiMarker(marker1.name, result_constraint)
    elif marker1.name == "python_version":
        from poetry.core.packages.utils.utils import get_python_constraint_from_marker

        if isinstance(result_constraint, VersionRange) and result_constraint.min:
            # Convert 'python_version >= "3.8" and python_version < "3.9"'
            # to 'python_version == "3.8"'
            candidate = parse_marker(f'{marker1.name} == "{result_constraint.min}"')
            if get_python_constraint_from_marker(candidate) == result_constraint:
                result_marker = candidate

        elif isinstance(result_constraint, VersionUnion) and merge_class == MarkerUnion:
            # Convert 'python_version == "3.8" or python_version >= "3.9"'
            # to 'python_version >= "3.8"'.
            # Convert 'python_version <= "3.8" or python_version >= "3.9"' to "any".
            result_constraint = get_python_constraint_from_marker(marker1).union(
                get_python_constraint_from_marker(marker2)
            )
            if result_constraint.is_any():
                result_marker = AnyMarker()
            elif result_constraint.is_simple():
                result_marker = SingleMarker(marker1.name, result_constraint)

    return result_marker


def _merge_python_version_single_markers(
    marker1: SingleMarker,
    marker2: SingleMarker,
    merge_class: type[MultiMarker | MarkerUnion],
) -> BaseMarker | None:
    from poetry.core.packages.utils.utils import get_python_constraint_from_marker

    if marker1.name == "python_version":
        version_marker = marker1
        full_version_marker = marker2
    else:
        version_marker = marker2
        full_version_marker = marker1

    normalized_constraint = get_python_constraint_from_marker(version_marker)
    normalized_marker = SingleMarker("python_full_version", normalized_constraint)
    merged_marker = _merge_single_markers(
        normalized_marker, full_version_marker, merge_class
    )
    if merged_marker == normalized_marker:
        # prefer original marker to avoid unnecessary changes
        return version_marker
    if merged_marker and isinstance(merged_marker, SingleMarker):
        # We have to fix markers like 'python_full_version == "3.6"'
        # to obtain 'python_full_version == "3.6.0"'.
        # It seems a bit hacky to convert to string and back to marker,
        # but it's probably much simpler than handling the different constraint
        # classes (mostly VersionRangeConstraint, but VersionUnion for "!="), and
        # since this conversion is only required for python_full_version markers
        # it may be sufficient to handle it here.
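        # For example, 'python_full_version == "3.8"' becomes
        # 'python_full_version == "3.8.0"', while 'python_full_version >= "3.8"'
        # is rewritten to the equivalent 'python_version >= "3.8"'.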
        marker_string = str(merged_marker)
        precision = marker_string.count(".") + 1
        target_precision = 3
        if precision < target_precision:
            if merged_marker.operator in {"<", ">="}:
                target_precision = 2
                marker_string = marker_string.replace(
                    "python_full_version", "python_version"
                )
            marker_string = (
                marker_string[:-1] + ".0" * (target_precision - precision) + '"'
            )
        elif (
            precision == target_precision
            and merged_marker.operator in {"<", ">="}
            and marker_string[:-1].endswith(".0")
        ):
            marker_string = marker_string.replace(
                "python_full_version", "python_version"
            )
            marker_string = marker_string[:-3] + '"'  # drop trailing ".0"
        merged_marker = parse_marker(marker_string)
    return merged_marker
poetry-core-2.1.1/src/poetry/core/version/parser.py000066400000000000000000000013601475444614500223750ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING
from typing import Any


if TYPE_CHECKING:
    from pathlib import Path

    from lark import Lark
    from lark import Tree


class Parser:
    def __init__(
        self, grammar: Path, parser: str = "lalr", debug: bool = False
    ) -> None:
        self._grammar = grammar
        self._parser = parser
        self._debug = debug
        self._lark: Lark | None = None

    def parse(self, text: str, **kwargs: Any) -> Tree:
        from lark import Lark

        if self._lark is None:
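            # Construct the Lark parser lazily on first use so that importing
            # this module stays cheap; later calls reuse the cached instance.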
            self._lark = Lark.open(
                grammar_filename=self._grammar, parser=self._parser, debug=self._debug
            )

        return self._lark.parse(text=text, **kwargs)
poetry-core-2.1.1/src/poetry/core/version/pep440/000077500000000000000000000000001475444614500215435ustar00rootroot00000000000000poetry-core-2.1.1/src/poetry/core/version/pep440/__init__.py000066400000000000000000000005401475444614500236530ustar00rootroot00000000000000from __future__ import annotations

from poetry.core.version.pep440.segments import LocalSegmentType
from poetry.core.version.pep440.segments import Release
from poetry.core.version.pep440.segments import ReleaseTag
from poetry.core.version.pep440.version import PEP440Version


__all__ = ("LocalSegmentType", "PEP440Version", "Release", "ReleaseTag")
poetry-core-2.1.1/src/poetry/core/version/pep440/parser.py000066400000000000000000000055011475444614500234120ustar00rootroot00000000000000from __future__ import annotations

import functools
import re

from typing import TYPE_CHECKING
from typing import TypeVar

from packaging.version import VERSION_PATTERN

from poetry.core.version.exceptions import InvalidVersionError
from poetry.core.version.pep440 import Release
from poetry.core.version.pep440 import ReleaseTag


if TYPE_CHECKING:
    from poetry.core.version.pep440 import LocalSegmentType
    from poetry.core.version.pep440.version import PEP440Version

T = TypeVar("T", bound="PEP440Version")


class PEP440Parser:
    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
    _local_version_separators = re.compile(r"[._-]")

    @classmethod
    def _get_release(cls, match: re.Match[str] | None) -> Release:
        if not match or match.group("release") is None:
            return Release(0)
        return Release.from_parts(*(int(i) for i in match.group("release").split(".")))

    @classmethod
    def _get_prerelease(cls, match: re.Match[str] | None) -> ReleaseTag | None:
        if not match or match.group("pre") is None:
            return None
        return ReleaseTag(match.group("pre_l"), int(match.group("pre_n") or 0))

    @classmethod
    def _get_postrelease(cls, match: re.Match[str] | None) -> ReleaseTag | None:
        if not match or match.group("post") is None:
            return None

        return ReleaseTag(
            match.group("post_l") or "post",
            int(match.group("post_n1") or match.group("post_n2") or 0),
        )

    @classmethod
    def _get_devrelease(cls, match: re.Match[str] | None) -> ReleaseTag | None:
        if not match or match.group("dev") is None:
            return None
        return ReleaseTag(match.group("dev_l"), int(match.group("dev_n") or 0))

    @classmethod
    def _get_local(cls, match: re.Match[str] | None) -> LocalSegmentType | None:
        if not match or match.group("local") is None:
            return None

        return tuple(
            part.lower()
            for part in cls._local_version_separators.split(match.group("local"))
        )

    @classmethod
    @functools.cache
    def parse(cls, value: str, version_class: type[T]) -> T:
        match = cls._regex.search(value) if value else None
        if not match:
            raise InvalidVersionError(f"Invalid PEP 440 version: '{value}'")

        return version_class(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=cls._get_release(match),
            pre=cls._get_prerelease(match),
            post=cls._get_postrelease(match),
            dev=cls._get_devrelease(match),
            local=cls._get_local(match),
            text=value,
        )


def parse_pep440(value: str, version_class: type[T]) -> T:
    version: T = PEP440Parser.parse(value, version_class)  # type: ignore[arg-type]
    return version
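
# Illustrative usage (assuming a PEP440Version subclass such as
# poetry.core.constraints.version.Version):
#     parse_pep440("1.2.3rc1", Version)
# returns an instance with release parts (1, 2, 3) and pre-release tag "rc1".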
poetry-core-2.1.1/src/poetry/core/version/pep440/segments.py000066400000000000000000000121401475444614500237400ustar00rootroot00000000000000from __future__ import annotations

import dataclasses

from typing import TYPE_CHECKING
from typing import Optional
from typing import Union


if TYPE_CHECKING:
    from collections.abc import Sequence


# Release phase IDs according to PEP440
RELEASE_PHASE_ID_ALPHA = "a"
RELEASE_PHASE_ID_BETA = "b"
RELEASE_PHASE_ID_RC = "rc"
RELEASE_PHASE_ID_POST = "post"
RELEASE_PHASE_ID_DEV = "dev"

RELEASE_PHASE_SPELLINGS = {
    RELEASE_PHASE_ID_ALPHA: {RELEASE_PHASE_ID_ALPHA, "alpha"},
    RELEASE_PHASE_ID_BETA: {RELEASE_PHASE_ID_BETA, "beta"},
    RELEASE_PHASE_ID_RC: {RELEASE_PHASE_ID_RC, "c", "pre", "preview"},
    RELEASE_PHASE_ID_POST: {RELEASE_PHASE_ID_POST, "r", "rev", "-"},
    RELEASE_PHASE_ID_DEV: {RELEASE_PHASE_ID_DEV},
}
RELEASE_PHASE_NORMALIZATIONS = {
    s: id_ for id_, spellings in RELEASE_PHASE_SPELLINGS.items() for s in spellings
}
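
# For example, RELEASE_PHASE_NORMALIZATIONS maps "alpha" to "a", "preview" to
# "rc" and "rev" to "post", while canonical spellings map to themselves.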


@dataclasses.dataclass(frozen=True, eq=True, order=True)
class Release:
    major: int = dataclasses.field(default=0, compare=False)
    minor: int | None = dataclasses.field(default=None, compare=False)
    patch: int | None = dataclasses.field(default=None, compare=False)
    # some projects use non-semver versioning schemes, e.g. 1.2.3.4
    extra: tuple[int, ...] = dataclasses.field(default=(), compare=False)
    precision: int = dataclasses.field(init=False, compare=False)
    text: str = dataclasses.field(init=False, compare=False)
    _compare_key: tuple[int, ...] = dataclasses.field(init=False, compare=True)

    def __post_init__(self) -> None:
        if self.extra:
            if self.minor is None:
                object.__setattr__(self, "minor", 0)
            if self.patch is None:
                object.__setattr__(self, "patch", 0)
        parts = [
            str(part)
            for part in (self.major, self.minor, self.patch, *self.extra)
            if part is not None
        ]
        object.__setattr__(self, "text", ".".join(parts))
        object.__setattr__(self, "precision", len(parts))

        compare_key = [self.major, self.minor or 0, self.patch or 0, *self.extra]
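        # Trailing zeros are stripped so that, for example, "1.2" and "1.2.0"
        # produce the same compare key and order as equal.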
        while compare_key and compare_key[-1] == 0:
            del compare_key[-1]
        object.__setattr__(self, "_compare_key", tuple(compare_key))

    @classmethod
    def from_parts(cls, *parts: int) -> Release:
        if not parts:
            return cls()

        return cls(
            major=parts[0],
            minor=parts[1] if len(parts) > 1 else None,
            patch=parts[2] if len(parts) > 2 else None,
            extra=parts[3:],
        )

    def to_parts(self) -> Sequence[int]:
        return tuple(
            part
            for part in [self.major, self.minor, self.patch, *self.extra]
            if part is not None
        )

    def to_string(self) -> str:
        return self.text

    def next_major(self) -> Release:
        return dataclasses.replace(
            self,
            major=self.major + 1,
            minor=0 if self.minor is not None else None,
            patch=0 if self.patch is not None else None,
            extra=tuple(0 for _ in self.extra),
        )

    def next_minor(self) -> Release:
        return dataclasses.replace(
            self,
            major=self.major,
            minor=self.minor + 1 if self.minor is not None else 1,
            patch=0 if self.patch is not None else None,
            extra=tuple(0 for _ in self.extra),
        )

    def next_patch(self) -> Release:
        return dataclasses.replace(
            self,
            major=self.major,
            minor=self.minor if self.minor is not None else 0,
            patch=self.patch + 1 if self.patch is not None else 1,
            extra=tuple(0 for _ in self.extra),
        )

    def next(self) -> Release:
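        # Bump the least significant part that was actually written, e.g.
        # "1" -> "2", "1.2" -> "1.3", "1.2.3" -> "1.2.4", "1.2.3.4" -> "1.2.3.5".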
        if self.precision == 1:
            return self.next_major()

        if self.precision == 2:
            return self.next_minor()

        if self.precision == 3:
            return self.next_patch()

        return dataclasses.replace(
            self,
            major=self.major,
            minor=self.minor,
            patch=self.patch,
            extra=(*self.extra[:-1], self.extra[-1] + 1),
        )


@dataclasses.dataclass(frozen=True, eq=True, order=True)
class ReleaseTag:
    phase: str
    number: int = dataclasses.field(default=0)

    def __post_init__(self) -> None:
        object.__setattr__(
            self, "phase", RELEASE_PHASE_NORMALIZATIONS.get(self.phase, self.phase)
        )

    def to_string(self) -> str:
        return f"{self.phase}{self.number}"

    def next(self) -> ReleaseTag:
        return dataclasses.replace(self, phase=self.phase, number=self.number + 1)

    def next_phase(self) -> ReleaseTag | None:
        if self.phase in [
            RELEASE_PHASE_ID_POST,
            RELEASE_PHASE_ID_RC,
            RELEASE_PHASE_ID_DEV,
        ]:
            return None

        if self.phase == RELEASE_PHASE_ID_ALPHA:
            _phase = RELEASE_PHASE_ID_BETA
        elif self.phase == RELEASE_PHASE_ID_BETA:
            _phase = RELEASE_PHASE_ID_RC
        else:
            return None

        return self.__class__(phase=_phase, number=0)


LocalSegmentType = Optional[Union[str, int, tuple[Union[str, int], ...]]]
poetry-core-2.1.1/src/poetry/core/version/pep440/version.py000066400000000000000000000252451475444614500236120ustar00rootroot00000000000000from __future__ import annotations

import dataclasses
import functools
import warnings

from typing import TYPE_CHECKING
from typing import Any
from typing import TypeVar

from poetry.core.version.pep440.segments import RELEASE_PHASE_ID_ALPHA
from poetry.core.version.pep440.segments import RELEASE_PHASE_ID_DEV
from poetry.core.version.pep440.segments import RELEASE_PHASE_ID_POST
from poetry.core.version.pep440.segments import Release
from poetry.core.version.pep440.segments import ReleaseTag


if TYPE_CHECKING:
    from collections.abc import Sequence

    from poetry.core.version.pep440.segments import LocalSegmentType


@functools.total_ordering
class AlwaysSmaller:
    def __lt__(self, other: object) -> bool:
        return True


@functools.total_ordering
class AlwaysGreater:
    def __gt__(self, other: object) -> bool:
        return True


class Infinity(AlwaysGreater, int):
    pass


class NegativeInfinity(AlwaysSmaller, int):
    pass
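
# Subclassing int keeps these sentinels usable where a ReleaseTag number is
# expected, while functools.total_ordering derives the remaining comparison
# methods from the single __lt__/__gt__ defined above.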


T = TypeVar("T", bound="PEP440Version")

# we use the phase "z" to ensure we always sort this after other phases
_INF_TAG = ReleaseTag("z", Infinity())
# we use the phase "" to ensure we always sort this before other phases
_NEG_INF_TAG = ReleaseTag("", NegativeInfinity())


@dataclasses.dataclass(frozen=True, eq=True, order=True)
class PEP440Version:
    epoch: int = dataclasses.field(default=0, compare=False)
    release: Release = dataclasses.field(default_factory=Release, compare=False)
    pre: ReleaseTag | None = dataclasses.field(default=None, compare=False)
    post: ReleaseTag | None = dataclasses.field(default=None, compare=False)
    dev: ReleaseTag | None = dataclasses.field(default=None, compare=False)
    local: LocalSegmentType = dataclasses.field(default=None, compare=False)
    text: str = dataclasses.field(default="", compare=False)
    _compare_key: tuple[
        int, Release, ReleaseTag, ReleaseTag, ReleaseTag, tuple[int | str, ...]
    ] = dataclasses.field(init=False, compare=True)

    def __post_init__(self) -> None:
        if self.local is not None and not isinstance(self.local, tuple):
            object.__setattr__(self, "local", (self.local,))

        if isinstance(self.release, tuple):
            object.__setattr__(self, "release", Release(*self.release))

        # we do this here to handle both None and tomlkit string values
        object.__setattr__(
            self, "text", self.to_string() if not self.text else str(self.text)
        )

        object.__setattr__(self, "_compare_key", self._make_compare_key())

    def _make_compare_key(
        self,
    ) -> tuple[
        int,
        Release,
        ReleaseTag,
        ReleaseTag,
        ReleaseTag,
        tuple[tuple[int, int | str], ...],
    ]:
        """
        This code is based on the implementation of packaging.version._cmpkey(..)
        """
        # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
        # We'll do this by abusing the pre segment, but we _only_ want to do this
        # if there is not a pre or a post segment. If we have one of those then
        # the normal sorting rules will handle this case correctly.
        if self.pre is None and self.post is None and self.dev is not None:
            _pre = _NEG_INF_TAG
        # Versions without a pre-release (except as noted above) should sort after
        # those with one.
        elif self.pre is None:
            _pre = _INF_TAG
        else:
            _pre = self.pre

        # Versions without a post segment should sort before those with one.
        _post = _NEG_INF_TAG if self.post is None else self.post

        # Versions without a development segment should sort after those with one.
        _dev = _INF_TAG if self.dev is None else self.dev

        _local: tuple[tuple[int, int | str], ...]
        if self.local is None:
            # Versions without a local segment should sort before those with one.
            _local = ((NegativeInfinity(), ""),)
        else:
            # Versions with a local segment need that segment parsed to implement
            # the sorting rules in PEP440.
            # - Alpha numeric segments sort before numeric segments
            # - Alpha numeric segments sort lexicographically
            # - Numeric segments sort numerically
            # - Shorter versions sort before longer versions when the prefixes
            #   match exactly
            assert isinstance(self.local, tuple)
            # We convert strings that are integers so that they can be compared
            _local = tuple(
                (int(i), "") if str(i).isnumeric() else (NegativeInfinity(), i)
                for i in self.local
            )
        return self.epoch, self.release, _pre, _post, _dev, _local

    @property
    def major(self) -> int:
        return self.release.major

    @property
    def minor(self) -> int | None:
        return self.release.minor

    @property
    def patch(self) -> int | None:
        return self.release.patch

    @property
    def non_semver_parts(self) -> Sequence[int]:
        return self.release.extra

    @property
    def parts(self) -> Sequence[int]:
        return self.release.to_parts()

    def to_string(self) -> str:
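        # For example, epoch=1, release=2.3, pre=rc1, local=("abc",) renders
        # as "1!2.3rc1+abc".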
        version_string = self.release.to_string()

        if self.epoch:
            # if epoch is non-zero we should include it
            version_string = f"{self.epoch}!{version_string}"

        if self.pre:
            version_string += self.pre.to_string()

        if self.post:
            version_string = f"{version_string}.{self.post.to_string()}"

        if self.dev:
            version_string = f"{version_string}.{self.dev.to_string()}"

        if self.local:
            assert isinstance(self.local, tuple)
            version_string += "+" + ".".join(map(str, self.local))

        return version_string.lower()

    @classmethod
    def parse(cls: type[T], value: str) -> T:
        from poetry.core.version.pep440.parser import parse_pep440

        return parse_pep440(value, cls)

    def is_prerelease(self) -> bool:
        return self.pre is not None

    def is_postrelease(self) -> bool:
        return self.post is not None

    def is_devrelease(self) -> bool:
        return self.dev is not None

    def is_local(self) -> bool:
        return self.local is not None

    def is_no_suffix_release(self) -> bool:
        return not (self.pre or self.post or self.dev)

    def is_unstable(self) -> bool:
        return self.is_prerelease() or self.is_devrelease()

    def is_stable(self) -> bool:
        return not self.is_unstable()

    def _is_increment_required(self) -> bool:
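        # An increment is required for stable releases and for post-releases
        # that carry no pre-release segment; otherwise the current release
        # numbers can be reused.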
        return self.is_stable() or (not self.is_prerelease() and self.is_postrelease())

    def next_major(self: T) -> T:
        release = self.release
        if self._is_increment_required() or Release(release.major, 0, 0) < release:
            release = release.next_major()
        return self.__class__(epoch=self.epoch, release=release)

    def next_minor(self: T) -> T:
        release = self.release
        if (
            self._is_increment_required()
            or Release(release.major, release.minor, 0) < release
        ):
            release = release.next_minor()
        return self.__class__(epoch=self.epoch, release=release)

    def next_patch(self: T) -> T:
        release = self.release
        if (
            self._is_increment_required()
            or Release(release.major, release.minor, release.patch) < release
        ):
            release = release.next_patch()
        return self.__class__(epoch=self.epoch, release=release)

    def next_stable(self: T) -> T:
        release = self.release.next() if self.is_stable() else self.release
        return self.__class__(epoch=self.epoch, release=release, local=self.local)

    def next_prerelease(self: T, next_phase: bool = False) -> T:
        if self.is_stable():
            warnings.warn(
                "Calling next_prerelease() on a stable release is deprecated for"
                " its ambiguity. Use next_major(), next_minor(), etc. together with"
                " first_prerelease()",
                DeprecationWarning,
                stacklevel=2,
            )
        if self.is_prerelease():
            assert self.pre is not None
            if not self.is_devrelease() or self.is_postrelease():
                pre = self.pre.next_phase() if next_phase else self.pre.next()
            else:
                pre = self.pre
        else:
            pre = ReleaseTag(RELEASE_PHASE_ID_ALPHA)
        return self.__class__(epoch=self.epoch, release=self.release, pre=pre)

    def next_postrelease(self: T) -> T:
        if self.is_postrelease():
            assert self.post is not None
            post = self.post.next() if self.dev is None else self.post
        else:
            post = ReleaseTag(RELEASE_PHASE_ID_POST)
        return self.__class__(
            epoch=self.epoch,
            release=self.release,
            pre=self.pre,
            post=post,
        )

    def next_devrelease(self: T) -> T:
        if self.is_devrelease():
            assert self.dev is not None
            dev = self.dev.next()
        else:
            warnings.warn(
                "Calling next_devrelease() on a non dev release is deprecated for"
                " its ambiguity. Use next_major(), next_minor(), etc. together with"
                " first_devrelease()",
                DeprecationWarning,
                stacklevel=2,
            )
            dev = ReleaseTag(RELEASE_PHASE_ID_DEV)
        return self.__class__(
            epoch=self.epoch,
            release=self.release,
            pre=self.pre,
            post=self.post,
            dev=dev,
        )

    def first_prerelease(self: T) -> T:
        return self.__class__(
            epoch=self.epoch,
            release=self.release,
            pre=ReleaseTag(RELEASE_PHASE_ID_ALPHA),
        )

    def first_devrelease(self: T) -> T:
        return self.__class__(
            epoch=self.epoch,
            release=self.release,
            pre=self.pre,
            post=self.post,
            dev=ReleaseTag(RELEASE_PHASE_ID_DEV),
        )

    def replace(self: T, **kwargs: Any) -> T:
        return self.__class__(
            **{
                **{
                    k: getattr(self, k)
                    for k in self.__dataclass_fields__
                    if k not in ("_compare_key", "text")
                },  # set up defaults from current values, excluding compare key and text
                **kwargs,  # keys to replace
            }
        )

    def without_local(self: T) -> T:
        return self.replace(local=None)

    def without_postrelease(self: T) -> T:
        if self.is_postrelease():
            return self.replace(post=None, dev=None)
        return self

    def without_devrelease(self: T) -> T:
        return self.replace(dev=None)
poetry-core-2.1.1/src/poetry/core/version/requirements.py000066400000000000000000000071451475444614500236330ustar00rootroot00000000000000from __future__ import annotations

import functools
import urllib.parse as urlparse

from typing import TYPE_CHECKING

from poetry.core.constraints.version import parse_constraint
from poetry.core.constraints.version.exceptions import ParseConstraintError
from poetry.core.version.grammars import GRAMMAR_PEP_508_CONSTRAINTS
from poetry.core.version.markers import _compact_markers
from poetry.core.version.parser import Parser


if TYPE_CHECKING:
    from collections.abc import Sequence


class InvalidRequirementError(ValueError):
    """
    An invalid requirement was found; users should refer to PEP 508.
    """


# Parser: PEP 508 Constraints
_parser = Parser(GRAMMAR_PEP_508_CONSTRAINTS, "lalr")


class Requirement:
    """
    Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirementError on a badly-formed requirement
    string.
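
    For example (illustrative), parsing
    'requests[security]>=2.8.1; python_version < "2.7"' yields the name
    "requests", the extras ["security"], the constraint ">=2.8.1" and a
    python_version marker.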
    """

    def __init__(self, requirement_string: str) -> None:
        from lark import UnexpectedCharacters
        from lark import UnexpectedToken

        try:
            parsed = _parser.parse(requirement_string)
        except (UnexpectedCharacters, UnexpectedToken) as e:
            raise InvalidRequirementError(
                "The requirement is invalid: Unexpected character at column"
                f" {e.column}\n\n{e.get_context(requirement_string)}"
            )

        self.name: str = next(parsed.scan_values(lambda t: t.type == "NAME")).value
        url = next(parsed.scan_values(lambda t: t.type == "URI"), None)

        if url:
            url = url.value
            parsed_url = urlparse.urlparse(url)
            if parsed_url.scheme == "file":
                if urlparse.urlunparse(parsed_url) != url:
                    raise InvalidRequirementError(
                        f'The requirement is invalid: invalid URL "{url}"'
                    )
            elif (
                not (parsed_url.scheme and parsed_url.netloc)
            ) and not parsed_url.path:
                raise InvalidRequirementError(
                    f'The requirement is invalid: invalid URL "{url}"'
                )
            self.url = url
        else:
            self.url = None

        self.extras: Sequence[str] = [
            e.value for e in parsed.scan_values(lambda t: t.type == "EXTRA")
        ]
        constraint = next(parsed.find_data("version_specification"), None)
        constraint = ",".join(constraint.children) if constraint else "*"

        try:
            self.constraint = parse_constraint(constraint)
        except ParseConstraintError:
            raise InvalidRequirementError(
                f'The requirement is invalid: invalid version constraint "{constraint}"'
            )

        self.pretty_constraint = constraint

        marker = next(parsed.find_data("marker_spec"), None)
        if marker:
            marker = _compact_markers(
                marker.children[0].children, tree_prefix="markers__"
            )

        self.marker = marker

    def __str__(self) -> str:
        parts = [self.name]

        if self.extras:
            extras = ",".join(sorted(self.extras))
            parts.append(f"[{extras}]")

        if self.pretty_constraint:
            parts.append(self.pretty_constraint)

        if self.url:
            parts.append(f"@ {self.url}")

        if self.marker:
            parts.append(f"; {self.marker}")

        return "".join(parts)

    def __repr__(self) -> str:
        return f""


@functools.cache
def parse_requirement(requirement_string: str) -> Requirement:
    return Requirement(requirement_string)
poetry-core-2.1.1/tests/000077500000000000000000000000001475444614500151435ustar00rootroot00000000000000poetry-core-2.1.1/tests/__init__.py000066400000000000000000000000001475444614500172420ustar00rootroot00000000000000poetry-core-2.1.1/tests/conftest.py000066400000000000000000000063201475444614500173430ustar00rootroot00000000000000from __future__ import annotations

import os
import sys
import tempfile

from pathlib import Path
from typing import TYPE_CHECKING
from typing import Callable

import pytest
import virtualenv

from poetry.core.factory import Factory
from poetry.core.utils._compat import WINDOWS


if TYPE_CHECKING:
    from collections.abc import Iterator

    from pytest import Config
    from pytest import Parser
    from pytest_mock import MockerFixture


def pytest_addoption(parser: Parser) -> None:
    parser.addoption(
        "--integration",
        action="store_true",
        dest="integration",
        default=False,
        help="enable integration tests",
    )


def pytest_configure(config: Config) -> None:
    config.addinivalue_line("markers", "integration: mark integration tests")

    if not config.option.integration:
        config.option.markexpr = "not integration"


def get_project_from_dir(base_directory: Path) -> Callable[[str], Path]:
    def get(name: str) -> Path:
        path = base_directory / name
        if not path.exists():
            raise FileNotFoundError(str(path))
        return path

    return get


@pytest.fixture(scope="session")
def project_source_root() -> Path:
    return Path(__file__).parent.parent


@pytest.fixture(scope="session")
def project_source_test_root() -> Path:
    return Path(__file__).parent


@pytest.fixture(scope="session")
def common_fixtures_directory(project_source_test_root: Path) -> Path:
    return project_source_test_root / "fixtures"


@pytest.fixture(scope="session")
def common_project(common_fixtures_directory: Path) -> Callable[[str], Path]:
    return get_project_from_dir(common_fixtures_directory)


@pytest.fixture(scope="session")
def masonry_fixtures_directory(project_source_test_root: Path) -> Path:
    return project_source_test_root / "masonry" / "builders" / "fixtures"


@pytest.fixture(scope="session")
def masonry_project(
    masonry_fixtures_directory: Path,
) -> Callable[[str], Path]:
    return get_project_from_dir(masonry_fixtures_directory)


@pytest.fixture
def temporary_directory() -> Iterator[Path]:
    with tempfile.TemporaryDirectory(prefix="poetry-core") as tmp:
        yield Path(tmp)


@pytest.fixture
def venv(temporary_directory: Path) -> Path:
    venv_dir = temporary_directory / ".venv"
    virtualenv.cli_run(
        [
            "--no-download",
            "--no-periodic-update",
            "--python",
            sys.executable,
            venv_dir.as_posix(),
        ]
    )
    return venv_dir


@pytest.fixture
def python(venv: Path) -> str:
    return venv.joinpath("Scripts/Python.exe" if WINDOWS else "bin/python").as_posix()


@pytest.fixture()
def f() -> Factory:
    return Factory()


@pytest.fixture(autouse=True)
def with_mocked_get_vcs(mocker: MockerFixture) -> None:
    from poetry.core.vcs.git import Git

    mocker.patch(
        "poetry.core.vcs.git.Git.run", return_value="This is a mocked Git.run() output."
    )
    mocker.patch("poetry.core.vcs.get_vcs", return_value=Git())


@pytest.fixture(autouse=True)
def clear_env_source_date_epoch() -> None:
    """Clear SOURCE_DATE_EPOCH from environment to avoid non-deterministic failures"""
    if "SOURCE_DATE_EPOCH" in os.environ:
        del os.environ["SOURCE_DATE_EPOCH"]
poetry-core-2.1.1/tests/constraints/000077500000000000000000000000001475444614500175125ustar00rootroot00000000000000poetry-core-2.1.1/tests/constraints/__init__.py000066400000000000000000000000001475444614500216110ustar00rootroot00000000000000poetry-core-2.1.1/tests/constraints/generic/000077500000000000000000000000001475444614500211265ustar00rootroot00000000000000poetry-core-2.1.1/tests/constraints/generic/__init__.py000066400000000000000000000000001475444614500232250ustar00rootroot00000000000000poetry-core-2.1.1/tests/constraints/generic/test_constraint.py000066400000000000000000001535041475444614500247330ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

import pytest

from poetry.core.constraints.generic import AnyConstraint
from poetry.core.constraints.generic import Constraint
from poetry.core.constraints.generic import EmptyConstraint
from poetry.core.constraints.generic import MultiConstraint
from poetry.core.constraints.generic import UnionConstraint
from poetry.core.constraints.generic.constraint import ExtraConstraint
from poetry.core.constraints.generic.multi_constraint import ExtraMultiConstraint


if TYPE_CHECKING:
    from poetry.core.constraints.generic import BaseConstraint


@pytest.mark.parametrize(
    ("constraint1", "constraint2", "expected"),
    [
        (Constraint("win32"), Constraint("win32"), True),
        (Constraint("win32"), Constraint("linux"), False),
        (Constraint("win32", "!="), Constraint("win32"), False),
        (Constraint("win32", "!="), Constraint("linux"), True),
        (Constraint("tegra", "in"), Constraint("1.2-tegra"), True),
        (Constraint("tegra", "in"), Constraint("1.2-teg"), False),
        (Constraint("tegra", "not in"), Constraint("1.2-tegra"), False),
        (Constraint("tegra", "not in"), Constraint("1.2-teg"), True),
    ],
)
def test_allows(
    constraint1: Constraint, constraint2: Constraint, expected: bool
) -> None:
    assert constraint1.allows(constraint2) is expected
    # allows_any() and allows_all() should be the same as allows()
    # if the second constraint is a `==` constraint
    assert constraint1.allows_any(constraint2) is expected
    assert constraint1.allows_all(constraint2) is expected


@pytest.mark.parametrize(
    ("constraint1", "constraint2", "expected_any", "expected_all"),
    [
        (Constraint("win32"), EmptyConstraint(), False, True),
        (Constraint("win32"), AnyConstraint(), True, False),
        (Constraint("win32"), Constraint("win32"), True, True),
        (Constraint("win32"), Constraint("linux"), False, False),
        (Constraint("win32"), Constraint("win32", "!="), False, False),
        (Constraint("win32"), Constraint("linux", "!="), True, False),
        (
            Constraint("win32"),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            True,
            False,
        ),
        (
            Constraint("win32"),
            UnionConstraint(Constraint("darwin"), Constraint("linux")),
            False,
            False,
        ),
        (
            Constraint("win32"),
            UnionConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            Constraint("win32"),
            UnionConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            Constraint("win32"),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            False,
            False,
        ),
        (
            Constraint("win32"),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (Constraint("win32", "!="), EmptyConstraint(), False, True),
        (Constraint("win32", "!="), AnyConstraint(), True, False),
        (Constraint("win32", "!="), Constraint("win32"), False, False),
        (Constraint("win32", "!="), Constraint("linux"), True, True),
        (Constraint("win32", "!="), Constraint("win32", "!="), True, True),
        (Constraint("win32", "!="), Constraint("linux", "!="), True, False),
        (
            Constraint("win32", "!="),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            True,
            False,
        ),
        (
            Constraint("win32", "!="),
            UnionConstraint(Constraint("darwin"), Constraint("linux")),
            True,
            True,
        ),
        (
            Constraint("win32", "!="),
            UnionConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            Constraint("win32", "!="),
            UnionConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            Constraint("win32", "!="),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            True,
            True,
        ),
        (
            Constraint("win32", "!="),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("tegra", "not in"),
            True,
            True,
        ),
        (
            Constraint("tegra", "in"),
            Constraint("tegra", "not in"),
            False,
            False,
        ),
        (
            Constraint("tegra", "in"),
            Constraint("tegra", "in"),
            True,
            True,
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("tegra", "in"),
            False,
            False,
        ),
        (
            Constraint("tegra", "in"),
            Constraint("teg", "in"),
            True,
            False,
        ),
        (
            Constraint("teg", "in"),
            Constraint("tegra", "in"),
            True,
            True,
        ),
        (
            Constraint("teg", "not in"),
            Constraint("tegra", "not in"),
            True,
            False,
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("teg", "not in"),
            True,
            True,
        ),
        (
            Constraint("teg", "not in"),
            Constraint("tegra", "in"),
            False,
            False,
        ),
        (
            Constraint("tegra", "in"),
            Constraint("teg", "not in"),
            False,
            False,
        ),
        (
            Constraint("teg", "in"),
            Constraint("tegra", "not in"),
            True,
            False,
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("teg", "in"),
            True,
            False,
        ),
        (
            Constraint("tegra", "in"),
            Constraint("rpi", "in"),
            True,
            False,
        ),
        (
            Constraint("1.2.3-tegra", "!="),
            Constraint("tegra", "in"),
            True,
            False,
        ),
        (
            Constraint("tegra", "in"),
            Constraint("1.2.3-tegra", "!="),
            True,
            False,
        ),
        (
            Constraint("1.2.3-tegra", "!="),
            Constraint("teg", "in"),
            True,
            False,
        ),
        (
            Constraint("teg", "in"),
            Constraint("1.2.3-tegra", "!="),
            True,
            False,
        ),
        (
            Constraint("1.2.3-tegra", "!="),
            Constraint("tegra", "not in"),
            True,
            True,
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("1.2.3-tegra", "!="),
            True,
            False,
        ),
        (
            Constraint("1.2.3-tegra", "!="),
            Constraint("teg", "not in"),
            True,
            True,
        ),
        (
            Constraint("teg", "not in"),
            Constraint("1.2.3-tegra", "!="),
            True,
            False,
        ),
        (
            Constraint("1.2.3-tegra", "=="),
            Constraint("tegra", "in"),
            True,
            False,
        ),
        (
            Constraint("1.2.3-tegra", "=="),
            Constraint("tegra", "not in"),
            False,
            False,
        ),
    ],
)
def test_allows_any_and_allows_all(
    constraint1: Constraint,
    constraint2: BaseConstraint,
    expected_any: bool,
    expected_all: bool,
) -> None:
    assert constraint1.allows_any(constraint2) is expected_any
    assert constraint1.allows_all(constraint2) is expected_all


@pytest.mark.parametrize(
    ("constraint", "inverted"),
    [
        (EmptyConstraint(), AnyConstraint()),
        (Constraint("foo"), Constraint("foo", "!=")),
        (
            MultiConstraint(Constraint("foo", "!="), Constraint("bar", "!=")),
            UnionConstraint(Constraint("foo"), Constraint("bar")),
        ),
        (Constraint("tegra", "not in"), Constraint("tegra", "in")),
    ],
)
def test_invert(constraint: BaseConstraint, inverted: BaseConstraint) -> None:
    assert constraint.invert() == inverted
    assert inverted.invert() == constraint


@pytest.mark.parametrize(
    ("constraint", "inverted"),
    [
        (ExtraConstraint("foo"), ExtraConstraint("foo", "!=")),
        (
            ExtraMultiConstraint(ExtraConstraint("foo"), ExtraConstraint("bar", "!=")),
            UnionConstraint(ExtraConstraint("foo", "!="), ExtraConstraint("bar")),
        ),
    ],
)
def test_invert_extra(constraint: BaseConstraint, inverted: BaseConstraint) -> None:
    assert constraint.invert() == inverted
    assert inverted.invert() == constraint


@pytest.mark.parametrize(
    ("constraint1", "constraint2", "expected"),
    [
        (
            EmptyConstraint(),
            Constraint("win32"),
            EmptyConstraint(),
        ),
        (
            EmptyConstraint(),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            EmptyConstraint(),
        ),
        (
            EmptyConstraint(),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            EmptyConstraint(),
        ),
        (
            AnyConstraint(),
            Constraint("win32"),
            Constraint("win32"),
        ),
        (
            AnyConstraint(),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
        ),
        (
            AnyConstraint(),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
        ),
        (
            EmptyConstraint(),
            AnyConstraint(),
            EmptyConstraint(),
        ),
        (
            EmptyConstraint(),
            EmptyConstraint(),
            EmptyConstraint(),
        ),
        (
            AnyConstraint(),
            AnyConstraint(),
            AnyConstraint(),
        ),
        (
            Constraint("win32"),
            Constraint("win32"),
            Constraint("win32"),
        ),
        (
            Constraint("win32"),
            Constraint("linux"),
            EmptyConstraint(),
        ),
        (
            Constraint("win32"),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            Constraint("win32"),
        ),
        (
            Constraint("win32"),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            EmptyConstraint(),
        ),
        (
            Constraint("win32"),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            Constraint("win32"),
        ),
        (
            Constraint("win32"),
            UnionConstraint(Constraint("linux"), Constraint("linux2")),
            EmptyConstraint(),
        ),
        (
            Constraint("win32"),
            Constraint("linux", "!="),
            Constraint("win32"),
        ),
        (
            Constraint("win32", "!="),
            Constraint("linux"),
            Constraint("linux"),
        ),
        (
            Constraint("win32", "!="),
            Constraint("linux", "!="),
            (
                MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
                MultiConstraint(Constraint("linux", "!="), Constraint("win32", "!=")),
            ),
        ),
        (
            Constraint("win32", "!="),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
        ),
        (
            Constraint("darwin", "!="),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(
                Constraint("win32", "!="),
                Constraint("linux", "!="),
                Constraint("darwin", "!="),
            ),
        ),
        (
            Constraint("win32", "!="),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            Constraint("linux"),
        ),
        (
            Constraint("win32", "!="),
            UnionConstraint(
                Constraint("win32"), Constraint("linux"), Constraint("darwin")
            ),
            UnionConstraint(Constraint("linux"), Constraint("darwin")),
        ),
        (
            Constraint("win32", "!="),
            UnionConstraint(Constraint("linux"), Constraint("linux2")),
            UnionConstraint(Constraint("linux"), Constraint("linux2")),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("linux"), Constraint("win32")),
            (
                UnionConstraint(Constraint("win32"), Constraint("linux")),
                UnionConstraint(Constraint("linux"), Constraint("win32")),
            ),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("win32"), Constraint("darwin")),
            Constraint("win32"),
        ),
        (
            UnionConstraint(
                Constraint("win32"), Constraint("linux"), Constraint("darwin")
            ),
            UnionConstraint(
                Constraint("win32"), Constraint("cygwin"), Constraint("darwin")
            ),
            UnionConstraint(
                Constraint("win32"),
                Constraint("darwin"),
            ),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            MultiConstraint(Constraint("win32", "!="), Constraint("darwin", "!=")),
            Constraint("linux"),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            EmptyConstraint(),
        ),
        (
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
        ),
        (
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("linux", "!="), Constraint("win32", "!=")),
            (
                MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
                MultiConstraint(Constraint("linux", "!="), Constraint("win32", "!=")),
            ),
        ),
        (
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("win32", "!="), Constraint("darwin", "!=")),
            (
                MultiConstraint(
                    Constraint("win32", "!="),
                    Constraint("linux", "!="),
                    Constraint("darwin", "!="),
                ),
                MultiConstraint(
                    Constraint("win32", "!="),
                    Constraint("darwin", "!="),
                    Constraint("linux", "!="),
                ),
            ),
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("tegra", "not in"),
            Constraint("tegra", "not in"),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("tegra", "not in"),
            EmptyConstraint(),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("tegra", "in"),
            Constraint("tegra", "in"),
        ),
        (
            Constraint("teg", "in"),
            Constraint("tegra", "in"),
            Constraint("tegra", "in"),
        ),
        (
            Constraint("teg", "not in"),
            Constraint("tegra", "in"),
            EmptyConstraint(),
        ),
        (
            Constraint("teg", "in"),
            Constraint("tegra", "not in"),
            (
                MultiConstraint(Constraint("teg", "in"), Constraint("tegra", "not in")),
                MultiConstraint(Constraint("tegra", "not in"), Constraint("teg", "in")),
            ),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("rpi", "in"),
            (
                MultiConstraint(Constraint("tegra", "in"), Constraint("rpi", "in")),
                MultiConstraint(Constraint("rpi", "in"), Constraint("tegra", "in")),
            ),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("1.2.3-tegra", "=="),
            Constraint("1.2.3-tegra", "=="),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("1.2.3-tegra", "!="),
            (
                MultiConstraint(
                    Constraint("tegra", "in"), Constraint("1.2.3-tegra", "!=")
                ),
                MultiConstraint(
                    Constraint("1.2.3-tegra", "!="),
                    Constraint("tegra", "in"),
                ),
            ),
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("1.2.3-tegra", "=="),
            EmptyConstraint(),
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("1.2.3-tegra", "!="),
            Constraint("tegra", "not in"),
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("rpi", "not in"),
            (
                MultiConstraint(
                    Constraint("tegra", "not in"),
                    Constraint("rpi", "not in"),
                ),
                MultiConstraint(
                    Constraint("rpi", "not in"),
                    Constraint("tegra", "not in"),
                ),
            ),
        ),
    ],
)
def test_intersect(
    constraint1: BaseConstraint,
    constraint2: BaseConstraint,
    expected: BaseConstraint | tuple[BaseConstraint, BaseConstraint],
) -> None:
    if not isinstance(expected, tuple):
        expected = (expected, expected)
    assert constraint1.intersect(constraint2) == expected[0]
    assert constraint2.intersect(constraint1) == expected[1]


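def test_intersect_sketch() -> None:
    # Editor's sketch, not part of the upstream suite: spot-checks two
    # intersection identities taken verbatim from the table above. Pruning a
    # union by a multi-constraint leaves only the surviving member, and
    # contradictory "in"/"not in" constraints collapse to the empty
    # constraint.
    union = UnionConstraint(Constraint("win32"), Constraint("linux"))
    multi = MultiConstraint(Constraint("win32", "!="), Constraint("darwin", "!="))
    assert union.intersect(multi) == Constraint("linux")
    assert (
        Constraint("tegra", "in").intersect(Constraint("tegra", "not in"))
        == EmptyConstraint()
    )

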
@pytest.mark.parametrize(
    ("constraint1", "constraint2", "expected"),
    [
        (
            EmptyConstraint(),
            ExtraConstraint("extra1"),
            EmptyConstraint(),
        ),
        (
            EmptyConstraint(),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            EmptyConstraint(),
        ),
        (
            EmptyConstraint(),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            EmptyConstraint(),
        ),
        (
            AnyConstraint(),
            ExtraConstraint("extra1"),
            ExtraConstraint("extra1"),
        ),
        (
            AnyConstraint(),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
        ),
        (
            AnyConstraint(),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraConstraint("extra1"),
            ExtraConstraint("extra1"),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraConstraint("extra1", "!="),
            EmptyConstraint(),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraConstraint("extra2"),
            (
                ExtraMultiConstraint(
                    ExtraConstraint("extra1"), ExtraConstraint("extra2")
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra2"), ExtraConstraint("extra1")
                ),
            ),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraMultiConstraint(
                ExtraConstraint("extra2", "!="), ExtraConstraint("extra3", "!=")
            ),
            ExtraMultiConstraint(
                ExtraConstraint("extra2", "!="),
                ExtraConstraint("extra3", "!="),
                ExtraConstraint("extra1"),
            ),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            EmptyConstraint(),
        ),
        (
            ExtraConstraint("extra1"),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraConstraint("extra1"),
        ),
        (
            ExtraConstraint("extra1"),
            UnionConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra3")),
            UnionConstraint(
                ExtraMultiConstraint(
                    ExtraConstraint("extra2"), ExtraConstraint("extra1")
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra3"), ExtraConstraint("extra1")
                ),
            ),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraConstraint("extra2", "!="),
            (
                ExtraMultiConstraint(
                    ExtraConstraint("extra1"), ExtraConstraint("extra2", "!=")
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra2", "!="), ExtraConstraint("extra1")
                ),
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            ExtraConstraint("extra2"),
            (
                ExtraMultiConstraint(
                    ExtraConstraint("extra1", "!="), ExtraConstraint("extra2")
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra2"), ExtraConstraint("extra1", "!=")
                ),
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            ExtraConstraint("extra2", "!="),
            (
                ExtraMultiConstraint(
                    ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra2", "!="), ExtraConstraint("extra1", "!=")
                ),
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
        ),
        (
            ExtraConstraint("extra3", "!="),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="),
                ExtraConstraint("extra2", "!="),
                ExtraConstraint("extra3", "!="),
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(
                ExtraConstraint("extra2"), ExtraConstraint("extra1", "!=")
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            UnionConstraint(
                ExtraConstraint("extra1"),
                ExtraConstraint("extra2"),
                ExtraConstraint("extra3"),
            ),
            UnionConstraint(
                ExtraMultiConstraint(
                    ExtraConstraint("extra2"), ExtraConstraint("extra1", "!=")
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra3"), ExtraConstraint("extra1", "!=")
                ),
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            UnionConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra3")),
            UnionConstraint(
                ExtraMultiConstraint(
                    ExtraConstraint("extra2"), ExtraConstraint("extra1", "!=")
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra3"), ExtraConstraint("extra1", "!=")
                ),
            ),
        ),
        (
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
        ),
        (
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra1")),
            (
                ExtraMultiConstraint(
                    ExtraConstraint("extra1"), ExtraConstraint("extra2")
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra2"), ExtraConstraint("extra1")
                ),
            ),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra1")),
            (
                UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
                UnionConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra1")),
            ),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra3")),
            (
                UnionConstraint(
                    ExtraConstraint("extra1"),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1"), ExtraConstraint("extra3")
                    ),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra2"), ExtraConstraint("extra1")
                    ),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra2"), ExtraConstraint("extra3")
                    ),
                ),
                UnionConstraint(
                    ExtraConstraint("extra1"),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1"), ExtraConstraint("extra2")
                    ),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra3"), ExtraConstraint("extra1")
                    ),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra3"), ExtraConstraint("extra2")
                    ),
                ),
            ),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra3", "!=")
            ),
            ExtraMultiConstraint(
                ExtraConstraint("extra2"),
                ExtraConstraint("extra1", "!="),
                ExtraConstraint("extra3", "!="),
            ),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            EmptyConstraint(),
        ),
        (
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra3", "!=")
            ),
            (
                ExtraMultiConstraint(
                    ExtraConstraint("extra1", "!="),
                    ExtraConstraint("extra2", "!="),
                    ExtraConstraint("extra3", "!="),
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra1", "!="),
                    ExtraConstraint("extra3", "!="),
                    ExtraConstraint("extra2", "!="),
                ),
            ),
        ),
        (
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra3", "!=")
            ),
            EmptyConstraint(),
        ),
        (
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(
                ExtraConstraint("extra3", "!="), ExtraConstraint("extra4", "!=")
            ),
            (
                ExtraMultiConstraint(
                    ExtraConstraint("extra1"),
                    ExtraConstraint("extra2"),
                    ExtraConstraint("extra3", "!="),
                    ExtraConstraint("extra4", "!="),
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra3", "!="),
                    ExtraConstraint("extra4", "!="),
                    ExtraConstraint("extra1"),
                    ExtraConstraint("extra2"),
                ),
            ),
        ),
        (
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            EmptyConstraint(),
        ),
    ],
)
def test_intersect_extra(
    constraint1: BaseConstraint,
    constraint2: BaseConstraint,
    expected: BaseConstraint | tuple[BaseConstraint, BaseConstraint],
) -> None:
    if not isinstance(expected, tuple):
        expected = (expected, expected)
    assert constraint1.intersect(constraint2) == expected[0]
    assert constraint2.intersect(constraint1) == expected[1]


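def test_intersect_extra_sketch() -> None:
    # Editor's sketch, not part of the upstream suite: unlike platform-style
    # constraints, two different extras can be active at the same time, so
    # (per the table above) their intersection is a conjunction rather than
    # the empty constraint.
    assert ExtraConstraint("extra1").intersect(
        ExtraConstraint("extra2")
    ) == ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2"))
    # An extra and its own negation, however, are still contradictory.
    assert (
        ExtraConstraint("extra1").intersect(ExtraConstraint("extra1", "!="))
        == EmptyConstraint()
    )

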
@pytest.mark.parametrize(
    ("constraint1", "constraint2", "expected"),
    [
        (
            EmptyConstraint(),
            Constraint("win32"),
            Constraint("win32"),
        ),
        (
            EmptyConstraint(),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
        ),
        (
            EmptyConstraint(),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
        ),
        (
            AnyConstraint(),
            Constraint("win32"),
            AnyConstraint(),
        ),
        (
            AnyConstraint(),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            AnyConstraint(),
        ),
        (
            AnyConstraint(),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            AnyConstraint(),
        ),
        (
            EmptyConstraint(),
            AnyConstraint(),
            AnyConstraint(),
        ),
        (
            EmptyConstraint(),
            EmptyConstraint(),
            EmptyConstraint(),
        ),
        (
            AnyConstraint(),
            AnyConstraint(),
            AnyConstraint(),
        ),
        (
            Constraint("win32"),
            Constraint("win32"),
            Constraint("win32"),
        ),
        (
            Constraint("win32"),
            Constraint("linux"),
            (
                UnionConstraint(Constraint("win32"), Constraint("linux")),
                UnionConstraint(Constraint("linux"), Constraint("win32")),
            ),
        ),
        (
            Constraint("win32"),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
        ),
        (
            Constraint("win32"),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            Constraint("linux", "!="),
        ),
        (
            Constraint("win32"),
            MultiConstraint(
                Constraint("win32", "!="),
                Constraint("linux", "!="),
                Constraint("darwin", "!="),
            ),
            MultiConstraint(Constraint("linux", "!="), Constraint("darwin", "!=")),
        ),
        (
            Constraint("win32"),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
        ),
        (
            Constraint("win32"),
            UnionConstraint(Constraint("linux"), Constraint("linux2")),
            (
                UnionConstraint(
                    Constraint("win32"), Constraint("linux"), Constraint("linux2")
                ),
                UnionConstraint(
                    Constraint("linux"), Constraint("linux2"), Constraint("win32")
                ),
            ),
        ),
        (
            Constraint("win32"),
            Constraint("linux", "!="),
            Constraint("linux", "!="),
        ),
        (
            Constraint("win32", "!="),
            Constraint("linux"),
            Constraint("win32", "!="),
        ),
        (
            Constraint("win32", "!="),
            Constraint("linux", "!="),
            AnyConstraint(),
        ),
        (
            Constraint("win32", "!="),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            Constraint("win32", "!="),
        ),
        (
            Constraint("darwin", "!="),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            AnyConstraint(),
        ),
        (
            Constraint("win32", "!="),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            AnyConstraint(),
        ),
        (
            Constraint("win32", "!="),
            UnionConstraint(Constraint("linux"), Constraint("linux2")),
            Constraint("win32", "!="),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("win32"), Constraint("linux")),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("linux"), Constraint("win32")),
            (
                UnionConstraint(Constraint("win32"), Constraint("linux")),
                UnionConstraint(Constraint("linux"), Constraint("win32")),
            ),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            UnionConstraint(Constraint("win32"), Constraint("darwin")),
            (
                UnionConstraint(
                    Constraint("win32"), Constraint("linux"), Constraint("darwin")
                ),
                UnionConstraint(
                    Constraint("win32"), Constraint("darwin"), Constraint("linux")
                ),
            ),
        ),
        (
            UnionConstraint(
                Constraint("win32"), Constraint("linux"), Constraint("darwin")
            ),
            UnionConstraint(
                Constraint("win32"), Constraint("cygwin"), Constraint("darwin")
            ),
            (
                UnionConstraint(
                    Constraint("win32"),
                    Constraint("linux"),
                    Constraint("darwin"),
                    Constraint("cygwin"),
                ),
                UnionConstraint(
                    Constraint("win32"),
                    Constraint("cygwin"),
                    Constraint("darwin"),
                    Constraint("linux"),
                ),
            ),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            MultiConstraint(Constraint("win32", "!="), Constraint("darwin", "!=")),
            UnionConstraint(
                Constraint("win32"),
                Constraint("linux"),
                MultiConstraint(Constraint("win32", "!="), Constraint("darwin", "!=")),
            ),
        ),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            UnionConstraint(
                Constraint("win32"),
                Constraint("linux"),
                MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            ),
        ),
        (
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
        ),
        (
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("linux", "!="), Constraint("win32", "!=")),
            (
                MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
                MultiConstraint(Constraint("linux", "!="), Constraint("win32", "!=")),
            ),
        ),
        (
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            MultiConstraint(Constraint("win32", "!="), Constraint("darwin", "!=")),
            MultiConstraint(Constraint("win32", "!=")),
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("tegra", "not in"),
            Constraint("tegra", "not in"),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("tegra", "not in"),
            AnyConstraint(),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("tegra", "in"),
            Constraint("tegra", "in"),
        ),
        (
            Constraint("teg", "in"),
            Constraint("tegra", "in"),
            Constraint("teg", "in"),
        ),
        (
            Constraint("teg", "in"),
            Constraint("tegra", "not in"),
            AnyConstraint(),
        ),
        (
            Constraint("teg", "not in"),
            Constraint("tegra", "in"),
            (
                UnionConstraint(Constraint("teg", "not in"), Constraint("tegra", "in")),
                UnionConstraint(Constraint("tegra", "in"), Constraint("teg", "not in")),
            ),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("rpi", "in"),
            (
                UnionConstraint(Constraint("tegra", "in"), Constraint("rpi", "in")),
                UnionConstraint(Constraint("rpi", "in"), Constraint("tegra", "in")),
            ),
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("rpi", "not in"),
            AnyConstraint(),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("1.2.3-tegra", "!="),
            AnyConstraint(),
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("1.2.3-tegra", "!="),
            Constraint("1.2.3-tegra", "!="),
        ),
        (
            Constraint("tegra", "in"),
            Constraint("1.2.3-tegra", "=="),
            Constraint("tegra", "in"),
        ),
        (
            Constraint("tegra", "not in"),
            Constraint("1.2.3-tegra", "=="),
            (
                UnionConstraint(
                    Constraint("tegra", "not in"), Constraint("1.2.3-tegra", "==")
                ),
                UnionConstraint(
                    Constraint("1.2.3-tegra", "=="), Constraint("tegra", "not in")
                ),
            ),
        ),
    ],
)
def test_union(
    constraint1: BaseConstraint,
    constraint2: BaseConstraint,
    expected: BaseConstraint | tuple[BaseConstraint, BaseConstraint],
) -> None:
    if not isinstance(expected, tuple):
        expected = (expected, expected)

    assert constraint1.union(constraint2) == expected[0]
    assert constraint2.union(constraint1) == expected[1]


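def test_union_sketch() -> None:
    # Editor's sketch, not part of the upstream suite: two complementary
    # exclusions together allow everything, and unioning an allowed value
    # into a conjunction drops the exclusion it cancels out; both checks
    # mirror rows of the table above.
    assert (
        Constraint("win32", "!=").union(Constraint("linux", "!="))
        == AnyConstraint()
    )
    assert Constraint("win32").union(
        MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!="))
    ) == Constraint("linux", "!=")

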
@pytest.mark.parametrize(
    ("constraint1", "constraint2", "expected"),
    [
        (
            EmptyConstraint(),
            ExtraConstraint("extra1"),
            ExtraConstraint("extra1"),
        ),
        (
            EmptyConstraint(),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
        ),
        (
            EmptyConstraint(),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
        ),
        (
            AnyConstraint(),
            ExtraConstraint("extra1"),
            AnyConstraint(),
        ),
        (
            AnyConstraint(),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            AnyConstraint(),
        ),
        (
            AnyConstraint(),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            AnyConstraint(),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraConstraint("extra1"),
            ExtraConstraint("extra1"),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraConstraint("extra1", "!="),
            AnyConstraint(),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraConstraint("extra2"),
            (
                UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
                UnionConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra1")),
            ),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraMultiConstraint(
                ExtraConstraint("extra2", "!="), ExtraConstraint("extra3", "!=")
            ),
            UnionConstraint(
                ExtraMultiConstraint(
                    ExtraConstraint("extra2", "!="), ExtraConstraint("extra3", "!=")
                ),
                ExtraConstraint("extra1"),
            ),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            UnionConstraint(ExtraConstraint("extra2", "!="), ExtraConstraint("extra1")),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="),
                ExtraConstraint("extra2", "!="),
                ExtraConstraint("extra3", "!="),
            ),
            UnionConstraint(
                ExtraMultiConstraint(
                    ExtraConstraint("extra1", "!="),
                    ExtraConstraint("extra2", "!="),
                    ExtraConstraint("extra3", "!="),
                ),
                ExtraConstraint("extra1"),
            ),
        ),
        (
            ExtraConstraint("extra1"),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
        ),
        (
            ExtraConstraint("extra1"),
            UnionConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra3")),
            (
                UnionConstraint(
                    ExtraConstraint("extra1"),
                    ExtraConstraint("extra2"),
                    ExtraConstraint("extra3"),
                ),
                UnionConstraint(
                    ExtraConstraint("extra2"),
                    ExtraConstraint("extra3"),
                    ExtraConstraint("extra1"),
                ),
            ),
        ),
        (
            ExtraConstraint("extra1"),
            ExtraConstraint("extra2", "!="),
            (
                UnionConstraint(
                    ExtraConstraint("extra1"), ExtraConstraint("extra2", "!=")
                ),
                UnionConstraint(
                    ExtraConstraint("extra2", "!="), ExtraConstraint("extra1")
                ),
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            ExtraConstraint("extra2"),
            (
                UnionConstraint(
                    ExtraConstraint("extra1", "!="), ExtraConstraint("extra2")
                ),
                UnionConstraint(
                    ExtraConstraint("extra2"), ExtraConstraint("extra1", "!=")
                ),
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            ExtraConstraint("extra2", "!="),
            (
                UnionConstraint(
                    ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
                ),
                UnionConstraint(
                    ExtraConstraint("extra2", "!="), ExtraConstraint("extra1", "!=")
                ),
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            ExtraConstraint("extra1", "!="),
        ),
        (
            ExtraConstraint("extra1", "!="),
            ExtraMultiConstraint(
                ExtraConstraint("extra2", "!="), ExtraConstraint("extra3", "!=")
            ),
            UnionConstraint(
                ExtraMultiConstraint(
                    ExtraConstraint("extra2", "!="), ExtraConstraint("extra3", "!=")
                ),
                ExtraConstraint("extra1", "!="),
            ),
        ),
        (
            ExtraConstraint("extra1", "!="),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            AnyConstraint(),
        ),
        (
            ExtraConstraint("extra1", "!="),
            UnionConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra3")),
            (
                UnionConstraint(
                    ExtraConstraint("extra1", "!="),
                    ExtraConstraint("extra2"),
                    ExtraConstraint("extra3"),
                ),
                UnionConstraint(
                    ExtraConstraint("extra2"),
                    ExtraConstraint("extra3"),
                    ExtraConstraint("extra1", "!="),
                ),
            ),
        ),
        (
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
        ),
        (
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra1")),
            (
                ExtraMultiConstraint(
                    ExtraConstraint("extra1"), ExtraConstraint("extra2")
                ),
                ExtraMultiConstraint(
                    ExtraConstraint("extra2"), ExtraConstraint("extra1")
                ),
            ),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra1")),
            (
                UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
                UnionConstraint(ExtraConstraint("extra2"), ExtraConstraint("extra1")),
            ),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra3")),
            (
                UnionConstraint(
                    ExtraConstraint("extra1"),
                    ExtraConstraint("extra2"),
                    ExtraConstraint("extra3"),
                ),
                UnionConstraint(
                    ExtraConstraint("extra1"),
                    ExtraConstraint("extra3"),
                    ExtraConstraint("extra2"),
                ),
            ),
        ),
        (
            UnionConstraint(
                ExtraConstraint("extra1"),
                ExtraConstraint("extra2"),
                ExtraConstraint("extra3"),
            ),
            UnionConstraint(
                ExtraConstraint("extra1"),
                ExtraConstraint("extra4"),
                ExtraConstraint("extra3"),
            ),
            (
                UnionConstraint(
                    ExtraConstraint("extra1"),
                    ExtraConstraint("extra2"),
                    ExtraConstraint("extra3"),
                    ExtraConstraint("extra4"),
                ),
                UnionConstraint(
                    ExtraConstraint("extra1"),
                    ExtraConstraint("extra4"),
                    ExtraConstraint("extra3"),
                    ExtraConstraint("extra2"),
                ),
            ),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra3", "!=")
            ),
            UnionConstraint(
                ExtraConstraint("extra1"),
                ExtraConstraint("extra2"),
                ExtraMultiConstraint(
                    ExtraConstraint("extra1", "!="), ExtraConstraint("extra3", "!=")
                ),
            ),
        ),
        (
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            UnionConstraint(
                ExtraConstraint("extra1"),
                ExtraConstraint("extra2"),
                ExtraMultiConstraint(
                    ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
                ),
            ),
        ),
        (
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
        ),
        (
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
            ExtraMultiConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra3")),
            (
                UnionConstraint(
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1"), ExtraConstraint("extra2")
                    ),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1"), ExtraConstraint("extra3")
                    ),
                ),
                UnionConstraint(
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1"), ExtraConstraint("extra3")
                    ),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1"), ExtraConstraint("extra2")
                    ),
                ),
            ),
        ),
        (
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra3", "!=")
            ),
            (
                UnionConstraint(
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
                    ),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1", "!="), ExtraConstraint("extra3", "!=")
                    ),
                ),
                UnionConstraint(
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1", "!="), ExtraConstraint("extra3", "!=")
                    ),
                    ExtraMultiConstraint(
                        ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
                    ),
                ),
            ),
        ),
    ],
)
def test_union_extra(
    constraint1: BaseConstraint,
    constraint2: BaseConstraint,
    expected: BaseConstraint | tuple[BaseConstraint, BaseConstraint],
) -> None:
    if not isinstance(expected, tuple):
        expected = (expected, expected)

    assert constraint1.union(constraint2) == expected[0]
    assert constraint2.union(constraint1) == expected[1]


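def test_union_extra_sketch() -> None:
    # Editor's sketch, not part of the upstream suite: an extra and its own
    # negation union to "anything", and a bare negation absorbs a stricter
    # conjunction that already implies it; both mirror rows of the table
    # above.
    assert (
        ExtraConstraint("extra1").union(ExtraConstraint("extra1", "!="))
        == AnyConstraint()
    )
    assert ExtraConstraint("extra1", "!=").union(
        ExtraMultiConstraint(
            ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
        )
    ) == ExtraConstraint("extra1", "!=")

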
def test_difference() -> None:
    # difference() removes the values allowed by the argument: subtracting
    # the constraint itself leaves nothing, while subtracting an unrelated
    # value is a no-op.
    c = Constraint("win32")

    assert c.difference(Constraint("win32")).is_empty()
    assert c.difference(Constraint("linux")) == c


@pytest.mark.parametrize(
    "constraint",
    [
        EmptyConstraint(),
        AnyConstraint(),
        Constraint("win32"),
        UnionConstraint(Constraint("win32"), Constraint("linux")),
        MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
    ],
)
def test_constraints_are_hashable(constraint: BaseConstraint) -> None:
    # We're just testing that constraints are hashable; there's nothing much
    # to say about the result.
    hash(constraint)
poetry-core-2.1.1/tests/constraints/generic/test_main.py000066400000000000000000000111401475444614500234600ustar00rootroot00000000000000from __future__ import annotations

import pytest

from poetry.core.constraints.generic import AnyConstraint
from poetry.core.constraints.generic import Constraint
from poetry.core.constraints.generic import MultiConstraint
from poetry.core.constraints.generic import UnionConstraint
from poetry.core.constraints.generic import parse_constraint
from poetry.core.constraints.generic.constraint import ExtraConstraint
from poetry.core.constraints.generic.multi_constraint import ExtraMultiConstraint
from poetry.core.constraints.generic.parser import parse_extra_constraint


@pytest.mark.parametrize(
    ("input", "constraint"),
    [
        ("*", AnyConstraint()),
        ("win32", Constraint("win32", "=")),
        ("=win32", Constraint("win32", "=")),
        ("==win32", Constraint("win32", "=")),
        ("!=win32", Constraint("win32", "!=")),
        ("!= win32", Constraint("win32", "!=")),
        ("'tegra' not in", Constraint("tegra", "not in")),
        ("'tegra' in", Constraint("tegra", "in")),
    ],
)
def test_parse_constraint(input: str, constraint: AnyConstraint | Constraint) -> None:
    assert parse_constraint(input) == constraint


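def test_parse_constraint_equality_sketch() -> None:
    # Editor's sketch, not part of the upstream suite: a bare value, "=", and
    # "==" all parse to the same equality constraint (see the table above).
    assert (
        parse_constraint("win32")
        == parse_constraint("=win32")
        == parse_constraint("==win32")
        == Constraint("win32", "=")
    )

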
@pytest.mark.parametrize(
    ("input", "constraint"),
    [
        (
            "!=win32,!=linux",
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
        ),
        (
            "!=win32,!=linux,!=linux2",
            MultiConstraint(
                Constraint("win32", "!="),
                Constraint("linux", "!="),
                Constraint("linux2", "!="),
            ),
        ),
        (
            "'tegra' not in,'rpi-v8' not in",
            MultiConstraint(
                Constraint("tegra", "not in"),
                Constraint("rpi-v8", "not in"),
            ),
        ),
    ],
)
def test_parse_constraint_multi(input: str, constraint: MultiConstraint) -> None:
    assert parse_constraint(input) == constraint


@pytest.mark.parametrize(
    ("input", "constraint"),
    [
        ("win32 || linux", UnionConstraint(Constraint("win32"), Constraint("linux"))),
        (
            "win32 || !=linux2",
            UnionConstraint(Constraint("win32"), Constraint("linux2", "!=")),
        ),
        (
            "'tegra' in || 'rpi-v8' in",
            UnionConstraint(Constraint("tegra", "in"), Constraint("rpi-v8", "in")),
        ),
    ],
)
def test_parse_constraint_union(input: str, constraint: UnionConstraint) -> None:
    assert parse_constraint(input) == constraint


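def test_parse_constraint_operators_sketch() -> None:
    # Editor's sketch, not part of the upstream suite: "," joins constraints
    # into a conjunction while "||" builds a union, mirroring the two tables
    # above.
    assert parse_constraint("!=win32,!=linux") == MultiConstraint(
        Constraint("win32", "!="), Constraint("linux", "!=")
    )
    assert parse_constraint("win32 || linux") == UnionConstraint(
        Constraint("win32"), Constraint("linux")
    )

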
def test_constraint_is_not_equal_to_extra_constraint() -> None:
    constraint = Constraint("a", "=")
    extra_constraint = ExtraConstraint("a", "=")
    assert constraint != extra_constraint
    assert extra_constraint != constraint


@pytest.mark.parametrize(
    ("input", "constraint"),
    [
        ("*", AnyConstraint()),
        ("extra1", ExtraConstraint("extra1", "=")),
        ("=extra1", ExtraConstraint("extra1", "=")),
        ("==extra1", ExtraConstraint("extra1", "=")),
        ("!=extra1", ExtraConstraint("extra1", "!=")),
        ("!= extra1", ExtraConstraint("extra1", "!=")),
    ],
)
def test_parse_extra_constraint(
    input: str, constraint: AnyConstraint | Constraint
) -> None:
    parsed_constraint = parse_extra_constraint(input)

    assert type(parsed_constraint) is type(constraint)
    assert parsed_constraint == constraint


@pytest.mark.parametrize(
    ("input", "constraint"),
    [
        (
            "!=extra1,!=extra2",
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "!="), ExtraConstraint("extra2", "!=")
            ),
        ),
        (
            "==extra1,==extra2",
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "=="), ExtraConstraint("extra2", "==")
            ),
        ),
        (
            "==extra1,!=extra2,==extra3",
            ExtraMultiConstraint(
                ExtraConstraint("extra1", "=="),
                ExtraConstraint("extra2", "!="),
                ExtraConstraint("extra3", "=="),
            ),
        ),
    ],
)
def test_parse_extra_constraint_multi(input: str, constraint: MultiConstraint) -> None:
    assert parse_extra_constraint(input) == constraint


@pytest.mark.parametrize(
    ("input", "constraint"),
    [
        (
            "extra1 || extra2",
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2")),
        ),
        (
            "extra1 || !=extra2",
            UnionConstraint(ExtraConstraint("extra1"), ExtraConstraint("extra2", "!=")),
        ),
    ],
)
def test_parse_extra_constraint_union(input: str, constraint: UnionConstraint) -> None:
    assert parse_extra_constraint(input) == constraint
poetry-core-2.1.1/tests/constraints/generic/test_multi_constraint.py000066400000000000000000000057511475444614500261450ustar00rootroot00000000000000from __future__ import annotations

import pytest

from poetry.core.constraints.generic import AnyConstraint
from poetry.core.constraints.generic import BaseConstraint
from poetry.core.constraints.generic import Constraint
from poetry.core.constraints.generic import EmptyConstraint
from poetry.core.constraints.generic import MultiConstraint
from poetry.core.constraints.generic import UnionConstraint


def test_allows() -> None:
    c = MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!="))

    assert not c.allows(Constraint("win32"))
    assert not c.allows(Constraint("linux"))
    assert c.allows(Constraint("darwin"))


@pytest.mark.parametrize(
    ("constraint", "expected_any", "expected_all"),
    [
        (EmptyConstraint(), False, True),
        (AnyConstraint(), True, False),
        (Constraint("win32"), False, False),
        (Constraint("linux"), False, False),
        (Constraint("darwin"), True, True),
        (Constraint("win32", "!="), True, False),
        (Constraint("linux", "!="), True, False),
        (Constraint("darwin", "!="), True, False),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            False,
            False,
        ),
        (
            UnionConstraint(
                Constraint("win32"), Constraint("linux"), Constraint("darwin")
            ),
            True,
            False,
        ),
        (
            UnionConstraint(Constraint("darwin"), Constraint("linux")),
            True,
            False,
        ),
        (
            UnionConstraint(Constraint("darwin"), Constraint("osx")),
            True,
            True,
        ),
        (
            UnionConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            UnionConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            UnionConstraint(Constraint("darwin", "!="), Constraint("osx", "!=")),
            True,
            False,
        ),
        (
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            True,
            True,
        ),
        (
            MultiConstraint(
                Constraint("win32", "!="),
                Constraint("linux", "!="),
                Constraint("darwin", "!="),
            ),
            True,
            True,
        ),
        (
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            MultiConstraint(Constraint("darwin", "!="), Constraint("osx", "!=")),
            True,
            False,
        ),
    ],
)
def test_allows_any_and_allows_all(
    constraint: BaseConstraint, expected_any: bool, expected_all: bool
) -> None:
    c = MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!="))
    assert c.allows_any(constraint) == expected_any
    assert c.allows_all(constraint) == expected_all
poetry-core-2.1.1/tests/constraints/generic/test_union_constraint.py000066400000000000000000000057171475444614500261450ustar00rootroot00000000000000from __future__ import annotations

import pytest

from poetry.core.constraints.generic import AnyConstraint
from poetry.core.constraints.generic import BaseConstraint
from poetry.core.constraints.generic import Constraint
from poetry.core.constraints.generic import EmptyConstraint
from poetry.core.constraints.generic import MultiConstraint
from poetry.core.constraints.generic import UnionConstraint


def test_allows() -> None:
    c = UnionConstraint(Constraint("win32"), Constraint("linux"))

    assert c.allows(Constraint("win32"))
    assert c.allows(Constraint("linux"))
    assert not c.allows(Constraint("darwin"))


@pytest.mark.parametrize(
    ("constraint", "expected_any", "expected_all"),
    [
        (EmptyConstraint(), False, True),
        (AnyConstraint(), True, False),
        (Constraint("win32"), True, True),
        (Constraint("linux"), True, True),
        (Constraint("darwin"), False, False),
        (Constraint("win32", "!="), True, False),
        (Constraint("linux", "!="), True, False),
        (Constraint("darwin", "!="), True, False),
        (
            UnionConstraint(Constraint("win32"), Constraint("linux")),
            True,
            True,
        ),
        (
            UnionConstraint(
                Constraint("win32"), Constraint("linux"), Constraint("darwin")
            ),
            True,
            False,
        ),
        (
            UnionConstraint(Constraint("darwin"), Constraint("linux")),
            True,
            False,
        ),
        (
            UnionConstraint(Constraint("darwin"), Constraint("osx")),
            False,
            False,
        ),
        (
            UnionConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            UnionConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            UnionConstraint(Constraint("darwin", "!="), Constraint("osx", "!=")),
            True,
            False,
        ),
        (
            MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")),
            False,
            False,
        ),
        (
            MultiConstraint(
                Constraint("win32", "!="),
                Constraint("linux", "!="),
                Constraint("darwin", "!="),
            ),
            False,
            False,
        ),
        (
            MultiConstraint(Constraint("darwin", "!="), Constraint("linux", "!=")),
            True,
            False,
        ),
        (
            MultiConstraint(Constraint("darwin", "!="), Constraint("osx", "!=")),
            True,
            False,
        ),
    ],
)
def test_allows_any_and_allows_all(
    constraint: BaseConstraint, expected_any: bool, expected_all: bool
) -> None:
    c = UnionConstraint(Constraint("win32"), Constraint("linux"))
    assert c.allows_any(constraint) == expected_any
    assert c.allows_all(constraint) == expected_all
poetry-core-2.1.1/tests/constraints/version/000077500000000000000000000000001475444614500211775ustar00rootroot00000000000000poetry-core-2.1.1/tests/constraints/version/__init__.py000066400000000000000000000000001475444614500232760ustar00rootroot00000000000000poetry-core-2.1.1/tests/constraints/version/test_parse_constraint.py000066400000000000000000000435401475444614500261740ustar00rootroot00000000000000from __future__ import annotations

import pytest

from poetry.core.constraints.version import Version
from poetry.core.constraints.version import VersionConstraint
from poetry.core.constraints.version import VersionRange
from poetry.core.constraints.version import VersionUnion
from poetry.core.constraints.version import parse_constraint
from poetry.core.constraints.version import parse_marker_version_constraint
from poetry.core.constraints.version.exceptions import ParseConstraintError
from poetry.core.version.pep440 import ReleaseTag


@pytest.mark.parametrize(
    "input,constraint",
    [
        ("*", VersionRange()),
        ("*.*", VersionRange()),
        ("v*.*", VersionRange()),
        (">1.0.0", VersionRange(min=Version.from_parts(1, 0, 0))),
        ("<1.2.3", VersionRange(max=Version.from_parts(1, 2, 3))),
        ("<=1.2.3", VersionRange(max=Version.from_parts(1, 2, 3), include_max=True)),
        (">=1.2.3", VersionRange(min=Version.from_parts(1, 2, 3), include_min=True)),
        ("=1.2.3", Version.from_parts(1, 2, 3)),
        ("1.2.3", Version.from_parts(1, 2, 3)),
        ("1!2.3.4", Version.from_parts(2, 3, 4, epoch=1)),
        ("=1.0", Version.from_parts(1, 0, 0)),
        ("1.2.3b5", Version.from_parts(1, 2, 3, pre=ReleaseTag("beta", 5))),
        (
            "1.0.0a1.dev0",
            Version.from_parts(
                1, 0, 0, pre=ReleaseTag("a", 1), dev=ReleaseTag("dev", 0)
            ),
        ),
        (
            ">dev",
            VersionRange(min=Version.from_parts(0, 0, dev=ReleaseTag("dev"))),
        ),  # Issue 206
    ],
)
def test_parse_constraint(input: str, constraint: Version | VersionRange) -> None:
    assert parse_constraint(input) == constraint


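def test_parse_constraint_bounds_sketch() -> None:
    # Editor's sketch, not part of the upstream suite: comparison operators
    # map onto half-open ranges while a bare version is an exact pin, as in
    # the table above.
    assert parse_constraint(">=1.2.3") == VersionRange(
        min=Version.from_parts(1, 2, 3), include_min=True
    )
    assert parse_constraint("1.2.3") == Version.from_parts(1, 2, 3)

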
@pytest.mark.parametrize(
    "input,constraint",
    [
        (
            "v2.*",
            VersionRange(Version.parse("2.dev0"), Version.parse("3.dev0"), True),
        ),
        (
            "2.*.*",
            VersionRange(Version.parse("2.dev0"), Version.parse("3.dev0"), True),
        ),
        (
            "20.*",
            VersionRange(Version.parse("20.dev0"), Version.parse("21.dev0"), True),
        ),
        (
            "20.*.*",
            VersionRange(Version.parse("20.dev0"), Version.parse("21.dev0"), True),
        ),
        (
            "2.0.*",
            VersionRange(Version.parse("2.0.dev0"), Version.parse("2.1.dev0"), True),
        ),
        ("0.*", VersionRange(Version.parse("0.dev0"), Version.parse("1.dev0"), True)),
        ("0.*.*", VersionRange(Version.parse("0.dev0"), Version.parse("1.dev0"), True)),
        (
            "2.0.post1.*",
            VersionRange(
                min=Version.parse("2.0.post1.dev0"),
                max=Version.parse("2.0.post2.dev0"),
                include_min=True,
                include_max=False,
            ),
        ),
        (
            "2.0a1.*",
            VersionRange(
                min=Version.parse("2.0a1.dev0"),
                max=Version.parse("2.0a2.dev0"),
                include_min=True,
                include_max=False,
            ),
        ),
        (
            "2.0dev0.*",
            VersionRange(
                min=Version.parse("2.0dev0"),
                max=Version.parse("2.0dev1"),
                include_min=True,
                include_max=False,
            ),
        ),
    ],
)
def test_parse_constraint_wildcard(input: str, constraint: VersionRange) -> None:
    assert parse_constraint(input) == constraint


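def test_parse_constraint_wildcard_sketch() -> None:
    # Editor's sketch, not part of the upstream suite: a trailing ".*" turns
    # into a half-open range between the adjacent ".dev0" pre-releases, as in
    # the table above.
    assert parse_constraint("2.0.*") == VersionRange(
        Version.parse("2.0.dev0"), Version.parse("2.1.dev0"), True
    )

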
@pytest.mark.parametrize(
    "input,constraint",
    [
        (
            "~v1",
            VersionRange(
                Version.from_parts(1, 0, 0), Version.from_parts(2, 0, 0), True
            ),
        ),
        (
            "~1.0",
            VersionRange(
                Version.from_parts(1, 0, 0), Version.from_parts(1, 1, 0), True
            ),
        ),
        (
            "~1.0.0",
            VersionRange(
                Version.from_parts(1, 0, 0), Version.from_parts(1, 1, 0), True
            ),
        ),
        (
            "~1.2",
            VersionRange(
                Version.from_parts(1, 2, 0), Version.from_parts(1, 3, 0), True
            ),
        ),
        (
            "~1.2.3",
            VersionRange(
                Version.from_parts(1, 2, 3), Version.from_parts(1, 3, 0), True
            ),
        ),
        (
            "~1.0.0a1",
            VersionRange(
                min=Version.from_parts(1, 0, 0, pre=ReleaseTag("a", 1)),
                max=Version.from_parts(1, 1, 0),
                include_min=True,
            ),
        ),
        (
            "~1.0.0a1.dev0",
            VersionRange(
                min=Version.from_parts(
                    1, 0, 0, pre=ReleaseTag("a", 1), dev=ReleaseTag("dev", 0)
                ),
                max=Version.from_parts(1, 1, 0),
                include_min=True,
            ),
        ),
        (
            "~1.2-beta",
            VersionRange(
                Version.from_parts(1, 2, 0, pre=ReleaseTag("beta")),
                Version.from_parts(1, 3, 0),
                True,
            ),
        ),
        (
            "~1.2-b2",
            VersionRange(
                Version.from_parts(1, 2, 0, pre=ReleaseTag("beta", 2)),
                Version.from_parts(1, 3, 0),
                True,
            ),
        ),
        (
            "~0.3",
            VersionRange(
                Version.from_parts(0, 3, 0), Version.from_parts(0, 4, 0), True
            ),
        ),
        (
            "~3.5",
            VersionRange(
                Version.from_parts(3, 5, 0), Version.from_parts(3, 6, 0), True
            ),
        ),
        (
            "~=3.5",
            VersionRange(
                Version.from_parts(3, 5, 0), Version.from_parts(4, 0, 0), True
            ),
        ),  # PEP 440
        (
            "~=3.5.3",
            VersionRange(
                Version.from_parts(3, 5, 3), Version.from_parts(3, 6, 0), True
            ),
        ),  # PEP 440
        (
            "~=3.5.3rc1",
            VersionRange(
                Version.from_parts(3, 5, 3, pre=ReleaseTag("rc", 1)),
                Version.from_parts(3, 6, 0),
                True,
            ),
        ),  # PEP 440
    ],
)
def test_parse_constraint_tilde(input: str, constraint: VersionRange) -> None:
    assert parse_constraint(input) == constraint


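def test_parse_constraint_tilde_sketch() -> None:
    # Editor's sketch, not part of the upstream suite: per the table above,
    # "~1.2.3" stays below 1.3.0, whereas PEP 440's "~=3.5" allows anything
    # below 4.0.0.
    assert parse_constraint("~1.2.3") == VersionRange(
        Version.from_parts(1, 2, 3), Version.from_parts(1, 3, 0), True
    )
    assert parse_constraint("~=3.5") == VersionRange(
        Version.from_parts(3, 5, 0), Version.from_parts(4, 0, 0), True
    )

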
@pytest.mark.parametrize(
    "input,constraint",
    [
        (
            "^v1",
            VersionRange(
                Version.from_parts(1, 0, 0), Version.from_parts(2, 0, 0), True
            ),
        ),
        ("^0", VersionRange(Version.from_parts(0), Version.from_parts(1), True)),
        (
            "^0.0",
            VersionRange(
                Version.from_parts(0, 0, 0), Version.from_parts(0, 1, 0), True
            ),
        ),
        (
            "^1.2",
            VersionRange(
                Version.from_parts(1, 2, 0), Version.from_parts(2, 0, 0), True
            ),
        ),
        (
            "^1.2.3-beta.2",
            VersionRange(
                Version.from_parts(1, 2, 3, pre=ReleaseTag("beta", 2)),
                Version.from_parts(2, 0, 0),
                True,
            ),
        ),
        (
            "^1.2.3",
            VersionRange(
                Version.from_parts(1, 2, 3), Version.from_parts(2, 0, 0), True
            ),
        ),
        (
            "^0.2.3",
            VersionRange(
                Version.from_parts(0, 2, 3), Version.from_parts(0, 3, 0), True
            ),
        ),
        (
            "^0.2",
            VersionRange(
                Version.from_parts(0, 2, 0), Version.from_parts(0, 3, 0), True
            ),
        ),
        (
            "^0.2.0",
            VersionRange(
                Version.from_parts(0, 2, 0), Version.from_parts(0, 3, 0), True
            ),
        ),
        (
            "^0.0.3",
            VersionRange(
                Version.from_parts(0, 0, 3), Version.from_parts(0, 0, 4), True
            ),
        ),
        (
            "^0.0.3-alpha.21",
            VersionRange(
                Version.from_parts(0, 0, 3, pre=ReleaseTag("alpha", 21)),
                Version.from_parts(0, 0, 4),
                True,
            ),
        ),
        (
            "^0.1.3-alpha.21",
            VersionRange(
                Version.from_parts(0, 1, 3, pre=ReleaseTag("alpha", 21)),
                Version.from_parts(0, 2, 0),
                True,
            ),
        ),
        (
            "^0.0.0-alpha.21",
            VersionRange(
                Version.from_parts(0, 0, 0, pre=ReleaseTag("alpha", 21)),
                Version.from_parts(0, 0, 1),
                True,
            ),
        ),
        (
            "^1.0.0a1",
            VersionRange(
                min=Version.from_parts(1, 0, 0, pre=ReleaseTag("a", 1)),
                max=Version.from_parts(2, 0, 0),
                include_min=True,
            ),
        ),
        (
            "^1.0.0a1.dev0",
            VersionRange(
                min=Version.from_parts(
                    1, 0, 0, pre=ReleaseTag("a", 1), dev=ReleaseTag("dev", 0)
                ),
                max=Version.from_parts(2, 0, 0),
                include_min=True,
            ),
        ),
    ],
)
def test_parse_constraint_caret(input: str, constraint: VersionRange) -> None:
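    # "^" allows changes up to the next release of the leftmost non-zero
    # component: "^1.2.3" -> "<2.0.0", "^0.2.3" -> "<0.3.0", "^0.0.3" -> "<0.0.4".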
    assert parse_constraint(input) == constraint


def test_parse_constraint_multi() -> None:
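    # A comma intersects constraints: ">" keeps the lower bound exclusive while
    # "<=" makes the upper bound inclusive.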
    assert parse_constraint(">2.0,<=3.0") == VersionRange(
        Version.from_parts(2, 0, 0),
        Version.from_parts(3, 0, 0),
        include_min=False,
        include_max=True,
    )


@pytest.mark.parametrize(
    "input, output",
    [
        (
            ">1!2,<=2!3",
            VersionRange(
                Version.from_parts(2, 0, 0, epoch=1),
                Version.from_parts(3, 0, 0, epoch=2),
                include_min=False,
                include_max=True,
            ),
        ),
        (
            ">=1!2,<2!3",
            VersionRange(
                Version.from_parts(2, 0, 0, epoch=1),
                Version.from_parts(3, 0, 0, epoch=2),
                include_min=True,
                include_max=False,
            ),
        ),
    ],
)
def test_parse_constraint_multi_with_epochs(input: str, output: VersionRange) -> None:
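    # The "N!" epoch prefix is parsed into the version's epoch and carried
    # through both bounds of the range.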
    assert parse_constraint(input) == output


def test_parse_constraint_multi_wildcard() -> None:
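    # Excluding the wildcard series 3.0.* and 3.1.* removes [3.0.dev0, 3.2.dev0)
    # from ">=2.7", leaving a union of two disjoint ranges.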
    assert parse_constraint(">=2.7,!=3.0.*,!=3.1.*") == VersionUnion(
        VersionRange(Version.parse("2.7"), Version.parse("3.0.dev0"), True, False),
        VersionRange(Version.parse("3.2.dev0"), None, True, False),
    )


@pytest.mark.parametrize(
    "input,constraint",
    [
        (
            "!=v2.*",
            VersionRange(max=Version.parse("2.0.0.dev0")).union(
                VersionRange(Version.parse("3.0.dev0"), include_min=True)
            ),
        ),
        (
            "!=2.*.*",
            VersionRange(max=Version.parse("2.0.0.dev0")).union(
                VersionRange(Version.parse("3.0.dev0"), include_min=True)
            ),
        ),
        (
            "!=2.0.*",
            VersionRange(max=Version.parse("2.0.0.dev0")).union(
                VersionRange(Version.parse("2.1.dev0"), include_min=True)
            ),
        ),
        (
            "!=0.*",
            VersionRange(max=Version.parse("0.dev0")).union(
                VersionRange(Version.parse("1.0.dev0"), include_min=True)
            ),
        ),
        (
            "!=0.*.*",
            VersionRange(max=Version.parse("0.dev0")).union(
                VersionRange(Version.parse("1.0.dev0"), include_min=True)
            ),
        ),
    ],
)
def test_parse_constraints_negative_wildcard(
    input: str, constraint: VersionRange
) -> None:
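    # A negative wildcard excludes the whole matching series including its dev
    # releases, hence the ".dev0" boundaries in the expected unions.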
    assert parse_constraint(input) == constraint


@pytest.mark.parametrize(
    "input,constraint",
    [
        (">3.7,", VersionRange(min=Version.parse("3.7"))),
        (">3.7 , ", VersionRange(min=Version.parse("3.7"))),
        (
            ">3.7,<3.8,",
            VersionRange(min=Version.parse("3.7"), max=Version.parse("3.8")),
        ),
        (
            ">3.7,||<3.6,",
            VersionRange(min=Version.parse("3.7")).union(
                VersionRange(max=Version.parse("3.6"))
            ),
        ),
        (
            ">3.7 , || <3.6 , ",
            VersionRange(min=Version.parse("3.7")).union(
                VersionRange(max=Version.parse("3.6"))
            ),
        ),
        (
            ">3.7, <3.8, || <3.6, >3.5",
            VersionRange(min=Version.parse("3.7"), max=Version.parse("3.8")).union(
                VersionRange(min=Version.parse("3.5"), max=Version.parse("3.6"))
            ),
        ),
    ],
)
def test_parse_constraints_with_trailing_comma(
    input: str, constraint: VersionRange
) -> None:
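    # Trailing commas and stray whitespace around "," and "||" are tolerated.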
    assert parse_constraint(input) == constraint


@pytest.mark.parametrize(
    "input, expected",
    [
        ("1", "1"),
        ("1.2", "1.2"),
        ("1.2.3", "1.2.3"),
        ("!=1", "!=1"),
        ("!=1.2", "!=1.2"),
        ("!=1.2.3", "!=1.2.3"),
        ("^1", ">=1,<2"),
        ("^1.0", ">=1.0,<2.0"),
        ("^1.0.0", ">=1.0.0,<2.0.0"),
        ("^1.0.0-alpha.1", ">=1.0.0-alpha.1,<2.0.0"),
        ("^0", ">=0,<1"),
        ("^0.1", ">=0.1,<0.2"),
        ("^0.0.2", ">=0.0.2,<0.0.3"),
        ("^0.1.2", ">=0.1.2,<0.2.0"),
        ("^0-alpha.1", ">=0-alpha.1,<1"),
        ("^0.1-alpha.1", ">=0.1-alpha.1,<0.2"),
        ("^0.0.2-alpha.1", ">=0.0.2-alpha.1,<0.0.3"),
        ("^0.1.2-alpha.1", ">=0.1.2-alpha.1,<0.2.0"),
        ("~1", ">=1,<2"),
        ("~1.0", ">=1.0,<1.1"),
        ("~1.0.0", ">=1.0.0,<1.1.0"),
    ],
)
def test_constraints_keep_version_precision(input: str, expected: str) -> None:
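    # The string form preserves the precision of the input: "^1" renders as
    # ">=1,<2" rather than ">=1.0.0,<2.0.0".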
    assert str(parse_constraint(input)) == expected


@pytest.mark.parametrize(
    "constraint_parts,expected",
    [
        (["3.8"], Version.from_parts(3, 8)),
        (["=", "3.8"], Version.from_parts(3, 8)),
        (["==", "3.8"], Version.from_parts(3, 8)),
        ([">", "3.8"], VersionRange(min=Version.from_parts(3, 8))),
        ([">=", "3.8"], VersionRange(min=Version.from_parts(3, 8), include_min=True)),
        (["<", "3.8"], VersionRange(max=Version.from_parts(3, 8))),
        (["<=", "3.8"], VersionRange(max=Version.from_parts(3, 8), include_max=True)),
        (
            ["^", "3.8"],
            VersionRange(
                min=Version.from_parts(3, 8),
                max=Version.from_parts(4, 0),
                include_min=True,
            ),
        ),
        (
            ["~", "3.8"],
            VersionRange(
                min=Version.from_parts(3, 8),
                max=Version.from_parts(3, 9),
                include_min=True,
            ),
        ),
        (
            ["~=", "3.8"],
            VersionRange(
                min=Version.from_parts(3, 8),
                max=Version.from_parts(4, 0),
                include_min=True,
            ),
        ),
        (
            ["3.8.*"],
            VersionRange(
                min=Version.parse("3.8.0.dev0"),
                max=Version.parse("3.9.0.dev0"),
                include_min=True,
            ),
        ),
        (
            ["==", "3.8.*"],
            VersionRange(
                min=Version.parse("3.8.0.dev0"),
                max=Version.parse("3.9.0.dev0"),
                include_min=True,
            ),
        ),
        (
            ["!=", "3.8.*"],
            VersionRange(max=Version.parse("3.8.dev0")).union(
                VersionRange(Version.parse("3.9.dev0"), include_min=True)
            ),
        ),
        (
            [">", "3.8", ",", "<=", "6.5"],
            VersionRange(
                min=Version.from_parts(3, 8),
                max=Version.from_parts(6, 5),
                include_max=True,
            ),
        ),
        (
            [">=", "2.7", ",", "!=", "3.0.*", ",", "!=", "3.1.*"],
            VersionUnion(
                VersionRange(
                    Version.parse("2.7"), Version.parse("3.0.dev0"), True, False
                ),
                VersionRange(Version.parse("3.2.dev0"), None, True, False),
            ),
        ),
        (
            ["~", "2.7", "||", "~", "3.8"],
            VersionUnion(
                VersionRange(
                    min=Version.from_parts(2, 7),
                    max=Version.from_parts(2, 8),
                    include_min=True,
                ),
                VersionRange(
                    min=Version.from_parts(3, 8),
                    max=Version.from_parts(3, 9),
                    include_min=True,
                ),
            ),
        ),
        (
            ["~", "2.7", "||", "~", "3.8", "|", ">=", "3.10", ",", "<", "3.12"],
            VersionUnion(
                VersionRange(
                    min=Version.from_parts(2, 7),
                    max=Version.from_parts(2, 8),
                    include_min=True,
                ),
                VersionRange(
                    min=Version.from_parts(3, 8),
                    max=Version.from_parts(3, 9),
                    include_min=True,
                ),
                VersionRange(
                    min=Version.from_parts(3, 10),
                    max=Version.from_parts(3, 12),
                    include_min=True,
                ),
            ),
        ),
    ],
)
@pytest.mark.parametrize(("with_whitespace_padding",), [(True,), (False,)])
def test_parse_constraint_with_white_space_padding(
    constraint_parts: list[str],
    expected: VersionConstraint,
    with_whitespace_padding: bool,
) -> None:
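    # Join the tokens with four spaces (or none) and pad both ends to verify
    # that parsing is insensitive to whitespace around operators and versions.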
    padding = " " * (4 if with_whitespace_padding else 0)
    constraint = padding.join(["", *constraint_parts, ""])
    assert parse_constraint(constraint) == expected


def test_parse_marker_constraint_does_not_allow_invalid_version() -> None:
    with pytest.raises(ParseConstraintError):
        parse_marker_version_constraint("4.9.253-tegra")


def test_parse_marker_constraint_does_allow_invalid_version_if_requested() -> None:
    assert parse_marker_version_constraint(
        "4.9.253-tegra", pep440=False
    ) == Version.from_parts(4, 9, 253, local="tegra")
poetry-core-2.1.1/tests/constraints/version/test_utils.py000066400000000000000000000055551475444614500237620ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

import pytest

from poetry.core.constraints.version import EmptyConstraint
from poetry.core.constraints.version import Version
from poetry.core.constraints.version import VersionRange
from poetry.core.constraints.version import constraint_regions


if TYPE_CHECKING:
    from poetry.core.constraints.version import VersionConstraint


PY27 = Version.parse("2.7")
PY30 = Version.parse("3")
PY36 = Version.parse("3.6.0")
PY37 = Version.parse("3.7")
PY38 = Version.parse("3.8.0")
PY40 = Version.parse("4.0.0")


@pytest.mark.parametrize(
    "versions, expected",
    [
        ([VersionRange(None, None)], [VersionRange(None, None)]),
        ([EmptyConstraint()], [VersionRange(None, None)]),
        (
            [VersionRange(PY27, None, include_min=True)],
            [
                VersionRange(None, PY27, include_max=False),
                VersionRange(PY27, None, include_min=True),
            ],
        ),
        (
            [VersionRange(None, PY40, include_max=False)],
            [
                VersionRange(None, PY40, include_max=False),
                VersionRange(PY40, None, include_min=True),
            ],
        ),
        (
            [VersionRange(PY27, PY27, include_min=True, include_max=True)],
            [
                VersionRange(None, PY27, include_max=False),
                VersionRange(PY27, PY27, include_min=True, include_max=True),
                VersionRange(PY27, None, include_min=False),
            ],
        ),
        (
            [VersionRange(PY27, PY30, include_min=True, include_max=False)],
            [
                VersionRange(None, PY27, include_max=False),
                VersionRange(PY27, PY30, include_min=True, include_max=False),
                VersionRange(PY30, None, include_min=True),
            ],
        ),
        (
            [
                VersionRange(PY27, PY30, include_min=True, include_max=False).union(
                    VersionRange(PY37, PY40, include_min=False, include_max=True)
                ),
                VersionRange(PY36, PY38, include_min=True, include_max=False),
            ],
            [
                VersionRange(None, PY27, include_max=False),
                VersionRange(PY27, PY30, include_min=True, include_max=False),
                VersionRange(PY30, PY36, include_min=True, include_max=False),
                VersionRange(PY36, PY37, include_min=True, include_max=True),
                VersionRange(PY37, PY38, include_min=False, include_max=False),
                VersionRange(PY38, PY40, include_min=True, include_max=True),
                VersionRange(PY40, None, include_min=False),
            ],
        ),
    ],
)
def test_constraint_regions(
    versions: list[VersionConstraint], expected: list[VersionRange]
) -> None:
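    # The regions are expected to cover the whole version line with disjoint,
    # adjacent ranges whose boundaries come from the given constraints.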
    regions = constraint_regions(versions)
    assert regions == expected
poetry-core-2.1.1/tests/constraints/version/test_version.py000066400000000000000000000440541475444614500243040ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

import pytest

from poetry.core.constraints.version import EmptyConstraint
from poetry.core.constraints.version import Version
from poetry.core.constraints.version import VersionRange
from poetry.core.version.exceptions import InvalidVersionError
from poetry.core.version.pep440 import ReleaseTag


if TYPE_CHECKING:
    from poetry.core.constraints.version import VersionConstraint


@pytest.mark.parametrize(
    "text,version",
    [
        ("1.0.0", Version.from_parts(1, 0, 0)),
        ("1", Version.from_parts(1, 0, 0)),
        ("1.0", Version.from_parts(1, 0, 0)),
        ("1b1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))),
        ("1.0b1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))),
        ("1.0.0b1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))),
        ("1.0.0-b1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))),
        ("1.0.0-beta.1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))),
        ("1.0.0+1", Version.from_parts(1, 0, 0, local=1)),
        ("1.0.0-1", Version.from_parts(1, 0, 0, post=ReleaseTag("post", 1))),
        ("1.0.0.0", Version.from_parts(1, 0, 0, extra=0)),
        ("1.0.0-post", Version.from_parts(1, 0, 0, post=ReleaseTag("post"))),
        ("1.0.0-post1", Version.from_parts(1, 0, 0, post=ReleaseTag("post", 1))),
        ("0.6c", Version.from_parts(0, 6, 0, pre=ReleaseTag("rc", 0))),
        ("0.6pre", Version.from_parts(0, 6, 0, pre=ReleaseTag("preview", 0))),
        ("1!2.3.4", Version.from_parts(2, 3, 4, epoch=1)),
    ],
)
def test_parse_valid(text: str, version: Version) -> None:
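    # Parsing normalizes the parts for comparison but keeps the verbatim text.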
    parsed = Version.parse(text)

    assert parsed == version
    assert parsed.text == text


@pytest.mark.parametrize("value", [None, "example"])
def test_parse_invalid(value: str | None) -> None:
    with pytest.raises(InvalidVersionError):
        Version.parse(value)  # type: ignore[arg-type]


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "1"),
        ("1.2", "1.2"),
        ("1.2.3", "1.2.3"),
        ("2!1.2.3", "2!1.2.3"),
        ("1.2.3+local", "1.2.3+local"),
        ("1.2.3.4", "1.2.3.4"),
        ("1.dev0", "1"),
        ("1.2dev0", "1.2"),
        ("1.2.3dev0", "1.2.3"),
        ("1.2.3.4dev0", "1.2.3.4"),
        ("1.post1", "1.post1"),
        ("1.2.post1", "1.2.post1"),
        ("1.2.3.post1", "1.2.3.post1"),
        ("1.post1.dev0", "1.post1"),
        ("1.2.post1.dev0", "1.2.post1"),
        ("1.2.3.post1.dev0", "1.2.3.post1"),
        ("1.a1", "1"),
        ("1.2a1", "1.2"),
        ("1.2.3a1", "1.2.3"),
        ("1.2.3.4a1", "1.2.3.4"),
        ("1.a1.post2", "1"),
        ("1.2a1.post2", "1.2"),
        ("1.2.3a1.post2", "1.2.3"),
        ("1.2.3.4a1.post2", "1.2.3.4"),
        ("1.a1.post2.dev0", "1"),
        ("1.2a1.post2.dev0", "1.2"),
        ("1.2.3a1.post2.dev0", "1.2.3"),
        ("1.2.3.4a1.post2.dev0", "1.2.3.4"),
    ],
)
def test_stable(version: str, expected: str) -> None:
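    # .stable drops dev and pre-release markers (and anything following a pre
    # tag) while post releases, local tags and the epoch are preserved.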
    subject = Version.parse(version)

    assert subject.stable.text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "2"),
        ("1.2", "2.0"),
        ("1.2.3", "2.0.0"),
        ("2!1.2.3", "2!2.0.0"),
        ("1.2.3+local", "2.0.0"),
        ("1.2.3.4", "2.0.0.0"),
        ("1.dev0", "2"),
        ("1.2dev0", "2.0"),
        ("1.2.3dev0", "2.0.0"),
        ("1.2.3.4dev0", "2.0.0.0"),
        ("1.post1", "2"),
        ("1.2.post1", "2.0"),
        ("1.2.3.post1", "2.0.0"),
        ("1.post1.dev0", "2"),
        ("1.2.post1.dev0", "2.0"),
        ("1.2.3.post1.dev0", "2.0.0"),
        ("2.a1", "3"),
        ("2.2a1", "3.0"),
        ("2.2.3a1", "3.0.0"),
        ("2.2.3.4a1", "3.0.0.0"),
        ("2.a1.post2", "3"),
        ("2.2a1.post2", "3.0"),
        ("2.2.3a1.post2", "3.0.0"),
        ("2.2.3.4a1.post2", "3.0.0.0"),
        ("2.a1.post2.dev0", "3"),
        ("2.2a1.post2.dev0", "3.0"),
        ("2.2.3a1.post2.dev0", "3.0.0"),
        ("2.2.3.4a1.post2.dev0", "3.0.0.0"),
    ],
)
def test_next_breaking_for_major_over_0_results_into_next_major_and_preserves_precision(
    version: str, expected: str
) -> None:
    subject = Version.parse(version)

    assert subject.next_breaking().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("0", "1"),
        ("0.0", "0.1"),
        ("0.2", "0.3"),
        ("0.2.3", "0.3.0"),
        ("2!0.2.3", "2!0.3.0"),
        ("0.2.3+local", "0.3.0"),
        ("0.2.3.4", "0.3.0.0"),
        ("0.0.3.4", "0.0.4.0"),
        ("0.dev0", "1"),
        ("0.0dev0", "0.1"),
        ("0.2dev0", "0.3"),
        ("0.2.3dev0", "0.3.0"),
        ("0.0.3dev0", "0.0.4"),
        ("0.post1", "1"),
        ("0.0.post1", "0.1"),
        ("0.2.post1", "0.3"),
        ("0.2.3.post1", "0.3.0"),
        ("0.0.3.post1", "0.0.4"),
        ("0.post1.dev0", "1"),
        ("0.0.post1.dev0", "0.1"),
        ("0.2.post1.dev0", "0.3"),
        ("0.2.3.post1.dev0", "0.3.0"),
        ("0.0.3.post1.dev0", "0.0.4"),
        ("0.a1", "1"),
        ("0.0a1", "0.1"),
        ("0.2a1", "0.3"),
        ("0.2.3a1", "0.3.0"),
        ("0.2.3.4a1", "0.3.0.0"),
        ("0.0.3.4a1", "0.0.4.0"),
        ("0.a1.post2", "1"),
        ("0.0a1.post2", "0.1"),
        ("0.2a1.post2", "0.3"),
        ("0.2.3a1.post2", "0.3.0"),
        ("0.2.3.4a1.post2", "0.3.0.0"),
        ("0.0.3.4a1.post2", "0.0.4.0"),
        ("0.a1.post2.dev0", "1"),
        ("0.0a1.post2.dev0", "0.1"),
        ("0.2a1.post2.dev0", "0.3"),
        ("0.2.3a1.post2.dev0", "0.3.0"),
        ("0.2.3.4a1.post2.dev0", "0.3.0.0"),
        ("0.0.3.4a1.post2.dev0", "0.0.4.0"),
        ("0-alpha.1", "1"),
        ("0.0-alpha.1", "0.1"),
        ("0.2-alpha.1", "0.3"),
        ("0.0.1-alpha.2", "0.0.2"),
        ("0.1.2-alpha.1", "0.2.0"),
    ],
)
def test_next_breaking_for_major_0_is_treated_with_more_care_and_preserves_precision(
    version: str, expected: str
) -> None:
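    # With a 0 major version, the first non-zero component marks the breaking
    # boundary: 0.2.3 bumps to 0.3.0 and 0.0.3.4 to 0.0.4.0.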
    subject = Version.parse(version)

    assert subject.next_breaking().text == expected


@pytest.mark.parametrize(
    "versions",
    [
        [
            "1.0.0-alpha",
            "1.0.0-alpha.1",
            "1.0.0-beta.2",
            "1.0.0-beta.11",
            "1.0.0-rc.1",
            "1.0.0-rc.1+build.1",
            "1.0.0",
            "1.0.0+0.3.7",
            "1.3.7+build",
            "1.3.7+build.2.b8f12d7",
            "1.3.7+build.11.e0f985a",
            "2.0.0",
            "2.1.0",
            "2.2.0",
            "2.11.0",
            "2.11.1",
        ],
        # PEP 440 example comparisons
        [
            "1.0.dev456",
            "1.0.dev456+local",
            "1.0.dev457",
            "1.0a1",
            "1.0a1+local",
            "1.0a2.dev456",
            "1.0a2.dev456+local",
            "1.0a2.dev457",
            "1.0a2",
            "1.0a12.dev455",
            "1.0a12",
            "1.0b1.dev456",
            "1.0b2",
            "1.0b2.post345.dev456",
            "1.0b2.post345",
            "1.0rc1.dev456",
            "1.0rc1",
            "1.0",
            "1.0+local",
            "1.0.post456.dev34",
            "1.0.post456.dev34+local",
            "1.0.post456.dev35",
            "1.0.post456",
            "1.0.post456+local",
            "1.0.post457",
            "1.1.dev1",
        ],
        # PEP 440 local versions
        [
            "1.0",
            # Comparison and ordering of local versions considers each segment
            # of the local version (divided by a .) separately.
            "1.0+abc.2",
            # If a segment consists entirely of ASCII digits then
            # that section should be considered an integer for comparison purposes
            "1.0+abc.10",
            # and if a segment contains any ASCII letters then
            # that segment is compared lexicographically with case insensitivity.
            "1.0+ABD.1",
            # When comparing a numeric and lexicographic segment, the numeric section
            # always compares as greater than the lexicographic segment.
            "1.0+5",
            # Additionally, a local version with a greater number of segments will
            # always compare as greater than a local version with fewer segments,
            # as long as the shorter local version's segments match the beginning of
            # the longer local version's segments exactly.
            "1.0+5.0",
            "1.1",
        ],
    ],
)
def test_comparison(versions: list[str]) -> None:
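    # Each list is expected to be sorted ascending, so comparisons between any
    # two parsed versions must agree with comparisons between their indices.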
    for i in range(len(versions)):
        for j in range(len(versions)):
            a = Version.parse(versions[i])
            b = Version.parse(versions[j])

            assert (a < b) == (i < j)
            assert (a > b) == (i > j)
            assert (a <= b) == (i <= j)
            assert (a >= b) == (i >= j)
            assert (a == b) == (i == j)
            assert (a != b) == (i != j)


def test_equality() -> None:
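    # Leading zeros in release, post-release and local numeric segments are
    # insignificant.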
    assert Version.parse("1.2.3") == Version.parse("01.2.3")
    assert Version.parse("1.2.3") == Version.parse("1.02.3")
    assert Version.parse("1.2.3") == Version.parse("1.2.03")
    assert Version.parse("1.2.3-1") == Version.parse("1.2.3-01")
    assert Version.parse("1.2.3+1") == Version.parse("1.2.3+01")


def test_allows() -> None:
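    # An exact version allows local variants of itself but not other releases,
    # dev releases or post releases.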
    v = Version.parse("1.2.3")
    assert v.allows(v)
    assert not v.allows(Version.parse("1.2"))
    assert not v.allows(Version.parse("2.2.3"))
    assert not v.allows(Version.parse("1.3.3"))
    assert not v.allows(Version.parse("1.2.4"))
    assert not v.allows(Version.parse("1.2.3-dev"))
    assert not v.allows(Version.parse("1.2.3-1"))
    assert not v.allows(Version.parse("1.2.3-1+build"))
    assert v.allows(Version.parse("1.2.3+build"))


@pytest.mark.parametrize(
    ("version1", "version2"),
    [
        ("1", "1.0"),
        ("1", "1.0.0"),
        ("1", "1.0.0.0"),
        ("1.2", "1.2.0"),
        ("1.2", "1.2.0.0"),
        ("1.2", "1.2.0.0.0"),
        ("1.2.3", "1.2.3.0"),
        ("1.2.3", "1.2.3.0.0"),
        ("1.2.3.4", "1.2.3.4.0"),
        ("1.2.3.4", "1.2.3.4.0.0"),
        ("1.2.3.4a1", "1.2.3.4.0a1"),
    ],
)
def test_allows_zero_padding(version1: str, version2: str) -> None:
    v1 = Version.parse(version1)
    v2 = Version.parse(version2)
    assert v1.allows(v2)
    assert v2.allows(v1)
    assert v1.allows_all(v2)
    assert v2.allows_all(v1)
    assert v1.allows_any(v2)
    assert v2.allows_any(v1)


def test_allows_with_local() -> None:
    v = Version.parse("1.2.3+build.1")
    assert v.allows(v)
    assert not v.allows(Version.parse("1.2.3"))
    assert not v.allows(Version.parse("1.3.3"))
    assert not v.allows(Version.parse("1.2.3-dev"))
    assert not v.allows(Version.parse("1.2.3+build.2"))
    # local version with a greater number of segments will always compare as
    # greater than a local version with fewer segments
    assert not v.allows(Version.parse("1.2.3+build.1.0"))
    assert not v.allows(Version.parse("1.2.3-1"))
    assert not v.allows(Version.parse("1.2.3-1+build.1"))


def test_allows_with_post() -> None:
    v = Version.parse("1.2.3-1")
    assert v.allows(v)
    assert not v.allows(Version.parse("1.2.3"))
    assert not v.allows(Version.parse("1.2.3-2"))
    assert not v.allows(Version.parse("2.2.3"))
    assert not v.allows(Version.parse("1.2.3-dev"))
    assert not v.allows(Version.parse("1.2.3+build.2"))
    assert v.allows(Version.parse("1.2.3-1+build.1"))


def test_allows_all() -> None:
    v = Version.parse("1.2.3")

    assert v.allows_all(v)
    assert not v.allows_all(Version.parse("0.0.3"))
    assert not v.allows_all(
        VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4"))
    )
    assert not v.allows_all(VersionRange())
    assert v.allows_all(EmptyConstraint())


@pytest.mark.parametrize(
    ("version1", "version2", "expected"),
    [
        (
            Version.parse("1.2.3"),
            Version.parse("1.2.3"),
            True,
        ),
        (
            Version.parse("1.2.3"),
            Version.parse("1.2.3+cpu"),
            True,
        ),
        (
            Version.parse("1.2.3"),
            VersionRange(Version.parse("1.2.3+local"), include_min=True),
            True,
        ),
        (
            Version.parse("1.2.3+cpu"),
            Version.parse("1.2.3"),
            True,
        ),
        (
            Version.parse("1.2.3"),
            Version.parse("0.0.3"),
            False,
        ),
        (
            Version.parse("1.2.3"),
            VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4")),
            True,
        ),
        (
            Version.parse("1.2.3"),
            VersionRange(),
            True,
        ),
        (
            Version.parse("1.2.3"),
            EmptyConstraint(),
            False,
        ),
    ],
)
def test_allows_any(
    version1: VersionConstraint,
    version2: VersionConstraint,
    expected: bool,
) -> None:
    actual = version1.allows_any(version2)
    assert actual == expected


@pytest.mark.parametrize(
    ("version1", "version2", "expected"),
    [
        (
            Version.parse("1.2.3"),
            Version.parse("1.1.4"),
            EmptyConstraint(),
        ),
        (
            Version.parse("1.2.3"),
            VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4")),
            Version.parse("1.2.3"),
        ),
        (
            Version.parse("1.1.4"),
            VersionRange(Version.parse("1.2.3"), Version.parse("1.2.4")),
            EmptyConstraint(),
        ),
        (
            Version.parse("1.2.3"),
            Version.parse("1.2.3.post0"),
            EmptyConstraint(),
        ),
        (
            Version.parse("1.2.3"),
            Version.parse("1.2.3+local"),
            Version.parse("1.2.3+local"),
        ),
        (
            Version.parse("1.2.3"),
            VersionRange(Version.parse("1.2.3+local"), include_min=True),
            VersionRange(
                Version.parse("1.2.3+local"),
                Version.parse("1.2.4"),
                include_min=True,
                include_max=False,
            ),
        ),
    ],
)
def test_intersect(
    version1: VersionConstraint,
    version2: VersionConstraint,
    expected: VersionConstraint,
) -> None:
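    # Intersection must be commutative, so both argument orders are checked.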
    assert version1.intersect(version2) == expected
    assert version2.intersect(version1) == expected


def test_union() -> None:
    v = Version.parse("1.2.3")

    assert v.union(v) == v

    result = v.union(Version.parse("0.8.0"))
    assert result.allows(v)
    assert result.allows(Version.parse("0.8.0"))
    assert not result.allows(Version.parse("1.1.4"))

    range = VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4"))
    assert v.union(range) == range

    union = Version.parse("1.1.4").union(
        VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4"))
    )
    assert union == VersionRange(
        Version.parse("1.1.4"), Version.parse("1.2.4"), include_min=True
    )

    result = v.union(VersionRange(Version.parse("0.0.3"), Version.parse("1.1.4")))
    assert result.allows(v)
    assert result.allows(Version.parse("0.1.0"))


def test_difference() -> None:
    v = Version.parse("1.2.3")

    assert v.difference(v).is_empty()
    assert v.difference(Version.parse("0.8.0")) == v
    assert v.difference(
        VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4"))
    ).is_empty()
    assert (
        v.difference(VersionRange(Version.parse("1.4.0"), Version.parse("3.0.0"))) == v
    )


@pytest.mark.parametrize(
    "version,normalized_version",
    [
        (  # already normalized version
            "1!2.3.4.5.6a7.post8.dev9+local1.123.abc",
            "1!2.3.4.5.6a7.post8.dev9+local1.123.abc",
        ),
        # PEP 440 Normalization
        # Case sensitivity
        ("1.1RC1", "1.1rc1"),
        # Integer Normalization
        ("00", "0"),
        ("09000", "9000"),
        ("1.0+foo0100", "1.0+foo0100"),
        # Pre-release separators
        ("1.1.a1", "1.1a1"),
        ("1.1-a1", "1.1a1"),
        ("1.1_a1", "1.1a1"),
        ("1.1a.1", "1.1a1"),
        ("1.1a-1", "1.1a1"),
        ("1.1a_1", "1.1a1"),
        # Pre-release spelling
        ("1.1alpha1", "1.1a1"),
        ("1.1beta2", "1.1b2"),
        ("1.1c3", "1.1rc3"),
        ("1.1pre4", "1.1rc4"),
        ("1.1preview5", "1.1rc5"),
        # Implicit pre-release number
        ("1.2a", "1.2a0"),
        # Post release separators
        ("1.2.post2", "1.2.post2"),
        ("1.2-post2", "1.2.post2"),
        ("1.2_post2", "1.2.post2"),
        ("1.2post.2", "1.2.post2"),
        ("1.2post-2", "1.2.post2"),
        ("1.2post_2", "1.2.post2"),
        # Post release spelling
        ("1.0-r4", "1.0.post4"),
        ("1.0-rev4", "1.0.post4"),
        # Implicit post release number
        ("1.2.post", "1.2.post0"),
        # Implicit post releases
        ("1.0-1", "1.0.post1"),
        # Development release separators
        ("1.2.dev2", "1.2.dev2"),
        ("1.2-dev2", "1.2.dev2"),
        ("1.2_dev2", "1.2.dev2"),
        ("1.2dev.2", "1.2.dev2"),
        ("1.2dev-2", "1.2.dev2"),
        ("1.2dev_2", "1.2.dev2"),
        # Implicit development release number
        ("1.2.dev", "1.2.dev0"),
        # Local version segments
        ("1.0+ubuntu-1", "1.0+ubuntu.1"),
        ("1.0+ubuntu_1", "1.0+ubuntu.1"),
        # Preceding v character
        ("v1.0", "1.0"),
        # Leading and Trailing Whitespace
        (" 1.0 ", "1.0"),
        ("\t1.0\t", "1.0"),
        ("\n1.0\n", "1.0"),
        ("\r\n1.0\r\n", "1.0"),
        ("\f1.0\f", "1.0"),
        ("\v1.0\v", "1.0"),
    ],
)
def test_to_string_normalizes(version: str, normalized_version: str) -> None:
    assert Version.parse(version).to_string() == normalized_version


@pytest.mark.parametrize(
    "unsorted, sorted_",
    [
        (["1.0.3", "1.0.2", "1.0.1"], ["1.0.1", "1.0.2", "1.0.3"]),
        (["1.0.0.2", "1.0.0.0rc2"], ["1.0.0.0rc2", "1.0.0.2"]),
        (["1.0.0.0", "1.0.0.0rc2"], ["1.0.0.0rc2", "1.0.0.0"]),
        (["1.0.0.0.0", "1.0.0.0rc2"], ["1.0.0.0rc2", "1.0.0.0.0"]),
        (["1.0.0rc2", "1.0.0rc1"], ["1.0.0rc1", "1.0.0rc2"]),
        (["1.0.0rc2", "1.0.0b1"], ["1.0.0b1", "1.0.0rc2"]),
    ],
)
def test_versions_are_sortable(unsorted: list[str], sorted_: list[str]) -> None:
    unsorted_parsed = [Version.parse(u) for u in unsorted]
    sorted_parsed = [Version.parse(s) for s in sorted_]

    assert sorted(unsorted_parsed) == sorted_parsed
poetry-core-2.1.1/tests/constraints/version/test_version_constraint.py000066400000000000000000000034731475444614500265500ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

import pytest

from poetry.core.constraints.version import EmptyConstraint
from poetry.core.constraints.version import Version
from poetry.core.constraints.version import VersionRange
from poetry.core.constraints.version import VersionUnion


if TYPE_CHECKING:
    from poetry.core.constraints.version import VersionConstraint


@pytest.mark.parametrize(
    "constraint",
    [
        EmptyConstraint(),
        Version.parse("1"),
        VersionRange(Version.parse("1"), Version.parse("2")),
        VersionUnion(
            VersionRange(Version.parse("1"), Version.parse("2")),
            VersionRange(Version.parse("3"), Version.parse("4")),
        ),
    ],
)
def test_constraints_are_hashable(constraint: VersionConstraint) -> None:
    # We're just testing that constraints are hashable, there's nothing much to say
    # about the result.
    hash(constraint)


@pytest.mark.parametrize(
    ("constraint", "expected"),
    [
        (EmptyConstraint(), True),
        (Version.parse("1"), True),
        (VersionRange(), False),
        (VersionRange(Version.parse("1")), False),
        (VersionRange(max=Version.parse("2")), True),
        (VersionRange(Version.parse("1"), Version.parse("2")), True),
        (
            VersionUnion(
                VersionRange(Version.parse("1"), Version.parse("2")),
                VersionRange(Version.parse("3"), Version.parse("4")),
            ),
            True,
        ),
        (
            VersionUnion(
                VersionRange(Version.parse("1"), Version.parse("2")),
                VersionRange(Version.parse("3")),
            ),
            False,
        ),
    ],
)
def test_has_upper_bound(constraint: VersionConstraint, expected: bool) -> None:
    assert constraint.has_upper_bound() is expected
poetry-core-2.1.1/tests/constraints/version/test_version_range.py000066400000000000000000000542731475444614500254640ustar00rootroot00000000000000from __future__ import annotations

import pytest

from poetry.core.constraints.version import EmptyConstraint
from poetry.core.constraints.version import Version
from poetry.core.constraints.version import VersionRange
from poetry.core.constraints.version import parse_constraint


@pytest.fixture()
def v003() -> Version:
    return Version.parse("0.0.3")


@pytest.fixture()
def v010() -> Version:
    return Version.parse("0.1.0")


@pytest.fixture()
def v080() -> Version:
    return Version.parse("0.8.0")


@pytest.fixture()
def v072() -> Version:
    return Version.parse("0.7.2")


@pytest.fixture()
def v114() -> Version:
    return Version.parse("1.1.4")


@pytest.fixture()
def v123() -> Version:
    return Version.parse("1.2.3")


@pytest.fixture()
def v124() -> Version:
    return Version.parse("1.2.4")


@pytest.fixture()
def v130() -> Version:
    return Version.parse("1.3.0")


@pytest.fixture()
def v140() -> Version:
    return Version.parse("1.4.0")


@pytest.fixture()
def v200() -> Version:
    return Version.parse("2.0.0")


@pytest.fixture()
def v234() -> Version:
    return Version.parse("2.3.4")


@pytest.fixture()
def v250() -> Version:
    return Version.parse("2.5.0")


@pytest.fixture()
def v300() -> Version:
    return Version.parse("3.0.0")


@pytest.fixture()
def v300b1() -> Version:
    return Version.parse("3.0.0b1")


@pytest.mark.parametrize(
    ("constraint", "check_version", "allowed"),
    [
        # Inclusive ordering
        ("<=3.0.0", "3.0.0", True),
        ("<=3.0.0", "3.0.0+local.1", True),
        (">=3.0.0", "3.0.0", True),
        (">=3.0.0", "3.0.0+local.1", True),
        (">=3.0.0", "3.0.0", True),
        (">=3.0.0", "3.0.0-1", True),
        ("<=3.0.0+local.1", "3.0.0", True),
        ("<=3.0.0+local.1", "3.0.0+local.1", True),
        ("<=3.0.0+local.1", "3.0.0+local.2", False),
        ("<=3.0.0+local.1", "3.0.0-1", False),
        ("<=3.0.0+local.1", "3.0.0-1+local.1", False),
        (">=3.0.0+local.1", "3.0.0", False),
        (">=3.0.0+local.1", "3.0.0+local.1", True),
        (">=3.0.0+local.1", "3.0.0+local.2", True),
        (">=3.0.0+local.1", "3.0.0-1", True),
        (">=3.0.0+local.1", "3.0.0-1+local.1", True),
        ("<=3.0.0+local.2", "3.0.0+local.1", True),
        ("<=3.0.0+local.2", "3.0.0+local.2", True),
        (">=3.0.0+local.2", "3.0.0+local.1", False),
        (">=3.0.0+local.2", "3.0.0+local.2", True),
        (">=3.0.0+local.2", "3.0.0-1+local.1", True),
        ("<=3.0.0-1", "3.0.0", True),
        ("<=3.0.0-1", "3.0.0+local.1", True),
        ("<=3.0.0-1", "3.0.0+local.2", True),
        ("<=3.0.0-1", "3.0.0-1", True),
        ("<=3.0.0-1", "3.0.0-1+local.1", True),
        ("<=3.0.0-1", "3.0.0-2", False),
        (">=3.0.0-1", "3.0.0", False),
        (">=3.0.0-1", "3.0.0+local.1", False),
        (">=3.0.0-1", "3.0.0+local.2", False),
        (">=3.0.0-1", "3.0.0-1+local.1", True),
        (">=3.0.0-1", "3.0.0-2", True),
        ("<=3.0.0-1+local.1", "3.0.0+local.1", True),
        ("<=3.0.0-1+local.1", "3.0.0+local.2", True),
        ("<=3.0.0-1+local.1", "3.0.0-1", True),
        (">=3.0.0-1+local.1", "3.0.0+local.1", False),
        (">=3.0.0-1+local.1", "3.0.0+local.2", False),
        (">=3.0.0-1+local.1", "3.0.0-1", False),
        ("<=3.0.0-2", "3.0.0-1", True),
        ("<=3.0.0-2", "3.0.0-2", True),
        (">=3.0.0-2", "3.0.0-1", False),
        (">=3.0.0-2", "3.0.0-2", True),
        # Exclusive ordering
        (">1.7", "1.7.0", False),
        (">1.7", "1.7.1", True),
        (">1.7", "1.6.1", False),
        ("<1.7", "1.7.0", False),
        ("<1.7", "1.7.1", False),
        ("<1.7", "1.6.1", True),
        ## >V MUST NOT allow a post-release of the given version unless V itself is a post release
        (">1.7", "1.7.0.post1", False),
        (">1.7.post2", "1.7.0", False),
        (">1.7.post2", "1.7.1", True),
        (">1.7.post2", "1.7.0.post2", False),
        (">1.7.post2", "1.7.0.post3", True),
        ## >V MUST NOT match a local version of the specified version
        (">1.7.0", "1.7.0+local.1", False),
        ("<1.7.0", "1.7.0+local.1", False),  # spec does not clarify this
        ("<1.7.0+local.2", "1.7.0+local.1", False),  # spec does not clarify this
        ## Local versions
        (">=3.0.0+cuda", "3.0.0+cuda", True),
        (">=3.0.0+cpu", "3.0.0+cuda", True),  # cuda > cpu (lexicographically)
    ],
)
def test_version_ranges(constraint: str, check_version: str, allowed: bool) -> None:
    assert parse_constraint(constraint).allows(Version.parse(check_version)) == allowed


def test_allows_all(
    v123: Version, v124: Version, v140: Version, v250: Version, v300: Version
) -> None:
    assert VersionRange(v123, v250).allows_all(EmptyConstraint())

    range = VersionRange(v123, v250, include_max=True)
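    # min is exclusive by default, so the lower bound itself lies outside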
    assert not range.allows_all(v123)
    assert range.allows_all(v124)
    assert range.allows_all(v250)
    assert not range.allows_all(v300)


def test_allows_all_with_no_min(
    v080: Version, v140: Version, v250: Version, v300: Version
) -> None:
    range = VersionRange(max=v250)
    assert range.allows_all(VersionRange(v080, v140))
    assert not range.allows_all(VersionRange(v080, v300))
    assert range.allows_all(VersionRange(max=v140))
    assert not range.allows_all(VersionRange(max=v300))
    assert range.allows_all(range)
    assert not range.allows_all(VersionRange())


def test_allows_all_with_no_max(
    v003: Version, v010: Version, v080: Version, v140: Version
) -> None:
    range = VersionRange(min=v010)
    assert range.allows_all(VersionRange(v080, v140))
    assert not range.allows_all(VersionRange(v003, v140))
    assert range.allows_all(VersionRange(v080))
    assert not range.allows_all(VersionRange(v003))
    assert range.allows_all(range)
    assert not range.allows_all(VersionRange())


def test_allows_all_bordering_range_not_more_inclusive(
    v010: Version, v250: Version
) -> None:
    # Allows bordering range that is not more inclusive
    exclusive = VersionRange(v010, v250)
    inclusive = VersionRange(v010, v250, True, True)
    assert inclusive.allows_all(exclusive)
    assert inclusive.allows_all(inclusive)
    assert not exclusive.allows_all(inclusive)
    assert exclusive.allows_all(exclusive)


def test_allows_all_contained_unions(
    v010: Version,
    v114: Version,
    v123: Version,
    v124: Version,
    v140: Version,
    v200: Version,
    v234: Version,
) -> None:
    # Allows unions that are completely contained
    range = VersionRange(v114, v200)
    assert range.allows_all(VersionRange(v123, v124).union(v140))
    assert not range.allows_all(VersionRange(v010, v124).union(v140))
    assert not range.allows_all(VersionRange(v123, v234).union(v140))


def test_allows_any(
    v003: Version,
    v010: Version,
    v072: Version,
    v080: Version,
    v114: Version,
    v123: Version,
    v124: Version,
    v140: Version,
    v200: Version,
    v234: Version,
    v250: Version,
    v300: Version,
) -> None:
    # disallows an empty constraint
    assert not VersionRange(v123, v250).allows_any(EmptyConstraint())

    # allows allowed versions
    range = VersionRange(v123, v250, include_max=True)
    assert not range.allows_any(v123)
    assert range.allows_any(v124)
    assert range.allows_any(v250)
    assert not range.allows_any(v300)

    # with no min
    range = VersionRange(max=v200)
    assert range.allows_any(VersionRange(v140, v300))
    assert not range.allows_any(VersionRange(v234, v300))
    assert range.allows_any(VersionRange(v140))
    assert not range.allows_any(VersionRange(v234))
    assert range.allows_any(range)

    # with no max
    range = VersionRange(min=v072)
    assert range.allows_any(VersionRange(v003, v140))
    assert not range.allows_any(VersionRange(v003, v010))
    assert range.allows_any(VersionRange(max=v080))
    assert not range.allows_any(VersionRange(max=v003))
    assert range.allows_any(range)

    # with min and max
    range = VersionRange(v072, v200)
    assert range.allows_any(VersionRange(v003, v140))
    assert range.allows_any(VersionRange(v140, v300))
    assert not range.allows_any(VersionRange(v003, v010))
    assert not range.allows_any(VersionRange(v234, v300))
    assert not range.allows_any(VersionRange(max=v010))
    assert not range.allows_any(VersionRange(v234))
    assert range.allows_any(range)

    # allows a bordering range when both are inclusive
    assert not VersionRange(max=v250).allows_any(VersionRange(min=v250))
    assert not VersionRange(max=v250, include_max=True).allows_any(
        VersionRange(min=v250)
    )
    assert not VersionRange(max=v250).allows_any(
        VersionRange(min=v250, include_min=True)
    )
    assert not VersionRange(min=v250).allows_any(VersionRange(max=v250))
    assert VersionRange(max=v250, include_max=True).allows_any(
        VersionRange(min=v250, include_min=True)
    )

    # allows unions that are partially contained
    range = VersionRange(v114, v200)
    assert range.allows_any(VersionRange(v010, v080).union(v140))
    assert range.allows_any(VersionRange(v123, v234).union(v300))
    assert not range.allows_any(VersionRange(v234, v300).union(v010))

    # a pre-release min does not allow anything below itself
    range = VersionRange(Version.parse("1.9b1"), include_min=True)
    assert not range.allows_any(
        VersionRange(Version.parse("1.8.0"), Version.parse("1.9.0b0"), include_min=True)
    )


def test_intersect(
    v114: Version,
    v123: Version,
    v124: Version,
    v200: Version,
    v250: Version,
    v300: Version,
) -> None:
    # two overlapping ranges
    assert VersionRange(v123, v250).intersect(VersionRange(v200, v300)) == VersionRange(
        v200, v250
    )

    # a non-overlapping range allows no versions
    a = VersionRange(v114, v124)
    b = VersionRange(v200, v250)
    assert a.intersect(b).is_empty()

    # adjacent ranges allow no versions if exclusive
    a = VersionRange(v114, v124)
    b = VersionRange(v124, v200)
    assert a.intersect(b).is_empty()

    # adjacent ranges allow the boundary version if inclusive
    a = VersionRange(v114, v124, include_max=True)
    b = VersionRange(v124, v200, include_min=True)
    assert a.intersect(b) == v124

    # with an open range
    open = VersionRange()
    a = VersionRange(v114, v124)
    assert open.intersect(open) == open
    assert open.intersect(a) == a

    # returns the version if the range allows it
    assert VersionRange(v114, v124).intersect(v123) == v123
    assert VersionRange(v123, v124).intersect(v114).is_empty()


def test_union(
    v003: Version,
    v010: Version,
    v072: Version,
    v080: Version,
    v114: Version,
    v123: Version,
    v124: Version,
    v130: Version,
    v140: Version,
    v200: Version,
    v234: Version,
    v250: Version,
    v300: Version,
) -> None:
    # with a version returns the range if it contains the version
    range = VersionRange(v114, v124)
    assert range.union(v123) == range

    # with a version on the edge of the range, expands the range
    range = VersionRange(v114, v124)
    assert range.union(v124) == VersionRange(v114, v124, include_max=True)
    assert range.union(v114) == VersionRange(v114, v124, include_min=True)

    # with a version allows both the range and the version if the range
    # doesn't contain the version
    result = VersionRange(v003, v114).union(v124)
    assert result.allows(v010)
    assert not result.allows(v123)
    assert result.allows(v124)

    # returns a VersionUnion for a disjoint range
    result = VersionRange(v003, v114).union(VersionRange(v130, v200))
    assert result.allows(v080)
    assert not result.allows(v123)
    assert result.allows(v140)

    # considers open ranges disjoint
    result = VersionRange(v003, v114).union(VersionRange(v114, v200))
    assert result.allows(v080)
    assert not result.allows(v114)
    assert result.allows(v140)
    result = VersionRange(v114, v200).union(VersionRange(v003, v114))
    assert result.allows(v080)
    assert not result.allows(v114)
    assert result.allows(v140)

    # returns a merged range for an overlapping range
    result = VersionRange(v003, v114).union(VersionRange(v080, v200))
    assert result == VersionRange(v003, v200)

    # considers closed ranges overlapping
    result = VersionRange(v003, v114, include_max=True).union(VersionRange(v114, v200))
    assert result == VersionRange(v003, v200)
    result = VersionRange(v003, v114).union(VersionRange(v114, v200, include_min=True))
    assert result == VersionRange(v003, v200)


@pytest.mark.parametrize(
    ("version", "spec", "expected"),
    [
        (v, s, True)
        for v, s in [
            # Test the equality operation
            ("2.0", "==2"),
            ("2.0", "==2.0"),
            ("2.0", "==2.0.0"),
            ("2.0+deadbeef", "==2"),
            ("2.0+deadbeef", "==2.0"),
            ("2.0+deadbeef", "==2.0.0"),
            ("2.0+deadbeef", "==2+deadbeef"),
            ("2.0+deadbeef", "==2.0+deadbeef"),
            ("2.0+deadbeef", "==2.0.0+deadbeef"),
            ("2.0+deadbeef.0", "==2.0.0+deadbeef.00"),
            # Test the equality operation with a prefix
            ("2.dev1", "==2.*"),
            ("2a1", "==2.*"),
            ("2a1.post1", "==2.*"),
            ("2b1", "==2.*"),
            ("2b1.dev1", "==2.*"),
            ("2c1", "==2.*"),
            ("2c1.post1.dev1", "==2.*"),
            ("2rc1", "==2.*"),
            ("2", "==2.*"),
            ("2.0", "==2.*"),
            ("2.0.0", "==2.*"),
            ("2.0.post1", "==2.0.post1.*"),
            ("2.0.post1.dev1", "==2.0.post1.*"),
            ("2.1+local.version", "==2.1.*"),
            # Test the in-equality operation
            ("2.1", "!=2"),
            ("2.1", "!=2.0"),
            ("2.0.1", "!=2"),
            ("2.0.1", "!=2.0"),
            ("2.0.1", "!=2.0.0"),
            ("2.0", "!=2.0+deadbeef"),
            # Test the in-equality operation with a prefix
            ("2.0", "!=3.*"),
            ("2.1", "!=2.0.*"),
            # Test the greater than equal operation
            ("2.0", ">=2"),
            ("2.0", ">=2.0"),
            ("2.0", ">=2.0.0"),
            ("2.0.post1", ">=2"),
            ("2.0.post1.dev1", ">=2"),
            ("3", ">=2"),
            # Test the less than equal operation
            ("2.0", "<=2"),
            ("2.0", "<=2.0"),
            ("2.0", "<=2.0.0"),
            ("2.0.dev1", "<=2"),
            ("2.0a1", "<=2"),
            ("2.0a1.dev1", "<=2"),
            ("2.0b1", "<=2"),
            ("2.0b1.post1", "<=2"),
            ("2.0c1", "<=2"),
            ("2.0c1.post1.dev1", "<=2"),
            ("2.0rc1", "<=2"),
            ("1", "<=2"),
            # Test the greater than operation
            ("3", ">2"),
            ("2.1", ">2.0"),
            ("2.0.1", ">2"),
            ("2.1.post1", ">2"),
            ("2.1+local.version", ">2"),
            # Test the less than operation
            ("1", "<2"),
            ("2.0", "<2.1"),
            ("2.0.dev0", "<2.1"),
            # Test the compatibility operation
            ("1", "~=1.0"),
            ("1.0.1", "~=1.0"),
            ("1.1", "~=1.0"),
            ("1.9999999", "~=1.0"),
            ("1.1", "~=1.0a1"),
            # Test that epochs are handled sanely
            ("2!1.0", "~=2!1.0"),
            ("2!1.0", "==2!1.*"),
            ("2!1.0", "==2!1.0"),
            ("2!1.0", "!=1.0"),
            ("1.0", "!=2!1.0"),
            ("1.0", "<=2!0.1"),
            ("2!1.0", ">=2.0"),
            ("1.0", "<2!0.1"),
            ("2!1.0", ">2.0"),
            # Test some normalization rules
            ("2.0.5", ">2.0dev"),
        ]
    ]
    + [
        (v, s, False)
        for v, s in [
            # Test the equality operation
            ("2.1", "==2"),
            ("2.1", "==2.0"),
            ("2.1", "==2.0.0"),
            ("2.0", "==2.0+deadbeef"),
            # Test the equality operation with a prefix
            ("2.0", "==3.*"),
            ("2.1", "==2.0.*"),
            # Test the in-equality operation
            ("2.0", "!=2"),
            ("2.0", "!=2.0"),
            ("2.0", "!=2.0.0"),
            ("2.0+deadbeef", "!=2"),
            ("2.0+deadbeef", "!=2.0"),
            ("2.0+deadbeef", "!=2.0.0"),
            ("2.0+deadbeef", "!=2+deadbeef"),
            ("2.0+deadbeef", "!=2.0+deadbeef"),
            ("2.0+deadbeef", "!=2.0.0+deadbeef"),
            ("2.0+deadbeef.0", "!=2.0.0+deadbeef.00"),
            # Test the in-equality operation with a prefix
            ("2.dev1", "!=2.*"),
            ("2a1", "!=2.*"),
            ("2a1.post1", "!=2.*"),
            ("2b1", "!=2.*"),
            ("2b1.dev1", "!=2.*"),
            ("2c1", "!=2.*"),
            ("2c1.post1.dev1", "!=2.*"),
            ("2rc1", "!=2.*"),
            ("2", "!=2.*"),
            ("2.0", "!=2.*"),
            ("2.0.0", "!=2.*"),
            ("2.0.post1", "!=2.0.post1.*"),
            ("2.0.post1.dev1", "!=2.0.post1.*"),
            # Test the greater than equal operation
            ("2.0.dev1", ">=2"),
            ("2.0a1", ">=2"),
            ("2.0a1.dev1", ">=2"),
            ("2.0b1", ">=2"),
            ("2.0b1.post1", ">=2"),
            ("2.0c1", ">=2"),
            ("2.0c1.post1.dev1", ">=2"),
            ("2.0rc1", ">=2"),
            ("1", ">=2"),
            # Test the less than equal operation
            ("2.0.post1", "<=2"),
            ("2.0.post1.dev1", "<=2"),
            ("3", "<=2"),
            # Test the greater than operation
            ("1", ">2"),
            ("2.0.dev1", ">2"),
            ("2.0a1", ">2"),
            ("2.0a1.post1", ">2"),
            ("2.0b1", ">2"),
            ("2.0b1.dev1", ">2"),
            ("2.0c1", ">2"),
            ("2.0c1.post1.dev1", ">2"),
            ("2.0rc1", ">2"),
            ("2.0", ">2"),
            ("2.0.post1", ">2"),
            ("2.0.post1.dev1", ">2"),
            ("2.0+local.version", ">2"),
            # Test the less than operation
            ("2.0.dev1", "<2"),
            ("2.0a1", "<2"),
            ("2.0a1.post1", "<2"),
            ("2.0b1", "<2"),
            ("2.0b2.dev1", "<2"),
            ("2.0c1", "<2"),
            ("2.0c1.post1.dev1", "<2"),
            ("2.0rc1", "<2"),
            ("2.0", "<2"),
            ("2.post1", "<2"),
            ("2.post1.dev1", "<2"),
            ("3", "<2"),
            # Test the compatibility operation
            ("2.0", "~=1.0"),
            ("1.1.0", "~=1.0.0"),
            ("1.1.post1", "~=1.0.0"),
            # Test that epochs are handled sanely
            ("1.0", "~=2!1.0"),
            ("2!1.0", "~=1.0"),
            ("2!1.0", "==1.0"),
            ("1.0", "==2!1.0"),
            ("2!1.0", "==1.*"),
            ("1.0", "==2!1.*"),
            ("2!1.0", "!=2!1.0"),
        ]
    ],
)
def test_specifiers(version: str, spec: str, expected: bool) -> None:
    """
    Test derived from
    https://github.com/pypa/packaging/blob/8b86d85797b9f26d98ecfbe0271ce4dc9495d98c/tests/test_specifiers.py#L469
    """
    constraint = parse_constraint(spec)

    v = Version.parse(version)

    if expected:
        # Test that the plain string form works
        # assert version in spec
        assert constraint.allows(v)

        # Test that the version instance form works
        # assert version in spec
        assert constraint.allows(v)
    else:
        # Test that the plain string form works
        # assert version not in spec
        assert not constraint.allows(v)

        # Test that the version instance form works
        # assert version not in spec
        assert not constraint.allows(v)


@pytest.mark.parametrize(
    ("include_min", "include_max", "expected"),
    [
        (True, False, True),
        (False, False, False),
        (False, True, False),
        (True, True, False),
    ],
)
def test_is_single_wildcard_range_include_min_include_max(
    include_min: bool, include_max: bool, expected: bool
) -> None:
    version_range = VersionRange(
        Version.parse("1.2.dev0"), Version.parse("1.3"), include_min, include_max
    )
    assert version_range.is_single_wildcard_range is expected


@pytest.mark.parametrize(
    ("min", "max", "expected"),
    [
        # simple wildcard ranges
        ("1.2.dev0", "1.3", True),
        ("1.2.dev0", "1.3.dev0", True),
        ("1.dev0", "2", True),
        ("1.2.3.4.5.dev0", "1.2.3.4.6", True),
        # simple non-wildcard ranges
        (None, "1.3", False),
        ("1.2.dev0", None, False),
        (None, None, False),
        ("1.2a0", "1.3", False),
        ("1.2.post0", "1.3", False),
        ("1.2.dev0+local", "1.3", False),
        ("1.2", "1.3", False),
        ("1.2.dev1", "1.3", False),
        ("1.2.dev0", "1.3.post0.dev0", False),
        ("1.2.dev0", "1.3a0.dev0", False),
        ("1.2.dev0", "1.3.dev0+local", False),
        ("1.2.dev0", "1.3.dev1", False),
        # more complicated ranges
        ("1.dev0", "1.0.0.1", True),
        ("1.2.dev0", "1.3.0.0", True),
        ("1.2.dev0", "1.3.0.0.dev0", True),
        ("1.2.0.dev0", "1.3", True),
        ("1.2.1.dev0", "1.3.0.0", False),
        ("1.2.dev0", "1.4", False),
        ("1.2.dev0", "2.3", False),
        # post releases
        ("2.0.post1.dev0", "2.0.post2", True),
        ("2.0.post1.dev0", "2.0.post2.dev0", True),
        ("2.0.post1.dev1", "2.0.post2", False),
        ("2.0.post1.dev0", "2.0.post2.dev1", False),
        ("2.0.post1.dev0", "2.0.post3", False),
        ("2.0.post1.dev0", "2.0.post1", False),
    ],
)
def test_is_single_wildcard_range(
    min: str | None, max: str | None, expected: bool
) -> None:
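    # A single wildcard range is one expressible as "==X.Y.*": an inclusive min
    # at X.Y.dev0 and a max at (or at the dev0 of) the next release series.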
    version_range = VersionRange(
        Version.parse(min) if min else None,
        Version.parse(max) if max else None,
        include_min=True,
    )
    assert version_range.is_single_wildcard_range is expected


@pytest.mark.parametrize(
    ("version", "expected"),
    [
        # simple ranges
        ("*", "*"),
        (">1.2", ">1.2"),
        (">=1.2", ">=1.2"),
        ("<1.3", "<1.3"),
        ("<=1.3", "<=1.3"),
        (">=1.2,<1.3", ">=1.2,<1.3"),
        # wildcard ranges
        ("1.*", "==1.*"),
        ("1.0.*", "==1.0.*"),
        ("1.2.*", "==1.2.*"),
        ("1.2.3.4.5.*", "==1.2.3.4.5.*"),
        ("2.0.post1.*", "==2.0.post1.*"),
        ("2.1.post0.*", "==2.1.post0.*"),
        (">=1.dev0,<2", "==1.*"),
    ],
)
def test_str(version: str, expected: str) -> None:
    assert str(parse_constraint(version)) == expected
poetry-core-2.1.1/tests/constraints/version/test_version_union.py000066400000000000000000000114451475444614500255120ustar00rootroot00000000000000from __future__ import annotations

import pytest

from poetry.core.constraints.version import Version
from poetry.core.constraints.version import VersionRange
from poetry.core.constraints.version import VersionUnion
from poetry.core.constraints.version import parse_constraint


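# A union "excludes a single wildcard range" if it is exactly the complement of
# one PEP 440 wildcard specifier: for example, "<1 || >=2.dev0" allows the same
# versions as "!=1.*" (see test_str at the end of this module).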
@pytest.mark.parametrize(
    ("ranges", "expected"),
    [
        (  # positive
            [
                VersionRange(max=Version.parse("1.2")),
                VersionRange(Version.parse("1.3.dev0"), include_min=True),
            ],
            True,
        ),
        (  # positive inverted order
            [
                VersionRange(Version.parse("1.3.dev0"), include_min=True),
                VersionRange(max=Version.parse("1.2")),
            ],
            True,
        ),
        (  # too many ranges
            [
                VersionRange(max=Version.parse("1.2")),
                VersionRange(Version.parse("1.3"), include_min=True),
                VersionRange(max=Version.parse("1.4")),
            ],
            False,
        ),
        ([VersionRange(max=Version.parse("1.2"))], False),  # too few ranges
        (  # additional include_max
            [
                VersionRange(max=Version.parse("1.2"), include_max=True),
                VersionRange(Version.parse("1.3"), include_min=True),
            ],
            False,
        ),
        (  # missing include_min
            [
                VersionRange(max=Version.parse("1.2")),
                VersionRange(Version.parse("1.3")),
            ],
            False,
        ),
        (  # additional min
            [
                VersionRange(Version.parse("1.0"), Version.parse("1.2")),
                VersionRange(Version.parse("1.3"), include_min=True),
            ],
            False,
        ),
        (  # additional max
            [
                VersionRange(max=Version.parse("1.2")),
                VersionRange(
                    Version.parse("1.3"), Version.parse("1.4"), include_min=True
                ),
            ],
            False,
        ),
        (  # missing max
            [
                VersionRange(),
                VersionRange(Version.parse("1.3"), include_min=True),
            ],
            False,
        ),
        (  # missing min
            [
                VersionRange(max=Version.parse("1.2")),
                VersionRange(include_min=True),
            ],
            False,
        ),
    ],
)
def test_excludes_single_wildcard_range_basics(
    ranges: list[VersionRange], expected: bool
) -> None:
    assert VersionUnion(*ranges).excludes_single_wildcard_range is expected


@pytest.mark.parametrize(
    ("max", "min", "expected"),
    [
        # simple exclude wildcard range
        ("1.2", "1.3.dev0", True),
        ("1.2.dev0", "1.3.dev0", True),
        ("1", "2.dev0", True),
        ("1.2.3.4.5", "1.2.3.4.6.dev0", True),
        # simple non exclude wildcard range
        ("1.2", "1.3", False),
        ("1.2", "1.3a0.dev0", False),
        ("1.2", "1.3.post0.dev0", False),
        ("1.2", "1.3.dev0+local", False),
        ("1.2", "1.3.dev1", False),
        ("1.2.post0", "1.3.dev0", False),
        ("1.2a0", "1.3.dev0", False),
        ("1.2+local", "1.3.dev0", False),
        ("1.2.dev1", "1.3.dev0", False),
        # more complicated cases
        ("1", "1.0.0.1.dev0", True),
        ("1.2.0.0", "1.3.dev0", True),
        ("1.2.0.0.dev0", "1.3.dev0", True),
        ("1.2", "1.3.0.dev0", True),
        ("1.2.0.0", "1.3.1.dev0", False),
        ("1.2", "1.4.dev0", False),
        ("1.2", "2.3.dev0", False),
        # post releases
        ("2.0.post1", "2.0.post2.dev0", True),
        ("2.0.post1.dev0", "2.0.post2.dev0", True),
        ("2.0.post1.dev1", "2.0.post2.dev0", False),
        ("2.0.post1", "2.0.post2.dev1", False),
        ("2.0.post0", "2.0.post2.dev0", False),
        ("2.0.post1", "2.0.post1.dev0", False),
    ],
)
def test_excludes_single_wildcard_range(max: str, min: str, expected: bool) -> None:
    version_union = VersionUnion(
        VersionRange(max=Version.parse(max)),
        VersionRange(Version.parse(min), include_min=True),
    )
    assert version_union.excludes_single_wildcard_range is expected


@pytest.mark.parametrize(
    ("version", "expected"),
    [
        # simple unions
        ("<1 || >=2", "<1 || >=2"),
        ("<1.2 || >=2.3.dev0", "<1.2 || >=2.3.dev0"),
        # version exclusions
        ("!=1.0", "!=1.0"),
        ("!=1.0+local", "!=1.0+local"),
        # wildcard exclusions
        ("!=1.*", "!=1.*"),
        ("!=1.0.*", "!=1.0.*"),
        ("!=1.2.*", "!=1.2.*"),
        ("!=1.2.3.4.5.*", "!=1.2.3.4.5.*"),
        ("!=2.0.post1.*", "!=2.0.post1.*"),
        ("!=2.1.post0.*", "!=2.1.post0.*"),
        ("<1 || >=2.dev0", "!=1.*"),
    ],
)
def test_str(version: str, expected: str) -> None:
    assert str(parse_constraint(version)) == expected
poetry-core-2.1.1/tests/fixtures/000077500000000000000000000000001475444614500170145ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/complete.toml000066400000000000000000000031401475444614500215170ustar00rootroot00000000000000[tool.poetry]
name = "poetry"
version = "0.5.0"
description = "Python dependency management and packaging made easy."
authors = [
    "Sébastien Eustace "
]
maintainers = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]
classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.8"  # Compatible python versions must be declared here
toml = "^0.9"
# Dependencies with extras
requests = { version = "^2.13", extras = [ "security" ] }
# Python specific dependencies with prereleases allowed
pathlib2 = { version = "^2.2", python = "~3.8", allow-prereleases = true }
# Git dependencies
cleo = { git = "https://github.com/sdispater/cleo.git", branch = "main" }

# Optional dependencies (extras)
pendulum = { version = "^1.4", optional = true }

[tool.poetry.extras]
time = [ "pendulum" ]

[tool.poetry.group.dev.dependencies]
pytest = "^3.0"
pytest-cov = "^2.4"

[tool.poetry.scripts]
my-script = 'my_package:main'
sample_pyscript = { reference = "script-files/sample_script.py", type= "file" }
sample_shscript = { reference = "script-files/sample_script.sh", type= "file" }

[tool.poetry.plugins."poetry.application.plugin"]
my-command = "my_package.plugins:MyApplicationPlugin"


[[tool.poetry.source]]
name = "foo"
url = "https://bar.com"
poetry-core-2.1.1/tests/fixtures/complete_duplicates.toml000066400000000000000000000052721475444614500237440ustar00rootroot00000000000000[project]
name = "poetry"
version = "0.5.0"
description = "Python dependency management and packaging made easy."
readme = "README.rst"
requires-python = ">=3.8"
license = { "text" = "MIT" }
authors = [
    { "name" = "Sébastien Eustace", "email" = "sebastien@eustace.io" }
]
maintainers = [
    { name = "Sébastien Eustace", email = "sebastien@eustace.io" }
]
keywords = ["packaging", "dependency", "poetry"]
classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]
dependencies = [
    "toml>=0.9",
    "requests[security]>=2.13,<3.0",
    "pathlib2 ~=2.2 ; python_version == '3.8'",
    "cleo @ git+https://github.com/sdispater/cleo.git@main",
]

[project.urls]
homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

[project.optional-dependencies]
time = [ "pendulum>1.4,<2.0" ]

[project.scripts]
my-script = "my_package:main"

[project.entry-points."poetry.application.plugin"]
my-command = "my_package.plugins:MyApplicationPlugin"

[tool.poetry]
name = "poetry"
version = "0.5.0"
description = "Python dependency management and packaging made easy."
authors = [
    "Sébastien Eustace "
]
maintainers = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]
classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.8"  # Compatible python versions must be declared here
toml = "^0.9"
# Dependencies with extras
requests = { version = "^2.13", extras = [ "security" ] }
# Python specific dependencies with prereleases allowed
pathlib2 = { version = "^2.2", python = "~3.8", allow-prereleases = true }
# Git dependencies
cleo = { git = "https://github.com/sdispater/cleo.git", branch = "master" }

# Optional dependencies (extras)
pendulum = { version = "^1.4", optional = true }

[tool.poetry.extras]
time = [ "pendulum" ]

[tool.poetry.group.dev.dependencies]
pytest = "^3.0"
pytest-cov = "^2.4"

[tool.poetry.scripts]
my-script = 'my_package:main'
sample_pyscript = { reference = "script-files/sample_script.py", type= "file" }
sample_shscript = { reference = "script-files/sample_script.sh", type= "file" }

[tool.poetry.plugins."poetry.application.plugin"]
my-command = "my_package.plugins:MyApplicationPlugin"


[[tool.poetry.source]]
name = "foo"
url = "https://bar.com"
poetry-core-2.1.1/tests/fixtures/complete_new.toml000066400000000000000000000035121475444614500223730ustar00rootroot00000000000000[project]
name = "poetry"
version = "0.5.0"
description = "Python dependency management and packaging made easy."
readme = "README.rst"
requires-python = ">=3.8"
license = { "text" = "MIT" }
authors = [
    { "name" = "Sébastien Eustace", "email" = "sebastien@eustace.io" }
]
maintainers = [
    { name = "Sébastien Eustace", email = "sebastien@eustace.io" }
]
keywords = ["packaging", "dependency", "poetry"]
classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]
dependencies = [
    "toml>=0.9",
    "requests[security]>=2.13,<3.0",
    "pathlib2 ~=2.2 ; python_version == '3.8'",
    "cleo @ git+https://github.com/sdispater/cleo.git@main",
]

[project.urls]
homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

[project.optional-dependencies]
time = [ "pendulum>1.4,<2.0" ]

[project.scripts]
my-script = "my_package:main"

[project.entry-points."poetry.application.plugin"]
my-command = "my_package.plugins:MyApplicationPlugin"

# Requirements
[tool.poetry.dependencies]
python = "^3.8"  # Compatible python versions must be declared here
toml = "^0.9"
# Dependencies with extras
requests = { version = "^2.13", extras = [ "security" ] }
# Python specific dependencies with prereleases allowed
pathlib2 = { version = "^2.2", python = "~3.8", allow-prereleases = true }
# Git dependencies
cleo = { git = "https://github.com/sdispater/cleo.git", branch = "master" }

[tool.poetry.group.dev.dependencies]
pytest = "^3.0"
pytest-cov = "^2.4"

[tool.poetry.scripts]
sample_pyscript = { reference = "script-files/sample_script.py", type= "file" }
sample_shscript = { reference = "script-files/sample_script.sh", type= "file" }


[[tool.poetry.source]]
name = "foo"
url = "https://bar.com"
poetry-core-2.1.1/tests/fixtures/complete_new_dynamic_invalid.toml000066400000000000000000000036641475444614500256150ustar00rootroot00000000000000[project]
dynamic = [
    "name",  # This is not valid and will trigger an error in Factory.validate()
    "version",
    "description",
    "readme",
    "requires-python",
    "license",
    "authors",
    "maintainers",
    "keywords",
    "classifiers",
    "urls",
    "dependencies",
    "optional-dependencies",
    "scripts",
]

[tool.poetry]
name = "poetry"
version = "0.5.0"
description = "Python dependency management and packaging made easy."
authors = [
    "Sébastien Eustace "
]
maintainers = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]
classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.8"  # Compatible python versions must be declared here
toml = "^0.9"
# Dependencies with extras
requests = { version = "^2.13", extras = [ "security" ] }
# Python specific dependencies with prereleases allowed
pathlib2 = { version = "^2.2", python = "~3.8", allow-prereleases = true }
# Git dependencies
cleo = { git = "https://github.com/sdispater/cleo.git", branch = "master" }

# Optional dependencies (extras)
pendulum = { version = "^1.4", optional = true }

[tool.poetry.extras]
time = [ "pendulum" ]

[tool.poetry.group.dev.dependencies]
pytest = "^3.0"
pytest-cov = "^2.4"

[tool.poetry.scripts]
my-script = 'my_package:main'
sample_pyscript = { reference = "script-files/sample_script.py", type= "file" }
sample_shscript = { reference = "script-files/sample_script.sh", type= "file" }

[project.entry-points."poetry.application.plugin"]
my-command = "my_package.plugins:MyApplicationPlugin"


[[tool.poetry.source]]
name = "foo"
url = "https://bar.com"
poetry-core-2.1.1/tests/fixtures/distributions/
poetry-core-2.1.1/tests/fixtures/distributions/demo-0.1.0-in-subdir.zip
[binary zip archive: contents omitted]
poetry-core-2.1.1/tests/fixtures/distributions/demo-0.1.0-py2.py3-none-any.whl
[binary wheel archive: contents omitted]
poetry-core-2.1.1/tests/fixtures/distributions/demo-0.1.0.tar.gz
[binary sdist archive: contents omitted]
poetry-core-2.1.1/tests/fixtures/invalid_mode/
poetry-core-2.1.1/tests/fixtures/invalid_mode/pyproject.toml
[tool.poetry]
package-mode = "invalid"
poetry-core-2.1.1/tests/fixtures/invalid_pyproject/000077500000000000000000000000001475444614500225415ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/invalid_pyproject/pyproject.toml000066400000000000000000000002251475444614500254540ustar00rootroot00000000000000[tool.poetry]
# name missing
# version missing

[tool.poetry.dependencies]
python = "*"
pendulum = {"version" = "^2.0.5", allows-prereleases = true}
poetry-core-2.1.1/tests/fixtures/missing_license_file/000077500000000000000000000000001475444614500231665ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/missing_license_file/pyproject.toml000066400000000000000000000002751475444614500261060ustar00rootroot00000000000000[project]
name = "my-package"
version = "0.1"
license = { file = "LICENSE" }  # This file is intentionally missing
keywords = ["special"]  # field that comes after license in core metadata
poetry-core-2.1.1/tests/fixtures/non_package_mode/000077500000000000000000000000001475444614500222655ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/non_package_mode/pyproject.toml000066400000000000000000000002561475444614500252040ustar00rootroot00000000000000[tool.poetry]
package-mode = false

[tool.poetry.dependencies]
python = "^3.8"
cleo = "^0.6"
pendulum = { git = "https://github.com/sdispater/pendulum.git", branch = "2.0" }
poetry-core-2.1.1/tests/fixtures/pep_517_backend/000077500000000000000000000000001475444614500216435ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/pep_517_backend/README.md000066400000000000000000000001561475444614500231240ustar00rootroot00000000000000This fixture allows testing a project that uses the repository version of `poetry-core`
as a PEP 517 backend.
poetry-core-2.1.1/tests/fixtures/pep_517_backend/foo/000077500000000000000000000000001475444614500224265ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/pep_517_backend/foo/__init__.py000066400000000000000000000000001475444614500245250ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/pep_517_backend/pyproject.toml000066400000000000000000000022461475444614500245630ustar00rootroot00000000000000[tool.poetry]
name = "foo"
version = "1.2.3"
description = "Some description."
authors = ["Foo "]
license = "MIT"
readme = "README.md"

homepage = "https://example.com"
repository = "https://github.com/example/example"
documentation = "https://example.com"

keywords = ["example", "packaging"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

[tool.poetry.dependencies]
python = "^3.7"
attrs = "^22.1.0"

[tool.poetry.group.dev.dependencies]
pytest = "7.1.3"

# Non-regression test for https://github.com/python-poetry/poetry-core/pull/492.
# The underlying issue occurred because `tomlkit` can either return a TOML table as `Table` instance or an
# `OutOfOrderProxy` one, if a table is discontinuous and multiple sections of a table are separated by a non-related
# table, but we were too strict in our type check assertions.
# So adding `tool.black` here ensures that we have discontinuous tables, so that we don't re-introduce the issue caused
# by the type check assertion that ended up being reverted.
[tool.black]
preview = true
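
# Illustration (hypothetical access, not exercised by the fixture itself): with
# the discontinuous table order [tool.poetry] ... [tool.black] ...
# [tool.poetry.scripts] used in this file, tomlkit hands back the poetry table
# as an `OutOfOrderProxy` instead of a plain `Table`.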

[tool.poetry.scripts]
my-script = "my_package:main"
poetry-core-2.1.1/tests/fixtures/project_duplicate_dependency/000077500000000000000000000000001475444614500247125ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_duplicate_dependency/pyproject.toml000066400000000000000000000001451475444614500276260ustar00rootroot00000000000000[project]
name = "minimal"
version = "1"

[tool.poetry.dependencies]
python = "^3.8"
python = "^3.8"
poetry-core-2.1.1/tests/fixtures/project_failing_strict_validation/000077500000000000000000000000001475444614500257555ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_failing_strict_validation/pyproject.toml000066400000000000000000000011721475444614500306720ustar00rootroot00000000000000[tool.poetry]
readme = ["README.rst", "README_WITH_ANOTHER_EXTENSION.md"]

[tool.poetry.dependencies]
python = "*"
pathlib2 = { version = "^2.2", python = "3.7", allows-prereleases = true }

[tool.poetry.extras]
some_extras = ["missing_extra", "another_missing_extra"]

[tool.poetry.scripts]
a_script_with_unknown_extra = { reference = "a_script_with_unknown_extra.py", type = "file", extras = ["foo"] }
a_script_without_extras = { reference = "a_script_without_extras.py", type = "file" }
a_script_with_empty_extras = { reference = "a_script_with_empty_extras.py", type = "file", extras = [] }
another_script = "another_script:main"
poetry-core-2.1.1/tests/fixtures/project_minimal/000077500000000000000000000000001475444614500221705ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_minimal/pyproject.toml000066400000000000000000000000511475444614500251000ustar00rootroot00000000000000[project]
name = "minimal"
version = "1"
poetry-core-2.1.1/tests/fixtures/project_with_build_system_requires/000077500000000000000000000000001475444614500262175ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_build_system_requires/pyproject.toml000066400000000000000000000006351475444614500311370ustar00rootroot00000000000000[build-system]
requires = [
  "poetry-core",
  "Cython~=0.29.6",
]
build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "poetry-cython-example"
version = "0.1.0"
description = ""
authors = []
include = [{ path = "project/**/*.so", format = "wheel" }]

[tool.poetry.build]
generate-setup-file = false
script = "build.py"

[tool.poetry.dependencies]
python = "^3.7"

[tool.poetry.group.dev.dependencies]
poetry-core-2.1.1/tests/fixtures/project_with_dependencies_with_subdirectory/000077500000000000000000000000001475444614500300545ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_dependencies_with_subdirectory/README.rst000066400000000000000000000000261475444614500315410ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/fixtures/project_with_dependencies_with_subdirectory/pyproject.toml000066400000000000000000000023171475444614500327730ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"

# Git dependency with subdirectory
pendulum = { git = "https://github.com/sdispater/pendulum.git", subdirectory = "sub", branch = "2.0" }

# File dependency with subdirectory
demo = [
    { path = "../distributions/demo-0.1.0-in-subdir.zip", subdirectory = "sub", platform = "linux" },
    { file = "../distributions/demo-0.1.0-in-subdir.zip", subdirectory = "sub", platform = "win32" }
]

# Dir dependency with subdirectory (same as path "../simple_project" without subdirectory)
simple-project = { path = "..", subdirectory = "simple_project" }

# Url dependency with subdirectory
foo = { url = "https://example.com/foo.zip", subdirectory = "sub" }
poetry-core-2.1.1/tests/fixtures/project_with_duplicated_classifiers/000077500000000000000000000000001475444614500263025ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_duplicated_classifiers/pyproject.toml000066400000000000000000000011431475444614500312150ustar00rootroot00000000000000[tool.poetry]
name = "project_with_duplicated_classifiers"
version = "1.2.3"
description = "This is a description"
authors = ["Your Name "]
license = "MIT"

classifiers = [
    # Duplicated classifiers
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Build Tools",
    # The following classifiers are automatically generated
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
]

[tool.poetry.dependencies]
python = "^3.8"
poetry-core-2.1.1/tests/fixtures/project_with_groups_and_explicit_main/000077500000000000000000000000001475444614500266435ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_groups_and_explicit_main/README.rst000066400000000000000000000000261475444614500303300ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/fixtures/project_with_groups_and_explicit_main/pyproject.toml000066400000000000000000000005061475444614500315600ustar00rootroot00000000000000[tool.poetry]
name = "simple-project"
version = "0.1.0"
description = ""
authors = ["Your Name "]

[tool.poetry.dependencies]
python = "^3.7"

[tool.poetry.group.main.dependencies]
aiohttp = "^2.17.0"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
poetry-core-2.1.1/tests/fixtures/project_with_groups_and_explicit_main/simple_project/000077500000000000000000000000001475444614500316625ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_groups_and_explicit_main/simple_project/__init__.py000066400000000000000000000000001475444614500337610ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_groups_and_legacy_dev/000077500000000000000000000000001475444614500261205ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_groups_and_legacy_dev/README.rst000066400000000000000000000000261475444614500276050ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/fixtures/project_with_groups_and_legacy_dev/pyproject.toml000066400000000000000000000005701475444614500310360ustar00rootroot00000000000000[tool.poetry]
name = "simple-project"
version = "0.1.0"
description = ""
authors = ["Your Name "]

[tool.poetry.dependencies]
python = "^3.7"

[tool.poetry.group.dev.dependencies]
pre-commit = "^2.17.0"

[tool.poetry.dev-dependencies]
pytest = "^5.2"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
poetry-core-2.1.1/tests/fixtures/project_with_groups_and_legacy_dev/simple_project/000077500000000000000000000000001475444614500311375ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_groups_and_legacy_dev/simple_project/__init__.py000066400000000000000000000000001475444614500332360ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_invalid_dev_deps/000077500000000000000000000000001475444614500250745ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_invalid_dev_deps/pyproject.toml000066400000000000000000000004341475444614500300110ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = ["Awesome Hacker "]
license = "MIT"

[tool.poetry.dependencies]

[tool.poetry.extras]

[tool.poetry.group.dev.dependencies]
mylib = { path = "../mylib", develop = true}
poetry-core-2.1.1/tests/fixtures/project_with_markers_and_extras/000077500000000000000000000000001475444614500254515ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_markers_and_extras/project/000077500000000000000000000000001475444614500271175ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_markers_and_extras/project/__init__.py000066400000000000000000000000001475444614500312160ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_markers_and_extras/pyproject.toml000066400000000000000000000011321475444614500303620ustar00rootroot00000000000000[tool.poetry]
name = "project-with-markers-and-extras"
version = "1.2.3"
description = "This is a description"
authors = ["Your Name "]
license = "MIT"

packages = [
    {include = "project"}
]

[tool.poetry.dependencies]
python = "*"
orjson = [
    { url = "https://example/location/orjson-3.8.0-cp310-cp310-manylinux_2_28_x86_64.whl", platform = "linux", optional = true },
    { url = "https://example/location/orjson-3.8.0-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", platform = "darwin", optional = true }
]

[tool.poetry.extras]
all = ["orjson"]
poetry-core-2.1.1/tests/fixtures/project_with_multi_constraints_dependency/000077500000000000000000000000001475444614500275545ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_multi_constraints_dependency/project/000077500000000000000000000000001475444614500312225ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_multi_constraints_dependency/project/__init__.py000066400000000000000000000000001475444614500333210ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_multi_constraints_dependency/pyproject.toml000066400000000000000000000006201475444614500324660ustar00rootroot00000000000000[tool.poetry]
name = "project-with-multi-constraints-dependency"
version = "1.2.3"
description = "This is a description"
authors = ["Your Name "]
license = "MIT"

packages = [
    {include = "project"}
]

[tool.poetry.dependencies]
python = "*"
pendulum = [
    { version = "^1.5", python = "<3.4" },
    { version = "^2.0", python = "^3.4" }
]

[tool.poetry.group.dev.dependencies]
poetry-core-2.1.1/tests/fixtures/project_with_pep517_non_poetry/000077500000000000000000000000001475444614500250725ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_pep517_non_poetry/pyproject.toml000066400000000000000000000003771475444614500300150ustar00rootroot00000000000000[build-system]
requires = ["flit_core >=3.7.1,<4"]
build-backend = "flit_core.buildapi"

[project]
name = "flit"
authors = []
dependencies = [
    "flit_core >=3.7.1",
]
requires-python = ">=3.6"
readme = "README.rst"
dynamic = ['version', 'description']
poetry-core-2.1.1/tests/fixtures/project_with_setup/000077500000000000000000000000001475444614500227355ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_setup/my_package/000077500000000000000000000000001475444614500250355ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_setup/my_package/__init__.py000066400000000000000000000000001475444614500271340ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_setup/setup.py000066400000000000000000000005321475444614500244470ustar00rootroot00000000000000from setuptools import setup


setup(
    name="my-package",
    license="MIT",
    version="0.1.2",
    description="Demo project.",
    author="Sébastien Eustace",
    author_email="sebastien@eustace.io",
    url="https://github.com/demo/demo",
    packages=["my_package"],
    install_requires=["pendulum>=1.4.4", "cachy[msgpack]>=0.2.0"],
)
poetry-core-2.1.1/tests/fixtures/project_with_setup_cfg_only/000077500000000000000000000000001475444614500246155ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/project_with_setup_cfg_only/setup.cfg000066400000000000000000000006741475444614500264450ustar00rootroot00000000000000[metadata]
name = my_package
version = attr: my_package.VERSION
description = My package description
long_description = file: README.rst, CHANGELOG.rst, LICENSE.rst
keywords = one, two
license = BSD 3-Clause License
classifiers =
    Framework :: Django
    Programming Language :: Python :: 3

[options]
zip_safe = False
include_package_data = True
packages = find:
install_requires =
    requests
    importlib-metadata; python_version<"3.8"
poetry-core-2.1.1/tests/fixtures/sample_project/000077500000000000000000000000001475444614500220235ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/sample_project/README.rst000066400000000000000000000000261475444614500235100ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/fixtures/sample_project/pyproject.toml000066400000000000000000000044541475444614500247460ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
maintainers = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = ">=3.6"
cleo = "^0.6"
pendulum = { git = "https://github.com/sdispater/pendulum.git", branch = "2.0" }
tomlkit = { git = "https://github.com/sdispater/tomlkit.git", rev = "3bff550", develop = true }
requests = { version = "^2.18", optional = true, extras = [ "security" ] }
pathlib2 = { version = "^2.2", python = "~2.7" }

orator = { version = "^0.9", optional = true }

# File dependency
demo = { path = "../distributions/demo-0.1.0-py2.py3-none-any.whl" }

# Dir dependency with setup.py
my-package = { path = "../project_with_setup/" }

# Dir dependency with pyproject.toml
simple-project = { path = "../simple_project/" }

# Dependency with markers
functools32 = { version = "^3.2.3", markers = "python_version ~= '2.7' and sys_platform == 'win32' or python_version in '3.4 3.5'" }

# Dependency with python constraint
dataclasses = { version = "^0.7", python = ">=3.6.1,<3.7" }


[tool.poetry.extras]
db = [ "orator" ]
network = [ "requests" ]

# Non-regression test for https://github.com/python-poetry/poetry-core/pull/492.
# The underlying issue occurred because `tomlkit` can either return a TOML table as `Table` instance or an
# `OutOfOrderProxy` one, if a table is discontinuous and multiple sections of a table are separated by a non-related
# table, but we were too strict in our type check assertions.
# So adding `tool.black` here ensures that we have discontinuous tables, so that we don't re-introduce the issue caused
# by the type check assertion that ended up being reverted.
[tool.black]
preview = true

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"


[tool.poetry.scripts]
my-script = "my_package:main"


[tool.poetry.plugins."blogtool.parsers"]
".rst" = "some_module::SomeClass"
poetry-core-2.1.1/tests/fixtures/sample_project_dynamic/000077500000000000000000000000001475444614500235275ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/sample_project_dynamic/README.rst000066400000000000000000000000261475444614500252140ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/fixtures/sample_project_dynamic/pyproject.toml000066400000000000000000000047321475444614500264510ustar00rootroot00000000000000[project]
name = "my-package"
version = "1.2.3"
description = "Some description."
readme = "README.rst"
requires-python = ">=3.6"
license = { text = "MIT" }
keywords = ["packaging", "dependency", "poetry"]
authors = [
    { name = "Sébastien Eustace", email = "sebastien@eustace.io" }
]
maintainers = [
    { name = "Sébastien Eustace", email = "sebastien@eustace.io" }
]
dynamic = [ "version", "readme", "dependencies", "classifiers" ]

[project.optional-dependencies]
db = [
    "orator ~=0.9"
]
network = [
    "requests[security] ~=2.18"
]

[project.urls]
homepage = "https://python-poetry.org"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

[project.scripts]
my-script = "my_package:main"

[tool.poetry]
version = "1.2.3"
readme = "README.rst"
classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = ">=3.6"
cleo = "^0.6"
pendulum = { git = "https://github.com/sdispater/pendulum.git", branch = "2.0" }
tomlkit = { git = "https://github.com/sdispater/tomlkit.git", rev = "3bff550", develop = true }
pathlib2 = { version = "^2.2", python = "~2.7" }

# File dependency
demo = { path = "../distributions/demo-0.1.0-py2.py3-none-any.whl" }

# Dir dependency with setup.py
my-package = { path = "../project_with_setup/" }

# Dir dependency with pyproject.toml
simple-project = { path = "../simple_project/" }

# Dependency with markers
functools32 = { version = "^3.2.3", markers = "python_version ~= '2.7' and sys_platform == 'win32' or python_version in '3.4 3.5'" }

# Dependency with python constraint
dataclasses = { version = "^0.7", python = ">=3.6.1,<3.7" }


# Non-regression test for https://github.com/python-poetry/poetry-core/pull/492.
# The underlying issue occurred because `tomlkit` can either return a TOML table as `Table` instance or an
# `OutOfOrderProxy` one, if a table is discontinuous and multiple sections of a table are separated by a non-related
# table, but we were too strict in our type check assertions.
# So adding `tool.black` here ensures that we have discontinuous tables, so that we don't re-introduce the issue caused
# by the type check assertion that ended up being reverted.
[tool.black]
preview = true

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"


[tool.poetry.scripts]
my-script = "my_package:main"


[tool.poetry.plugins."blogtool.parsers"]
".rst" = "some_module::SomeClass"
poetry-core-2.1.1/tests/fixtures/sample_project_new/000077500000000000000000000000001475444614500226745ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/sample_project_new/README.rst000066400000000000000000000000261475444614500243610ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/fixtures/sample_project_new/pyproject.toml000066400000000000000000000033511475444614500256120ustar00rootroot00000000000000[project]
name = "my-package"
version = "1.2.3"
description = "Some description."
readme = "README.rst"
requires-python = ">=3.6"
license = { text = "MIT" }
keywords = ["packaging", "dependency", "poetry"]
authors = [
    { name = "Sébastien Eustace", email = "sebastien@eustace.io" }
]
maintainers = [
    { name = "Sébastien Eustace", email = "sebastien@eustace.io" }
]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
dependencies = [
    "cleo ~=0.6",
    "pendulum @ git+https://github.com/sdispater/pendulum.git@2.0",
    "tomlkit @ git+https://github.com/sdispater/tomlkit.git@3bff550",
    "pathlib2 ~=2.2 ; python_version == '2.7'",
    # File dependency
    "demo @ ../distributions/demo-0.1.0-py2.py3-none-any.whl",
    # Dir dependency with setup.py
    "my-package @ ../project_with_setup/",
    # Dir dependency with pyproject.toml
    "simple-project @ ../simple_project/",
    # Dependency with markers
    "functools32 ~=3.2.3 ; python_version ~= '2.7' and sys_platform == 'win32' or python_version in '3.4 3.5'",
    # Dependency with python constraint
    "dataclasses ~=0.7 ; python_full_version >= '3.6.1' and python_version < '3.7'"
]

[project.optional-dependencies]
db = [
    "orator ~=0.9"
]
network = [
    "requests[security] ~=2.18"
]

[project.urls]
homepage = "https://python-poetry.org"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

[project.scripts]
my-script = "my_package:main"

[project.entry-points."blogtool.parsers"]
".rst" = "some_module::SomeClass"

[tool.poetry.dependencies]
tomlkit = { develop = true }

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"
poetry-core-2.1.1/tests/fixtures/script-files/000077500000000000000000000000001475444614500214205ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/script-files/sample_script.py000066400000000000000000000001231475444614500246330ustar00rootroot00000000000000#!/usr/bin/env python

from __future__ import annotations


hello = "Hello World!"
poetry-core-2.1.1/tests/fixtures/script-files/sample_script.sh000066400000000000000000000000511475444614500246150ustar00rootroot00000000000000#!/usr/bin/env bash

echo "Hello World!"
poetry-core-2.1.1/tests/fixtures/simple_project/000077500000000000000000000000001475444614500220335ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/simple_project/README.rst000066400000000000000000000000261475444614500235200ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/fixtures/simple_project/dist/
poetry-core-2.1.1/tests/fixtures/simple_project/dist/simple_project-1.2.3-py2.py3-none-any.whl
[binary wheel archive: contents omitted]
poetry-core-2.1.1/tests/fixtures/simple_project/dist/simple_project-1.2.3.tar.gz
[binary sdist archive: contents omitted]
poetry-core-2.1.1/tests/fixtures/simple_project/pyproject.toml
[tool.poetry]
name = "simple-project"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "~2.7 || ^3.4"
poetry-core-2.1.1/tests/fixtures/simple_project/simple_project/000077500000000000000000000000001475444614500250525ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/simple_project/simple_project/__init__.py000066400000000000000000000000001475444614500271510ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/with_license_type_file/000077500000000000000000000000001475444614500235315ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/with_license_type_file/LICENSE000066400000000000000000000001251475444614500245340ustar00rootroot00000000000000Some license text
with multiple lines,

empty lines and non-ASCII characters: éöß
poetry-core-2.1.1/tests/fixtures/with_license_type_file/pyproject.toml000066400000000000000000000002271475444614500264460ustar00rootroot00000000000000[project]
name = "my-package"
version = "0.1"
license = { file = "LICENSE" }
keywords = ["special"]  # field that comes after license in core metadata
poetry-core-2.1.1/tests/fixtures/with_license_type_str/000077500000000000000000000000001475444614500234225ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/with_license_type_str/pyproject.toml000066400000000000000000000002101475444614500263270ustar00rootroot00000000000000[project]
name = "my-package"
version = "0.1"
license = "MIT"
keywords = ["special"]  # field that comes after license in core metadata
poetry-core-2.1.1/tests/fixtures/with_license_type_text/000077500000000000000000000000001475444614500235765ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/with_license_type_text/pyproject.toml000066400000000000000000000003511475444614500265110ustar00rootroot00000000000000[project]
name = "my-package"
version = "0.1"
license = { text = """Some license text
with multiple lines,

empty lines and non-ASCII characters: éöß" """}
keywords = ["special"]  # field that comes after license in core metadata
poetry-core-2.1.1/tests/fixtures/with_readme_files/000077500000000000000000000000001475444614500224665ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/with_readme_files/README-1.rst000066400000000000000000000000341475444614500243100ustar00rootroot00000000000000Single Python
=============
poetry-core-2.1.1/tests/fixtures/with_readme_files/README-2.rst000066400000000000000000000000241475444614500243100ustar00rootroot00000000000000Changelog
=========
poetry-core-2.1.1/tests/fixtures/with_readme_files/my_package/000077500000000000000000000000001475444614500245665ustar00rootroot00000000000000poetry-core-2.1.1/tests/fixtures/with_readme_files/my_package/__init__.py000066400000000000000000000001171475444614500266760ustar00rootroot00000000000000"""Example module"""

from __future__ import annotations


__version__ = "0.1"
poetry-core-2.1.1/tests/fixtures/with_readme_files/pyproject.toml000066400000000000000000000002061475444614500254000ustar00rootroot00000000000000[project]
name = "my-package"
version = "0.1"
dynamic = ["readme"]

[tool.poetry]
readme = [
    "README-1.rst",
    "README-2.rst"
]
poetry-core-2.1.1/tests/integration/000077500000000000000000000000001475444614500174665ustar00rootroot00000000000000poetry-core-2.1.1/tests/integration/__init__.py000066400000000000000000000000001475444614500215650ustar00rootroot00000000000000poetry-core-2.1.1/tests/integration/test_pep517.py000066400000000000000000000046661475444614500221340ustar00rootroot00000000000000from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING

import pytest


# noinspection PyProtectedMember
from build.__main__ import build_package
from build.util import project_wheel_metadata

from tests.testutils import subprocess_run
from tests.testutils import temporary_project_directory


if TYPE_CHECKING:
    from pytest import FixtureRequest

pytestmark = pytest.mark.integration


@pytest.mark.parametrize(
    "getter, project",
    [
        ("common_project", "simple_project"),
        ("masonry_project", "src_extended"),
        ("masonry_project", "disable_setup_py"),
    ],
)
def test_pep517_check_poetry_managed(
    request: FixtureRequest, getter: str, project: str
) -> None:
    with temporary_project_directory(request.getfixturevalue(getter)(project)) as path:
        assert project_wheel_metadata(path)


def test_pep517_check(project_source_root: Path) -> None:
    assert project_wheel_metadata(str(project_source_root))


def test_pep517_build_sdist(
    temporary_directory: Path, project_source_root: Path
) -> None:
    build_package(
        srcdir=str(project_source_root),
        outdir=str(temporary_directory),
        distributions=["sdist"],
    )
    distributions = list(temporary_directory.glob("poetry_core-*.tar.gz"))
    assert len(distributions) == 1


def test_pep517_build_wheel(
    temporary_directory: Path, project_source_root: Path
) -> None:
    build_package(
        srcdir=str(project_source_root),
        outdir=str(temporary_directory),
        distributions=["wheel"],
    )
    distributions = list(temporary_directory.glob("poetry_core-*-none-any.whl"))
    assert len(distributions) == 1


def test_pip_wheel_build(temporary_directory: Path, project_source_root: Path) -> None:
    tmp = str(temporary_directory)
    pip = subprocess_run(
        "pip", "wheel", "--use-pep517", "-w", tmp, str(project_source_root)
    )
    assert "Successfully built poetry-core" in pip.stdout

    assert pip.returncode == 0

    wheels = list(Path(tmp).glob("poetry_core-*-none-any.whl"))
    assert len(wheels) == 1


def test_pip_install_no_binary(python: str, project_source_root: Path) -> None:
    subprocess_run(
        python,
        "-m",
        "pip",
        "install",
        "--no-binary",
        ":all:",
        project_source_root.as_posix(),
    )

    pip_show = subprocess_run(python, "-m", "pip", "show", "poetry-core")
    assert "Name: poetry-core" in pip_show.stdout
poetry-core-2.1.1/tests/integration/test_pep517_backend.py000066400000000000000000000030051475444614500235650ustar00rootroot00000000000000from __future__ import annotations

import shutil

from pathlib import Path

import pytest

from tests.testutils import subprocess_run


pytestmark = pytest.mark.integration


BUILD_SYSTEM_TEMPLATE = """
[build-system]
requires = ["poetry-core @ file://{project_path}"]
build-backend = "poetry.core.masonry.api"
"""


def test_pip_install(
    temporary_directory: Path, project_source_root: Path, python: str
) -> None:
    """
    Ensure that a project using the repository version of poetry-core as
    a PEP 517 backend can be built.
    """
    temp_pep_517_backend_path = temporary_directory / "pep_517_backend"

    # Copy `pep_517_backend` to a temporary directory as we need to dynamically add the
    # build system during the test. This ensures that we don't update the source, since
    # the value of `requires` is dynamic.
    shutil.copytree(
        Path(__file__).parent.parent / "fixtures/pep_517_backend",
        temp_pep_517_backend_path,
    )

    # Append dynamic `build-system` section to `pyproject.toml` in the temporary
    # project directory.
    with (temp_pep_517_backend_path / "pyproject.toml").open(
        mode="a", encoding="utf-8"
    ) as f:
        f.write(
            BUILD_SYSTEM_TEMPLATE.format(project_path=project_source_root.as_posix())
        )

    subprocess_run(
        python,
        "-m",
        "pip",
        "install",
        temp_pep_517_backend_path.as_posix(),
    )

    pip_show = subprocess_run(python, "-m", "pip", "show", "foo")
    assert "Name: foo" in pip_show.stdout
poetry-core-2.1.1/tests/json/000077500000000000000000000000001475444614500161145ustar00rootroot00000000000000poetry-core-2.1.1/tests/json/__init__.py000066400000000000000000000000001475444614500202130ustar00rootroot00000000000000poetry-core-2.1.1/tests/json/test_poetry_schema.py000066400000000000000000000057511475444614500223770ustar00rootroot00000000000000from __future__ import annotations

from typing import Any

import pytest

from poetry.core.json import validate_object


@pytest.fixture
def base_object() -> dict[str, Any]:
    return {
        "name": "myapp",
        "version": "1.0.0",
        "description": "Some description.",
        "authors": ["Your Name "],
        "dependencies": {"python": "^3.6"},
        "group": {"dev": {"dependencies": {}}},
    }


@pytest.fixture
def multi_url_object() -> dict[str, Any]:
    return {
        "name": "myapp",
        "version": "1.0.0",
        "description": "Some description.",
        "authors": ["Your Name "],
        "dependencies": {
            "python": [
                {
                    "url": "https://download.pytorch.org/whl/cpu/torch-1.4.0%2Bcpu-cp37-cp37m-linux_x86_64.whl",
                    "platform": "linux",
                },
                {"path": "../foo", "platform": "darwin"},
            ]
        },
    }


def test_non_package_mode_no_metadata() -> None:
    assert len(validate_object({"package-mode": False}, "poetry-schema")) == 0


def test_non_package_mode_with_metadata(base_object: dict[str, Any]) -> None:
    base_object["package-mode"] = False
    assert len(validate_object(base_object, "poetry-schema")) == 0


def test_invalid_mode() -> None:
    assert len(validate_object({"package-mode": "foo"}, "poetry-schema")) == 1


def test_path_dependencies(base_object: dict[str, Any]) -> None:
    base_object["dependencies"].update({"foo": {"path": "../foo"}})
    base_object["group"]["dev"]["dependencies"].update({"foo": {"path": "../foo"}})

    assert len(validate_object(base_object, "poetry-schema")) == 0


def test_multi_url_dependencies(multi_url_object: dict[str, Any]) -> None:
    assert len(validate_object(multi_url_object, "poetry-schema")) == 0


@pytest.mark.parametrize(
    "git",
    [
        "https://github.com/example/example-repository.git",
        "git@github.com:example/example-repository.git",
    ],
)
def test_git_dependencies(base_object: dict[str, Any], git: str) -> None:
    base_object["dependencies"].update({"git-dependency": {"git": git}})

    assert len(validate_object(base_object, "poetry-schema")) == 0


@pytest.mark.parametrize(
    "bad_description",
    ["Some multi-\nline string", "Some multiline string\n", "\nSome multi-line string"],
)
def test_multiline_description(
    base_object: dict[str, Any], bad_description: str
) -> None:
    base_object["description"] = bad_description

    errors = validate_object(base_object, "poetry-schema")

    assert len(errors) == 1

    regex = r"\A[^\n]*\Z"
    assert errors[0] == f"data.description must match pattern {regex}"


def test_bad_extra(base_object: dict[str, Any]) -> None:
    bad_extra = "a{[*+"
    base_object["extras"] = {}
    base_object["extras"]["test"] = [bad_extra]

    errors = validate_object(base_object, "poetry-schema")
    assert len(errors) == 1
    assert errors[0] == "data.extras.test[0] must match pattern ^[a-zA-Z-_.0-9]+$"
poetry-core-2.1.1/tests/masonry/000077500000000000000000000000001475444614500166335ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/__init__.py000066400000000000000000000000001475444614500207320ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/000077500000000000000000000000001475444614500204445ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/__init__.py000066400000000000000000000000001475444614500225430ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/conftest.py000066400000000000000000000014021475444614500226400ustar00rootroot00000000000000from __future__ import annotations

import shutil

from pathlib import Path

import pytest


fixtures_dir = Path(__file__).parent / "fixtures"


@pytest.fixture
def complete_with_pycache_and_pyc_files(tmp_path: Path) -> Path:
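    """Copy the ``complete`` fixture project into a temporary directory and
    sprinkle it with ``.pyc`` files and ``__pycache__`` directories, i.e.
    bytecode debris that builders are expected to leave out of artifacts."""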
    root = fixtures_dir / "complete"
    tmp_root = tmp_path / "complete"  # not git repo!

    shutil.copytree(root, tmp_root)
    for location in (".", "sub_pkg1"):
        abs_location = tmp_root / "my_package" / location
        (abs_location / "module1.cpython-39.pyc").touch()
        pycache_location = tmp_root / "my_package" / location / "__pycache__"
        pycache_location.mkdir(parents=True)
        (pycache_location / "module1.cpython-39.pyc").touch()
        (pycache_location / "some_other_file").touch()

    return tmp_root
poetry-core-2.1.1/tests/masonry/builders/fixtures/000077500000000000000000000000001475444614500223155ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/Pretty.Name/000077500000000000000000000000001475444614500244635ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/Pretty.Name/README.rst000066400000000000000000000000301475444614500261430ustar00rootroot00000000000000Pretty.Name
===========
poetry-core-2.1.1/tests/masonry/builders/fixtures/Pretty.Name/pretty_name.py000066400000000000000000000000251475444614500273610ustar00rootroot00000000000000"""Example module"""
poetry-core-2.1.1/tests/masonry/builders/fixtures/Pretty.Name/pyproject.toml000066400000000000000000000003431475444614500273770ustar00rootroot00000000000000[tool.poetry]
name = "Pretty.Name"
version = "1.0"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_creates_package/000077500000000000000000000000001475444614500301615ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_creates_package/build.py000066400000000000000000000003701475444614500316320ustar00rootroot00000000000000import os
import shutil


package = "my_package"
source = "src_my_package"


def build() -> None:
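    """Create the ``my_package`` package by copying ``src_my_package``,
    removing any stale copy from a previous build first."""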
    if os.path.isdir(package):
        shutil.rmtree(package)
    shutil.copytree("src_my_package", package)


if __name__ == "__main__":
    build()
poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_creates_package/pyproject.toml000066400000000000000000000004111475444614500330710ustar00rootroot00000000000000[tool.poetry]
name = "my_package"
version = "0.1"
description = "Some description."
authors = [
    "Rodrigo Agundez "
]
license = "MIT"
homepage = "https://python-poetry.org/"
packages = [
    { include = "my_package" },
]

build = "build.py"
poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_creates_package/src_my_package/000077500000000000000000000000001475444614500331305ustar00rootroot00000000000000__init__.py000066400000000000000000000000001475444614500351500ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_creates_package/src_my_packagepoetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_creates_package/src_my_package/foo.py000066400000000000000000000000001475444614500342530ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_in_subdir/000077500000000000000000000000001475444614500270365ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_in_subdir/pyproject.toml000066400000000000000000000004461475444614500317560ustar00rootroot00000000000000[tool.poetry]
name = "build_script_in_subdir"
version = "0.1"
description = "Some description."
authors = [
    "Brandon Chinn "
]
license = "MIT"
homepage = "https://python-poetry.org/"
packages = [
    { include = "*", from = "src" },
]

build = "scripts/build.py"
poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_in_subdir/scripts/000077500000000000000000000000001475444614500305255ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_in_subdir/scripts/build.py000066400000000000000000000000001475444614500321640ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_in_subdir/src/000077500000000000000000000000001475444614500276255ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_script_in_subdir/src/foo.py000066400000000000000000000000001475444614500307500ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_with_build_py_only/000077500000000000000000000000001475444614500273775ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_with_build_py_only/README.rst000066400000000000000000000000221475444614500310600ustar00rootroot00000000000000Module 1
========
poetry-core-2.1.1/tests/masonry/builders/fixtures/build_with_build_py_only/build.py000066400000000000000000000005761475444614500310600ustar00rootroot00000000000000from pathlib import Path
from setuptools.command.build_py import build_py


class BuildPyCommand(build_py):
    def run(self):
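        # Create the generated module only while build_py runs; it is removed again below.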
        gen_file = Path("build_with_build_py_only/generated/file.py")
        gen_file.touch()
        ret = super().run()
        gen_file.unlink()
        return ret


def build(setup_kwargs):
    setup_kwargs["cmdclass"] = {"build_py": BuildPyCommand}
poetry-core-2.1.1/tests/masonry/builders/fixtures/build_with_build_py_only/build_with_build_py_only/000077500000000000000000000000001475444614500344615ustar00rootroot00000000000000__init__.py000066400000000000000000000000001475444614500365010ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_with_build_py_only/build_with_build_py_onlygenerated/000077500000000000000000000000001475444614500363405ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_with_build_py_only/build_with_build_py_only__init__.py000066400000000000000000000000001475444614500404370ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/build_with_build_py_only/build_with_build_py_only/generatedpoetry-core-2.1.1/tests/masonry/builders/fixtures/build_with_build_py_only/pyproject.toml000066400000000000000000000004651475444614500323200ustar00rootroot00000000000000[tool.poetry]
name = "build_with_build_py_only"
version = "0.1"
description = "Some description."
authors = [
    "Robert Belter "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"

[tool.poetry.build]
script = "build.py"
generate-setup-file = true
poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/000077500000000000000000000000001475444614500275755ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/LICENSE000066400000000000000000000020461475444614500306040ustar00rootroot00000000000000Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/README.rst000066400000000000000000000000261475444614500312620ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/000077500000000000000000000000001475444614500316755ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/000077500000000000000000000000001475444614500324205ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/Bar.py000066400000000000000000000000001475444614500334640ustar00rootroot00000000000000IncludedBar.py000066400000000000000000000000001475444614500350550ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooSecondBar.py000066400000000000000000000000001475444614500345410ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foolowercasebar.py000066400000000000000000000000001475444614500353520ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foopoetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooBar/000077500000000000000000000000001475444614500330455ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooBar/Bar.py000066400000000000000000000000001475444614500341110ustar00rootroot00000000000000lowercasebar.py000066400000000000000000000000001475444614500357770ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooBarpoetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/__init__.py000066400000000000000000000000261475444614500340040ustar00rootroot00000000000000__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/bar/000077500000000000000000000000001475444614500324415ustar00rootroot00000000000000CapitalFoo.py000066400000000000000000000000001475444614500347430ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/barpoetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/bar/foo.py000066400000000000000000000000001475444614500335640ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/case_sensitive_exclusions/pyproject.toml000066400000000000000000000022161475444614500325120ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

exclude = [
    "**/SecondBar.py",
    "my_package/FooBar/*",
    "my_package/Foo/Bar.py",
    "my_package/Foo/lowercasebar.py",
    "my_package/bar/foo.py",
    "my_package/bar/CapitalFoo.py"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

pendulum = { version = "^1.4", optional = true }

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
extra-script = {reference = "my_package.extra:main", extras = ["time"], type = "console"}
poetry-core-2.1.1/tests/masonry/builders/fixtures/comma_file/000077500000000000000000000000001475444614500244105ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/comma_file/comma_file/000077500000000000000000000000001475444614500265035ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/comma_file/comma_file/__init__.py000066400000000000000000000000001475444614500306020ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/comma_file/comma_file/a,b.py000066400000000000000000000000001475444614500275010ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/comma_file/pyproject.toml000066400000000000000000000003461475444614500273270ustar00rootroot00000000000000[tool.poetry]
name = "comma-file"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
[tool.poetry.dependencies]
python = "^3.6"

[tool.poetry.group.dev.dependencies]

poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/000077500000000000000000000000001475444614500241255ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/AUTHORS000066400000000000000000000000001475444614500251630ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/COPYING000066400000000000000000000000001475444614500251460ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/LICENCE000066400000000000000000000000001475444614500251000ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/LICENSE000066400000000000000000000020461475444614500251340ustar00rootroot00000000000000Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/README.rst000066400000000000000000000000261475444614500256120ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/bin/000077500000000000000000000000001475444614500246755ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/bin/script.sh000066400000000000000000000000501475444614500265300ustar00rootroot00000000000000#!/usr/bin/env bash

echo "Hello World!"poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/000077500000000000000000000000001475444614500262255ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/__init__.py000066400000000000000000000000261475444614500303340ustar00rootroot00000000000000__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/data1/000077500000000000000000000000001475444614500272175ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/data1/test.json000066400000000000000000000000031475444614500310620ustar00rootroot00000000000000{}
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/sub_pkg1/000077500000000000000000000000001475444614500277405ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/sub_pkg1/__init__.py000066400000000000000000000000001475444614500320370ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/sub_pkg1/extra_file.xml000066400000000000000000000000001475444614500325720ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/000077500000000000000000000000001475444614500277415ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/__init__.py000066400000000000000000000000001475444614500320400ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/data2/000077500000000000000000000000001475444614500307345ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/data2/data.json000066400000000000000000000000031475444614500325310ustar00rootroot00000000000000{}
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/sub_pkg3/000077500000000000000000000000001475444614500277425ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/my_package/sub_pkg3/foo.py000066400000000000000000000000001475444614500310650ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete/pyproject.toml000066400000000000000000000026621475444614500270470ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
maintainers = [
    "People Everywhere "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

exclude = [
    "does-not-exist",
    "**/*.xml"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

[tool.poetry.dependencies.pendulum]
version = "^1.4"
markers = 'python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5"'
optional = true

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
file-script = { reference = "bin/script.sh", type = "file" }
extra-script = { reference = "my_package.extra:main", extras = ["time"], type = "console" }

[tool.poetry.plugins."poetry.application.plugin"]
my-command = "my_package.plugins:MyApplicationPlugin"

[tool.poetry.urls]
"Issue Tracker" = "https://github.com/python-poetry/poetry/issues"
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/000077500000000000000000000000001475444614500256315ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/AUTHORS000066400000000000000000000000001475444614500266670ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/COPYING000066400000000000000000000000001475444614500266520ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/LICENCE000066400000000000000000000000001475444614500266040ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/LICENSE000066400000000000000000000020461475444614500266400ustar00rootroot00000000000000Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/README.rst000066400000000000000000000000261475444614500273160ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/bin/000077500000000000000000000000001475444614500264015ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/bin/script.sh000066400000000000000000000000501475444614500302340ustar00rootroot00000000000000#!/usr/bin/env bash

echo "Hello World!"poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/000077500000000000000000000000001475444614500277315ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/__init__.py000066400000000000000000000000261475444614500320400ustar00rootroot00000000000000__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/data1/000077500000000000000000000000001475444614500307235ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/data1/test.json000066400000000000000000000000031475444614500325660ustar00rootroot00000000000000{}
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/sub_pkg1/000077500000000000000000000000001475444614500314445ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/sub_pkg1/__init__.py000066400000000000000000000000001475444614500335430ustar00rootroot00000000000000extra_file.xml000066400000000000000000000000001475444614500342170ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/sub_pkg1poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/sub_pkg2/000077500000000000000000000000001475444614500314455ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/sub_pkg2/__init__.py000066400000000000000000000000001475444614500335440ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/sub_pkg2/data2/000077500000000000000000000000001475444614500324405ustar00rootroot00000000000000data.json000066400000000000000000000000031475444614500341560ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/sub_pkg2/data2{}
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/sub_pkg3/000077500000000000000000000000001475444614500314465ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/my_package/sub_pkg3/foo.py000066400000000000000000000000001475444614500325710ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_dynamic/pyproject.toml000066400000000000000000000032351475444614500305500ustar00rootroot00000000000000[project]
name = "my-package"
version = "1.2.3"
description = "Some description."
requires-python = ">=3.6,<4.0"
license = { "text" = "MIT" }
authors = [
    { "name" = "Sébastien Eustace", "email" = "sebastien@eustace.io" }
]
maintainers = [
    { name = "People Everywhere", email = "people@everywhere.com" }
]
keywords = ["packaging", "dependency", "poetry"]
dynamic = [ "version", "classifiers", "readme", "dependencies" ]

[project.optional-dependencies]
time = [ "pendulum>=1.4,<2.0 ; python_version ~= '2.7' and sys_platform == 'win32' or python_version in '3.4 3.5'" ]

[project.urls]
homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"
"Issue Tracker" = "https://github.com/python-poetry/poetry/issues"

[project.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
extra-script = "my_package.extra:main"

[project.entry-points."poetry.application.plugin"]
my-command = "my_package.plugins:MyApplicationPlugin"


[tool.poetry]
version = "1.2.3"
readme = "README.rst"

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

exclude = [
    "does-not-exist",
    "**/*.xml"
]

# Requirements
[tool.poetry.dependencies]
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

[tool.poetry.dependencies.pendulum]
version = "^1.4"
markers = 'python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5"'
optional = true

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.scripts]
file-script = { reference = "bin/script.sh", type = "file" }
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/000077500000000000000000000000001475444614500247765ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/AUTHORS000066400000000000000000000000001475444614500260340ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/COPYING000066400000000000000000000000001475444614500260170ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/LICENCE000066400000000000000000000000001475444614500257510ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/LICENSE000066400000000000000000000020461475444614500260050ustar00rootroot00000000000000Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/README.rst000066400000000000000000000000261475444614500264630ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/bin/000077500000000000000000000000001475444614500255465ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/bin/script.sh000066400000000000000000000000501475444614500274010ustar00rootroot00000000000000#!/usr/bin/env bash

echo "Hello World!"poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/000077500000000000000000000000001475444614500270765ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/__init__.py000066400000000000000000000000261475444614500312050ustar00rootroot00000000000000__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/data1/000077500000000000000000000000001475444614500300705ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/data1/test.json000066400000000000000000000000031475444614500317330ustar00rootroot00000000000000{}
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/sub_pkg1/000077500000000000000000000000001475444614500306115ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/sub_pkg1/__init__.py000066400000000000000000000000001475444614500327100ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/sub_pkg1/extra_file.xml000066400000000000000000000000001475444614500334430ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/sub_pkg2/000077500000000000000000000000001475444614500306125ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/sub_pkg2/__init__.py000066400000000000000000000000001475444614500327110ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/sub_pkg2/data2/000077500000000000000000000000001475444614500316055ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/sub_pkg2/data2/data.json000066400000000000000000000000031475444614500334020ustar00rootroot00000000000000{}
poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/sub_pkg3/000077500000000000000000000000001475444614500306135ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/my_package/sub_pkg3/foo.py000066400000000000000000000000001475444614500317360ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/complete_new/pyproject.toml000066400000000000000000000026301475444614500277130ustar00rootroot00000000000000[project]
name = "my-package"
version = "1.2.3"
description = "Some description."
readme = "README.rst"
requires-python = ">=3.6,<4.0"
license = { "text" = "MIT" }
authors = [
    { "name" = "Sébastien Eustace", "email" = "sebastien@eustace.io" }
]
maintainers = [
    { name = "People Everywhere", email = "people@everywhere.com" }
]
keywords = ["packaging", "dependency", "poetry"]
dependencies = [
    "cleo>=0.6,<0.7",
    "cachy[msgpack]>=0.2.0,<0.3.0",
]
dynamic = [ "classifiers" ]

[project.optional-dependencies]
time = [ "pendulum>=1.4,<2.0 ; python_version ~= '2.7' and sys_platform == 'win32' or python_version in '3.4 3.5'" ]

[project.urls]
homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"
"Issue Tracker" = "https://github.com/python-poetry/poetry/issues"

[project.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
extra-script = "my_package.extra:main"

[project.entry-points."poetry.application.plugin"]
my-command = "my_package.plugins:MyApplicationPlugin"

[tool.poetry]
classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

exclude = [
    "does-not-exist",
    "**/*.xml"
]

[tool.poetry.dev-dependencies]
pytest = "~3.4"

[tool.poetry.scripts]
file-script = { reference = "bin/script.sh", type = "file" }
poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/000077500000000000000000000000001475444614500305115ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/LICENSE000066400000000000000000000020461475444614500315200ustar00rootroot00000000000000Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/README.rst000066400000000000000000000000261475444614500321760ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/pyproject.toml000066400000000000000000000015601475444614500334270ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

pendulum = { version = "^1.4", optional = true }

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/000077500000000000000000000000001475444614500313005ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/000077500000000000000000000000001475444614500334005ustar00rootroot00000000000000__init__.py000066400000000000000000000000001475444614500354200ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_packagedata/000077500000000000000000000000001475444614500342325ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_packagedata1.txt000066400000000000000000000000001475444614500357530ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/datasub_data/000077500000000000000000000000001475444614500360145ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/datadata2.txt000066400000000000000000000000001475444614500375360ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/sub_datadata3.txt000066400000000000000000000000001475444614500375370ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/sub_datapoetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/000077500000000000000000000000001475444614500276425ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/LICENSE000066400000000000000000000020461475444614500306510ustar00rootroot00000000000000Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/README.rst000066400000000000000000000000261475444614500313270ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/000077500000000000000000000000001475444614500317425ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/__init__.py000066400000000000000000000000001475444614500340410ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/000077500000000000000000000000001475444614500326535ustar00rootroot00000000000000data1.txt000066400000000000000000000000001475444614500343150ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/datasub_data/000077500000000000000000000000001475444614500343565ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/datadata2.txt000066400000000000000000000000001475444614500361000ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/sub_datadata3.txt000066400000000000000000000000001475444614500361010ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/sub_datapoetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data/pyproject.toml000066400000000000000000000015601475444614500325600ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

pendulum = { version = "^1.4", optional = true }

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/000077500000000000000000000000001475444614500306755ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/LICENSE000066400000000000000000000020461475444614500317040ustar00rootroot00000000000000Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/README.rst000066400000000000000000000000261475444614500323620ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/000077500000000000000000000000001475444614500327755ustar00rootroot00000000000000__init__.py000066400000000000000000000000001475444614500350150ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_packagepoetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/000077500000000000000000000000001475444614500337065ustar00rootroot00000000000000data1.txt000066400000000000000000000000001475444614500353500ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/datasub_data/000077500000000000000000000000001475444614500354115ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/datadata2.txt000066400000000000000000000000001475444614500371330ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/sub_datadata3.txt000066400000000000000000000000001475444614500371340ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/sub_datapoetry-core-2.1.1/tests/masonry/builders/fixtures/default_with_excluded_data_toml/pyproject.toml000066400000000000000000000016311475444614500336120ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

exclude = ["my_package/data/data1.txt"]

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

pendulum = { version = "^1.4", optional = true }

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
poetry-core-2.1.1/tests/masonry/builders/fixtures/disable_setup_py/000077500000000000000000000000001475444614500256505ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/disable_setup_py/README.rst000066400000000000000000000000261475444614500273350ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/disable_setup_py/my_package/000077500000000000000000000000001475444614500277505ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/disable_setup_py/my_package/__init__.py000066400000000000000000000000001475444614500320470ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/disable_setup_py/pyproject.toml000066400000000000000000000013521475444614500305650ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Poetry Team "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

[tool.poetry.build]
generate-setup-file = false

# Requirements
[tool.poetry.dependencies]
python = "~2.7 || ^3.6"

[tool.poetry.extras]

[tool.poetry.group.dev.dependencies]

[tool.poetry.scripts]
my-script = "my_package:main"
poetry-core-2.1.1/tests/masonry/builders/fixtures/epoch/000077500000000000000000000000001475444614500234135ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/epoch/README.rst000066400000000000000000000000141475444614500250750ustar00rootroot00000000000000Epoch
=====
poetry-core-2.1.1/tests/masonry/builders/fixtures/epoch/epoch.py000066400000000000000000000000541475444614500250620ustar00rootroot00000000000000"""Example module"""

__version__ = "1!2.0"
poetry-core-2.1.1/tests/masonry/builders/fixtures/epoch/pyproject.toml000066400000000000000000000003371475444614500263320ustar00rootroot00000000000000[tool.poetry]
name = "epoch"
version = "1!2.0"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude-whl-include-sdist/000077500000000000000000000000001475444614500273035ustar00rootroot00000000000000exclude_whl_include_sdist/000077500000000000000000000000001475444614500344405ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude-whl-include-sdist__init__.py000066400000000000000000000000261475444614500365470ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist__version__ = "0.1.0"
compiled/000077500000000000000000000000001475444614500362345ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdistsource.c000066400000000000000000000000001475444614500376660ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/compiledsource.h000066400000000000000000000000001475444614500376730ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/compiledcython_code.pyx000066400000000000000000000000001475444614500374660ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdistpoetry-core-2.1.1/tests/masonry/builders/fixtures/exclude-whl-include-sdist/pyproject.toml000066400000000000000000000007321475444614500322210ustar00rootroot00000000000000[tool.poetry]
name = "exclude-whl-include-sdist"
description = ""
authors = []
version = "0.1.0"
exclude = ["exclude_whl_include_sdist/compiled", "exclude_whl_include_sdist/*.pyx"]
include = [
    { path = "exclude_whl_include_sdist/compiled/**/*", format = "sdist" },
    { path = "exclude_whl_include_sdist/*.pyx", format = "sdist" }
]

[tool.poetry.dependencies]
python = "^3.9"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/000077500000000000000000000000001475444614500273345ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/LICENSE000066400000000000000000000020461475444614500303430ustar00rootroot00000000000000Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/README.rst000066400000000000000000000000261475444614500310210ustar00rootroot00000000000000My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/000077500000000000000000000000001475444614500314345ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/__init__.py000066400000000000000000000000001475444614500335330ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/000077500000000000000000000000001475444614500323455ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/data1.txt000066400000000000000000000000001475444614500340660ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/data2.txt000066400000000000000000000000001475444614500340670ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/sub_data/000077500000000000000000000000001475444614500341275ustar00rootroot00000000000000data2.txt000066400000000000000000000000001475444614500355720ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/sub_datadata3.txt000066400000000000000000000000001475444614500355730ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/sub_datapoetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/000077500000000000000000000000001475444614500327305ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1/000077500000000000000000000000001475444614500337475ustar00rootroot00000000000000itemdata1.txt000066400000000000000000000000001475444614500362700ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1subitem/000077500000000000000000000000001475444614500353405ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1subitemdata.txt000066400000000000000000000000001475444614500403710ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1/subitempoetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item2/000077500000000000000000000000001475444614500337505ustar00rootroot00000000000000itemdata2.txt000066400000000000000000000000001475444614500362720ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item2publicdata.txt000066400000000000000000000000001475444614500355100ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplicpoetry-core-2.1.1/tests/masonry/builders/fixtures/exclude_nested_data_toml/pyproject.toml000066400000000000000000000017441475444614500322560ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

exclude = ["**/data/", "**/*/item*"]
include = [{path = "my_package/data/data2.txt", format = ["sdist", "wheel"]}]

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

pendulum = { version = "^1.4", optional = true }

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
poetry-core-2.1.1/tests/masonry/builders/fixtures/excluded_subpackage/000077500000000000000000000000001475444614500262775ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/excluded_subpackage/README.rst000066400000000000000000000000251475444614500277630ustar00rootroot00000000000000My Package
==========poetry-core-2.1.1/tests/masonry/builders/fixtures/excluded_subpackage/example/000077500000000000000000000000001475444614500277325ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/excluded_subpackage/example/__init__.py000066400000000000000000000000261475444614500320410ustar00rootroot00000000000000__version__ = "0.1.0"
poetry-core-2.1.1/tests/masonry/builders/fixtures/excluded_subpackage/example/test/000077500000000000000000000000001475444614500307115ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/excluded_subpackage/example/test/__init__.py000066400000000000000000000000001475444614500330100ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/excluded_subpackage/example/test/excluded.py000066400000000000000000000002241475444614500330560ustar00rootroot00000000000000from tests.masonry.builders.fixtures.excluded_subpackage.example import __version__


def test_version() -> None:
    assert __version__ == "0.1.0"
poetry-core-2.1.1/tests/masonry/builders/fixtures/excluded_subpackage/pyproject.toml000066400000000000000000000005171475444614500312160ustar00rootroot00000000000000[tool.poetry]
name = "example"
version = "0.1.0"
description = ""
authors = ["Sébastien Eustace "]
exclude = [
    "**/test/**/*",
]

[tool.poetry.dependencies]
python = "^3.6"

[tool.poetry.group.dev.dependencies]
pytest = "^3.0"

[build-system]
requires = ["poetry>=0.12"]
build-backend = "poetry.masonry.api"
poetry-core-2.1.1/tests/masonry/builders/fixtures/extended/000077500000000000000000000000001475444614500241155ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/extended/README.rst000066400000000000000000000000221475444614500255760ustar00rootroot00000000000000Module 1
========
poetry-core-2.1.1/tests/masonry/builders/fixtures/extended/build.py000066400000000000000000000002721475444614500255670ustar00rootroot00000000000000from setuptools import Extension


extensions = [Extension("extended.extended", ["extended/extended.c"])]


def build(setup_kwargs):
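    # Hook called from the generated setup.py: registers the C extension with setuptools.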
    setup_kwargs.update({"ext_modules": extensions})
poetry-core-2.1.1/tests/masonry/builders/fixtures/extended/extended/000077500000000000000000000000001475444614500257155ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/extended/extended/__init__.py000066400000000000000000000000001475444614500300140ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/extended/extended/extended.c000066400000000000000000000015651475444614500276700ustar00rootroot00000000000000#include 


static PyObject *hello(PyObject *self) {
    return PyUnicode_FromString("Hello");
}


static PyMethodDef module_methods[] = {
    {
        "hello",
        (PyCFunction) hello,
        0,
        PyDoc_STR("Say hello.")
    },
    {NULL}
};

#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "extended",
    NULL,
    -1,
    module_methods,
    NULL,
    NULL,
    NULL,
    NULL,
};
#endif
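
/* Module entry point: PyInit_extended on Python 3, init_extended on Python 2. */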

PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_extended(void)
#else
init_extended(void)
#endif
{
    PyObject *module;

#if PY_MAJOR_VERSION >= 3
    module = PyModule_Create(&moduledef);
#else
    module = Py_InitModule3("extended", module_methods, NULL);
#endif

    if (module == NULL)
#if PY_MAJOR_VERSION >= 3
        return NULL;
#else
        return;
#endif

#if PY_MAJOR_VERSION >= 3
    return module;
#endif
}
poetry-core-2.1.1/tests/masonry/builders/fixtures/extended/pyproject.toml000066400000000000000000000004441475444614500270330ustar00rootroot00000000000000[tool.poetry]
name = "extended"
version = "0.1"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"

[tool.poetry.build]
script = "build.py"
generate-setup-file = true
poetry-core-2.1.1/tests/masonry/builders/fixtures/extended/setup.py000066400000000000000000000010331475444614500256240ustar00rootroot00000000000000from setuptools import setup

packages = ["extended"]

package_data = {"": ["*"]}

setup_kwargs = {
    "name": "extended",
    "version": "0.1",
    "description": "Some description.",
    "long_description": "Module 1\n========\n",
    "author": "Sébastien Eustace",
    "author_email": "sebastien@eustace.io",
    "maintainer": "None",
    "maintainer_email": "None",
    "url": "https://python-poetry.org/",
    "packages": packages,
    "package_data": package_data,
}
from build import *

build(setup_kwargs)

setup(**setup_kwargs)
poetry-core-2.1.1/tests/masonry/builders/fixtures/extended_with_no_setup/000077500000000000000000000000001475444614500270645ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/extended_with_no_setup/README.rst000066400000000000000000000000221475444614500305450ustar00rootroot00000000000000Module 1
========
poetry-core-2.1.1/tests/masonry/builders/fixtures/extended_with_no_setup/build.py000066400000000000000000000013611475444614500305360ustar00rootroot00000000000000import os
import shutil

from setuptools.command.build_ext import build_ext
from setuptools import Distribution, Extension


extensions = [Extension("extended.extended", ["extended/extended.c"])]


def build() -> None:
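    # Build the C extension in place, without a setup.py, by driving setuptools' build_ext directly.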
    distribution = Distribution({"name": "extended", "ext_modules": extensions})

    cmd = build_ext(distribution)
    cmd.finalize_options()
    cmd.run()

    # Copy built extensions back to the project
    for output in cmd.get_outputs():
        relative_extension = os.path.relpath(output, cmd.build_lib)
        shutil.copyfile(output, relative_extension)
        mode = os.stat(relative_extension).st_mode
        mode |= (mode & 0o444) >> 2
        os.chmod(relative_extension, mode)


if __name__ == "__main__":
    build()
poetry-core-2.1.1/tests/masonry/builders/fixtures/extended_with_no_setup/extended/000077500000000000000000000000001475444614500306645ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/extended_with_no_setup/extended/__init__.py000066400000000000000000000000001475444614500327630ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/extended_with_no_setup/extended/extended.c000066400000000000000000000015651475444614500326370ustar00rootroot00000000000000#include 


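/* Minimal extension module exposing a single hello() function; compiled by this fixture's build.py. */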
static PyObject *hello(PyObject *self) {
    return PyUnicode_FromString("Hello");
}


static PyMethodDef module_methods[] = {
    {
        "hello",
        (PyCFunction) hello,
        0,
        PyDoc_STR("Say hello.")
    },
    {NULL}
};

#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "extended",
    NULL,
    -1,
    module_methods,
    NULL,
    NULL,
    NULL,
    NULL,
};
#endif

PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_extended(void)
#else
init_extended(void)
#endif
{
    PyObject *module;

#if PY_MAJOR_VERSION >= 3
    module = PyModule_Create(&moduledef);
#else
    module = Py_InitModule3("extended", module_methods, NULL);
#endif

    if (module == NULL)
#if PY_MAJOR_VERSION >= 3
        return NULL;
#else
        return;
#endif

#if PY_MAJOR_VERSION >= 3
    return module;
#endif
}
poetry-core-2.1.1/tests/masonry/builders/fixtures/extended_with_no_setup/pyproject.toml
[tool.poetry]
name = "extended"
version = "0.1"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"

[tool.poetry.build]
script = "build.py"
generate-setup-file = false
poetry-core-2.1.1/tests/masonry/builders/fixtures/generated_script_file/README.rst
Module 1
========
poetry-core-2.1.1/tests/masonry/builders/fixtures/generated_script_file/build.py
from pathlib import Path
from setuptools.command.build_py import build_py


class BuildPyCommand(build_py):
    def run(self):
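        # Generate the script before running the normal build so the file exists when sources are collected.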
        with open("generated_script_file/generated/script.sh", "w", encoding="utf-8") as f:
            f.write('#!/usr/bin/env bash\n\necho "Hello World!"\n')
        ret = super().run()
        return ret


def build(setup_kwargs):
    setup_kwargs["cmdclass"] = {"build_py": BuildPyCommand}
poetry-core-2.1.1/tests/masonry/builders/fixtures/generated_script_file/generated_script_file/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/generated_script_file/generated_script_file/generated/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/generated_script_file/pyproject.toml
[tool.poetry]
name = "generated_script_file"
version = "0.1"
description = "Some description."
authors = [
    "Poetry Maintainers "
]
license = "MIT"
readme = "README.rst"

[tool.poetry.scripts]
sh-script = { reference = "generated_script_file/generated/script.sh", type = "file" }

[tool.poetry.build]
script = "build.py"
generate-setup-file = true

[build-system]
requires = ["poetry-core", "setuptools"]
build-backend = "poetry.core.masonry.api"
poetry-core-2.1.1/tests/masonry/builders/fixtures/include_excluded_code/lib/my_package/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/include_excluded_code/lib/my_package/generated.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/include_excluded_code/pyproject.toml
[tool.poetry]
name = "my_package"
version = "0.1.0"
description = ""
authors = ["Audun Skaugen "]

packages = [{include='my_package', from='lib'}]
# Simulate excluding due to .gitignore
exclude = ['lib/my_package/generated.py']
# Include again
include = [{ path = 'lib/my_package/generated.py', format = ["sdist", "wheel"] }]

[tool.poetry.dependencies]
python = "^3.8"

[tool.poetry.group.dev.dependencies]

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/LICENSE
Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/README.rst
My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Bar/foo/bar/Foo.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/Bar.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/IncludedBar.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/SecondBar.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/lowercasebar.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/FooBar/Bar.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/FooBar/lowercasebar.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/__init__.py
__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/pyproject.toml
[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

exclude = [
    "my_package/Bar/*/bar/*.py"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

pendulum = { version = "^1.4", optional = true }

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
extra-script = {reference = "my_package.extra:main", extras = ["time"], type = "console"}
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/COPYING
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/COPYING.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/LICENSE
Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/LICENSE.md
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/BSD-3.md
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/CUSTOM-LICENSE
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/MIT.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/README.rst
My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/my_package/__init__.py
__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/licenses_and_copying/pyproject.toml
[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
maintainers = [
    "People Everywhere "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

[tool.poetry.dependencies.pendulum]
version = "^1.4"
markers= 'python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5"'
optional = true

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"

[tool.poetry.urls]
"Issue Tracker" = "https://github.com/python-poetry/poetry/issues"
poetry-core-2.1.1/tests/masonry/builders/fixtures/localversionlabel/localversionlabel.py
"""Test fixture for https://github.com/python-poetry/poetry/issues/756"""
poetry-core-2.1.1/tests/masonry/builders/fixtures/localversionlabel/pyproject.toml
[tool.poetry]
name = "localversionlabel"
description = "Local Version Label"
version = "0.1-beta.1+gitbranch-buildno-1"
authors = []
poetry-core-2.1.1/tests/masonry/builders/fixtures/module1/README.rst
Module 1
========
poetry-core-2.1.1/tests/masonry/builders/fixtures/module1/module1.py
"""Example module"""

__version__ = "0.1"
poetry-core-2.1.1/tests/masonry/builders/fixtures/module1/pyproject.toml
[tool.poetry]
name = "module1"
version = "0.1"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"


[tool.poetry.dependencies]
python = "*"
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/__init__.pyi
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/module.pyi
"""Example module"""
from typing import Tuple

version_info = Tuple[int, int, int]
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/subpkg/__init__.pyi
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only/pyproject.toml
[tool.poetry]
name = "pep-561-stubs"
version = "0.1"
description = "PEP 561 stub package example"
authors = [
    "Oleg Höfling "
]
license = "MIT"
packages = [
    {include = "pkg-stubs"}
]

[tool.poetry.dependencies]
python = "^3.6"
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/__init__.pyi
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/module.pyi
"""Example module"""
from typing import Tuple

version_info = Tuple[int, int, int]
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/py.typed
partial
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/subpkg/__init__.pyi
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pyproject.toml
[tool.poetry]
name = "pep-561-stubs"
version = "0.1"
description = "PEP 561 stub package example with the py.typed marker file"
authors = [
    "Oleg Höfling "
]
license = "MIT"
packages = [
    {include = "pkg-stubs"}
]

[tool.poetry.dependencies]
python = "^3.6"
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/module.pyi
"""Example module"""
from typing import Tuple

version_info = Tuple[int, int, int]
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/subpkg/__init__.pyi
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/subpkg/py.typed
partial
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pyproject.toml
[tool.poetry]
name = "pep-561-stubs"
version = "0.1"
description = "PEP 561 stub namespace package example with the py.typed marker file"
authors = [
    "Henrik Bruåsdal "
]
license = "MIT"
packages = [
    {include = "pkg-stubs"}
]

[tool.poetry.dependencies]
python = "^3.6"
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_src/pyproject.toml
[tool.poetry]
name = "pep-561-stubs"
version = "0.1"
description = "PEP 561 stub package example with an src layout"
authors = [
    "Oleg Höfling "
]
license = "MIT"
packages = [
    {include = "pkg-stubs", from = "src"}
]

[tool.poetry.dependencies]
python = "^3.6"
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/__init__.pyi
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/module.pyi
"""Example module"""
from typing import Tuple

version_info = Tuple[int, int, int]
poetry-core-2.1.1/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/subpkg/__init__.pyi
poetry-core-2.1.1/tests/masonry/builders/fixtures/prerelease/README.rst
Prerelease
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/prerelease/prerelease.py
"""Example module"""

__version__ = "0.1"
poetry-core-2.1.1/tests/masonry/builders/fixtures/prerelease/pyproject.toml
[tool.poetry]
name = "prerelease"
version = "0.1-beta.1"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_callable_legacy_string/README.rst
My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_callable_legacy_string/my_package/__init__.py
__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_callable_legacy_string/pyproject.toml
[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Poetry Maintainers "
]
license = "MIT"
readme = "README.rst"

[tool.poetry.dependencies]
python = "^3.6"

[tool.poetry.group.dev.dependencies]

[tool.poetry.extras]

[tool.poetry.scripts]
script-legacy = "my_package:main"
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_console/README.rst
My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_console/my_package/__init__.py
__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_console/pyproject.toml
[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Poetry Maintainers "
]
license = "MIT"
readme = "README.rst"

[tool.poetry.dependencies]
python = "^3.6"

[tool.poetry.group.dev.dependencies]

[tool.poetry.extras]
time = []

[tool.poetry.scripts]
script = { reference = "my_package.extra:main", type = "console" }
extra-script = { reference = "my_package.extra:main", extras = ["time"], type = "console" }
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file/README.rst
My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file/bin/script.sh
#!/usr/bin/env bash

echo "Hello World!"poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file/my_package/000077500000000000000000000000001475444614500307365ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file/my_package/__init__.py000066400000000000000000000000261475444614500330450ustar00rootroot00000000000000__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file/pyproject.toml000066400000000000000000000005711475444614500315550ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Poetry Maintainers "
]
license = "MIT"
readme = "README.rst"

[tool.poetry.dependencies]
python = "^3.6"

[tool.poetry.group.dev.dependencies]

[tool.poetry.extras]

[tool.poetry.scripts]
sh-script = { reference = "bin/script.sh", type = "file" }
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/README.rst
My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/bin/script.sh
#!/usr/bin/env bash

echo "Hello World!"my_package/000077500000000000000000000000001475444614500345755ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file_invalid_definition__init__.py000066400000000000000000000000261475444614500367040ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/my_package__version__ = "1.2.3"
pyproject.toml000066400000000000000000000006061475444614500354130ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file_invalid_definition[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Poetry Maintainers "
]
license = "MIT"
readme = "README.rst"

[tool.poetry.dependencies]
python = "^3.6"

[tool.poetry.group.dev.dependencies]

[tool.poetry.extras]

[tool.poetry.scripts]
invalid_definition = { reference = "bin/script.sh", type = "ffiillee" }
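# "ffiillee" is a deliberately invalid script type; the build is expected to reject this definition.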
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file_missing/README.rst
My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file_missing/my_package/__init__.py
__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/script_reference_file_missing/pyproject.toml
[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "Poetry Maintainers "
]
license = "MIT"
readme = "README.rst"

[tool.poetry.dependencies]
python = "^3.6"

[tool.poetry.group.dev.dependencies]

[tool.poetry.extras]

[tool.poetry.scripts]
sh-script = { reference = "bin/script.sh", type = "file" }
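# bin/script.sh intentionally does not exist in this fixture, so building should fail.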
poetry-core-2.1.1/tests/masonry/builders/fixtures/simple_version/README.rst
Module 1
========
poetry-core-2.1.1/tests/masonry/builders/fixtures/simple_version/pyproject.toml
[tool.poetry]
name = "simple-version"
version = "0.1"
description = "Some description."
authors = [
    "Sébastien Eustace "
]

readme = "README.rst"


[tool.poetry.dependencies]
python = "3.6"
poetry-core-2.1.1/tests/masonry/builders/fixtures/simple_version/simple_version.py
"""Example module"""

__version__ = "0.1"
poetry-core-2.1.1/tests/masonry/builders/fixtures/single_python/README.rst
Single Python
=============
poetry-core-2.1.1/tests/masonry/builders/fixtures/single_python/pyproject.toml
[tool.poetry]
name = "single-python"
version = "0.1"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"


[tool.poetry.dependencies]
python = "2.7.15"
poetry-core-2.1.1/tests/masonry/builders/fixtures/single_python/single_python.py
"""Example module"""

__version__ = "0.1"
poetry-core-2.1.1/tests/masonry/builders/fixtures/source_file/README.rst
Module 1
========
poetry-core-2.1.1/tests/masonry/builders/fixtures/source_file/pyproject.toml
[tool.poetry]
name = "module-src"
version = "0.1"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"


[tool.poetry.dependencies]
python = "*"
poetry-core-2.1.1/tests/masonry/builders/fixtures/source_file/src/module_src.py
"""Example module"""

__version__ = "0.1"
poetry-core-2.1.1/tests/masonry/builders/fixtures/source_package/README.rst
Module 1
========
poetry-core-2.1.1/tests/masonry/builders/fixtures/source_package/pyproject.toml
[tool.poetry]
name = "package-src"
version = "0.1"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"

[tool.poetry.dependencies]
python = "*"
poetry-core-2.1.1/tests/masonry/builders/fixtures/source_package/src/package_src/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/source_package/src/package_src/module.py
"""Example module"""

__version__ = "0.1"
poetry-core-2.1.1/tests/masonry/builders/fixtures/split_source/lib_a/module_a/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/split_source/lib_b/module_b/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/split_source/pyproject.toml
[tool.poetry]
name = "split-source"
version = "0.1"
description = "Combine packages from different locations."
authors = [
    "Jan Harkes "
]
license = "MIT"
packages = [
    { include = "module_a", from = "lib_a" },
    { include = "module_b", from = "lib_b" },
]

[tool.poetry.dependencies]
python = "^3.6"
poetry-core-2.1.1/tests/masonry/builders/fixtures/src_extended/README.rst
Module 1
========
poetry-core-2.1.1/tests/masonry/builders/fixtures/src_extended/build.py
from setuptools import Extension


extensions = [Extension("extended.extended", ["src/extended/extended.c"])]


def build(setup_kwargs):
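    # Called from the generated setup.py to inject the C extension into the build.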
    setup_kwargs.update({"ext_modules": extensions})
poetry-core-2.1.1/tests/masonry/builders/fixtures/src_extended/pyproject.toml
[tool.poetry]
name = "extended"
version = "0.1"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"

[tool.poetry.build]
script = "build.py"
generate-setup-file = true
poetry-core-2.1.1/tests/masonry/builders/fixtures/src_extended/setup.py
from setuptools import setup

package_dir = {"": "src"}

packages = ["extended"]

package_data = {"": ["*"]}

setup_kwargs = {
    "name": "extended",
    "version": "0.1",
    "description": "Some description.",
    "long_description": "Module 1\n========\n",
    "author": "Sébastien Eustace",
    "author_email": "sebastien@eustace.io",
    "maintainer": "None",
    "maintainer_email": "None",
    "url": "https://python-poetry.org/",
    "package_dir": package_dir,
    "packages": packages,
    "package_data": package_data,
}
from build import *

build(setup_kwargs)

setup(**setup_kwargs)
poetry-core-2.1.1/tests/masonry/builders/fixtures/src_extended/src/extended/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/src_extended/src/extended/extended.c
#include <Python.h>


static PyObject *hello(PyObject *self) {
    return PyUnicode_FromString("Hello");
}


static PyMethodDef module_methods[] = {
    {
        "hello",
        (PyCFunction) hello,
        0,
        PyDoc_STR("Say hello.")
    },
    {NULL}
};

#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "extended",
    NULL,
    -1,
    module_methods,
    NULL,
    NULL,
    NULL,
    NULL,
};
#endif

PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_extended(void)
#else
init_extended(void)
#endif
{
    PyObject *module;

#if PY_MAJOR_VERSION >= 3
    module = PyModule_Create(&moduledef);
#else
    module = Py_InitModule3("extended", module_methods, NULL);
#endif

    if (module == NULL)
#if PY_MAJOR_VERSION >= 3
        return NULL;
#else
        return;
#endif

#if PY_MAJOR_VERSION >= 3
    return module;
#endif
}
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/both.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/both/file.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/both/sub/file.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/default.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/default/file.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/default/sub/file.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/pyproject.toml
[tool.poetry]
name = "with-include"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

packages = [
    # modules
    { include = "mod_default.py", from = "src" },
    { include = "mod_sdist_only.py", from = "src", format = "sdist" },
    { include = "mod_wheel_only.py", from = "src", format = "wheel" },
    { include = "mod_both.py", from = "src", format = [ "sdist", "wheel" ]},
    # packages
    { include = "pkg_default", from = "src" },
    { include = "pkg_sdist_only", from = "src", format = "sdist" },
    { include = "pkg_wheel_only", from = "src", format = "wheel" },
    { include = "pkg_both", from = "src", format = [ "sdist", "wheel" ]},
]

include = [
    # files
    { path = "default.txt" },
    { path = "sdist_only.txt", format = "sdist" },
    { path = "wheel_only.txt", format = "wheel" },
    { path = "both.txt", format = [ "sdist", "wheel" ] },
    # directories
    { path = "default" },
    { path = "sdist_only", format = "sdist" },
    { path = "wheel_only", format = "wheel" },
    { path = "both", format = [ "sdist", "wheel" ] },
]


# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

pendulum = { version = "^1.4", optional = true }

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/sdist_only.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/sdist_only/file.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/sdist_only/sub/file.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/mod_both.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/mod_default.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/mod_sdist_only.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/mod_wheel_only.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/pkg_both/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/pkg_both/sub/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/pkg_default/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/pkg_default/sub/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/pkg_sdist_only/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/pkg_sdist_only/sub/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/pkg_wheel_only/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/src/pkg_wheel_only/sub/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/wheel_only.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/wheel_only/file.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include-formats/wheel_only/sub/file.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/LICENSE
Copyright (c) 2018 Sébastien Eustace

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/README.rst
My Package
==========
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/etc/from_to/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/extra_dir/README.md
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/extra_dir/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/extra_dir/sub_pkg/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/extra_dir/sub_pkg/vcs_excluded.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/extra_dir/vcs_excluded.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/for_wheel_only/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/my_module.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/my_module_to.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/notes.txt
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/package_with_include/__init__.py
__version__ = "1.2.3"
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/pyproject.toml
[tool.poetry]
name = "with-include"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

readme = "README.rst"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

packages = [
    { include = "extra_dir/**/*.py" },
    { include = "extra_dir/**/*.py" },
    { include = "my_module.py" },
    { include = "package_with_include" },
    { include = "tests", format = "sdist" },
    { include = "for_wheel_only", format = ["wheel"] },
    { include = "src_package", from = "src"},
    { include = "from_to", from = "etc", "to" = "target_from_to"},
    { include = "my_module_to.py", "to" = "target_module"},
]
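# Note: "from" points at the source directory, while "to" remaps the packaged destination
# (e.g. etc/from_to is shipped as target_from_to).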

include = [
    { path = "extra_dir/vcs_excluded.py", format = ["sdist", "wheel"] },
    "notes.txt",  # default is sdist only
]


# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = "^0.6"
cachy = { version = "^0.2.0", extras = ["msgpack"] }

pendulum = { version = "^1.4", optional = true }

[tool.poetry.group.dev.dependencies]
pytest = "~3.4"

[tool.poetry.extras]
time = ["pendulum"]

[tool.poetry.scripts]
my-script = "my_package:main"
my-2nd-script = "my_package:main2"
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/src/src_package/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with-include/tests/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with_bad_path_dep/pyproject.toml
[tool.poetry]
name = "with_bad_path_dep"
version = "1.2.3"
description = "Some description."
authors = ["Awesome Hacker "]

[tool.poetry.dependencies]
python = "^3.6"
bogus = { path = "../only/in/dev", develop = true }
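# Intentionally nonexistent path dependency, used to exercise error handling for unresolvable main dependencies.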
poetry-core-2.1.1/tests/masonry/builders/fixtures/with_bad_path_dep/with_bad_path_dep/__init__.py
poetry-core-2.1.1/tests/masonry/builders/fixtures/with_bad_path_dev_dep/pyproject.toml
[tool.poetry]
name = "with_bad_path_dev_dep"
version = "1.2.3"
description = "Some description."
authors = ["Awesome Hacker "]

[tool.poetry.dependencies]
python = "^3.6"

[tool.poetry.group.dev.dependencies]
bogus = { path = "../only/in/dev", develop = true }
poetry-core-2.1.1/tests/masonry/builders/fixtures/with_bad_path_dev_dep/with_bad_path_dev_dep/000077500000000000000000000000001475444614500330635ustar00rootroot00000000000000__init__.py000066400000000000000000000000001475444614500351030ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/with_bad_path_dev_dep/with_bad_path_dev_deppoetry-core-2.1.1/tests/masonry/builders/fixtures/with_optional_without_extras/000077500000000000000000000000001475444614500303465ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/with_optional_without_extras/pyproject.toml000066400000000000000000000007511475444614500332650ustar00rootroot00000000000000[project]
name = "my-packager"
description = "Something"
version = "0.1"
classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]
dynamic = ["dependencies"]

[tool.poetry.dependencies]
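# "requests" is optional but not referenced by any extra below; that is the
# case this fixture exercises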
requests = { version = "^0.28.1", optional = true }
httpx = { version = "^0.28.1", optional = true }
grpcio = { version = "^0.2.0" }
pycowsay = { version = "^0.1.0" }

[tool.poetry.extras]
http = ["httpx"]
grpc = ["grpcio"]
poetry-core-2.1.1/tests/masonry/builders/fixtures/with_url_dependency/000077500000000000000000000000001475444614500263505ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/with_url_dependency/pyproject.toml000066400000000000000000000012201475444614500312570ustar00rootroot00000000000000[tool.poetry]
name = "with-url-dependency"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
demo = { url = "https://python-poetry.org/distributions/demo-0.1.0-py2.py3-none-any.whl" }
poetry-core-2.1.1/tests/masonry/builders/fixtures/with_url_dependency/with_url_dependency/000077500000000000000000000000001475444614500324035ustar00rootroot00000000000000__init__.py000066400000000000000000000000001475444614500344230ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/with_url_dependency/with_url_dependencypoetry-core-2.1.1/tests/masonry/builders/fixtures/with_vcs_dependency/000077500000000000000000000000001475444614500263415ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/with_vcs_dependency/pyproject.toml000066400000000000000000000012011475444614500312470ustar00rootroot00000000000000[tool.poetry]
name = "with-vcs-dependency"
version = "1.2.3"
description = "Some description."
authors = [
    "Sébastien Eustace "
]
license = "MIT"

homepage = "https://python-poetry.org/"
repository = "https://github.com/python-poetry/poetry"
documentation = "https://python-poetry.org/docs"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

# Requirements
[tool.poetry.dependencies]
python = "^3.6"
cleo = { git = "https://github.com/sdispater/cleo.git", branch = "master" }
poetry-core-2.1.1/tests/masonry/builders/fixtures/with_vcs_dependency/with_vcs_dependency/000077500000000000000000000000001475444614500323655ustar00rootroot00000000000000__init__.py000066400000000000000000000000001475444614500344050ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/with_vcs_dependency/with_vcs_dependencypoetry-core-2.1.1/tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/000077500000000000000000000000001475444614500316035ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/my_package/000077500000000000000000000000001475444614500337035ustar00rootroot00000000000000__init__.py000066400000000000000000000000001475444614500357230ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/my_packagepoetry-core-2.1.1/tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/pyproject.toml000066400000000000000000000003541475444614500345210ustar00rootroot00000000000000[tool.poetry]
name = "my-package"
version = "1.2.3"
description = "Some description."
authors = [
    "People Everywhere "
]

[tool.poetry.dependencies]
python = "^3.10"
google-api-python-client = ">=1.8,!=2.0.*"
poetry-core-2.1.1/tests/masonry/builders/test_builder.py000066400000000000000000000246451475444614500235160ustar00rootroot00000000000000from __future__ import annotations

import sys

from email.parser import Parser
from pathlib import Path
from typing import TYPE_CHECKING

import pytest

from poetry.core.factory import Factory
from poetry.core.masonry.builders.builder import Builder


if TYPE_CHECKING:
    from pytest_mock import MockerFixture


def test_building_not_possible_in_non_package_mode() -> None:
    with pytest.raises(RuntimeError) as err:
        Builder(
            Factory().create_poetry(
                Path(__file__).parent.parent.parent / "fixtures" / "non_package_mode"
            )
        )

    assert str(err.value) == "Building a package is not possible in non-package mode."


def test_builder_find_excluded_files(mocker: MockerFixture) -> None:
    mocker.patch("poetry.core.vcs.git.Git.get_ignored_files", return_value=[])

    builder = Builder(
        Factory().create_poetry(Path(__file__).parent / "fixtures" / "complete")
    )

    assert builder.find_excluded_files() == {"my_package/sub_pkg1/extra_file.xml"}


@pytest.mark.xfail(
    sys.platform == "win32",
    reason="Windows is case insensitive for the most part",
)
def test_builder_find_case_sensitive_excluded_files(mocker: MockerFixture) -> None:
    mocker.patch("poetry.core.vcs.git.Git.get_ignored_files", return_value=[])

    builder = Builder(
        Factory().create_poetry(
            Path(__file__).parent / "fixtures" / "case_sensitive_exclusions"
        )
    )

    assert builder.find_excluded_files() == {
        "my_package/FooBar/Bar.py",
        "my_package/FooBar/lowercasebar.py",
        "my_package/Foo/SecondBar.py",
        "my_package/Foo/Bar.py",
        "my_package/Foo/lowercasebar.py",
        "my_package/bar/foo.py",
        "my_package/bar/CapitalFoo.py",
    }


@pytest.mark.xfail(
    sys.platform == "win32",
    reason="Windows is case insensitive for the most part",
)
def test_builder_find_invalid_case_sensitive_excluded_files(
    mocker: MockerFixture,
) -> None:
    mocker.patch("poetry.core.vcs.git.Git.get_ignored_files", return_value=[])

    builder = Builder(
        Factory().create_poetry(
            Path(__file__).parent / "fixtures" / "invalid_case_sensitive_exclusions"
        )
    )

    assert {"my_package/Bar/foo/bar/Foo.py"} == builder.find_excluded_files()


def test_get_metadata_content() -> None:
    builder = Builder(
        Factory().create_poetry(Path(__file__).parent / "fixtures" / "complete")
    )

    metadata = builder.get_metadata_content()

    p = Parser()
    parsed = p.parsestr(metadata)

    assert parsed["Metadata-Version"] == "2.3"
    assert parsed["Name"] == "my-package"
    assert parsed["Version"] == "1.2.3"
    assert parsed["Summary"] == "Some description."
    assert parsed["Author"] == "Sébastien Eustace"
    assert parsed["Author-email"] == "sebastien@eustace.io"
    assert parsed["Keywords"] == "packaging,dependency,poetry"
    assert parsed["Requires-Python"] == ">=3.6,<4.0"
    assert parsed["License"] == "MIT"
    assert parsed["Home-page"] is None

    classifiers = parsed.get_all("Classifier")
    assert classifiers == [
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Programming Language :: Python :: 3.13",
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ]

    extras = parsed.get_all("Provides-Extra")
    assert extras == ["time"]

    requires = parsed.get_all("Requires-Dist")
    assert requires == [
        "cachy[msgpack] (>=0.2.0,<0.3.0)",
        "cleo (>=0.6,<0.7)",
        (
            'pendulum (>=1.4,<2.0) ; (python_version ~= "2.7" and sys_platform =='
            ' "win32" or python_version in "3.4 3.5") and (extra == "time")'
        ),
    ]

    urls = parsed.get_all("Project-URL")
    assert urls == [
        "Documentation, https://python-poetry.org/docs",
        "Homepage, https://python-poetry.org/",
        "Issue Tracker, https://github.com/python-poetry/poetry/issues",
        "Repository, https://github.com/python-poetry/poetry",
    ]


def test_metadata_pretty_name() -> None:
    builder = Builder(
        Factory().create_poetry(Path(__file__).parent / "fixtures" / "Pretty.Name")
    )

    metadata = Parser().parsestr(builder.get_metadata_content())

    assert metadata["Name"] == "Pretty.Name"


def test_metadata_homepage_default() -> None:
    builder = Builder(
        Factory().create_poetry(Path(__file__).parent / "fixtures" / "simple_version")
    )

    metadata = Parser().parsestr(builder.get_metadata_content())

    assert metadata["Home-page"] is None


@pytest.mark.parametrize("license_type", ["file", "text", "str"])
def test_metadata_license_type_file(license_type: str) -> None:
    project_path = (
        Path(__file__).parent.parent.parent
        / "fixtures"
        / f"with_license_type_{license_type}"
    )
    builder = Builder(Factory().create_poetry(project_path))

    if license_type == "file":
        license_text = (project_path / "LICENSE").read_text(encoding="utf-8")
    elif license_type == "text":
        license_text = (
            (project_path / "pyproject.toml")
            .read_text(encoding="utf-8")
            .split('"""')[1]
        )
    elif license_type == "str":
        license_text = "MIT"
    else:
        raise RuntimeError("unexpected license type")

    raw_content = builder.get_metadata_content()
    metadata = Parser().parsestr(raw_content)

    license_lines = metadata["License"].splitlines()
    unindented_license = "\n".join([line.strip() for line in license_lines])
    assert unindented_license == license_text.rstrip()

    # Check that field after "license" is read correctly
    assert raw_content.index("License:") < raw_content.index("Keywords:")
    assert metadata["Keywords"] == "special"


def test_metadata_with_vcs_dependencies() -> None:
    builder = Builder(
        Factory().create_poetry(
            Path(__file__).parent / "fixtures" / "with_vcs_dependency"
        )
    )

    metadata = Parser().parsestr(builder.get_metadata_content())

    requires_dist = metadata["Requires-Dist"]

    assert requires_dist == "cleo @ git+https://github.com/sdispater/cleo.git@master"


def test_metadata_with_url_dependencies() -> None:
    builder = Builder(
        Factory().create_poetry(
            Path(__file__).parent / "fixtures" / "with_url_dependency"
        )
    )

    metadata = Parser().parsestr(builder.get_metadata_content())

    requires_dist = metadata["Requires-Dist"]

    assert (
        requires_dist == "demo @"
        " https://python-poetry.org/distributions/demo-0.1.0-py2.py3-none-any.whl"
    )


def test_missing_script_files_throws_error() -> None:
    builder = Builder(
        Factory().create_poetry(
            Path(__file__).parent / "fixtures" / "script_reference_file_missing"
        )
    )

    with pytest.raises(RuntimeError) as err:
        builder.convert_script_files()

    assert "is not found." in str(err.value)


def test_invalid_script_files_definition() -> None:
    with pytest.raises(RuntimeError) as err:
        Builder(
            Factory().create_poetry(
                Path(__file__).parent
                / "fixtures"
                / "script_reference_file_invalid_definition"
            )
        )

    assert "configuration is invalid" in str(err.value)
    assert "scripts.invalid_definition" in str(err.value)


@pytest.mark.parametrize(
    "fixture, result",
    [
        (
            "script_callable_legacy_string",
            {"console_scripts": ["script-legacy = my_package:main"]},
        ),
        (
            "script_reference_console",
            {
                "console_scripts": [
                    "extra-script = my_package.extra:main",
                    "script = my_package.extra:main",
                ]
            },
        ),
        (
            "script_reference_file",
            {},
        ),
    ],
)
def test_builder_convert_entry_points(
    fixture: str, result: dict[str, list[str]]
) -> None:
    entry_points = Builder(
        Factory().create_poetry(Path(__file__).parent / "fixtures" / fixture)
    ).convert_entry_points()
    assert entry_points == result


@pytest.mark.parametrize(
    "fixture, result",
    [
        (
            "script_callable_legacy_string",
            [],
        ),
        (
            "script_reference_console",
            [],
        ),
        (
            "script_reference_file",
            [Path("bin") / "script.sh"],
        ),
    ],
)
def test_builder_convert_script_files(fixture: str, result: list[Path]) -> None:
    project_root = Path(__file__).parent / "fixtures" / fixture
    script_files = Builder(Factory().create_poetry(project_root)).convert_script_files()
    assert [p.relative_to(project_root) for p in script_files] == result


def test_metadata_with_readme_files() -> None:
    test_path = Path(__file__).parent.parent.parent / "fixtures" / "with_readme_files"
    builder = Builder(Factory().create_poetry(test_path))

    metadata = Parser().parsestr(builder.get_metadata_content())

    readme1 = test_path / "README-1.rst"
    readme2 = test_path / "README-2.rst"
    description = "\n".join(
        [readme1.read_text(encoding="utf-8"), readme2.read_text(encoding="utf-8"), ""]
    )

    assert metadata.get_payload() == description


def test_metadata_with_wildcard_dependency_constraint() -> None:
    test_path = (
        Path(__file__).parent / "fixtures" / "with_wildcard_dependency_constraint"
    )
    builder = Builder(Factory().create_poetry(test_path))

    metadata = Parser().parsestr(builder.get_metadata_content())

    requires = metadata.get_all("Requires-Dist")
    assert requires == ["google-api-python-client (>=1.8,!=2.0.*)"]


@pytest.mark.parametrize(
    ["local_version", "expected_version"],
    [
        ("", "1.2.3"),
        ("some-label", "1.2.3+some-label"),
    ],
)
def test_builder_apply_local_version_label(
    local_version: str, expected_version: str
) -> None:
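    # the "local-version" config setting appends a PEP 440 local version
    # label ("+<label>") to the built package's version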
    builder = Builder(
        Factory().create_poetry(Path(__file__).parent / "fixtures" / "complete"),
        config_settings={"local-version": local_version},
    )

    assert builder._poetry.package.version.text == expected_version
poetry-core-2.1.1/tests/masonry/builders/test_complete.py000066400000000000000000000322171475444614500236720ustar00rootroot00000000000000from __future__ import annotations

import csv
import platform
import re
import shutil
import sys
import tarfile
import tempfile
import zipfile

from pathlib import Path
from typing import TYPE_CHECKING

import pytest

from poetry.core import __version__
from poetry.core.factory import Factory
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.core.masonry.builders.wheel import WheelBuilder
from tests.masonry.builders.test_wheel import WHEEL_TAG_REGEX


if TYPE_CHECKING:
    from collections.abc import Iterator

    from pytest_mock import MockerFixture

fixtures_dir = Path(__file__).parent / "fixtures"


@pytest.fixture(autouse=True)
def setup() -> Iterator[None]:
    clear_samples_dist()

    yield

    clear_samples_dist()


def clear_samples_dist() -> None:
    for dist in fixtures_dir.glob("**/dist"):
        if dist.is_dir():
            shutil.rmtree(str(dist))


@pytest.mark.skipif(
    platform.python_implementation().lower() == "pypy", reason="Disable test for PyPy"
)
@pytest.mark.parametrize(
    ["project", "exptected_c_dir"],
    [
        ("extended", "extended"),
        ("extended_with_no_setup", "extended"),
        ("src_extended", "src/extended"),
    ],
)
def test_wheel_c_extension(project: str, expected_c_dir: str) -> None:
    module_path = fixtures_dir / project
    poetry = Factory().create_poetry(module_path)
    SdistBuilder(poetry).build()
    WheelBuilder(poetry).build()

    sdist = fixtures_dir / project / "dist" / "extended-0.1.tar.gz"
    assert sdist.exists()

    with tarfile.open(sdist, "r") as tar:
        assert "extended-0.1/build.py" in tar.getnames()
        assert f"extended-0.1/{exptected_c_dir}/extended.c" in tar.getnames()

    whl = next(iter((module_path / "dist").glob("extended-0.1-cp*-cp*-*.whl")))
    assert whl.exists()

    with zipfile.ZipFile(whl) as zipf:
        has_compiled_extension = False
        for name in zipf.namelist():
            if name.startswith("extended/extended") and name.endswith((".so", ".pyd")):
                has_compiled_extension = True
        assert has_compiled_extension

        wheel_data = zipf.read("extended-0.1.dist-info/WHEEL").decode()
        assert (
            re.match(
                f"""(?m)^\
Wheel-Version: 1.0
Generator: poetry-core {__version__}
Root-Is-Purelib: false
Tag: {WHEEL_TAG_REGEX}
$""",
                wheel_data,
            )
            is not None
        )

        record = zipf.read("extended-0.1.dist-info/RECORD").decode()
        records = csv.reader(record.splitlines())
        record_files = [row[0] for row in records]
        assert re.search(r"\s+extended/extended.*\.(so|pyd)", record) is not None

        # Files in RECORD should match files in wheel.
        assert zipf.namelist() == record_files
        assert len(set(record_files)) == len(record_files)


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
@pytest.mark.parametrize("no_vcs", [False, True])
def test_complete(project: str, no_vcs: bool) -> None:
    module_path = fixtures_dir / project

    if no_vcs:
        # Copy the project fixture to a temporary directory (so it has no VCS metadata)
        temporary_dir = Path(tempfile.mkdtemp()) / project
        shutil.copytree(module_path.as_posix(), temporary_dir.as_posix())
        module_path = temporary_dir

    poetry = Factory().create_poetry(module_path)
    SdistBuilder(poetry).build()
    WheelBuilder(poetry).build()

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    assert whl.exists()
    if sys.platform != "win32":
        assert (whl.stat().st_mode & 0o777) == 0o644

    expected_name_list = [
        "my_package/__init__.py",
        "my_package/data1/test.json",
        "my_package/sub_pkg1/__init__.py",
        "my_package/sub_pkg2/__init__.py",
        "my_package/sub_pkg2/data2/data.json",
        "my_package/sub_pkg3/foo.py",
        "my_package-1.2.3.data/scripts/script.sh",
        *sorted(
            [
                "my_package-1.2.3.dist-info/entry_points.txt",
                "my_package-1.2.3.dist-info/LICENSE",
                "my_package-1.2.3.dist-info/METADATA",
                "my_package-1.2.3.dist-info/WHEEL",
                "my_package-1.2.3.dist-info/COPYING",
                "my_package-1.2.3.dist-info/LICENCE",
                "my_package-1.2.3.dist-info/AUTHORS",
            ],
            key=lambda x: Path(x),
        ),
        "my_package-1.2.3.dist-info/RECORD",
    ]

    with zipfile.ZipFile(str(whl)) as zipf:
        assert zipf.namelist() == expected_name_list
        assert (
            "Hello World"
            in zipf.read("my_package-1.2.3.data/scripts/script.sh").decode()
        )

        entry_points = zipf.read("my_package-1.2.3.dist-info/entry_points.txt")

        assert (
            entry_points.decode()
            == """\
[console_scripts]
extra-script=my_package.extra:main
my-2nd-script=my_package:main2
my-script=my_package:main

[poetry.application.plugin]
my-command=my_package.plugins:MyApplicationPlugin

"""
        )
        wheel_data = zipf.read("my_package-1.2.3.dist-info/WHEEL").decode()

        assert (
            wheel_data
            == f"""\
Wheel-Version: 1.0
Generator: poetry-core {__version__}
Root-Is-Purelib: true
Tag: py3-none-any
"""
        )
        wheel_data = zipf.read("my_package-1.2.3.dist-info/METADATA").decode()

        assert (
            wheel_data
            == """\
Metadata-Version: 2.3
Name: my-package
Version: 1.2.3
Summary: Some description.
License: MIT
Keywords: packaging,dependency,poetry
Author: Sébastien Eustace
Author-email: sebastien@eustace.io
Maintainer: People Everywhere
Maintainer-email: people@everywhere.com
Requires-Python: >=3.6,<4.0
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Provides-Extra: time
Requires-Dist: cachy[msgpack] (>=0.2.0,<0.3.0)
Requires-Dist: cleo (>=0.6,<0.7)
Requires-Dist: pendulum (>=1.4,<2.0) ; (python_version ~= "2.7"\
 and sys_platform == "win32" or python_version in "3.4 3.5") and (extra == "time")
Project-URL: Documentation, https://python-poetry.org/docs
Project-URL: Homepage, https://python-poetry.org/
Project-URL: Issue Tracker, https://github.com/python-poetry/poetry/issues
Project-URL: Repository, https://github.com/python-poetry/poetry
Description-Content-Type: text/x-rst

My Package
==========

"""
        )
        actual_records = zipf.read("my_package-1.2.3.dist-info/RECORD").decode()

        # The SHA hashes vary across operating systems, so instead of a 1:1
        # comparison of RECORD we only compare the list of file names:
        actual_files = [row[0] for row in csv.reader(actual_records.splitlines())]

        assert actual_files == expected_name_list


def test_module_src() -> None:
    module_path = fixtures_dir / "source_file"
    poetry = Factory().create_poetry(module_path)
    SdistBuilder(poetry).build()
    WheelBuilder(poetry).build()

    sdist = module_path / "dist" / "module_src-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "module_src-0.1/src/module_src.py" in tar.getnames()

    whl = module_path / "dist" / "module_src-0.1-py2.py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as zipf:
        assert "module_src.py" in zipf.namelist()


def test_package_src() -> None:
    module_path = fixtures_dir / "source_package"
    poetry = Factory().create_poetry(module_path)
    SdistBuilder(poetry).build()
    WheelBuilder(poetry).build()

    sdist = module_path / "dist" / "package_src-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "package_src-0.1/src/package_src/module.py" in tar.getnames()

    whl = module_path / "dist" / "package_src-0.1-py2.py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as zipf:
        assert "package_src/__init__.py" in zipf.namelist()
        assert "package_src/module.py" in zipf.namelist()


def test_split_source() -> None:
    module_path = fixtures_dir / "split_source"
    poetry = Factory().create_poetry(module_path)
    SdistBuilder(poetry).build()
    WheelBuilder(poetry).build()

    sdist = module_path / "dist" / "split_source-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "split_source-0.1/lib_a/module_a/__init__.py" in tar.getnames()
        assert "split_source-0.1/lib_b/module_b/__init__.py" in tar.getnames()

    whl = module_path / "dist" / "split_source-0.1-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as zipf:
        assert "module_a/__init__.py" in zipf.namelist()
        assert "module_b/__init__.py" in zipf.namelist()


def test_package_with_include(mocker: MockerFixture) -> None:
    module_path = fixtures_dir / "with-include"

    # Patch git module to return specific excluded files
    mocker.patch(
        "poetry.core.vcs.git.Git.get_ignored_files",
        return_value=["extra_dir/vcs_excluded.py", "extra_dir/sub_pkg/vcs_excluded.py"],
    )

    poetry = Factory().create_poetry(module_path)
    SdistBuilder(poetry).build()
    WheelBuilder(poetry).build()

    sdist = fixtures_dir / "with-include" / "dist" / "with_include-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        names = tar.getnames()
        assert len(names) == len(set(names))
        assert "with_include-1.2.3/LICENSE" in names
        assert "with_include-1.2.3/README.rst" in names
        assert "with_include-1.2.3/extra_dir/__init__.py" in names
        assert "with_include-1.2.3/extra_dir/vcs_excluded.py" in names
        assert "with_include-1.2.3/extra_dir/sub_pkg/__init__.py" in names
        assert "with_include-1.2.3/extra_dir/sub_pkg/vcs_excluded.py" not in names
        assert "with_include-1.2.3/my_module.py" in names
        assert "with_include-1.2.3/notes.txt" in names
        assert "with_include-1.2.3/package_with_include/__init__.py" in names
        assert "with_include-1.2.3/tests/__init__.py" in names
        assert "with_include-1.2.3/pyproject.toml" in names
        assert "with_include-1.2.3/PKG-INFO" in names
        assert "with_include-1.2.3/for_wheel_only/__init__.py" not in names
        assert "with_include-1.2.3/src/src_package/__init__.py" in names
        assert "with_include-1.2.3/etc/from_to/__init__.py" in names

    whl = module_path / "dist" / "with_include-1.2.3-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        names = z.namelist()
        assert len(names) == len(set(names))
        assert "with_include-1.2.3.dist-info/LICENSE" in names
        assert "extra_dir/__init__.py" in names
        assert "extra_dir/vcs_excluded.py" in names
        assert "extra_dir/sub_pkg/__init__.py" in names
        assert "extra_dir/sub_pkg/vcs_excluded.py" not in names
        assert "for_wheel_only/__init__.py" in names
        assert "my_module.py" in names
        assert "notes.txt" not in names
        assert "package_with_include/__init__.py" in names
        assert "tests/__init__.py" not in names
        assert "src_package/__init__.py" in names
        assert "target_from_to/from_to/__init__.py" in names
        assert "target_module/my_module_to.py" in names


def test_respect_format_for_explicit_included_files() -> None:
    module_path = fixtures_dir / "exclude-whl-include-sdist"
    poetry = Factory().create_poetry(module_path)
    SdistBuilder(poetry).build()
    WheelBuilder(poetry).build()

    sdist = module_path / "dist" / "exclude_whl_include_sdist-0.1.0.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        names = tar.getnames()
        assert (
            "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/__init__.py"
            in names
        )
        assert (
            "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/compiled/source.c"
            in names
        )
        assert (
            "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/compiled/source.h"
            in names
        )
        assert (
            "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/cython_code.pyx"
            in names
        )
        assert "exclude_whl_include_sdist-0.1.0/pyproject.toml" in names
        assert "exclude_whl_include_sdist-0.1.0/PKG-INFO" in names

    whl = module_path / "dist" / "exclude_whl_include_sdist-0.1.0-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        names = z.namelist()
        assert "exclude_whl_include_sdist/__init__.py" in names
        assert "exclude_whl_include_sdist/compiled/source.c" not in names
        assert "exclude_whl_include_sdist/compiled/source.h" not in names
        assert "exclude_whl_include_sdist/cython_code.pyx" not in names
poetry-core-2.1.1/tests/masonry/builders/test_metadata.py000066400000000000000000000013261475444614500236370ustar00rootroot00000000000000from __future__ import annotations

import pytest

from poetry.core.masonry.metadata import Metadata
from poetry.core.packages.project_package import ProjectPackage


@pytest.mark.parametrize(
    ("requires_python", "python", "expected"),
    [
        (">=3.8", None, ">=3.8"),
        (None, "^3.8", ">=3.8,<4.0"),
        (">=3.8", "^3.8", ">=3.8"),
    ],
)
def test_requires_python(
    requires_python: str | None, python: str | None, expected: str
) -> None:
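    # An explicit requires_python takes precedence over the constraint derived
    # from python_versions (which is otherwise converted, e.g. ^3.8 -> >=3.8,<4.0).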
    package = ProjectPackage("foo", "1")
    if requires_python:
        package.requires_python = requires_python
    if python:
        package.python_versions = python

    meta = Metadata.from_package(package)

    assert meta.requires_python == expected
poetry-core-2.1.1/tests/masonry/builders/test_sdist.py000066400000000000000000000606431475444614500232140ustar00rootroot00000000000000from __future__ import annotations

import ast
import gzip
import hashlib
import logging
import shutil
import tarfile

from email.parser import Parser
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any

import pytest

from packaging.utils import canonicalize_name

from poetry.core.factory import Factory
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.core.masonry.utils.package_include import PackageInclude
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.project_package import ProjectPackage
from poetry.core.packages.vcs_dependency import VCSDependency


if TYPE_CHECKING:
    from collections.abc import Iterator

    from pytest import LogCaptureFixture
    from pytest import MonkeyPatch
    from pytest_mock import MockerFixture

fixtures_dir = Path(__file__).parent / "fixtures"


@pytest.fixture(autouse=True)
def setup() -> Iterator[None]:
    clear_samples_dist()

    yield

    clear_samples_dist()


def clear_samples_dist() -> None:
    for dist in fixtures_dir.glob("**/dist"):
        if dist.is_dir():
            shutil.rmtree(str(dist))


def project(name: str) -> Path:
    return Path(__file__).parent / "fixtures" / name


def test_convert_dependencies() -> None:
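    # Caret and tilde constraints are expanded to PEP 508 ranges:
    # ^1.0 -> >=1.0,<2.0 and ~1.0 -> >=1.0,<1.1 (see the expected lists below).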
    package = ProjectPackage("foo", "1.2.3")
    result = SdistBuilder.convert_dependencies(
        package,
        [
            Dependency("A", "^1.0"),
            Dependency("B", "~1.0"),
            Dependency("C", "1.2.3"),
            VCSDependency("D", "git", "https://github.com/sdispater/d.git"),
            Dependency("E", "^1.0"),
            Dependency("F", "^1.0,!=1.3"),
        ],
    )
    main = [
        "A>=1.0,<2.0",
        "B>=1.0,<1.1",
        "C==1.2.3",
        "D @ git+https://github.com/sdispater/d.git",
        "E>=1.0,<2.0",
        "F>=1.0,<2.0,!=1.3",
    ]
    extras: dict[str, Any] = {}

    assert result == (main, extras)

    package = ProjectPackage("foo", "1.2.3")
    package.extras = {canonicalize_name("bar"): [Dependency("A", "*")]}

    result = SdistBuilder.convert_dependencies(
        package,
        [
            Dependency("A", ">=1.2", optional=True),
            Dependency("B", "~1.0"),
            Dependency("C", "1.2.3"),
        ],
    )
    main = ["B>=1.0,<1.1", "C==1.2.3"]
    extras = {"bar": ["A>=1.2"]}

    assert result == (main, extras)

    c = Dependency("C", "1.2.3")
    c.python_versions = "~2.7 || ^3.6"
    d = Dependency("D", "3.4.5", optional=True)
    d.python_versions = "~2.7 || ^3.4"

    package.extras = {canonicalize_name("baz"): [Dependency("D", "*")]}

    result = SdistBuilder.convert_dependencies(
        package,
        [Dependency("A", ">=1.2", optional=True), Dependency("B", "~1.0"), c, d],
    )
    main = ["B>=1.0,<1.1"]

    extra_python = (
        ':python_version == "2.7" or python_version >= "3.6" and python_version < "4.0"'
    )
    extra_d_dependency = (
        'baz:python_version == "2.7" '
        'or python_version >= "3.4" and python_version < "4.0"'
    )
    extras = {extra_python: ["C==1.2.3"], extra_d_dependency: ["D==3.4.5"]}

    assert result == (main, extras)


@pytest.mark.parametrize(
    "project_name", ["complete", "complete_new", "complete_dynamic"]
)
def test_make_setup(project_name: str) -> None:
    poetry = Factory().create_poetry(project(project_name))

    builder = SdistBuilder(poetry)
    setup = builder.build_setup()
    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns: dict[str, Any] = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
    assert ns["packages"] == [
        "my_package",
        "my_package.sub_pkg1",
        "my_package.sub_pkg2",
        "my_package.sub_pkg3",
    ]
    assert ns["install_requires"] == ["cachy[msgpack]>=0.2.0,<0.3.0", "cleo>=0.6,<0.7"]
    assert ns["entry_points"] == {
        "console_scripts": [
            "extra-script = my_package.extra:main",
            "my-2nd-script = my_package:main2",
            "my-script = my_package:main",
        ],
        "poetry.application.plugin": [
            "my-command = my_package.plugins:MyApplicationPlugin"
        ],
    }
    assert ns["scripts"] == [str(Path("bin") / "script.sh")]
    assert ns["extras_require"] == {
        'time:python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5"': [
            "pendulum>=1.4,<2.0"
        ]
    }


@pytest.mark.parametrize(
    "project_name", ["complete", "complete_new", "complete_dynamic"]
)
def test_make_pkg_info(project_name: str, mocker: MockerFixture) -> None:
    get_metadata_content = mocker.patch(
        "poetry.core.masonry.builders.builder.Builder.get_metadata_content"
    )
    poetry = Factory().create_poetry(project(project_name))

    builder = SdistBuilder(poetry)
    builder.build_pkg_info()

    assert get_metadata_content.called


def test_make_pkg_info_any_python() -> None:
    poetry = Factory().create_poetry(project("module1"))

    builder = SdistBuilder(poetry)
    pkg_info = builder.build_pkg_info()
    p = Parser()
    parsed = p.parsestr(pkg_info.decode())

    assert "Requires-Python" not in parsed


@pytest.mark.parametrize(
    "project_name", ["complete", "complete_new", "complete_dynamic"]
)
def test_find_files_to_add(project_name: str) -> None:
    poetry = Factory().create_poetry(project(project_name))

    builder = SdistBuilder(poetry)
    result = {f.relative_to_source_root() for f in builder.find_files_to_add()}

    assert result == {
        Path("AUTHORS"),
        Path("COPYING"),
        Path("LICENCE"),
        Path("LICENSE"),
        Path("README.rst"),
        Path("bin/script.sh"),
        Path("my_package/__init__.py"),
        Path("my_package/data1/test.json"),
        Path("my_package/sub_pkg1/__init__.py"),
        Path("my_package/sub_pkg2/__init__.py"),
        Path("my_package/sub_pkg2/data2/data.json"),
        Path("my_package/sub_pkg3/foo.py"),
        Path("pyproject.toml"),
    }


def test_find_files_to_add_with_multiple_readme_files() -> None:
    poetry = Factory().create_poetry(
        Path(__file__).parent.parent.parent / "fixtures" / "with_readme_files"
    )

    builder = SdistBuilder(poetry)
    result = {f.relative_to_source_root() for f in builder.find_files_to_add()}

    assert result == {
        Path("README-1.rst"),
        Path("README-2.rst"),
        Path("my_package/__init__.py"),
        Path("pyproject.toml"),
    }


def test_make_pkg_info_multi_constraints_dependency() -> None:
    poetry = Factory().create_poetry(
        Path(__file__).parent.parent.parent
        / "fixtures"
        / "project_with_multi_constraints_dependency"
    )

    builder = SdistBuilder(poetry)
    pkg_info = builder.build_pkg_info()
    p = Parser()
    parsed = p.parsestr(pkg_info.decode())

    requires = parsed.get_all("Requires-Dist")
    assert requires == [
        'pendulum (>=1.5,<2.0) ; python_version < "3.4"',
        'pendulum (>=2.0,<3.0) ; python_version >= "3.4" and python_version < "4.0"',
    ]


@pytest.mark.parametrize(
    "project_name", ["complete", "complete_new", "complete_dynamic"]
)
def test_find_packages(project_name: str) -> None:
    poetry = Factory().create_poetry(project(project_name))

    builder = SdistBuilder(poetry)

    base = project(project_name)
    include = PackageInclude(base, "my_package", formats=["sdist"])

    pkg_dir, packages, pkg_data = builder.find_packages(include)

    assert pkg_dir is None
    assert packages == [
        "my_package",
        "my_package.sub_pkg1",
        "my_package.sub_pkg2",
        "my_package.sub_pkg3",
    ]
    assert pkg_data == {
        "": ["*"],
        "my_package": ["data1/*"],
        "my_package.sub_pkg2": ["data2/*"],
    }

    poetry = Factory().create_poetry(project("source_package"))

    builder = SdistBuilder(poetry)

    base = project("source_package")
    include = PackageInclude(base, "package_src", source="src", formats=["sdist"])

    pkg_dir, packages, pkg_data = builder.find_packages(include)

    assert pkg_dir == str(base / "src")
    assert packages == ["package_src"]
    assert pkg_data == {"": ["*"]}


@pytest.mark.parametrize(
    "project_name", ["complete", "complete_new", "complete_dynamic"]
)
def test_package(project_name: str) -> None:
    poetry = Factory().create_poetry(project(project_name))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / project_name / "dist" / "my_package-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "my_package-1.2.3/LICENSE" in tar.getnames()


@pytest.mark.parametrize("target_dir", [None, "dist", "dist/build"])
def test_package_target_dir(tmp_path: Path, target_dir: str | None) -> None:
    poetry = Factory().create_poetry(project("complete"))

    builder = SdistBuilder(poetry)
    builder.build(target_dir=tmp_path / target_dir if target_dir else None)

    sdist = (
        tmp_path / target_dir if target_dir else fixtures_dir / "complete" / "dist"
    ) / "my_package-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "my_package-1.2.3/LICENSE" in tar.getnames()


@pytest.mark.parametrize(
    "project_name", ["complete", "complete_new", "complete_dynamic"]
)
def test_sdist_reproducibility(project_name: str) -> None:
    poetry = Factory().create_poetry(project(project_name))

    hashes = set()
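    # building the same project twice must yield byte-identical archives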

    for _ in range(2):
        builder = SdistBuilder(poetry)
        builder.build()

        sdist = fixtures_dir / project_name / "dist" / "my_package-1.2.3.tar.gz"

        assert sdist.exists()

        hashes.add(hashlib.sha256(sdist.read_bytes()).hexdigest())

    assert len(hashes) == 1


@pytest.mark.parametrize(
    "project_name", ["complete", "complete_new", "complete_dynamic"]
)
def test_setup_py_context(project_name: str) -> None:
    poetry = Factory().create_poetry(project(project_name))

    builder = SdistBuilder(poetry)

    project_setup_py = poetry.pyproject_path.parent / "setup.py"

    assert not project_setup_py.exists()

    try:
        with builder.setup_py() as setup:
            assert setup.exists()
            assert project_setup_py == setup

            with setup.open(mode="rb") as f:
                # we convert to string and replace line endings here for compatibility
                data = f.read().decode().replace("\r\n", "\n")
                assert data == builder.build_setup().decode()

        assert not project_setup_py.exists()
    finally:
        if project_setup_py.exists():
            project_setup_py.unlink()


def test_module() -> None:
    poetry = Factory().create_poetry(project("module1"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "module1" / "dist" / "module1-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "module1-0.1/module1.py" in tar.getnames()


def test_prerelease() -> None:
    poetry = Factory().create_poetry(project("prerelease"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "prerelease" / "dist" / "prerelease-0.1b1.tar.gz"

    assert sdist.exists()


@pytest.mark.parametrize("directory", ["extended", "extended_legacy_config"])
def test_with_c_extensions(directory: str) -> None:
    poetry = Factory().create_poetry(project("extended"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "extended" / "dist" / "extended-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "extended-0.1/build.py" in tar.getnames()
        assert "extended-0.1/extended/extended.c" in tar.getnames()


def test_with_c_extensions_src_layout() -> None:
    poetry = Factory().create_poetry(project("src_extended"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "src_extended" / "dist" / "extended-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "extended-0.1/build.py" in tar.getnames()
        assert "extended-0.1/src/extended/extended.c" in tar.getnames()


def test_with_build_script_in_subdir() -> None:
    poetry = Factory().create_poetry(project("build_script_in_subdir"))

    builder = SdistBuilder(poetry)
    setup = builder.build_setup()
    # should not error
    ast.parse(setup)


def test_with_src_module_file() -> None:
    poetry = Factory().create_poetry(project("source_file"))

    builder = SdistBuilder(poetry)

    # Check setup.py
    setup = builder.build_setup()
    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns: dict[str, Any] = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
    assert ns["package_dir"] == {"": "src"}
    assert ns["modules"] == ["module_src"]

    builder.build()

    sdist = fixtures_dir / "source_file" / "dist" / "module_src-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "module_src-0.1/src/module_src.py" in tar.getnames()


def test_with_src_module_dir() -> None:
    poetry = Factory().create_poetry(project("source_package"))

    builder = SdistBuilder(poetry)

    # Check setup.py
    setup = builder.build_setup()
    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns: dict[str, Any] = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
    assert ns["package_dir"] == {"": "src"}
    assert ns["packages"] == ["package_src"]

    builder.build()

    sdist = fixtures_dir / "source_package" / "dist" / "package_src-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "package_src-0.1/src/package_src/__init__.py" in tar.getnames()
        assert "package_src-0.1/src/package_src/module.py" in tar.getnames()


def test_default_with_excluded_data(mocker: MockerFixture) -> None:
    mocker.patch(
        "poetry.core.vcs.git.Git.get_ignored_files",
        return_value=["my_package/data/sub_data/data2.txt"],
    )
    poetry = Factory().create_poetry(project("default_with_excluded_data"))

    builder = SdistBuilder(poetry)

    # Check setup.py
    setup = builder.build_setup()
    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns: dict[str, Any] = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
    assert "package_dir" not in ns
    assert ns["packages"] == ["my_package"]
    assert ns["package_data"] == {
        "": ["*"],
        "my_package": ["data/*", "data/sub_data/data3.txt"],
    }

    builder.build()

    sdist = (
        fixtures_dir / "default_with_excluded_data" / "dist" / "my_package-1.2.3.tar.gz"
    )

    assert sdist.exists()
    with tarfile.open(str(sdist), "r") as tar:
        names = tar.getnames()
        assert len(names) == len(set(names))
        assert "my_package-1.2.3/LICENSE" in names
        assert "my_package-1.2.3/README.rst" in names
        assert "my_package-1.2.3/my_package/__init__.py" in names
        assert "my_package-1.2.3/my_package/data/data1.txt" in names
        assert "my_package-1.2.3/pyproject.toml" in names
        assert "my_package-1.2.3/PKG-INFO" in names


def test_src_excluded_nested_data() -> None:
    module_path = fixtures_dir / "exclude_nested_data_toml"
    poetry = Factory().create_poetry(module_path)

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = module_path / "dist" / "my_package-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        names = tar.getnames()
        assert len(names) == len(set(names))
        assert "my_package-1.2.3/LICENSE" in names
        assert "my_package-1.2.3/README.rst" in names
        assert "my_package-1.2.3/pyproject.toml" in names
        assert "my_package-1.2.3/PKG-INFO" in names
        assert "my_package-1.2.3/my_package/__init__.py" in names
        assert "my_package-1.2.3/my_package/data/sub_data/data2.txt" not in names
        assert "my_package-1.2.3/my_package/data/sub_data/data3.txt" not in names
        assert "my_package-1.2.3/my_package/data/data1.txt" not in names
        assert "my_package-1.2.3/my_package/data/data2.txt" in names
        assert "my_package-1.2.3/my_package/puplic/publicdata.txt" in names
        assert "my_package-1.2.3/my_package/public/item1/itemdata1.txt" not in names
        assert (
            "my_package-1.2.3/my_package/public/item1/subitem/subitemdata.txt"
            not in names
        )
        assert "my_package-1.2.3/my_package/public/item2/itemdata2.txt" not in names


def test_proper_python_requires_if_two_digits_precision_version_specified() -> None:
    poetry = Factory().create_poetry(project("simple_version"))

    builder = SdistBuilder(poetry)
    pkg_info = builder.build_pkg_info()
    p = Parser()
    parsed = p.parsestr(pkg_info.decode())

    assert parsed["Requires-Python"] == ">=3.6,<3.7"


def test_proper_python_requires_if_three_digits_precision_version_specified() -> None:
    poetry = Factory().create_poetry(project("single_python"))

    builder = SdistBuilder(poetry)
    pkg_info = builder.build_pkg_info()
    p = Parser()
    parsed = p.parsestr(pkg_info.decode())

    assert parsed["Requires-Python"] == "==2.7.15"


def test_sdist_does_not_include_pycache_and_pyc_files(
    complete_with_pycache_and_pyc_files: Path,
) -> None:
    poetry = Factory().create_poetry(complete_with_pycache_and_pyc_files)

    builder = SdistBuilder(poetry)

    builder.build()

    sdist = complete_with_pycache_and_pyc_files / "dist" / "my_package-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        for name in tar.getnames():
            assert "__pycache__" not in name
            assert not name.endswith(".pyc")


def test_includes() -> None:
    poetry = Factory().create_poetry(project("with-include"))

    builder = SdistBuilder(poetry)

    builder.build()

    sdist = fixtures_dir / "with-include" / "dist" / "with_include-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "with_include-1.2.3/extra_dir/vcs_excluded.py" in tar.getnames()
        assert "with_include-1.2.3/notes.txt" in tar.getnames()


def test_include_formats() -> None:
    poetry = Factory().create_poetry(project("with-include-formats"))

    builder = SdistBuilder(poetry)

    builder.build()

    sdist = fixtures_dir / "with-include-formats" / "dist" / "with_include-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        # packages
        assert "with_include-1.2.3/src/mod_default.py" in tar.getnames()
        assert "with_include-1.2.3/src/mod_sdist_only.py" in tar.getnames()
        assert "with_include-1.2.3/src/mod_wheel_only.py" not in tar.getnames()
        assert "with_include-1.2.3/src/mod_both.py" in tar.getnames()
        assert "with_include-1.2.3/src/pkg_default/__init__.py" in tar.getnames()
        assert "with_include-1.2.3/src/pkg_default/sub/__init__.py" in tar.getnames()
        assert "with_include-1.2.3/src/pkg_sdist_only/__init__.py" in tar.getnames()
        assert "with_include-1.2.3/src/pkg_sdist_only/sub/__init__.py" in tar.getnames()
        assert "with_include-1.2.3/src/pkg_wheel_only/__init__.py" not in tar.getnames()
        assert (
            "with_include-1.2.3/src/pkg_wheel_only/sub/__init__.py"
            not in tar.getnames()
        )
        assert "with_include-1.2.3/src/pkg_both/__init__.py" in tar.getnames()
        assert "with_include-1.2.3/src/pkg_both/sub/__init__.py" in tar.getnames()
        # other includes
        assert "with_include-1.2.3/default.txt" in tar.getnames()
        assert "with_include-1.2.3/sdist_only.txt" in tar.getnames()
        assert "with_include-1.2.3/wheel_only.txt" not in tar.getnames()
        assert "with_include-1.2.3/both.txt" in tar.getnames()
        assert "with_include-1.2.3/default/file.txt" in tar.getnames()
        assert "with_include-1.2.3/default/sub/file.txt" in tar.getnames()
        assert "with_include-1.2.3/sdist_only/file.txt" in tar.getnames()
        assert "with_include-1.2.3/sdist_only/sub/file.txt" in tar.getnames()
        assert "with_include-1.2.3/wheel_only/file.txt" not in tar.getnames()
        assert "with_include-1.2.3/wheel_only/sub/file.txt" not in tar.getnames()
        assert "with_include-1.2.3/both/file.txt" in tar.getnames()
        assert "with_include-1.2.3/both/sub/file.txt" in tar.getnames()


def test_excluded_subpackage() -> None:
    poetry = Factory().create_poetry(project("excluded_subpackage"))

    builder = SdistBuilder(poetry)
    setup = builder.build_setup()

    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns: dict[str, Any] = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)

    assert ns["packages"] == ["example"]


def test_sdist_package_pep_561_stub_only() -> None:
    root = fixtures_dir / "pep_561_stub_only"
    poetry = Factory().create_poetry(root)

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = root / "dist" / "pep_561_stubs-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        names = tar.getnames()
        assert "pep_561_stubs-0.1/pkg-stubs/__init__.pyi" in names
        assert "pep_561_stubs-0.1/pkg-stubs/module.pyi" in names
        assert "pep_561_stubs-0.1/pkg-stubs/subpkg/__init__.pyi" in names


def test_sdist_disable_setup_py() -> None:
    module_path = fixtures_dir / "disable_setup_py"
    poetry = Factory().create_poetry(module_path)

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = module_path / "dist" / "my_package-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert set(tar.getnames()) == {
            "my_package-1.2.3/README.rst",
            "my_package-1.2.3/pyproject.toml",
            "my_package-1.2.3/PKG-INFO",
            "my_package-1.2.3/my_package/__init__.py",
        }


def test_sdist_mtime_zero() -> None:
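    # For reproducible sdists the gzip header mtime should be 0
    # (assuming SOURCE_DATE_EPOCH is not set in the test environment).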
    poetry = Factory().create_poetry(project("module1"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "module1" / "dist" / "module1-0.1.tar.gz"

    assert sdist.exists()

    with gzip.open(str(sdist), "rb") as gz:
        gz.read(100)
        assert gz.mtime == 0


def test_split_source() -> None:
    root = fixtures_dir / "split_source"
    poetry = Factory().create_poetry(root)

    builder = SdistBuilder(poetry)

    # Check setup.py
    setup = builder.build_setup()
    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns: dict[str, Any] = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
    assert "" in ns["package_dir"] and "module_b" in ns["package_dir"]


@pytest.mark.parametrize("log_level", [logging.INFO, logging.DEBUG])
def test_sdist_members_mtime_default(caplog: LogCaptureFixture, log_level: int) -> None:
    caplog.set_level(log_level)
    poetry = Factory().create_poetry(project("module1"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "module1" / "dist" / "module1-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        for tarinfo in tar.getmembers():
            assert tarinfo.mtime == 0

    source_data_epoch_message = (
        "SOURCE_DATE_EPOCH environment variable is not set, using mtime=0"
    )
    if log_level == logging.DEBUG:
        assert source_data_epoch_message in caplog.messages
    else:
        assert source_data_epoch_message not in caplog.messages


def test_sdist_mtime_set_from_envvar(monkeypatch: MonkeyPatch) -> None:
    monkeypatch.setenv("SOURCE_DATE_EPOCH", "1727883000")
    poetry = Factory().create_poetry(project("module1"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "module1" / "dist" / "module1-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        for tarinfo in tar.getmembers():
            assert tarinfo.mtime == 1727883000


def test_sdist_mtime_set_from_envvar_not_int(
    monkeypatch: MonkeyPatch, caplog: LogCaptureFixture
) -> None:
    monkeypatch.setenv("SOURCE_DATE_EPOCH", "october")
    poetry = Factory().create_poetry(project("module1"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "module1" / "dist" / "module1-0.1.tar.gz"

    assert sdist.exists()

    assert (
        "SOURCE_DATE_EPOCH environment variable is not an int, using mtime=0"
    ) in caplog.messages
poetry-core-2.1.1/tests/masonry/builders/test_wheel.py000066400000000000000000000472751475444614500232000ustar00rootroot00000000000000from __future__ import annotations

import importlib.machinery
import logging
import os
import re
import shutil
import zipfile

from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import TextIO

import pytest

from poetry.core.factory import Factory
from poetry.core.masonry.builders.wheel import WheelBuilder
from tests.masonry.builders.test_sdist import project


if TYPE_CHECKING:
    from collections.abc import Iterator

    from pytest import LogCaptureFixture
    from pytest import MonkeyPatch
    from pytest_mock import MockerFixture

fixtures_dir = Path(__file__).parent / "fixtures"


WHEEL_TAG_REGEX = "[cp]p[23]_?\\d+-(?:cp[23]_?\\d+m?u?|pypy[23]_?\\d+_pp\\d+)-.+"
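# The regex above matches a {python tag}-{abi tag} pair such as "cp311-cp311"
# or "pp310-pypy310_pp73", followed by an arbitrary platform tag.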


shared_lib_extensions = importlib.machinery.EXTENSION_SUFFIXES


@pytest.fixture(autouse=True)
def setup() -> Iterator[None]:
    clear_samples_dist()
    clear_samples_build()

    yield

    clear_samples_dist()
    clear_samples_build()


def clear_samples_dist() -> None:
    for dist in fixtures_dir.glob("**/dist"):
        if dist.is_dir():
            shutil.rmtree(str(dist))


def clear_samples_build() -> None:
    for build in fixtures_dir.glob("**/build"):
        if build.is_dir():
            shutil.rmtree(str(build))
    for suffix in shared_lib_extensions:
        for shared_lib in fixtures_dir.glob(f"**/*{suffix}"):
            shared_lib.unlink()


def test_wheel_module() -> None:
    module_path = fixtures_dir / "module1"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "module1-0.1-py2.py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "module1.py" in z.namelist()


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_wheel_package(project: str) -> None:
    module_path = fixtures_dir / project
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "my_package/sub_pkg1/__init__.py" in z.namelist()


@pytest.mark.parametrize("target_dir", [None, "dist", "dist/build"])
def test_wheel_package_target_dir(tmp_path: Path, target_dir: str | None) -> None:
    module_path = fixtures_dir / "complete"

    WheelBuilder.make_in(
        Factory().create_poetry(module_path),
        directory=tmp_path / target_dir if target_dir else None,
    )

    whl = (
        tmp_path / target_dir if target_dir else module_path / "dist"
    ) / "my_package-1.2.3-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "my_package/sub_pkg1/__init__.py" in z.namelist()


def test_wheel_prerelease() -> None:
    module_path = fixtures_dir / "prerelease"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "prerelease-0.1b1-py2.py3-none-any.whl"

    assert whl.exists()


def test_wheel_epoch() -> None:
    module_path = fixtures_dir / "epoch"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "epoch-1!2.0-py2.py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "epoch-1!2.0.dist-info/METADATA" in z.namelist()


def test_wheel_does_not_include_pycache_and_pyc_files(
    complete_with_pycache_and_pyc_files: Path,
) -> None:
    WheelBuilder.make_in(Factory().create_poetry(complete_with_pycache_and_pyc_files))

    whl = (
        complete_with_pycache_and_pyc_files / "dist"
    ) / "my_package-1.2.3-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        for name in z.namelist():
            assert "__pycache__" not in name
            assert not name.endswith(".pyc")


def test_wheel_excluded_data() -> None:
    module_path = fixtures_dir / "default_with_excluded_data_toml"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "my_package/__init__.py" in z.namelist()
        assert "my_package/data/sub_data/data2.txt" in z.namelist()
        assert "my_package/data/sub_data/data3.txt" in z.namelist()
        assert "my_package/data/data1.txt" not in z.namelist()


def test_wheel_excluded_nested_data() -> None:
    module_path = fixtures_dir / "exclude_nested_data_toml"
    poetry = Factory().create_poetry(module_path)
    WheelBuilder.make(poetry)

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "my_package/__init__.py" in z.namelist()
        assert "my_package/data/sub_data/data2.txt" not in z.namelist()
        assert "my_package/data/sub_data/data3.txt" not in z.namelist()
        assert "my_package/data/data1.txt" not in z.namelist()
        assert "my_package/data/data2.txt" in z.namelist()
        assert "my_package/puplic/publicdata.txt" in z.namelist()
        assert "my_package/public/item1/itemdata1.txt" not in z.namelist()
        assert "my_package/public/item1/subitem/subitemdata.txt" not in z.namelist()
        assert "my_package/public/item2/itemdata2.txt" not in z.namelist()


def test_include_excluded_code() -> None:
    module_path = fixtures_dir / "include_excluded_code"
    poetry = Factory().create_poetry(module_path)
    wb = WheelBuilder(poetry)
    wb.build()
    whl = module_path / "dist" / wb.wheel_filename
    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "my_package/__init__.py" in z.namelist()
        assert "my_package/generated.py" in z.namelist()
        assert "lib/my_package/generated.py" not in z.namelist()


def test_wheel_localversionlabel() -> None:
    module_path = fixtures_dir / "localversionlabel"
    project = Factory().create_poetry(module_path)
    WheelBuilder.make(project)
    local_version_string = "localversionlabel-0.1b1+gitbranch.buildno.1"
    whl = module_path / "dist" / (local_version_string + "-py2.py3-none-any.whl")

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert local_version_string + ".dist-info/METADATA" in z.namelist()


def test_wheel_package_src() -> None:
    module_path = fixtures_dir / "source_package"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "package_src-0.1-py2.py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "package_src/__init__.py" in z.namelist()
        assert "package_src/module.py" in z.namelist()


def test_wheel_module_src() -> None:
    module_path = fixtures_dir / "source_file"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "module_src-0.1-py2.py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "module_src.py" in z.namelist()


def test_wheel_build_script_creates_package() -> None:
    module_path = fixtures_dir / "build_script_creates_package"
    WheelBuilder.make(Factory().create_poetry(module_path))

    # Currently, if a build.py script is used,
    # poetry just assumes the most specific tags
    whl = next((module_path / "dist").glob("my_package-0.1-*.whl"))

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "my_package/__init__.py" in z.namelist()
        assert "my_package/foo.py" in z.namelist()

    shutil.rmtree(module_path / "my_package")


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_dist_info_file_permissions(project: str) -> None:
    module_path = fixtures_dir / project
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    with zipfile.ZipFile(str(whl)) as z:
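        # ZipInfo.external_attr stores the POSIX file mode in its upper 16 bits;
        # masking with 0x1FF0000 (0o777 << 16) isolates the permission bits.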
        assert (
            z.getinfo("my_package-1.2.3.dist-info/WHEEL").external_attr & 0x1FF0000
            == 0o644 << 16
        )
        assert (
            z.getinfo("my_package-1.2.3.dist-info/METADATA").external_attr & 0x1FF0000
            == 0o644 << 16
        )
        assert (
            z.getinfo("my_package-1.2.3.dist-info/RECORD").external_attr & 0x1FF0000
            == 0o644 << 16
        )
        assert (
            z.getinfo("my_package-1.2.3.dist-info/entry_points.txt").external_attr
            & 0x1FF0000
            == 0o644 << 16
        )


def test_wheel_include_formats() -> None:
    module_path = fixtures_dir / "with-include-formats"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "with_include-1.2.3-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        # packages
        assert "mod_default.py" in z.namelist()
        assert "mod_sdist_only.py" not in z.namelist()
        assert "mod_wheel_only.py" in z.namelist()
        assert "mod_both.py" in z.namelist()
        assert "pkg_default/__init__.py" in z.namelist()
        assert "pkg_default/sub/__init__.py" in z.namelist()
        assert "pkg_sdist_only/__init__.py" not in z.namelist()
        assert "pkg_sdist_only/sub/__init__.py" not in z.namelist()
        assert "pkg_wheel_only/__init__.py" in z.namelist()
        assert "pkg_wheel_only/sub/__init__.py" in z.namelist()
        assert "pkg_both/__init__.py" in z.namelist()
        assert "pkg_both/sub/__init__.py" in z.namelist()
        # other includes
        assert "default.txt" not in z.namelist()
        assert "sdist_only.txt" not in z.namelist()
        assert "wheel_only.txt" in z.namelist()
        assert "both.txt" in z.namelist()
        assert "default/file.txt" not in z.namelist()
        assert "default/sub/file.txt" not in z.namelist()
        assert "sdist_only/file.txt" not in z.namelist()
        assert "sdist_only/sub/file.txt" not in z.namelist()
        assert "wheel_only/file.txt" in z.namelist()
        assert "wheel_only/sub/file.txt" in z.namelist()
        assert "both/file.txt" in z.namelist()
        assert "both/sub/file.txt" in z.namelist()


@pytest.mark.parametrize(
    "package",
    ["pep_561_stub_only", "pep_561_stub_only_partial", "pep_561_stub_only_src"],
)
def test_wheel_package_pep_561_stub_only(package: str) -> None:
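    # PEP 561 stub-only packages are distributed under the "<package>-stubs" name.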
    root = fixtures_dir / package
    WheelBuilder.make(Factory().create_poetry(root))

    whl = root / "dist" / "pep_561_stubs-0.1-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "pkg-stubs/__init__.pyi" in z.namelist()
        assert "pkg-stubs/module.pyi" in z.namelist()
        assert "pkg-stubs/subpkg/__init__.pyi" in z.namelist()


def test_wheel_package_pep_561_stub_only_partial_namespace() -> None:
    root = fixtures_dir / "pep_561_stub_only_partial_namespace"
    WheelBuilder.make(Factory().create_poetry(root))

    whl = root / "dist" / "pep_561_stubs-0.1-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "pkg-stubs/module.pyi" in z.namelist()
        assert "pkg-stubs/subpkg/__init__.pyi" in z.namelist()
        assert "pkg-stubs/subpkg/py.typed" in z.namelist()


def test_wheel_package_pep_561_stub_only_includes_typed_marker() -> None:
    root = fixtures_dir / "pep_561_stub_only_partial"
    WheelBuilder.make(Factory().create_poetry(root))

    whl = root / "dist" / "pep_561_stubs-0.1-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "pkg-stubs/py.typed" in z.namelist()


def test_wheel_includes_licenses_in_correct_paths() -> None:
    root = fixtures_dir / "licenses_and_copying"
    WheelBuilder.make(Factory().create_poetry(root))

    whl = root / "dist" / "my_package-1.2.3-py3-none-any.whl"

    assert whl.exists()
    with zipfile.ZipFile(str(whl)) as z:
        assert "my_package-1.2.3.dist-info/COPYING" in z.namelist()
        assert "my_package-1.2.3.dist-info/COPYING.txt" in z.namelist()
        assert "my_package-1.2.3.dist-info/LICENSE" in z.namelist()
        assert "my_package-1.2.3.dist-info/LICENSE.md" in z.namelist()
        assert "my_package-1.2.3.dist-info/LICENSES/CUSTOM-LICENSE" in z.namelist()
        assert "my_package-1.2.3.dist-info/LICENSES/BSD-3.md" in z.namelist()
        assert "my_package-1.2.3.dist-info/LICENSES/MIT.txt" in z.namelist()


def test_wheel_with_file_with_comma() -> None:
    root = fixtures_dir / "comma_file"
    WheelBuilder.make(Factory().create_poetry(root))

    whl = root / "dist" / "comma_file-1.2.3-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
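        # RECORD is a CSV file, so a path containing a comma must be quoted.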
        records = z.read("comma_file-1.2.3.dist-info/RECORD")
        assert '\n"comma_file/a,b.py"' in records.decode()


def test_default_src_with_excluded_data(mocker: MockerFixture) -> None:
    mocker.patch(
        "poetry.core.vcs.git.Git.get_ignored_files",
        return_value=["src/my_package/data/sub_data/data2.txt"],
    )
    poetry = Factory().create_poetry(project("default_src_with_excluded_data"))

    builder = WheelBuilder(poetry)
    builder.build()

    whl = (
        fixtures_dir
        / "default_src_with_excluded_data"
        / "dist"
        / "my_package-1.2.3-py3-none-any.whl"
    )

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        names = z.namelist()
        assert "my_package/__init__.py" in names
        assert "my_package/data/data1.txt" in names
        assert "my_package/data/sub_data/data2.txt" not in names
        assert "my_package/data/sub_data/data3.txt" in names


def test_wheel_file_is_closed(monkeypatch: MonkeyPatch) -> None:
    """Confirm that wheel zip files are explicitly closed."""

    # A one-element list lets the inner function record the file object
    # (originally a hack for Python 2.7 compatibility; nonlocal would also work today).
    fd_file: list[TextIO | None] = [None]

    real_fdopen = os.fdopen

    def capturing_fdopen(*args: Any, **kwargs: Any) -> TextIO | None:
        fd_file[0] = real_fdopen(*args, **kwargs)
        return fd_file[0]

    monkeypatch.setattr(os, "fdopen", capturing_fdopen)

    module_path = fixtures_dir / "module1"
    WheelBuilder.make(Factory().create_poetry(module_path))

    assert fd_file[0] is not None
    assert fd_file[0].closed


@pytest.mark.parametrize("in_venv_build", [True, False])
def test_tag(in_venv_build: bool, mocker: MockerFixture) -> None:
    """Tests that tag returns a valid tag if a build script is used,
    no matter if poetry-core lives inside the build environment or not.
    """
    root = fixtures_dir / "extended"
    builder = WheelBuilder(Factory().create_poetry(root))

    get_sys_tags_spy = mocker.spy(builder, "_get_sys_tags")
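    # Patching sys.executable simulates poetry-core living outside the build
    # environment, forcing the builder to fall back to _get_sys_tags().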
    if not in_venv_build:
        mocker.patch("sys.executable", "other/python")

    assert re.match(f"^{WHEEL_TAG_REGEX}$", builder.tag)
    if in_venv_build:
        get_sys_tags_spy.assert_not_called()
    else:
        get_sys_tags_spy.assert_called()


def test_extended_editable_wheel_build() -> None:
    """Tests that an editable wheel made from a project with extensions includes
    the .pth, but does not include the built package itself.
    """
    root = fixtures_dir / "extended"
    WheelBuilder.make_in(Factory().create_poetry(root), editable=True)

    whl = next((root / "dist").glob("extended-0.1-*.whl"))

    assert whl.exists()
    with zipfile.ZipFile(str(whl)) as z:
        assert "extended.pth" in z.namelist()
        # Ensure the directory "extended/" does not exist in the whl
        assert all(not n.startswith("extended/") for n in z.namelist())


def test_extended_editable_build_inplace() -> None:
    """Tests that a project with extensions builds the extension modules in-place
    when ran for an editable install.
    """
    root = fixtures_dir / "extended"
    WheelBuilder.make_in(Factory().create_poetry(root), editable=True)

    # Check that an extension module with one of the allowed suffixes was built in-place
    assert any(
        (root / "extended" / f"extended{ext}").exists() for ext in shared_lib_extensions
    )


def test_build_py_only_included() -> None:
    """Tests that a build.py that only defined the command build_py (which generates a
    lib folder) will have its artifacts included.
    """
    root = fixtures_dir / "build_with_build_py_only"
    WheelBuilder.make(Factory().create_poetry(root))

    whl = next((root / "dist").glob("build_with_build_py_only-0.1-*.whl"))

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "build_with_build_py_only/generated/file.py" in z.namelist()


def test_generated_script_file(tmp_path: Path) -> None:
    """Tests that a file that is generated by build.py can be used as script."""
    root = fixtures_dir / "generated_script_file"
    # test only works on a fresh root without already generated script file:
    tmp_root = tmp_path / "generated_script_file"
    shutil.copytree(root, tmp_root)

    WheelBuilder.make(Factory().create_poetry(tmp_root))

    whl = next((tmp_root / "dist").glob("generated_script_file-0.1-*.whl"))

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        assert "generated_script_file-0.1.data/scripts/script.sh" in z.namelist()


@pytest.mark.parametrize("log_level", [logging.INFO, logging.DEBUG])
def test_dist_info_date_time_default_value(
    caplog: LogCaptureFixture, log_level: int
) -> None:
    caplog.set_level(log_level)
    module_path = fixtures_dir / "complete"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    default_date_time = (2016, 1, 1, 0, 0, 0)
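    # With SOURCE_DATE_EPOCH unset, all zip entries should carry this fixed
    # fallback date.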

    with zipfile.ZipFile(str(whl)) as z:
        assert (
            z.getinfo("my_package-1.2.3.dist-info/WHEEL").date_time == default_date_time
        )

    source_date_epoch_message = (
        "SOURCE_DATE_EPOCH environment variable not set,"
        f" setting zipinfo date to default={default_date_time}"
    )
    if log_level == logging.DEBUG:
        assert source_date_epoch_message in caplog.messages
    else:
        assert source_date_epoch_message not in caplog.messages


def test_dist_info_date_time_value_from_envvar(monkeypatch: MonkeyPatch) -> None:
    monkeypatch.setenv("SOURCE_DATE_EPOCH", "1727883000")
    expected_date_time = (2024, 10, 2, 15, 30, 0)
    module_path = fixtures_dir / "complete"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    with zipfile.ZipFile(str(whl)) as z:
        assert (
            z.getinfo("my_package-1.2.3.dist-info/WHEEL").date_time
            == expected_date_time
        )


def test_dist_info_date_time_value_from_envvar_not_int(
    monkeypatch: MonkeyPatch, caplog: LogCaptureFixture
) -> None:
    monkeypatch.setenv("SOURCE_DATE_EPOCH", "october")
    module_path = fixtures_dir / "complete"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    default_date_time = (2016, 1, 1, 0, 0, 0)

    with zipfile.ZipFile(str(whl)) as z:
        assert (
            z.getinfo("my_package-1.2.3.dist-info/WHEEL").date_time == default_date_time
        )

    assert (
        "SOURCE_DATE_EPOCH environment variable value"
        f" is not an int, setting zipinfo date to default={default_date_time}"
    ) in caplog.messages


def test_dist_info_date_time_value_from_envvar_older_than_1980(
    monkeypatch: MonkeyPatch, caplog: LogCaptureFixture
) -> None:
    monkeypatch.setenv("SOURCE_DATE_EPOCH", "1000")
    module_path = fixtures_dir / "complete"
    WheelBuilder.make(Factory().create_poetry(module_path))

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    default_date_time = (2016, 1, 1, 0, 0, 0)

    with zipfile.ZipFile(str(whl)) as z:
        assert (
            z.getinfo("my_package-1.2.3.dist-info/WHEEL").date_time == default_date_time
        )

    assert (
        "zipinfo date can't be earlier than 1980,"
        f" setting zipinfo date to default={default_date_time}"
    ) in caplog.messages
poetry-core-2.1.1/tests/masonry/test_api.py000066400000000000000000000363421475444614500210250ustar00rootroot00000000000000from __future__ import annotations

import os
import zipfile

from contextlib import contextmanager
from pathlib import Path
from typing import TYPE_CHECKING

import pytest

from poetry.core import __version__
from poetry.core.masonry import api
from poetry.core.utils.helpers import temporary_directory
from tests.testutils import validate_sdist_contents
from tests.testutils import validate_wheel_contents


if TYPE_CHECKING:
    from collections.abc import Iterator

    from pytest import LogCaptureFixture


@contextmanager
def cwd(directory: str | Path) -> Iterator[None]:
    prev = Path.cwd()
    os.chdir(str(directory))
    try:
        yield
    finally:
        os.chdir(prev)


fixtures = Path(__file__).parent / "builders" / "fixtures"


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_get_requires_for_build_wheel(project: str) -> None:
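    # These fixtures declare no extra build requirements, so the PEP 517 hook
    # returns an empty list.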
    expected: list[str] = []
    with cwd(fixtures / project):
        assert api.get_requires_for_build_wheel() == expected


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_get_requires_for_build_sdist(project: str) -> None:
    expected: list[str] = []
    with cwd(fixtures / project):
        assert api.get_requires_for_build_sdist() == expected


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_build_wheel(project: str) -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / project):
        filename = api.build_wheel(str(tmp_dir))
        validate_wheel_contents(
            name="my_package",
            version="1.2.3",
            path=tmp_dir / filename,
            files=["entry_points.txt"],
        )


def test_build_wheel_with_local_version() -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "complete"):
        filename = api.build_wheel(
            str(tmp_dir), config_settings={"local-version": "some-label"}
        )
        validate_wheel_contents(
            name="my_package",
            version="1.2.3+some-label",
            path=tmp_dir / filename,
            files=["entry_points.txt"],
        )


def test_build_wheel_with_include() -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "with-include"):
        filename = api.build_wheel(str(tmp_dir))
        validate_wheel_contents(
            name="with_include",
            version="1.2.3",
            path=tmp_dir / filename,
            files=["entry_points.txt"],
        )


def test_build_wheel_with_bad_path_dev_dep_succeeds() -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "with_bad_path_dev_dep"):
        api.build_wheel(str(tmp_dir))


def test_build_wheel_with_bad_path_dep_succeeds(caplog: LogCaptureFixture) -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "with_bad_path_dep"):
        api.build_wheel(str(tmp_dir))
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert "does not exist" in record.message


def test_build_wheel_extended() -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "extended"):
        filename = api.build_wheel(str(tmp_dir))
        whl = Path(tmp_dir) / filename
        assert whl.exists()
        validate_wheel_contents(name="extended", version="0.1", path=whl)


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_build_sdist(project: str) -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / project):
        filename = api.build_sdist(str(tmp_dir))
        validate_sdist_contents(
            name="my-package",
            version="1.2.3",
            path=tmp_dir / filename,
            files=["LICENSE"],
        )


def test_build_sdist_with_local_version() -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "complete"):
        filename = api.build_sdist(
            str(tmp_dir), config_settings={"local-version": "some-label"}
        )
        validate_sdist_contents(
            name="my-package",
            version="1.2.3+some-label",
            path=tmp_dir / filename,
            files=["LICENSE"],
        )


def test_build_sdist_with_include() -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "with-include"):
        filename = api.build_sdist(str(tmp_dir))
        validate_sdist_contents(
            name="with-include",
            version="1.2.3",
            path=tmp_dir / filename,
            files=["LICENSE"],
        )


def test_build_sdist_with_bad_path_dev_dep_succeeds() -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "with_bad_path_dev_dep"):
        api.build_sdist(str(tmp_dir))


def test_build_sdist_with_bad_path_dep_succeeds(caplog: LogCaptureFixture) -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "with_bad_path_dep"):
        api.build_sdist(str(tmp_dir))
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert "does not exist" in record.message


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_prepare_metadata_for_build_wheel(project: str) -> None:
    entry_points = """\
[console_scripts]
extra-script=my_package.extra:main
my-2nd-script=my_package:main2
my-script=my_package:main

[poetry.application.plugin]
my-command=my_package.plugins:MyApplicationPlugin

"""
    wheel_data = f"""\
Wheel-Version: 1.0
Generator: poetry-core {__version__}
Root-Is-Purelib: true
Tag: py3-none-any
"""
    metadata = """\
Metadata-Version: 2.3
Name: my-package
Version: 1.2.3
Summary: Some description.
License: MIT
Keywords: packaging,dependency,poetry
Author: Sébastien Eustace
Author-email: sebastien@eustace.io
Maintainer: People Everywhere
Maintainer-email: people@everywhere.com
Requires-Python: >=3.6,<4.0
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Provides-Extra: time
Requires-Dist: cachy[msgpack] (>=0.2.0,<0.3.0)
Requires-Dist: cleo (>=0.6,<0.7)
Requires-Dist: pendulum (>=1.4,<2.0) ; (python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5") and (extra == "time")
Project-URL: Documentation, https://python-poetry.org/docs
Project-URL: Homepage, https://python-poetry.org/
Project-URL: Issue Tracker, https://github.com/python-poetry/poetry/issues
Project-URL: Repository, https://github.com/python-poetry/poetry
Description-Content-Type: text/x-rst

My Package
==========

"""
    with temporary_directory() as tmp_dir, cwd(fixtures / project):
        dirname = api.prepare_metadata_for_build_wheel(str(tmp_dir))

        assert dirname == "my_package-1.2.3.dist-info"

        dist_info = Path(tmp_dir, dirname)

        assert (dist_info / "entry_points.txt").exists()
        assert (dist_info / "WHEEL").exists()
        assert (dist_info / "METADATA").exists()

        with (dist_info / "entry_points.txt").open(encoding="utf-8") as f:
            assert f.read() == entry_points

        with (dist_info / "WHEEL").open(encoding="utf-8") as f:
            assert f.read() == wheel_data

        with (dist_info / "METADATA").open(encoding="utf-8") as f:
            assert f.read() == metadata


def test_prepare_metadata_for_build_wheel_with_local_version() -> None:
    local_version = "some-label"
    entry_points = """\
[console_scripts]
extra-script=my_package.extra:main
my-2nd-script=my_package:main2
my-script=my_package:main

[poetry.application.plugin]
my-command=my_package.plugins:MyApplicationPlugin

"""
    wheel_data = f"""\
Wheel-Version: 1.0
Generator: poetry-core {__version__}
Root-Is-Purelib: true
Tag: py3-none-any
"""
    metadata = f"""\
Metadata-Version: 2.3
Name: my-package
Version: 1.2.3+{local_version}
Summary: Some description.
License: MIT
Keywords: packaging,dependency,poetry
Author: Sébastien Eustace
Author-email: sebastien@eustace.io
Maintainer: People Everywhere
Maintainer-email: people@everywhere.com
Requires-Python: >=3.6,<4.0
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Provides-Extra: time
Requires-Dist: cachy[msgpack] (>=0.2.0,<0.3.0)
Requires-Dist: cleo (>=0.6,<0.7)
Requires-Dist: pendulum (>=1.4,<2.0) ; (python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5") and (extra == "time")
Project-URL: Documentation, https://python-poetry.org/docs
Project-URL: Homepage, https://python-poetry.org/
Project-URL: Issue Tracker, https://github.com/python-poetry/poetry/issues
Project-URL: Repository, https://github.com/python-poetry/poetry
Description-Content-Type: text/x-rst

My Package
==========

"""
    with temporary_directory() as tmp_dir, cwd(fixtures / "complete"):
        dirname = api.prepare_metadata_for_build_wheel(
            str(tmp_dir), config_settings={"local-version": local_version}
        )

        assert dirname == f"my_package-1.2.3+{local_version}.dist-info"

        dist_info = Path(tmp_dir, dirname)

        assert (dist_info / "entry_points.txt").exists()
        assert (dist_info / "WHEEL").exists()
        assert (dist_info / "METADATA").exists()

        with (dist_info / "entry_points.txt").open(encoding="utf-8") as f:
            assert f.read() == entry_points

        with (dist_info / "WHEEL").open(encoding="utf-8") as f:
            assert f.read() == wheel_data

        with (dist_info / "METADATA").open(encoding="utf-8") as f:
            assert f.read() == metadata


def test_prepare_metadata_excludes_optional_without_extras() -> None:
    with (
        temporary_directory() as tmp_dir,
        cwd(fixtures / "with_optional_without_extras"),
    ):
        dirname = api.prepare_metadata_for_build_wheel(str(tmp_dir))
        dist_info = Path(tmp_dir, dirname)
        assert (dist_info / "METADATA").exists()

        with (dist_info / "METADATA").open(encoding="utf-8") as f:
            assert (
                f.read()
                == """\
Metadata-Version: 2.3
Name: my-packager
Version: 0.1
Summary: Something
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Provides-Extra: grpc
Provides-Extra: http
Requires-Dist: grpcio (>=0.2.0,<0.3.0) ; extra == "grpc"
Requires-Dist: httpx (>=0.28.1,<0.29.0) ; extra == "http"
Requires-Dist: pycowsay (>=0.1.0,<0.2.0)
"""
            )


def test_prepare_metadata_for_build_wheel_with_bad_path_dev_dep_succeeds() -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "with_bad_path_dev_dep"):
        api.prepare_metadata_for_build_wheel(str(tmp_dir))


def test_prepare_metadata_for_build_wheel_with_bad_path_dep_succeeds(
    caplog: LogCaptureFixture,
) -> None:
    with temporary_directory() as tmp_dir, cwd(fixtures / "with_bad_path_dep"):
        api.prepare_metadata_for_build_wheel(str(tmp_dir))
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert "does not exist" in record.message


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_build_editable_wheel(project: str) -> None:
    pkg_dir = fixtures / project
    with temporary_directory() as tmp_dir, cwd(pkg_dir):
        filename = api.build_editable(str(tmp_dir))
        wheel_pth = Path(tmp_dir) / filename

        validate_wheel_contents(
            name="my_package",
            version="1.2.3",
            path=wheel_pth,
        )

        with zipfile.ZipFile(wheel_pth) as z:
            namelist = z.namelist()

            assert "my_package.pth" in namelist
            assert z.read("my_package.pth").decode().strip() == pkg_dir.as_posix()


def test_build_editable_wheel_with_local_version() -> None:
    pkg_dir = fixtures / "complete"
    with temporary_directory() as tmp_dir, cwd(pkg_dir):
        filename = api.build_editable(
            str(tmp_dir), config_settings={"local-version": "some-label"}
        )
        wheel_pth = Path(tmp_dir) / filename

        validate_wheel_contents(
            name="my_package",
            version="1.2.3+some-label",
            path=wheel_pth,
        )


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_build_wheel_with_metadata_directory(project: str) -> None:
    pkg_dir = fixtures / project

    with temporary_directory() as metadata_tmp_dir, cwd(pkg_dir):
        metadata_directory = api.prepare_metadata_for_build_wheel(str(metadata_tmp_dir))

        with temporary_directory() as wheel_tmp_dir:
            dist_info_path = Path(metadata_tmp_dir) / metadata_directory
            (dist_info_path / "CUSTOM").touch()
            filename = api.build_wheel(
                str(wheel_tmp_dir), metadata_directory=str(dist_info_path)
            )
            wheel_pth = Path(wheel_tmp_dir) / filename

            validate_wheel_contents(
                name="my_package",
                version="1.2.3",
                path=wheel_pth,
                files=["entry_points.txt"],
            )

            with zipfile.ZipFile(wheel_pth) as z:
                namelist = z.namelist()

                assert f"{metadata_directory}/CUSTOM" in namelist


@pytest.mark.parametrize("project", ["complete", "complete_new", "complete_dynamic"])
def test_build_editable_wheel_with_metadata_directory(project: str) -> None:
    pkg_dir = fixtures / project

    with temporary_directory() as metadata_tmp_dir, cwd(pkg_dir):
        metadata_directory = api.prepare_metadata_for_build_editable(
            str(metadata_tmp_dir)
        )

        with temporary_directory() as wheel_tmp_dir:
            dist_info_path = Path(metadata_tmp_dir) / metadata_directory
            (dist_info_path / "CUSTOM").touch()
            filename = api.build_editable(
                str(wheel_tmp_dir), metadata_directory=str(dist_info_path)
            )
            wheel_pth = Path(wheel_tmp_dir) / filename

            validate_wheel_contents(
                name="my_package",
                version="1.2.3",
                path=wheel_pth,
                files=["entry_points.txt"],
            )

            with zipfile.ZipFile(wheel_pth) as z:
                namelist = z.namelist()

                assert "my_package.pth" in namelist
                assert z.read("my_package.pth").decode().strip() == pkg_dir.as_posix()
                assert f"{metadata_directory}/CUSTOM" in namelist
poetry-core-2.1.1/tests/masonry/test_metadata.py000066400000000000000000000034401475444614500220250ustar00rootroot00000000000000from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING

import pytest

from poetry.core.masonry.metadata import Metadata
from poetry.core.packages.project_package import ProjectPackage


if TYPE_CHECKING:
    from pytest_mock import MockerFixture


def test_from_package_readme(tmp_path: Path) -> None:
    readme_path = tmp_path / "README.md"
    readme_path.write_text("This is a description\néöß", encoding="utf-8")

    package = ProjectPackage("foo", "1.0")
    package.readmes = (readme_path,)

    metadata = Metadata.from_package(package)

    assert metadata.description == "This is a description\néöß"


def test_from_package_multiple_readmes(tmp_path: Path) -> None:
    readme_path1 = tmp_path / "README1.md"
    readme_path1.write_text("Description 1", encoding="utf-8")

    readme_path2 = tmp_path / "README2.md"
    readme_path2.write_text("Description 2", encoding="utf-8")

    package = ProjectPackage("foo", "1.0")
    package.readmes = (readme_path1, readme_path2)

    metadata = Metadata.from_package(package)

    assert metadata.description == "Description 1\nDescription 2"


@pytest.mark.parametrize(
    ("exception", "message"),
    [
        (FileNotFoundError, "Readme path `MyReadme.md` does not exist."),
        (IsADirectoryError, "Readme path `MyReadme.md` is a directory."),
        (PermissionError, "Readme path `MyReadme.md` is not readable."),
    ],
)
def test_from_package_readme_issues(
    mocker: MockerFixture, exception: type[OSError], message: str
) -> None:
    package = ProjectPackage("foo", "1.0")
    package.readmes = (Path("MyReadme.md"),)

    mocker.patch("pathlib.Path.read_text", side_effect=exception)

    with pytest.raises(exception) as e:
        Metadata.from_package(package)

    assert str(e.value) == message
poetry-core-2.1.1/tests/masonry/utils/000077500000000000000000000000001475444614500177735ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/__init__.py000066400000000000000000000000001475444614500220720ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/000077500000000000000000000000001475444614500216445ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only/000077500000000000000000000000001475444614500251215ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only/bad/000077500000000000000000000000001475444614500256475ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only/bad/__init__.pyi000066400000000000000000000000001475444614500301170ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only/bad/module.pyi000066400000000000000000000001231475444614500276530ustar00rootroot00000000000000"""Example module"""
from typing import Tuple

version_info = Tuple[int, int, int]
poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/000077500000000000000000000000001475444614500272075ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/__init__.pyi000066400000000000000000000000001475444614500314570ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/module.pyi000066400000000000000000000001231475444614500312130ustar00rootroot00000000000000"""Example module"""
from typing import Tuple

version_info = Tuple[int, int, int]
poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/000077500000000000000000000000001475444614500306515ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/000077500000000000000000000000001475444614500327375ustar00rootroot00000000000000module.pyi000066400000000000000000000001231475444614500346640ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs"""Example module"""
from typing import Tuple

version_info = Tuple[int, int, int]
subpkg/000077500000000000000000000000001475444614500341535ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs__init__.pyi000066400000000000000000000000001475444614500364230ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/subpkgpy.typed000066400000000000000000000000101475444614500356410ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/subpkgpartial
poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/000077500000000000000000000000001475444614500245055ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/__init__.py000066400000000000000000000000001475444614500266040ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/bar/000077500000000000000000000000001475444614500252515ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/bar/baz.py000066400000000000000000000000001475444614500263650ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/extra_package/000077500000000000000000000000001475444614500273035ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/extra_package/some_dir/000077500000000000000000000000001475444614500311045ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/extra_package/some_dir/foo.py000066400000000000000000000000001475444614500322270ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/extra_package/some_dir/quux.py000066400000000000000000000000001475444614500324460ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/not_a_python_pkg/000077500000000000000000000000001475444614500300475ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/fixtures/with_includes/not_a_python_pkg/baz.txt000066400000000000000000000000001475444614500313520ustar00rootroot00000000000000poetry-core-2.1.1/tests/masonry/utils/test_package_include.py000066400000000000000000000061741475444614500245120ustar00rootroot00000000000000from __future__ import annotations

from pathlib import Path

import pytest

from poetry.core.masonry.utils.package_include import PackageInclude


fixtures_dir = Path(__file__).parent / "fixtures"
with_includes = fixtures_dir / "with_includes"


def test_package_include_with_multiple_dirs() -> None:
    pkg_include = PackageInclude(base=fixtures_dir, include="with_includes", formats=[])
    assert pkg_include.elements == [
        with_includes / "__init__.py",
        with_includes / "bar",
        with_includes / "bar/baz.py",
        with_includes / "extra_package",
        with_includes / "extra_package/some_dir",
        with_includes / "extra_package/some_dir/foo.py",
        with_includes / "extra_package/some_dir/quux.py",
        with_includes / "not_a_python_pkg",
        with_includes / "not_a_python_pkg/baz.txt",
    ]


def test_package_include_with_simple_dir() -> None:
    pkg_include = PackageInclude(base=with_includes, include="bar", formats=[])
    assert pkg_include.elements == [with_includes / "bar/baz.py"]


def test_package_include_with_nested_dir() -> None:
    pkg_include = PackageInclude(
        base=with_includes, include="extra_package/**/*.py", formats=[]
    )
    assert pkg_include.elements == [
        with_includes / "extra_package/some_dir/foo.py",
        with_includes / "extra_package/some_dir/quux.py",
    ]


def test_package_include_with_no_python_files_in_dir() -> None:
    with pytest.raises(ValueError) as e:
        PackageInclude(base=with_includes, include="not_a_python_pkg", formats=[])

    assert str(e.value) == "not_a_python_pkg is not a package."


def test_package_include_with_non_existent_directory() -> None:
    with pytest.raises(ValueError) as e:
        PackageInclude(base=with_includes, include="not_a_dir", formats=[])

    err_str = str(with_includes / "not_a_dir") + " does not contain any element"

    assert str(e.value) == err_str


def test_pep_561_stub_only_package_good_name_suffix() -> None:
    pkg_include = PackageInclude(
        base=fixtures_dir / "pep_561_stub_only", include="good-stubs", formats=[]
    )
    assert pkg_include.elements == [
        fixtures_dir / "pep_561_stub_only/good-stubs/__init__.pyi",
        fixtures_dir / "pep_561_stub_only/good-stubs/module.pyi",
    ]


def test_pep_561_stub_only_partial_namespace_package_good_name_suffix() -> None:
    pkg_include = PackageInclude(
        base=fixtures_dir / "pep_561_stub_only_partial_namespace",
        include="good-stubs",
        formats=[],
    )
    assert pkg_include.elements == [
        fixtures_dir / "pep_561_stub_only_partial_namespace/good-stubs/module.pyi",
        fixtures_dir / "pep_561_stub_only_partial_namespace/good-stubs/subpkg/",
        fixtures_dir
        / "pep_561_stub_only_partial_namespace/good-stubs/subpkg/__init__.pyi",
        fixtures_dir / "pep_561_stub_only_partial_namespace/good-stubs/subpkg/py.typed",
    ]


def test_pep_561_stub_only_package_bad_name_suffix() -> None:
    with pytest.raises(ValueError) as e:
        PackageInclude(
            base=fixtures_dir / "pep_561_stub_only", include="bad", formats=[]
        )

    assert str(e.value) == "bad is not a package."
poetry-core-2.1.1/tests/packages/000077500000000000000000000000001475444614500167215ustar00rootroot00000000000000poetry-core-2.1.1/tests/packages/__init__.py000066400000000000000000000000001475444614500210200ustar00rootroot00000000000000poetry-core-2.1.1/tests/packages/test_dependency.py000066400000000000000000000347121475444614500224570ustar00rootroot00000000000000from __future__ import annotations

import pytest

from packaging.utils import canonicalize_name

from poetry.core.constraints.version.exceptions import ParseConstraintError
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.directory_dependency import DirectoryDependency
from poetry.core.packages.file_dependency import FileDependency
from poetry.core.version.markers import InvalidMarkerError
from poetry.core.version.markers import parse_marker
from poetry.core.version.requirements import InvalidRequirementError


@pytest.mark.parametrize(
    "constraint",
    [
        "^1.0",
        "^1.0.dev0",
        "^1.0.0",
        "^1.0.0.dev0",
        "^1.0.0.alpha0",
        "^1.0.0.alpha0+local",
        "^1.0.0.rc0+local",
        "^1.0.0-1",
    ],
)
@pytest.mark.parametrize("allows_prereleases", [None, False, True])
def test_allows_prerelease(constraint: str, allows_prereleases: bool) -> None:
    dependency = Dependency("A", constraint, allows_prereleases=allows_prereleases)
    assert dependency.allows_prereleases() == allows_prereleases


def test_python_versions_are_made_precise() -> None:
    dependency = Dependency("Django", "^1.23")
    dependency.python_versions = ">3.6,<=3.10"

    assert (
        str(dependency.marker)
        == 'python_full_version > "3.6.0" and python_full_version <= "3.10.0"'
    )
    assert str(dependency.python_constraint) == ">3.6,<=3.10"


def test_to_pep_508() -> None:
    dependency = Dependency("Django", "^1.23")

    result = dependency.to_pep_508()
    assert result == "Django (>=1.23,<2.0)"

    dependency = Dependency("Django", "^1.23")
    dependency.python_versions = "~2.7 || ^3.6"

    result = dependency.to_pep_508()
    assert (
        result == "Django (>=1.23,<2.0) ; "
        'python_version == "2.7" '
        'or python_version >= "3.6" and python_version < "4.0"'
    )


def test_to_pep_508_wildcard() -> None:
    dependency = Dependency("Django", "*")

    result = dependency.to_pep_508()
    assert result == "Django"


def test_to_pep_508_in_extras() -> None:
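    # Assigning the private _in_extras attribute directly simulates a dependency
    # that was declared under the "foo" extra.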
    dependency = Dependency("Django", "^1.23")
    dependency._in_extras = [canonicalize_name("foo")]

    result = dependency.to_pep_508()
    assert result == 'Django (>=1.23,<2.0) ; extra == "foo"'

    result = dependency.to_pep_508(with_extras=False)
    assert result == "Django (>=1.23,<2.0)"

    dependency._in_extras = [canonicalize_name("foo"), canonicalize_name("bar")]

    result = dependency.to_pep_508()
    assert result == 'Django (>=1.23,<2.0) ; extra == "foo" or extra == "bar"'

    dependency.python_versions = "~2.7 || ^3.6"

    result = dependency.to_pep_508()
    assert (
        result == "Django (>=1.23,<2.0) ; "
        "("
        'python_version == "2.7" '
        'or python_version >= "3.6" and python_version < "4.0"'
        ") "
        'and (extra == "foo" or extra == "bar")'
    )

    result = dependency.to_pep_508(with_extras=False)
    assert (
        result == "Django (>=1.23,<2.0) ; "
        'python_version == "2.7" '
        'or python_version >= "3.6" and python_version < "4.0"'
    )


def test_to_pep_508_in_extras_parsed() -> None:
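    # Extras are rendered in normalized (sorted) order in the PEP 508 string.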
    dependency = Dependency.create_from_pep_508(
        'foo[baz,bar] (>=1.23,<2.0) ; extra == "baz"'
    )

    result = dependency.to_pep_508()
    assert result == 'foo[bar,baz] (>=1.23,<2.0) ; extra == "baz"'

    result = dependency.to_pep_508(with_extras=False)
    assert result == "foo[bar,baz] (>=1.23,<2.0)"


@pytest.mark.parametrize(
    ("exclusion", "expected"),
    [
        ("!=1.2.3", "!=1.2.3"),
        ("!=1.2.*", "!=1.2.*"),
        ("<2.0 || >=2.1.dev0", "!=2.0.*"),
    ],
)
def test_to_pep_508_with_excluded_versions(exclusion: str, expected: str) -> None:
    dependency = Dependency("foo", exclusion)

    assert dependency.to_pep_508() == f"foo ({expected})"


@pytest.mark.parametrize(
    "python_versions, marker",
    [
        (">=3.5,<3.5.4", 'python_version >= "3.5" and python_full_version < "3.5.4"'),
        (">=3.5.4,<3.6", 'python_full_version >= "3.5.4" and python_version < "3.6"'),
        ("<3.5.4", 'python_full_version < "3.5.4"'),
        (">=3.5.4", 'python_full_version >= "3.5.4"'),
        ("== 3.5.4", 'python_full_version == "3.5.4"'),
    ],
)
def test_to_pep_508_with_patch_python_version(
    python_versions: str, marker: str
) -> None:
    dependency = Dependency("Django", "^1.23")
    dependency.python_versions = python_versions

    expected = f"Django (>=1.23,<2.0) ; {marker}"

    assert dependency.to_pep_508() == expected
    assert dependency.to_pep_508(resolved=True) == expected
    assert str(dependency.marker) == marker


def test_to_pep_508_tilde() -> None:
    dependency = Dependency("foo", "~1.2.3")

    assert dependency.to_pep_508() == "foo (>=1.2.3,<1.3.0)"

    dependency = Dependency("foo", "~1.2")

    assert dependency.to_pep_508() == "foo (>=1.2,<1.3)"

    dependency = Dependency("foo", "~0.2.3")

    assert dependency.to_pep_508() == "foo (>=0.2.3,<0.3.0)"

    dependency = Dependency("foo", "~0.2")

    assert dependency.to_pep_508() == "foo (>=0.2,<0.3)"


def test_to_pep_508_caret() -> None:
    dependency = Dependency("foo", "^1.2.3")

    assert dependency.to_pep_508() == "foo (>=1.2.3,<2.0.0)"

    dependency = Dependency("foo", "^1.2")

    assert dependency.to_pep_508() == "foo (>=1.2,<2.0)"

    dependency = Dependency("foo", "^0.2.3")

    assert dependency.to_pep_508() == "foo (>=0.2.3,<0.3.0)"

    dependency = Dependency("foo", "^0.2")

    assert dependency.to_pep_508() == "foo (>=0.2,<0.3)"


def test_to_pep_508_combination() -> None:
    dependency = Dependency("foo", "^1.2,!=1.3.5")

    assert dependency.to_pep_508() == "foo (>=1.2,<2.0,!=1.3.5)"

    dependency = Dependency("foo", "~1.2,!=1.2.5")

    assert dependency.to_pep_508() == "foo (>=1.2,<1.3,!=1.2.5)"


@pytest.mark.parametrize(
    "requirement",
    [
        "enum34; extra == ':python_version < \"3.4\"'",
        "enum34; extra == \":python_version < '3.4'\"",
    ],
)
def test_to_pep_508_with_invalid_marker(requirement: str) -> None:
    with pytest.raises(InvalidMarkerError):
        _ = Dependency.create_from_pep_508(requirement)


@pytest.mark.parametrize(
    "requirement",
    [
        'enum34; extra == ":python_version < "3.4""',
    ],
)
def test_to_pep_508_with_invalid_requirement(requirement: str) -> None:
    with pytest.raises(InvalidRequirementError):
        _ = Dependency.create_from_pep_508(requirement)


@pytest.mark.parametrize(
    ("requirement", "dependency_type"),
    [
        (
            "eflips-depot @ git@github.com/mpm-tu-berlin/eflips-depot.git@feature/allow-only-oppo-charging",
            DirectoryDependency,
        ),
        (
            "eflips-depot @ git@github.com/mpm-tu-berlin/eflips-depot.git@feature/allow-only-oppo-charging.whl",
            FileDependency,
        ),
    ],
)
def test_to_pep_508_with_invalid_path_requirement(
    requirement: str, dependency_type: type[FileDependency | DirectoryDependency]
) -> None:
    dependency = Dependency.create_from_pep_508(requirement)
    assert isinstance(dependency, dependency_type)
    assert dependency.source_url


def test_complete_name() -> None:
    assert Dependency("foo", ">=1.2.3").complete_name == "foo"
    assert (
        Dependency("foo", ">=1.2.3", extras=["baz", "bar"]).complete_name
        == "foo[bar,baz]"
    )


@pytest.mark.parametrize(
    "name,constraint,extras,expected",
    [
        ("A", ">2.7,<3.0", None, "A (>2.7,<3.0)"),
        ("A", ">2.7,<3.0", ["x"], "A[x] (>2.7,<3.0)"),
        ("A", ">=1.6.5,<1.8.0 || >1.8.0,<3.1.0", None, "A (>=1.6.5,!=1.8.0,<3.1.0)"),
        (
            "A",
            ">=1.6.5,<1.8.0 || >1.8.0,<3.1.0",
            ["x"],
            "A[x] (>=1.6.5,!=1.8.0,<3.1.0)",
        ),
        # test single version range (wildcard)
        ("A", "==2.*", None, "A (==2.*)"),
        ("A", "==2.0.*", None, "A (==2.0.*)"),
        ("A", "==0.0.*", None, "A (==0.0.*)"),
        ("A", "==0.1.*", None, "A (==0.1.*)"),
        ("A", "==0.*", None, "A (==0.*)"),
        ("A", ">=1.0.dev0,<2", None, "A (==1.*)"),
        ("A", ">=1.dev0,<2", None, "A (==1.*)"),
        ("A", ">=1.0.dev1,<2", None, "A (>=1.0.dev1,<2)"),
        ("A", ">=1.1.dev0,<2", None, "A (>=1.1.dev0,<2)"),
        ("A", ">=1.0.dev0,<2.0.dev0", None, "A (==1.*)"),
        ("A", ">=1.0.dev0,<2.0.dev1", None, "A (>=1.0.dev0,<2.0.dev1)"),
        ("A", ">=1,<2", None, "A (>=1,<2)"),
        ("A", ">=1.0.dev0,<1.1", None, "A (==1.0.*)"),
        ("A", ">=1.0.0.0.dev0,<1.1.0.0.0", None, "A (==1.0.*)"),
        # test single version range (wildcard) exclusions
        ("A", ">=1.8,!=2.0.*", None, "A (>=1.8,!=2.0.*)"),
        ("A", "!=0.0.*", None, "A (!=0.0.*)"),
        ("A", "!=0.1.*", None, "A (!=0.1.*)"),
        ("A", "!=0.*", None, "A (!=0.*)"),
        ("A", ">=1.8,!=2.*", None, "A (>=1.8,!=2.*)"),
        ("A", ">=1.8,!=2.*.*", None, "A (>=1.8,!=2.*)"),
        ("A", ">=1.8,<2.0 || >=2.1.0.dev0", None, "A (>=1.8,!=2.0.*)"),
        ("A", ">=1.8,<2.0.0 || >=3.0.0.dev0", None, "A (>=1.8,!=2.*)"),
        ("A", ">=1.8,<2.0 || >=3.dev0", None, "A (>=1.8,!=2.*)"),
        ("A", ">=1.8,<2 || >=2.1.0.dev0", None, "A (>=1.8,!=2.0.*)"),
        ("A", ">=1.8,<2 || >=2.1.dev0", None, "A (>=1.8,!=2.0.*)"),
        ("A", ">=1.8,!=2.0.*,!=3.0.*", None, "A (>=1.8,!=2.0.*,!=3.0.*)"),
        ("A", ">=1.8.0.0,<2.0.0.0 || >=2.0.1.0.dev0", None, "A (>=1.8.0.0,!=2.0.0.*)"),
        ("A", ">=1.8.0.0,<2 || >=2.0.1.0.dev0", None, "A (>=1.8.0.0,!=2.0.0.*)"),
        # we verify that the range exclusion logic is not too eager
        ("A", ">=1.8,<2.0 || >=2.2.0", None, "A (>=1.8,<2.0 || >=2.2.0)"),
        ("A", ">=1.8,<2.0 || >=2.1.5", None, "A (>=1.8,<2.0 || >=2.1.5)"),
        ("A", ">=1.8.0.0,<2 || >=2.0.1.5", None, "A (>=1.8.0.0,<2 || >=2.0.1.5)"),
        ("A", ">=1.8.0.0,!=2.0.0.*", None, "A (>=1.8.0.0,!=2.0.0.*)"),
    ],
)
def test_dependency_string_representation(
    name: str, constraint: str, extras: list[str] | None, expected: str
) -> None:
    dependency = Dependency(name=name, constraint=constraint, extras=extras)
    assert str(dependency) == expected


def test_set_constraint_sets_pretty_constraint() -> None:
    dependency = Dependency("A", "^1.0")
    assert dependency.pretty_constraint == "^1.0"
    dependency.constraint = "^2.0"  # type: ignore[assignment]
    assert dependency.pretty_constraint == "^2.0"


def test_set_bogus_constraint_raises_exception() -> None:
    dependency = Dependency("A", "^1.0")
    with pytest.raises(ParseConstraintError):
        dependency.constraint = "^=4.5"  # type: ignore[assignment]


def test_with_constraint() -> None:
    dependency = Dependency(
        "foo",
        "^1.2.3",
        optional=True,
        groups=["dev"],
        allows_prereleases=True,
        extras=["bar", "baz"],
    )
    dependency.marker = parse_marker(
        'python_version >= "3.6" and python_version < "4.0"'
    )
    dependency.transitive_marker = parse_marker(
        'python_version >= "3.7" and python_version < "4.0"'
    )
    dependency.python_versions = "^3.6"

    new = dependency.with_constraint("^1.2.6")

    assert new.name == dependency.name
    assert str(new.constraint) == ">=1.2.6,<2.0.0"
    assert new.is_optional()
    assert new.groups == frozenset(["dev"])
    assert new.allows_prereleases()
    assert set(new.extras) == {"bar", "baz"}
    assert new.marker == dependency.marker
    assert new.transitive_marker == dependency.transitive_marker
    assert new.python_constraint == dependency.python_constraint


@pytest.mark.parametrize(
    "marker, expected",
    [
        ('python_version >= "3.6" and python_version < "4.0"', ">=3.6,<4.0"),
        ('sys_platform == "linux"', "*"),
        ('python_version >= "3.9" or sys_platform == "linux"', "*"),
        ('python_version >= "3.9" and sys_platform == "linux"', ">=3.9"),
    ],
)
def test_marker_properly_sets_python_constraint(marker: str, expected: str) -> None:
    dependency = Dependency("foo", "^1.2.3")
    dependency.marker = marker  # type: ignore[assignment]
    assert str(dependency.python_constraint) == expected


def test_dependency_markers_are_the_same_as_markers() -> None:
    dependency = Dependency.create_from_pep_508('foo ; extra=="bar"')
    marker = parse_marker('extra=="bar"')

    assert dependency.marker == marker


def test_marker_properly_unsets_python_constraint() -> None:
    dependency = Dependency("foo", "^1.2.3")

    dependency.marker = 'python_version >= "3.6"'  # type: ignore[assignment]
    assert str(dependency.python_constraint) == ">=3.6"

    dependency.marker = "*"  # type: ignore[assignment]
    assert str(dependency.python_constraint) == "*"


def test_create_from_pep_508_url_with_activated_extras() -> None:
    dependency = Dependency.create_from_pep_508("name [fred,bar] @ http://foo.com")
    assert dependency.extras == {"fred", "bar"}


def test_create_from_pep_508_starting_with_digit() -> None:
    dependency = Dependency.create_from_pep_508("2captcha-python")
    assert dependency.name == "2captcha-python"


@pytest.mark.parametrize(
    "dependency1, dependency2, expected",
    [
        (Dependency("a", "1.0"), Dependency("a", "1.0"), True),
        (Dependency("a", "1.0"), Dependency("a", "1.0.1"), False),
        (Dependency("a", "1.0"), Dependency("a1", "1.0"), False),
        (Dependency("a", "1.0"), Dependency("a", "1.0", source_type="file"), False),
        # constraint is implicitly given for direct origin dependencies,
        # but might not be set
        (
            Dependency("a", "1.0", source_type="file"),
            Dependency("a", "*", source_type="file"),
            True,
        ),
        # constraint is not implicit for non direct origin dependencies
        (Dependency("a", "1.0"), Dependency("a", "*"), False),
        (
            Dependency("a", "1.0", source_type="legacy"),
            Dependency("a", "*", source_type="legacy"),
            False,
        ),
    ],
)
def test_eq(dependency1: Dependency, dependency2: Dependency, expected: bool) -> None:
    assert (dependency1 == dependency2) is expected
    assert (dependency2 == dependency1) is expected
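

# Hedged restatement of the direct-origin rule from the table above: for
# direct-origin dependencies the constraint is implicit, so "*" compares equal
# to a pinned version, while it does not for registry dependencies.
def _example_direct_origin_constraint_is_implicit() -> None:
    assert Dependency("a", "1.0", source_type="file") == Dependency(
        "a", "*", source_type="file"
    )
    assert Dependency("a", "1.0") != Dependency("a", "*")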


@pytest.mark.parametrize(
    "attr_name, value",
    [
        ("constraint", "2.0"),
        ("python_versions", "<3.8"),
        ("marker", "sys_platform == 'linux'"),
        ("transitive_marker", "sys_platform == 'linux'"),
    ],
)
def test_mutable_attributes_not_in_hash(attr_name: str, value: str) -> None:
    dependency = Dependency("foo", "^1.2.3")
    ref_hash = hash(dependency)
    ref_value = getattr(dependency, attr_name)

    setattr(dependency, attr_name, value)

    assert value != ref_value
    assert hash(dependency) == ref_hash
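

# Hedged corollary of the hash-stability property verified above: a Dependency
# can be mutated while stored in a hash-based container without becoming
# unreachable, since mutable attributes do not feed into __hash__.
def _example_hash_stable_in_set() -> None:
    dependency = Dependency("foo", "^1.2.3")
    seen = {dependency}
    dependency.python_versions = "<3.8"
    assert dependency in seen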
poetry-core-2.1.1/tests/packages/test_dependency_group.py
from __future__ import annotations

from pathlib import Path

import pytest

from packaging.utils import canonicalize_name

from poetry.core.packages.dependency import Dependency
from poetry.core.packages.dependency_group import DependencyGroup
from poetry.core.packages.directory_dependency import DirectoryDependency
from poetry.core.packages.vcs_dependency import VCSDependency


def create_dependency(
    name: str,
    constraint: str = "*",
    *,
    optional: bool = False,
    extras: tuple[str, ...] = (),
    in_extras: tuple[str, ...] = (),
    allows_prereleases: bool | None = None,
    develop: bool = False,
    source_name: str | None = None,
    marker: str | None = None,
) -> Dependency:
    dep = Dependency(
        name=name,
        constraint=constraint,
        optional=optional,
        extras=extras,
        allows_prereleases=allows_prereleases,
    )
    if in_extras:
        dep._optional = True
        dep._in_extras = [canonicalize_name(extra) for extra in in_extras]
    if develop:
        dep._develop = develop
    if source_name:
        dep.source_name = source_name
    if marker:
        dep.marker = marker  # type: ignore[assignment]
    return dep


@pytest.mark.parametrize("mixed_dynamic", [False, True])
@pytest.mark.parametrize(
    (
        "dependencies",
        "poetry_dependencies",
        "expected_dependencies",
    ),
    [
        ({Dependency("foo", "*", optional=True)}, set(), {"foo"}),
        (set(), {Dependency("bar", "*")}, {"bar"}),
        (set(), {Dependency("bar", "*", optional=True)}, {"bar"}),
        ({Dependency("foo", "*")}, {Dependency("bar", "*")}, {"foo"}),
        (
            {Dependency("foo", "*", optional=True)},
            {Dependency("bar", "*")},
            ({"foo"}, {"foo", "bar"}),
        ),
        (
            {Dependency("foo", "*")},
            {Dependency("bar", "*", optional=True)},
            ({"foo"}, {"foo", "bar"}),
        ),
        (
            {
                Dependency("foo", "*", optional=True),
                Dependency("baz", "*", optional=True),
            },
            {Dependency("bar", "*")},
            ({"foo", "baz"}, {"foo", "bar", "baz"}),
        ),
        (
            {
                Dependency("foo", "*", optional=True),
                Dependency("baz", "*", optional=False),
            },
            {Dependency("bar", "*")},
            {"foo", "baz"},
        ),
        (
            {Dependency("foo", "*", optional=True)},
            {Dependency("bar", "*"), Dependency("baz", "*", optional=True)},
            ({"foo"}, {"foo", "bar"}),
        ),
    ],
)
def test_dependencies(
    dependencies: set[Dependency],
    poetry_dependencies: set[Dependency],
    mixed_dynamic: bool,
    expected_dependencies: set[str] | tuple[set[str], set[str]],
) -> None:
    group = DependencyGroup(name="group", mixed_dynamic=mixed_dynamic)
    group._dependencies = list(dependencies)
    group._poetry_dependencies = list(poetry_dependencies)

    if isinstance(expected_dependencies, tuple):
        expected_dependencies = (
            expected_dependencies[1] if mixed_dynamic else expected_dependencies[0]
        )
    assert {d.name for d in group.dependencies} == expected_dependencies
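

# Hedged sketch of the mixed_dynamic semantics exercised above: with
# mixed_dynamic=True, static optional dependencies are merged with the
# [tool.poetry] ones instead of shadowing them.
def _example_mixed_dynamic_merges() -> None:
    group = DependencyGroup(name="group", mixed_dynamic=True)
    group._dependencies = [Dependency("foo", "*", optional=True)]
    group._poetry_dependencies = [Dependency("bar", "*")]
    assert {d.name for d in group.dependencies} == {"foo", "bar"}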


@pytest.mark.parametrize(
    (
        "initial_dependencies",
        "initial_poetry_dependencies",
        "expected_dependencies",
        "expected_poetry_dependencies",
    ),
    [
        (set(), set(), {"new"}, set()),
        ({"foo"}, set(), {"foo", "new"}, set()),
        (set(), {"bar"}, set(), {"bar", "new"}),
        ({"foo"}, {"bar"}, {"foo", "new"}, {"bar"}),
    ],
)
def test_add_dependency_adds_to_correct_list(
    initial_dependencies: set[str],
    initial_poetry_dependencies: set[str],
    expected_dependencies: set[str],
    expected_poetry_dependencies: set[str],
) -> None:
    group = DependencyGroup(name="group")
    group._dependencies = [
        Dependency(name=name, constraint="*") for name in initial_dependencies
    ]
    group._poetry_dependencies = [
        Dependency(name=name, constraint="*") for name in initial_poetry_dependencies
    ]

    group.add_dependency(Dependency(name="new", constraint="*"))

    assert {d.name for d in group._dependencies} == expected_dependencies
    assert {d.name for d in group._poetry_dependencies} == expected_poetry_dependencies


def test_remove_dependency_removes_from_both_lists() -> None:
    group = DependencyGroup(name="group")
    group.add_dependency(Dependency(name="foo", constraint="*"))
    group.add_dependency(Dependency(name="bar", constraint="*"))
    group.add_dependency(Dependency(name="foo", constraint="*"))
    group.add_poetry_dependency(Dependency(name="baz", constraint="*"))
    group.add_poetry_dependency(Dependency(name="foo", constraint="*"))

    group.remove_dependency("foo")

    assert {d.name for d in group._dependencies} == {"bar"}
    assert {d.name for d in group._poetry_dependencies} == {"baz"}


@pytest.mark.parametrize("mixed_dynamic", [False, True])
@pytest.mark.parametrize(
    (
        "dependencies",
        "poetry_dependencies",
        "expected_dependencies",
    ),
    [
        ([Dependency.create_from_pep_508("foo")], [], [create_dependency("foo")]),
        ([], [Dependency.create_from_pep_508("bar")], [create_dependency("bar")]),
        (
            [create_dependency("foo")],
            [create_dependency("bar")],
            [create_dependency("foo")],
        ),
        (
            [create_dependency("foo", in_extras=("extra1",))],
            [create_dependency("bar")],
            (
                [create_dependency("foo", in_extras=("extra1",))],
                [
                    create_dependency("foo", in_extras=("extra1",)),
                    create_dependency("bar"),
                ],
            ),
        ),
        (
            [create_dependency("foo")],
            [create_dependency("bar", in_extras=("extra1",))],
            (
                [create_dependency("foo")],
                [
                    create_dependency("foo"),
                    create_dependency("bar", in_extras=("extra1",)),
                ],
            ),
        ),
        # refine constraint
        (
            [Dependency.create_from_pep_508("foo>=1")],
            [create_dependency("foo", "<2")],
            [create_dependency("foo", ">=1,<2")],
        ),
        # refine constraint + other dependency
        (
            [
                Dependency.create_from_pep_508("foo>=1"),
                Dependency.create_from_pep_508("bar>=2"),
            ],
            [create_dependency("foo", "<2")],
            [create_dependency("foo", ">=1,<2"), create_dependency("bar", ">=2")],
        ),
        # refine constraint depending on marker
        (
            [Dependency.create_from_pep_508("foo>=1")],
            [create_dependency("foo", "<2", marker="sys_platform == 'win32'")],
            [create_dependency("foo", ">=1,<2", marker="sys_platform == 'win32'")],
        ),
        # allow pre-releases
        (
            [Dependency.create_from_pep_508("foo>=1")],
            [create_dependency("foo", allows_prereleases=True)],
            [create_dependency("foo", ">=1", allows_prereleases=True)],
        ),
        (
            [Dependency.create_from_pep_508("foo>=1")],
            [create_dependency("foo", allows_prereleases=False)],
            [create_dependency("foo", ">=1", allows_prereleases=False)],
        ),
        # directory dependency - develop
        (
            [DirectoryDependency("foo", Path("path/to/foo"))],
            [create_dependency("foo", develop=True)],
            [DirectoryDependency("foo", Path("path/to/foo"), develop=True)],
        ),
        # directory dependency - develop (full spec)
        (
            [DirectoryDependency("foo", Path("path/to/foo"))],
            [DirectoryDependency("foo", Path("path/to/foo"), develop=True)],
            [DirectoryDependency("foo", Path("path/to/foo"), develop=True)],
        ),
        # vcs dependency - develop
        (
            [VCSDependency("foo", "git", "https://example.org/foo")],
            [create_dependency("foo", develop=True)],
            [VCSDependency("foo", "git", "https://example.org/foo", develop=True)],
        ),
        # vcs dependency - develop (full spec)
        (
            [VCSDependency("foo", "git", "https://example.org/foo")],
            [VCSDependency("foo", "git", "https://example.org/foo", develop=True)],
            [VCSDependency("foo", "git", "https://example.org/foo", develop=True)],
        ),
        # replace with directory dependency
        (
            [Dependency.create_from_pep_508("foo>=1")],
            [DirectoryDependency("foo", Path("path/to/foo"), develop=True)],
            [DirectoryDependency("foo", Path("path/to/foo"), develop=True)],
        ),
        # source
        (
            [Dependency.create_from_pep_508("foo>=1")],
            [create_dependency("foo", source_name="src")],
            [create_dependency("foo", ">=1", source_name="src")],
        ),
        # different sources depending on marker
        (
            [Dependency.create_from_pep_508("foo>=1")],
            [
                create_dependency(
                    "foo", source_name="src1", marker="sys_platform == 'win32'"
                ),
                create_dependency(
                    "foo", source_name="src2", marker="sys_platform == 'linux'"
                ),
            ],
            [
                create_dependency(
                    "foo", ">=1", source_name="src1", marker="sys_platform == 'win32'"
                ),
                create_dependency(
                    "foo", ">=1", source_name="src2", marker="sys_platform == 'linux'"
                ),
            ],
        ),
        # pairwise different sources depending on marker
        (
            [
                Dependency.create_from_pep_508("foo>=1; sys_platform == 'win32'"),
                Dependency.create_from_pep_508("foo>=1.1; sys_platform == 'linux'"),
            ],
            [
                create_dependency(
                    "foo", source_name="src1", marker="sys_platform == 'win32'"
                ),
                create_dependency(
                    "foo", source_name="src2", marker="sys_platform == 'linux'"
                ),
            ],
            [
                create_dependency(
                    "foo", ">=1", source_name="src1", marker="sys_platform == 'win32'"
                ),
                create_dependency(
                    "foo", ">=1.1", source_name="src2", marker="sys_platform == 'linux'"
                ),
            ],
        ),
        # enrich only one with source
        (
            [
                Dependency.create_from_pep_508("foo>=1; sys_platform == 'win32'"),
                Dependency.create_from_pep_508("foo>=1.1; sys_platform == 'linux'"),
            ],
            [
                create_dependency(
                    "foo", source_name="src1", marker="sys_platform == 'win32'"
                ),
            ],
            [
                create_dependency(
                    "foo", ">=1", source_name="src1", marker="sys_platform == 'win32'"
                ),
                create_dependency("foo", ">=1.1", marker="sys_platform == 'linux'"),
            ],
        ),
        # extras
        (
            [Dependency.create_from_pep_508("foo[extra1,extra2]")],
            [create_dependency("foo", source_name="src")],
            [create_dependency("foo", source_name="src", extras=("extra1", "extra2"))],
        ),
        (
            [Dependency.create_from_pep_508("foo;extra=='extra1'")],
            [create_dependency("foo", source_name="src", optional=True)],
            [create_dependency("foo", source_name="src", marker="extra == 'extra1'")],
        ),
        (
            [Dependency.create_from_pep_508("foo;extra=='extra1'")],
            [create_dependency("foo", source_name="src")],
            (
                [
                    create_dependency(
                        "foo", source_name="src", marker="extra == 'extra1'"
                    )
                ],
                [
                    create_dependency(
                        "foo", source_name="src", marker="extra == 'extra1'"
                    ),
                    create_dependency("foo", source_name="src"),
                ],
            ),
        ),
        # extras - special
        # root extras do not have an extra marker, they just have set _in_extras!
        (
            [
                Dependency.create_from_pep_508("foo;extra!='extra1'"),
                create_dependency("foo", in_extras=("extra1",)),
            ],
            [
                create_dependency("foo", marker="extra!='extra1'", source_name="src1"),
                create_dependency("foo", marker="extra=='extra1'", source_name="src2"),
            ],
            [
                create_dependency("foo", source_name="src1", marker="extra!='extra1'"),
                create_dependency("foo", source_name="src2", in_extras=("extra1",)),
            ],
        ),
        (
            [
                Dependency.create_from_pep_508(
                    "foo;extra!='extra1' and extra!='extra2'"
                ),
                create_dependency("foo", in_extras=("extra1", "extra2")),
            ],
            [
                create_dependency(
                    "foo",
                    marker="extra!='extra1' and extra!='extra2'",
                    source_name="src1",
                ),
                create_dependency(
                    "foo",
                    marker="extra=='extra1' or extra=='extra2'",
                    source_name="src2",
                ),
            ],
            [
                create_dependency(
                    "foo",
                    source_name="src1",
                    marker="extra!='extra1' and extra!='extra2'",
                ),
                create_dependency(
                    "foo", source_name="src2", in_extras=("extra1", "extra2")
                ),
            ],
        ),
        (
            [
                create_dependency(
                    "foo", marker="extra!='extra2'", in_extras=("extra1",)
                ),
                create_dependency(
                    "foo", marker="extra!='extra1'", in_extras=("extra2",)
                ),
            ],
            [
                create_dependency(
                    "foo",
                    marker="extra!='extra2' and extra=='extra1'",
                    source_name="src1",
                ),
                create_dependency(
                    "foo",
                    marker="extra!='extra1' and extra=='extra2'",
                    source_name="src2",
                ),
            ],
            [
                create_dependency(
                    "foo",
                    source_name="src1",
                    marker="extra!='extra2'",
                    in_extras=("extra1",),
                ),
                create_dependency(
                    "foo",
                    source_name="src2",
                    marker="extra!='extra1'",
                    in_extras=("extra2",),
                ),
            ],
        ),
    ],
)
def test_dependencies_for_locking(
    dependencies: list[Dependency],
    poetry_dependencies: list[Dependency],
    mixed_dynamic: bool,
    expected_dependencies: list[Dependency] | tuple[list[Dependency], list[Dependency]],
) -> None:
    group = DependencyGroup(name="group", mixed_dynamic=mixed_dynamic)
    group._dependencies = dependencies
    group._poetry_dependencies = poetry_dependencies

    if isinstance(expected_dependencies, tuple):
        expected_dependencies = (
            expected_dependencies[1] if mixed_dynamic else expected_dependencies[0]
        )

    assert group.dependencies_for_locking == expected_dependencies
    # explicitly check attributes that are not considered in __eq__
    assert [d.allows_prereleases() for d in group.dependencies_for_locking] == [
        d.allows_prereleases() for d in expected_dependencies
    ]
    assert [d.source_name for d in group.dependencies_for_locking] == [
        d.source_name for d in expected_dependencies
    ]
    assert [d.marker for d in group.dependencies_for_locking] == [
        d.marker for d in expected_dependencies
    ]
    assert [d._develop for d in group.dependencies_for_locking] == [
        d._develop for d in expected_dependencies
    ]
    assert [d.in_extras for d in group.dependencies_for_locking] == [
        d.in_extras for d in expected_dependencies
    ]
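

# Hedged sketch of the "refine constraint" cases above: for locking, the
# [tool.poetry] constraint is intersected with the static PEP-508 one.
def _example_locking_refines_constraint() -> None:
    group = DependencyGroup(name="group")
    group._dependencies = [Dependency.create_from_pep_508("foo>=1")]
    group._poetry_dependencies = [create_dependency("foo", "<2")]
    locked = group.dependencies_for_locking
    assert [str(d.constraint) for d in locked] == [">=1,<2"]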


@pytest.mark.parametrize(
    (
        "dependencies",
        "poetry_dependencies",
    ),
    [
        (
            [Dependency.create_from_pep_508("foo>=1")],
            [create_dependency("foo", "<1")],
        ),
        (
            [DirectoryDependency("foo", Path("path/to/foo"))],
            [VCSDependency("foo", "git", "https://example.org/foo")],
        ),
    ],
)
def test_dependencies_for_locking_failure(
    dependencies: list[Dependency],
    poetry_dependencies: list[Dependency],
) -> None:
    group = DependencyGroup(name="group")
    group._dependencies = dependencies
    group._poetry_dependencies = poetry_dependencies

    with pytest.raises(ValueError):
        _ = group.dependencies_for_locking
poetry-core-2.1.1/tests/packages/test_directory_dependency.py
from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING
from typing import cast

import pytest

from poetry.core.packages.dependency import Dependency
from poetry.core.packages.directory_dependency import DirectoryDependency


if TYPE_CHECKING:
    from pytest import LogCaptureFixture
    from pytest_mock import MockerFixture


DIST_PATH = Path(__file__).parent.parent / "fixtures" / "distributions"
SAMPLE_PROJECT = Path(__file__).parent.parent / "fixtures" / "sample_project"


def test_directory_dependency_does_not_exist(
    caplog: LogCaptureFixture, mocker: MockerFixture
) -> None:
    mock_exists = mocker.patch.object(Path, "exists")
    mock_exists.return_value = False
    dep = DirectoryDependency("demo", DIST_PATH / "invalid")
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert "does not exist" in record.message

    with pytest.raises(ValueError, match="does not exist"):
        dep.validate(raise_error=True)

    mock_exists.assert_called_once()


def test_directory_dependency_is_file(
    caplog: LogCaptureFixture, mocker: MockerFixture
) -> None:
    mock_is_file = mocker.patch.object(Path, "is_file")
    mock_is_file.return_value = True
    dep = DirectoryDependency("demo", DIST_PATH / "demo-0.1.0.tar.gz")
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert "is a file" in record.message

    with pytest.raises(ValueError, match="is a file"):
        dep.validate(raise_error=True)

    mock_is_file.assert_called_once()


def test_directory_dependency_is_not_a_python_project(
    caplog: LogCaptureFixture,
) -> None:
    dep = DirectoryDependency("demo", DIST_PATH)
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert "a Python package" in record.message

    with pytest.raises(ValueError, match="not .* a Python package"):
        dep.validate(raise_error=True)


def test_directory_dependency_minimal() -> None:
    path = Path(__file__).parent.parent / "fixtures" / "project_minimal"
    dep = DirectoryDependency("demo", path)
    dep.validate(raise_error=True)


def _test_directory_dependency_pep_508(
    name: str, path: Path, pep_508_input: str, pep_508_output: str | None = None
) -> None:
    dep = Dependency.create_from_pep_508(
        pep_508_input, relative_to=Path(__file__).parent
    )

    assert dep.is_directory()
    dep = cast("DirectoryDependency", dep)
    assert dep.name == name
    assert dep.path == path
    assert dep.to_pep_508() == (pep_508_output or pep_508_input)


def test_directory_dependency_pep_508_local_absolute() -> None:
    path = (
        Path(__file__).parent.parent
        / "fixtures"
        / "project_with_multi_constraints_dependency"
    )
    expected = f"demo @ {path.as_uri()}"

    requirement = f"demo @ file://{path.as_posix()}"
    _test_directory_dependency_pep_508("demo", path, requirement, expected)

    requirement = f"demo @ {path}"
    _test_directory_dependency_pep_508("demo", path, requirement, expected)


def test_directory_dependency_pep_508_localhost() -> None:
    path = (
        Path(__file__).parent.parent
        / "fixtures"
        / "project_with_multi_constraints_dependency"
    )
    requirement = f"demo @ file://localhost{path.as_posix()}"
    expected = f"demo @ {path.as_uri()}"
    _test_directory_dependency_pep_508("demo", path, requirement, expected)


def test_directory_dependency_pep_508_local_relative() -> None:
    path = Path("..") / "fixtures" / "project_with_multi_constraints_dependency"

    with pytest.raises(ValueError):
        requirement = f"demo @ file://{path.as_posix()}"
        _test_directory_dependency_pep_508("demo", path, requirement)

    requirement = f"demo @ {path}"
    base = Path(__file__).parent
    expected = f"demo @ {(base / path).resolve().as_uri()}"
    _test_directory_dependency_pep_508("demo", path, requirement, expected)


def test_directory_dependency_pep_508_with_subdirectory() -> None:
    path = (
        Path(__file__).parent.parent
        / "fixtures"
        / "project_with_multi_constraints_dependency"
    )
    expected = f"demo @ {path.as_uri()}"

    requirement = f"demo @ file://{path.parent.as_posix()}#subdirectory={path.name}"
    _test_directory_dependency_pep_508("demo", path, requirement, expected)


def test_directory_dependency_pep_508_extras() -> None:
    path = (
        Path(__file__).parent.parent
        / "fixtures"
        / "project_with_multi_constraints_dependency"
    )
    requirement = f"demo[foo,bar] @ file://{path.as_posix()}"
    expected = f"demo[bar,foo] @ {path.as_uri()}"
    _test_directory_dependency_pep_508("demo", path, requirement, expected)


def test_directory_dependency_pep_508_with_marker() -> None:
    path = (
        Path(__file__).parent.parent
        / "fixtures"
        / "project_with_multi_constraints_dependency"
    )
    requirement = f'demo @ file://{path.as_posix()} ; sys_platform == "linux"'
    expected = f'demo @ {path.as_uri()} ; sys_platform == "linux"'
    _test_directory_dependency_pep_508("demo", path, requirement, expected)


@pytest.mark.parametrize(
    "name,path,extras,constraint,expected",
    [
        (
            "my-package",
            SAMPLE_PROJECT,
            None,
            None,
            f"my-package (*) @ {SAMPLE_PROJECT.as_uri()}",
        ),
        (
            "my-package",
            SAMPLE_PROJECT,
            ["db"],
            "1.2",
            f"my-package[db] (1.2) @ {SAMPLE_PROJECT.as_uri()}",
        ),
    ],
)
def test_directory_dependency_string_representation(
    name: str,
    path: Path,
    extras: list[str] | None,
    constraint: str | None,
    expected: str,
) -> None:
    dependency = DirectoryDependency(name=name, path=path, extras=extras)
    if constraint:
        dependency.constraint = constraint  # type: ignore[assignment]
    assert str(dependency) == expected


@pytest.mark.parametrize(
    ("fixture", "name"),
    [
        ("project_with_pep517_non_poetry", "PEP 517"),
        ("project_with_setup_cfg_only", "setup.cfg"),
    ],
)
def test_directory_dependency_non_poetry_pep517(fixture: str, name: str) -> None:
    path = Path(__file__).parent.parent / "fixtures" / fixture

    try:
        DirectoryDependency("package", path)
    except ValueError as e:
        if "does not seem to be a Python package" not in str(e):
            raise e from e
        pytest.fail(f"A {name} project not recognized as valid directory dependency")
poetry-core-2.1.1/tests/packages/test_file_dependency.py
from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING
from typing import cast

import pytest

from poetry.core.packages.dependency import Dependency
from poetry.core.packages.file_dependency import FileDependency
from poetry.core.version.markers import SingleMarker


if TYPE_CHECKING:
    from pytest import LogCaptureFixture
    from pytest_mock import MockerFixture

    from poetry.core.version.markers import BaseMarker

DIST_PATH = Path(__file__).parent.parent / "fixtures" / "distributions"
TEST_FILE = "demo-0.1.0.tar.gz"


def test_file_dependency_does_not_exist(
    caplog: LogCaptureFixture, mocker: MockerFixture
) -> None:
    mock_exists = mocker.patch.object(Path, "exists")
    mock_exists.return_value = False
    dep = FileDependency("demo", DIST_PATH / "demo-0.2.0.tar.gz")
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert "does not exist" in record.message

    with pytest.raises(ValueError, match="does not exist"):
        dep.validate(raise_error=True)

    mock_exists.assert_called_once()


def test_file_dependency_is_directory(
    caplog: LogCaptureFixture, mocker: MockerFixture
) -> None:
    mock_is_directory = mocker.patch.object(Path, "is_dir")
    mock_is_directory.return_value = True
    dep = FileDependency("demo", DIST_PATH)
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert "is a directory" in record.message

    with pytest.raises(ValueError, match="is a directory"):
        dep.validate(raise_error=True)

    mock_is_directory.assert_called_once()


def _test_file_dependency_pep_508(
    mocker: MockerFixture,
    name: str,
    path: Path,
    pep_508_input: str,
    pep_508_output: str | None = None,
    marker: BaseMarker | None = None,
) -> None:
    mocker.patch.object(Path, "exists").return_value = True
    mocker.patch.object(Path, "is_file").return_value = True

    dep = Dependency.create_from_pep_508(
        pep_508_input, relative_to=Path(__file__).parent
    )
    if marker:
        dep.marker = marker

    assert dep.is_file()
    dep = cast("FileDependency", dep)
    assert dep.name == name
    assert dep.path == path
    assert dep.to_pep_508() == (pep_508_output or pep_508_input)


def test_file_dependency_pep_508_local_file_absolute(mocker: MockerFixture) -> None:
    path = DIST_PATH / "demo-0.2.0.tar.gz"
    expected = f"demo @ {path.as_uri()}"

    requirement = f"demo @ file://{path.as_posix()}"
    _test_file_dependency_pep_508(mocker, "demo", path, requirement, expected)

    requirement = f"demo @ {path}"
    _test_file_dependency_pep_508(mocker, "demo", path, requirement, expected)


def test_file_dependency_pep_508_local_file_localhost(mocker: MockerFixture) -> None:
    path = DIST_PATH / "demo-0.2.0.tar.gz"
    requirement = f"demo @ file://localhost{path.as_posix()}"
    expected = f"demo @ {path.as_uri()}"
    _test_file_dependency_pep_508(mocker, "demo", path, requirement, expected)


def test_file_dependency_pep_508_local_file_relative_path(
    mocker: MockerFixture,
) -> None:
    path = Path("..") / "fixtures" / "distributions" / "demo-0.2.0.tar.gz"

    with pytest.raises(ValueError):
        requirement = f"demo @ file://{path.as_posix()}"
        _test_file_dependency_pep_508(mocker, "demo", path, requirement)

    requirement = f"demo @ {path}"
    base = Path(__file__).parent
    expected = f"demo @ {(base / path).resolve().as_uri()}"
    _test_file_dependency_pep_508(mocker, "demo", path, requirement, expected)


def test_file_dependency_pep_508_with_subdirectory(mocker: MockerFixture) -> None:
    path = DIST_PATH / "demo.zip"
    expected = f"demo @ {path.as_uri()}#subdirectory=sub"

    requirement = f"demo @ file://{path.as_posix()}#subdirectory=sub"
    _test_file_dependency_pep_508(mocker, "demo", path, requirement, expected)


def test_to_pep_508_with_marker(mocker: MockerFixture) -> None:
    wheel = "demo-0.1.0-py2.py3-none-any.whl"

    abs_path = DIST_PATH / wheel
    requirement = f'demo @ {abs_path.as_uri()} ; sys_platform == "linux"'
    _test_file_dependency_pep_508(
        mocker,
        "demo",
        abs_path,
        requirement,
        marker=SingleMarker("sys.platform", "linux"),
    )


def test_relative_file_dependency_to_pep_508_with_marker(mocker: MockerFixture) -> None:
    wheel = "demo-0.1.0-py2.py3-none-any.whl"

    rel_path = Path("..") / "fixtures" / "distributions" / wheel
    requirement = f'demo @ {rel_path.as_posix()} ; sys_platform == "linux"'
    base = Path(__file__).parent
    expected = (
        f'demo @ {(base / rel_path).resolve().as_uri()} ; sys_platform == "linux"'
    )
    _test_file_dependency_pep_508(
        mocker,
        "demo",
        rel_path,
        requirement,
        expected,
        marker=SingleMarker("sys.platform", "linux"),
    )


def test_file_dependency_pep_508_extras(mocker: MockerFixture) -> None:
    wheel = "demo-0.1.0-py2.py3-none-any.whl"

    rel_path = Path("..") / "fixtures" / "distributions" / wheel
    requirement = f'demo[foo,bar] @ {rel_path.as_posix()} ; sys_platform == "linux"'
    base = Path(__file__).parent
    expected = (
        f"demo[bar,foo] @ {(base / rel_path).resolve().as_uri()} ;"
        ' sys_platform == "linux"'
    )
    _test_file_dependency_pep_508(
        mocker,
        "demo",
        rel_path,
        requirement,
        expected,
    )


@pytest.mark.parametrize(
    "name,path,extras,constraint,expected",
    [
        (
            "demo",
            DIST_PATH / TEST_FILE,
            None,
            None,
            f"demo (*) @ {(DIST_PATH / TEST_FILE).as_uri()}",
        ),
        (
            "demo",
            DIST_PATH / TEST_FILE,
            ["foo"],
            "1.2",
            f"demo[foo] (1.2) @ {(DIST_PATH / TEST_FILE).as_uri()}",
        ),
    ],
)
def test_file_dependency_string_representation(
    name: str,
    path: Path,
    extras: list[str] | None,
    constraint: str | None,
    expected: str,
) -> None:
    dependency = FileDependency(name=name, path=path, extras=extras)
    if constraint:
        dependency.constraint = constraint  # type: ignore[assignment]
    assert str(dependency) == expected
poetry-core-2.1.1/tests/packages/test_main.py
from __future__ import annotations

from typing import TYPE_CHECKING
from typing import cast

import pytest

from poetry.core.constraints.version import Version
from poetry.core.packages.dependency import Dependency


if TYPE_CHECKING:
    from poetry.core.packages.url_dependency import URLDependency
    from poetry.core.packages.vcs_dependency import VCSDependency


def test_dependency_from_pep_508() -> None:
    name = "requests"
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == name
    assert str(dep.constraint) == "*"


def test_dependency_from_pep_508_with_version() -> None:
    name = "requests==2.18.0"
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"


def test_dependency_from_pep_508_with_parens() -> None:
    name = "requests (==2.18.0)"
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"


def test_dependency_from_pep_508_with_constraint() -> None:
    name = "requests>=2.12.0,!=2.17.*,<3.0"
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == ">=2.12.0,<2.17.dev0 || >=2.18.dev0,<3.0"


def test_dependency_from_pep_508_with_extras() -> None:
    name = 'requests==2.18.0; extra == "foo" or extra == "bar"'
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert dep.in_extras == ["foo", "bar"]
    assert str(dep.marker) == 'extra == "foo" or extra == "bar"'


def test_dependency_from_pep_508_with_extra_and_inverse_extra() -> None:
    name = 'requests==2.18.0; extra != "foo" and extra == "bar"'
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert dep.in_extras == ["bar"]
    assert str(dep.marker) == 'extra != "foo" and extra == "bar"'


def test_dependency_from_pep_508_with_python_version() -> None:
    name = 'requests (==2.18.0); python_version == "2.7" or python_version == "2.6"'
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert dep.extras == frozenset()
    assert dep.python_versions == "~2.7 || ~2.6"
    assert str(dep.marker) == 'python_version == "2.7" or python_version == "2.6"'


def test_dependency_from_pep_508_with_single_python_version() -> None:
    name = 'requests (==2.18.0); python_version == "2.7"'
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert dep.extras == frozenset()
    assert dep.python_versions == "~2.7"
    assert str(dep.marker) == 'python_version == "2.7"'


def test_dependency_from_pep_508_with_platform() -> None:
    name = 'requests (==2.18.0); sys_platform == "win32" or sys_platform == "darwin"'
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert dep.extras == frozenset()
    assert dep.python_versions == "*"
    assert str(dep.marker) == 'sys_platform == "win32" or sys_platform == "darwin"'


def test_dependency_from_pep_508_complex() -> None:
    name = (
        "requests (==2.18.0); "
        'python_version >= "2.7" and python_version != "3.2" '
        'and (sys_platform == "win32" or sys_platform == "darwin") '
        'and extra == "foo"'
    )
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert dep.in_extras == ["foo"]
    assert dep.python_versions == ">=2.7 !=3.2.*"
    assert (
        str(dep.marker) == 'python_version >= "2.7" and python_version != "3.2" '
        'and (sys_platform == "win32" or sys_platform == "darwin") '
        'and extra == "foo"'
    )


@pytest.mark.parametrize(
    "marker_value",
    [
        "3.3 3.4 3.5",  # space
        "3.3, 3.4, 3.5",  # comma
        "3.3|3.4|3.5",  # pipe
    ],
)
def test_dependency_python_version_in_(marker_value: str) -> None:
    name = f"requests (==2.18.0); python_version in '{marker_value}'"
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert dep.python_versions == "3.3.* || 3.4.* || 3.5.*"
    assert str(dep.marker) == f'python_version in "{marker_value}"'


@pytest.mark.parametrize(
    "marker_value",
    [
        "win32 darwin",  # space
        "win32, darwin",  # comma
        "win32|darwin",  # pipe
    ],
)
def test_dependency_platform_in(marker_value: str) -> None:
    name = f"requests (==2.18.0); sys_platform in '{marker_value}'"
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert str(dep.marker) == f'sys_platform in "{marker_value}"'


def test_dependency_with_extra() -> None:
    name = "requests[security] (==2.18.0)"
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"

    assert len(dep.extras) == 1
    assert "security" in dep.extras


def test_dependency_from_pep_508_with_python_version_union_of_multi() -> None:
    name = (
        "requests (==2.18.0); "
        '(python_version >= "2.7" and python_version < "2.8") '
        'or (python_version >= "3.4" and python_version < "3.5")'
    )
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert dep.extras == frozenset()
    assert dep.python_versions == "~2.7 || ~3.4"
    assert str(dep.marker) == 'python_version == "2.7" or python_version == "3.4"'


def test_dependency_from_pep_508_with_not_in_op_marker() -> None:
    name = (
        'jinja2 (>=2.7,<2.8); python_version not in "3.0,3.1,3.2" and extra == "export"'
    )

    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "jinja2"
    assert str(dep.constraint) == ">=2.7,<2.8"
    assert dep.in_extras == ["export"]
    assert dep.python_versions == "!=3.0.*, !=3.1.*, !=3.2.*"
    assert (
        str(dep.marker) == 'python_version not in "3.0,3.1,3.2" and extra == "export"'
    )


def test_dependency_from_pep_508_with_git_url() -> None:
    name = "django-utils @ git+ssh://git@corp-gitlab.com/corp-utils.git@1.2"

    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "django-utils"
    assert dep.is_vcs()
    dep = cast("VCSDependency", dep)
    assert dep.vcs == "git"
    assert dep.source == "ssh://git@corp-gitlab.com/corp-utils.git"
    assert dep.reference == "1.2"


def test_dependency_from_pep_508_with_git_url_and_subdirectory() -> None:
    name = (
        "django-utils @"
        " git+ssh://git@corp-gitlab.com/corp-utils.git@1.2#subdirectory=package-dir"
    )

    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "django-utils"
    assert dep.is_vcs()
    dep = cast("VCSDependency", dep)
    assert dep.vcs == "git"
    assert dep.source == "ssh://git@corp-gitlab.com/corp-utils.git"
    assert dep.reference == "1.2"
    assert dep.directory == "package-dir"


def test_dependency_from_pep_508_with_git_url_and_comment_and_extra() -> None:
    name = (
        "poetry @ git+https://github.com/python-poetry/poetry.git@b;ar;#egg=poetry"
        ' ; extra == "foo;"'
    )

    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "poetry"
    assert dep.is_vcs()
    dep = cast("VCSDependency", dep)
    assert dep.vcs == "git"
    assert dep.source == "https://github.com/python-poetry/poetry.git"
    assert dep.reference == "b;ar;"
    assert dep.in_extras == ["foo;"]


def test_dependency_from_pep_508_with_url() -> None:
    name = "django-utils @ https://example.com/django-utils-1.0.0.tar.gz"

    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "django-utils"
    assert dep.is_url()
    dep = cast("URLDependency", dep)
    assert dep.url == "https://example.com/django-utils-1.0.0.tar.gz"


def test_dependency_from_pep_508_with_url_and_subdirectory() -> None:
    name = (
        "django-utils @"
        " https://example.com/django-utils-1.0.0.tar.gz#subdirectory=django"
    )

    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "django-utils"
    assert dep.is_url()
    dep = cast("URLDependency", dep)
    assert dep.url == "https://example.com/django-utils-1.0.0.tar.gz"
    assert dep.directory == "django"


def test_dependency_from_pep_508_with_wheel_url() -> None:
    name = (
        "example_wheel @ https://example.com/example_wheel-14.0.2-py2.py3-none-any.whl"
    )

    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "example-wheel"
    assert str(dep.constraint) == "14.0.2"


def test_dependency_from_pep_508_with_python_full_version() -> None:
    name = (
        "requests (==2.18.0); "
        '(python_version >= "2.7" and python_version < "2.8") '
        'or (python_full_version >= "3.4" and python_full_version < "3.5.4")'
    )
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "requests"
    assert str(dep.constraint) == "2.18.0"
    assert dep.extras == frozenset()
    assert dep.python_versions == "~2.7 || >=3.4.0 <3.5.4"
    assert (
        str(dep.marker) == 'python_version == "2.7" '
        'or python_full_version >= "3.4.0" and python_full_version < "3.5.4"'
    )


def test_dependency_from_pep_508_with_python_full_version_pep440_compatible_release_asterisk() -> (
    None
):
    name = 'pathlib2 ; python_version == "3.4.*" or python_version < "3"'
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "pathlib2"
    assert str(dep.constraint) == "*"
    assert dep.python_versions == "==3.4.* || <3"


def test_dependency_from_pep_508_with_python_full_version_pep440_compatible_release_tilde() -> (
    None
):
    name = 'pathlib2 ; python_version ~= "3.4" or python_version < "3"'
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "pathlib2"
    assert str(dep.constraint) == "*"
    assert dep.python_versions == "~=3.4 || <3"


def test_dependency_from_pep_508_should_not_produce_empty_constraints_for_correct_markers() -> (
    None
):
    name = (
        'pytest-mypy; python_implementation != "PyPy" and python_version <= "3.10" and'
        ' python_version > "3"'
    )
    dep = Dependency.create_from_pep_508(name)

    assert dep.name == "pytest-mypy"
    assert str(dep.constraint) == "*"
    assert dep.python_versions == "<3.11 >=3"
    assert dep.python_constraint.allows(Version.parse("3.6"))
    assert dep.python_constraint.allows(Version.parse("3.10.4"))
    assert dep.python_constraint.allows(Version.parse("3"))
    assert dep.python_constraint.allows(Version.parse("3.0.1"))
    assert (
        str(dep.marker)
        == 'platform_python_implementation != "PyPy" and python_version <= "3.10" and'
        ' python_version > "3"'
    )
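

# Hedged sketch: as the assertion above shows, create_from_pep_508 normalizes
# python_implementation to platform_python_implementation in markers.
def _example_marker_name_normalization() -> None:
    dep = Dependency.create_from_pep_508('foo ; python_implementation != "PyPy"')
    assert str(dep.marker) == 'platform_python_implementation != "PyPy"'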
poetry-core-2.1.1/tests/packages/test_package.py
from __future__ import annotations

import random

from pathlib import Path
from typing import TYPE_CHECKING
from typing import cast

import pytest

from poetry.core.constraints.version import Version
from poetry.core.constraints.version.exceptions import ParseConstraintError
from poetry.core.factory import Factory
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.dependency_group import DependencyGroup
from poetry.core.packages.package import Package
from poetry.core.packages.project_package import ProjectPackage
from poetry.core.version.exceptions import InvalidVersionError


if TYPE_CHECKING:
    from poetry.core.packages.directory_dependency import DirectoryDependency
    from poetry.core.packages.file_dependency import FileDependency
    from poetry.core.packages.url_dependency import URLDependency
    from poetry.core.packages.vcs_dependency import VCSDependency


@pytest.fixture()
def package_with_groups() -> Package:
    package = Package("foo", "1.2.3")

    optional_group = DependencyGroup("optional", optional=True)
    optional_group.add_dependency(Factory.create_dependency("bam", "^3.0.0"))

    package.add_dependency(Factory.create_dependency("bar", "^1.0.0"))
    package.add_dependency(Factory.create_dependency("baz", "^1.1.0"))
    package.add_dependency(Factory.create_dependency("bim", "^2.0.0", groups=["dev"]))
    package.add_dependency_group(optional_group)

    return package


def test_package_authors() -> None:
    package = Package("foo", "0.1.0")

    package.authors = ["Sébastien Eustace "]
    assert package.author_name == "Sébastien Eustace"
    assert package.author_email == "sebastien@eustace.io"

    package.authors = ["John Doe", *package.authors]
    assert package.author_name == "John Doe"
    assert package.author_email is None


def test_package_authors_invalid() -> None:
    package = Package("foo", "0.1.0")

    package.authors = [""
    )


@pytest.mark.parametrize(
    ("name", "email"),
    [
        ("Sébastien Eustace", "sebastien@eustace.io"),
        ("John Doe", None),
        ("'Jane Doe'", None),
        ('"Jane Doe"', None),
        ("MyCompany", None),
        ("Some Company’s", None),  # noqa: RUF001
        ("MyCompany's R&D", "rnd@MyCompanyName.MyTLD"),
        ("Doe, John", None),
        ("(Doe, John)", None),
        ("John Doe", "john@john.doe"),
        ("Doe, John", "dj@john.doe"),
        ("MyCompanyName R&D", "rnd@MyCompanyName.MyTLD"),
        ("John-Paul: Doe", None),
        ("John-Paul: Doe", "jp@nomail.none"),
        ("John Doe the 3rd", "3rd@jd.net"),
        ("FirstName LastName firstname.lastname@company.com", None),
        ("Surname, Given Name [Department]", None),
    ],
)
def test_package_authors_valid(name: str, email: str | None) -> None:
    package = Package("foo", "0.1.0")

    author = name if email is None else f"{name} <{email}>"
    package.authors = [author]
    assert package.author_name == name
    assert package.author_email == email


@pytest.mark.parametrize(
    ("name",),
    [
        ("",),
        ("",),
        (" None:
    package = Package("foo", "0.1.0")

    package.authors = [name]
    with pytest.raises(ValueError):
        package.author_name  # noqa: B018


@pytest.mark.parametrize("groups", [["main"], ["dev"]])
def test_package_add_dependency_vcs_groups(groups: list[str], f: Factory) -> None:
    package = Package("foo", "0.1.0")

    dependency = package.add_dependency(
        f.create_dependency(
            "poetry",
            {"git": "https://github.com/python-poetry/poetry.git"},
            groups=groups,
        )
    )
    assert dependency.groups == frozenset(groups)


def test_package_add_dependency_vcs_groups_default_main(f: Factory) -> None:
    package = Package("foo", "0.1.0")

    dependency = package.add_dependency(
        f.create_dependency(
            "poetry", {"git": "https://github.com/python-poetry/poetry.git"}
        )
    )
    assert dependency.groups == frozenset(["main"])


@pytest.mark.parametrize("groups", [["main"], ["dev"]])
@pytest.mark.parametrize("optional", [True, False])
def test_package_url_groups_optional(
    groups: list[str], optional: bool, f: Factory
) -> None:
    package = Package("foo", "0.1.0")

    dependency = package.add_dependency(
        f.create_dependency(
            "poetry",
            {
                "url": "https://github.com/python-poetry/poetry/releases/download/1.0.5/poetry-1.0.5-linux.tar.gz",
                "optional": optional,
            },
            groups=groups,
        )
    )
    assert dependency.groups == frozenset(groups)
    assert dependency.is_optional() == optional


def test_package_equality_simple() -> None:
    assert Package("foo", "0.1.0") == Package("foo", "0.1.0")
    assert Package("foo", "0.1.0") != Package("foo", "0.1.1")
    assert Package("bar", "0.1.0") != Package("foo", "0.1.0")


def test_package_equality_source_type() -> None:
    a1 = Package("a", "0.1.0", source_type="file")
    a2 = Package(a1.name, a1.version, source_type="directory")
    a3 = Package(a1.name, a1.version, source_type=a1.source_type)
    a4 = Package(a1.name, a1.version)

    assert a1 == a1
    assert a1 == a3
    assert a1 != a2
    assert a2 != a3
    assert a1 != a4
    assert a2 != a4


def test_package_equality_source_url() -> None:
    a1 = Package("a", "0.1.0", source_type="file", source_url="/some/path")
    a2 = Package(
        a1.name, a1.version, source_type=a1.source_type, source_url="/some/other/path"
    )
    a3 = Package(
        a1.name, a1.version, source_type=a1.source_type, source_url=a1.source_url
    )
    a4 = Package(a1.name, a1.version, source_type=a1.source_type)

    assert a1 == a1
    assert a1 == a3
    assert a1 != a2
    assert a2 != a3
    assert a1 != a4
    assert a2 != a4


def test_package_equality_source_reference() -> None:
    a1 = Package(
        "a",
        "0.1.0",
        source_type="git",
        source_url="https://foo.bar",
        source_reference="c01b317af582501c5ba07b23d5bef3fbada2d4ef",
    )
    a2 = Package(
        a1.name,
        a1.version,
        source_type="git",
        source_url="https://foo.bar",
        source_reference="a444731cd243cb5cd04e4d5fb81f86e1fecf8a00",
    )
    a3 = Package(
        a1.name,
        a1.version,
        source_type="git",
        source_url="https://foo.bar",
        source_reference="c01b317af582501c5ba07b23d5bef3fbada2d4ef",
    )
    a4 = Package(a1.name, a1.version, source_type="git")

    assert a1 == a1
    assert a1 == a3
    assert a1 != a2
    assert a2 != a3
    assert a1 != a4
    assert a2 != a4


def test_package_resolved_reference_is_relevant_for_equality_only_if_present_for_both_packages() -> (
    None
):
    a1 = Package(
        "a",
        "0.1.0",
        source_type="git",
        source_url="https://foo.bar",
        source_reference="master",
        source_resolved_reference="c01b317af582501c5ba07b23d5bef3fbada2d4ef",
    )
    a2 = Package(
        a1.name,
        a1.version,
        source_type="git",
        source_url="https://foo.bar",
        source_reference="master",
        source_resolved_reference="a444731cd243cb5cd04e4d5fb81f86e1fecf8a00",
    )
    a3 = Package(
        a1.name,
        a1.version,
        source_type="git",
        source_url="https://foo.bar",
        source_reference="master",
        source_resolved_reference="c01b317af582501c5ba07b23d5bef3fbada2d4ef",
    )
    a4 = Package(
        a1.name,
        a1.version,
        source_type="git",
        source_url="https://foo.bar",
        source_reference="master",
    )

    assert a1 == a1
    assert a1 == a3
    assert a1 != a2
    assert a2 != a3
    assert a1 == a4
    assert a2 == a4


def test_package_equality_source_subdirectory() -> None:
    a1 = Package(
        "a",
        "0.1.0",
        source_type="git",
        source_url="https://foo.bar",
        source_subdirectory="baz",
    )
    a2 = Package(
        a1.name,
        a1.version,
        source_type="git",
        source_url="https://foo.bar",
        source_subdirectory="qux",
    )
    a3 = Package(
        a1.name,
        a1.version,
        source_type="git",
        source_url="https://foo.bar",
        source_subdirectory="baz",
    )
    a4 = Package(a1.name, a1.version, source_type="git")

    assert a1 == a3
    assert a1 != a2
    assert a2 != a3
    assert a1 != a4
    assert a2 != a4


def test_complete_name() -> None:
    assert Package("foo", "1.2.3").complete_name == "foo"
    assert (
        Package("foo", "1.2.3", features=["baz", "bar"]).complete_name == "foo[bar,baz]"
    )
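

# Hedged sketch: complete_name appears above to sort features; assuming that
# ordering is guaranteed regardless of input order.
def _example_complete_name_sorts_features() -> None:
    assert Package("foo", "1.2.3", features=["zzz", "aaa"]).complete_name == (
        "foo[aaa,zzz]"
    )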


def test_to_dependency() -> None:
    package = Package("foo", "1.2.3")
    dep = package.to_dependency()

    assert dep.name == "foo"
    assert dep.constraint == package.version


def test_to_dependency_with_python_constraint() -> None:
    package = Package("foo", "1.2.3")
    package.python_versions = ">=3.6"
    dep = package.to_dependency()

    assert dep.name == "foo"
    assert dep.constraint == package.version
    assert dep.python_versions == ">=3.6"


def test_to_dependency_with_features() -> None:
    package = Package("foo", "1.2.3", features=["baz", "bar"])
    dep = package.to_dependency()

    assert dep.name == "foo"
    assert dep.constraint == package.version
    assert dep.features == frozenset({"bar", "baz"})


def test_to_dependency_for_directory() -> None:
    path = Path(__file__).parent.parent.joinpath("fixtures/simple_project")
    package = Package(
        "foo",
        "1.2.3",
        source_type="directory",
        source_url=path.as_posix(),
        features=["baz", "bar"],
    )
    dep = package.to_dependency()

    assert dep.name == "foo"
    assert dep.constraint == package.version
    assert dep.features == frozenset({"bar", "baz"})
    assert dep.is_directory()
    dep = cast("DirectoryDependency", dep)
    assert dep.path == path
    assert dep.source_type == "directory"
    assert dep.source_url == path.as_posix()


def test_to_dependency_for_file() -> None:
    path = Path(__file__).parent.parent.joinpath(
        "fixtures/distributions/demo-0.1.0.tar.gz"
    )
    package = Package(
        "foo",
        "1.2.3",
        source_type="file",
        source_url=path.as_posix(),
        source_subdirectory="qux",
        features=["baz", "bar"],
    )
    dep = package.to_dependency()

    assert dep.name == "foo"
    assert dep.constraint == package.version
    assert dep.features == frozenset({"bar", "baz"})
    assert dep.is_file()
    dep = cast("FileDependency", dep)
    assert dep.path == path
    assert dep.source_type == "file"
    assert dep.source_url == path.as_posix()
    assert dep.source_subdirectory == "qux"


def test_to_dependency_for_url() -> None:
    package = Package(
        "foo",
        "1.2.3",
        source_type="url",
        source_url="https://example.com/path.tar.gz",
        source_subdirectory="qux",
        features=["baz", "bar"],
    )
    dep = package.to_dependency()

    assert dep.name == "foo"
    assert dep.constraint == package.version
    assert dep.features == frozenset({"bar", "baz"})
    assert dep.is_url()
    dep = cast("URLDependency", dep)
    assert dep.url == "https://example.com/path.tar.gz"
    assert dep.source_type == "url"
    assert dep.source_url == "https://example.com/path.tar.gz"
    assert dep.source_subdirectory == "qux"


def test_to_dependency_for_vcs() -> None:
    package = Package(
        "foo",
        "1.2.3",
        source_type="git",
        source_url="https://github.com/foo/foo.git",
        source_reference="master",
        source_resolved_reference="123456",
        source_subdirectory="baz",
        features=["baz", "bar"],
    )
    dep = package.to_dependency()

    assert dep.name == "foo"
    assert dep.constraint == package.version
    assert dep.features == frozenset({"bar", "baz"})
    assert dep.is_vcs()
    dep = cast("VCSDependency", dep)
    assert dep.source_type == "git"
    assert dep.source == "https://github.com/foo/foo.git"
    assert dep.reference == "master"
    assert dep.source_reference == "master"
    assert dep.source_resolved_reference == "123456"
    assert dep.directory == "baz"
    assert dep.source_subdirectory == "baz"


def test_package_clone(f: Factory) -> None:
    # TODO(nic): this test is not future-proof, in that any attributes added
    #  to the Package object and not filled out in this test setup might
    #  cause comparisons to match when they otherwise should not.  A factory
    #  method to create a Package object with all fields fully randomized
    #  would be the most rigorous test for this, but that's likely overkill;
    #  a rough sketch of such a factory follows this test.
    p = Package(
        "lol_wut",
        "3.141.5926535",
        source_type="git",
        source_url="http://some.url",
        source_reference="fe4d2adabf3feb5d32b70ab5c105285fa713b10c",
        source_resolved_reference="fe4d2adabf3feb5d32b70ab5c105285fa713b10c",
        features=["abc", "def"],
        develop=random.choice((True, False)),
    )
    p.add_dependency(Factory.create_dependency("foo", "^1.2.3"))
    p.add_dependency(Factory.create_dependency("foo", "^1.2.3", groups=["dev"]))
    p.files = ["file1", "file2", "file3"]  # type: ignore[assignment]
    p.homepage = "https://some.other.url"
    p.repository_url = "http://bug.farm"
    p.documentation_url = "http://lorem.ipsum/dolor/sit.amet"
    p2 = p.clone()

    assert p == p2
    assert p.__dict__ == p2.__dict__
    assert len(p2.requires) == 1
    assert len(p2.all_requires) == 2
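

# A rough, untested sketch of the fully randomized factory mentioned in the
# TODO above. Illustrative only; the helper name is hypothetical and this is
# not part of poetry-core's API (underscore-prefixed so pytest skips it).
def _make_randomized_package() -> Package:
    def _hex(n: int = 8) -> str:
        return "".join(random.choices("0123456789abcdef", k=n))

    package = Package(
        f"pkg-{_hex()}",
        f"{random.randint(0, 9)}.{random.randint(0, 99)}.{random.randint(0, 999)}",
        source_type="git",
        source_url=f"http://example.com/{_hex()}.git",
        source_reference=_hex(40),
        source_resolved_reference=_hex(40),
        features=[_hex(4), _hex(4)],
        develop=random.choice((True, False)),
    )
    package.homepage = f"https://example.org/{_hex()}"
    return package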


def test_dependency_groups(package_with_groups: Package) -> None:
    assert len(package_with_groups.requires) == 2
    assert len(package_with_groups.all_requires) == 4


def test_without_dependency_groups(package_with_groups: Package) -> None:
    package = package_with_groups.without_dependency_groups(["dev"])

    assert len(package.requires) == 2
    assert len(package.all_requires) == 3

    package = package_with_groups.without_dependency_groups(["dev", "optional"])

    assert len(package.requires) == 2
    assert len(package.all_requires) == 2


def test_with_dependency_groups(package_with_groups: Package) -> None:
    package = package_with_groups.with_dependency_groups([])

    assert len(package.requires) == 2
    assert len(package.all_requires) == 3

    package = package_with_groups.with_dependency_groups(["optional"])

    assert len(package.requires) == 2
    assert len(package.all_requires) == 4


def test_without_optional_dependency_groups(package_with_groups: Package) -> None:
    package = package_with_groups.without_optional_dependency_groups()

    assert len(package.requires) == 2
    assert len(package.all_requires) == 3


def test_only_with_dependency_groups(package_with_groups: Package) -> None:
    package = package_with_groups.with_dependency_groups(["dev"], only=True)

    assert len(package.requires) == 0
    assert len(package.all_requires) == 1

    package = package_with_groups.with_dependency_groups(["dev", "optional"], only=True)

    assert len(package.requires) == 0
    assert len(package.all_requires) == 2

    package = package_with_groups.with_dependency_groups(["main"], only=True)

    assert len(package.requires) == 2
    assert len(package.all_requires) == 2


@pytest.mark.parametrize(
    ("package", "dependency", "ignore_source_type", "result"),
    [
        (Package("foo", "0.1.0"), Dependency("foo", ">=0.1.0"), False, True),
        (Package("foo", "0.1.0"), Dependency("foo", "<0.1.0"), False, False),
        (
            Package("foo", "0.1.0"),
            Dependency("foo", ">=0.1.0", source_type="git"),
            False,
            False,
        ),
        (
            Package("foo", "0.1.0"),
            Dependency("foo", ">=0.1.0", source_type="git"),
            True,
            True,
        ),
        (
            Package("foo", "0.1.0"),
            Dependency("foo", "<0.1.0", source_type="git"),
            True,
            False,
        ),
    ],
)
def test_package_satisfies(
    package: Package, dependency: Dependency, ignore_source_type: bool, result: bool
) -> None:
    assert package.satisfies(dependency, ignore_source_type) == result


@pytest.mark.parametrize(
    ("package_repo", "dependency_repo", "result"),
    [
        ("pypi", None, True),
        ("private", None, True),
        ("pypi", "pypi", True),
        ("private", "private", True),
        ("pypi", "private", False),
        ("private", "pypi", False),
    ],
)
def test_package_satisfies_on_repositories(
    package_repo: str,
    dependency_repo: str | None,
    result: bool,
) -> None:
    source_type = None if package_repo == "pypi" else "legacy"
    source_reference = None if package_repo == "pypi" else package_repo
    package = Package(
        "foo", "0.1.0", source_type=source_type, source_reference=source_reference
    )

    dependency = Dependency("foo", ">=0.1.0")
    dependency.source_name = dependency_repo

    assert package.satisfies(dependency) == result


def test_package_pep592_default_not_yanked() -> None:
    package = Package("foo", "1.0")

    assert not package.yanked
    assert package.yanked_reason == ""


@pytest.mark.parametrize(
    ("yanked", "expected_yanked", "expected_yanked_reason"),
    [
        (True, True, ""),
        (False, False, ""),
        ("the reason", True, "the reason"),
        ("", True, ""),
    ],
)
def test_package_pep592_yanked(
    yanked: str | bool, expected_yanked: bool, expected_yanked_reason: str
) -> None:
    package = Package("foo", "1.0", yanked=yanked)

    assert package.yanked == expected_yanked
    assert package.yanked_reason == expected_yanked_reason


def test_python_versions_are_made_precise() -> None:
    package = Package("foo", "1.2.3")
    package.python_versions = ">3.6,<=3.10"

    with pytest.warns(DeprecationWarning):
        assert (
            str(package.python_marker)
            == 'python_full_version > "3.6.0" and python_full_version <= "3.10.0"'
        )
    assert str(package.python_constraint) == ">3.6,<=3.10"


def test_cannot_update_package_version() -> None:
    package = Package("foo", "1.2.3")
    with pytest.raises(AttributeError):
        package.version = "1.2.4"  # type: ignore[misc,assignment]


def test_project_package_version_update_string() -> None:
    package = ProjectPackage("foo", "1.2.3")
    package.version = "1.2.4"  # type: ignore[assignment]
    assert package.version.text == "1.2.4"


def test_project_package_version_update_version() -> None:
    package = ProjectPackage("foo", "1.2.3")
    package.version = Version.parse("1.2.4")
    assert package.version.text == "1.2.4"


def test_project_package_hash_not_changed_when_version_is_changed() -> None:
    package = ProjectPackage("foo", "1.2.3")
    package_hash = hash(package)
    package_clone = package.clone()
    assert package == package_clone
    assert hash(package) == hash(package_clone)

    package.version = Version.parse("1.2.4")

    assert hash(package) == package_hash, "Hash must not change!"
    assert hash(package_clone) == package_hash
    assert package != package_clone
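

# Illustrative sketch (not collected by pytest): because the hash stays
# stable, a ProjectPackage can be mutated while in use as a dict key without
# breaking lookup. Assumes only the invariant asserted in the test above.
def _hash_stability_keeps_dict_lookup_intact() -> None:
    package = ProjectPackage("foo", "1.2.3")
    table = {package: "metadata"}
    package.version = Version.parse("1.2.4")
    assert table[package] == "metadata"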


def test_package_invalid_version() -> None:
    with pytest.raises(InvalidVersionError) as exc_info:
        Package("foo", "1.2.3.bogus")

    expected = "Invalid version '1.2.3.bogus' on package foo"
    assert str(exc_info.value) == expected


def test_package_invalid_python_versions() -> None:
    package = Package("foo", "1.2.3")
    with pytest.raises(ParseConstraintError) as exc_info:
        package.python_versions = ">=3.6.y"

    expected = "Invalid python versions '>=3.6.y' on foo (1.2.3)"
    assert str(exc_info.value) == expected


def test_package_empty_python_versions() -> None:
    package = Package("foo", "1.2.3")
    with pytest.raises(ParseConstraintError) as exc_info:
        package.python_versions = "~2.7, >=3.4, <3.8"

    expected = "Python versions '~2.7, >=3.4, <3.8' on foo (1.2.3) is empty"
    assert str(exc_info.value) == expected
poetry-core-2.1.1/tests/packages/test_specification.py
from __future__ import annotations

import pytest

from poetry.core.packages.specification import PackageSpecification


@pytest.mark.parametrize(
    "spec1, spec2, expected",
    [
        (PackageSpecification("a"), PackageSpecification("a"), True),
        (PackageSpecification("a", "type1"), PackageSpecification("a", "type1"), True),
        (PackageSpecification("a", "type1"), PackageSpecification("a", "type2"), False),
        (PackageSpecification("a"), PackageSpecification("a", "type1"), False),
        (PackageSpecification("a", "type1"), PackageSpecification("a"), False),
    ],
)
def test_is_same_package_source_type(
    spec1: PackageSpecification,
    spec2: PackageSpecification,
    expected: bool,
) -> None:
    assert spec1.is_same_package_as(spec2) == expected


@pytest.mark.parametrize(
    ("source_type", "result"),
    [
        ("directory", True),
        ("file", True),
        ("url", True),
        ("git", True),
        ("legacy", False),
        (None, False),
    ],
)
def test_is_direct_origin(source_type: str | None, result: bool) -> None:
    assert PackageSpecification("package", source_type).is_direct_origin() == result


@pytest.mark.parametrize(
    "spec1, spec2, expected",
    [
        (PackageSpecification("a"), PackageSpecification("a"), True),
        (PackageSpecification("a"), PackageSpecification("b"), False),
        (PackageSpecification("a", features=["x"]), PackageSpecification("a"), True),
        (
            PackageSpecification("a", features=["x"]),
            PackageSpecification("a", features=["x"]),
            True,
        ),
        (
            PackageSpecification("a", features=["x"]),
            PackageSpecification("b", features=["x"]),
            False,
        ),
        (
            PackageSpecification("a", features=["x"]),
            PackageSpecification("a", features=["y"]),
            False,
        ),
        (
            PackageSpecification("a", features=["x"]),
            PackageSpecification("a", features=["x", "y"]),
            False,
        ),
        (
            PackageSpecification("a", features=["x", "y"]),
            PackageSpecification("a", features=["x"]),
            True,
        ),
    ],
)
def test_specification_provides(
    spec1: PackageSpecification,
    spec2: PackageSpecification,
    expected: bool,
) -> None:
    assert spec1.provides(spec2) == expected
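

# Direction matters for provides(): a sketch (not collected by pytest)
# restating two of the parametrized rows above. A spec carrying more features
# provides one carrying fewer, not vice versa.
def _provides_is_directional() -> None:
    full = PackageSpecification("a", features=["x", "y"])
    partial = PackageSpecification("a", features=["x"])
    assert full.provides(partial)
    assert not partial.provides(full)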


@pytest.mark.parametrize(
    "spec1, spec2",
    [
        (
            # nothing except for name and features matters if no source
            PackageSpecification("a", None, "url1", "ref1", "resref1", "sub1"),
            PackageSpecification("a", None, "url2", "ref2", "resref2", "sub2"),
        ),
        (
            # ref does not matter if resolved ref is equal
            PackageSpecification("a", "type", "url", "ref1", "resref1"),
            PackageSpecification("a", "type", "url", "ref2", "resref1"),
        ),
        (
            # resolved ref does not matter if no ref
            PackageSpecification("a", "type", "url", None, "resref1"),
            PackageSpecification("a", "type", "url", None, "resref2"),
        ),
        (
            # resolved ref unset when ref starts with other
            PackageSpecification("a", "type", "url", "ref/a", "resref1"),
            PackageSpecification("a", "type", "url", "ref", None),
        ),
        (
            # resolved ref unset when ref starts with other
            PackageSpecification("a", "type", "url", "ref/a", None),
            PackageSpecification("a", "type", "url", "ref", "resref2"),
        ),
    ],
)
def test_equal_specifications_have_same_hash(
    spec1: PackageSpecification, spec2: PackageSpecification
) -> None:
    assert spec1 == spec2
    assert spec2 == spec1
    assert hash(spec1) == hash(spec2)
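

# Consequence sketch (illustrative only, not collected by pytest): specs that
# compare equal and hash identically collapse to one entry in hash-based
# containers, e.g. the "equal resolved ref" case above.
def _equal_specs_deduplicate_in_sets() -> None:
    spec1 = PackageSpecification("a", "type", "url", "ref1", "resref1")
    spec2 = PackageSpecification("a", "type", "url", "ref2", "resref1")
    assert len({spec1, spec2}) == 1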


@pytest.mark.parametrize(
    "source_url,normalized_url",
    [
        ("https://github.com/demo/demo.git", "https://github.com/demo/demo.git"),
        ("git@github.com:demo/demo.git", "ssh://git@github.com/demo/demo.git"),
    ],
)
def test_specification_normalize_source_url_method(
    source_url: str, normalized_url: str
) -> None:
    assert (
        PackageSpecification._normalize_source_url("git", source_url) == normalized_url
    )
    assert (
        PackageSpecification._normalize_source_url("notgit", source_url) == source_url
    )


@pytest.mark.parametrize(
    "source_url,normalized_url",
    [
        ("https://github.com/demo/demo.git", "https://github.com/demo/demo.git"),
        ("git@github.com:demo/demo.git", "ssh://git@github.com/demo/demo.git"),
    ],
)
def test_specification_uses_normalize_source_url_for_git(
    source_url: str, normalized_url: str
) -> None:
    assert (
        PackageSpecification(
            name="demo",
            source_type="git",
            source_url=source_url,
        ).source_url
        == normalized_url
    )
poetry-core-2.1.1/tests/packages/test_url_dependency.py
from __future__ import annotations

import pytest

from poetry.core.packages.url_dependency import URLDependency
from poetry.core.version.markers import SingleMarker


def test_to_pep_508() -> None:
    dependency = URLDependency(
        "pytorch",
        "https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl",
    )

    expected = (
        "pytorch @"
        " https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl"
    )
    assert dependency.to_pep_508() == expected


def test_to_pep_508_with_extras() -> None:
    dependency = URLDependency(
        "pytorch",
        "https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl",
        extras=["foo", "bar"],
    )

    expected = (
        "pytorch[bar,foo] @"
        " https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl"
    )
    assert dependency.to_pep_508() == expected


def test_to_pep_508_with_subdirectory() -> None:
    dependency = URLDependency(
        "demo",
        "https://github.com/foo/bar/archive/0.1.0.zip",
        directory="baz",
    )

    expected = "demo @ https://github.com/foo/bar/archive/0.1.0.zip#subdirectory=baz"
    assert dependency.to_pep_508() == expected


def test_to_pep_508_with_marker() -> None:
    dependency = URLDependency(
        "pytorch",
        "https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl",
    )
    dependency.marker = SingleMarker("sys.platform", "linux")

    expected = (
        "pytorch @"
        " https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl"
        ' ; sys_platform == "linux"'
    )
    assert dependency.to_pep_508() == expected


@pytest.mark.parametrize(
    "name,url,extras,constraint,expected",
    [
        (
            "example",
            "https://example.org/example.whl",
            None,
            None,
            "example (*) @ https://example.org/example.whl",
        ),
        (
            "example",
            "https://example.org/example.whl",
            ["foo"],
            "1.2",
            "example[foo] (1.2) @ https://example.org/example.whl",
        ),
    ],
)
def test_url_dependency_string_representation(
    name: str,
    url: str,
    extras: list[str] | None,
    constraint: str | None,
    expected: str,
) -> None:
    dependency = URLDependency(name=name, url=url, extras=extras)
    if constraint:
        dependency.constraint = constraint  # type: ignore[assignment]
    assert str(dependency) == expected
poetry-core-2.1.1/tests/packages/test_vcs_dependency.py
from __future__ import annotations

from typing import Any

import pytest

from packaging.utils import canonicalize_name

from poetry.core.packages.vcs_dependency import VCSDependency


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({}, "poetry @ git+https://github.com/python-poetry/poetry.git"),
        (
            {"extras": ["foo"]},
            "poetry[foo] @ git+https://github.com/python-poetry/poetry.git",
        ),
        (
            {"extras": ["foo", "bar"]},
            "poetry[bar,foo] @ git+https://github.com/python-poetry/poetry.git",
        ),
        (
            {"extras": ["foo", "bar"], "branch": "main"},
            "poetry[bar,foo] @ git+https://github.com/python-poetry/poetry.git@main",
        ),
        (
            {"branch": "main"},
            "poetry @ git+https://github.com/python-poetry/poetry.git@main",
        ),
        (
            {"tag": "1.0"},
            "poetry @ git+https://github.com/python-poetry/poetry.git@1.0",
        ),
        (
            {"rev": "12345"},
            "poetry @ git+https://github.com/python-poetry/poetry.git@12345",
        ),
        (
            {"directory": "sub"},
            "poetry @ git+https://github.com/python-poetry/poetry.git#subdirectory=sub",
        ),
        (
            {"branch": "main", "directory": "sub"},
            (
                "poetry @ git+https://github.com/python-poetry/poetry.git"
                "@main#subdirectory=sub"
            ),
        ),
    ],
)
def test_to_pep_508(kwargs: dict[str, Any], expected: str) -> None:
    dependency = VCSDependency(
        "poetry", "git", "https://github.com/python-poetry/poetry.git", **kwargs
    )

    assert dependency.to_pep_508() == expected


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({}, "poetry @ git+https://github.com/python-poetry/poetry.git"),
        (
            {"extras": ["foo"]},
            "poetry[foo] @ git+https://github.com/python-poetry/poetry.git",
        ),
        (
            {"extras": ["foo", "bar"]},
            "poetry[bar,foo] @ git+https://github.com/python-poetry/poetry.git",
        ),
        (
            {"extras": ["foo", "bar"], "branch": "main", "resolved_rev": "aaaa"},
            "poetry[bar,foo] @ git+https://github.com/python-poetry/poetry.git@aaaa",
        ),
        (
            {"branch": "main", "resolved_rev": "aaaa"},
            "poetry @ git+https://github.com/python-poetry/poetry.git@aaaa",
        ),
        (
            {"tag": "1.0", "resolved_rev": "aaaa"},
            "poetry @ git+https://github.com/python-poetry/poetry.git@aaaa",
        ),
        (
            {"rev": "12345", "resolved_rev": "aaaa"},
            "poetry @ git+https://github.com/python-poetry/poetry.git@aaaa",
        ),
        (
            {"directory": "sub"},
            "poetry @ git+https://github.com/python-poetry/poetry.git#subdirectory=sub",
        ),
        (
            {"branch": "main", "directory": "sub", "resolved_rev": "aaaa"},
            (
                "poetry @ git+https://github.com/python-poetry/poetry.git"
                "@aaaa#subdirectory=sub"
            ),
        ),
    ],
)
def test_to_pep_508_resolved(kwargs: dict[str, Any], expected: str) -> None:
    dependency = VCSDependency(
        "poetry", "git", "https://github.com/python-poetry/poetry.git", **kwargs
    )

    assert dependency.to_pep_508(resolved=True) == expected


def test_to_pep_508_ssh() -> None:
    dependency = VCSDependency("poetry", "git", "git@github.com:sdispater/poetry.git")

    expected = "poetry @ git+ssh://git@github.com/sdispater/poetry.git"

    assert dependency.to_pep_508() == expected


def test_to_pep_508_in_extras() -> None:
    dependency = VCSDependency(
        "poetry", "git", "https://github.com/python-poetry/poetry.git"
    )
    dependency._in_extras = [canonicalize_name("foo")]

    expected = (
        'poetry @ git+https://github.com/python-poetry/poetry.git ; extra == "foo"'
    )
    assert dependency.to_pep_508() == expected

    dependency = VCSDependency(
        "poetry", "git", "https://github.com/python-poetry/poetry.git", extras=["bar"]
    )
    dependency._in_extras = [canonicalize_name("foo")]

    expected = (
        'poetry[bar] @ git+https://github.com/python-poetry/poetry.git ; extra == "foo"'
    )

    assert dependency.to_pep_508() == expected

    dependency = VCSDependency(
        "poetry", "git", "https://github.com/python-poetry/poetry.git", "b;ar;"
    )
    dependency._in_extras = [canonicalize_name("foo;")]

    expected = (
        "poetry @ git+https://github.com/python-poetry/poetry.git@b;ar; ; extra =="
        ' "foo;"'
    )

    assert dependency.to_pep_508() == expected


@pytest.mark.parametrize(
    "name,source,branch,extras,constraint,expected",
    [
        (
            "example",
            "https://example.org/example.git",
            "main",
            None,
            None,
            "example (*) @ git+https://example.org/example.git@main",
        ),
        (
            "example",
            "https://example.org/example.git",
            "main",
            ["foo"],
            "1.2",
            "example[foo] (1.2) @ git+https://example.org/example.git@main",
        ),
    ],
)
def test_vcs_dependency_string_representation(
    name: str,
    source: str,
    branch: str,
    extras: list[str] | None,
    constraint: str | None,
    expected: str,
) -> None:
    dependency = VCSDependency(
        name=name, vcs="git", source=source, branch=branch, extras=extras
    )
    if constraint:
        dependency.constraint = constraint  # type: ignore[assignment]
    assert str(dependency) == expected


@pytest.mark.parametrize("groups", [["main"], ["dev"]])
def test_groups(groups: list[str]) -> None:
    dependency = VCSDependency(
        "poetry",
        "git",
        "https://github.com/python-poetry/poetry.git",
        groups=groups,
    )
    assert dependency.groups == frozenset(groups)


def test_vcs_dependency_can_have_resolved_reference_specified() -> None:
    dependency = VCSDependency(
        "poetry",
        "git",
        "https://github.com/python-poetry/poetry.git",
        branch="develop",
        resolved_rev="123456",
    )

    assert dependency.branch == "develop"
    assert dependency.source_reference == "develop"
    assert dependency.source_resolved_reference == "123456"


def test_vcs_dependencies_are_equal_if_resolved_references_match() -> None:
    dependency1 = VCSDependency(
        "poetry",
        "git",
        "https://github.com/python-poetry/poetry.git",
        branch="develop",
        resolved_rev="123456",
    )
    dependency2 = VCSDependency(
        "poetry",
        "git",
        "https://github.com/python-poetry/poetry.git",
        rev="123",
        resolved_rev="123456",
    )

    assert dependency1 == dependency2


@pytest.mark.parametrize(
    "source_url,normalized_url",
    [
        ("https://github.com/demo/demo.git", "https://github.com/demo/demo.git"),
        ("git@github.com:demo/demo.git", "ssh://git@github.com/demo/demo.git"),
    ],
)
def test_vcs_source_is_normalized(source_url: str, normalized_url: str) -> None:
    dependency = VCSDependency(
        name="demo",
        vcs="git",
        source=source_url,
        branch="main",
    )
    assert dependency.source == normalized_url
    assert dependency.source_url == normalized_url
poetry-core-2.1.1/tests/packages/utils/
poetry-core-2.1.1/tests/packages/utils/__init__.py
poetry-core-2.1.1/tests/packages/utils/test_utils.py
from __future__ import annotations

from pathlib import Path

import pytest

from poetry.core.constraints.generic import parse_constraint as parse_generic_constraint
from poetry.core.constraints.version import parse_constraint as parse_version_constraint
from poetry.core.constraints.version import parse_marker_version_constraint
from poetry.core.packages.utils.utils import convert_markers
from poetry.core.packages.utils.utils import create_nested_marker
from poetry.core.packages.utils.utils import get_python_constraint_from_marker
from poetry.core.packages.utils.utils import is_python_project
from poetry.core.version.markers import parse_marker


@pytest.mark.parametrize(
    "marker, expected",
    [
        (
            (
                'sys_platform == "win32" and python_version < "3.6" or sys_platform =='
                ' "linux" and python_version < "3.6" and python_version >= "3.3" or'
                ' sys_platform == "darwin" and python_version < "3.3"'
            ),
            {
                "python_version": [
                    [("<", "3.6")],
                    [("<", "3.6"), (">=", "3.3")],
                    [("<", "3.3")],
                ],
                "sys_platform": [
                    [("==", "win32")],
                    [("==", "linux")],
                    [("==", "darwin")],
                ],
            },
        ),
        (
            (
                'sys_platform == "win32" and python_version < "3.6" or sys_platform =='
                ' "win32" and python_version < "3.6" and python_version >= "3.3" or'
                ' sys_platform == "win32" and python_version < "3.3"'
            ),
            {"python_version": [[("<", "3.6")]], "sys_platform": [[("==", "win32")]]},
        ),
        (
            'python_version == "2.7" or python_version == "2.6"',
            {"python_version": [[("==", "2.7")], [("==", "2.6")]]},
        ),
        (
            (
                '(python_version < "2.7" or python_full_version >= "3.0.0") and'
                ' python_full_version < "3.6.0"'
            ),
            {"python_version": [[("<", "2.7")], [(">=", "3.0.0"), ("<", "3.6.0")]]},
        ),
        (
            (
                '(python_version < "2.7" or python_full_version >= "3.0.0") and'
                ' extra == "foo"'
            ),
            {
                "extra": [[("==", "foo")]],
                "python_version": [[("<", "2.7")], [(">=", "3.0.0")]],
            },
        ),
        (
            'python_version >= "3.9" or sys_platform == "linux"',
            {
                "python_version": [[(">=", "3.9")], []],
                "sys_platform": [[], [("==", "linux")]],
            },
        ),
        (
            'python_version >= "3.9" and sys_platform == "linux"',
            {
                "python_version": [[(">=", "3.9")]],
                "sys_platform": [[("==", "linux")]],
            },
        ),
    ],
)
def test_convert_markers(
    marker: str, expected: dict[str, list[list[tuple[str, str]]]]
) -> None:
    parsed_marker = parse_marker(marker)
    converted = convert_markers(parsed_marker)
    assert converted == expected
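

# A standalone mini-example of the dict shape convert_markers() returns,
# mirroring the last parametrized case above (sketch only, not collected).
def _convert_markers_example() -> None:
    marker = parse_marker('python_version >= "3.9" and sys_platform == "linux"')
    assert convert_markers(marker) == {
        "python_version": [[(">=", "3.9")]],
        "sys_platform": [[("==", "linux")]],
    }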


@pytest.mark.parametrize(
    ["constraint", "expected"],
    [
        ("*", ""),
        ("==linux", 'sys_platform == "linux"'),
        ("!=win32", 'sys_platform != "win32"'),
        ("!=linux, !=win32", 'sys_platform != "linux" and sys_platform != "win32"'),
        ("==linux || ==win32", 'sys_platform == "linux" or sys_platform == "win32"'),
    ],
)
def test_create_nested_marker_base_constraint(constraint: str, expected: str) -> None:
    assert (
        create_nested_marker("sys_platform", parse_generic_constraint(constraint))
        == expected
    )


@pytest.mark.parametrize(
    ["constraint", "expected"],
    [
        ("*", ""),
        # simple version
        ("3", 'python_version == "3"'),
        ("3.9", 'python_version == "3.9"'),
        ("3.9.0", 'python_full_version == "3.9.0"'),
        ("3.9.1", 'python_full_version == "3.9.1"'),
        # min
        (">=3", 'python_version >= "3"'),
        (">=3.9", 'python_version >= "3.9"'),
        (">=3.9.0", 'python_full_version >= "3.9.0"'),
        (">=3.9.1", 'python_full_version >= "3.9.1"'),
        (">3", 'python_full_version > "3.0.0"'),
        (">3.9", 'python_full_version > "3.9.0"'),
        (">3.9.0", 'python_full_version > "3.9.0"'),
        (">3.9.1", 'python_full_version > "3.9.1"'),
        # max
        ("<3", 'python_version < "3"'),
        ("<3.9", 'python_version < "3.9"'),
        ("<3.9.0", 'python_full_version < "3.9.0"'),
        ("<3.9.1", 'python_full_version < "3.9.1"'),
        ("<=3", 'python_full_version <= "3.0.0"'),
        ("<=3.9", 'python_full_version <= "3.9.0"'),
        ("<=3.9.0", 'python_full_version <= "3.9.0"'),
        ("<=3.9.1", 'python_full_version <= "3.9.1"'),
        # min and max
        (">=3.7, <3.9", 'python_version >= "3.7" and python_version < "3.9"'),
        (">=3.7, <=3.9", 'python_version >= "3.7" and python_full_version <= "3.9.0"'),
        (">3.7, <3.9", 'python_full_version > "3.7.0" and python_version < "3.9"'),
        (
            ">3.7, <=3.9",
            'python_full_version > "3.7.0" and python_full_version <= "3.9.0"',
        ),
        # union
        ("<3.7 || >=3.8", '(python_version < "3.7") or (python_version >= "3.8")'),
        (
            ">=3.7,<3.8 || >=3.9,<=3.10",
            (
                '(python_version >= "3.7" and python_version < "3.8")'
                ' or (python_version >= "3.9" and python_full_version <= "3.10.0")'
            ),
        ),
    ],
)
def test_create_nested_marker_version_constraint(
    constraint: str,
    expected: str,
) -> None:
    assert (
        create_nested_marker("python_version", parse_version_constraint(constraint))
        == expected
    )


@pytest.mark.parametrize(
    ["marker", "constraint"],
    [
        # ==
        ('python_version == "3.6"', "~3.6"),
        ('python_version == "3.6.*"', "==3.6.*"),
        ('python_version == "3.6.* "', "==3.6.*"),
        # !=
        ('python_version != "3.6"', "!=3.6.*"),
        ('python_version != "3.6.*"', "!=3.6.*"),
        ('python_version != "3.6.* "', "!=3.6.*"),
        # <, <=, >, >= precision 1
        ('python_version < "3"', "<3"),
        ('python_version <= "3"', "<3"),
        ('python_version > "3"', ">=3"),
        ('python_version >= "3"', ">=3"),
        # <, <=, >, >= precision 2
        ('python_version < "3.6"', "<3.6"),
        ('python_version <= "3.6"', "<3.7"),
        ('python_version > "3.6"', ">=3.7"),
        ('python_version >= "3.6"', ">=3.6"),
        # in, not in
        ('python_version in "2.7, 3.6"', ">=2.7.0,<2.8.0 || >=3.6.0,<3.7.0"),
        ('python_version in "2.7, 3.6.2"', ">=2.7.0,<2.8.0 || 3.6.2"),
        ('python_version not in "2.7, 3.6"', "<2.7.0 || >=2.8.0,<3.6.0 || >=3.7.0"),
        ('python_version not in "2.7, 3.6.2"', "<2.7.0 || >=2.8.0,<3.6.2 || >3.6.2"),
        # and
        ('python_version >= "3.6" and python_full_version < "4.0"', ">=3.6, <4.0"),
        (
            'python_full_version >= "3.6.1" and python_full_version < "4.0.0"',
            ">=3.6.1, <4.0.0",
        ),
        # or
        ('python_version < "3.6" or python_version >= "3.9"', "<3.6 || >=3.9"),
        # and or
        (
            (
                'python_version >= "3.7" and python_version < "3.8" or python_version'
                ' >= "3.9" and python_version < "3.10"'
            ),
            ">=3.7,<3.8 || >=3.9,<3.10",
        ),
        (
            (
                '(python_version < "2.7" or python_full_version >= "3.0.0") and'
                ' python_full_version < "3.6.0"'
            ),
            "<2.7 || >=3.0,<3.6",
        ),
        # no python_version
        ('sys_platform == "linux"', "*"),
        ('sys_platform != "linux" and sys_platform != "win32"', "*"),
        ('sys_platform == "linux" or sys_platform == "win32"', "*"),
        # no relevant python_version
        ('python_version >= "3.9" or sys_platform == "linux"', "*"),
        # relevant python_version
        ('python_version >= "3.9" and sys_platform == "linux"', ">=3.9"),
        # exclude specific version
        (
            'python_version >= "3.5" and python_full_version != "3.7.6"',
            ">=3.5,<3.7.6 || >3.7.6",
        ),
        # Full exact version
        (
            'python_full_version == "3.6.1"',
            "3.6.1",
        ),
    ],
)
def test_get_python_constraint_from_marker(marker: str, constraint: str) -> None:
    marker_parsed = parse_marker(marker)
    constraint_parsed = parse_marker_version_constraint(constraint)
    assert get_python_constraint_from_marker(marker_parsed) == constraint_parsed
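

# Sketch mirroring the "or" case above (illustrative only, not collected):
# a python_version marker collapses to an equivalent version constraint.
def _python_constraint_from_or_marker() -> None:
    marker = parse_marker('python_version < "3.6" or python_version >= "3.9"')
    constraint = parse_marker_version_constraint("<3.6 || >=3.9")
    assert get_python_constraint_from_marker(marker) == constraint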


@pytest.mark.parametrize(
    ("fixture", "result"),
    [
        ("simple_project", True),
        ("project_with_setup_cfg_only", True),
        ("project_with_setup", True),
        ("project_with_pep517_non_poetry", True),
        ("project_without_pep517", False),
        ("does_not_exist", False),
    ],
)
def test_is_python_project(fixture: str, result: bool) -> None:
    path = Path(__file__).parent.parent.parent / "fixtures" / fixture
    assert is_python_project(path) == result
poetry-core-2.1.1/tests/packages/utils/test_utils_link.py
from __future__ import annotations

import uuid

from hashlib import sha256

import pytest

from poetry.core.packages.utils.link import Link


def make_checksum() -> str:
    return sha256(str(uuid.uuid4()).encode()).hexdigest()


@pytest.fixture()
def file_checksum() -> str:
    return make_checksum()


@pytest.fixture()
def metadata_checksum() -> str:
    return make_checksum()


def make_url(
    ext: str,
    *,
    file_checksum: str | None = None,
    metadata_checksum: str | None = None,
    hashes: dict[str, str] | None = None,
    metadata: dict[str, str] | str | None = None,
) -> Link:
    """Build a Link to a demo-1.0.0.<ext> artifact, attaching the file hash as
    a URL fragment (or via ``hashes``) and optional PEP 658/691 metadata."""
    url = f"https://files.pythonhosted.org/packages/16/52/dead/demo-1.0.0.{ext}"
    if not hashes:
        file_checksum = file_checksum or make_checksum()
        url += f"#sha256={file_checksum}"
    if not metadata:
        metadata = f"sha256={metadata_checksum}" if metadata_checksum else None
    return Link(url, hashes=hashes, metadata=metadata)
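

# Orientation sketch with hypothetical checksum values (not collected by
# pytest): the helper above encodes the file hash in the URL fragment and the
# metadata hash as PEP 658 metadata.
def _make_url_example() -> None:
    link = make_url(ext="whl", file_checksum="ab12", metadata_checksum="cd34")
    assert link.url.endswith("demo-1.0.0.whl#sha256=ab12")
    assert link.metadata_hashes == {"sha256": "cd34"}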


def test_package_link_hash(file_checksum: str) -> None:
    link = make_url(ext="whl", file_checksum=file_checksum)
    assert link.hashes == {"sha256": file_checksum}
    assert link.show_url == "demo-1.0.0.whl"

    # this is legacy PEP 503, no metadata hash is present
    assert not link.has_metadata
    assert not link.metadata_url
    assert not link.metadata_hashes


def test_package_link_hashes(file_checksum: str) -> None:
    link = make_url(ext="whl", hashes={"sha256": file_checksum, "other": "1234"})
    assert link.hashes == {"sha256": file_checksum, "other": "1234"}
    assert link.show_url == "demo-1.0.0.whl"


@pytest.mark.parametrize(
    ("ext", "check"),
    [
        ("whl", "wheel"),
        ("egg", "egg"),
        ("tar.gz", "sdist"),
        ("zip", "sdist"),
        ("cp36-cp36m-manylinux1_x86_64.whl", "wheel"),
    ],
)
def test_package_link_is_checks(ext: str, check: str) -> None:
    link = make_url(ext=ext)
    assert getattr(link, f"is_{check}")


@pytest.mark.parametrize(
    ("ext", "has_metadata"),
    [("whl", True), ("egg", False), ("tar.gz", True), ("zip", True)],
)
def test_package_link_pep658(
    ext: str, has_metadata: bool, metadata_checksum: str
) -> None:
    link = make_url(ext=ext, metadata_checksum=metadata_checksum)

    if has_metadata:
        assert link.has_metadata
        assert link.metadata_url == f"{link.url_without_fragment}.metadata"
        assert link.metadata_hashes == {"sha256": metadata_checksum}
    else:
        assert not link.has_metadata
        assert not link.metadata_url
        assert not link.metadata_hashes


def test_package_link_pep658_no_default_metadata() -> None:
    link = make_url(ext="whl")

    assert not link.has_metadata
    assert not link.metadata_url
    assert not link.metadata_hashes


@pytest.mark.parametrize(
    ("metadata", "has_metadata"),
    [
        ("true", True),
        ("false", False),
        ("", False),
    ],
)
def test_package_link_pep658_non_hash_metadata_value(
    file_checksum: str, metadata: str | bool, has_metadata: bool
) -> None:
    link = Link(
        "https://files.pythonhosted.org/packages/16/52/dead/"
        f"demo-1.0.0.whl#sha256={file_checksum}",
        metadata=metadata,
    )

    if has_metadata:
        assert link.has_metadata
        assert link.metadata_url == f"{link.url_without_fragment}.metadata"
    else:
        assert not link.has_metadata
        assert not link.metadata_url

    assert not link.metadata_hashes


def test_package_link_pep691() -> None:
    link = make_url(ext="whl", metadata={"sha256": "abcd", "sha512": "1234"})

    assert link.has_metadata
    assert link.metadata_url == f"{link.url_without_fragment}.metadata"
    assert link.metadata_hashes == {"sha256": "abcd", "sha512": "1234"}


def test_package_link_pep592_default_not_yanked() -> None:
    link = make_url(ext="whl")

    assert not link.yanked
    assert link.yanked_reason == ""


@pytest.mark.parametrize(
    ("yanked", "expected_yanked", "expected_yanked_reason"),
    [
        (True, True, ""),
        (False, False, ""),
        ("the reason", True, "the reason"),
        ("", True, ""),
    ],
)
def test_package_link_pep592_yanked(
    yanked: str | bool, expected_yanked: bool, expected_yanked_reason: str
) -> None:
    link = Link("https://example.org", yanked=yanked)

    assert link.yanked == expected_yanked
    assert link.yanked_reason == expected_yanked_reason
poetry-core-2.1.1/tests/packages/utils/test_utils_urls.py
# These test scenarios are ported over from pypa/pip
# https://raw.githubusercontent.com/pypa/pip/b447f438df08303f4f07f2598f190e73876443ba/tests/unit/test_urls.py

from __future__ import annotations

import sys

from pathlib import Path

import pytest

from poetry.core.packages.utils.utils import path_to_url
from poetry.core.packages.utils.utils import url_to_path


@pytest.mark.skipif("sys.platform == 'win32'")
def test_path_to_url_unix() -> None:
    assert path_to_url("/tmp/file") == "file:///tmp/file"
    path = Path() / "file"
    assert path_to_url("file") == "file://" + path.absolute().as_posix()


@pytest.mark.skipif("sys.platform != 'win32'")
def test_path_to_url_win() -> None:
    assert path_to_url("c:/tmp/file") == "file:///c:/tmp/file"
    assert path_to_url("c:\\tmp\\file") == "file:///c:/tmp/file"
    assert path_to_url(r"\\unc\as\path") == "file://unc/as/path"
    path = Path() / "file"
    assert path_to_url("file") == "file:///" + path.absolute().as_posix()


@pytest.mark.parametrize(
    "url,win_expected,non_win_expected",
    [
        ("file:tmp", "tmp", "tmp"),
        ("file:c:/path/to/file", r"C:\path\to\file", "c:/path/to/file"),
        ("file:/path/to/file", r"\path\to\file", "/path/to/file"),
        ("file://localhost/tmp/file", r"\tmp\file", "/tmp/file"),
        ("file://localhost/c:/tmp/file", r"C:\tmp\file", "/c:/tmp/file"),
        ("file://somehost/tmp/file", r"\\somehost\tmp\file", None),
        ("file:///tmp/file", r"\tmp\file", "/tmp/file"),
        ("file:///c:/tmp/file", r"C:\tmp\file", "/c:/tmp/file"),
    ],
)
def test_url_to_path(url: str, win_expected: str, non_win_expected: str | None) -> None:
    if sys.platform == "win32":
        expected_path = win_expected
    else:
        expected_path = non_win_expected

    if expected_path is None:
        with pytest.raises(ValueError):
            url_to_path(url)
    else:
        assert url_to_path(url) == Path(expected_path)


@pytest.mark.skipif("sys.platform != 'win32'")
def test_url_to_path_path_to_url_symmetry_win() -> None:
    path = r"C:\tmp\file"
    assert url_to_path(path_to_url(path)) == Path(path)

    unc_path = r"\\unc\share\path"
    assert url_to_path(path_to_url(unc_path)) == Path(unc_path)
poetry-core-2.1.1/tests/pyproject/
poetry-core-2.1.1/tests/pyproject/__init__.py
poetry-core-2.1.1/tests/pyproject/conftest.py
from __future__ import annotations

from typing import TYPE_CHECKING

import pytest


if TYPE_CHECKING:
    from pathlib import Path


@pytest.fixture
def pyproject_toml(tmp_path: Path) -> Path:
    path = tmp_path / "pyproject.toml"
    with path.open(mode="w", encoding="utf-8"):
        pass
    return path


@pytest.fixture
def build_system_section(pyproject_toml: Path) -> str:
    content = """
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
"""
    with pyproject_toml.open(mode="a", encoding="utf-8") as f:
        f.write(content)
    return content


@pytest.fixture
def poetry_section(pyproject_toml: Path) -> str:
    content = """
[tool.poetry]
name = "poetry"

[tool.poetry.dependencies]
python = "^3.5"
"""
    with pyproject_toml.open(mode="a", encoding="utf-8") as f:
        f.write(content)
    return content


@pytest.fixture
def project_section(pyproject_toml: Path) -> str:
    content = """
[project]
name = "poetry"
version = "1.0.0"
"""
    with pyproject_toml.open(mode="a", encoding="utf-8") as f:
        f.write(content)
    return content


@pytest.fixture
def project_section_dynamic(pyproject_toml: Path) -> str:
    content = """
[project]
name = "not-poetry"
version = "1.0.0"
dynamic = ["description"]
"""
    with pyproject_toml.open(mode="a", encoding="utf-8") as f:
        f.write(content)
    return content
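

# Composition sketch (illustrative, underscore-prefixed so pytest does not
# collect it): each fixture appends its section to the same temporary file,
# so requesting several fixtures yields a pyproject.toml containing all of
# their sections.
def _example_composed_pyproject(
    pyproject_toml: Path, build_system_section: str, poetry_section: str
) -> None:
    content = pyproject_toml.read_text(encoding="utf-8")
    assert build_system_section in content
    assert poetry_section in content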
poetry-core-2.1.1/tests/pyproject/test_pyproject_toml.py
from __future__ import annotations

from pathlib import Path

import pytest

from poetry.core.pyproject.exceptions import PyProjectError
from poetry.core.pyproject.toml import PyProjectTOML
from poetry.core.utils._compat import tomllib


def test_pyproject_toml_simple(
    pyproject_toml: Path, build_system_section: str, poetry_section: str
) -> None:
    with pyproject_toml.open("rb") as f:
        data = tomllib.load(f)
    assert PyProjectTOML(pyproject_toml).data == data


def test_pyproject_toml_no_poetry_config(pyproject_toml: Path) -> None:
    pyproject = PyProjectTOML(pyproject_toml)

    assert not pyproject.is_poetry_project()

    with pytest.raises(PyProjectError) as exc_info:
        _ = pyproject.poetry_config

    assert f"[tool.poetry] section not found in {pyproject_toml.as_posix()}" in str(
        exc_info.value
    )


def test_pyproject_toml_no_poetry_config_but_project_section(
    pyproject_toml: Path, project_section: str
) -> None:
    pyproject = PyProjectTOML(pyproject_toml)

    assert pyproject.is_poetry_project()

    with pytest.raises(PyProjectError) as exc_info:
        _ = pyproject.poetry_config

    assert f"[tool.poetry] section not found in {pyproject_toml.as_posix()}" in str(
        exc_info.value
    )


def test_pyproject_toml_no_poetry_config_but_project_section_but_dynamic(
    pyproject_toml: Path, project_section_dynamic: str
) -> None:
    pyproject = PyProjectTOML(pyproject_toml)

    assert not pyproject.is_poetry_project()

    with pytest.raises(PyProjectError) as exc_info:
        _ = pyproject.poetry_config

    assert f"[tool.poetry] section not found in {pyproject_toml.as_posix()}" in str(
        exc_info.value
    )


def test_pyproject_toml_poetry_config(
    pyproject_toml: Path, poetry_section: str
) -> None:
    pyproject = PyProjectTOML(pyproject_toml)
    with pyproject_toml.open("rb") as f:
        doc = tomllib.load(f)
    config = doc["tool"]["poetry"]

    assert pyproject.is_poetry_project()
    assert pyproject.poetry_config == config


def test_pyproject_toml_no_build_system_defaults() -> None:
    pyproject_toml = (
        Path(__file__).parent.parent
        / "fixtures"
        / "project_with_build_system_requires"
        / "pyproject.toml"
    )

    build_system = PyProjectTOML(pyproject_toml).build_system
    assert build_system.requires == ["poetry-core", "Cython~=0.29.6"]

    assert len(build_system.dependencies) == 2
    assert build_system.dependencies[0].to_pep_508() == "poetry-core"
    assert build_system.dependencies[1].to_pep_508() == "Cython (>=0.29.6,<0.30.0)"


def test_pyproject_toml_build_requires_as_dependencies(pyproject_toml: Path) -> None:
    build_system = PyProjectTOML(pyproject_toml).build_system
    assert build_system.requires == ["setuptools", "wheel"]
    assert build_system.build_backend == "setuptools.build_meta:__legacy__"


def test_pyproject_toml_non_existent(pyproject_toml: Path) -> None:
    pyproject_toml.unlink()
    pyproject = PyProjectTOML(pyproject_toml)
    build_system = pyproject.build_system

    assert pyproject.data == {}
    assert build_system.requires == ["poetry-core"]
    assert build_system.build_backend == "poetry.core.masonry.api"


def test_unparseable_pyproject_toml() -> None:
    pyproject_toml = (
        Path(__file__).parent.parent
        / "fixtures"
        / "project_duplicate_dependency"
        / "pyproject.toml"
    )

    with pytest.raises(PyProjectError) as exc_info:
        _ = PyProjectTOML(pyproject_toml).build_system

    assert (
        f"{pyproject_toml.as_posix()} is not a valid TOML file.\n"
        "TOMLDecodeError: Cannot overwrite a value (at line 7, column 16)\n"
        "This is often caused by a duplicate entry"
    ) in str(exc_info.value)
poetry-core-2.1.1/tests/spdx/
poetry-core-2.1.1/tests/spdx/__init__.py
poetry-core-2.1.1/tests/spdx/test_helpers.py
from __future__ import annotations

import pytest

from poetry.core.spdx.helpers import _load_licenses
from poetry.core.spdx.helpers import license_by_id


def test_license_by_id() -> None:
    license = license_by_id("MIT")

    assert license.id == "MIT"
    assert license.name == "MIT License"
    assert license.is_osi_approved
    assert not license.is_deprecated

    license = license_by_id("LGPL-3.0-or-later")

    assert license.id == "LGPL-3.0-or-later"
    assert license.name == "GNU Lesser General Public License v3.0 or later"
    assert license.is_osi_approved
    assert not license.is_deprecated


def test_license_by_id_is_case_insensitive() -> None:
    license = license_by_id("mit")

    assert license.id == "MIT"

    license = license_by_id("miT")

    assert license.id == "MIT"


def test_license_by_id_with_full_name() -> None:
    license = license_by_id("GNU Lesser General Public License v3.0 or later")

    assert license.id == "LGPL-3.0-or-later"
    assert license.name == "GNU Lesser General Public License v3.0 or later"
    assert license.is_osi_approved
    assert not license.is_deprecated


def test_license_by_id_invalid() -> None:
    with pytest.raises(ValueError):
        license_by_id("")


def test_license_by_id_custom() -> None:
    license = license_by_id("Custom")

    assert license.id == "Custom"
    assert license.name == "Custom"
    assert not license.is_osi_approved
    assert not license.is_deprecated


def test_valid_trove_classifiers() -> None:
    import trove_classifiers

    licenses = _load_licenses()

    for license_id, license in licenses.items():
        classifier = license.classifier
        valid_classifier = classifier in trove_classifiers.classifiers

        assert valid_classifier, (
            f"'{license_id}' returns invalid classifier '{classifier}'"
        )
poetry-core-2.1.1/tests/spdx/test_license.py
from __future__ import annotations

from poetry.core.spdx.helpers import license_by_id


def test_classifier_name() -> None:
    license = license_by_id("lgpl-3.0-or-later")

    assert (
        license.classifier_name
        == "GNU Lesser General Public License v3 or later (LGPLv3+)"
    )


def test_classifier_name_no_classifer_osi_approved() -> None:
    license = license_by_id("LiLiQ-R-1.1")

    assert license.classifier_name is None


def test_classifier_name_no_classifer() -> None:
    license = license_by_id("Leptonica")

    assert license.classifier_name == "Other/Proprietary License"


def test_classifier() -> None:
    license = license_by_id("lgpl-3.0-or-later")

    assert (
        license.classifier == "License :: "
        "OSI Approved :: "
        "GNU Lesser General Public License v3 or later (LGPLv3+)"
    )


def test_classifier_no_classifer_osi_approved() -> None:
    license = license_by_id("LiLiQ-R-1.1")

    assert license.classifier == "License :: OSI Approved"


def test_classifier_no_classifer() -> None:
    license = license_by_id("Leptonica")

    assert license.classifier == "License :: Other/Proprietary License"


def test_proprietary_license() -> None:
    license = license_by_id("Proprietary")

    assert license.classifier == "License :: Other/Proprietary License"


def test_custom_license() -> None:
    license = license_by_id("Amazon Software License")

    assert license.classifier == "License :: Other/Proprietary License"
poetry-core-2.1.1/tests/test_core_version.py
from __future__ import annotations

from pathlib import Path

from poetry.core import __version__
from poetry.core.pyproject.toml import PyProjectTOML


def test_version_is_synced() -> None:
    pyproject = PyProjectTOML(Path(__file__).parent.parent.joinpath("pyproject.toml"))
    assert __version__ == pyproject.data["project"]["version"]
poetry-core-2.1.1/tests/test_factory.py
from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import cast

import pytest

from packaging.utils import canonicalize_name

from poetry.core.constraints.version import parse_constraint
from poetry.core.factory import Factory
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.directory_dependency import DirectoryDependency
from poetry.core.packages.file_dependency import FileDependency
from poetry.core.packages.url_dependency import URLDependency
from poetry.core.packages.vcs_dependency import VCSDependency
from poetry.core.pyproject.tables import BuildSystem
from poetry.core.utils._compat import tomllib
from poetry.core.version.markers import SingleMarker


if TYPE_CHECKING:
    from pytest import LogCaptureFixture


fixtures_dir = Path(__file__).parent / "fixtures"


@pytest.fixture
def complete_legacy_warnings() -> list[str]:
    return [
        "[tool.poetry.name] is deprecated. Use [project.name] instead.",
        (
            "[tool.poetry.version] is set but 'version' is not in "
            "[project.dynamic]. If it is static use [project.version]. If it "
            "is dynamic, add 'version' to [project.dynamic].\n"
            "If you want to set the version dynamically via `poetry build "
            "--local-version` or you are using a plugin, which sets the "
            "version dynamically, you should define the version in "
            "[tool.poetry] and add 'version' to [project.dynamic]."
        ),
        "[tool.poetry.description] is deprecated. Use [project.description] instead.",
        (
            "[tool.poetry.readme] is set but 'readme' is not in "
            "[project.dynamic]. If it is static use [project.readme]. If it "
            "is dynamic, add 'readme' to [project.dynamic].\n"
            "If you want to define multiple readmes, you should define them "
            "in [tool.poetry] and add 'readme' to [project.dynamic]."
        ),
        "[tool.poetry.license] is deprecated. Use [project.license] instead.",
        "[tool.poetry.authors] is deprecated. Use [project.authors] instead.",
        "[tool.poetry.maintainers] is deprecated. Use [project.maintainers] instead.",
        "[tool.poetry.keywords] is deprecated. Use [project.keywords] instead.",
        (
            "[tool.poetry.classifiers] is set but 'classifiers' is not in "
            "[project.dynamic]. If it is static use [project.classifiers]. If it "
            "is dynamic, add 'classifiers' to [project.dynamic].\n"
            "ATTENTION: Per default Poetry determines classifiers for "
            "supported Python versions and license automatically. If you "
            "define classifiers in [project], you disable the automatic "
            "enrichment. In other words, you have to define all classifiers "
            "manually. If you want to use Poetry's automatic enrichment of "
            "classifiers, you should define them in [tool.poetry] and add "
            "'classifiers' to [project.dynamic]."
        ),
        "[tool.poetry.homepage] is deprecated. Use [project.urls] instead.",
        "[tool.poetry.repository] is deprecated. Use [project.urls] instead.",
        "[tool.poetry.documentation] is deprecated. Use [project.urls] instead.",
        "[tool.poetry.plugins] is deprecated. Use [project.entry-points] instead.",
        (
            "[tool.poetry.extras] is deprecated. Use "
            "[project.optional-dependencies] instead."
        ),
        (
            "Defining console scripts in [tool.poetry.scripts] is deprecated. "
            "Use [project.scripts] instead. "
            "([tool.poetry.scripts] should only be used for scripts of type 'file')."
        ),
    ]


@pytest.fixture
def complete_legacy_duplicate_warnings() -> list[str]:
    return [
        (
            "[project.name] and [tool.poetry.name] are both set. The latter "
            "will be ignored."
        ),
        (
            "[project.version] and [tool.poetry.version] are both set. The "
            "latter will be ignored.\n"
            "If you want to set the version dynamically via `poetry build "
            "--local-version` or you are using a plugin, which sets the "
            "version dynamically, you should define the version in "
            "[tool.poetry] and add 'version' to [project.dynamic]."
        ),
        (
            "[project.description] and [tool.poetry.description] are both "
            "set. The latter will be ignored."
        ),
        (
            "[project.readme] and [tool.poetry.readme] are both set. The "
            "latter will be ignored.\n"
            "If you want to define multiple readmes, you should define them "
            "in [tool.poetry] and add 'readme' to [project.dynamic]."
        ),
        (
            "[project.license] and [tool.poetry.license] are both set. The "
            "latter will be ignored."
        ),
        (
            "[project.authors] and [tool.poetry.authors] are both set. The "
            "latter will be ignored."
        ),
        (
            "[project.maintainers] and [tool.poetry.maintainers] are both "
            "set. The latter will be ignored."
        ),
        (
            "[project.keywords] and [tool.poetry.keywords] are both set. The "
            "latter will be ignored."
        ),
        (
            "[project.classifiers] and [tool.poetry.classifiers] are both "
            "set. The latter will be ignored.\n"
            "ATTENTION: Per default Poetry determines classifiers for "
            "supported Python versions and license automatically. If you "
            "define classifiers in [project], you disable the automatic "
            "enrichment. In other words, you have to define all classifiers "
            "manually. If you want to use Poetry's automatic enrichment of "
            "classifiers, you should define them in [tool.poetry] and add "
            "'classifiers' to [project.dynamic]."
        ),
        (
            "[project.urls] and [tool.poetry.homepage] are both set. The "
            "latter will be ignored."
        ),
        (
            "[project.urls] and [tool.poetry.repository] are both set. The "
            "latter will be ignored."
        ),
        (
            "[project.urls] and [tool.poetry.documentation] are both set. "
            "The latter will be ignored."
        ),
        (
            "[project.entry-points] and [tool.poetry.plugins] are both set. The "
            "latter will be ignored."
        ),
        (
            "[project.optional-dependencies] and [tool.poetry.extras] are "
            "both set. The latter will be ignored."
        ),
        (
            "[project.scripts] is set and there are console scripts "
            "in [tool.poetry.scripts]. The latter will be ignored."
        ),
    ]


@pytest.mark.parametrize(
    "project", ["sample_project", "sample_project_new", "sample_project_dynamic"]
)
def test_create_poetry(project: str) -> None:
    new_format = project == "sample_project_new"
    dynamic = project == "sample_project_dynamic"
    poetry = Factory().create_poetry(fixtures_dir / project)

    assert poetry.is_package_mode

    package = poetry.package

    assert package.name == "my-package"
    assert package.version.text == "1.2.3"
    assert package.description == "Some description."
    assert package.authors == ["Sébastien Eustace <sebastien@eustace.io>"]
    assert package.maintainers == ["Sébastien Eustace <sebastien@eustace.io>"]
    assert package.license
    assert package.license.id == "MIT"
    assert (
        package.readmes[0].relative_to(fixtures_dir).as_posix()
        == f"{project}/README.rst"
    )
    assert package.homepage == "https://python-poetry.org"
    assert package.repository_url == "https://github.com/python-poetry/poetry"
    assert package.keywords == ["packaging", "dependency", "poetry"]

    assert package.python_versions == ">=3.6"
    assert str(package.python_constraint) == ">=3.6"

    dependencies: dict[str, Dependency] = {}
    for dep in package.requires:
        dependencies[dep.name] = dep

    cleo = dependencies["cleo"]
    assert cleo.pretty_constraint == (">=0.6,<1.0" if new_format else "^0.6")
    assert not cleo.is_optional()

    pendulum = dependencies["pendulum"]
    assert pendulum.pretty_constraint == ("rev 2.0" if new_format else "branch 2.0")
    assert pendulum.is_vcs()
    pendulum = cast("VCSDependency", pendulum)
    assert pendulum.vcs == "git"
    if new_format:
        assert pendulum.rev == "2.0"
    else:
        assert pendulum.branch == "2.0"
    assert pendulum.source == "https://github.com/sdispater/pendulum.git"
    assert pendulum.allows_prereleases()
    assert not pendulum.develop

    tomlkit = dependencies["tomlkit"]
    assert tomlkit.pretty_constraint == "rev 3bff550"
    assert tomlkit.is_vcs()
    tomlkit = cast("VCSDependency", tomlkit)
    assert tomlkit.vcs == "git"
    assert tomlkit.rev == "3bff550"
    assert tomlkit.source == "https://github.com/sdispater/tomlkit.git"
    assert tomlkit.allows_prereleases()
    if new_format:
        assert not tomlkit.develop
    else:
        assert tomlkit.develop
    tomlkit_for_locking = next(d for d in package.all_requires if d.name == "tomlkit")
    assert isinstance(tomlkit_for_locking, VCSDependency)
    assert tomlkit_for_locking.develop

    requests = dependencies["requests"]
    assert requests.pretty_constraint == (
        ">=2.18,<3.0" if new_format or dynamic else "^2.18"
    )
    assert not requests.is_vcs()
    assert requests.allows_prereleases() is None
    assert requests.is_optional()
    assert requests.extras == frozenset({"security"})

    pathlib2 = dependencies["pathlib2"]
    assert pathlib2.pretty_constraint == (">=2.2,<3.0" if new_format else "^2.2")
    assert pathlib2.python_versions in {"~2.7", ">=2.7 <2.8"}
    assert not pathlib2.is_optional()

    demo = dependencies["demo"]
    assert demo.is_file()
    assert not demo.is_vcs()
    assert demo.name == "demo"
    assert demo.pretty_constraint == "*"

    demo = dependencies["my-package"]
    assert not demo.is_file()
    assert demo.is_directory()
    assert not demo.is_vcs()
    assert demo.name == "my-package"
    assert demo.pretty_constraint == "*"

    simple_project = dependencies["simple-project"]
    assert not simple_project.is_file()
    assert simple_project.is_directory()
    assert not simple_project.is_vcs()
    assert simple_project.name == "simple-project"
    assert simple_project.pretty_constraint == "*"

    functools32 = dependencies["functools32"]
    assert functools32.name == "functools32"
    assert functools32.pretty_constraint == (
        ">=3.2.3,<3.3.0" if new_format else "^3.2.3"
    )
    assert (
        str(functools32.marker)
        == 'python_version ~= "2.7" and sys_platform == "win32" or python_version in'
        ' "3.4 3.5"'
    )

    dataclasses = dependencies["dataclasses"]
    assert dataclasses.name == "dataclasses"
    assert dataclasses.pretty_constraint == (">=0.7,<1.0" if new_format else "^0.7")
    assert dataclasses.python_versions == ">=3.6.1 <3.7"
    assert (
        str(dataclasses.marker)
        == 'python_full_version >= "3.6.1" and python_version < "3.7"'
    )

    assert "db" in package.extras

    classifiers = package.classifiers

    assert classifiers == [
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ]

    if new_format:
        assert package.all_classifiers == package.classifiers
    else:
        assert package.all_classifiers == [
            "License :: OSI Approved :: MIT License",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3.9",
            "Programming Language :: Python :: 3.10",
            "Programming Language :: Python :: 3.11",
            "Programming Language :: Python :: 3.12",
            "Programming Language :: Python :: 3.13",
            "Topic :: Software Development :: Build Tools",
            "Topic :: Software Development :: Libraries :: Python Modules",
        ]


def test_create_poetry_with_dependencies_with_subdirectory() -> None:
    poetry = Factory().create_poetry(
        fixtures_dir / "project_with_dependencies_with_subdirectory"
    )
    package = poetry.package
    dependencies = {str(dep.name): dep for dep in package.requires}

    # git dependency
    pendulum = dependencies["pendulum"]
    assert pendulum.is_vcs()
    assert pendulum.pretty_constraint == "branch 2.0"
    pendulum = cast("VCSDependency", pendulum)
    assert pendulum.source == "https://github.com/sdispater/pendulum.git"
    assert pendulum.directory == "sub"

    # file dependency
    demo = dependencies["demo"]
    assert demo.is_file()
    assert demo.pretty_constraint == "*"
    demo = cast("FileDependency", demo)
    assert demo.path == Path("../distributions/demo-0.1.0-in-subdir.zip")
    assert demo.directory == "sub"
    demo_dependencies = [dep for dep in package.requires if dep.name == "demo"]
    assert len(demo_dependencies) == 2
    assert demo_dependencies[0] == demo_dependencies[1]
    assert {str(dep.marker) for dep in demo_dependencies} == {
        'sys_platform == "win32"',
        'sys_platform == "linux"',
    }

    # directory dependency
    simple_project = dependencies["simple-project"]
    assert simple_project.is_directory()
    assert simple_project.pretty_constraint == "*"
    simple_project = cast("DirectoryDependency", simple_project)
    assert simple_project.path == Path("../simple_project")
    with pytest.raises(AttributeError):
        _ = simple_project.directory  # type: ignore[attr-defined]

    # url dependency
    foo = dependencies["foo"]
    assert foo.is_url()
    assert foo.pretty_constraint == "*"
    foo = cast("URLDependency", foo)
    assert foo.url == "https://example.com/foo.zip"
    assert foo.directory == "sub"
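

def _example_subdirectory_dependency() -> None:
    """Sketch: the subdirectory support exercised above can also be reached
    programmatically via Factory.create_dependency; the URL and branch here
    mirror the fixture's git dependency and are illustrative only."""
    dep = Factory.create_dependency(
        "pendulum",
        {
            "git": "https://github.com/sdispater/pendulum.git",
            "branch": "2.0",
            "subdirectory": "sub",
        },
    )
    assert dep.is_vcs()
    vcs_dep = cast("VCSDependency", dep)
    assert vcs_dep.directory == "sub"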


def test_create_poetry_with_packages_and_includes() -> None:
    poetry = Factory().create_poetry(
        fixtures_dir.parent / "masonry" / "builders" / "fixtures" / "with-include"
    )

    package = poetry.package

    assert package.packages == [
        {"include": "extra_dir/**/*.py", "format": ["sdist", "wheel"]},
        {"include": "extra_dir/**/*.py", "format": ["sdist", "wheel"]},
        {"include": "my_module.py", "format": ["sdist", "wheel"]},
        {"include": "package_with_include", "format": ["sdist", "wheel"]},
        {"include": "tests", "format": ["sdist"]},
        {"include": "for_wheel_only", "format": ["wheel"]},
        {"include": "src_package", "from": "src", "format": ["sdist", "wheel"]},
        {
            "include": "from_to",
            "from": "etc",
            "to": "target_from_to",
            "format": ["sdist", "wheel"],
        },
        {
            "include": "my_module_to.py",
            "to": "target_module",
            "format": ["sdist", "wheel"],
        },
    ]

    assert package.include == [
        {"path": "extra_dir/vcs_excluded.py", "format": ["sdist", "wheel"]},
        {"path": "notes.txt", "format": ["sdist"]},
    ]


def test_create_poetry_with_multi_constraints_dependency() -> None:
    poetry = Factory().create_poetry(
        fixtures_dir / "project_with_multi_constraints_dependency"
    )

    package = poetry.package

    assert len(package.requires) == 2


def test_create_poetry_non_package_mode() -> None:
    poetry = Factory().create_poetry(fixtures_dir / "non_package_mode")

    assert not poetry.is_package_mode


@pytest.mark.parametrize("license_type", ["file", "text", "str"])
def test_create_poetry_with_license_type_file(license_type: str) -> None:
    project_dir = fixtures_dir / f"with_license_type_{license_type}"
    poetry = Factory().create_poetry(project_dir)

    if license_type == "file":
        license_content = (project_dir / "LICENSE").read_text(encoding="utf-8")
    elif license_type == "text":
        license_content = (
            (project_dir / "pyproject.toml").read_text(encoding="utf-8").split('"""')[1]
        )
    elif license_type == "str":
        license_content = "MIT"
    else:
        raise RuntimeError("unexpected license type")

    assert poetry.package.license
    assert poetry.package.license.id == license_content


def test_create_poetry_fails_with_missing_license_file() -> None:
    project_dir = fixtures_dir / "missing_license_file"
    with pytest.raises(FileNotFoundError) as e:
        Factory().create_poetry(project_dir)

    assert str((project_dir / "LICENSE").absolute()) in str(e.value)


@pytest.mark.parametrize(
    ("requires_python", "python", "expected_versions", "expected_constraint"),
    [
        (">=3.8", None, ">=3.8", ">=3.8"),
        (None, "^3.8", "^3.8", ">=3.8,<4.0"),
        (">=3.8", "^3.8", "^3.8", ">=3.8,<4.0"),
    ],
)
def test_create_poetry_python_version(
    requires_python: str | None,
    python: str | None,
    expected_versions: str,
    expected_constraint: str,
    tmp_path: Path,
) -> None:
    content = '[project]\nname = "foo"\nversion = "1"\n'
    if requires_python:
        content += f'requires-python = "{requires_python}"\n'
    if python:
        content += f'[tool.poetry.dependencies]\npython = "{python}"\n'
    (tmp_path / "pyproject.toml").write_text(content, encoding="utf-8")
    poetry = Factory().create_poetry(tmp_path)

    package = poetry.package
    assert package.requires_python == (requires_python or python)
    assert package.python_versions == expected_versions
    assert str(package.python_constraint) == expected_constraint
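

# Note on precedence: when both [project.requires-python] and a python entry
# in [tool.poetry.dependencies] are given, the latter must be a subset of the
# former (see test_create_poetry_python_version_not_compatible below) and the
# more precise tool.poetry constraint determines python_versions.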


def test_create_poetry_python_version_not_compatible(tmp_path: Path) -> None:
    content = """
[project]
name = "foo"
version = "1"
requires-python = ">=3.8"

[tool.poetry.dependencies]
python = ">=3.7"
"""
    (tmp_path / "pyproject.toml").write_text(content, encoding="utf-8")
    with pytest.raises(ValueError) as e:
        Factory().create_poetry(tmp_path)

    assert "not a subset" in str(e.value)


@pytest.mark.parametrize(
    ("content", "expected"),
    [
        (  # static
            """\
[project]
name = "foo"
version = "1"
requires-python = "3.10"
classifiers = ["License :: OSI Approved :: MIT License"]
""",
            ["License :: OSI Approved :: MIT License"],
        ),
        (  # dynamic
            """\
[project]
name = "foo"
version = "1"
requires-python = "3.10"
dynamic = [ "classifiers" ]

[tool.poetry]
classifiers = ["License :: OSI Approved :: MIT License"]
""",
            [
                "License :: OSI Approved :: MIT License",
                "Programming Language :: Python :: 3",
                "Programming Language :: Python :: 3.10",
            ],
        ),
        (  # legacy
            """\
[tool.poetry]
name = "foo"
version = "1"
classifiers = ["License :: OSI Approved :: MIT License"]

[tool.poetry.dependencies]
python = "~3.10"
""",
            [
                "License :: OSI Approved :: MIT License",
                "Programming Language :: Python :: 3",
                "Programming Language :: Python :: 3.10",
            ],
        ),
    ],
)
def test_create_poetry_classifiers(
    content: str, expected: list[str], tmp_path: Path
) -> None:
    (tmp_path / "pyproject.toml").write_text(content, encoding="utf-8")
    poetry = Factory().create_poetry(tmp_path)

    assert poetry.package.all_classifiers == expected


def test_create_poetry_no_readme(tmp_path: Path) -> None:
    pyproject = tmp_path / "pyproject.toml"
    pyproject.write_text(
        '[tool.poetry]\nname="foo"\nversion="1"\nauthors = []\ndescription = ""\n',
        encoding="utf-8",
    )
    poetry = Factory().create_poetry(tmp_path)

    assert not poetry.package.readmes


def test_create_poetry_empty_readme(tmp_path: Path) -> None:
    pyproject = tmp_path / "pyproject.toml"
    pyproject.write_text(
        '[tool.poetry]\nname="foo"\nversion="1"\nauthors = []\ndescription = ""\n'
        'readme = ""\n',
        encoding="utf-8",
    )
    poetry = Factory().create_poetry(tmp_path)

    assert not poetry.package.readmes


def test_validate() -> None:
    complete = fixtures_dir / "complete.toml"
    with complete.open("rb") as f:
        content = tomllib.load(f)

    assert Factory.validate(content) == {"errors": [], "warnings": []}


def test_validate_strict_legacy_warnings(complete_legacy_warnings: list[str]) -> None:
    complete = fixtures_dir / "complete.toml"
    with complete.open("rb") as f:
        content = tomllib.load(f)

    assert Factory.validate(content, strict=True) == {
        "errors": [],
        "warnings": complete_legacy_warnings,
    }


def test_validate_strict_legacy_duplicate_warnings(
    complete_legacy_duplicate_warnings: list[str],
) -> None:
    complete = fixtures_dir / "complete_duplicates.toml"
    with complete.open("rb") as f:
        content = tomllib.load(f)

    assert Factory.validate(content, strict=True) == {
        "errors": [],
        "warnings": complete_legacy_duplicate_warnings,
    }


def test_validate_strict_new_no_warnings() -> None:
    complete = fixtures_dir / "complete_new.toml"
    with complete.open("rb") as f:
        content = tomllib.load(f)

    assert Factory.validate(content, strict=True) == {"errors": [], "warnings": []}


def test_validate_strict_dynamic_warnings() -> None:
    # some fields are allowed to be dynamic, but some are not
    complete = fixtures_dir / "complete_new_dynamic_invalid.toml"
    with complete.open("rb") as f:
        content = tomllib.load(f)

    assert Factory.validate(content, strict=True) == {
        "errors": ["project must contain ['name'] properties"],
        "warnings": [
            # version, readme and classifiers are allowed to be dynamic!
            "[tool.poetry.name] is deprecated. Use [project.name] instead.",
            (
                "[tool.poetry.description] is deprecated. Use "
                "[project.description] instead."
            ),
            "[tool.poetry.license] is deprecated. Use [project.license] instead.",
            "[tool.poetry.authors] is deprecated. Use [project.authors] instead.",
            (
                "[tool.poetry.maintainers] is deprecated. Use "
                "[project.maintainers] instead."
            ),
            "[tool.poetry.keywords] is deprecated. Use [project.keywords] instead.",
            "[tool.poetry.homepage] is deprecated. Use [project.urls] instead.",
            "[tool.poetry.repository] is deprecated. Use [project.urls] instead.",
            "[tool.poetry.documentation] is deprecated. Use [project.urls] instead.",
            (
                "[tool.poetry.extras] is deprecated. Use "
                "[project.optional-dependencies] instead."
            ),
            (
                "Defining console scripts in [tool.poetry.scripts] is deprecated. "
                "Use [project.scripts] instead. "
                "([tool.poetry.scripts] should only be used for scripts of type 'file')."
            ),
        ],
    }


def test_validate_fails() -> None:
    complete = fixtures_dir / "complete.toml"
    with complete.open("rb") as f:
        content = tomllib.load(f)
    content["tool"]["poetry"]["authors"] = "this is not a valid array"

    expected = "tool.poetry.authors must be array"

    assert Factory.validate(content) == {"errors": [expected], "warnings": []}


def test_validate_without_strict_fails_only_non_strict() -> None:
    project_failing_strict_validation = (
        fixtures_dir / "project_failing_strict_validation" / "pyproject.toml"
    )
    with project_failing_strict_validation.open("rb") as f:
        content = tomllib.load(f)

    assert Factory.validate(content) == {
        "errors": [
            "Either [project.name] or [tool.poetry.name] is required in package mode.",
            (
                "Either [project.version] or [tool.poetry.version] is required in "
                "package mode."
            ),
        ],
        "warnings": [],
    }


def test_validate_strict_fails_strict_and_non_strict() -> None:
    project_failing_strict_validation = (
        fixtures_dir / "project_failing_strict_validation" / "pyproject.toml"
    )
    with project_failing_strict_validation.open("rb") as f:
        content = tomllib.load(f)

    assert Factory.validate(content, strict=True) == {
        "errors": [
            "Either [project.name] or [tool.poetry.name] is required in package mode.",
            (
                "Either [project.version] or [tool.poetry.version] is required in "
                "package mode."
            ),
            (
                'Cannot find dependency "missing_extra" for extra "some-extras" in '
                "main dependencies."
            ),
            (
                'Cannot find dependency "another_missing_extra" for extra '
                '"some-extras" in main dependencies.'
            ),
            (
                'The script "a_script_with_unknown_extra" requires extra "foo" which is'
                " not defined."
            ),
            (
                "Declared README files must be of same type: found text/markdown,"
                " text/x-rst"
            ),
        ],
        "warnings": [
            (
                "[tool.poetry.readme] is set but 'readme' is not in "
                "[project.dynamic]. If it is static use [project.readme]. If it "
                "is dynamic, add 'readme' to [project.dynamic].\n"
                "If you want to define multiple readmes, you should define them "
                "in [tool.poetry] and add 'readme' to [project.dynamic]."
            ),
            (
                "[tool.poetry.extras] is deprecated. Use "
                "[project.optional-dependencies] instead."
            ),
            (
                "Defining console scripts in [tool.poetry.scripts] is deprecated. "
                "Use [project.scripts] instead. "
                "([tool.poetry.scripts] should only be used for scripts of type 'file')."
            ),
            (
                "A wildcard Python dependency is ambiguous. Consider specifying a more"
                " explicit one."
            ),
            (
                'The "pathlib2" dependency specifies the "allows-prereleases" property,'
                ' which is deprecated. Use "allow-prereleases" instead.'
            ),
            (
                'The script "a_script_with_unknown_extra" depends on an extra. Scripts'
                " depending on extras are deprecated and support for them will be"
                " removed in a future version of poetry/poetry-core. See"
                " https://packaging.python.org/en/latest/specifications/entry-points/#data-model"
                " for details."
            ),
        ],
    }


@pytest.mark.parametrize("with_project_section", [True, False])
def test_validate_dependencies_non_package_mode(with_project_section: bool) -> None:
    content: dict[str, Any] = {
        "tool": {"poetry": {"package-mode": False, "dependencies": {"foo": "*"}}}
    }
    expected: dict[str, list[str]] = {"errors": [], "warnings": []}
    if with_project_section:
        content["project"] = {"name": "my-project"}
        expected["warnings"] = [
            (
                "[tool.poetry.dependencies] is set but [project.dependencies] is "
                "not and 'dependencies' is not in [project.dynamic]. You should "
                "either migrate [tool.poetry.depencencies] to "
                "[project.dependencies] (if you do not need Poetry-specific "
                "features) or add [project.dependencies] in addition to "
                "[tool.poetry.dependencies] or add 'dependencies' to "
                "[project.dynamic]."
            )
        ]
    assert Factory.validate(content, strict=True) == expected


@pytest.mark.parametrize("with_project_section", [True, False])
def test_validate_python_non_package_mode(with_project_section: bool) -> None:
    content: dict[str, Any] = {
        "tool": {"poetry": {"package-mode": False, "dependencies": {"python": ">=3.9"}}}
    }
    expected: dict[str, list[str]] = {"errors": [], "warnings": []}
    if with_project_section:
        content["project"] = {"name": "my-project", "dynamic": ["dependencies"]}
        expected["warnings"] = [
            (
                "[tool.poetry.dependencies.python] is set but [project.requires-python]"
                " is not set and 'requires-python' is not in [project.dynamic]."
            )
        ]
    assert Factory.validate(content, strict=True) == expected


def test_strict_validation_success_on_multiple_readme_files() -> None:
    with_readme_files = fixtures_dir / "with_readme_files" / "pyproject.toml"
    with with_readme_files.open("rb") as f:
        content = tomllib.load(f)

    assert Factory.validate(content, strict=True) == {"errors": [], "warnings": []}


def test_strict_validation_fails_on_readme_files_with_unmatching_types() -> None:
    with_readme_files = fixtures_dir / "with_readme_files" / "pyproject.toml"
    with with_readme_files.open("rb") as f:
        content = tomllib.load(f)
    content["tool"]["poetry"]["readme"][0] = "README.md"

    assert Factory.validate(content, strict=True) == {
        "errors": [
            "Declared README files must be of same type: found text/markdown,"
            " text/x-rst"
        ],
        "warnings": [],
    }


def test_create_poetry_fails_on_invalid_configuration() -> None:
    with pytest.raises(RuntimeError) as e:
        Factory().create_poetry(
            Path(__file__).parent / "fixtures" / "invalid_pyproject" / "pyproject.toml"
        )

    expected = """\
The Poetry configuration is invalid:
  - Either [project.name] or [tool.poetry.name] is required in package mode.
  - Either [project.version] or [tool.poetry.version] is required in package mode.
"""
    assert str(e.value) == expected


def test_create_poetry_fails_on_invalid_mode() -> None:
    with pytest.raises(RuntimeError) as e:
        Factory().create_poetry(
            Path(__file__).parent / "fixtures" / "invalid_mode" / "pyproject.toml"
        )

    expected = """\
The Poetry configuration is invalid:
  - tool.poetry.package-mode must be boolean
  - Either [project.name] or [tool.poetry.name] is required in package mode.
  - Either [project.version] or [tool.poetry.version] is required in package mode.
"""
    assert str(e.value) == expected


def test_create_poetry_omits_dev_dependencies_iff_with_groups_is_false() -> None:
    poetry = Factory().create_poetry(fixtures_dir / "sample_project", with_groups=False)
    assert not any("dev" in r.groups for r in poetry.package.all_requires)

    poetry = Factory().create_poetry(fixtures_dir / "sample_project")
    assert any("dev" in r.groups for r in poetry.package.all_requires)


def test_create_poetry_with_invalid_dev_dependencies(caplog: LogCaptureFixture) -> None:
    poetry = Factory().create_poetry(
        fixtures_dir / "project_with_invalid_dev_deps", with_groups=False
    )
    assert not any("dev" in r.groups for r in poetry.package.all_requires)

    assert not caplog.records
    poetry = Factory().create_poetry(fixtures_dir / "project_with_invalid_dev_deps")
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert "does not exist" in record.message
    assert any("dev" in r.groups for r in poetry.package.all_requires)


def test_create_poetry_with_groups_and_legacy_dev(caplog: LogCaptureFixture) -> None:
    assert not caplog.records

    poetry = Factory().create_poetry(
        fixtures_dir / "project_with_groups_and_legacy_dev"
    )

    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "WARNING"
    assert '"poetry.dev-dependencies" section is deprecated' in record.message

    package = poetry.package
    dependencies = package.all_requires

    assert len(dependencies) == 2
    assert {dependency.name for dependency in dependencies} == {"pytest", "pre-commit"}


def test_create_poetry_with_groups_and_explicit_main() -> None:
    poetry = Factory().create_poetry(
        fixtures_dir / "project_with_groups_and_explicit_main"
    )

    package = poetry.package
    dependencies = package.requires

    assert len(dependencies) == 1
    assert {dependency.name for dependency in dependencies} == {
        "aiohttp",
    }


def test_create_poetry_with_markers_and_extras() -> None:
    poetry = Factory().create_poetry(fixtures_dir / "project_with_markers_and_extras")

    package = poetry.package
    dependencies = package.requires
    extras = package.extras

    assert len(dependencies) == 2
    assert {dependency.name for dependency in dependencies} == {"orjson"}
    assert set(extras[canonicalize_name("all")]) == set(dependencies)
    for dependency in dependencies:
        assert dependency.in_extras == ["all"]
        assert isinstance(dependency, URLDependency)
        assert isinstance(dependency.marker, SingleMarker)
        assert dependency.marker.name == "sys_platform"
        assert dependency.marker.value == (
            "darwin" if "macosx" in dependency.url else "linux"
        )


@pytest.mark.parametrize(
    "constraint, exp_python, exp_marker",
    [
        ({"python": "3.7"}, "~3.7", 'python_version == "3.7"'),
        ({"platform": "linux"}, "*", 'sys_platform == "linux"'),
        ({"markers": 'python_version == "3.7"'}, "~3.7", 'python_version == "3.7"'),
        (
            {"markers": 'platform_machine == "x86_64"'},
            "*",
            'platform_machine == "x86_64"',
        ),
        (
            {"python": "3.7", "markers": 'platform_machine == "x86_64"'},
            "~3.7",
            'platform_machine == "x86_64" and python_version == "3.7"',
        ),
        (
            {"platform": "linux", "markers": 'platform_machine == "x86_64"'},
            "*",
            'platform_machine == "x86_64" and sys_platform == "linux"',
        ),
        (
            {
                "python": "3.7",
                "platform": "linux",
                "markers": 'platform_machine == "x86_64"',
            },
            "~3.7",
            (
                'platform_machine == "x86_64" and python_version == "3.7" and'
                ' sys_platform == "linux"'
            ),
        ),
        (
            {"python": ">=3.7", "markers": 'python_version < "4.0"'},
            "<4.0 >=3.7",
            'python_version < "4.0" and python_version >= "3.7"',
        ),
        (
            {"platform": "linux", "markers": 'sys_platform == "win32"'},
            "*",
            "",
        ),
    ],
)
def test_create_dependency_marker_variants(
    constraint: dict[str, Any], exp_python: str, exp_marker: str
) -> None:
    constraint["version"] = "1.0.0"
    dep = Factory.create_dependency("foo", constraint)
    assert dep.python_versions == exp_python
    assert dep.python_constraint == parse_constraint(exp_python)
    assert str(dep.marker) == exp_marker


@pytest.mark.parametrize(
    ("constraint", "expected"),
    [
        ("1", None),
        ({"version": "1"}, None),
        ({"version": "1", "allow-prereleases": False}, False),
        ({"version": "1", "allow-prereleases": True}, True),
    ],
)
def test_create_dependency_allow_prereleases(
    constraint: str | dict[str, str], expected: bool | None
) -> None:
    dep = Factory.create_dependency("foo", constraint)
    assert dep.allows_prereleases() is expected


def test_all_classifiers_unique_even_if_classifiers_is_duplicated() -> None:
    poetry = Factory().create_poetry(
        fixtures_dir / "project_with_duplicated_classifiers"
    )
    package = poetry.package
    assert package.all_classifiers == [
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Programming Language :: Python :: 3.13",
        "Topic :: Software Development :: Build Tools",
    ]


@pytest.mark.parametrize(
    ("project", "expected"),
    [
        ("sample_project", set(BuildSystem().dependencies)),
        (
            "project_with_build_system_requires",
            {
                Dependency.create_from_pep_508("poetry-core"),
                Dependency.create_from_pep_508("Cython (>=0.29.6,<0.30.0)"),
            },
        ),
    ],
)
def test_poetry_build_system_dependencies_from_fixtures(
    project: str, expected: set[Dependency]
) -> None:
    poetry = Factory().create_poetry(fixtures_dir / project)
    assert set(poetry.build_system_dependencies) == expected
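

# For orientation, the "project_with_build_system_requires" fixture presumably
# declares something along these lines (a sketch, not the literal fixture
# content):
#
#     [build-system]
#     requires = ["poetry-core", "Cython (>=0.29.6,<0.30.0)"]
#     build-backend = "poetry.core.masonry.api"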


SAMPLE_PROJECT_DIRECTORY = fixtures_dir / "sample_project"
SIMPLE_PROJECT_WHEEL = (
    fixtures_dir
    / "simple_project"
    / "dist"
    / "simple_project-1.2.3-py2.py3-none-any.whl"
)


@pytest.mark.parametrize(
    ("requires", "expected"),
    [
        (BuildSystem().requires, set(BuildSystem().dependencies)),
        (["poetry-core>=2.0.0"], {Dependency("poetry-core", ">=2.0.0")}),
        (["****invalid****"], set()),
        (
            ["hatch", "numpy ; sys_platform == 'win32'"],
            {
                Dependency("hatch", "*"),
                Dependency.create_from_pep_508("numpy ; sys_platform == 'win32'"),
            },
        ),
        (
            [SAMPLE_PROJECT_DIRECTORY.as_posix()],
            {
                DirectoryDependency(
                    SAMPLE_PROJECT_DIRECTORY.name, SAMPLE_PROJECT_DIRECTORY
                )
            },
        ),
        (
            [SIMPLE_PROJECT_WHEEL.as_posix()],
            {FileDependency(SIMPLE_PROJECT_WHEEL.name, SIMPLE_PROJECT_WHEEL)},
        ),
    ],
)
def test_poetry_build_system_dependencies(
    requires: list[str], expected: set[Dependency], temporary_directory: Path
) -> None:
    pyproject_toml = temporary_directory / "pyproject.toml"
    build_system_requires = ", ".join(f'"{require}"' for require in requires)
    content = f"""[project]
name = "my-package"
version = "1.2.3"

[build-system]
requires = [{build_system_requires}]
build-backend = "some.api.we.do.not.care.about"

"""
    pyproject_toml.write_text(content, encoding="utf-8")
    poetry = Factory().create_poetry(temporary_directory)

    assert set(poetry.build_system_dependencies) == expected
poetry-core-2.1.1/tests/testutils.py000066400000000000000000000054371475444614500175660ustar00rootroot00000000000000from __future__ import annotations

import shutil
import subprocess
import sys
import tarfile
import tempfile
import zipfile

from contextlib import contextmanager
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any

import tomli_w

from poetry.core.utils._compat import tomllib


if TYPE_CHECKING:
    from collections.abc import Generator


__toml_build_backend_patch__ = {
    "build-system": {
        "requires": [str(Path(__file__).parent.parent)],
        "build-backend": "poetry.core.masonry.api",
    }
}


@contextmanager
def temporary_project_directory(
    path: Path, toml_patch: dict[str, Any] | None = None
) -> Generator[str, None, None]:
    """
    Context manager that takes a project source directory, copies its content to
    a temporary directory, and patches the `pyproject.toml` using the provided
    patch, or the default patch if none is given. The default patch replaces the
    `build-system` section so that the working copy of poetry-core is used as
    the build backend.

    Once the context exits, the temporary directory is cleaned up.

    :param path: Source project root directory to copy from.
    :param toml_patch: Patch to use for the pyproject.toml,
                        defaults to build system patching.
    :return: Path to the temporary copy of the project directory.
    """
    assert (path / "pyproject.toml").exists()

    with tempfile.TemporaryDirectory(prefix="poetry-core-pep517") as tmp:
        dst = Path(tmp) / path.name
        shutil.copytree(str(path), dst)
        toml = dst / "pyproject.toml"
        with toml.open("rb") as f:
            data = tomllib.load(f)
        data.update(toml_patch or __toml_build_backend_patch__)
        with toml.open("wb") as f:
            tomli_w.dump(data, f)
        yield str(dst)
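

# Example use (sketch; `project_dir` is a placeholder): the patched copy points
# its build backend at the working copy of poetry-core, which can be verified
# like so:
#
#     with temporary_project_directory(project_dir) as tmp:
#         with (Path(tmp) / "pyproject.toml").open("rb") as f:
#             data = tomllib.load(f)
#         assert data["build-system"]["build-backend"] == "poetry.core.masonry.api"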


def subprocess_run(*args: str, **kwargs: Any) -> subprocess.CompletedProcess[str]:
    """
    Helper method to run a subprocess. Asserts for success.
    """
    encoding = "locale" if sys.version_info >= (3, 10) else None
    result = subprocess.run(
        args, text=True, encoding=encoding, capture_output=True, **kwargs
    )
    assert result.returncode == 0
    return result


def validate_wheel_contents(
    name: str, version: str, path: Path, files: list[str] | None = None
) -> None:
    dist_info = f"{name}-{version}.dist-info"
    files = files or []

    with zipfile.ZipFile(path) as z:
        namelist = z.namelist()
        for filename in ["WHEEL", "METADATA", "RECORD", *files]:
            assert f"{dist_info}/{filename}" in namelist


def validate_sdist_contents(
    name: str, version: str, path: Path, files: list[str]
) -> None:
    escaped_name = name.replace("-", "_")
    with tarfile.open(path) as tar:
        namelist = tar.getnames()
        for filename in files:
            assert f"{escaped_name}-{version}/{filename}" in namelist
poetry-core-2.1.1/tests/utils/000077500000000000000000000000001475444614500163035ustar00rootroot00000000000000poetry-core-2.1.1/tests/utils/__init__.py000066400000000000000000000000001475444614500204020ustar00rootroot00000000000000poetry-core-2.1.1/tests/utils/test_helpers.py000066400000000000000000000167301475444614500213650ustar00rootroot00000000000000from __future__ import annotations

import sys
import tempfile

from pathlib import Path
from stat import S_IREAD
from typing import TYPE_CHECKING

import pytest


if TYPE_CHECKING:
    from pytest_mock import MockerFixture

from poetry.core.utils.helpers import combine_unicode
from poetry.core.utils.helpers import parse_requires
from poetry.core.utils.helpers import readme_content_type
from poetry.core.utils.helpers import robust_rmtree
from poetry.core.utils.helpers import temporary_directory


def test_parse_requires() -> None:
    requires = """\
jsonschema>=2.6.0.0,<3.0.0.0
lockfile>=0.12.0.0,<0.13.0.0
pip-tools>=1.11.0.0,<2.0.0.0
pkginfo>=1.4.0.0,<2.0.0.0
pyrsistent>=0.14.2.0,<0.15.0.0
toml>=0.9.0.0,<0.10.0.0
cleo>=0.6.0.0,<0.7.0.0
cachy>=0.1.1.0,<0.2.0.0
cachecontrol>=0.12.4.0,<0.13.0.0
requests>=2.18.0.0,<3.0.0.0
msgpack-python>=0.5.0.0,<0.6.0.0
pyparsing>=2.2.0.0,<3.0.0.0
requests-toolbelt>=0.8.0.0,<0.9.0.0

[:(python_version >= "2.7.0.0" and python_version < "2.8.0.0") or (python_version >= "3.4.0.0" and python_version < "3.5.0.0")]
typing>=3.6.0.0,<4.0.0.0

[:python_version >= "2.7.0.0" and python_version < "2.8.0.0"]
virtualenv>=15.2.0.0,<16.0.0.0
pathlib2>=2.3.0.0,<3.0.0.0

[:python_version >= "3.4.0.0" and python_version < "3.6.0.0"]
zipfile36>=0.1.0.0,<0.2.0.0

[dev]
isort@ git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort
"""
    result = parse_requires(requires)
    expected = [
        "jsonschema>=2.6.0.0,<3.0.0.0",
        "lockfile>=0.12.0.0,<0.13.0.0",
        "pip-tools>=1.11.0.0,<2.0.0.0",
        "pkginfo>=1.4.0.0,<2.0.0.0",
        "pyrsistent>=0.14.2.0,<0.15.0.0",
        "toml>=0.9.0.0,<0.10.0.0",
        "cleo>=0.6.0.0,<0.7.0.0",
        "cachy>=0.1.1.0,<0.2.0.0",
        "cachecontrol>=0.12.4.0,<0.13.0.0",
        "requests>=2.18.0.0,<3.0.0.0",
        "msgpack-python>=0.5.0.0,<0.6.0.0",
        "pyparsing>=2.2.0.0,<3.0.0.0",
        "requests-toolbelt>=0.8.0.0,<0.9.0.0",
        (
            'typing>=3.6.0.0,<4.0.0.0 ; (python_version >= "2.7.0.0" and python_version'
            ' < "2.8.0.0") or (python_version >= "3.4.0.0" and python_version <'
            ' "3.5.0.0")'
        ),
        (
            'virtualenv>=15.2.0.0,<16.0.0.0 ; python_version >= "2.7.0.0" and'
            ' python_version < "2.8.0.0"'
        ),
        (
            'pathlib2>=2.3.0.0,<3.0.0.0 ; python_version >= "2.7.0.0" and'
            ' python_version < "2.8.0.0"'
        ),
        (
            'zipfile36>=0.1.0.0,<0.2.0.0 ; python_version >= "3.4.0.0" and'
            ' python_version < "3.6.0.0"'
        ),
        (
            "isort@"
            " git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort"
            ' ; extra == "dev"'
        ),
    ]
    assert result == expected


def test_utils_helpers_combine_unicode() -> None:
    combined_expected = "\u00e9"  # "é" as a single, composed (NFC) code point
    decomposed = "e\u0301"  # "e" followed by a combining acute accent (NFD)
    assert combined_expected != decomposed

    combined = combine_unicode(decomposed)
    assert combined == combined_expected


def test_utils_helpers_temporary_directory_readonly_file() -> None:
    with temporary_directory() as temp_dir:
        readonly_filename = temp_dir / "file.txt"
        with readonly_filename.open(mode="w+", encoding="utf-8") as readonly_file:
            readonly_file.write("Poetry rocks!")
        readonly_filename.chmod(S_IREAD)

    assert not temp_dir.exists()
    assert not readonly_filename.exists()


@pytest.mark.parametrize(
    "readme, content_type",
    [
        ("README.rst", "text/x-rst"),
        ("README.md", "text/markdown"),
        ("README", "text/plain"),
        (Path("README.rst"), "text/x-rst"),
        (Path("README.md"), "text/markdown"),
        (Path("README"), "text/plain"),
    ],
)
def test_utils_helpers_readme_content_type(
    readme: str | Path, content_type: str
) -> None:
    assert readme_content_type(readme) == content_type


@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires Python 3.10 or higher")
def test_temporary_directory_python_3_10_or_newer(mocker: MockerFixture) -> None:
    mocked_rmtree = mocker.patch("shutil.rmtree")
    mocked_temp_dir = mocker.patch("tempfile.TemporaryDirectory")
    mocked_mkdtemp = mocker.patch("tempfile.mkdtemp")

    mocked_temp_dir.return_value.__enter__.return_value = "hello from test"

    with temporary_directory() as tmp:
        assert tmp == Path("hello from test")

    assert not mocked_rmtree.called
    assert not mocked_mkdtemp.called
    mocked_temp_dir.assert_called_with(ignore_cleanup_errors=True)


@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires Python 3.10 or higher")
def test_temporary_directory_python_3_10_or_newer_ensure_cleanup_on_error(
    mocker: MockerFixture,
) -> None:
    mocked_rmtree = mocker.patch("shutil.rmtree")
    mocked_temp_dir = mocker.patch("tempfile.TemporaryDirectory")
    mocked_mkdtemp = mocker.patch("tempfile.mkdtemp")

    mocked_temp_dir.return_value.__enter__.return_value = "hello from test"

    with (
        pytest.raises(Exception, match="Something went wrong"),
        temporary_directory() as tmp,
    ):
        assert tmp == Path("hello from test")

        raise Exception("Something went wrong")

    assert not mocked_rmtree.called
    assert not mocked_mkdtemp.called
    mocked_temp_dir.assert_called_with(ignore_cleanup_errors=True)


@pytest.mark.skipif(
    sys.version_info >= (3, 10), reason="Not supported on Python 3.10 or higher"
)
def test_temporary_directory_python_3_9_or_older(mocker: MockerFixture) -> None:
    mocked_rmtree = mocker.patch("shutil.rmtree")
    mocked_temp_dir = mocker.patch("tempfile.TemporaryDirectory")
    mocked_mkdtemp = mocker.patch("tempfile.mkdtemp")

    mocked_mkdtemp.return_value = "hello from test"

    with temporary_directory() as tmp:
        assert tmp == Path("hello from test")

    assert mocked_rmtree.called
    assert mocked_mkdtemp.called
    assert not mocked_temp_dir.called


@pytest.mark.skipif(
    sys.version_info >= (3, 10), reason="Not supported on Python 3.10 or higher"
)
def test_temporary_directory_python_3_9_or_older_ensure_cleanup_on_error(
    mocker: MockerFixture,
) -> None:
    mocked_rmtree = mocker.patch("shutil.rmtree")
    mocked_temp_dir = mocker.patch("tempfile.TemporaryDirectory")
    mocked_mkdtemp = mocker.patch("tempfile.mkdtemp")

    mocked_mkdtemp.return_value = "hello from test"

    with (
        pytest.raises(Exception, match="Something went wrong"),
        temporary_directory() as tmp,
    ):
        assert tmp == Path("hello from test")

        raise Exception("Something went wrong")

    assert mocked_rmtree.called
    assert mocked_mkdtemp.called
    assert not mocked_temp_dir.called


def test_robust_rmtree(mocker: MockerFixture) -> None:
    mocked_rmtree = mocker.patch("shutil.rmtree")

    # this should work after an initial exception
    name = tempfile.mkdtemp()
    mocked_rmtree.side_effect = [
        OSError(
            "Couldn't delete file yet, waiting for references to clear", "mocked path"
        ),
        None,
    ]
    robust_rmtree(name)

    # this should give up after retrying multiple times
    mocked_rmtree.side_effect = OSError(
        "Couldn't delete file yet, this error won't go away after first attempt"
    )
    with pytest.raises(OSError):
        robust_rmtree(name, max_timeout=0.04)

    # clear the side effect (breaks the tear-down otherwise)
    mocker.stop(mocked_rmtree)
    # use the real method to remove the temp folder we created for this test
    robust_rmtree(name)
    assert not Path(name).exists()
poetry-core-2.1.1/tests/vcs/000077500000000000000000000000001475444614500157365ustar00rootroot00000000000000poetry-core-2.1.1/tests/vcs/__init__.py000066400000000000000000000000001475444614500200350ustar00rootroot00000000000000poetry-core-2.1.1/tests/vcs/test_vcs.py000066400000000000000000000424701475444614500201510ustar00rootroot00000000000000from __future__ import annotations

import os
import subprocess

from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any

import pytest

from poetry.core.utils._compat import WINDOWS
from poetry.core.vcs import get_vcs
from poetry.core.vcs.git import Git
from poetry.core.vcs.git import GitUrl
from poetry.core.vcs.git import ParsedUrl
from poetry.core.vcs.git import _reset_executable
from poetry.core.vcs.git import executable


if TYPE_CHECKING:
    from collections.abc import Iterator

    from pytest_mock import MockerFixture


@pytest.fixture
def reset_git() -> Iterator[None]:
    _reset_executable()
    try:
        yield
    finally:
        _reset_executable()


@pytest.fixture(autouse=True)
def with_mocked_get_vcs() -> None:
    # disable the global mocking of get_vcs for the tests in this module
    pass


@pytest.mark.parametrize(
    "url, normalized",
    [
        (
            "git+ssh://user@hostname:project.git#commit",
            GitUrl("user@hostname:project.git", "commit", None),
        ),
        (
            "git+http://user@hostname/project/blah.git@commit",
            GitUrl("http://user@hostname/project/blah.git", "commit", None),
        ),
        (
            "git+https://user@hostname/project/blah.git",
            GitUrl("https://user@hostname/project/blah.git", None, None),
        ),
        (
            "git+https://user@hostname/project%20~_-.foo/blah%20~_-.bar.git",
            GitUrl(
                "https://user@hostname/project%20~_-.foo/blah%20~_-.bar.git", None, None
            ),
        ),
        (
            "git+https://user@hostname:project/blah.git",
            GitUrl("https://user@hostname/project/blah.git", None, None),
        ),
        (
            "git+ssh://git@github.com:sdispater/poetry.git#v1.0.27",
            GitUrl("git@github.com:sdispater/poetry.git", "v1.0.27", None),
        ),
        (
            "git+ssh://git@github.com:/sdispater/poetry.git",
            GitUrl("git@github.com:/sdispater/poetry.git", None, None),
        ),
        (
            "git+ssh://git@github.com:org/repo",
            GitUrl("git@github.com:org/repo", None, None),
        ),
        (
            "git+ssh://git@github.com/org/repo",
            GitUrl("ssh://git@github.com/org/repo", None, None),
        ),
        ("git+ssh://foo:22/some/path", GitUrl("ssh://foo:22/some/path", None, None)),
        ("git@github.com:org/repo", GitUrl("git@github.com:org/repo", None, None)),
        (
            "git+https://github.com/sdispater/pendulum",
            GitUrl("https://github.com/sdispater/pendulum", None, None),
        ),
        (
            "git+https://github.com/sdispater/pendulum#7a018f2d075b03a73409e8356f9b29c9ad4ea2c5",
            GitUrl(
                "https://github.com/sdispater/pendulum",
                "7a018f2d075b03a73409e8356f9b29c9ad4ea2c5",
                None,
            ),
        ),
        (
            "git+ssh://git@git.example.com:b/b.git#v1.0.0",
            GitUrl("git@git.example.com:b/b.git", "v1.0.0", None),
        ),
        (
            "git+ssh://git@github.com:sdispater/pendulum.git#foo/bar",
            GitUrl("git@github.com:sdispater/pendulum.git", "foo/bar", None),
        ),
        ("git+file:///foo/bar.git", GitUrl("file:///foo/bar.git", None, None)),
        (
            "git+file://C:\\Users\\hello\\testing.git#zkat/windows-files",
            GitUrl("file://C:\\Users\\hello\\testing.git", "zkat/windows-files", None),
        ),
        # hidden directories on Windows use $ in their path
        # python-poetry/poetry#5493
        (
            "git+file://C:\\Users\\hello$\\testing.git#zkat/windows-files",
            GitUrl("file://C:\\Users\\hello$\\testing.git", "zkat/windows-files", None),
        ),
        (
            "git+https://git.example.com/sdispater/project/my_repo.git",
            GitUrl("https://git.example.com/sdispater/project/my_repo.git", None, None),
        ),
        (
            "git+ssh://git@git.example.com:sdispater/project/my_repo.git",
            GitUrl("git@git.example.com:sdispater/project/my_repo.git", None, None),
        ),
        (
            "git+https://github.com/demo/pyproject-demo-subdirectory.git#subdirectory=project",
            GitUrl(
                "https://github.com/demo/pyproject-demo-subdirectory.git",
                None,
                "project",
            ),
        ),
        (
            "git+https://github.com/demo/pyproject-demo-subdirectory.git@commit#subdirectory=project",
            GitUrl(
                "https://github.com/demo/pyproject-demo-subdirectory.git",
                "commit",
                "project",
            ),
        ),
        (
            "git+https://github.com/demo/pyproject-demo-subdirectory.git#commit&subdirectory=project",
            GitUrl(
                "https://github.com/demo/pyproject-demo-subdirectory.git",
                "commit",
                "project",
            ),
        ),
        (
            "git+https://github.com/demo/pyproject-demo-subdirectory.git#commit#subdirectory=project",
            GitUrl(
                "https://github.com/demo/pyproject-demo-subdirectory.git",
                "commit",
                "project",
            ),
        ),
        (
            "git+https://github.com/demo/pyproject-demo-subdirectory.git@commit&subdirectory=project",
            GitUrl(
                "https://github.com/demo/pyproject-demo-subdirectory.git",
                "commit",
                "project",
            ),
        ),
        (
            "git+https://github.com/demo/pyproject-demo-subdirectory.git@subdirectory#subdirectory=subdirectory",
            GitUrl(
                "https://github.com/demo/pyproject-demo-subdirectory.git",
                "subdirectory",
                "subdirectory",
            ),
        ),
    ],
)
def test_normalize_url(url: str, normalized: GitUrl) -> None:
    assert Git.normalize_url(url) == normalized
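

# GitUrl is a (url, revision, subdirectory) triple produced by normalization;
# ParsedUrl, exercised below, additionally exposes protocol, resource, port,
# pathname, user and name.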


@pytest.mark.parametrize(
    "url, parsed",
    [
        (
            "git+ssh://user@hostname:project.git#commit",
            ParsedUrl(
                "ssh", "hostname", ":project.git", "user", None, "project", "commit"
            ),
        ),
        (
            "git+http://user@hostname/project/blah.git@commit",
            ParsedUrl(
                "http", "hostname", "/project/blah.git", "user", None, "blah", "commit"
            ),
        ),
        (
            "git+https://user@hostname/project/blah.git",
            ParsedUrl(
                "https", "hostname", "/project/blah.git", "user", None, "blah", None
            ),
        ),
        (
            "git+https://user@hostname/project%20~_-.foo/blah%20~_-.bar.git",
            ParsedUrl(
                "https",
                "hostname",
                "/project%20~_-.foo/blah%20~_-.bar.git",
                "user",
                None,
                "blah%20~_-.bar",
                None,
            ),
        ),
        (
            "git+https://user@hostname:project/blah.git",
            ParsedUrl(
                "https", "hostname", ":project/blah.git", "user", None, "blah", None
            ),
        ),
        (
            "git+ssh://git@github.com:sdispater/poetry.git#v1.0.27",
            ParsedUrl(
                "ssh",
                "github.com",
                ":sdispater/poetry.git",
                "git",
                None,
                "poetry",
                "v1.0.27",
            ),
        ),
        (
            "git+ssh://git@github.com:sdispater/poetry.git#egg=name",
            ParsedUrl(
                "ssh",
                "github.com",
                ":sdispater/poetry.git",
                "git",
                None,
                "poetry",
                None,
            ),
        ),
        (
            "git+ssh://git@github.com:/sdispater/poetry.git",
            ParsedUrl(
                "ssh",
                "github.com",
                ":/sdispater/poetry.git",
                "git",
                None,
                "poetry",
                None,
            ),
        ),
        (
            "git+ssh://git@github.com:org/repo",
            ParsedUrl("ssh", "github.com", ":org/repo", "git", None, "repo", None),
        ),
        (
            "git+ssh://git@github.com/org/repo",
            ParsedUrl("ssh", "github.com", "/org/repo", "git", None, "repo", None),
        ),
        (
            "git+ssh://foo:22/some/path",
            ParsedUrl("ssh", "foo", "/some/path", None, "22", "path", None),
        ),
        (
            "git@github.com:org/repo",
            ParsedUrl("ssh", "github.com", ":org/repo", "git", None, "repo", None),
        ),
        (
            "git+https://github.com/sdispater/pendulum",
            ParsedUrl(
                "https",
                "github.com",
                "/sdispater/pendulum",
                None,
                None,
                "pendulum",
                None,
            ),
        ),
        (
            "git+https://username:@github.com/sdispater/pendulum",
            ParsedUrl(
                "https",
                "github.com",
                "/sdispater/pendulum",
                "username:",
                None,
                "pendulum",
                None,
            ),
        ),
        (
            "git+https://username:password@github.com/sdispater/pendulum",
            ParsedUrl(
                "https",
                "github.com",
                "/sdispater/pendulum",
                "username:password",
                None,
                "pendulum",
                None,
            ),
        ),
        (
            "git+https://username+suffix:password@github.com/sdispater/pendulum",
            ParsedUrl(
                "https",
                "github.com",
                "/sdispater/pendulum",
                "username+suffix:password",
                None,
                "pendulum",
                None,
            ),
        ),
        (
            "git+https://github.com/sdispater/pendulum#7a018f2d075b03a73409e8356f9b29c9ad4ea2c5",
            ParsedUrl(
                "https",
                "github.com",
                "/sdispater/pendulum",
                None,
                None,
                "pendulum",
                "7a018f2d075b03a73409e8356f9b29c9ad4ea2c5",
            ),
        ),
        (
            "git+ssh://git@git.example.com:b/b.git#v1.0.0",
            ParsedUrl("ssh", "git.example.com", ":b/b.git", "git", None, "b", "v1.0.0"),
        ),
        (
            "git+ssh://git@github.com:sdispater/pendulum.git#foo/bar",
            ParsedUrl(
                "ssh",
                "github.com",
                ":sdispater/pendulum.git",
                "git",
                None,
                "pendulum",
                "foo/bar",
            ),
        ),
        (
            "git+file:///foo/bar.git",
            ParsedUrl("file", None, "/foo/bar.git", None, None, "bar", None),
        ),
        (
            "git+file://C:\\Users\\hello\\testing.git#zkat/windows-files",
            ParsedUrl(
                "file",
                "C",
                ":\\Users\\hello\\testing.git",
                None,
                None,
                "testing",
                "zkat/windows-files",
            ),
        ),
        (
            "git+https://git.example.com/sdispater/project/my_repo.git",
            ParsedUrl(
                "https",
                "git.example.com",
                "/sdispater/project/my_repo.git",
                None,
                None,
                "my_repo",
                None,
            ),
        ),
        (
            "git+ssh://git@git.example.com:sdispater/project/my_repo.git",
            ParsedUrl(
                "ssh",
                "git.example.com",
                ":sdispater/project/my_repo.git",
                "git",
                None,
                "my_repo",
                None,
            ),
        ),
        (
            "git+ssh://git@git.example.com:sdispater/project/+git/my_repo.git",
            ParsedUrl(
                "ssh",
                "git.example.com",
                ":sdispater/project/+git/my_repo.git",
                "git",
                None,
                "my_repo",
                None,
            ),
        ),
        (
            "git+ssh://git@git.example.com:sdispater/project/my_repo.git#subdirectory=project-dir",
            ParsedUrl(
                "ssh",
                "git.example.com",
                ":sdispater/project/my_repo.git",
                "git",
                None,
                "my_repo",
                None,
                "project-dir",
            ),
        ),
        (
            "git+ssh://git@git.example.com:sdispater/project/my_repo.git#commit&subdirectory=project-dir",
            ParsedUrl(
                "ssh",
                "git.example.com",
                ":sdispater/project/my_repo.git",
                "git",
                None,
                "my_repo",
                "commit",
                "project-dir",
            ),
        ),
        (
            "git+ssh://git@git.example.com:sdispater/project/my_repo.git@commit#subdirectory=project-dir",
            ParsedUrl(
                "ssh",
                "git.example.com",
                ":sdispater/project/my_repo.git",
                "git",
                None,
                "my_repo",
                "commit",
                "project-dir",
            ),
        ),
        (
            "git+ssh://git@git.example.com:sdispater/project/my_repo.git@commit&subdirectory=project_dir",
            ParsedUrl(
                "ssh",
                "git.example.com",
                ":sdispater/project/my_repo.git",
                "git",
                None,
                "my_repo",
                "commit",
                "project_dir",
            ),
        ),
        (
            "git+ssh://git@git.example.com:sdispater/project/my_repo.git@commit#egg=package&subdirectory=project_dir",
            ParsedUrl(
                "ssh",
                "git.example.com",
                ":sdispater/project/my_repo.git",
                "git",
                None,
                "my_repo",
                "commit",
                "project_dir",
            ),
        ),
    ],
)
def test_parse_url(url: str, parsed: ParsedUrl) -> None:
    result = ParsedUrl.parse(url)
    assert result.name == parsed.name
    assert result.pathname == parsed.pathname
    assert result.port == parsed.port
    assert result.protocol == parsed.protocol
    assert result.resource == parsed.resource
    assert result.rev == parsed.rev
    assert result.url == parsed.url
    assert result.user == parsed.user
    assert result.subdirectory == parsed.subdirectory
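

# Illustrative sketch (not part of the original suite; the helper name is
# illustrative only): one SSH-style URL from the table above, parsed field by
# field.
def _example_parse_ssh_url() -> None:
    parsed = ParsedUrl.parse(
        "git+ssh://git@git.example.com:sdispater/project/my_repo.git"
    )
    assert parsed.protocol == "ssh"
    assert parsed.resource == "git.example.com"
    assert parsed.pathname == ":sdispater/project/my_repo.git"
    assert parsed.user == "git"
    assert parsed.name == "my_repo"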


def test_parse_url_should_fail() -> None:
    url = "https://" + "@" * 64 + "!"

    with pytest.raises(ValueError):
        ParsedUrl.parse(url)


@pytest.mark.skipif(
    not WINDOWS,
    reason=(
        "Retrieving the complete path to git is only necessary on Windows, for security"
        " reasons"
    ),
)
def test_ensure_absolute_path_to_git(reset_git: None, mocker: MockerFixture) -> None:
    def fake_check_output(cmd: list[str], *args: Any, **kwargs: Any) -> str | bytes:
        if Path(cmd[0]).name == "where.exe":
            return "\n".join(
                [str(Path.cwd().joinpath("git.exe")), r"C:\Git\cmd\git.exe"]
            )

        return b""

    mock = mocker.patch.object(
        subprocess, "check_output", side_effect=fake_check_output
    )

    Git().run("config")

    assert mock.call_args_list[-1][0][0] == [r"C:\Git\cmd\git.exe", "config"]

    mock = mocker.patch.object(subprocess, "check_output", return_value=b"")

    Git().run("config")

    assert mock.call_args_list[-1][0][0] == [r"C:\Git\cmd\git.exe", "config"]


def test_get_vcs_encoding(tmp_path: Path) -> None:
    repo_path = tmp_path / "répö"
    repo_path.mkdir()
    assert repo_path.exists()
    assert subprocess.check_call([executable(), "init"], cwd=repo_path) == 0
    vcs = get_vcs(repo_path)
    assert vcs is not None
    assert vcs._work_dir is not None
    assert vcs._work_dir.exists()
    assert vcs._work_dir == repo_path


def test_get_vcs_subdir(tmp_path: Path) -> None:
    repo_path = tmp_path / "répö"
    repo_path.mkdir()
    assert repo_path.exists()
    assert subprocess.check_call([executable(), "init"], cwd=repo_path) == 0
    subdir = repo_path / "subdir"
    subdir.mkdir()
    vcs = get_vcs(subdir)
    assert vcs is not None
    assert vcs._work_dir is not None
    assert vcs._work_dir.exists()
    assert vcs._work_dir == repo_path


def test_get_vcs_no_repo(tmp_path: Path, mocker: MockerFixture) -> None:
    repo_path = tmp_path / "répö"
    repo_path.mkdir()
    assert repo_path.exists()
    assert subprocess.check_call([executable(), "init"], cwd=repo_path) == 0

    # This makes sure git fails to find the git directory even if one
    # exists at a higher level in the filesystem.
    mocker.patch.dict(os.environ, {"GIT_DIR": os.devnull})

    vcs = get_vcs(repo_path)
    assert vcs is None


def test_get_vcs_ignored_subdir(tmp_path: Path) -> None:
    # See https://github.com/python-poetry/poetry-core/pull/611
    repo_path = tmp_path / "répö"
    repo_path.mkdir()
    assert repo_path.exists()
    assert subprocess.check_call([executable(), "init"], cwd=repo_path) == 0
    (repo_path / ".gitignore").write_text("/ignored", encoding="utf-8")
    subdir = repo_path / "ignored"
    subdir.mkdir()

    vcs = get_vcs(subdir)
    assert vcs is None
poetry-core-2.1.1/tests/version/000077500000000000000000000000001475444614500166305ustar00rootroot00000000000000poetry-core-2.1.1/tests/version/__init__.py000066400000000000000000000000001475444614500207270ustar00rootroot00000000000000poetry-core-2.1.1/tests/version/pep440/000077500000000000000000000000001475444614500176445ustar00rootroot00000000000000poetry-core-2.1.1/tests/version/pep440/__init__.py000066400000000000000000000000001475444614500217430ustar00rootroot00000000000000poetry-core-2.1.1/tests/version/pep440/test_segments.py000066400000000000000000000122621475444614500231050ustar00rootroot00000000000000from __future__ import annotations

import pytest

from poetry.core.version.pep440 import Release
from poetry.core.version.pep440 import ReleaseTag
from poetry.core.version.pep440.segments import RELEASE_PHASE_NORMALIZATIONS


def test_release_post_init_minor_and_patch() -> None:
    """
    Minor and patch must not be None but zero if there are extra parts.
    """
    release = Release(1, extra=(0,))
    assert release.minor == 0
    assert release.patch == 0


def test_release_post_init_zero_version() -> None:
    """
    Smoke test for edge case (because zeros are stripped for comparison).
    """
    Release(0)


@pytest.mark.parametrize("precision1", range(1, 6))
@pytest.mark.parametrize("precision2", range(1, 6))
def test_release_equal_zero_padding(precision1: int, precision2: int) -> None:
    release1 = Release.from_parts(*range(1, precision1 + 1))
    if precision1 > precision2:
        # e.g. 1.2.3 != 1.2
        release2 = Release.from_parts(*range(1, precision2 + 1))
        assert release1 != release2
        assert release2 != release1
    else:
        # e.g. 1.2 == 1.2.0
        release2 = Release.from_parts(
            *range(1, precision1 + 1), *[0] * (precision2 - precision1)
        )
        assert release1 == release2
        assert release2 == release1
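

# Minimal sketch (not part of the original suite; the helper name is
# illustrative only): trailing zeros do not affect equality, while a longer
# release with a non-zero tail does.
def _example_zero_padding_equality() -> None:
    assert Release.from_parts(1, 2) == Release.from_parts(1, 2, 0)
    assert Release.from_parts(1, 2, 3) != Release.from_parts(1, 2)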


@pytest.mark.parametrize(
    "parts,result",
    [
        ((1,), Release(1)),
        ((1, 2), Release(1, 2)),
        ((1, 2, 3), Release(1, 2, 3)),
        ((1, 2, 3, 4), Release(1, 2, 3, (4,))),
        ((1, 2, 3, 4, 5, 6), Release(1, 2, 3, (4, 5, 6))),
    ],
)
def test_release_from_parts_to_parts(parts: tuple[int, ...], result: Release) -> None:
    assert Release.from_parts(*parts) == result
    assert result.to_parts() == parts


@pytest.mark.parametrize("precision", list(range(1, 6)))
def test_release_precision(precision: int) -> None:
    """
    Semantically identical releases might have a different precision, e.g. 1 vs. 1.0
    """
    release = Release.from_parts(1, *[0] * (precision - 1))
    assert release.precision == precision
    assert len(release.to_parts()) == precision


@pytest.mark.parametrize("precision", list(range(1, 6)))
def test_release_text(precision: int) -> None:
    increments = list(range(1, precision + 1))
    zeros = [1] + [0] * (precision - 1)

    assert Release.from_parts(*increments).text == ".".join(str(i) for i in increments)
    assert Release.from_parts(*zeros).text == ".".join(str(i) for i in zeros)


@pytest.mark.parametrize("precision", list(range(1, 6)))
def test_release_next_major(precision: int) -> None:
    release = Release.from_parts(1, *[0] * (precision - 1))
    expected = Release.from_parts(2, *[0] * (precision - 1))
    assert release.next_major() == expected


@pytest.mark.parametrize("precision", list(range(1, 6)))
def test_release_next_minor(precision: int) -> None:
    release = Release.from_parts(1, *[0] * (precision - 1))
    expected = Release.from_parts(1, 1, *[0] * (precision - 2))
    assert release.next_minor() == expected


@pytest.mark.parametrize("precision", list(range(1, 6)))
def test_release_next_patch(precision: int) -> None:
    release = Release.from_parts(1, *[0] * (precision - 1))
    expected = Release.from_parts(1, 0, 1, *[0] * (precision - 3))
    assert release.next_patch() == expected


@pytest.mark.parametrize(
    ("release", "expected"),
    [
        (Release(0), Release(1)),
        (Release(1), Release(2)),
        (Release(0, 0), Release(0, 1)),
        (Release(1, 2), Release(1, 3)),
        (Release(0, 0, 0), Release(0, 0, 1)),
        (Release(1, 2, 3), Release(1, 2, 4)),
        (Release(0, 0, 0, (0,)), Release(0, 0, 0, (1,))),
        (Release(1, 2, 3, (4,)), Release(1, 2, 3, (5,))),
        (Release(0, 0, 0, (0, 0)), Release(0, 0, 0, (0, 1))),
        (Release(1, 2, 3, (4, 5)), Release(1, 2, 3, (4, 6))),
    ],
)
def test_release_next(release: Release, expected: Release) -> None:
    assert release.next() == expected


@pytest.mark.parametrize(
    "parts,result",
    [
        (("a",), ReleaseTag("alpha", 0)),
        (("a", 1), ReleaseTag("alpha", 1)),
        (("b",), ReleaseTag("beta", 0)),
        (("b", 1), ReleaseTag("beta", 1)),
        (("pre",), ReleaseTag("preview", 0)),
        (("pre", 1), ReleaseTag("preview", 1)),
        (("c",), ReleaseTag("rc", 0)),
        (("c", 1), ReleaseTag("rc", 1)),
        (("r",), ReleaseTag("rev", 0)),
        (("r", 1), ReleaseTag("rev", 1)),
    ],
)
def test_release_tag_normalisation(
    parts: tuple[str] | tuple[str, int], result: ReleaseTag
) -> None:
    tag = ReleaseTag(*parts)
    assert tag == result
    assert tag.to_string() == result.to_string()


@pytest.mark.parametrize(
    "parts,result",
    [
        (("a",), ReleaseTag("beta")),
        (("b",), ReleaseTag("rc")),
        (("post",), None),
        (("rc",), None),
        (("rev",), None),
        (("dev",), None),
    ],
)
def test_release_tag_next_phase(parts: tuple[str], result: ReleaseTag | None) -> None:
    assert ReleaseTag(*parts).next_phase() == result


@pytest.mark.parametrize("phase", list({*RELEASE_PHASE_NORMALIZATIONS.keys()}))
def test_release_tag_next(phase: str) -> None:
    tag = ReleaseTag(phase=phase).next()
    assert tag.phase == RELEASE_PHASE_NORMALIZATIONS[phase]
    assert tag.number == 1
poetry-core-2.1.1/tests/version/pep440/test_version.py000066400000000000000000000340341475444614500227460ustar00rootroot00000000000000from __future__ import annotations

from typing import TYPE_CHECKING

import pytest

from poetry.core.version.exceptions import InvalidVersionError
from poetry.core.version.pep440 import PEP440Version
from poetry.core.version.pep440 import Release
from poetry.core.version.pep440 import ReleaseTag


if TYPE_CHECKING:
    from collections.abc import Sequence


@pytest.mark.parametrize(
    "text,result",
    [
        ("1", PEP440Version(release=Release.from_parts(1))),
        ("1.2.3", PEP440Version(release=Release.from_parts(1, 2, 3))),
        (
            "1.2.3-1",
            PEP440Version(
                release=Release.from_parts(1, 2, 3), post=ReleaseTag("post", 1)
            ),
        ),
        (
            "1.2.3.dev1",
            PEP440Version(
                release=Release.from_parts(1, 2, 3), dev=ReleaseTag("dev", 1)
            ),
        ),
        (
            "1.2.3-1.dev1",
            PEP440Version(
                release=Release.from_parts(1, 2, 3),
                post=ReleaseTag("post", 1),
                dev=ReleaseTag("dev", 1),
            ),
        ),
        (
            "1.2.3+local",
            PEP440Version(release=Release.from_parts(1, 2, 3), local="local"),
        ),
        (
            "1.2.3+local.1",
            PEP440Version(release=Release.from_parts(1, 2, 3), local=("local", 1)),
        ),
        (
            "1.2.3+local1",
            PEP440Version(release=Release.from_parts(1, 2, 3), local="local1"),
        ),
        ("1.2.3+1", PEP440Version(release=Release.from_parts(1, 2, 3), local=1)),
        (
            "1.2.3a1",
            PEP440Version(
                release=Release.from_parts(1, 2, 3), pre=ReleaseTag("alpha", 1)
            ),
        ),
        (
            "1.2.3.a1",
            PEP440Version(
                release=Release.from_parts(1, 2, 3), pre=ReleaseTag("alpha", 1)
            ),
        ),
        (
            "1.2.3alpha1",
            PEP440Version(
                release=Release.from_parts(1, 2, 3), pre=ReleaseTag("alpha", 1)
            ),
        ),
        (
            "1.2.3b1",
            PEP440Version(
                release=Release.from_parts(1, 2, 3), pre=ReleaseTag("beta", 1)
            ),
        ),
        (
            "1.2.3.b1",
            PEP440Version(
                release=Release.from_parts(1, 2, 3), pre=ReleaseTag("beta", 1)
            ),
        ),
        (
            "1.2.3beta1",
            PEP440Version(
                release=Release.from_parts(1, 2, 3), pre=ReleaseTag("beta", 1)
            ),
        ),
        (
            "1.2.3rc1",
            PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag("rc", 1)),
        ),
        (
            "1.2.3.rc1",
            PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag("rc", 1)),
        ),
        (
            "2.2.0dev0+build.05669607",
            PEP440Version(
                release=Release.from_parts(2, 2, 0),
                dev=ReleaseTag("dev", 0),
                local=("build", "05669607"),
            ),
        ),
    ],
)
def test_pep440_parse_text(text: str, result: PEP440Version) -> None:
    assert PEP440Version.parse(text) == result
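

# Sketch (mirrors rows above; the helper name is illustrative only):
# alternative pre-release spellings parse to the same version.
def _example_prerelease_spellings_normalize() -> None:
    canonical = PEP440Version.parse("1.2.3a1")
    assert PEP440Version.parse("1.2.3.a1") == canonical
    assert PEP440Version.parse("1.2.3alpha1") == canonical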


@pytest.mark.parametrize(
    "text", ["1.2.3.dev1-1", "example-1", "1.2.3-random1", "1.2.3-1-1"]
)
def test_pep440_parse_text_invalid_versions(text: str) -> None:
    with pytest.raises(InvalidVersionError):
        PEP440Version.parse(text)


@pytest.mark.parametrize(
    ("version", "major", "minor", "patch", "non_semver_parts", "parts"),
    [
        ("1", 1, None, None, (), (1,)),
        ("1.2", 1, 2, None, (), (1, 2)),
        ("1.2.3", 1, 2, 3, (), (1, 2, 3)),
        ("1.2.3.4", 1, 2, 3, (4,), (1, 2, 3, 4)),
        ("1.2.3.4.5", 1, 2, 3, (4, 5), (1, 2, 3, 4, 5)),
        ("9!1.2.3.4.5a6.post7.dev8", 1, 2, 3, (4, 5), (1, 2, 3, 4, 5)),
    ],
)
def test_properties(
    version: str,
    major: int,
    minor: int | None,
    patch: int | None,
    non_semver_parts: Sequence[int],
    parts: Sequence[int],
) -> None:
    v = PEP440Version.parse(version)
    assert v.major == major
    assert v.minor == minor
    assert v.patch == patch
    assert v.non_semver_parts == non_semver_parts
    assert v.parts == parts


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", False),
        ("1.dev0", False),
        ("1.a0", True),
        ("1.b1", True),
        ("1.rc3", True),
        ("1.a0.dev0", True),
        ("9!1.2.3a1.post2.dev3", True),
    ],
)
def test_is_prerelease(version: str, expected: bool) -> None:
    v = PEP440Version.parse(version)
    assert v.is_prerelease() is expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", False),
        ("1.post1", True),
        ("9!1.2.3a1.post2.dev3", True),
    ],
)
def test_is_postrelease(version: str, expected: bool) -> None:
    v = PEP440Version.parse(version)
    assert v.is_postrelease() is expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", False),
        ("1.dev0", True),
        ("1.a0.dev0", True),
        ("9!1.2.3a1.post2.dev3", True),
    ],
)
def test_is_devrelease(version: str, expected: bool) -> None:
    v = PEP440Version.parse(version)
    assert v.is_devrelease() is expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", False),
        ("1+local", True),
        ("1+local.dev0", True),
        ("9!1.2.3a1.post2.dev3+local", True),
    ],
)
def test_is_local(version: str, expected: bool) -> None:
    v = PEP440Version.parse(version)
    assert v.is_local() is expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", True),
        ("1.2", True),
        ("1+local", True),
        ("1.dev0", False),
        ("1a0", False),
        ("1.post0", False),
    ],
)
def test_is_no_suffix_release(version: str, expected: bool) -> None:
    v = PEP440Version.parse(version)
    assert v.is_no_suffix_release() is expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", True),
        ("1.2", True),
        ("1.2.3", True),
        ("2!1.2.3", True),
        ("1.2.3+local", True),
        ("1.2.3.4", True),
        ("1.dev0", False),
        ("1.2dev0", False),
        ("1.2.3dev0", False),
        ("1.2.3.4dev0", False),
        ("1.post1", True),
        ("1.2.post1", True),
        ("1.2.3.post1", True),
        ("1.post1.dev0", False),
        ("1.2.post1.dev0", False),
        ("1.2.3.post1.dev0", False),
        ("1.a1", False),
        ("1.2a1", False),
        ("1.2.3a1", False),
        ("1.2.3.4a1", False),
        ("1.a1.post2", False),
        ("1.2a1.post2", False),
        ("1.2.3a1.post2", False),
        ("1.2.3.4a1.post2", False),
        ("1.a1.post2.dev0", False),
        ("1.2a1.post2.dev0", False),
        ("1.2.3a1.post2.dev0", False),
        ("1.2.3.4a1.post2.dev0", False),
    ],
)
def test_is_stable(version: str, expected: bool) -> None:
    subject = PEP440Version.parse(version)

    assert subject.is_stable() is expected
    assert subject.is_unstable() is not expected
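

# Sketch (mirrors rows above; the helper name is illustrative only): only
# pre-releases and dev-releases count as unstable; post-releases, local
# versions and epochs are still stable.
def _example_stability() -> None:
    assert PEP440Version.parse("1.2.3.post1").is_stable()
    assert PEP440Version.parse("1.2.3+local").is_stable()
    assert PEP440Version.parse("2!1.2.3").is_stable()
    assert PEP440Version.parse("1.2.3a1").is_unstable()
    assert PEP440Version.parse("1.2.3dev0").is_unstable()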


@pytest.mark.parametrize(
    "version, expected",
    [
        ("0", True),
        ("0.2", True),
        ("0.2.3", True),
        ("2!0.2.3", True),
        ("0.2.3+local", True),
        ("0.2.3.4", True),
        ("0.dev0", False),
        ("0.2dev0", False),
        ("0.2.3dev0", False),
        ("0.2.3.4dev0", False),
        ("0.post1", True),
        ("0.2.post1", True),
        ("0.2.3.post1", True),
        ("0.post1.dev0", False),
        ("0.2.post1.dev0", False),
        ("0.2.3.post1.dev0", False),
        ("0.a1", False),
        ("0.2a1", False),
        ("0.2.3a1", False),
        ("0.2.3.4a1", False),
        ("0.a1.post2", False),
        ("0.2a1.post2", False),
        ("0.2.3a1.post2", False),
        ("0.2.3.4a1.post2", False),
        ("0.a1.post2.dev0", False),
        ("0.2a1.post2.dev0", False),
        ("0.2.3a1.post2.dev0", False),
        ("0.2.3.4a1.post2.dev0", False),
    ],
)
def test_is_stable_all_major_0_versions_are_treated_as_normal_versions(
    version: str, expected: bool
) -> None:
    subject = PEP440Version.parse(version)

    assert subject.is_stable() is expected
    assert subject.is_unstable() is not expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "2"),
        ("2!1", "2!2"),
        ("1+local", "2"),
        ("1.2", "2.0"),
        ("1.2.3", "2.0.0"),
        ("1.2.3.4", "2.0.0.0"),
        ("1.dev0", "1"),
        ("1.2.dev0", "2.0"),
        ("1.post1", "2"),
        ("1.2.post1", "2.0"),
        ("1.post1.dev0", "2"),
        ("1.2.post1.dev0", "2.0"),
        ("1.a1", "1"),
        ("1.2a1", "2.0"),
        ("1.a1.post2", "1"),
        ("1.2a1.post2", "2.0"),
        ("1.a1.post2.dev0", "1"),
        ("1.2a1.post2.dev0", "2.0"),
    ],
)
def test_next_major(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.next_major().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "1.1"),
        ("1.2", "1.3"),
        ("2!1.2", "2!1.3"),
        ("1.2+local", "1.3"),
        ("1.2.3", "1.3.0"),
        ("1.2.3.4", "1.3.0.0"),
        ("1.dev0", "1"),
        ("1.2dev0", "1.2"),
        ("1.2.3dev0", "1.3.0"),
        ("1.post1", "1.1"),
        ("1.2.post1", "1.3"),
        ("1.2.3.post1", "1.3.0"),
        ("1.post1.dev0", "1.1"),
        ("1.2.post1.dev0", "1.3"),
        ("1.a1", "1"),
        ("1.2a1", "1.2"),
        ("1.2.3a1", "1.3.0"),
        ("1.a1.post2", "1"),
        ("1.2a1.post2", "1.2"),
        ("1.2.3a1.post2", "1.3.0"),
        ("1.a1.post2.dev0", "1"),
        ("1.2a1.post2.dev0", "1.2"),
        ("1.2.3a1.post2.dev0", "1.3.0"),
    ],
)
def test_next_minor(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.next_minor().text == expected
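

# Sketch (mirrors rows above; the helper name is illustrative only): for a
# pre-release of an x.y version, next_minor() completes to x.y itself; for a
# pre-release of a patch version, the minor is bumped.
def _example_next_minor_of_prerelease() -> None:
    assert PEP440Version.parse("1.2a1").next_minor().text == "1.2"
    assert PEP440Version.parse("1.2.3a1").next_minor().text == "1.3.0"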


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "1.0.1"),
        ("1.2", "1.2.1"),
        ("1.2.3", "1.2.4"),
        ("2!1.2.3", "2!1.2.4"),
        ("1.2.3+local", "1.2.4"),
        ("1.2.3.4", "1.2.4.0"),
        ("1.dev0", "1"),
        ("1.2dev0", "1.2"),
        ("1.2.3dev0", "1.2.3"),
        ("1.2.3.4dev0", "1.2.4.0"),
        ("1.post1", "1.0.1"),
        ("1.2.post1", "1.2.1"),
        ("1.2.3.post1", "1.2.4"),
        ("1.post1.dev0", "1.0.1"),
        ("1.2.post1.dev0", "1.2.1"),
        ("1.2.3.post1.dev0", "1.2.4"),
        ("1.a1", "1"),
        ("1.2a1", "1.2"),
        ("1.2.3a1", "1.2.3"),
        ("1.2.3.4a1", "1.2.4.0"),
        ("1.a1.post2", "1"),
        ("1.2a1.post2", "1.2"),
        ("1.2.3a1.post2", "1.2.3"),
        ("1.2.3.4a1.post2", "1.2.4.0"),
        ("1.a1.post2.dev0", "1"),
        ("1.2a1.post2.dev0", "1.2"),
        ("1.2.3a1.post2.dev0", "1.2.3"),
        ("1.2.3.4a1.post2.dev0", "1.2.4.0"),
    ],
)
def test_next_patch(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.next_patch().text == expected


@pytest.mark.parametrize(
    ("version", "expected"),
    [
        # simple versions (only "release" attribute) are tested in test_segments
        # via Release.next()
        ("1", "2"),
        ("2!1", "2!2"),
        ("1+local", "2+local"),
        ("1.post4", "2"),
        ("1.dev4", "1"),
        ("1.a4", "1"),
    ],
)
def test_next_stable(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.next_stable().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1.2a1", "1.2a2"),
        ("2!1.2a1", "2!1.2a2"),
        ("1.2dev0", "1.2a0"),
        ("1.2a1.dev0", "1.2a1"),
        ("1.2a1.post1.dev0", "1.2a2"),
    ],
)
def test_next_prerelease(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.next_prerelease().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "1.post0"),
        ("1.post1", "1.post2"),
        ("9!1.2.3.4", "9!1.2.3.4.post0"),
        ("9!1.2.3.4.post2", "9!1.2.3.4.post3"),
        ("1.dev0", "1.post0"),
        ("1.post1.dev0", "1.post1"),
        ("1a1", "1a1.post0"),
        ("1a1.dev0", "1a1.post0"),
        ("1a1.post2", "1a1.post3"),
        ("1a1.post2.dev0", "1a1.post2"),
    ],
)
def test_next_postrelease(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.next_postrelease().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("0.dev0", "0.dev1"),
        ("9!1.2.3a1.post2.dev3", "9!1.2.3a1.post2.dev4"),
    ],
)
def test_next_devrelease(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.next_devrelease().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "1a0"),
        ("9!1.2.3a1.post2.dev3", "9!1.2.3a0"),
    ],
)
def test_first_prerelease(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.first_prerelease().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "1.dev0"),
        ("1a1", "1a1.dev0"),
        ("1.post2", "1.post2.dev0"),
        ("9!1.2.3a1.post2.dev3", "9!1.2.3a1.post2.dev0"),
    ],
)
def test_first_devrelease(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.first_devrelease().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "1"),
        ("1+local.dev0", "1"),
        ("9!1.2.3a1.post2.dev3+local", "9!1.2.3a1.post2.dev3"),
    ],
)
def test_without_local(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.without_local().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "1"),
        ("1.dev0", "1.dev0"),
        ("9!1.2.3a1.post2.dev3", "9!1.2.3a1"),
    ],
)
def test_without_postrelease(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.without_postrelease().text == expected


@pytest.mark.parametrize(
    "version, expected",
    [
        ("1", "1"),
        ("1.dev0", "1"),
        ("9!1.2.3a1.post2.dev3", "9!1.2.3a1.post2"),
    ],
)
def test_without_devrelease(version: str, expected: str) -> None:
    v = PEP440Version.parse(version)
    assert v.without_devrelease().text == expected
poetry-core-2.1.1/tests/version/test_markers.py000066400000000000000000002512761475444614500217220ustar00rootroot00000000000000from __future__ import annotations

import os

from typing import TYPE_CHECKING

import pytest

from poetry.core.constraints.generic import UnionConstraint
from poetry.core.constraints.generic import parse_constraint as parse_generic_constraint
from poetry.core.constraints.version import parse_constraint as parse_version_constraint
from poetry.core.version.markers import AnyMarker
from poetry.core.version.markers import AtomicMarkerUnion
from poetry.core.version.markers import EmptyMarker
from poetry.core.version.markers import InvalidMarkerError
from poetry.core.version.markers import MarkerUnion
from poetry.core.version.markers import MultiMarker
from poetry.core.version.markers import SingleMarker
from poetry.core.version.markers import cnf
from poetry.core.version.markers import dnf
from poetry.core.version.markers import intersection
from poetry.core.version.markers import parse_marker
from poetry.core.version.markers import union


if TYPE_CHECKING:
    from poetry.core.version.markers import BaseMarker

EMPTY = ""


@pytest.mark.parametrize(
    "marker",
    [
        'sys_platform == "linux" or sys_platform == "win32"',
        'sys_platform == "win32" or sys_platform == "linux"',
        (
            'sys_platform == "linux" or sys_platform == "win32"'
            ' or sys_platform == "darwin"'
        ),
        (
            'python_version >= "3.6" and extra == "foo"'
            ' or implementation_name == "pypy" and extra == "bar"'
        ),
        (
            'python_version < "3.9" or python_version >= "3.10"'
            ' and sys_platform == "linux" or sys_platform == "win32"'
        ),
        (
            'sys_platform == "win32" and python_version < "3.6" or sys_platform =='
            ' "linux" and python_version < "3.6" and python_version >= "3.3" or'
            ' sys_platform == "darwin" and python_version < "3.3"'
        ),
        # "extra" is a special marker that can have multiple values at the same time.
        # Thus, "extra == 'a' and extra == 'b'" is not empty.
        # Further, "extra == 'a' and extra != 'b'" cannot be simplified
        # because it has the meaning "extra 'a' must and extra 'b' must not be active"
        'extra == "a" and extra == "b"',
        'extra == "a" and extra != "b"',
        'extra != "a" and extra == "b"',
        'extra != "a" and extra != "b"',
        'extra == "a" and extra == "b" and extra != "c" and extra != "d"',
        'extra == "a" or extra == "b"',
        'extra == "a" or extra != "b"',
        'extra != "a" or extra == "b"',
        'extra != "a" or extra != "b"',
        'extra == "a" or extra == "b" or extra != "c" or extra != "d"',
        # String comparison markers
        '"tegra" in platform_release',
        '"tegra" not in platform_release',
        '"tegra" in platform_release or "rpi-v8" in platform_release',
        '"tegra" not in platform_release and "rpi-v8" not in platform_release',
    ],
)
def test_parse_marker(marker: str) -> None:
    assert str(parse_marker(marker)) == marker
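

# Illustrative sketch (not part of the original suite; the helper name is
# illustrative only): since several extras can be active at once, intersecting
# equalities on two different extras does not collapse to an empty marker,
# unlike an ordinary single-valued marker.
def _example_extra_intersection_is_not_empty() -> None:
    m = parse_marker('extra == "a"').intersect(parse_marker('extra == "b"'))
    assert str(m) == 'extra == "a" and extra == "b"'
    assert not m.is_empty()
    # An ordinary marker variable holds a single value, so the analogous
    # intersection is contradictory:
    assert parse_marker(
        'sys_platform == "linux" and sys_platform == "win32"'
    ).is_empty()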


@pytest.mark.parametrize(
    ("marker", "valid"),
    [
        ('platform_release != "4.9.253-tegra"', True),
        ('python_version != "4.9.253-tegra"', False),
    ],
)
def test_parse_marker_non_python_versions(marker: str, valid: bool) -> None:
    if valid:
        assert str(parse_marker(marker)) == marker
    else:
        with pytest.raises(InvalidMarkerError):
            parse_marker(marker)
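

# Sketch (mirrors the cases above; the helper name is illustrative only):
# platform_release accepts arbitrary strings, while python_version values must
# be comparable as versions.
def _example_version_marker_validation() -> None:
    parse_marker('platform_release != "4.9.253-tegra"')  # parses fine
    with pytest.raises(InvalidMarkerError):
        parse_marker('python_version != "4.9.253-tegra"')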


@pytest.mark.parametrize(
    ("marker", "expected_name", "expected_constraint"),
    [
        ('sys_platform == "darwin"', "sys_platform", "darwin"),
        (
            'python_version in "2.7, 3.0, 3.1"',
            "python_version",
            ">=2.7,<2.8 || >=3.0,<3.2",
        ),
        ('"2.7" in python_version', "python_version", ">=2.7,<2.8"),
        (
            'python_version not in "2.7, 3.0, 3.1"',
            "python_version",
            "<2.7 || >=2.8,<3.0 || >=3.2",
        ),
        (
            (
                "platform_machine in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
                " amd64 AMD64 win32 WIN32'"
            ),
            "platform_machine",
            (
                "x86_64 || X86_64 || aarch64 || AARCH64 || ppc64le || PPC64LE || amd64"
                " || AMD64 || win32 || WIN32"
            ),
        ),
        (
            (
                "platform_machine not in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
                " amd64 AMD64 win32 WIN32'"
            ),
            "platform_machine",
            (
                "!=x86_64, !=X86_64, !=aarch64, !=AARCH64, !=ppc64le, !=PPC64LE,"
                " !=amd64, !=AMD64, !=win32, !=WIN32"
            ),
        ),
        (
            'platform_machine not in "aarch64|loongarch64"',
            "platform_machine",
            "!=aarch64, !=loongarch64",
        ),
        ('"tegra" not in platform_release', "platform_release", "'tegra' not in"),
        ('"rpi-v8" in platform_release', "platform_release", "'rpi-v8' in"),
        ('"arm" not in platform_version', "platform_version", "'arm' not in"),
        ('"arm" in platform_version', "platform_version", "'arm' in"),
    ],
)
def test_parse_single_marker(
    marker: str, expected_name: str, expected_constraint: str
) -> None:
    m = parse_marker(marker)

    assert isinstance(m, SingleMarker)
    assert m.name == expected_name
    assert str(m.constraint) == expected_constraint
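

# Sketch (mirrors the cases above; the helper name is illustrative only): for
# version-like markers, `in` means membership in any of the listed releases, so
# each listed version expands to a half-open range at its own precision.
def _example_version_in_expands_to_ranges() -> None:
    m = parse_marker('python_version in "2.7, 3.0, 3.1"')
    assert isinstance(m, SingleMarker)
    assert str(m.constraint) == ">=2.7,<2.8 || >=3.0,<3.2"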


def test_single_marker_normalisation() -> None:
    m1 = SingleMarker("python_version", ">=3.6")
    m2 = SingleMarker("python_version", ">= 3.6")
    assert m1 == m2
    assert hash(m1) == hash(m2)


def test_single_marker_intersect() -> None:
    m = parse_marker('sys_platform == "darwin"')

    intersection = m.intersect(parse_marker('implementation_name == "cpython"'))
    assert (
        str(intersection)
        == 'sys_platform == "darwin" and implementation_name == "cpython"'
    )

    m = parse_marker('python_version >= "3.4"')

    intersection = m.intersect(parse_marker('python_version < "3.6"'))
    assert str(intersection) == 'python_version >= "3.4" and python_version < "3.6"'


@pytest.mark.parametrize(
    ("marker1", "marker2", "expected"),
    [
        ('python_version < "3.6"', 'python_version < "3.4"', 'python_version < "3.4"'),
        (
            'python_version >= "3.6"',
            'python_version < "3.7"',
            'python_version == "3.6"',
        ),
    ],
)
def test_single_marker_intersect_is_single_marker(
    marker1: str, marker2: str, expected: str
) -> None:
    m = parse_marker(marker1)

    intersection = m.intersect(parse_marker(marker2))
    assert str(intersection) == expected


def test_single_marker_intersect_with_multi() -> None:
    m = parse_marker('sys_platform == "darwin"')

    intersection = m.intersect(
        parse_marker('implementation_name == "cpython" and python_version >= "3.6"')
    )
    assert (
        str(intersection)
        == 'implementation_name == "cpython" and python_version >= "3.6" and'
        ' sys_platform == "darwin"'
    )


def test_single_marker_intersect_with_multi_with_duplicate() -> None:
    m = parse_marker('python_version < "4.0"')

    intersection = m.intersect(
        parse_marker('sys_platform == "darwin" and python_version < "4.0"')
    )
    assert str(intersection) == 'sys_platform == "darwin" and python_version < "4.0"'


def test_single_marker_intersect_with_multi_compacts_constraint() -> None:
    m = parse_marker('python_version < "3.6"')

    intersection = m.intersect(
        parse_marker('implementation_name == "cpython" and python_version < "3.4"')
    )
    assert (
        str(intersection)
        == 'implementation_name == "cpython" and python_version < "3.4"'
    )


def test_single_marker_intersect_with_union_leads_to_single_marker() -> None:
    m = parse_marker('python_version >= "3.6"')

    intersection = m.intersect(
        parse_marker('python_version < "3.6" or python_version >= "3.7"')
    )
    assert str(intersection) == 'python_version >= "3.7"'


def test_single_marker_intersect_with_union_leads_to_empty() -> None:
    m = parse_marker('python_version == "3.7"')

    intersection = m.intersect(
        parse_marker('python_version < "3.7" or python_version >= "3.8"')
    )
    assert intersection.is_empty()


def test_single_marker_not_in_python_intersection() -> None:
    m = parse_marker('python_version not in "2.7, 3.0, 3.1"')

    intersection = m.intersect(
        parse_marker('python_version not in "2.7, 3.0, 3.1, 3.2"')
    )
    assert str(intersection) == 'python_version not in "2.7, 3.0, 3.1, 3.2"'


@pytest.mark.parametrize(
    ("marker1", "marker2", "expected"),
    [
        # same value
        ('extra == "a"', 'extra == "a"', 'extra == "a"'),
        ('extra == "a"', 'extra != "a"', ""),
        ('extra != "a"', 'extra == "a"', ""),
        ('extra != "a"', 'extra != "a"', 'extra != "a"'),
        # different values
        ('extra == "a"', 'extra == "b"', 'extra == "a" and extra == "b"'),
        ('extra == "a"', 'extra != "b"', 'extra == "a" and extra != "b"'),
        ('extra != "a"', 'extra == "b"', 'extra != "a" and extra == "b"'),
        ('extra != "a"', 'extra != "b"', 'extra != "a" and extra != "b"'),
        # AtomicMultiMarker
        (
            'extra == "a" and extra == "b"',
            'extra == "c"',
            'extra == "a" and extra == "b" and extra == "c"',
        ),
        (
            'extra != "a" and extra != "b"',
            'extra != "c"',
            'extra != "a" and extra != "b" and extra != "c"',
        ),
        (
            'extra != "a" and extra != "b"',
            'extra == "c"',
            'extra != "a" and extra != "b" and extra == "c"',
        ),
        (
            'extra == "a" and extra == "b"',
            'extra != "c"',
            'extra == "a" and extra == "b" and extra != "c"',
        ),
        (
            'extra == "a" and extra == "b"',
            'extra == "a" and extra == "b"',
            'extra == "a" and extra == "b"',
        ),
        (
            'extra == "a" and extra == "b"',
            'extra == "b" and extra == "a"',
            'extra == "a" and extra == "b"',
        ),
        (
            'extra == "a" and extra == "b"',
            'extra == "c" and extra != "d"',
            'extra == "a" and extra == "b" and extra == "c" and extra != "d"',
        ),
        ('extra != "a" and extra != "b"', 'extra == "a"', ""),
        ('extra != "a" and extra == "b"', 'extra == "a" and extra == "c"', ""),
        (
            'extra != "a" and extra != "b"',
            'extra != "a"',
            'extra != "a" and extra != "b"',
        ),
        (
            'extra == "a" and extra != "b"',
            'extra == "a"',
            'extra == "a" and extra != "b"',
        ),
        # AtomicMarkerUnion
        (
            'extra == "a" or extra == "b"',
            'extra == "c"',
            '(extra == "a" or extra == "b") and extra == "c"',
        ),
        (
            'extra == "a" or extra == "b"',
            'extra != "c"',
            '(extra == "a" or extra == "b") and extra != "c"',
        ),
        ('extra == "a" or extra == "b"', 'extra == "a"', 'extra == "a"'),
        ('extra != "a" or extra == "b"', 'extra != "a"', 'extra != "a"'),
        (
            'extra == "a" or extra == "b"',
            'extra != "a"',
            'extra == "b" and extra != "a"',
        ),
        (
            'extra == "a" or extra == "b"',
            'extra == "a" or extra == "b"',
            'extra == "a" or extra == "b"',
        ),
        (
            'extra == "a" or extra == "b"',
            'extra == "b" or extra == "a"',
            'extra == "a" or extra == "b"',
        ),
        (
            'extra == "a" or extra == "b"',
            'extra == "a" or extra != "c"',
            '(extra == "a" or extra == "b") and (extra == "a" or extra != "c")',
        ),
        # AtomicMultiMarker and AtomicMarkerUnion
        (
            'extra != "a" and extra != "b"',
            'extra == "a" or extra == "b"',
            "",
        ),
        (
            'extra != "a" and extra != "b"',
            'extra == "a" or extra == "c"',
            'extra == "c" and extra != "a" and extra != "b"',
        ),
    ],
)
def test_single_marker_intersect_extras(
    marker1: str, marker2: str, expected: str
) -> None:
    assert str(parse_marker(marker1).intersect(parse_marker(marker2))) == expected


def test_single_marker_union() -> None:
    m = parse_marker('sys_platform == "darwin"')

    union = m.union(parse_marker('implementation_name == "cpython"'))
    assert str(union) == 'sys_platform == "darwin" or implementation_name == "cpython"'


def test_single_marker_union_is_any() -> None:
    m = parse_marker('python_version >= "3.4"')

    union = m.union(parse_marker('python_version < "3.6"'))
    assert union.is_any()


@pytest.mark.parametrize(
    ("marker1", "marker2", "expected"),
    [
        (
            'python_version < "3.6"',
            'python_version < "3.4"',
            'python_version < "3.6"',
        ),
        (
            'sys_platform == "linux"',
            'sys_platform != "win32"',
            'sys_platform != "win32"',
        ),
        (
            'python_version == "3.6"',
            'python_version > "3.6"',
            'python_version >= "3.6"',
        ),
        (
            'python_version == "3.6"',
            'python_version < "3.6"',
            'python_version <= "3.6"',
        ),
        (
            'python_version < "3.6"',
            'python_version > "3.6"',
            'python_version != "3.6"',
        ),
        (
            'python_version == "3.6"',
            'python_version >= "3.7"',
            'python_version >= "3.6"',
        ),
        (
            'python_version <= "3.6"',
            'python_version >= "3.7"',
            "",
        ),
    ],
)
def test_single_marker_union_is_single_marker(
    marker1: str, marker2: str, expected: str
) -> None:
    m = parse_marker(marker1)

    union = m.union(parse_marker(marker2))
    assert str(union) == expected


def test_single_marker_union_with_multi() -> None:
    m = parse_marker('sys_platform == "darwin"')

    union = m.union(
        parse_marker('implementation_name == "cpython" and python_version >= "3.6"')
    )
    assert (
        str(union) == 'implementation_name == "cpython" and python_version >= "3.6" or'
        ' sys_platform == "darwin"'
    )


def test_single_marker_union_with_multi_duplicate() -> None:
    m = parse_marker('sys_platform == "darwin" and python_version >= "3.6"')

    union = m.union(
        parse_marker('sys_platform == "darwin" and python_version >= "3.6"')
    )
    assert str(union) == 'sys_platform == "darwin" and python_version >= "3.6"'


@pytest.mark.parametrize(
    ("single_marker", "multi_marker", "expected"),
    [
        (
            'python_version >= "3.6"',
            'python_version >= "3.7" and sys_platform == "win32"',
            'python_version >= "3.6"',
        ),
        (
            'sys_platform == "linux"',
            'sys_platform != "linux" and sys_platform != "win32"',
            'sys_platform != "win32"',
        ),
    ],
)
def test_single_marker_union_with_multi_is_single_marker(
    single_marker: str, multi_marker: str, expected: str
) -> None:
    m1 = parse_marker(single_marker)
    m2 = parse_marker(multi_marker)
    assert str(m1.union(m2)) == expected
    assert str(m2.union(m1)) == expected


def test_single_marker_union_with_multi_cannot_be_simplified() -> None:
    m = parse_marker('python_version >= "3.7"')
    union = m.union(parse_marker('python_version >= "3.6" and sys_platform == "win32"'))
    assert (
        str(union)
        == 'python_version >= "3.6" and sys_platform == "win32" or python_version >='
        ' "3.7"'
    )


def test_single_marker_union_with_multi_is_union_of_single_markers() -> None:
    m = parse_marker('python_version >= "3.6"')
    union = m.union(parse_marker('python_version < "3.6" and sys_platform == "win32"'))
    assert str(union) == 'sys_platform == "win32" or python_version >= "3.6"'


def test_single_marker_union_with_multi_union_is_union_of_single_markers() -> None:
    m = parse_marker('python_version >= "3.6"')
    union = m.union(
        parse_marker(
            'python_version < "3.6" and sys_platform == "win32" or python_version <'
            ' "3.6" and sys_platform == "linux"'
        )
    )
    assert (
        str(union)
        == 'sys_platform == "win32" or sys_platform == "linux" or python_version >='
        ' "3.6"'
    )


def test_single_marker_union_with_union() -> None:
    m = parse_marker('sys_platform == "darwin"')

    union = m.union(
        parse_marker('implementation_name == "cpython" or python_version >= "3.6"')
    )
    assert (
        str(union)
        == 'implementation_name == "cpython" or python_version >= "3.6" or sys_platform'
        ' == "darwin"'
    )


def test_single_marker_not_in_python_union() -> None:
    m = parse_marker('python_version not in "2.7, 3.0, 3.1"')

    union = m.union(parse_marker('python_version not in "2.7, 3.0, 3.1, 3.2"'))
    assert str(union) == 'python_version not in "2.7, 3.0, 3.1"'


def test_single_marker_union_with_union_duplicate() -> None:
    m = parse_marker('sys_platform == "darwin"')

    union = m.union(parse_marker('sys_platform == "darwin" or python_version >= "3.6"'))
    assert str(union) == 'sys_platform == "darwin" or python_version >= "3.6"'

    m = parse_marker('python_version >= "3.7"')

    union = m.union(parse_marker('sys_platform == "darwin" or python_version >= "3.6"'))
    assert str(union) == 'sys_platform == "darwin" or python_version >= "3.6"'

    m = parse_marker('python_version <= "3.6"')

    union = m.union(parse_marker('sys_platform == "darwin" or python_version < "3.4"'))
    assert str(union) == 'sys_platform == "darwin" or python_version <= "3.6"'


def test_single_marker_union_with_inverse() -> None:
    m = parse_marker('sys_platform == "darwin"')
    union = m.union(parse_marker('sys_platform != "darwin"'))
    assert union.is_any()


@pytest.mark.parametrize(
    ("marker1", "marker2", "expected"),
    [
        # same value
        ('extra == "a"', 'extra == "a"', 'extra == "a"'),
        ('extra == "a"', 'extra != "a"', ""),
        ('extra != "a"', 'extra == "a"', ""),
        ('extra != "a"', 'extra != "a"', 'extra != "a"'),
        # different values
        ('extra == "a"', 'extra == "b"', 'extra == "a" or extra == "b"'),
        ('extra == "a"', 'extra != "b"', 'extra == "a" or extra != "b"'),
        ('extra != "a"', 'extra == "b"', 'extra != "a" or extra == "b"'),
        ('extra != "a"', 'extra != "b"', 'extra != "a" or extra != "b"'),
        # AtomicMultiMarker
        (
            'extra == "a" and extra == "b"',
            'extra == "c"',
            'extra == "a" and extra == "b" or extra == "c"',
        ),
        (
            'extra != "a" and extra != "b"',
            'extra != "c"',
            'extra != "a" and extra != "b" or extra != "c"',
        ),
        (
            'extra != "a" and extra != "b"',
            'extra == "c"',
            'extra != "a" and extra != "b" or extra == "c"',
        ),
        (
            'extra == "a" and extra == "b"',
            'extra != "c"',
            'extra == "a" and extra == "b" or extra != "c"',
        ),
        (
            'extra == "a" and extra == "b"',
            'extra == "a" and extra == "b"',
            'extra == "a" and extra == "b"',
        ),
        (
            'extra == "a" and extra == "b"',
            'extra == "c" and extra != "d"',
            'extra == "a" and extra == "b" or extra == "c" and extra != "d"',
        ),
        (
            'extra != "a" and extra != "b"',
            'extra == "a"',
            'extra != "b" or extra == "a"',
        ),
        (
            'extra != "a" and extra == "b"',
            'extra == "a" and extra == "c"',
            'extra != "a" and extra == "b" or extra == "a" and extra == "c"',
        ),
        (
            'extra != "a" and extra != "b"',
            'extra != "a"',
            'extra != "a"',
        ),
        (
            'extra == "a" and extra != "b"',
            'extra == "a"',
            'extra == "a"',
        ),
        # AtomicMarkerUnion
        (
            'extra == "a" or extra == "b"',
            'extra == "c"',
            'extra == "a" or extra == "b" or extra == "c"',
        ),
        (
            'extra == "a" or extra == "b"',
            'extra != "c"',
            'extra == "a" or extra == "b" or extra != "c"',
        ),
        (
            'extra == "a" or extra == "b"',
            'extra == "a"',
            'extra == "a" or extra == "b"',
        ),
        (
            'extra != "a" or extra == "b"',
            'extra != "a"',
            'extra != "a" or extra == "b"',
        ),
        (
            'extra == "a" or extra == "b"',
            'extra != "a"',
            "",
        ),
        (
            'extra == "a" or extra == "b"',
            'extra == "a" or extra == "b"',
            'extra == "a" or extra == "b"',
        ),
        (
            'extra == "a" or extra == "b"',
            'extra == "a" or extra != "c"',
            'extra == "a" or extra == "b" or extra != "c"',
        ),
        # AtomicMultiMarker and AtomicMarkerUnion
        (
            'extra != "a" and extra != "b"',
            'extra == "a" or extra == "b"',
            'extra != "a" and extra != "b" or extra == "a" or extra == "b"',
        ),
        (
            'extra != "a" and extra != "b"',
            'extra == "a" or extra == "c"',
            'extra != "a" and extra != "b" or extra == "a" or extra == "c"',
        ),
    ],
)
def test_single_marker_union_extras(marker1: str, marker2: str, expected: str) -> None:
    assert str(parse_marker(marker1).union(parse_marker(marker2))) == expected


def test_multi_marker() -> None:
    m = parse_marker('sys_platform == "darwin" and implementation_name == "cpython"')

    assert isinstance(m, MultiMarker)
    assert m.markers == (
        parse_marker('sys_platform == "darwin"'),
        parse_marker('implementation_name == "cpython"'),
    )


def test_multi_marker_is_empty_is_contradictory() -> None:
    m = parse_marker(
        'sys_platform == "linux" and python_version >= "3.5" and python_version < "2.8"'
    )

    assert m.is_empty()

    m = parse_marker('sys_platform == "linux" and sys_platform == "win32"')

    assert m.is_empty()


def test_multi_complex_multi_marker_is_empty() -> None:
    m1 = parse_marker(
        'python_full_version >= "3.0.0" and python_full_version < "3.4.0"'
    )
    m2 = parse_marker(
        'python_version >= "3.6" and python_full_version < "3.0.0" and python_version <'
        ' "3.7"'
    )
    m3 = parse_marker(
        'python_version >= "3.6" and python_version < "3.7" and python_full_version >='
        ' "3.5.0"'
    )

    m = m1.intersect(m2.union(m3))

    assert m.is_empty()


def test_multi_marker_is_any() -> None:
    m1 = parse_marker('python_version != "3.6" or python_version == "3.6"')
    m2 = parse_marker('python_version != "3.7" or python_version == "3.7"')

    assert m1.intersect(m2).is_any()
    assert m2.intersect(m1).is_any()


def test_multi_marker_intersect_multi() -> None:
    m = parse_marker('sys_platform == "darwin" and implementation_name == "cpython"')

    intersection = m.intersect(
        parse_marker('python_version >= "3.6" and os_name == "Windows"')
    )
    assert (
        str(intersection)
        == 'sys_platform == "darwin" and implementation_name == "cpython" '
        'and python_version >= "3.6" and os_name == "Windows"'
    )


def test_multi_marker_intersect_multi_with_overlapping_constraints() -> None:
    m = parse_marker('sys_platform == "darwin" and python_version < "3.6"')

    intersection = m.intersect(
        parse_marker(
            'python_version <= "3.4" and os_name == "Windows" and sys_platform =='
            ' "darwin"'
        )
    )
    assert (
        str(intersection)
        == 'sys_platform == "darwin" and python_version <= "3.4" and os_name =='
        ' "Windows"'
    )


def test_multi_marker_intersect_with_union_drops_union() -> None:
    m = parse_marker('python_version >= "3" and python_version < "4"')
    m2 = parse_marker('python_version < "2" or python_version >= "3"')
    assert str(m.intersect(m2)) == str(m)
    assert str(m2.intersect(m)) == str(m)


def test_multi_marker_intersect_with_multi_union_leads_to_empty_in_one_step() -> None:
    # empty marker in one step
    # py == 2 and (py < 2 or py >= 3) -> empty
    m = parse_marker('sys_platform == "darwin" and python_version == "2"')
    m2 = parse_marker(
        'sys_platform == "darwin" and (python_version < "2" or python_version >= "3")'
    )
    assert m.intersect(m2).is_empty()
    assert m2.intersect(m).is_empty()


def test_multi_marker_intersect_with_multi_union_leads_to_empty_in_two_steps() -> None:
    # empty marker in two steps
    # py >= 2 and (py < 2 or py >= 3) -> py >= 3
    # py < 3 and py >= 3 -> empty
    m = parse_marker('python_version >= "2" and python_version < "3"')
    m2 = parse_marker(
        'sys_platform == "darwin" and (python_version < "2" or python_version >= "3")'
    )
    assert m.intersect(m2).is_empty()
    assert m2.intersect(m).is_empty()


def test_multi_marker_union_multi() -> None:
    m = parse_marker('sys_platform == "darwin" and implementation_name == "cpython"')

    union = m.union(parse_marker('python_version >= "3.6" and os_name == "Windows"'))
    assert (
        str(union) == 'sys_platform == "darwin" and implementation_name == "cpython" '
        'or python_version >= "3.6" and os_name == "Windows"'
    )


def test_multi_marker_union_multi_is_single_marker() -> None:
    m = parse_marker('python_version >= "3" and sys_platform == "win32"')
    m2 = parse_marker('sys_platform != "win32" and python_version >= "3"')
    assert str(m.union(m2)) == 'python_version >= "3"'
    assert str(m2.union(m)) == 'python_version >= "3"'


@pytest.mark.parametrize(
    "marker1, marker2, expected",
    [
        (
            'python_version >= "3" and sys_platform == "win32"',
            (
                'python_version >= "3" and sys_platform != "win32" and sys_platform !='
                ' "linux"'
            ),
            'python_version >= "3" and sys_platform != "linux"',
        ),
        (
            (
                'python_version >= "3.8" and python_version < "4.0" and sys_platform =='
                ' "win32"'
            ),
            'python_version >= "3.8" and python_version < "4.0"',
            'python_version >= "3.8" and python_version < "4.0"',
        ),
    ],
)
def test_multi_marker_union_multi_is_multi(
    marker1: str, marker2: str, expected: str
) -> None:
    m1 = parse_marker(marker1)
    m2 = parse_marker(marker2)
    assert str(m1.union(m2)) == expected
    assert str(m2.union(m1)) == expected


@pytest.mark.parametrize(
    "marker1, marker2, expected",
    [
        # Ranges with same start
        (
            'python_version >= "3.6" and python_full_version < "3.6.2"',
            'python_version >= "3.6" and python_version < "3.7"',
            'python_version == "3.6"',
        ),
        (
            'python_version >= "3.6" and python_full_version < "3.7.2"',
            'python_version >= "3.6" and python_version < "3.8"',
            'python_version >= "3.6" and python_version < "3.8"',
        ),
        (
            'python_version > "3.6" and python_full_version < "3.6.2"',
            'python_version > "3.6" and python_version < "3.7"',
            'python_version > "3.6" and python_version < "3.7"',
        ),
        (
            'python_version > "3.6" and python_full_version < "3.7.2"',
            'python_version > "3.6" and python_version < "3.8"',
            'python_version > "3.6" and python_version < "3.8"',
        ),
        # Ranges meet exactly
        (
            'python_version >= "3.6" and python_full_version < "3.6.2"',
            'python_full_version >= "3.6.2" and python_version < "3.7"',
            'python_version == "3.6"',
        ),
        (
            'python_version >= "3.6" and python_full_version < "3.7.2"',
            'python_full_version >= "3.6.2" and python_version < "3.8"',
            'python_version >= "3.6" and python_version < "3.8"',
        ),
        (
            'python_version >= "3.6" and python_full_version <= "3.6.2"',
            'python_full_version > "3.6.2" and python_version < "3.7"',
            'python_version == "3.6"',
        ),
        (
            'python_version >= "3.6" and python_full_version <= "3.7.2"',
            'python_full_version > "3.6.2" and python_version < "3.8"',
            'python_version >= "3.6" and python_version < "3.8"',
        ),
        # Ranges overlap
        (
            'python_version >= "3.6" and python_full_version <= "3.6.8"',
            'python_full_version >= "3.6.2" and python_version < "3.7"',
            'python_version == "3.6"',
        ),
        (
            'python_version >= "3.6" and python_full_version <= "3.7.8"',
            'python_full_version >= "3.6.2" and python_version < "3.8"',
            'python_version >= "3.6" and python_version < "3.8"',
        ),
        # Ranges with same end.
        (
            'python_version >= "3.6" and python_version < "3.7"',
            'python_full_version >= "3.6.2" and python_version < "3.7"',
            'python_version == "3.6"',
        ),
        (
            'python_version >= "3.6" and python_version < "3.8"',
            'python_full_version >= "3.6.2" and python_version < "3.8"',
            'python_version >= "3.6" and python_version < "3.8"',
        ),
        (
            'python_version >= "3.6" and python_version <= "3.7"',
            'python_full_version >= "3.6.2" and python_version <= "3.7"',
            'python_version >= "3.6" and python_version <= "3.7"',
        ),
        # A range covers an exact marker.
        (
            'python_version >= "3.6" and python_version <= "3.7"',
            'python_version == "3.6"',
            'python_version >= "3.6" and python_version <= "3.7"',
        ),
        (
            'python_version >= "3.6" and python_version <= "3.7"',
            'python_version == "3.6" and implementation_name == "cpython"',
            'python_version >= "3.6" and python_version <= "3.7"',
        ),
        (
            'python_version >= "3.6" and python_version <= "3.7"',
            'python_full_version == "3.6.2"',
            'python_version >= "3.6" and python_version <= "3.7"',
        ),
        (
            'python_version >= "3.6" and python_version <= "3.7"',
            'python_full_version == "3.6.2" and implementation_name == "cpython"',
            'python_version >= "3.6" and python_version <= "3.7"',
        ),
        (
            'python_version >= "3.6" and python_version <= "3.7"',
            'python_version == "3.7"',
            'python_version >= "3.6" and python_version <= "3.7"',
        ),
        (
            'python_version >= "3.6" and python_version <= "3.7"',
            'python_version == "3.7" and implementation_name == "cpython"',
            'python_version >= "3.6" and python_version <= "3.7"',
        ),
    ],
)
def test_version_ranges_collapse_on_union(
    marker1: str, marker2: str, expected: str
) -> None:
    m1 = parse_marker(marker1)
    m2 = parse_marker(marker2)
    assert str(m1.union(m2)) == expected
    assert str(m2.union(m1)) == expected


def test_multi_marker_union_with_union() -> None:
    m1 = parse_marker('sys_platform == "darwin" and implementation_name == "cpython"')
    m2 = parse_marker('python_version >= "3.6" or os_name == "Windows"')

    # Union isn't _quite_ symmetrical.
    expected1 = (
        'sys_platform == "darwin" and implementation_name == "cpython" or'
        ' python_version >= "3.6" or os_name == "Windows"'
    )
    assert str(m1.union(m2)) == expected1

    expected2 = (
        'python_version >= "3.6" or os_name == "Windows" or'
        ' sys_platform == "darwin" and implementation_name == "cpython"'
    )
    assert str(m2.union(m1)) == expected2


def test_multi_marker_union_with_multi_union_is_single_marker() -> None:
    m = parse_marker('sys_platform == "darwin" and python_version == "3"')
    m2 = parse_marker(
        'sys_platform == "darwin" and python_version < "3" or sys_platform == "darwin"'
        ' and python_version > "3"'
    )
    assert str(m.union(m2)) == 'sys_platform == "darwin"'
    assert str(m2.union(m)) == 'sys_platform == "darwin"'


def test_multi_marker_union_with_union_multi_is_single_marker() -> None:
    m = parse_marker('sys_platform == "darwin" and python_version == "3"')
    m2 = parse_marker(
        'sys_platform == "darwin" and (python_version < "3" or python_version > "3")'
    )
    assert str(m.union(m2)) == 'sys_platform == "darwin"'
    assert str(m2.union(m)) == 'sys_platform == "darwin"'


def test_marker_union() -> None:
    m = parse_marker('sys_platform == "darwin" or implementation_name == "cpython"')

    assert isinstance(m, MarkerUnion)
    assert m.markers == (
        parse_marker('sys_platform == "darwin"'),
        parse_marker('implementation_name == "cpython"'),
    )


def test_marker_union_deduplicate() -> None:
    m = parse_marker(
        'sys_platform == "darwin" or implementation_name == "cpython" or sys_platform'
        ' == "darwin"'
    )

    assert str(m) == 'sys_platform == "darwin" or implementation_name == "cpython"'


def test_marker_union_intersect_single_marker() -> None:
    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')

    intersection = m.intersect(parse_marker('implementation_name == "cpython"'))
    assert (
        str(intersection) == '(sys_platform == "darwin" or python_version < "3.4")'
        ' and implementation_name == "cpython"'
    )


def test_marker_union_intersect_single_with_overlapping_constraints() -> None:
    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')

    intersection = m.intersect(parse_marker('python_version <= "3.6"'))
    assert (
        str(intersection)
        == 'sys_platform == "darwin" and python_version <= "3.6" or python_version <'
        ' "3.4"'
    )

    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
    intersection = m.intersect(parse_marker('sys_platform == "darwin"'))
    assert str(intersection) == 'sys_platform == "darwin"'


def test_marker_union_intersect_marker_union() -> None:
    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')

    intersection = m.intersect(
        parse_marker('implementation_name == "cpython" or os_name == "Windows"')
    )
    assert (
        str(intersection) == '(sys_platform == "darwin" or python_version < "3.4") and '
        '(implementation_name == "cpython" or os_name == "Windows")'
    )


def test_marker_union_intersect_marker_union_drops_unnecessary_markers() -> None:
    m = parse_marker(
        'python_version >= "2.7" and python_version < "2.8" '
        'or python_version >= "3.4" and python_version < "4.0"'
    )
    m2 = parse_marker(
        'python_version >= "2.7" and python_version < "2.8" '
        'or python_version >= "3.4" and python_version < "4.0"'
    )

    intersection = m.intersect(m2)
    expected = (
        'python_version == "2.7" or python_version >= "3.4" and python_version < "4.0"'
    )
    assert str(intersection) == expected


def test_marker_union_intersect_multi_marker() -> None:
    m1 = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
    m2 = parse_marker('implementation_name == "cpython" and os_name == "Windows"')

    # Intersection isn't _quite_ symmetrical.
    expected1 = (
        '(sys_platform == "darwin" or python_version < "3.4")'
        ' and implementation_name == "cpython" and os_name == "Windows"'
    )

    intersection = m1.intersect(m2)
    assert str(intersection) == expected1

    expected2 = (
        'implementation_name == "cpython" and os_name == "Windows"'
        ' and (sys_platform == "darwin" or python_version < "3.4")'
    )

    intersection = m2.intersect(m1)
    assert str(intersection) == expected2


def test_marker_union_union_with_union() -> None:
    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')

    union = m.union(
        parse_marker('implementation_name == "cpython" or os_name == "Windows"')
    )
    assert (
        str(union) == 'sys_platform == "darwin" or python_version < "3.4" '
        'or implementation_name == "cpython" or os_name == "Windows"'
    )


def test_marker_union_union_duplicates() -> None:
    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')

    union = m.union(parse_marker('sys_platform == "darwin" or os_name == "Windows"'))
    assert (
        str(union)
        == 'sys_platform == "darwin" or python_version < "3.4" or os_name == "Windows"'
    )

    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')

    union = m.union(
        parse_marker(
            'sys_platform == "darwin" or os_name == "Windows" or python_version <='
            ' "3.6"'
        )
    )
    assert (
        str(union)
        == 'sys_platform == "darwin" or python_version <= "3.6" or os_name == "Windows"'
    )


def test_marker_union_all_any() -> None:
    union = MarkerUnion.of(parse_marker(""), parse_marker(""))

    assert union.is_any()


def test_marker_union_not_all_any() -> None:
    union = MarkerUnion.of(parse_marker(""), parse_marker(""), parse_marker(EMPTY))

    assert union.is_any()


def test_marker_union_all_empty() -> None:
    union = MarkerUnion.of(parse_marker(EMPTY), parse_marker(EMPTY))

    assert union.is_empty()


def test_marker_union_not_all_empty() -> None:
    union = MarkerUnion.of(parse_marker(EMPTY), parse_marker(EMPTY), parse_marker(""))

    assert not union.is_empty()
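

# Editorial note: the four tests above pin down the union identities used
# throughout this module: x.union(any) is any, and x.union(empty) is x.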


def test_intersect_compacts_constraints() -> None:
    m = parse_marker('python_version < "4.0"')

    intersection = m.intersect(parse_marker('python_version < "5.0"'))
    assert str(intersection) == 'python_version < "4.0"'


def test_multi_marker_removes_duplicates() -> None:
    m = parse_marker('sys_platform == "win32" and sys_platform == "win32"')

    assert str(m) == 'sys_platform == "win32"'

    m = parse_marker(
        'sys_platform == "darwin" and implementation_name == "cpython" '
        'and sys_platform == "darwin" and implementation_name == "cpython"'
    )

    assert str(m) == 'sys_platform == "darwin" and implementation_name == "cpython"'


@pytest.mark.parametrize(
    ("marker_string", "environment", "expected"),
    [
        (f"os_name == '{os.name}'", None, True),
        ("os_name == 'foo'", {"os_name": "foo"}, True),
        ("os_name == 'foo'", {"os_name": "bar"}, False),
        ("'2.7' in python_version", {"python_version": "2.7.5"}, True),
        ("'2.7' not in python_version", {"python_version": "2.7"}, False),
        (
            "os_name == 'foo' and python_version ~= '2.7.0'",
            {"os_name": "foo", "python_version": "2.7.6"},
            True,
        ),
        (
            "python_version ~= '2.7.0' and (os_name == 'foo' or os_name == 'bar')",
            {"os_name": "foo", "python_version": "2.7.4"},
            True,
        ),
        (
            "python_version ~= '2.7.0' and (os_name == 'foo' or os_name == 'bar')",
            {"os_name": "bar", "python_version": "2.7.4"},
            True,
        ),
        (
            "python_version ~= '2.7.0' and (os_name == 'foo' or os_name == 'bar')",
            {"os_name": "other", "python_version": "2.7.4"},
            False,
        ),
        (f"os.name == '{os.name}'", None, True),
        ("sys.platform == 'win32'", {"sys_platform": "linux2"}, False),
        ("platform.version in 'Ubuntu'", {"platform_version": "#39"}, False),
        ("platform.machine=='x86_64'", {"platform_machine": "x86_64"}, True),
        (
            "platform.python_implementation=='Jython'",
            {"platform_python_implementation": "CPython"},
            False,
        ),
        (
            "python_version == '2.5' and platform.python_implementation!= 'Jython'",
            {"python_version": "2.7"},
            False,
        ),
        (
            (
                "platform_machine in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
                " amd64 AMD64 win32 WIN32'"
            ),
            {"platform_machine": "foo"},
            False,
        ),
        (
            (
                "platform_machine in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
                " amd64 AMD64 win32 WIN32'"
            ),
            {"platform_machine": "x86_64"},
            True,
        ),
        (
            (
                "platform_machine not in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
                " amd64 AMD64 win32 WIN32'"
            ),
            {"platform_machine": "foo"},
            True,
        ),
        (
            (
                "platform_machine not in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
                " amd64 AMD64 win32 WIN32'"
            ),
            {"platform_machine": "x86_64"},
            False,
        ),
        ('"tegra" in platform_release', {"platform_release": "5.10.120-tegra"}, True),
        ('"tegra" in platform_release', {"platform_release": "5.10.120"}, False),
        (
            '"tegra" not in platform_release',
            {"platform_release": "5.10.120-tegra"},
            False,
        ),
        ('"tegra" not in platform_release', {"platform_release": "5.10.120"}, True),
        (
            "platform_machine == 'aarch64' and 'tegra' in platform_release",
            {"platform_release": "5.10.120-tegra", "platform_machine": "aarch64"},
            True,
        ),
        (
            "platform_release != '4.9.253-tegra'",
            {"platform_release": "4.9.254-tegra"},
            True,
        ),
        (
            "platform_release != '4.9.253-tegra'",
            {"platform_release": "4.9.253"},
            True,
        ),
        (
            "platform_release >= '6.6.0+rpt-rpi-v8'",
            {"platform_release": "6.6.20+rpt-rpi-v8"},
            True,
        ),
        (
            "platform_release < '5.10.123-tegra' and platform_release >= '4.9.254-tegra'",
            {"platform_release": "4.9.254-tegra"},
            True,
        ),
        # extras
        # single extra
        ("extra == 'security'", {"extra": "quux"}, False),
        ("extra == 'security'", {"extra": "security"}, True),
        ("extra != 'security'", {"extra": "quux"}, True),
        ("extra != 'security'", {"extra": "security"}, False),
        # normalization
        ("extra == 'Security.1'", {"extra": "security-1"}, True),
        # extra unknown
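        # (an environment without an "extra" key leaves extra markers satisfied)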
        ("extra == 'a'", {}, True),
        ("extra != 'a'", {}, True),
        ("extra == 'a' and extra == 'b'", {}, True),
        # extra explicitly not set
        ("extra == 'a'", {"extra": ()}, False),
        ("extra != 'b'", {"extra": ()}, True),
        ("extra == 'a' and extra == 'b'", {"extra": ()}, False),
        ("extra == 'a' or extra == 'b'", {"extra": ()}, False),
        ("extra != 'a' and extra != 'b'", {"extra": ()}, True),
        ("extra != 'a' or extra != 'b'", {"extra": ()}, True),
        ("extra != 'a' and extra == 'b'", {"extra": ()}, False),
        ("extra != 'a' or extra == 'b'", {"extra": ()}, True),
        # multiple extras
        ("extra == 'a'", {"extra": ("a", "b")}, True),
        ("extra == 'a'", {"extra": ("b", "c")}, False),
        ("extra != 'a'", {"extra": ("a", "b")}, False),
        ("extra != 'a'", {"extra": ("b", "c")}, True),
        ("extra == 'a' and extra == 'b'", {"extra": ("a", "b", "c")}, True),
        ("extra == 'a' and extra == 'b'", {"extra": ("a", "c")}, False),
        ("extra == 'a' or extra == 'b'", {"extra": ("a", "c")}, True),
        ("extra == 'a' or extra == 'b'", {"extra": ("b", "c")}, True),
        ("extra == 'a' or extra == 'b'", {"extra": ("c", "d")}, False),
        ("extra != 'a' and extra != 'b'", {"extra": ("a", "c")}, False),
        ("extra != 'a' and extra != 'b'", {"extra": ("b", "c")}, False),
        ("extra != 'a' and extra != 'b'", {"extra": ("c", "d")}, True),
        ("extra != 'a' or extra != 'b'", {"extra": ("a", "b", "c")}, False),
        ("extra != 'a' or extra != 'b'", {"extra": ("a", "c")}, True),
        ("extra != 'a' or extra != 'b'", {"extra": ("b", "c")}, True),
        ("extra != 'a' and extra == 'b'", {"extra": ("a", "b")}, False),
        ("extra != 'a' and extra == 'b'", {"extra": ("b", "c")}, True),
        ("extra != 'a' and extra == 'b'", {"extra": ("c", "d")}, False),
        ("extra != 'a' or extra == 'b'", {"extra": ("a", "b")}, True),
        ("extra != 'a' or extra == 'b'", {"extra": ("c", "d")}, True),
        ("extra != 'a' or extra == 'b'", {"extra": ("a", "c")}, False),
    ],
)
def test_validate(
    marker_string: str, environment: dict[str, str] | None, expected: bool
) -> None:
    m = parse_marker(marker_string)

    assert m.validate(environment) is expected
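

# Editorial usage sketch (not part of the original suite): validate()
# evaluates a marker against an environment dict; passing None evaluates
# against the current interpreter.
def _example_validate() -> None:
    m = parse_marker("os_name == 'foo' and python_version ~= '2.7.0'")
    assert m.validate({"os_name": "foo", "python_version": "2.7.6"}) is True
    assert m.validate({"os_name": "bar", "python_version": "2.7.6"}) is False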


@pytest.mark.parametrize(
    "marker, env",
    [
        (
            'platform_release >= "9.0" and platform_release < "11.0"',
            {"platform_release": "10.0"},
        )
    ],
)
def test_parse_version_like_markers(marker: str, env: dict[str, str]) -> None:
    m = parse_marker(marker)

    assert m.validate(env)


@pytest.mark.parametrize(
    "marker, expected",
    [
        ('python_version >= "3.6"', 'python_version >= "3.6"'),
        ('python_version >= "3.6" and extra == "foo"', 'python_version >= "3.6"'),
        (
            'python_version >= "3.6" and (extra == "foo" or extra == "bar")',
            'python_version >= "3.6"',
        ),
        (
            (
                'python_version >= "3.6" and (extra == "foo" or extra == "bar") or'
                ' implementation_name == "pypy"'
            ),
            'python_version >= "3.6" or implementation_name == "pypy"',
        ),
        (
            (
                'python_version >= "3.6" and extra == "foo" or implementation_name =='
                ' "pypy" and extra == "bar"'
            ),
            'python_version >= "3.6" or implementation_name == "pypy"',
        ),
        (
            (
                'python_version >= "3.6" or extra == "foo" and implementation_name =='
                ' "pypy" or extra == "bar"'
            ),
            'python_version >= "3.6" or implementation_name == "pypy"',
        ),
        ('extra == "foo"', ""),
        ('extra == "foo" or extra == "bar"', ""),
    ],
)
def test_without_extras(marker: str, expected: str) -> None:
    m = parse_marker(marker)

    assert str(m.without_extras()) == expected
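

# Editorial usage sketch (not part of the original suite): without_extras()
# drops all "extra" clauses and keeps the environment-related parts.
def _example_without_extras() -> None:
    m = parse_marker('python_version >= "3.6" and extra == "foo"')
    assert str(m.without_extras()) == 'python_version >= "3.6"'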


@pytest.mark.parametrize(
    "marker, excluded, expected",
    [
        ('python_version >= "3.6"', "implementation_name", 'python_version >= "3.6"'),
        ('python_version >= "3.6"', "python_version", "*"),
        ('python_version >= "3.6" and python_version < "3.11"', "python_version", "*"),
        (
            'python_version >= "3.6" and extra == "foo"',
            "extra",
            'python_version >= "3.6"',
        ),
        (
            'python_version >= "3.6" and (extra == "foo" or extra == "bar")',
            "python_version",
            'extra == "foo" or extra == "bar"',
        ),
        (
            (
                'python_version >= "3.6" and (extra == "foo" or extra == "bar") or'
                ' implementation_name == "pypy"'
            ),
            "python_version",
            'extra == "foo" or extra == "bar" or implementation_name == "pypy"',
        ),
        (
            (
                'python_version >= "3.6" and extra == "foo" or implementation_name =='
                ' "pypy" and extra == "bar"'
            ),
            "implementation_name",
            'python_version >= "3.6" and extra == "foo" or extra == "bar"',
        ),
        (
            (
                'python_version >= "3.6" or extra == "foo" and implementation_name =='
                ' "pypy" or extra == "bar"'
            ),
            "implementation_name",
            'python_version >= "3.6" or extra == "foo" or extra == "bar"',
        ),
        (
            'extra == "foo" and python_version >= "3.6" or python_version >= "3.6"',
            "extra",
            'python_version >= "3.6"',
        ),
        (
            (
                'python_version >= "2.7" and (python_version < "2.8"'
                ' or python_version >= "3.7") and python_version < "3.8"'
                ' and extra == "foo"'
            ),
            "extra",
            ('python_version == "2.7" or python_version == "3.7"'),
        ),
    ],
)
def test_exclude(marker: str, excluded: str, expected: str) -> None:
    m = parse_marker(marker)

    if expected == "*":
        assert m.exclude(excluded).is_any()
    else:
        assert str(m.exclude(excluded)) == expected
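

# Editorial usage sketch (not part of the original suite): exclude() removes
# every clause that references the given marker name; "*" in the table above
# stands for the resulting AnyMarker.
def _example_exclude() -> None:
    m = parse_marker('python_version >= "3.6" and extra == "foo"')
    assert str(m.exclude("extra")) == 'python_version >= "3.6"'
    assert m.exclude("python_version").exclude("extra").is_any()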


@pytest.mark.parametrize(
    "marker, only, expected",
    [
        ('python_version >= "3.6"', ["python_version"], 'python_version >= "3.6"'),
        ('python_version >= "3.6"', ["sys_platform"], ""),
        (
            'python_version >= "3.6" and extra == "foo"',
            ["python_version"],
            'python_version >= "3.6"',
        ),
        ('python_version >= "3.6" and extra == "foo"', ["sys_platform"], ""),
        ('python_version >= "3.6" or extra == "foo"', ["sys_platform"], ""),
        ('python_version >= "3.6" or extra == "foo"', ["python_version"], ""),
        (
            'python_version >= "3.6" and (extra == "foo" or extra == "bar")',
            ["extra"],
            'extra == "foo" or extra == "bar"',
        ),
        (
            (
                'python_version >= "3.6" and (extra == "foo" or extra == "bar") or'
                ' implementation_name == "pypy"'
            ),
            ["implementation_name"],
            "",
        ),
        (
            (
                'python_version >= "3.6" and (extra == "foo" or extra == "bar") or'
                ' implementation_name == "pypy"'
            ),
            ["implementation_name", "extra"],
            'extra == "foo" or extra == "bar" or implementation_name == "pypy"',
        ),
        (
            (
                'python_version >= "3.6" and (extra == "foo" or extra == "bar") or'
                ' implementation_name == "pypy"'
            ),
            ["implementation_name", "python_version"],
            'python_version >= "3.6" or implementation_name == "pypy"',
        ),
        (
            (
                'python_version >= "3.6" and extra == "foo" or implementation_name =='
                ' "pypy" and extra == "bar"'
            ),
            ["implementation_name", "extra"],
            'extra == "foo" or implementation_name == "pypy" and extra == "bar"',
        ),
    ],
)
def test_only(marker: str, only: list[str], expected: str) -> None:
    m = parse_marker(marker)

    assert str(m.only(*only)) == expected
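

# Editorial usage sketch (not part of the original suite): only() is the
# complement of exclude(); it keeps just the clauses for the given names.
def _example_only() -> None:
    m = parse_marker('python_version >= "3.6" and extra == "foo"')
    assert str(m.only("python_version")) == 'python_version >= "3.6"'
    assert str(m.only("sys_platform")) == ""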


@pytest.mark.parametrize(
    ("marker", "constraint", "expected"),
    [
        ("", "~3.8", ""),
        ("", "~3.8", ""),
        ('sys_platform == "linux"', "~3.8", 'sys_platform == "linux"'),
        ('python_version >= "3.8"', "~3.8", ""),
        ('python_version == "3.8"', ">=3.8.7,<3.9.0", ""),
        ('python_version == "3.8" or python_version >= "3.9"', ">=3.8.0,<4.0.0", ""),
        ('python_version == "3.8" or python_version >= "3.9"', ">=3.8.7,<4.0.0", ""),
        ('python_version > "3.7"', "~3.8", ""),
        ('python_version > "3.8"', "~3.8", ""),
        ('python_version >= "3.8"', "~3.8", ""),
        ('python_version >= "3.9"', "~3.8", ""),
        ('python_full_version >= "3.8.0"', "~3.8", ""),
        ('python_full_version >= "3.8.1"', "~3.8", 'python_full_version >= "3.8.1"'),
        ('python_full_version < "3.8.0"', "~3.8", ""),
        ('python_version >= "3.8" and python_version < "3.9"', "~3.8", ""),
        ('python_version >= "3.7" and python_version < "4.0"', "~3.8", ""),
        (
            'python_full_version >= "3.8.1" and python_version < "3.9"',
            "~3.8",
            'python_full_version >= "3.8.1"',
        ),
        (
            'python_version >= "3.8" and python_full_version < "3.8.2"',
            "~3.8",
            'python_full_version < "3.8.2"',
        ),
        (
            'python_version >= "3.8" and sys_platform == "linux" and python_version < "3.9"',
            "~3.8",
            'sys_platform == "linux"',
        ),
        ('python_version < "3.8" or python_version >= "3.9"', "~3.9", ""),
        (
            'python_version < "3.8" or python_version >= "3.9"',
            ">=3.7",
            'python_version < "3.8" or python_version >= "3.9"',
        ),
        ('python_version < "3.8" or python_version >= "3.9"', "~3.7", ""),
        (
            'python_version < "3.8" or python_version >= "3.9"',
            "<=3.10",
            'python_version < "3.8" or python_version >= "3.9"',
        ),
        (
            (
                'python_version < "3.8"'
                ' or python_version >= "3.9" and sys_platform == "linux"'
            ),
            "~3.9",
            'sys_platform == "linux"',
        ),
        ('python_version < "3.8" or python_version >= "3.9"', "~3.7 || ~3.9", ""),
        (
            'python_version < "3.8" or python_version >= "3.9"',
            "~3.6 || ~3.8",
            'python_version < "3.8"',
        ),
        (
            (
                'python_version < "3.8" or sys_platform == "linux"'
                ' or python_version >= "3.9"'
            ),
            "~3.7 || ~3.9",
            'sys_platform == "linux"',
        ),
        (
            (
                'python_version < "3.8" or sys_platform == "linux"'
                ' or python_version >= "3.9" or sys_platform == "win32"'
            ),
            "~3.7 || ~3.9",
            'sys_platform == "linux" or sys_platform == "win32"',
        ),
        (
            'python_version == "3.8" or sys_platform == "linux" or python_version >= "3.9"',
            ">=3.8.0,<4.0.0",
            'sys_platform == "linux"',
        ),
        (
            'python_version == "3.8" or sys_platform == "linux" or python_version >= "3.9"',
            ">=3.8.7,<4.0.0",
            'sys_platform == "linux"',
        ),
    ],
)
def test_reduce_by_python_constraint(
    marker: str, constraint: str, expected: str
) -> None:
    m = parse_marker(marker)
    c = parse_version_constraint(constraint)

    assert str(m.reduce_by_python_constraint(c)) == expected
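

# Editorial usage sketch (not part of the original suite):
# reduce_by_python_constraint() simplifies a marker under the assumption that
# the given python constraint holds, so clauses that are always true within
# the constraint drop out.
def _example_reduce_by_python_constraint() -> None:
    m = parse_marker('python_version >= "3.8" and sys_platform == "linux"')
    c = parse_version_constraint("~3.8")
    assert str(m.reduce_by_python_constraint(c)) == 'sys_platform == "linux"'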


def test_union_of_a_single_marker_is_the_single_marker() -> None:
    union = MarkerUnion.of(SingleMarker("python_version", ">= 2.7"))

    assert SingleMarker("python_version", ">= 2.7") == union


def test_union_of_multi_with_a_containing_single() -> None:
    single = parse_marker('python_version >= "2.7"')
    multi = parse_marker('python_version >= "2.7" and extra == "foo"')
    union = multi.union(single)

    assert union == single


@pytest.mark.parametrize(
    "marker, inverse",
    [
        ('implementation_name == "pypy"', 'implementation_name != "pypy"'),
        ('implementation_name === "pypy"', 'implementation_name != "pypy"'),
        ('implementation_name != "pypy"', 'implementation_name == "pypy"'),
        ('python_version in "2.7, 3.0, 3.1"', 'python_version not in "2.7, 3.0, 3.1"'),
        ('python_version not in "2.7, 3.0, 3.1"', 'python_version in "2.7, 3.0, 3.1"'),
        ('python_version < "3.6"', 'python_version >= "3.6"'),
        ('python_version >= "3.6"', 'python_version < "3.6"'),
        ('python_version <= "3.6"', 'python_version > "3.6"'),
        ('python_version > "3.6"', 'python_version <= "3.6"'),
        (
            'python_version > "3.6" or implementation_name == "pypy"',
            'python_version <= "3.6" and implementation_name != "pypy"',
        ),
        (
            'python_version <= "3.6" and implementation_name != "pypy"',
            'python_version > "3.6" or implementation_name == "pypy"',
        ),
        (
            'python_version ~= "3.6"',
            'python_version < "3.6" or python_version >= "4.0"',
        ),
        (
            'python_full_version ~= "3.6.3"',
            'python_full_version < "3.6.3" or python_full_version >= "3.7.0"',
        ),
        ('"tegra" in platform_release', '"tegra" not in platform_release'),
        ('"tegra" not in platform_release', '"tegra" in platform_release'),
    ],
)
def test_invert(marker: str, inverse: str) -> None:
    m = parse_marker(marker)

    assert parse_marker(inverse) == m.invert()
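

# Editorial usage sketch (not part of the original suite): invert() negates a
# marker; by De Morgan's laws a union inverts to an intersection of the
# inverted parts.
def _example_invert() -> None:
    m = parse_marker('python_version > "3.6" or implementation_name == "pypy"')
    assert m.invert() == parse_marker(
        'python_version <= "3.6" and implementation_name != "pypy"'
    )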


@pytest.mark.parametrize(
    "marker, expected",
    [
        (
            (
                'python_version >= "3.6" or python_version < "3.7" or python_version <'
                ' "3.6"'
            ),
            'python_version >= "3.6" or python_version < "3.7"',
        ),
    ],
)
def test_union_should_drop_markers_if_their_complement_is_present(
    marker: str, expected: str
) -> None:
    m = parse_marker(marker)

    assert parse_marker(expected) == m
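

# A gloss on the "scheme" ids below (editorial note): capital letters stand
# for atomic markers, juxtaposition means "and", "+" means "or", and the part
# after "_" (if any) describes the expected shape of the result.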


@pytest.mark.parametrize(
    "scheme, marker, expected",
    [
        ("empty", EmptyMarker(), EmptyMarker()),
        ("any", AnyMarker(), AnyMarker()),
        (
            "A_",
            SingleMarker("python_version", ">=3.7"),
            SingleMarker("python_version", ">=3.7"),
        ),
        (
            "AB_",
            MultiMarker(
                SingleMarker("python_version", ">=3.7"),
                SingleMarker("python_version", "<3.9"),
            ),
            MultiMarker(
                SingleMarker("python_version", ">=3.7"),
                SingleMarker("python_version", "<3.9"),
            ),
        ),
        (
            "A+B_",
            MarkerUnion(
                SingleMarker("python_version", "<3.7"),
                SingleMarker("python_version", ">=3.9"),
            ),
            MarkerUnion(
                SingleMarker("python_version", "<3.7"),
                SingleMarker("python_version", ">=3.9"),
            ),
        ),
        (
            "(A+B)(C+D)_",
            MultiMarker(
                MarkerUnion(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("sys_platform", "win32"),
                ),
                MarkerUnion(
                    SingleMarker("python_version", "<3.9"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
            MultiMarker(
                MarkerUnion(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("sys_platform", "win32"),
                ),
                MarkerUnion(
                    SingleMarker("python_version", "<3.9"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
        ),
        (
            "AB+AC_A(B+C)",
            MarkerUnion(
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("python_version", "<3.9"),
                ),
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
            MultiMarker(
                SingleMarker("python_version", ">=3.7"),
                MarkerUnion(
                    SingleMarker("python_version", "<3.9"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
        ),
        (
            "A+BC_(A+B)(A+C)",
            MarkerUnion(
                SingleMarker("python_version", "<3.7"),
                MultiMarker(
                    SingleMarker("python_version", ">=3.9"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
            MultiMarker(
                MarkerUnion(
                    SingleMarker("python_version", "<3.7"),
                    SingleMarker("python_version", ">=3.9"),
                ),
                MarkerUnion(
                    SingleMarker("python_version", "<3.7"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
        ),
        (
            "(A+B(C+D))(E+F)_(A+B)(A+C+D)(E+F)",
            MultiMarker(
                MarkerUnion(
                    SingleMarker("python_version", ">=3.9"),
                    MultiMarker(
                        SingleMarker("implementation_name", "cpython"),
                        MarkerUnion(
                            SingleMarker("python_version", "<3.7"),
                            SingleMarker("python_version", ">=3.8"),
                        ),
                    ),
                ),
                MarkerUnion(
                    SingleMarker("sys_platform", "win32"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
            MultiMarker(
                MarkerUnion(
                    SingleMarker("python_version", ">=3.9"),
                    SingleMarker("implementation_name", "cpython"),
                ),
                MarkerUnion(
                    SingleMarker("python_version", ">=3.8"),
                    SingleMarker("python_version", "<3.7"),
                ),
                AtomicMarkerUnion(
                    "sys_platform",
                    UnionConstraint(
                        parse_generic_constraint("win32"),
                        parse_generic_constraint("linux"),
                    ),
                ),
            ),
        ),
        (
            "A(B+C)+(D+E)(F+G)_(A+D+E)(B+C+D+E)(A+F+G)(B+C+F+G)",
            MarkerUnion(
                MultiMarker(
                    SingleMarker("sys_platform", "!=win32"),
                    MarkerUnion(
                        SingleMarker("python_version", "<3.7"),
                        SingleMarker("python_version", ">=3.9"),
                    ),
                ),
                MultiMarker(
                    MarkerUnion(
                        SingleMarker("python_version", "<3.8"),
                        SingleMarker("python_version", ">=3.9"),
                    ),
                    MarkerUnion(
                        SingleMarker("sys_platform", "!=linux"),
                        SingleMarker("python_version", ">=3.9"),
                    ),
                ),
            ),
            MultiMarker(
                MarkerUnion(
                    SingleMarker("python_version", "<3.8"),
                    SingleMarker("python_version", ">=3.9"),
                ),
                MarkerUnion(
                    SingleMarker("python_version", "<3.7"),
                    SingleMarker("python_version", ">=3.9"),
                    SingleMarker("sys_platform", "!=linux"),
                ),
            ),
        ),
    ],
)
def test_cnf(scheme: str, marker: BaseMarker, expected: BaseMarker) -> None:
    assert cnf(marker) == expected


@pytest.mark.parametrize(
    "scheme, marker, expected",
    [
        ("empty", EmptyMarker(), EmptyMarker()),
        ("any", AnyMarker(), AnyMarker()),
        (
            "A_",
            SingleMarker("python_version", ">=3.7"),
            SingleMarker("python_version", ">=3.7"),
        ),
        (
            "AB_",
            MultiMarker(
                SingleMarker("python_version", ">=3.7"),
                SingleMarker("python_version", "<3.9"),
            ),
            MultiMarker(
                SingleMarker("python_version", ">=3.7"),
                SingleMarker("python_version", "<3.9"),
            ),
        ),
        (
            "A+B_",
            MarkerUnion(
                SingleMarker("python_version", "<3.7"),
                SingleMarker("python_version", ">=3.9"),
            ),
            MarkerUnion(
                SingleMarker("python_version", "<3.7"),
                SingleMarker("python_version", ">=3.9"),
            ),
        ),
        (
            "AB+AC_",
            MarkerUnion(
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("python_version", "<3.9"),
                ),
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
            MarkerUnion(
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("python_version", "<3.9"),
                ),
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
        ),
        (
            "A(B+C)_AB+AC",
            MultiMarker(
                SingleMarker("python_version", ">=3.7"),
                MarkerUnion(
                    SingleMarker("python_version", "<3.9"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
            MarkerUnion(
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("python_version", "<3.9"),
                ),
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
        ),
        (
            "(A+B)(C+D)_AC+AD+BC+BD",
            MultiMarker(
                MarkerUnion(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("sys_platform", "win32"),
                ),
                MarkerUnion(
                    SingleMarker("python_version", "<3.9"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
            MarkerUnion(
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("python_version", "<3.9"),
                ),
                MultiMarker(
                    SingleMarker("python_version", ">=3.7"),
                    SingleMarker("sys_platform", "linux"),
                ),
                MultiMarker(
                    SingleMarker("sys_platform", "win32"),
                    SingleMarker("python_version", "<3.9"),
                ),
            ),
        ),
        (
            "A(B+C)+(D+E)(F+G)_AB+AC+DF+DG+EF+DG",
            MarkerUnion(
                MultiMarker(
                    SingleMarker("sys_platform", "win32"),
                    MarkerUnion(
                        SingleMarker("python_version", "<3.7"),
                        SingleMarker("python_version", ">=3.9"),
                    ),
                ),
                MultiMarker(
                    MarkerUnion(
                        SingleMarker("python_version", "<3.8"),
                        SingleMarker("python_version", ">=3.9"),
                    ),
                    MarkerUnion(
                        SingleMarker("sys_platform", "linux"),
                        SingleMarker("python_version", ">=3.9"),
                    ),
                ),
            ),
            MarkerUnion(
                MultiMarker(
                    SingleMarker("sys_platform", "win32"),
                    SingleMarker("python_version", "<3.7"),
                ),
                SingleMarker("python_version", ">=3.9"),
                MultiMarker(
                    SingleMarker("python_version", "<3.8"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
        ),
        (
            "(A+B(C+D))(E+F)_AE+AF+BCE+BCF+BDE+BDF",
            MultiMarker(
                MarkerUnion(
                    SingleMarker("python_version", ">=3.9"),
                    MultiMarker(
                        SingleMarker("implementation_name", "cpython"),
                        MarkerUnion(
                            SingleMarker("python_version", "<3.7"),
                            SingleMarker("python_version", ">=3.8"),
                        ),
                    ),
                ),
                MarkerUnion(
                    SingleMarker("sys_platform", "win32"),
                    SingleMarker("sys_platform", "linux"),
                ),
            ),
            MarkerUnion(
                MultiMarker(
                    SingleMarker("python_version", ">=3.9"),
                    AtomicMarkerUnion(
                        "sys_platform",
                        UnionConstraint(
                            parse_generic_constraint("win32"),
                            parse_generic_constraint("linux"),
                        ),
                    ),
                ),
                MultiMarker(
                    SingleMarker("implementation_name", "cpython"),
                    SingleMarker("python_version", "<3.7"),
                    AtomicMarkerUnion(
                        "sys_platform",
                        UnionConstraint(
                            parse_generic_constraint("win32"),
                            parse_generic_constraint("linux"),
                        ),
                    ),
                ),
                MultiMarker(
                    SingleMarker("implementation_name", "cpython"),
                    SingleMarker("python_version", ">=3.8"),
                    AtomicMarkerUnion(
                        "sys_platform",
                        UnionConstraint(
                            parse_generic_constraint("win32"),
                            parse_generic_constraint("linux"),
                        ),
                    ),
                ),
            ),
        ),
    ],
)
def test_dnf(scheme: str, marker: BaseMarker, expected: BaseMarker) -> None:
    assert dnf(marker) == expected


def test_single_markers_are_found_in_complex_intersection() -> None:
    m1 = parse_marker('implementation_name != "pypy" and python_version <= "3.6"')
    m2 = parse_marker(
        'python_version >= "3.6" and python_version < "4.0" and implementation_name =='
        ' "cpython"'
    )
    intersection = m1.intersect(m2)
    assert (
        str(intersection)
        == 'implementation_name == "cpython" and python_version == "3.6"'
    )


@pytest.mark.parametrize(
    "marker1, marker2",
    [
        (
            (
                '(platform_system != "Windows" or platform_machine != "x86") and'
                ' python_version == "3.8"'
            ),
            'platform_system == "Windows" and platform_machine == "x86"',
        ),
        # Following example via
        # https://github.com/python-poetry/poetry-plugin-export/issues/163
        (
            (
                'python_version >= "3.8" and python_version < "3.11" and'
                ' (python_version > "3.9" or platform_system != "Windows" or'
                ' platform_machine != "x86") or python_version >= "3.11" and'
                ' python_version < "3.12"'
            ),
            (
                'python_version == "3.8" and platform_system == "Windows" and'
                ' platform_machine == "x86" or python_version == "3.9" and'
                ' platform_system == "Windows" and platform_machine == "x86"'
            ),
        ),
    ],
)
def test_empty_marker_is_found_in_complex_intersection(
    marker1: str, marker2: str
) -> None:
    m1 = parse_marker(marker1)
    m2 = parse_marker(marker2)
    assert m1.intersect(m2).is_empty()
    assert m2.intersect(m1).is_empty()


def test_empty_marker_is_found_in_complex_parse() -> None:
    marker = parse_marker(
        '(python_implementation != "pypy" or python_version != "3.6") and '
        '((python_implementation != "pypy" and python_version != "3.6") or'
        ' (python_implementation == "pypy" and python_version == "3.6")) and '
        '(python_implementation == "pypy" or python_version == "3.6")'
    )
    assert marker.is_empty()


def test_complex_union() -> None:
    """
    real world example on the way to get mutually exclusive markers
    for numpy(>=1.21.2) of https://pypi.org/project/opencv-python/4.6.0.66/
    """
    markers = [
        parse_marker(m)
        for m in [
            (
                'python_version < "3.7" and python_version >= "3.6"'
                ' and platform_system == "Darwin" and platform_machine == "arm64"'
            ),
            (
                'python_version >= "3.10" or python_version >= "3.9"'
                ' and platform_system == "Darwin" and platform_machine == "arm64"'
            ),
            (
                'python_version >= "3.8" and platform_system == "Darwin"'
                ' and platform_machine == "arm64" and python_version < "3.9"'
            ),
            (
                'python_version >= "3.7" and platform_system == "Darwin"'
                ' and platform_machine == "arm64" and python_version < "3.8"'
            ),
        ]
    ]
    assert (
        str(union(*markers))
        == 'python_version >= "3.6" and platform_system == "Darwin"'
        ' and platform_machine == "arm64" or python_version >= "3.10"'
    )


def test_complex_intersection() -> None:
    """
    inverse of real world example on the way to get mutually exclusive markers
    for numpy(>=1.21.2) of https://pypi.org/project/opencv-python/4.6.0.66/
    """
    markers = [
        parse_marker(m).invert()
        for m in [
            (
                'python_version < "3.7" and python_version >= "3.6"'
                ' and platform_system == "Darwin" and platform_machine == "arm64"'
            ),
            (
                'python_version >= "3.10" or python_version >= "3.9"'
                ' and platform_system == "Darwin" and platform_machine == "arm64"'
            ),
            (
                'python_version >= "3.8" and platform_system == "Darwin"'
                ' and platform_machine == "arm64" and python_version < "3.9"'
            ),
            (
                'python_version >= "3.7" and platform_system == "Darwin"'
                ' and platform_machine == "arm64" and python_version < "3.8"'
            ),
        ]
    ]
    assert (
        str(dnf(intersection(*markers).invert()))
        == 'python_version >= "3.6" and platform_system == "Darwin"'
        ' and platform_machine == "arm64" or python_version >= "3.10"'
    )


def test_union_avoids_combinatorial_explosion() -> None:
    """
    combinatorial explosion without AtomicMultiMarker and AtomicMarkerUnion
    based gevent constraint of sqlalchemy 2.0.7
    see https://github.com/python-poetry/poetry/issues/7689 for details
    """
    expected = (
        'python_full_version >= "3.11.0" and python_version < "4.0"'
        ' and (platform_machine == "aarch64" or platform_machine == "ppc64le"'
        ' or platform_machine == "x86_64" or platform_machine == "amd64"'
        ' or platform_machine == "AMD64" or platform_machine == "win32"'
        ' or platform_machine == "WIN32")'
    )
    m1 = parse_marker(expected)
    m2 = parse_marker(
        'python_full_version >= "3.11.0" and python_full_version < "4.0.0"'
        ' and (platform_machine == "aarch64" or platform_machine == "ppc64le"'
        ' or platform_machine == "x86_64" or platform_machine == "amd64"'
        ' or platform_machine == "AMD64" or platform_machine == "win32"'
        ' or platform_machine == "WIN32")'
    )
    assert str(m1.union(m2)) == expected
    assert str(m2.union(m1)) == expected


def test_intersection_avoids_combinatorial_explosion() -> None:
    """
    combinatorial explosion without AtomicMultiMarker and AtomicMarkerUnion
    based gevent constraint of sqlalchemy 2.0.7
    see https://github.com/python-poetry/poetry/issues/7689 for details
    """
    m1 = parse_marker(
        'python_full_version >= "3.11.0" and python_full_version < "4.0.0"'
    )
    m2 = parse_marker(
        'python_version >= "3" and (platform_machine == "aarch64" '
        'or platform_machine == "ppc64le" or platform_machine == "x86_64" '
        'or platform_machine == "amd64" or platform_machine == "AMD64" '
        'or platform_machine == "win32" or platform_machine == "WIN32")'
    )
    assert (
        str(m1.intersect(m2))
        == 'python_version >= "3.11" and python_full_version < "4.0.0"'
        ' and (platform_machine == "aarch64" or platform_machine == "ppc64le"'
        ' or platform_machine == "x86_64" or platform_machine == "amd64"'
        ' or platform_machine == "AMD64" or platform_machine == "win32"'
        ' or platform_machine == "WIN32")'
    )
    assert (
        str(m2.intersect(m1)) == 'python_version >= "3.11"'
        ' and (platform_machine == "aarch64" or platform_machine == "ppc64le"'
        ' or platform_machine == "x86_64" or platform_machine == "amd64"'
        ' or platform_machine == "AMD64" or platform_machine == "win32"'
        ' or platform_machine == "WIN32") and python_full_version < "4.0.0"'
    )


def test_intersection_no_endless_recursion() -> None:
    m1 = parse_marker(
        '(python_version < "3.9" or extra != "bigquery" and extra != "parquet"'
        ' and extra != "motherduck" and extra != "athena" and extra != "synapse"'
        ' and extra != "clickhouse" and extra != "dremio" and extra != "lancedb"'
        ' and extra != "deltalake" and extra != "pyiceberg"'
        ' and python_version < "3.13") and extra != "postgres" and extra != "redshift"'
        ' and extra != "postgis"'
    )
    m2 = parse_marker(
        'python_version > "3.12" and python_version < "3.13" or extra != "databricks"'
    )
    expected = (
        '(python_version < "3.9" or extra != "bigquery" and extra != "parquet"'
        ' and extra != "motherduck" and extra != "athena" and extra != "synapse"'
        ' and extra != "clickhouse" and extra != "dremio" and extra != "lancedb"'
        ' and extra != "deltalake" and extra != "pyiceberg")'
        ' and python_version < "3.13" and extra != "postgres" and extra != "redshift"'
        ' and extra != "postgis" and (python_version > "3.12"'
        ' and python_version < "3.13" or extra != "databricks")'
    )
    assert str(m1.intersect(m2)) == expected


@pytest.mark.parametrize(
    "python_version, python_full_version, "
    "expected_intersection_version, expected_union_version",
    [
        # python_version > 3.6 (equal to python_full_version >= 3.7.0)
        ('> "3.6"', '> "3.5.2"', '> "3.6"', '> "3.5.2"'),
        ('> "3.6"', '>= "3.5.2"', '> "3.6"', '>= "3.5.2"'),
        ('> "3.6"', '> "3.6.2"', '> "3.6"', '> "3.6.2"'),
        ('> "3.6"', '>= "3.6.2"', '> "3.6"', '>= "3.6.2"'),
        ('> "3.6"', '> "3.7.0"', '> "3.7.0"', '> "3.6"'),
        ('> "3.6"', '>= "3.7.0"', '> "3.6"', '> "3.6"'),
        ('> "3.6"', '> "3.7.1"', '> "3.7.1"', '> "3.6"'),
        ('> "3.6"', '>= "3.7.1"', '>= "3.7.1"', '> "3.6"'),
        ('> "3.6"', '== "3.6.2"', EMPTY, None),
        ('> "3.6"', '== "3.7.0"', '== "3.7.0"', '> "3.6"'),
        ('> "3.6"', '== "3.7.1"', '== "3.7.1"', '> "3.6"'),
        ('> "3.6"', '!= "3.6.2"', '> "3.6"', '!= "3.6.2"'),
        ('> "3.6"', '!= "3.7.0"', '> "3.7.0"', ""),
        ('> "3.6"', '!= "3.7.1"', None, ""),
        ('> "3.6"', '< "3.7.0"', EMPTY, ""),
        ('> "3.6"', '<= "3.7.0"', '== "3.7.0"', ""),
        ('> "3.6"', '< "3.7.1"', None, ""),
        ('> "3.6"', '<= "3.7.1"', None, ""),
        # python_version >= 3.6 (equal to python_full_version >= 3.6.0)
        ('>= "3.6"', '> "3.5.2"', '>= "3.6"', '> "3.5.2"'),
        ('>= "3.6"', '>= "3.5.2"', '>= "3.6"', '>= "3.5.2"'),
        ('>= "3.6"', '> "3.6.0"', '> "3.6.0"', '>= "3.6"'),
        ('>= "3.6"', '>= "3.6.0"', '>= "3.6"', '>= "3.6"'),
        ('>= "3.6"', '> "3.6.1"', '> "3.6.1"', '>= "3.6"'),
        ('>= "3.6"', '>= "3.6.1"', '>= "3.6.1"', '>= "3.6"'),
        ('>= "3.6"', '== "3.5.2"', EMPTY, None),
        ('>= "3.6"', '== "3.6.0"', '== "3.6.0"', '>= "3.6"'),
        ('>= "3.6"', '!= "3.5.2"', '>= "3.6"', '!= "3.5.2"'),
        ('>= "3.6"', '!= "3.6.0"', '> "3.6.0"', ""),
        ('>= "3.6"', '!= "3.6.1"', None, ""),
        ('>= "3.6"', '!= "3.7.1"', None, ""),
        ('>= "3.6"', '< "3.6.0"', EMPTY, ""),
        ('>= "3.6"', '<= "3.6.0"', '== "3.6.0"', ""),
        ('>= "3.6"', '< "3.6.1"', None, ""),  # '== "3.6.0"'
        ('>= "3.6"', '<= "3.6.1"', None, ""),
        # python_version < 3.6 (equal to python_full_version < 3.6.0)
        ('< "3.6"', '< "3.5.2"', '< "3.5.2"', '< "3.6"'),
        ('< "3.6"', '<= "3.5.2"', '<= "3.5.2"', '< "3.6"'),
        ('< "3.6"', '< "3.6.0"', '< "3.6"', '< "3.6"'),
        ('< "3.6"', '<= "3.6.0"', '< "3.6"', '<= "3.6.0"'),
        ('< "3.6"', '< "3.6.1"', '< "3.6"', '< "3.6.1"'),
        ('< "3.6"', '<= "3.6.1"', '< "3.6"', '<= "3.6.1"'),
        ('< "3.6"', '== "3.5.2"', '== "3.5.2"', '< "3.6"'),
        ('< "3.6"', '== "3.6.0"', EMPTY, '<= "3.6.0"'),
        ('< "3.6"', '!= "3.5.2"', None, ""),
        ('< "3.6"', '!= "3.6.0"', '< "3.6"', '!= "3.6.0"'),
        ('< "3.6"', '> "3.6.0"', EMPTY, '!= "3.6.0"'),
        ('< "3.6"', '>= "3.6.0"', EMPTY, ""),
        ('< "3.6"', '> "3.5.2"', None, ""),
        ('< "3.6"', '>= "3.5.2"', None, ""),
        # python_version <= 3.6 (equal to python_full_version < 3.7.0)
        ('<= "3.6"', '< "3.6.1"', '< "3.6.1"', '<= "3.6"'),
        ('<= "3.6"', '<= "3.6.1"', '<= "3.6.1"', '<= "3.6"'),
        ('<= "3.6"', '< "3.7.0"', '<= "3.6"', '<= "3.6"'),
        ('<= "3.6"', '<= "3.7.0"', '<= "3.6"', '<= "3.7.0"'),
        ('<= "3.6"', '== "3.6.1"', '== "3.6.1"', '<= "3.6"'),
        ('<= "3.6"', '== "3.7.0"', EMPTY, '<= "3.7.0"'),
        ('<= "3.6"', '!= "3.6.1"', None, ""),
        ('<= "3.6"', '!= "3.7.0"', '<= "3.6"', '!= "3.7.0"'),
        ('<= "3.6"', '> "3.7.0"', EMPTY, '!= "3.7.0"'),
        ('<= "3.6"', '>= "3.7.0"', EMPTY, ""),
        ('<= "3.6"', '> "3.6.2"', None, ""),
        ('<= "3.6"', '>= "3.6.2"', None, ""),
        # python_version == 3.6  # noqa: ERA001
        # (equal to python_full_version >= 3.6.0 and python_full_version < 3.7.0)
        ('== "3.6"', '< "3.5.2"', EMPTY, None),
        ('== "3.6"', '<= "3.5.2"', EMPTY, None),
        ('== "3.6"', '> "3.5.2"', '== "3.6"', '> "3.5.2"'),
        ('== "3.6"', '>= "3.5.2"', '== "3.6"', '>= "3.5.2"'),
        ('== "3.6"', '!= "3.5.2"', '== "3.6"', '!= "3.5.2"'),
        ('== "3.6"', '< "3.6.0"', EMPTY, '< "3.7"'),
        ('== "3.6"', '<= "3.6.0"', '== "3.6.0"', '< "3.7"'),
        ('== "3.6"', '> "3.6.0"', None, '>= "3.6"'),
        ('== "3.6"', '>= "3.6.0"', '== "3.6"', '>= "3.6"'),
        ('== "3.6"', '!= "3.6.0"', None, ""),
        ('== "3.6"', '< "3.6.1"', None, '< "3.7"'),
        ('== "3.6"', '<= "3.6.1"', None, '< "3.7"'),
        ('== "3.6"', '> "3.6.1"', None, '>= "3.6"'),
        ('== "3.6"', '>= "3.6.1"', None, '>= "3.6"'),
        ('== "3.6"', '!= "3.6.1"', None, ""),
        ('== "3.6"', '< "3.7.0"', '== "3.6"', '< "3.7"'),
        ('== "3.6"', '<= "3.7.0"', '== "3.6"', '<= "3.7.0"'),
        ('== "3.6"', '> "3.7.0"', EMPTY, None),
        ('== "3.6"', '>= "3.7.0"', EMPTY, '>= "3.6"'),
        ('== "3.6"', '!= "3.7.0"', '== "3.6"', '!= "3.7.0"'),
        ('== "3.6"', '<= "3.7.1"', '== "3.6"', '<= "3.7.1"'),
        ('== "3.6"', '< "3.7.1"', '== "3.6"', '< "3.7.1"'),
        ('== "3.6"', '> "3.7.1"', EMPTY, None),
        ('== "3.6"', '>= "3.7.1"', EMPTY, None),
        ('== "3.6"', '!= "3.7.1"', '== "3.6"', '!= "3.7.1"'),
        # python_version != 3.6  # noqa: ERA001
        # (equal to python_full_version < 3.6.0 or python_full_version >= 3.7.0)
        ('!= "3.6"', '< "3.5.2"', '< "3.5.2"', '!= "3.6"'),
        ('!= "3.6"', '<= "3.5.2"', '<= "3.5.2"', '!= "3.6"'),
        ('!= "3.6"', '> "3.5.2"', None, ""),
        ('!= "3.6"', '>= "3.5.2"', None, ""),
        ('!= "3.6"', '!= "3.5.2"', None, ""),
        ('!= "3.6"', '< "3.6.0"', '< "3.6"', '!= "3.6"'),
        ('!= "3.6"', '<= "3.6.0"', '< "3.6"', None),
        ('!= "3.6"', '> "3.6.0"', '>= "3.7"', '!= "3.6.0"'),
        ('!= "3.6"', '>= "3.6.0"', '>= "3.7"', ""),
        ('!= "3.6"', '!= "3.6.0"', '!= "3.6"', '!= "3.6.0"'),
        ('!= "3.6"', '< "3.6.1"', '< "3.6"', None),
        ('!= "3.6"', '<= "3.6.1"', '< "3.6"', None),
        ('!= "3.6"', '> "3.6.1"', '>= "3.7"', None),
        ('!= "3.6"', '>= "3.6.1"', '>= "3.7"', None),
        ('!= "3.6"', '!= "3.6.1"', '!= "3.6"', '!= "3.6.1"'),
        ('!= "3.6"', '< "3.7.0"', '< "3.6"', ""),
        ('!= "3.6"', '<= "3.7.0"', None, ""),
        ('!= "3.6"', '> "3.7.0"', '> "3.7.0"', '!= "3.6"'),
        ('!= "3.6"', '>= "3.7.0"', '>= "3.7"', '!= "3.6"'),
        ('!= "3.6"', '!= "3.7.0"', None, ""),
        ('!= "3.6"', '<= "3.7.1"', None, ""),
        ('!= "3.6"', '< "3.7.1"', None, ""),
        ('!= "3.6"', '> "3.7.1"', '> "3.7.1"', '!= "3.6"'),
        ('!= "3.6"', '>= "3.7.1"', '>= "3.7.1"', '!= "3.6"'),
        ('!= "3.6"', '!= "3.7.1"', None, ""),
    ],
)
def test_merging_python_version_and_python_full_version(
    python_version: str,
    python_full_version: str,
    expected_intersection_version: str,
    expected_union_version: str,
) -> None:
    m = f"python_version {python_version}"
    m2 = f"python_full_version {python_full_version}"

    def get_expected_marker(expected_version: str, op: str) -> str:
        if expected_version is None:
            expected = f"{m} {op} {m2}"
        elif expected_version in ("", EMPTY):
            expected = expected_version
        else:
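            # One dot ("3.6") can be expressed as python_version; two dots
            # ("3.6.1") require python_full_version.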
            expected_marker_name = (
                "python_version"
                if expected_version.count(".") < 2
                else "python_full_version"
            )
            expected = f"{expected_marker_name} {expected_version}"
        return expected

    expected_intersection = get_expected_marker(expected_intersection_version, "and")
    expected_union = get_expected_marker(expected_union_version, "or")

    intersection = parse_marker(m).intersect(parse_marker(m2))
    assert str(intersection) == expected_intersection

    union = parse_marker(m).union(parse_marker(m2))
    assert str(union) == expected_union
poetry-core-2.1.1/tests/version/test_requirements.py000066400000000000000000000111111475444614500227570ustar00rootroot00000000000000from __future__ import annotations

import re

from typing import Any

import pytest

from poetry.core.constraints.version import parse_constraint
from poetry.core.version.requirements import InvalidRequirementError
from poetry.core.version.requirements import Requirement


def assert_requirement(
    req: Requirement,
    name: str,
    url: str | None = None,
    extras: list[str] | None = None,
    constraint: str = "*",
    marker: str | None = None,
) -> None:
    if extras is None:
        extras = []

    assert name == req.name
    assert url == req.url
    assert sorted(extras) == sorted(req.extras)
    assert parse_constraint(constraint) == req.constraint

    if marker:
        assert marker == str(req.marker)


@pytest.mark.parametrize(
    ["string", "expected"],
    [
        ("A", {"name": "A"}),
        ("aa", {"name": "aa"}),
        ("name", {"name": "name"}),
        ("foo-bar.quux_baz", {"name": "foo-bar.quux_baz"}),
        ("name>=3", {"name": "name", "constraint": ">=3"}),
        ("name>=3.*", {"name": "name", "constraint": ">=3.0"}),
        ("name<3.*", {"name": "name", "constraint": "<3.0"}),
        ("name>3.5.*", {"name": "name", "constraint": ">3.5"}),
        ("name==1.0.post1", {"name": "name", "constraint": "==1.0.post1"}),
        ("name==1.2.0b1.dev0", {"name": "name", "constraint": "==1.2.0b1.dev0"}),
        (
            "name>=1.2.3;python_version=='2.6'",
            {
                "name": "name",
                "constraint": ">=1.2.3",
                "marker": 'python_version == "2.6"',
            },
        ),
        ("name (==4)", {"name": "name", "constraint": "==4"}),
        ("name>=2,<3", {"name": "name", "constraint": ">=2,<3"}),
        ("name >=2, <3", {"name": "name", "constraint": ">=2,<3"}),
        # PEP 440: https://www.python.org/dev/peps/pep-0440/#compatible-release
        ("name (~=3.2)", {"name": "name", "constraint": ">=3.2.0,<4.0"}),
        ("name (~=3.2.1)", {"name": "name", "constraint": ">=3.2.1,<3.3.0"}),
        # Extras
        ("foobar [quux,bar]", {"name": "foobar", "extras": ["quux", "bar"]}),
        ("foo[]", {"name": "foo"}),
        # Url
        ("foo @ http://example.com", {"name": "foo", "url": "http://example.com"}),
        (
            'foo @ http://example.com ; os_name=="a"',
            {"name": "foo", "url": "http://example.com", "marker": 'os_name == "a"'},
        ),
        (
            "name @ file:///absolute/path",
            {"name": "name", "url": "file:///absolute/path"},
        ),
        (
            "name @ file://.",
            {"name": "name", "url": "file://."},
        ),
        (
            "name [fred,bar] @ http://foo.com ; python_version=='2.7'",
            {
                "name": "name",
                "url": "http://foo.com",
                "extras": ["fred", "bar"],
                "marker": 'python_version == "2.7"',
            },
        ),
        (
            (
                "foo @ https://example.com/name;v=1.1/?query=foo&bar=baz#blah ;"
                " python_version=='3.4'"
            ),
            {
                "name": "foo",
                "url": "https://example.com/name;v=1.1/?query=foo&bar=baz#blah",
                "marker": 'python_version == "3.4"',
            },
        ),
        (
            (
                'foo (>=1.2.3) ; python_version >= "2.7" and python_version < "2.8" or'
                ' python_version >= "3.4" and python_version < "3.5"'
            ),
            {
                "name": "foo",
                "constraint": ">=1.2.3",
                "marker": ('python_version == "2.7" or python_version == "3.4"'),
            },
        ),
        (
            (
                'foo (>=1.2.3) ; "tegra" not in platform_release and python_version >= "3.10"'
            ),
            {
                "name": "foo",
                "constraint": ">=1.2.3",
                "marker": (
                    '"tegra" not in platform_release and python_version >= "3.10"'
                ),
            },
        ),
    ],
)
def test_requirement(string: str, expected: dict[str, Any]) -> None:
    req = Requirement(string)

    assert_requirement(req, **expected)
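

# Illustrative single-case walkthrough of what assert_requirement() checks
# above (the requirement string here is made up for the example):
def test_requirement_attribute_access() -> None:
    req = Requirement('foo[bar] (>=1.2.3) ; python_version >= "3.9"')
    assert req.name == "foo"
    assert sorted(req.extras) == ["bar"]
    assert req.constraint == parse_constraint(">=1.2.3")
    assert str(req.marker) == 'python_version >= "3.9"'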


@pytest.mark.parametrize(
    ["string", "exception"],
    [
        ("foo!", "Unexpected character at column 4\n\nfoo!\n   ^\n"),
        ("foo (>=bar)", 'invalid version constraint ">=bar"'),
        ("name @ file:/.", "invalid URL"),
    ],
)
def test_invalid_requirement(string: str, exception: str) -> None:
    with pytest.raises(
        InvalidRequirementError,
        match=re.escape(f"The requirement is invalid: {exception}"),
    ):
        Requirement(string)
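

# Sketch of the error contract exercised above: parse failures raise
# InvalidRequirementError whose message carries a fixed prefix. (The
# requirement string is one of the invalid cases from the table above.)
def test_invalid_requirement_message_prefix() -> None:
    with pytest.raises(InvalidRequirementError) as exc_info:
        Requirement("foo!")
    assert str(exc_info.value).startswith("The requirement is invalid:")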
poetry-core-2.1.1/vendors/000077500000000000000000000000001475444614500154615ustar00rootroot00000000000000poetry-core-2.1.1/vendors/poetry.lock000066400000000000000000000150741475444614500176640ustar00rootroot00000000000000# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.

[[package]]
name = "fastjsonschema"
version = "2.21.1"
description = "Fastest Python implementation of JSON schema"
optional = false
python-versions = "*"
files = [
    {file = "fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667"},
    {file = "fastjsonschema-2.21.1.tar.gz", hash = "sha256:794d4f0a58f848961ba16af7b9c85a3e88cd360df008c59aac6fc5ae9323b5d4"},
]

[package.extras]
devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"]

[[package]]
name = "lark"
version = "1.2.2"
description = "a modern parsing library"
optional = false
python-versions = ">=3.8"
files = [
    {file = "lark-1.2.2-py3-none-any.whl", hash = "sha256:c2276486b02f0f1b90be155f2c8ba4a8e194d42775786db622faccd652d8e80c"},
    {file = "lark-1.2.2.tar.gz", hash = "sha256:ca807d0162cd16cef15a8feecb862d7319e7a09bdb13aef927968e45040fed80"},
]

[package.extras]
atomic-cache = ["atomicwrites"]
interegular = ["interegular (>=0.3.1,<0.4.0)"]
nearley = ["js2py"]
regex = ["regex"]

[[package]]
name = "packaging"
version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
    {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
    {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]

[[package]]
name = "tomli"
version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
    {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
    {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
    {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
    {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
    {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
    {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
    {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
    {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
    {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
    {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
    {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
    {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
    {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
    {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
    {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
    {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
    {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
    {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
    {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
    {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
    {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
    {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
    {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
    {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
    {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
    {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
    {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
    {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
    {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
]

[metadata]
lock-version = "2.0"
python-versions = "^3.9"
content-hash = "b717a145e08bd77ba47e7be56a4b7d872bfe83e5ab3f5603431867463b13f469"
poetry-core-2.1.1/vendors/pyproject.toml000066400000000000000000000011131475444614500203710ustar00rootroot00000000000000[tool.poetry]
name = "vendors"
version = "1.0.0"
description = "Vendors"
authors = ["Sébastien Eustace "]

license = "MIT"

readme = "README.md"

homepage = "https://github.com/python-poetry/core"
repository = "https://github.com/python-poetry/core"

keywords = ["packaging", "dependency", "poetry"]

classifiers = [
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules"
]

[tool.poetry.dependencies]
python = "^3.9"

fastjsonschema = "^2.18.0"
lark = "^1.1.3"
packaging = ">=22.0"
tomli = "^2.0.1"