LibCST-1.2.0/.cargo/config.toml

[target.x86_64-apple-darwin]
rustflags = [
    "-C", "link-arg=-undefined",
    "-C", "link-arg=dynamic_lookup",
]

[target.aarch64-apple-darwin]
rustflags = [
    "-C", "link-arg=-undefined",
    "-C", "link-arg=dynamic_lookup",
]

LibCST-1.2.0/.editorconfig

root = true

[*.{py,pyi,rs,toml,md}]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
max_line_length = 88

[*.rs]
# https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/guide.md
max_line_length = 100

LibCST-1.2.0/.fixit.config.yaml

block_list_patterns:
- '@generated'
- '@nolint'
block_list_rules: ["UseFstringRule", "CompareSingletonPrimitivesByIsRule"]
fixture_dir: ./fixtures
formatter: ["black", "-"]
packages:
- fixit.rules
repo_root: libcst
rule_config: {}

LibCST-1.2.0/.flake8

[flake8]
ignore =
    # unnecessary list comprehension; A generator is only better than a list
    # comprehension if we don't always need to iterate through all items in
    # the generator (based on the use case).
    C407,
    # The following codes belong to pycodestyle, and overlap with black:
    # indentation contains mixed spaces and tabs
    E101,
    # indentation is not a multiple of four
    E111,
    # expected an indented block
    E112,
    # unexpected indentation
    E113,
    # indentation is not a multiple of four (comment)
    E114,
    # expected an indented block (comment)
    E115,
    # unexpected indentation (comment)
    E116,
    # continuation line under-indented for hanging indent
    E121,
    # continuation line missing indentation or outdented
    E122,
    # closing bracket does not match indentation of opening bracket’s line
    E123,
    # closing bracket does not match visual indentation
    E124,
    # continuation line with same indent as next logical line
    E125,
    # continuation line over-indented for hanging indent
    E126,
    # continuation line over-indented for visual indent; is harmless
    # (over-indent is visually unambiguous) and currently generates too
    # many warnings for existing code.
    E127,
    # continuation line under-indented for visual indent
    E128,
    # visually indented line with same indent as next logical line
    E129,
    # continuation line unaligned for hanging indent
    E131,
    # closing bracket is missing indentation
    E133,
    # whitespace after ‘(‘
    E201,
    # whitespace before ‘)’
    E202,
    # whitespace before ‘:’; this warning is invalid for slices
    E203,
    # whitespace before ‘(‘
    E211,
    # multiple spaces before operator
    E221,
    # multiple spaces after operator
    E222,
    # tab before operator
    E223,
    # tab after operator
    E224,
    # missing whitespace around operator
    E225,
    # missing whitespace around arithmetic operator
    E226,
    # missing whitespace around bitwise or shift operator
    E227,
    # missing whitespace around modulo operator
    E228,
    # missing whitespace after ‘,’, ‘;’, or ‘:’
    E231,
    # multiple spaces after ‘,’
    E241,
    # tab after ‘,’
    E242,
    # unexpected spaces around keyword / parameter equals
    E251,
    # at least two spaces before inline comment
    E261,
    # inline comment should start with ‘# ‘
    E262,
    # block comment should start with ‘# ‘
    E265,
    # too many leading ‘#’ for block comment
    E266,
    # multiple spaces after keyword
    E271,
    # multiple spaces before keyword
    E272,
    # tab after keyword
    E273,
    # tab before keyword
    E274,
    # missing whitespace after keyword
    E275,
    # expected 1 blank line, found 0
    E301,
    # expected 2 blank lines, found 0
    E302,
    # too many blank lines (3)
    E303,
    # blank lines found after function decorator
    E304,
    # expected 2 blank lines after end of function or class
    E305,
    # expected 1 blank line before a nested definition
    E306,
    # multiple imports on one line
    E401,
    # line too long (> 79 characters)
    E501,
    # the backslash is redundant between brackets
    E502,
    # multiple statements on one line (colon)
    E701,
    # multiple statements on one line (semicolon)
    E702,
    # statement ends with a semicolon
    E703,
    # multiple statements on one line (def)
    E704,
    # These are pycodestyle lints that black doesn't catch:
    # E711,  # comparison to None should be ‘if cond is None:’
    # E712,  # comparison to True should be ‘if cond is True:’ or ‘if cond:’
    # E713,  # test for membership should be ‘not in’
    # E714,  # test for object identity should be ‘is not’
    # E721,  # do not compare types, use ‘isinstance()’
    # E722,  # do not use bare except, specify exception instead
    # E731,  # do not assign a lambda expression, use a def
    # E741,  # do not use variables named ‘l’, ‘O’, or ‘I’
    # E742,  # do not define classes named ‘l’, ‘O’, or ‘I’
    # E743,  # do not define functions named ‘l’, ‘O’, or ‘I’
    # I think these are internal to pycodestyle?
    # E901,  # SyntaxError or IndentationError
    # E902,  # IOError
    # isn't aware of type-only imports, results in false-positives
    F811,
    # indentation contains tabs
    W191,
    # trailing whitespace
    W291,
    # no newline at end of file
    W292,
    # blank line contains whitespace
    W293,
    # blank line at end of file
    W391,
    # line break before binary operator; binary operator in a new line is
    # the standard
    W503,
    # line break after binary operator
    W504,
    # not part of PEP8; doc line too long (> 79 characters)
    W505,
    # These are pycodestyle lints that black doesn't catch:
    # W601,  # .has_key() is deprecated, use ‘in’
    # W602,  # deprecated form of raising exception
    # W603,  # ‘<>’ is deprecated, use ‘!=’
    # W604,  # backticks are deprecated, use ‘repr()’
    # W605,  # invalid escape sequence ‘x’
    # W606,  # ‘async’ and ‘await’ are reserved keywords starting with Python 3.7

# We should've silenced all of Flake8's line-length related lints, in favor of
# Black. However, let's just set this to a large value just to be safe, in case
# we accidentally left in a line-length related lint rule. If we don't set
# anything, it defaults to 79, which is also wrong.
max-line-length = 999
exclude =
    .pyre,
    __pycache__,
    .tox,
    native,
max-complexity = 12

LibCST-1.2.0/.gitattributes

*.svg binary

LibCST-1.2.0/.github/PULL_REQUEST_TEMPLATE.md

## Summary

## Test Plan

LibCST-1.2.0/.github/build-matrix.json

[
    {
        "vers": "x86_64",
        "os": "ubuntu-20.04"
    },
    {
        "vers": "i686",
        "os": "ubuntu-20.04"
    },
    {
        "vers": "arm64",
        "os": "macos-latest"
    },
    {
        "vers": "auto64",
        "os": "macos-latest"
    },
    {
        "vers": "auto64",
        "os": "windows-2019"
    },
    {
        "vers": "aarch64",
        "os": [
            "self-hosted",
            "linux",
            "ARM64"
        ],
        "on_ref_regex": "^refs/(heads/main|tags/.*)$"
    }
]

LibCST-1.2.0/.github/dependabot.yml

# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
  - package-ecosystem: pip
    directory: "/"
    schedule:
      interval: weekly
  - package-ecosystem: cargo
    directory: "/native"
    schedule:
      interval: weekly
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: weekly

LibCST-1.2.0/.github/workflows/build.yml

name: build

on:
  workflow_call:

jobs:
  # Build python wheels
  build:
    name: Build wheels on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [macos-latest, ubuntu-latest, windows-latest]
    env:
      SCCACHE_VERSION: 0.2.13
      CIBW_BEFORE_ALL_LINUX: "curl https://sh.rustup.rs -sSf | env -u CARGO_HOME sh -s -- --default-toolchain stable --profile minimal -y"
      CIBW_BEFORE_BUILD_LINUX: "rm -rf native/target; ln -s /host/${{github.workspace}}/native/target native/target; [ -d /host/${{github.workspace}}/native/target ] || mkdir /host/${{github.workspace}}/native/target"
      CIBW_ENVIRONMENT_LINUX: 'PATH="$PATH:$HOME/.cargo/bin" LIBCST_NO_LOCAL_SCHEME=$LIBCST_NO_LOCAL_SCHEME CARGO_HOME=/host/home/runner/.cargo'
      CIBW_BEFORE_ALL_MACOS: "rustup target add aarch64-apple-darwin x86_64-apple-darwin"
      CIBW_BEFORE_ALL_WINDOWS: "rustup target add x86_64-pc-windows-msvc i686-pc-windows-msvc"
      CIBW_ENVIRONMENT: 'PATH="$PATH:$HOME/.cargo/bin" LIBCST_NO_LOCAL_SCHEME=$LIBCST_NO_LOCAL_SCHEME'
      CIBW_SKIP: "cp27-* cp34-* cp35-* pp* *-win32 *-win_arm64 *-musllinux_*"
      CIBW_ARCHS_LINUX: auto aarch64
      CIBW_ARCHS_MACOS: x86_64 arm64
      CIBW_BUILD_VERBOSITY: 1
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v5
        with:
          cache: pip
          cache-dependency-path: "pyproject.toml"
          python-version: "3.12"
      - name: Disable scmtools local scheme
        if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
        run: >-
          echo LIBCST_NO_LOCAL_SCHEME=1 >> $GITHUB_ENV
      - name: Set up QEMU
        if: runner.os == 'Linux'
        uses: docker/setup-qemu-action@v3
        with:
          platforms: all
      - name: Build wheels
        uses: pypa/cibuildwheel@v2.16.5
      - uses: actions/upload-artifact@v4
        with:
          path: wheelhouse/*.whl
          name: wheels-${{matrix.os}}
LibCST-1.2.0/.github/workflows/ci.yml

name: CI

on:
  push:
    branches:
      - main
  pull_request:

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [macos-latest, ubuntu-latest, windows-latest]
        python-version: ["3.9", "3.10", "3.11", "3.12"]
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v5
        with:
          cache: pip
          cache-dependency-path: "pyproject.toml"
          python-version: ${{ matrix.python-version }}
      - name: Install hatch
        run: |
          pip install -U hatch
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
      - name: Build LibCST
        run: hatch -vv env create
      - name: Tests
        run: hatch run test
      - name: Pure Parser Tests
        env:
          LIBCST_PARSER_TYPE: pure
        run: hatch run test

  # Run linters
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v5
        with:
          cache: pip
          cache-dependency-path: "pyproject.toml"
          python-version: "3.10"
      - name: Install hatch
        run: pip install -U hatch
      - run: hatch run lint
      - run: hatch run fixtures

  # Run pyre typechecker
  typecheck:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v5
        with:
          cache: pip
          cache-dependency-path: "pyproject.toml"
          python-version: "3.10"
      - name: Install hatch
        run: pip install -U hatch
      - run: hatch run typecheck

  # Upload test coverage
  coverage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v5
        with:
          cache: pip
          cache-dependency-path: "pyproject.toml"
          python-version: "3.10"
      - name: Install hatch
        run: pip install -U hatch
      - name: Generate Coverage
        run: |
          hatch run coverage run setup.py test
          hatch run coverage xml -i
      - uses: codecov/codecov-action@v3
        with:
          files: coverage.xml
          fail_ci_if_error: true
          verbose: true
      - name: Archive Coverage
        uses: actions/upload-artifact@v4
        with:
          name: coverage
          path: coverage.xml

  # Build the docs
  docs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v5
        with:
          cache: pip
          cache-dependency-path: "pyproject.toml"
          python-version: "3.10"
      - name: Install hatch
        run: pip install -U hatch
      - uses: ts-graphviz/setup-graphviz@v1
      - run: hatch run docs
      - name: Archive Docs
        uses: actions/upload-artifact@v4
        with:
          name: sphinx-docs
          path: docs/build

  # Test rust parts
  native:
    name: Rust unit tests
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt, clippy
      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: test
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --manifest-path=native/Cargo.toml --release
      - name: test without python
        if: matrix.os == 'ubuntu-latest'
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --manifest-path=native/Cargo.toml --release --no-default-features
      - name: clippy
        uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --manifest-path=native/Cargo.toml --all-features
      - name: compile-benchmarks
        uses: actions-rs/cargo@v1
        with:
          command: bench
          args: --manifest-path=native/Cargo.toml --no-run

  rustfmt:
    name: Rustfmt
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt
      - run: rustup component add rustfmt
      - uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all --manifest-path=native/Cargo.toml -- --check
LibCST-1.2.0/.github/workflows/pypi_upload.yml

name: pypi_upload

on:
  release:
    types: [published]
  push:
    branches: [main]

permissions:
  contents: read

jobs:
  build:
    uses: Instagram/LibCST/.github/workflows/build.yml@main

  upload_release:
    name: Upload wheels to pypi
    runs-on: ubuntu-latest
    needs: build
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Download binary wheels
        id: download
        uses: actions/download-artifact@v4
        with:
          pattern: wheels-*
          path: wheelhouse
          merge-multiple: true
      - uses: actions/setup-python@v5
        with:
          cache: pip
          cache-dependency-path: "pyproject.toml"
          python-version: "3.10"
      - name: Install hatch
        run: pip install -U hatch
      - name: Build a source tarball
        env:
          LIBCST_NO_LOCAL_SCHEME: 1
        run: >-
          hatch run python -m build --sdist --outdir ${{ steps.download.outputs.download-path }}
      - name: Publish distribution 📦 to Test PyPI
        if: github.event_name == 'push'
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          user: __token__
          password: ${{ secrets.TEST_PYPI_API_TOKEN }}
          repository-url: https://test.pypi.org/legacy/
          packages-dir: ${{ steps.download.outputs.download-path }}
      - name: Publish distribution 📦 to PyPI
        if: github.event_name == 'release'
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          user: __token__
          password: ${{ secrets.PYPI_API_TOKEN }}
          packages-dir: ${{ steps.download.outputs.download-path }}

LibCST-1.2.0/.gitignore

*.swp
*.swo
*.pyc
*.pyd
*.pyo
*.so
*.egg-info/
.eggs/
.pyre/
__pycache__/
.tox/
docs/build/
dist/
docs/source/.ipynb_checkpoints/
build/
libcst/_version.py
.coverage
.hypothesis/
.python-version
target/
venv/
.venv/
.idea/

LibCST-1.2.0/.pyre_configuration

{
  "exclude": [
    ".*\/native\/.*"
  ],
  "source_directories": [
    "."
  ],
  "search_path": [
    "stubs",
    {"site-package": "setuptools_rust"}
  ],
  "workers": 3,
  "strict": true
}

LibCST-1.2.0/.readthedocs.yml

version: 2

sphinx:
  configuration: docs/source/conf.py

formats: all

build:
  os: ubuntu-20.04
  tools:
    python: "3"
    rust: "1.70"
  apt_packages:
    - graphviz

python:
  install:
    - method: pip
      path: .
      extra_requirements:
        - dev
LibCST-1.2.0/.watchmanconfig

{}

LibCST-1.2.0/CHANGELOG.md

# 1.2.0 - 2024-02-19

## Updated

* Support running LibCST on Python 3.12 and drop support for running it on 3.8
  * remove 3.8 support by @zsol in https://github.com/Instagram/LibCST/pull/1073
  * Remove reference to distutils by @zsol in https://github.com/Instagram/LibCST/pull/1099
  * Update pyproject.toml for Python 3.12 support by @itamaro in https://github.com/Instagram/LibCST/pull/1038

## Added

* Allow `Element::codegen` to be used by external users by @Wilfred in https://github.com/Instagram/LibCST/pull/1071

## Fixed

* Fix parsing list matchers without explicit brackets by @zsol in https://github.com/Instagram/LibCST/pull/1097
* installing rustc/cargo for mybinder demo by @aleivag in https://github.com/Instagram/LibCST/pull/1083
* fix filepathprovider generic type by @kinto0 in https://github.com/Instagram/LibCST/pull/1036

## New Contributors

* @itamaro made their first contribution in https://github.com/Instagram/LibCST/pull/1039
* @kinto0 made their first contribution in https://github.com/Instagram/LibCST/pull/1036
* @dtolnay made their first contribution in https://github.com/Instagram/LibCST/pull/1063
* @anonymousdouble made their first contribution in https://github.com/Instagram/LibCST/pull/1082
* @aleivag made their first contribution in https://github.com/Instagram/LibCST/pull/1083
* @Wilfred made their first contribution in https://github.com/Instagram/LibCST/pull/1071
* @diliop made their first contribution in https://github.com/Instagram/LibCST/pull/1106

**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.1.0...v1.2.0

# 1.1.0 - 2023-10-05

## Added

* PEP 695 support
  * parser: PEP 695 - Type Parameter Syntax #1004
  * Scope provider: support for type annotations #1014
* PEP 701 support
  * parser: support arbitrarily nested f-strings #1026
  * parser: Parse multiline expressions in f-strings #1027
* parser: Support files with mixed newlines #1007
* [libcst](https://crates.io/crates/libcst) is now published to crates.io

## Fixed

* codemod/ApplyTypeAnnotationsVisitor: Do not annotate the same variable multiple times #956
* parser: Don't swallow trailing whitespace #976
* codemod/rename: Avoid duplicating import statements when the module name doesn't change #981

## Updated

* cli: Don't gather dirs ending .py #994
* drop support for Python 3.7 #997
* A few parser performance improvements:
  * Switch to using thread_local regular expressions to stop mutex contention #996
  * Remove need for regex in TextPosition::matches #1002
  * Remove Regexes from whitespace parser #1008

# 1.0.1 - 2023-06-07

## Fixed

* Fix type of `evaluated_value` on string to allow bytes by @ljodal in https://github.com/Instagram/LibCST/pull/721
* Fix Sentinal typo by @kit1980 in https://github.com/Instagram/LibCST/pull/948
* Allow no whitespace after lambda body in certain cases by @zsol in https://github.com/Instagram/LibCST/pull/939
* Fix whitespace, fstring, walrus related parse errors (#939, #938, #937, #936, #935, #934, #933, #932, #931) by @zsol in https://github.com/Instagram/LibCST/pull/940
* Codemod CLI: Print diff only when there is a change by @kit1980 in https://github.com/Instagram/LibCST/pull/945

## New Contributors

* @ljodal made their first contribution in https://github.com/Instagram/LibCST/pull/721
* @kit1980 made their first contribution in https://github.com/Instagram/LibCST/pull/948
**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.0.0...v1.0.1

# 1.0.0 - 2023-05-25

The first major release of LibCST is essentially the same as 0.4.10, but using
the newer, Rust-based parser implementation by default. The old, pure Python
parser is scheduled for removal in the next (non-patch) release. Until then,
it is available with the `LIBCST_PARSER_TYPE` environment variable set to
`pure`.

## Updated

* Switch the default parser implementation to native by @zsol in https://github.com/Instagram/LibCST/pull/929
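As an illustrative sketch (assuming, per the note above, that the
`LIBCST_PARSER_TYPE` variable is read from the environment when parsing is
invoked), opting back into the deprecated pure Python parser looks like this:

```python
import os

# Select the deprecated pure Python parser; leave the variable unset to get
# the default native (Rust-based) parser. Set it before parsing anything.
os.environ["LIBCST_PARSER_TYPE"] = "pure"

import libcst

module = libcst.parse_module("x = 1\n")
print(module.code)  # LibCST round-trips the original source: "x = 1\n"
```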
# 0.4.10 - 2023-05-23

## New Contributors

* @and-semakin made their first contribution in https://github.com/Instagram/LibCST/pull/816
* @carljm made their first contribution in https://github.com/Instagram/LibCST/pull/828
* @sagarbadiyani made their first contribution in https://github.com/Instagram/LibCST/pull/841
* @podtserkovskiy made their first contribution in https://github.com/Instagram/LibCST/pull/894
* @rchen152 made their first contribution in https://github.com/Instagram/LibCST/pull/903
* @Kludex made their first contribution in https://github.com/Instagram/LibCST/pull/913
* @jakkdl made their first contribution in https://github.com/Instagram/LibCST/pull/921

## Added

* Add py3.11 classifier by @and-semakin in https://github.com/Instagram/LibCST/pull/816
* Script to regenerate test fixtures, upgrade to Pyre 0.9.10 by @amyreese in https://github.com/Instagram/LibCST/pull/872
* Allow FullyQualifiedNameProvider to work with absolute paths by @amyreese in https://github.com/Instagram/LibCST/pull/867
* Allow running codemods without configuring in YAML by @akx in https://github.com/Instagram/LibCST/pull/879
* Support PEP 604 in ApplyTypeAnnotationsVisitor by @hauntsaninja in https://github.com/Instagram/LibCST/pull/868

## Fixed

* fix PEP 604 union annotations in decorators by @carljm in https://github.com/Instagram/LibCST/pull/828
* [AddImportsVisitor] Docstring Check Only for the Top Element of the Body by @sagarbadiyani in https://github.com/Instagram/LibCST/pull/841
* Fix [#855](https://github.com/Instagram/LibCST/issues/855) - fail to parse with statement by @stroxler in https://github.com/Instagram/LibCST/pull/861
* Add setuptools-rust to build requirements in setup.py by @amyreese in https://github.com/Instagram/LibCST/pull/873
* Relative imports from '' package are not allowed by @podtserkovskiy in https://github.com/Instagram/LibCST/pull/894
* Use subprocess.DEVNULL instead of opening os.devnull by hand by @akx in https://github.com/Instagram/LibCST/pull/897
* Ensure current Python interpreter is used for subprocesses by @akx in https://github.com/Instagram/LibCST/pull/898
* Fix ApplyTypeAnnotationsVisitor behavior on attribute assignments by @rchen152 in https://github.com/Instagram/LibCST/pull/903
* Fix spelling and grammar in some comments by @stroxler in https://github.com/Instagram/LibCST/pull/908
* skip escaped backslash in rf-string by @jakkdl in https://github.com/Instagram/LibCST/pull/921
* relax validation rules on decorators by @jakkdl in https://github.com/Instagram/LibCST/pull/926

**Full Changelog**: https://github.com/Instagram/LibCST/compare/v0.4.9...v0.4.10

# 0.4.9 - 2022-11-10

## Updated

* Bump setuptools-rust version by @zsol in https://github.com/Instagram/LibCST/pull/809

**Full Changelog**: https://github.com/Instagram/LibCST/compare/v0.4.8...v0.4.9

# 0.4.8 - 2022-11-10

## New Contributors

* @dhruvmanila made their first contribution in https://github.com/Instagram/LibCST/pull/728
* @vfazio made their first contribution in https://github.com/Instagram/LibCST/pull/801
* @matthewshaer made their first contribution in https://github.com/Instagram/LibCST/pull/807

## Fixed

* Fix parse error message for number parsing by @zzl0 in https://github.com/Instagram/LibCST/pull/724
* Fix problematic doc build, caused by the new builder image provided by readthedocs no longer having the `graphviz-dev` package pre-installed, by @MapleCCC in https://github.com/Instagram/LibCST/pull/751
* Fix docstring of `FullRepoManager` by @MapleCCC in https://github.com/Instagram/LibCST/pull/750
* Fix bug when `TypeOf` is one of the options in `OneOf` / `AllOf` by @MapleCCC in https://github.com/Instagram/LibCST/pull/756
* Tighten the metadata type of `ExpressionContextProvider` by @MapleCCC in https://github.com/Instagram/LibCST/pull/760
* Fix the bug that the use of a formatter in codemods has an undetermined target Python version, resulting in hard-to-reason-about behavior, by @MapleCCC in https://github.com/Instagram/LibCST/pull/771

## Added

* Python 3.11 runtime support
  * test using python 3.11 beta versions by @zsol in https://github.com/Instagram/LibCST/pull/723
  * Python 3.11 wheels by @vfazio in https://github.com/Instagram/LibCST/pull/801
* Raise informative exception when metadata is unresolved in a metadata-based match by @MapleCCC in https://github.com/Instagram/LibCST/pull/757
* Add AccessorProvider by @matthewshaer in https://github.com/Instagram/LibCST/pull/807

**Full Changelog**: https://github.com/Instagram/LibCST/compare/v0.4.7...v0.4.8

# 0.4.7 - 2022-07-12

## New Contributors

* @Chenguang-Zhu made their first contribution in https://github.com/Instagram/LibCST/pull/720

## Fixed

* Fix get_qualified_names_for matching on prefixes of the given name by @lpetre in https://github.com/Instagram/LibCST/pull/719

## Added

* Implement lazy loading mechanism for expensive metadata providers by @Chenguang-Zhu in https://github.com/Instagram/LibCST/pull/720

# 0.4.6 - 2022-07-04

## New Contributors

- @superbobry made their first contribution in https://github.com/Instagram/LibCST/pull/702

## Fixed

- convert_type_comments now preserves comments following type comments by @superbobry in https://github.com/Instagram/LibCST/pull/702
- QualifiedNameProvider optimizations
  - Cache the scope name prefix to prevent scope traversal in a tight loop by @lpetre in https://github.com/Instagram/LibCST/pull/708
  - Faster qualified name formatting by @lpetre in https://github.com/Instagram/LibCST/pull/710
  - Prevent unnecessary work in Scope.get_qualified_names_for_ by @lpetre in https://github.com/Instagram/LibCST/pull/709
- Fix parsing of parenthesized empty tuples by @zsol in https://github.com/Instagram/LibCST/pull/712
- Support whitespace after ParamSlash by @zsol in https://github.com/Instagram/LibCST/pull/713
- [parser] bail on deeply nested expressions by @zsol in https://github.com/Instagram/LibCST/pull/718

# 0.4.5 - 2022-06-17

## New Contributors

- @zzl0 made their first contribution in https://github.com/Instagram/LibCST/pull/704

## Fixed

- Only skip supported escaped characters in f-strings by @zsol in https://github.com/Instagram/LibCST/pull/700
- Escaping quote characters in raw string literals causes a tokenizer error by @zsol in https://github.com/Instagram/LibCST/issues/668
- Corrected a code example in the documentation by @zzl0 in https://github.com/Instagram/LibCST/pull/703
- Handle multiline strings that start with quotes by @zzl0 in https://github.com/Instagram/LibCST/pull/704
- Fixed a performance regression in libcst.metadata.ScopeProvider by @lpetre in https://github.com/Instagram/LibCST/pull/698

# 0.4.4 - 2022-06-13

## New Contributors

- @adamchainz made their first contribution in https://github.com/Instagram/LibCST/pull/688

## Added

- Add package links to PyPI by @adamchainz in https://github.com/Instagram/LibCST/pull/688
- native: add overall benchmark by @zsol in https://github.com/Instagram/LibCST/pull/692
- Add support for PEP-646 by @zsol in https://github.com/Instagram/LibCST/pull/696

## Updated

- parser: use references instead of smart pointers for Tokens by @zsol in https://github.com/Instagram/LibCST/pull/691

# 0.4.3 - 2022-05-11

## Fixed

- Restore the 0.4.1 behavior for libcst.helpers.get_absolute_module by @lpetre in https://github.com/Instagram/LibCST/pull/684

# 0.4.2 - 2022-05-04

## New Contributors

- @stanislavlevin made their first contribution in https://github.com/Instagram/LibCST/pull/650
- @dmitryvinn made their first contribution in https://github.com/Instagram/LibCST/pull/655
- @wiyr made their first contribution in https://github.com/Instagram/LibCST/pull/669
- @toofar made their first contribution in https://github.com/Instagram/LibCST/pull/675

## Fixed

- native: Avoid crashing by making IntoPy conversion fallible by @zsol in https://github.com/Instagram/LibCST/pull/639
- native: make sure ParserError's line is zero-indexed by @zsol in https://github.com/Instagram/LibCST/pull/681
- Fix space validation for AsName and Await by @zsol in https://github.com/Instagram/LibCST/pull/641
- Qualified Name Provider: Fix returned qname for symbols that are prefixes of each other by @wiyr in https://github.com/Instagram/LibCST/pull/669
- Rename Codemod: Correct last renamed import from by @toofar in https://github.com/Instagram/LibCST/pull/675
- Many changes to the Apply Type Comments codemod:
  - Allow for skipping quotes when applying type comments by @stroxler in https://github.com/Instagram/LibCST/pull/644
  - Port pyre fixes by @stroxler in https://github.com/Instagram/LibCST/pull/651
  - Preserve as-imports when merging type annotations by @martindemello in https://github.com/Instagram/LibCST/pull/664
  - Qualify imported symbols when the dequalified form would cause a conflict by @martindemello in https://github.com/Instagram/LibCST/pull/674
  - Add an argument to always qualify imported type annotations by @martindemello in https://github.com/Instagram/LibCST/pull/676
## Added

- Create an AddTrailingCommas codemod by @stroxler in https://github.com/Instagram/LibCST/pull/643
- Define gather global names visitor by @shannonzhu in https://github.com/Instagram/LibCST/pull/657

## Updated

- Support module and package names in the codemod context by @lpetre in https://github.com/Instagram/LibCST/pull/662
- Drop support for running libcst using a python 3.6 interpreter by @lpetre in https://github.com/Instagram/LibCST/pull/663
- Update relative import logic to match cpython by @lpetre in https://github.com/Instagram/LibCST/pull/660
- Scope Provider: Consider access information when computing qualified names for nodes by @lpetre in https://github.com/Instagram/LibCST/pull/682

# 0.4.1 - 2022-01-28

## New Contributors

- @ariebovenberg made their first contribution in https://github.com/Instagram/LibCST/pull/605
- @sehz made their first contribution in https://github.com/Instagram/LibCST/pull/598

## Added

- Add docs about the native parts by @zsol in https://github.com/Instagram/LibCST/pull/601
- Specify minimum rust toolchain version by @zsol in https://github.com/Instagram/LibCST/pull/614
- build wheels on main branch for linux/arm64 by @zsol in https://github.com/Instagram/LibCST/pull/630

## Updated

- ApplyTypeAnnotationVisitor changes
  - Add support for methods with func type comment excluding self/cls by @stroxler in https://github.com/Instagram/LibCST/pull/622
  - Merge in TypeVars and Generic base classes in ApplyTypeAnnotationVisitor by @martindemello in https://github.com/Instagram/LibCST/pull/596
  - Full handling for applying type comments to Assign by @stroxler in https://github.com/Instagram/LibCST/pull/599
  - Add support for For and With by @stroxler in https://github.com/Instagram/LibCST/pull/607
  - Support FunctionDef transformations by @stroxler in https://github.com/Instagram/LibCST/pull/610
- change pyo3 to an optional dependency in the native Python Parser by @sehz in https://github.com/Instagram/LibCST/pull/598
- add slots to base classes, @add_slots takes bases into account by @ariebovenberg in https://github.com/Instagram/LibCST/pull/605
- [native] Box most enums by @zsol in https://github.com/Instagram/LibCST/pull/632
- [native] Return tuples instead of lists in CST nodes by @zsol in https://github.com/Instagram/LibCST/pull/631

## Fixed

- Allow trailing whitespace without newline at EOF by @zsol in https://github.com/Instagram/LibCST/pull/611
- Handle ast.parse failures when converting function type comments by @stroxler in https://github.com/Instagram/LibCST/pull/616
- [native] Don't redundantly nest StarredElement inside another Element by @isidentical in https://github.com/Instagram/LibCST/pull/624
- [native] Allow unparenthesized tuples inside f-strings by @isidentical in https://github.com/Instagram/LibCST/pull/621
- Don't require whitespace right after match by @isidentical in https://github.com/Instagram/LibCST/pull/628
- Proxy both parentheses in some pattern matching nodes by @isidentical in https://github.com/Instagram/LibCST/pull/626

# 0.4.0 - 2022-01-12

This release contains a new parsing infrastructure that is turned off by
default. You can enable it by setting the `LIBCST_PARSER_TYPE` environment
variable to `native` before parsing an input with the usual LibCST APIs.
Parsing Python 3.10 documents is only supported in this new mode.

Note: the new parser is built as a native extension, so LibCST will ship with
binary wheels from now on.
## Added

- Implement a Python PEG parser in Rust by @zsol in [#566](https://github.com/Instagram/LibCST/pull/566)
- implement PEP-654: except\* by @zsol in [#571](https://github.com/Instagram/LibCST/pull/571)
- Implement PEP-634 - Match statement by @zsol in [#568](https://github.com/Instagram/LibCST/pull/568)
- Add instructions to codegen test failures by @stroxler in [#582](https://github.com/Instagram/LibCST/pull/582)
- Support Parenthesized With Statements by @stroxler in [#584](https://github.com/Instagram/LibCST/pull/584)
- Support relative imports in AddImportsVisitor by @martindemello in [#585](https://github.com/Instagram/LibCST/pull/585)
- Codemod for PEP 484 Assign w/ type comments -> PEP 526 AnnAssign by @stroxler in [#594](https://github.com/Instagram/LibCST/pull/594)

## Updated

- Update license headers by @zsol in [#560](https://github.com/Instagram/LibCST/pull/560)
- Use precise signature matching when inserting function type annotations by @martindemello in [#591](https://github.com/Instagram/LibCST/pull/591)

# 0.3.23 - 2021-11-23

## Fixed

- Fix missing string annotation references [#561](https://github.com/Instagram/LibCST/pull/561)

# 0.3.22 - 2021-11-22

## Added

- Add --indent-string option to `libcst.tool print` [#525](https://github.com/Instagram/LibCST/pull/525)
- Publish pre-release packages to test.pypi.org [#550](https://github.com/Instagram/LibCST/pull/550)
- Add ImportAssignment class extending Assignment to record assignments for import statements [#554](https://github.com/Instagram/LibCST/pull/554)

## Fixed

- Various documentation fixes [#527](https://github.com/Instagram/LibCST/pull/527), [#529](https://github.com/Instagram/LibCST/pull/529)
- Do not add imports if we added no type info in ApplyTypeAnnotationVisitor [(commit)](https://github.com/Instagram/LibCST/commit/87625d02b6cb321c9c29ba1c67d81ce954a1a396)
- Support relative imports in ApplyTypeAnnotationVisitor qualifier handling [#538](https://github.com/Instagram/LibCST/pull/538)
- Don't gather metadata if the wrapper already contains it [#545](https://github.com/Instagram/LibCST/pull/545)
- Swallow parsing errors in string annotations [#548](https://github.com/Instagram/LibCST/pull/548)
- Stop parsing string annotations when no longer in a typing call [#546](https://github.com/Instagram/LibCST/pull/546)

## Updated

- Move find_qualified_names_for into the Assignment class [#557](https://github.com/Instagram/LibCST/pull/557)

# 0.3.21 - 2021-09-21

## Fixed

- Fix pyre command for type inference provider [#523](https://github.com/Instagram/LibCST/pull/523)

## Updated

- Change codegen to treat typing.Union[Foo, NoneType] and typing.Optional[Foo] as the same [#508](https://github.com/Instagram/LibCST/pull/508)
- Rewrite the MatchIfTrue type to be generic on \_MatchIfTrueT [#512](https://github.com/Instagram/LibCST/pull/512)
- Add python3.9 to the CI [#506](https://github.com/Instagram/LibCST/pull/506)
- Various CI changes [#471](https://github.com/Instagram/LibCST/pull/471) [#510](https://github.com/Instagram/LibCST/pull/510) [#505](https://github.com/Instagram/LibCST/pull/505) [#515](https://github.com/Instagram/LibCST/pull/515) [#516](https://github.com/Instagram/LibCST/pull/516)

# 0.3.20 - 2021-08-09

## Fixed

- Don't reset subprocess environment to fix codemodding on windows [#495](https://github.com/Instagram/LibCST/pull/495)
- TypeAnnotationsVisitor: don't truncate function return type [#499](https://github.com/Instagram/LibCST/pull/499)
- Docs: Fix typo [#492](https://github.com/Instagram/LibCST/pull/492)
# 0.3.19 - 2021-05-12

## Updated

- Return more specific QNames for assignments [#477](https://github.com/Instagram/LibCST/pull/477)
- Tie accesses from string annotation to the string node [#483](https://github.com/Instagram/LibCST/pull/483)

## Fixed

- Fix leaking processes from TypeInferenceProvider [#474](https://github.com/Instagram/LibCST/pull/474)
- Fix TypeInferenceProvider breakage with empty cache [#476](https://github.com/Instagram/LibCST/pull/476)
- Fix formatting for link to QualifiedName class in docs [#480](https://github.com/Instagram/LibCST/pull/480)

# 0.3.18 - 2021-03-29

## Added

- Add FlattenSentinel to support replacing a statement with multiple statements [#455](https://github.com/Instagram/LibCST/pull/455)
- Add BuiltinScope [#469](https://github.com/Instagram/LibCST/pull/469)
- Add FullyQualifiedNameProvider [#465](https://github.com/Instagram/LibCST/pull/465)

## Updated

- Split QualifiedNameProvider out from libcst.metadata.scope_provider [#464](https://github.com/Instagram/LibCST/pull/464)

## Fixed

- Exception while parsing escape character in raw f-strings [#462](https://github.com/Instagram/LibCST/issues/462)

# 0.3.17 - 2021-02-08

## Updated

- Optimization: reduce the number of unused parallel processes [#440](https://github.com/Instagram/LibCST/pull/440)

## Fixed

- Walrus operator's left hand side now has STORE expression context [#443](https://github.com/Instagram/LibCST/pull/433)
- ApplyTypeAnnotationsVisitor applies parameter annotations even if no return type is declared [#445](https://github.com/Instagram/LibCST/pull/445)
- Work around Windows problem by using dummy pool for `jobs=1` [#436](https://github.com/Instagram/LibCST/pull/436)
- Remove extra unused imports added in other files [#453](https://github.com/Instagram/LibCST/pull/453)

# 0.3.16 - 2020-12-16

## Added

- Support PEP-604 style unions in decorator annotations [#429](https://github.com/Instagram/LibCST/pull/429)
- Gathering exports in augmented assignment statements [#426](https://github.com/Instagram/LibCST/pull/426)

## Fixed

- Don't allow out of order accesses in the global scope [#431](https://github.com/Instagram/LibCST/pull/431)
- Handle scope ordering in For statements [#430](https://github.com/Instagram/LibCST/pull/430)
- Fix for not parsing subscripts such as `cast()["from"]` [#428](https://github.com/Instagram/LibCST/pull/428)
- Walrus operator's left hand side now has STORE expression context [#433](https://github.com/Instagram/LibCST/pull/433)

# 0.3.15 - 2020-12-01

## Added

- Support Named Unicode Characters and yield in f-strings [#424](https://github.com/Instagram/LibCST/pull/424)

## Fixed

- Assignment/access ordering in comprehensions [#423](https://github.com/Instagram/LibCST/pull/423)
- Referencing of remaining objects in cast() [#422](https://github.com/Instagram/LibCST/pull/422)

# 0.3.14 - 2020-11-18

## Fixed

- Fix is_annotation for types used in classdef base and assign value [#406](https://github.com/Instagram/LibCST/pull/406)
- Visit concatenated f-strings during scope analysis [#411](https://github.com/Instagram/LibCST/pull/411)
- Correct handling of walrus operator in function args [#417](https://github.com/Instagram/LibCST/pull/417)
- Allow generator expressions in f-strings [#419](https://github.com/Instagram/LibCST/pull/419)
- Keep track of assignment/access ordering during scope analysis [#413](https://github.com/Instagram/LibCST/pull/413)
- Handle string type references in cast() during scope analysis [#418](https://github.com/Instagram/LibCST/pull/418)

# 0.3.13 - 2020-10-12
## Fixed

- Use correct type for AugAssign and AnnAssign target [#396](https://github.com/Instagram/LibCST/pull/396)
- Support string annotations for type aliases [#401](https://github.com/Instagram/LibCST/pull/401)

# 0.3.12 - 2020-10-01

## Fixed

- fix RemoveImportsVisitor crash when ImportAlias is inserted without comma [#397](https://github.com/Instagram/LibCST/pull/397)
- Provide STORE for {Class,Function}Def.name in ExpressionContextProvider [#394](https://github.com/Instagram/LibCST/pull/394)

# 0.3.11 - 2020-09-29

## Added

- Implement TypeOf matcher [#384](https://github.com/Instagram/LibCST/pull/384)

## Updated

- Update return type of ParentNodeProvider to be CSTNode [#377](https://github.com/Instagram/LibCST/pull/377)
- Add source code links to each class/function [#378](https://github.com/Instagram/LibCST/pull/378)

## Fixed

- Removing an import alias with a trailing standalone comment should preserve the comment [#392](https://github.com/Instagram/LibCST/pull/392)

# 0.3.10 - 2020-09-17

## Added

- Handle string annotations in ScopeProvider [#373](https://github.com/Instagram/LibCST/pull/373)
- Add is_annotation subtype for Access in references [#372](https://github.com/Instagram/LibCST/pull/372)

## Updated

- Call pyre query with noninteractive logging [#371](https://github.com/Instagram/LibCST/pull/371)
- Replace matchers with explicit visitation in gatherers [#366](https://github.com/Instagram/LibCST/pull/366)
- Include missing test data in install [#365](https://github.com/Instagram/LibCST/pull/365)

## Fixed

- Spaces around walrus operator are not required [#368](https://github.com/Instagram/LibCST/pull/368)
- SaveMatchedNode now matches with trailing empty wildcards [#356](https://github.com/Instagram/LibCST/pull/356)
- Correctly extract wildcard matchers [#355](https://github.com/Instagram/LibCST/pull/355)

# 0.3.9 - 2020-09-07

## Added

- Support string type annotations in RemoveUnusedImports [#353](https://github.com/Instagram/LibCST/pull/353)
- Add scope to ImportAlias [#350](https://github.com/Instagram/LibCST/pull/350)
- Add scope to ClassDef [#349](https://github.com/Instagram/LibCST/pull/349)

## Fixed

- Fixed all pyre related errors [#360](https://github.com/Instagram/LibCST/pull/360)
- Fixed enclosing attribute for attributes in call arguments [#362](https://github.com/Instagram/LibCST/pull/362)

# 0.3.8 - 2020-07-22

## Added

- Handle type subscripts when applying annotations. [#335](https://github.com/Instagram/LibCST/pull/335)
- Added FullRepoManager `cache` property [#330](https://github.com/Instagram/LibCST/pull/330)
- Added optional args for tox commands [#327](https://github.com/Instagram/LibCST/pull/327)

## Updated

- Only remove trailing comma if the last alias is removed [#334](https://github.com/Instagram/LibCST/pull/334)

## Fixed

- Fixed inserting imports after module docstring [#343](https://github.com/Instagram/LibCST/pull/343)
- Fixed ParenthesizedWhitespace before params in FuncDef [#342](https://github.com/Instagram/LibCST/pull/342)
- Fixed validation for ImportAlias and Try statements [#340](https://github.com/Instagram/LibCST/pull/340)
- Fixed NotEqual position issue [#325](https://github.com/Instagram/LibCST/pull/325)
- Fixed minor typo in scope_provider.py [#324](https://github.com/Instagram/LibCST/pull/324)

# 0.3.7 - 2020-06-24

## Added

- Added `RenameCommand` to rename all instances of a local or imported object to a specified new name. [#308](https://github.com/Instagram/LibCST/pull/308)

## Updated

- Upgraded Codecov dev dependency to 2.1.4. [#311](https://github.com/Instagram/LibCST/pull/311)
- Enabled Pyre `strict` mode by default. [#313](https://github.com/Instagram/LibCST/pull/313)

## Fixed

- Fixed `ImportError` under Python 3.9. [#306](https://github.com/Instagram/LibCST/pull/306)
- Fixed `stdout` being plugged into successfully codemod-ed files. [#309](https://github.com/Instagram/LibCST/pull/309)
- Fixed `QualifiedName` retrieval for names with repeated substrings. [#312](https://github.com/Instagram/LibCST/pull/312)
- Fixed default values of keyword-only and positional-only arguments in `ApplyTypeAnnotationsVisitor`. [#314](https://github.com/Instagram/LibCST/pull/314)
- Fixed `ExpressionContextProvider` by giving subscript values a `LOAD` context. [#319](https://github.com/Instagram/LibCST/pull/319)

# 0.3.6 - 2020-05-27

## Added

- Added `ConvertNamedTupleToDataclassCommand` to convert `NamedTuple` class declarations to Python 3.7 `dataclasses` using the `@dataclass(frozen=True)` decorator. [#299](https://github.com/Instagram/LibCST/pull/299)

## Fixed

- Fixed typo in file name `libcst/codemod/commands/convert_percent_format_to_fstring.py`. [#301](https://github.com/Instagram/LibCST/pull/301)
- Fixed `StopIteration` exception during scope analysis matching on import names. [#302](https://github.com/Instagram/LibCST/pull/302)

# 0.3.5 - 2020-05-12

## Updated

- Expose more granular `Assignments` and `Accesses` for dotted imports in `ScopeProvider`. [#284](https://github.com/Instagram/LibCST/pull/284)
- `get_qualified_names_for` returns the most appropriate qualified name. [#290](https://github.com/Instagram/LibCST/pull/290)
- Surface `SyntaxError` raised by formatter in codemod run. [#288](https://github.com/Instagram/LibCST/pull/288) [#289](https://github.com/Instagram/LibCST/pull/289)
- Rename `ApplyTypeAnnotationsVisitor.add_stub_to_context` as `ApplyTypeAnnotationsVisitor.store_stub_in_context` and add `overwrite_existing_annotations` to allow overwriting existing type annotations. [#291](https://github.com/Instagram/LibCST/pull/291)

## Fixed

- Close opened file handles on finishing codemod to avoid `Too many open files` on OSX. [#283](https://github.com/Instagram/LibCST/pull/283)

## Deprecated

- `ApplyTypeAnnotationsVisitor.add_stub_to_context` is renamed as `ApplyTypeAnnotationsVisitor.store_stub_in_context`.

# 0.3.4 - 2020-03-27

## Added

- Supported CST parsing for Python 3.0, 3.1 and 3.3. [#261](https://github.com/Instagram/LibCST/pull/261)
- Added `RemoveUnusedImportsCommand` for removing unused import codemod. [#266](https://github.com/Instagram/LibCST/pull/266)
- Added `ApplyTypeAnnotationsVisitor.add_stub_to_context` for applying type annotations from stub modules. [#265](https://github.com/Instagram/LibCST/pull/265)

## Updated

- Improved exception message of `get_metadata` when MetadataWrapper is not used. [#257](https://github.com/Instagram/LibCST/pull/257)
- New steps for Pyre type check in README.rst which analyzes installed Python sources for better type checking. [#262](https://github.com/Instagram/LibCST/pull/262)

## Fixed

- Parsed `except(Exception):` correctly when there is no space after the except keyword. [#256](https://github.com/Instagram/LibCST/pull/256)
- Fixed `RemoveImportsVisitor` to not remove imports when references still exist. [#264](https://github.com/Instagram/LibCST/pull/264)
- Fixed missing type annotations. [#271](https://github.com/Instagram/LibCST/pull/271)
- `AddImportsVisitor` generates a deterministic order for added imports. [#274](https://github.com/Instagram/LibCST/pull/274)
# 0.3.3 - 2020-03-05

## Added

- `ByteSpanPositionProvider` provides start offset and length of CSTNode as metadata.
- `get_docstring` helper provides docstring from `Module`, `ClassDef` and `FunctionDef` node types.

## Updated

- Optimized `ScopeProvider` performance to run faster and use less memory:
  - remove unnecessary `Assignment` of keyword `Arg`.
  - don't provide scope object for formatting information nodes.
  - batch set union updates in `infer_accesses` step.

## Fixed

- Fixed `_assignments` mutation when calling read-only `Scope.get_qualified_names_for` and `__contains__`.

# 0.3.2 - 2020-02-24

## Added

- Added `RemoveImportsVisitor` to remove an import if it's not used in a module.
- Added `GatherExportsVisitor` to gather exports specified in `__all__`.
- Added property helpers `evaluated_name` and `evaluated_alias` in `ImportAlias`.
- Added helpers to get the full module name: `get_absolute_module_for_import` and `get_absolute_module_for_import_or_raise`.
- Added `CodemodContext.full_module_name` for the full dotted module name.
- Added format specifiers f-string conversion support to `ConvertFormatStringCommand`.

## Updated

- Moved the LibCST version to `_version.py`; it can be printed via `python -m libcst.tool --version`.
- Improved `EnsureImportPresentCommand` with an `--alias` option.
- Improved `ConvertFormatStringCommand` with `--allow-strip-comments` and `--allow-await` options.

# 0.3.1 - 2020-02-06

## Added

- Added helpers to get both the raw and evaluated value of a SimpleString.
- Added helpers to get the quoting and prefix of SimpleString and FormattedString.
- Added a helper to get the evaluated value of number types.
- Added templated parsers for statement/expression/module to make constructing updated nodes in transforms easier.
- FullRepoManager is now integrated into codemods, so metadata requiring full repo analysis can now be used.
- Added `get_full_name_for_node_or_raise` helper to remove boilerplate of checking against `None`.

## Updated

- Upgraded Pyre dependency to 0.0.41.
- Added additional status to `libcst codemod` command.
- `get_full_name_for_node` now supports decorators.

## Fixed

- Clarified documentation around f-strings, fixed indentation.
- Fixed `libcst list` crashing if a codemod does unsafe work on import.
- Fixed deploy-time dependencies so pyyaml won't have to be manually installed to execute codemods.
- QualifiedNameProvider no longer erroneously claims names inside attributes are built-ins.

# 0.3.0 - 2020-01-16

## Added

- Added support for parsing and rendering Python 3.8 source code.
- Added more documentation for codemods.
- Added `get_full_name_for_expression` helper method.
- Added `has_name` helper to `QualifiedNameProvider`.
- Added a `--python-version` flag to `libcst.tool print` utility.

## Updated

- Codemod command can now discover codemods in subdirectories of configured modules.
- Upgraded Pyre dependency to 0.0.39.

## Fixed

- Cleaned up some typos and formatting issues in comments and documentation.
- Cleaned up a few redundant typevars.
- Fixed callable typing in matchers implementation.
- Fixed incorrect base class references in matcher decorator attribute visitors.
- Fixed codemod test assertion failing for some whitespace edge cases.
- Fixed scope analysis to track variable usage on `del` statements.

## Deprecated

- Deprecated exporting `ensure_type` from `libcst` in favor of `libcst.helpers`.

## Removed

- Removed `ExtSlice` and helper code in favor of `SubscriptElement`.
- Removed `default_params` attribute on `Parameters`.
- Removed `SyntacticPositionProvider` and `BasicPositionProvider`.
- Removed `CodePosition` and `CodeRange` exports on `libcst` in favor of `libcst.metadata`.

# 0.2.7 - 2020-01-07

## Updated

- Command-line interface now shows a rough estimate of time remaining while executing a codemod.
- Add needed import now supports import aliases.

# 0.2.6 - 2020-01-01

## Added

- Added Codemod framework for running code transforms over a codebase in parallel.
  - Codemod for code transform logic.
  - CodemodContext for preserving state across transforms.
  - CodemodCommand for the CLI interface.
  - CodemodTest for testing codemods easily.
  - yaml codemod config.
  - Pre-built commands in codemod/commands/.
- Added TypeInferenceProvider for inferred type info from Pyre. A regression test suite was included.
- Added FullRepoManager for metadata inter-process cache handling.

## Fixed

- Fixed usage link in README.
- Fixed type annotation for Mypy compatibility.

## Updated

- Upgraded Pyre to 0.0.38

# 0.2.5 - 2019-12-05

## Added

- Added `extract`, `extractall` and `replace` functions to Matchers API.

## Fixed

- Fixed length restrictions for `AllOf` and `OneOf` so that they can be used with sequence expansion operators.
- Fixed batchable visitors not calling attribute visit functions.
- Fixed typos in docstrings.
- Fixed matcher type exception not being pickleable.

## Deprecated

- Deprecated parsing function parameters with defaults into the `default_params` attribute. They can be found in the `params` attribute instead.

# 0.2.4 - 2019-11-13

## Fixed

- Fixed broken types for sequence matchers.

# 0.2.3 - 2019-11-11

## Added

- Preliminary support for the 3.8 walrus operator.
- CI config and fuzz tests for 3.8.
- Experimental re-entrant codegen API.
- Added `unsafe_skip_copy` optimization to `MetadataWrapper`.
- Matchers API now includes a `findall` function.
- Matchers now have a `MatchMetadataIfTrue` special matcher.

## Updated

- Updated to latest Black release.
- Better type documentation for generated matchers.

## Fixed

- Clarified matchers documentation in several confusing areas.
- Drastically sped up codegen and tests.
- `QualifiedName` now supports imported attributes.
- `ExpressionContext` properly marks loop variables as `STORE`.
- Various typos in documentation are fixed.

## Deprecated

- Deprecated `BasicPositionProvider` and `SyntacticPositionProvider` in favor of `WhitespaceInclusivePositionProvider` and `PositionProvider`.

# 0.2.2 - 2019-10-24

## Added

- Added `deep_with_changes` helper method on CSTNode.
- Added metadata support to matchers.
- Added ability to get the defining node from a `LocalScope` (`FunctionScope`, `ClassScope` or `ComprehensionScope`).

## Updated

- Many changes to LibCST documentation including a new best practices page and updated scope tutorial.
- Exported `CodePosition` and `CodeRange` from `libcst.metadata` instead of `libcst`.

## Fixed

- Disallowed decorating a concrete visit or leave method with `@visit` or `@leave` decorators.
- Renamed position provider classes to be more self-explanatory.
- Fixed trailing newline detection when the last character in a file was from a continuation.
- Fixed `deep_clone` potentially blowing the stack with large LibCST trees.

## Deprecated

- Deprecated `ExtSlice` in favor of `SubscriptElement`.
- Deprecated parsing `Subscript` slices directly into `Index` or `Slice` nodes.

# 0.2.1 - 2019-10-14

## Added

- `Scope.assignments` and `Scope.accesses` APIs to access all references in a scope.
- Scope analysis tutorial.

## Updated

- Supported `` in `Scope.get_qualified_names_for` and `QualifiedName`.
- Enforced identity equality for matchers and immutability of non-dataclass matchers.
- Generalized codegen cleanup steps for all codegen.

## Fixed

- Made `BatchableMetadataProvider` typing covariant over its typevar.
- Fixed LICENSE header on generated matcher file.
- Cleaned up unused internal noqa and on-call specification.

# 0.2.0 - 2019-10-04

## Added

- Added matchers which allow comparing LibCST trees against arbitrary patterns.
- Improved tree manipulation with `deep_remove` and `deep_replace` helper methods on CSTNode.
- Added new metadata providers: parent node and qualified name.

## Updated

- Updated Pyre to latest release.
- Updated scope metadata to provide additional helpers.
- Updated preferred method of removing a node from its parent in a visitor.

## Fixed

- Metadata classes and methods are now exported from "libcst.metadata" instead of several submodules.
- Fixed LICENSE file to explicitly reference individual files in the repo with different licenses.
- Fixed `deep_clone` to correctly clone leaf nodes.
- Fixed all parse entrypoints to always return a tree with no duplicated leaf nodes.

# 0.1.3 - 2019-09-18

## Added

- Added preliminary support for parsing Python 3.5 and Python 3.6 source.
- Added scope analysis metadata provider.
- Added mypy type support for built package.

## Fixed

- Several typos in documentation are fixed.

# 0.1.2 - 2019-08-29

## Added

- Added attribute visitor hooks.
- Added base visit/leave methods which can be subclassed.
- Hypothesis fuzz testing suite, courtesy of Zac Hatfield-Dodds.

## Fixed

- Metadata documentation is much more complete.
- Fixed several whitespace validation issues caught by Hypothesis.
- Parser syntax errors are now used inside parser.

# 0.1.1 - 2019-08-20

## Added

- Metadata interface is now exported.

## Fixed

- Dependencies are now specified with minimum revisions.
- Lots of documentation fixes.

# 0.1 - 2019-07-23

## Added

- First public release of LibCST.
- Complete, fully typed syntax tree for Python 3.6.
- Full suite of tests for each defined node type.

LibCST-1.2.0/CODE_OF_CONDUCT.md

# Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age,
body size, disability, ethnicity, sex characteristics, gender identity and
expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity and
orientation.
## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies within all project spaces, and it also applies
when an individual is representing the project or its community in public
spaces. Examples of representing a project or community include using an
official project e-mail address, posting via an official social media account,
or acting as an appointed representative at an online or offline event.
Representation of a project may be further defined and clarified by project
maintainers.

This Code of Conduct also applies outside the project spaces when there is a
reasonable belief that an individual's behavior may have a negative impact on
the project or its community.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at . All complaints will be reviewed
and investigated and will result in a response that is deemed necessary and
appropriate to the circumstances. The project team is obligated to maintain
confidentiality with regard to the reporter of an incident. Further details of
specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.4, available at
https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

LibCST-1.2.0/CONTRIBUTING.md

# Contributing to LibCST

We want to make contributing to this project as easy and transparent as
possible.

## Our Development Process

This github repo is the source of truth and all changes need to be reviewed in
pull requests.

## Pull Requests

We actively welcome your pull requests.

1. Fork the repo and create your branch from `main`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the documentation.
If you've changed APIs, update the documentation. 4. Ensure the test suite passes by `python -m unittest`. 5. Make sure your code lints. 6. If you haven't already, complete the Contributor License Agreement ("CLA"). ## Contributor License Agreement ("CLA") In order to accept your pull request, we need you to submit a CLA. You only need to do this once to work on any of Facebook's open source projects. Complete your CLA here: ## Issues We use GitHub issues to track public bugs. Please ensure your description is clear and has sufficient instructions to be able to reproduce the issue. Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe disclosure of security bugs. In those cases, please go through the process outlined on that page and do not file a public issue. ## Coding Style We use flake8 and ufmt to enforce coding style. ## License By contributing to LibCST, you agree that your contributions will be licensed under the MIT LICENSE file in the root directory of this source tree. LibCST-1.2.0/LICENSE000066400000000000000000000111341456464173300136140ustar00rootroot00000000000000All contributions towards LibCST are MIT licensed. Some Python files have been derived from the standard library and are therefore PSF licensed. Modifications on these files are dual licensed (both MIT and PSF). These files are: - libcst/_parser/base_parser.py - libcst/_parser/parso/utils.py - libcst/_parser/parso/pgen2/generator.py - libcst/_parser/parso/pgen2/grammar_parser.py - libcst/_parser/parso/python/py_token.py - libcst/_parser/parso/python/tokenize.py - libcst/_parser/parso/tests/test_fstring.py - libcst/_parser/parso/tests/test_tokenize.py - libcst/_parser/parso/tests/test_utils.py - native/libcst/src/tokenizer/core/mod.rs - native/libcst/src/tokenizer/core/string_types.rs Some Python files have been taken from dataclasses and are therefore Apache licensed. Modifications on these files are licensed under Apache 2.0 license. These files are: - libcst/_add_slots.py ------------------------------------------------------------------------------- MIT License Copyright (c) Meta Platforms, Inc. and affiliates. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------- PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. 2. 
Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. ------------------------------------------------------------------------------- APACHE LICENSE, VERSION 2.0 http://www.apache.org/licenses/LICENSE-2.0 LibCST-1.2.0/MANIFEST.in000066400000000000000000000002671456464173300143520ustar00rootroot00000000000000include README.rst LICENSE CODE_OF_CONDUCT.md CONTRIBUTING.md docs/source/*.rst libcst/py.typed include native/Cargo.toml recursive-include native * recursive-exclude native/target *LibCST-1.2.0/README.rst000066400000000000000000000232271456464173300143040ustar00rootroot00000000000000.. image:: docs/source/_static/logo/horizontal.svg :width: 600 px :alt: LibCST A Concrete Syntax Tree (CST) parser and serializer library for Python |support-ukraine| |readthedocs-badge| |ci-badge| |codecov-badge| |pypi-badge| |pypi-download| |notebook-badge| .. |support-ukraine| image:: https://img.shields.io/badge/Support-Ukraine-FFD500?style=flat&labelColor=005BBB :alt: Support Ukraine - Help Provide Humanitarian Aid to Ukraine. :target: https://opensource.fb.com/support-ukraine .. |readthedocs-badge| image:: https://readthedocs.org/projects/libcst/badge/?version=latest&style=flat :target: https://libcst.readthedocs.io/en/latest/ :alt: Documentation .. |ci-badge| image:: https://github.com/Instagram/LibCST/actions/workflows/build.yml/badge.svg :target: https://github.com/Instagram/LibCST/actions/workflows/build.yml?query=branch%3Amain :alt: Github Actions .. 
|codecov-badge| image:: https://codecov.io/gh/Instagram/LibCST/branch/main/graph/badge.svg :target: https://codecov.io/gh/Instagram/LibCST/branch/main :alt: CodeCov .. |pypi-badge| image:: https://img.shields.io/pypi/v/libcst.svg :target: https://pypi.org/project/libcst :alt: PYPI .. |pypi-download| image:: https://pepy.tech/badge/libcst/month :target: https://pepy.tech/project/libcst/month :alt: PYPI Download .. |notebook-badge| image:: https://img.shields.io/badge/notebook-run-579ACA.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC :target: https://mybinder.org/v2/gh/Instagram/LibCST/main?filepath=docs%2Fsource%2Ftutorial.ipynb :alt: Notebook .. intro-start LibCST parses Python 3.0 -> 3.12 source code as a CST tree that keeps all formatting details (comments, whitespaces, parentheses, etc). It's useful for building automated refactoring (codemod) applications and linters. .. intro-end .. why-libcst-intro-start LibCST creates a compromise between an Abstract Syntax Tree (AST) and a traditional Concrete Syntax Tree (CST). By carefully reorganizing and naming node types and fields, we've created a lossless CST that looks and feels like an AST. .. 
why-libcst-intro-end You can learn more about `the value that LibCST provides `__ and `our motivations for the project `__ in `our documentation `__. Try it out with `notebook examples `__. Example expression:: 1 + 2 CST representation: .. code-block:: python BinaryOperation( left=Integer( value='1', lpar=[], rpar=[], ), operator=Add( whitespace_before=SimpleWhitespace( value=' ', ), whitespace_after=SimpleWhitespace( value=' ', ), ), right=Integer( value='2', lpar=[], rpar=[], ), lpar=[], rpar=[], ) Getting Started =============== Examining a sample tree ----------------------- To examine the tree that is parsed from a particular file, do the following:: python -m libcst.tool print Alternatively, you can import LibCST into a Python REPL and use the included parser and pretty printing functions: >>> import libcst as cst >>> from libcst.tool import dump >>> print(dump(cst.parse_expression("(1 + 2)"))) BinaryOperation( left=Integer( value='1', ), operator=Add(), right=Integer( value='2', ), lpar=[ LeftParen(), ], rpar=[ RightParen(), ], ) For a more detailed usage example, `see our documentation `__. Installation ------------ LibCST requires Python 3.9+ and can be easily installed using most common Python packaging tools. We recommend installing the latest stable release from `PyPI `_ with pip: .. code-block:: shell pip install libcst For parsing, LibCST ships with a native extension, so releases are distributed as binary wheels as well as the source code. If a binary wheel is not available for your system (Linux/Windows x86/x64 and Mac x64/arm are covered), you'll need a recent `Rust toolchain `_ for installing. Further Reading --------------- - `Static Analysis at Scale: An Instagram Story. `_ - `Refactoring Python with LibCST. `_ Development ----------- You'll need a recent `Rust toolchain `_ for developing. We recommend using `hatch ` for running tests, linters, etc. Then, start by setting up and building the project: .. code-block:: shell git clone git@github.com:Instagram/LibCST.git libcst cd libcst hatch env create To run the project's test suite, you can: .. code-block:: shell hatch run test You can also run individual tests by using unittest and specifying a module like this: .. code-block:: shell hatch run python -m unittest libcst.tests.test_batched_visitor See the `unittest documentation `_ for more examples of how to run tests. We have multiple linters, including copyright checks and `slotscheck `_ to check the correctness of class ``__slots__``. To run all of the linters: .. code-block:: shell hatch run lint We use `ufmt `_ to format code. To format changes to be conformant, run the following in the root: .. code-block:: shell hatch run format Building ~~~~~~~~ In order to build LibCST, which includes a native parser module, you will need to have the Rust build tool ``cargo`` on your path. You can usually install ``cargo`` using your system package manager, but the most popular way to install cargo is using `rustup `_. To build just the native parser, do the following from the ``native`` directory: .. code-block:: shell cargo build To rebuild the ``libcst.native`` module, from the repo root: .. code-block:: shell hatch env prune && hatch env create Type Checking ~~~~~~~~~~~~~ We use `Pyre `_ for type-checking. To verify types for the library, do the following in the root: .. code-block:: shell hatch run typecheck Generating Documents ~~~~~~~~~~~~~~~~~~~~ To generate documents, do the following in the root: .. 
code-block:: shell

    hatch run docs

Future
======

- Advanced full repository facts providers like fully qualified name and call graph.

License
=======

LibCST is `MIT licensed `_, as found in the LICENSE file.

.. fb-docs-start

Privacy Policy and Terms of Use
===============================

- `Privacy Policy `_
- `Terms of Use `_

.. fb-docs-end

Acknowledgements
================

- Guido van Rossum for creating the parser generator pgen2 (originally used in lib2to3 and forked into parso).
- David Halter for parso, which provides the parser and tokenizer that LibCST sits on top of.
- Zac Hatfield-Dodds for the Hypothesis integration, which continues to help us find bugs.
- Zach Hammer for improving type annotations for Mypy compatibility.
LibCST-1.2.0/apt.txt000066400000000000000000000000131456464173300141300ustar00rootroot00000000000000rustc
cargo
LibCST-1.2.0/codecov.yml000066400000000000000000000000631456464173300147560ustar00rootroot00000000000000coverage:
  status:
    project: no
    patch: yes
LibCST-1.2.0/docs/000077500000000000000000000000001456464173300135430ustar00rootroot00000000000000LibCST-1.2.0/docs/source/000077500000000000000000000000001456464173300150430ustar00rootroot00000000000000LibCST-1.2.0/docs/source/_static/000077500000000000000000000000001456464173300164710ustar00rootroot00000000000000LibCST-1.2.0/docs/source/_static/custom.css000066400000000000000000000006011456464173300205140ustar00rootroot00000000000000/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

.toggle {
    display: block;
    clear: both;
}

.toggle:after {
    content: "Show Code [+]";
}

.toggle.open:before {
    content: "Hide Code [-]";
}

.toggle.open:after {
    content: "";
}
LibCST-1.2.0/docs/source/_static/img/000077500000000000000000000000001456464173300172450ustar00rootroot00000000000000LibCST-1.2.0/docs/source/_static/img/python_scopes.png000066400000000000000000003236611456464173300226530ustar00rootroot00000000000000[binary PNG image data omitted: "python_scopes" diagram]
[binary PNG image data continues and is omitted]
LibCST-1.2.0/docs/source/_static/img/python_scopes.svg000066400000000000000000000733001456464173300226660ustar00rootroot00000000000000[SVG image data omitted: diagram of Python scopes. Its text labels read: "builtin scope: class range(stop) ...", "global scope: ITERATIONS = 10; Cls().fn()", "class scope: class Cls: class_attribute = 20", "function scope: def fn(): for i in range(ITERATIONS): ...", "comprehension scope: return [ i for i in range(10) ]"]
[binary and SVG logo assets under LibCST-1.2.0/docs/source/_static/logo/ omitted: favicon.ico, favicon.svg, favicon_16px.png, favicon_32px.png, horizontal.svg ("LibCST-logo-horizontal"), horizontal_white.svg ("LibCST-logo-horizontal-white"), horizontal_white_sidebar.png, icon.svg ("LibCST-icon"), icon_white.svg ("LibCST-icon-white"), vertical.svg ("LibCST-logo-vertical"), vertical_white.svg ("LibCST-logo-vertical-white")]
LibCST-1.2.0/docs/source/_templates/000077500000000000000000000000001456464173300171745ustar00rootroot00000000000000LibCST-1.2.0/docs/source/_templates/page.html000066400000000000000000000005321456464173300207760ustar00rootroot00000000000000{% extends "!page.html" %}

{% block footer %}

{% endblock %}
LibCST-1.2.0/docs/source/best_practices.rst000066400000000000000000000207221456464173300205660ustar00rootroot00000000000000==============
Best Practices
==============

While there are plenty of ways to interact with LibCST, we recommend some patterns over others. Various best practices are laid out here along with their justifications.

Avoid ``isinstance`` when traversing
------------------------------------

Excessive use of ``isinstance`` implies that you should rewrite your check as a matcher or unroll it into a set of visitor methods. Often, you should make use of :func:`~libcst.ensure_type` to make your type checker aware of a node's type.

Often it is far easier to use :ref:`libcst-matchers` over explicit instance checks in a transform. Matching against some pattern and then extracting a value from a node's child is often easier and far more readable. Unfortunately this clashes with various type-checkers, which do not understand that :func:`~libcst.matchers.matches` guarantees a particular set of children. Instead of instance checks, you should use :func:`~libcst.ensure_type`, which can be inlined and nested.

For example, if you have written the following::

    def get_identifier_name(node: cst.CSTNode) -> Optional[str]:
        if m.matches(node, m.Name()):
            assert isinstance(node, cst.Name)
            return node.value
        return None

You could instead write something like::

    def get_identifier_name(node: cst.CSTNode) -> Optional[str]:
        return (
            cst.ensure_type(node, cst.Name).value
            if m.matches(node, m.Name())
            else None
        )
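If you only need to collect matching nodes rather than transform them, :func:`~libcst.matchers.findall` sidesteps explicit instance checks entirely. A minimal sketch (the sample source here is illustrative only)::

    import libcst as cst
    import libcst.matchers as m

    module = cst.parse_module("x = 1\ny = x + 2\n")
    # Collect the value of every identifier in the tree, in document order,
    # without any isinstance checks.
    names = [
        cst.ensure_type(node, cst.Name).value
        for node in m.findall(module, m.Name())
    ]
    # names is now ["x", "y", "x"]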
""" def __init__(self, functions: Set[str]) -> None: super().__init__() self.functions: Set[str] = functions self.arg_count: int = 0 def visit_Call(self, node: cst.Call) -> None: # See if the call itself is one of our functions we care about if isinstance(node.func, cst.Name) and node.func.value in self.functions: # Loop through each argument for arg in node.args: # See if the argument is an identifier matching what we want to count if isinstance(arg.value, cst.Name) and arg.value.value in {"baz", "foobar"}: self.arg_count += 1 You could instead write something like:: class CountBazFoobarArgs(m.MatcherDecoratableVisitor): """ Given a set of function names, count how many arguments to those function calls are the identifiers "baz" or "foobar". """ def __init__(self, functions: Set[str]) -> None: super().__init__() self.functions: Set[str] = functions self.arg_count: int = 0 self.call_stack: List[str] = [] def visit_Call(self, node: cst.Call) -> None: # Store all calls in a stack if m.matches(node.func, m.Name()): self.call_stack.append(cst.ensure_type(node.func, cst.Name).value) def leave_Call(self, original_node: cst.Call) -> None: # Pop the latest call off the stack if m.matches(node.func, m.Name()): self.call_stack.pop() @m.visit(m.Arg(m.Name("baz") | m.Name("foobar"))) def _count_args(self, node: cst.Arg) -> None: # See if the most shallow call is one we're interested in, so we can # count the args we care about only in calls we care about. if self.call_stack[-1] in self.functions: self.arg_count += 1 While there is more code than the previous example, it is arguably easier to understand and maintain each part of the code. It is also immune to any future changes to LibCST which change's the tree shape. Note that LibCST is traversing the tree completely in both cases, so while the first appears to be faster, it is actually doing the same amount of work as the second. Prefer ``updated_node`` when modifying trees -------------------------------------------- When you are using :class:`~libcst.CSTTransformer` to modify a LibCST tree, only return modifications to ``updated_node``. The ``original_node`` parameter on any ``leave_`` method is provided for book-keeping and is guaranteed to be equal via ``==`` and ``is`` checks to the ``node`` parameter in the corresponding ``visit_`` method. Remember that LibCST trees are immutable, so the only way to make a modification is to return a new tree. Hence, by the time we get to calling ``leave_`` methods, we have an updated tree whose children have been modified. Therefore, you should only return ``original_node`` when you want to explicitly discard changes performed on the node's children. Say you wanted to rename all function calls which were calling global functions. So, you might write the following:: class FunctionRenamer(cst.CSTTransformer): def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.Call: if m.matches(original_node.func, m.Name()): return original_node.with_changes( func=cst.Name( "renamed_" + cst.ensure_type(original_node.func, cst.Name).value ) ) return original_node Consider writing instead:: class FunctionRenamer(cst.CSTTransformer): def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.Call: if m.matches(updated_node.func, m.Name()): return updated_node.with_changes( func=cst.Name( "renamed_" + cst.ensure_type(updated_node.func, cst.Name).value ) ) return updated_node The version that returns modifications to ``original_node`` has a subtle bug. 
Prefer ``updated_node`` when modifying trees
--------------------------------------------

When you are using :class:`~libcst.CSTTransformer` to modify a LibCST tree,
only return modifications to ``updated_node``. The ``original_node``
parameter on any ``leave_`` method is provided for book-keeping and is
guaranteed to be equal via ``==`` and ``is`` checks to the ``node`` parameter
in the corresponding ``visit_`` method.

Remember that LibCST trees are immutable, so the only way to make a
modification is to return a new tree. Hence, by the time we get to calling
``leave_`` methods, we have an updated tree whose children have been
modified. Therefore, you should only return ``original_node`` when you want
to explicitly discard changes performed on the node's children.

Say you wanted to rename all function calls which were calling global
functions. So, you might write the following::

    class FunctionRenamer(cst.CSTTransformer):
        def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.Call:
            if m.matches(original_node.func, m.Name()):
                return original_node.with_changes(
                    func=cst.Name(
                        "renamed_" + cst.ensure_type(original_node.func, cst.Name).value
                    )
                )
            return original_node

Consider writing instead::

    class FunctionRenamer(cst.CSTTransformer):
        def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.Call:
            if m.matches(updated_node.func, m.Name()):
                return updated_node.with_changes(
                    func=cst.Name(
                        "renamed_" + cst.ensure_type(updated_node.func, cst.Name).value
                    )
                )
            return updated_node

The version that returns modifications to ``original_node`` has a subtle bug.
Consider the following code snippet::

    some_func(1, 2, other_func(3))

Running the recommended transform will return us a new code snippet that
looks like this::

    renamed_some_func(1, 2, renamed_other_func(3))

However, running the version which modifies ``original_node`` will instead
return::

    renamed_some_func(1, 2, other_func(3))

That's because only the ``updated_node`` tree contains the modification to
``other_func``. By returning modifications to ``original_node`` instead of
``updated_node``, we accidentally discarded all the work done deeper in the
tree.

.. _libcst-config_best_practice:

Provide a ``config`` when generating code from templates
---------------------------------------------------------

When generating complex trees it is often far easier to pass a string to
:func:`~libcst.parse_statement` or :func:`~libcst.parse_expression` than it
is to manually construct the tree. When using these functions to generate
code, you should always use the ``config`` parameter in order to generate
code that matches the defaults of the module you are modifying. The
:class:`~libcst.Module` class even has a helper attribute
:attr:`~libcst.Module.config_for_parsing` to make it easy to use. This
ensures that line endings and indentation are consistent with the defaults
in the module you are adding the code to.

For example, to add a print statement to the end of a module::

    module = cst.parse_module(some_code_string)
    new_module = module.with_changes(
        body=(
            *module.body,
            cst.parse_statement(
                "print('Hello, world!')",
                config=module.config_for_parsing,
            ),
        ),
    )
    new_code_string = new_module.code

Leaving out the ``config`` parameter means that LibCST will assume some
defaults, which could result in added code that is formatted differently than
the rest of the module it was added to. In the above example, because we used
the config from the already-parsed module, the print statement will be added
with line endings matching the rest of the module. If we neglect the
``config`` parameter, we might accidentally insert a Windows line ending into
a Unix file or vice versa, depending on what system we ran the code under.
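
The same advice applies to expressions. As a small sketch (reusing ``module``
from the example above), you can build an expression that matches the
module's parsing defaults like so::

    expr = cst.parse_expression(
        "value == 42",
        config=module.config_for_parsing,
    )

Because the returned expression was parsed with the module's own
configuration, it can be spliced into that module without introducing
inconsistent whitespace or line-ending defaults.
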
LibCST-1.2.0/docs/source/codemods.rst
========
Codemods
========

LibCST defines a codemod as an automated refactor that can be applied to a
codebase of arbitrary size. Codemods are provided as a framework for writing
higher-order transforms that consist of other, simpler transforms. The
framework includes provisions for quickly creating a command-line interface
to execute a codemod.

.. _libcst-codemod-base:

------------
Codemod Base
------------

All codemods derive from a common base, :class:`~libcst.codemod.Codemod`.
This class includes a context, automatic metadata resolution and multi-pass
transform support. Codemods are intended to be executed using the
:func:`~libcst.codemod.transform_module` interface.

.. autoclass:: libcst.codemod.Codemod
.. autoclass:: libcst.codemod.CodemodContext

As a convenience, LibCST-compatible visitors are provided which extend the
feature-set of :class:`~libcst.codemod.Codemod` to LibCST visitors and
transforms. Remember that :class:`~libcst.codemod.ContextAwareTransformer` is
still a :class:`~libcst.codemod.Codemod`, so you should still execute it
using :func:`~libcst.codemod.transform_module`.

.. autoclass:: libcst.codemod.ContextAwareTransformer
   :exclude-members: transform_module_impl
.. autoclass:: libcst.codemod.ContextAwareVisitor

It is often necessary to bail out of a codemod mid-operation when you realize
that you do not want to operate on a module. This can be for any reason, such
as realizing the module includes some operation that you do not support. If
you wish to skip a module, you can raise the
:class:`~libcst.codemod.SkipFile` exception. For codemods executed using the
:func:`~libcst.codemod.transform_module` interface, all warnings emitted up
to the exception being thrown will be preserved in the result.

.. autoclass:: libcst.codemod.SkipFile

Finally, it's often easier to test codemods by writing verification tests
instead of running them repeatedly on your project. LibCST makes this easy
with :class:`~libcst.codemod.CodemodTest`. Often you can develop the majority
of your codemod using just tests, augmenting functionality when you run into
an unexpected edge case when running it against your repository.

.. autoclass:: libcst.codemod.CodemodTest
   :inherited-members:
   :exclude-members: addCleanup, addTypeEqualityFunc, assertAlmostEqual,
     assertAlmostEquals, assertCountEqual, assertDictContainsSubset,
     assertDictEqual, assertEqual, assertEquals, assertFalse, assertGreater,
     assertGreaterEqual, assertIn, assertIs, assertIsInstance, assertIsNone,
     assertIsNot, assertIsNotNone, assertLess, assertLessEqual,
     assertListEqual, assertLogs, assertMultiLineEqual, assertNotAlmostEqual,
     assertNotAlmostEquals, assertNotEqual, assertNotEquals, assertNotIn,
     assertNotIsInstance, assertNotRegex, assertNotRegexpMatches,
     assertRaises, assertRaisesRegex, assertRaisesRegexp, assertRegex,
     assertRegexpMatches, assertSequenceEqual, assertSetEqual, assertTrue,
     assertTupleEqual, assertWarns, assertWarnsRegex, assert_,
     countTestCases, debug, defaultTestResult, doCleanups, fail, failIf,
     failIfAlmostEqual, failIfEqual, failUnless, failUnlessAlmostEqual,
     failUnlessEqual, failUnlessRaises, failureException, id, longMessage,
     maxDiff, run, setUp, classmethod, setUpClass, shortDescription,
     skipTest, subTest, tearDown, tearDownClass

-------------------
Execution Interface
-------------------

As documented in the Codemod Base section above, codemods are meant to be
programmatically executed using :func:`~libcst.codemod.transform_module`.
Executing in this manner handles all of the featureset of codemods, including
metadata calculation and exception handling.

.. autofunction:: libcst.codemod.transform_module
.. autoclass:: libcst.codemod.TransformResult
.. autoclass:: libcst.codemod.TransformSuccess
.. autoclass:: libcst.codemod.TransformFailure
.. autoclass:: libcst.codemod.TransformSkip
.. autoclass:: libcst.codemod.SkipReason
.. autoclass:: libcst.codemod.TransformExit

--------------------
Command-Line Support
--------------------

LibCST includes additional support to facilitate faster development of
codemods which are to be run at the command-line. This is achieved through
the :class:`~libcst.codemod.CodemodCommand` class and the ``codemod`` utility
which lives inside ``libcst.tool``. The
:class:`~libcst.codemod.CodemodCommand` class provides a codemod description
and an interface to add arguments to the command-line. This is translated to
a custom help message and command-line options that a user can provide when
running a codemod at the command-line.
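
As an illustration, here is a minimal sketch of a hypothetical command (the
class, description and argument names are ours, not part of LibCST)::

    import argparse

    import libcst as cst
    import libcst.matchers as m
    from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand


    class RenamePrintCommand(VisitorBasedCodemodCommand):
        DESCRIPTION: str = "Hypothetical command that renames print() calls."

        @staticmethod
        def add_args(arg_parser: argparse.ArgumentParser) -> None:
            # Each flag added here becomes a command-line option, and its
            # "dest" must match a constructor parameter name below.
            arg_parser.add_argument("--new-name", dest="new_name", default="log")

        def __init__(self, context: CodemodContext, new_name: str) -> None:
            super().__init__(context)
            self.new_name = new_name

        def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.Call:
            if m.matches(updated_node.func, m.Name("print")):
                return updated_node.with_changes(func=cst.Name(self.new_name))
            return updated_node
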
For a brief overview of supported universal options, run the ``codemod``
utility like so::

    python3 -m libcst.tool codemod --help

The utility provides support for gathering up and parallelizing codemods
across a series of files or directories, auto-formatting changed code
according to a configured formatter, generating a unified diff of changes
instead of applying them to files, taking code from stdin and codemodding it
before returning to stdout, and printing progress and warnings to stderr
during execution of a codemod.

Help is auto-customized if a codemod class is provided, including any added
options and the codemod description. For an example, run the ``codemod``
utility like so::

    python3 -m libcst.tool codemod noop.NOOPCommand --help

A second utility, ``list``, can list all available codemods given your
configuration. Run it like so::

    python3 -m libcst.tool list

Finally, to set up a directory for codemodding using these tools, including
additional directories where codemods can be found, use the ``initialize``
utility. To see help for how to use this, run the ``initialize`` utility like
so::

    python3 -m libcst.tool initialize --help

The above tools operate against any codemod which subclasses from
:class:`~libcst.codemod.CodemodCommand`. Remember that
:class:`~libcst.codemod.CodemodCommand` is a subclass of
:class:`~libcst.codemod.Codemod`, so all of the features documented in the
:ref:`libcst-codemod-base` section are available in addition to command-line
support. Any command-line enabled codemod can also be programmatically
instantiated and invoked using the above-documented
:func:`~libcst.codemod.transform_module` interface.

.. autoclass:: libcst.codemod.CodemodCommand
   :exclude-members: transform_module

Additionally, a few convenience classes have been provided which take the
boilerplate out of common types of codemods:

.. autoclass:: libcst.codemod.VisitorBasedCodemodCommand
.. autoclass:: libcst.codemod.MagicArgsCodemodCommand
   :exclude-members: transform_module_impl

--------------------
Command-Line Toolkit
--------------------

Several helpers for constructing a command-line interface are provided. These
are used in the ``codemod`` utility to provide LibCST's de-facto command-line
interface but they are also available to be used directly in the case that
circumstances demand a custom command-line tool.

.. autofunction:: libcst.codemod.gather_files
.. autofunction:: libcst.codemod.exec_transform_with_prettyprint
.. autofunction:: libcst.codemod.parallel_exec_transform_with_prettyprint
.. autoclass:: libcst.codemod.ParallelTransformResult
.. autofunction:: libcst.codemod.diff_code
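
A rough sketch of how these helpers compose, assuming the hypothetical
``RenamePrintCommand`` from the earlier example (the result field names are
from our reading of :class:`~libcst.codemod.ParallelTransformResult`)::

    from libcst.codemod import (
        CodemodContext,
        gather_files,
        parallel_exec_transform_with_prettyprint,
    )

    # Collect every Python file under the current directory, then run the
    # codemod over them in parallel, pretty-printing results as we go.
    files = gather_files(["."])
    result = parallel_exec_transform_with_prettyprint(
        RenamePrintCommand(CodemodContext(), new_name="log"),
        files,
    )
    print(f"Finished: {result.successes} successes, {result.failures} failures")
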
---------------------
Library of Transforms
---------------------

LibCST additionally includes a library of transforms to reduce the need for
boilerplate inside codemods. As of now, the list includes the following
helpers.

.. autoclass:: libcst.codemod.visitors.GatherImportsVisitor
   :no-undoc-members:
.. autoclass:: libcst.codemod.visitors.GatherExportsVisitor
   :no-undoc-members:
.. autoclass:: libcst.codemod.visitors.AddImportsVisitor
   :no-undoc-members:
.. autoclass:: libcst.codemod.visitors.RemoveImportsVisitor
   :no-undoc-members:
.. autoclass:: libcst.codemod.visitors.ApplyTypeAnnotationsVisitor
   :no-undoc-members:
.. autoclass:: libcst.codemod.visitors.GatherUnusedImportsVisitor
   :no-undoc-members:
.. autoclass:: libcst.codemod.visitors.GatherCommentsVisitor
   :no-undoc-members:
.. autoclass:: libcst.codemod.visitors.GatherNamesFromStringAnnotationsVisitor
   :no-undoc-members:
LibCST-1.2.0/docs/source/codemods_tutorial.rst
=====================
Working With Codemods
=====================

Codemods are an abstraction on top of LibCST for performing large-scale
changes to an entire codebase. See :doc:`Codemods ` for the complete
documentation.

-------------------------------
Setting up and Running Codemods
-------------------------------

Let's say you were interested in converting legacy ``.format()`` calls to
shiny new Python 3.6 f-strings. LibCST ships with a command-line interface
known as ``libcst.tool``. This includes a few provisions for working with
codemods at the command-line. It also includes a library of pre-defined
codemods, one of which is a transform that can convert most ``.format()``
calls to f-strings. So, let's use this to give Python 3.6 f-strings a try.

You might be lucky enough that the defaults for LibCST perfectly match your
coding style, but chances are you'll want to customize LibCST for your
repository. Initialize your repository by running the following command in
the root of your repository and then edit the produced
``.libcst.codemod.yaml`` file::

    python3 -m libcst.tool initialize .

The file includes provisions for customizing any generated code marker,
calling an external code formatter such as `black `_, blacklisting patterns
of files you never wish to touch, and a list of modules that contain valid
codemods that can be executed. If you want to write and run codemods specific
to your repository or organization, you can add an in-repo module location to
the list of modules and LibCST will discover codemods in all locations.

Now that your repository is initialized, let's have a quick look at what's
currently available for running. Run the following command from the root of
your repository::

    python3 -m libcst.tool list

You'll see several codemods available to you, one of which is
``convert_format_to_fstring.ConvertFormatStringCommand``. The description to
the right of this codemod indicates that it converts ``.format()`` calls to
f-strings, so let's give it a whirl! Execute the codemod from the root of
your repository like so::

    python3 -m libcst.tool codemod convert_format_to_fstring.ConvertFormatStringCommand .

If you want to try it out on only one file or a specific subdirectory, you
can replace the ``.`` in the above command with a relative directory, file,
list of directories or list of files. While LibCST is walking through your
repository and codemodding files you will see a progress indicator. If
there's anything the codemod can't do or any unexpected syntax errors, you
will also see them on your console as it progresses. If everything works out,
you'll notice that your ``.format()`` calls have been converted to f-strings!

-----------------
Writing a Codemod
-----------------

Codemods use the same principles as the rest of LibCST. They take LibCST's
core, metadata and matchers and package them up as a simple command-line
interface. So, anything you can do with LibCST in isolation you can also do
with a codemod.

Let's say you need to clean up some legacy code which used magic values
instead of constants. You've already got a constants module called
``utils.constants`` and you want to assume that every reference to a raw
string matching a particular constant should be converted to that constant.
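
As a concrete (hypothetical) illustration of the goal, given a constant
``ACTIVE = "active"`` defined in ``utils.constants``, we would like the
codemod to rewrite code like this::

    if status == "active":
        enable_user()

into code like this::

    from utils.constants import ACTIVE

    if status == ACTIVE:
        enable_user()
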
For the simplest version of this codemod, you'll need a command-line tool that takes as arguments the string to replace and the constant to replace it with. You'll also need to ensure that modified modules import the constant itself. So, you can write something similar to the following:: import argparse from ast import literal_eval from typing import Union import libcst as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand from libcst.codemod.visitors import AddImportsVisitor class ConvertConstantCommand(VisitorBasedCodemodCommand): # Add a description so that future codemodders can see what this does. DESCRIPTION: str = "Converts raw strings to constant accesses." @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: # Add command-line args that a user can specify for running this # codemod. arg_parser.add_argument( "--string", dest="string", metavar="STRING", help="String contents that we should look for.", type=str, required=True, ) arg_parser.add_argument( "--constant", dest="constant", metavar="CONSTANT", help="Constant identifier we should replace strings with.", type=str, required=True, ) def __init__(self, context: CodemodContext, string: str, constant: str) -> None: # Initialize the base class with context, and save our args. Remember, the # "dest" for each argument we added above must match a parameter name in # this init. super().__init__(context) self.string = string self.constant = constant def leave_SimpleString( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> Union[cst.SimpleString, cst.Name]: if literal_eval(updated_node.value) == self.string: # Check to see if the string matches what we want to replace. If so, # then we do the replacement. We also know at this point that we need # to import the constant itself. AddImportsVisitor.add_needed_import( self.context, "utils.constants", self.constant, ) return cst.Name(self.constant) # This isn't a string we're concerned with, so leave it unchanged. return updated_node This codemod is pretty simple. It defines a command-line description, sets up to parse a few required command-line args, initializes its own member variables with the command-line args that were parsed for it by ``libcst.tool codemod`` and finally replaces any string which matches our string command-line argument with a constant. It also takes care of adding the import required for the constant to be defined properly. Cool! Let's look at the command-line help for this codemod. Let's assume you saved it as ``constant_folding.py`` inside ``libcst.codemod.commands``. You can get help for the codemod by running the following command:: python3 -m libcst.tool codemod constant_folding.ConvertConstantCommand --help Notice that along with the default arguments, the ``--string`` and ``--constant`` arguments are present in the help, and the command-line description has been updated with the codemod's description string. You'll notice that the codemod also shows up on ``libcst.tool list``. ---------------- Testing Codemods ---------------- Instead of iterating on a codemod by running it repeatedly on a codebase and seeing what happens, we can write a series of unit tests that assert on desired transformations. 
Given the above constant folding codemod that we wrote, we can test it with some code similar to the following:: from libcst.codemod import CodemodTest from libcst.codemod.commands.constant_folding import ConvertConstantCommand class TestConvertConstantCommand(CodemodTest): # The codemod that will be instantiated for us in assertCodemod. TRANSFORM = ConvertConstantCommand def test_noop(self) -> None: before = """ foo = "bar" """ after = """ foo = "bar" """ # Verify that if we don't have a valid string match, we don't make # any substitutions. self.assertCodemod(before, after, string="baz", constant="BAZ") def test_substitution(self) -> None: before = """ foo = "bar" """ after = """ from utils.constants import BAR foo = BAR """ # Verify that if we do have a valid string match, we make a substitution # as well as import the constant. self.assertCodemod(before, after, string="bar", constant="BAR") If we save this as ``test_constant_folding.py`` inside ``libcst.codemod.commands.tests`` then we can execute the tests with the following line:: python3 -m unittest libcst.codemod.commands.tests.test_constant_folding That's all there is to it! LibCST-1.2.0/docs/source/conf.py000066400000000000000000000225211456464173300163400ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe # -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = "LibCST" copyright = "Meta Platforms, Inc. and affiliates" author = "Benjamin Woodruff, Jennifer Taylor, Carl Meyer, Jimmy Lai, Ray Zeng" # The short X.Y version version = "" # The full version, including alpha/beta/rc tags release = "" # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "nbsphinx", "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.graphviz", "sphinx.ext.intersphinx", "sphinx.ext.viewcode", "sphinx_rtd_theme", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build", "**.ipynb_checkpoints"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {"logo_only": True} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} html_logo = "_static/logo/horizontal_white_sidebar.png" html_favicon = "_static/logo/favicon.ico" # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = "LibCSTdoc" # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [(master_doc, "LibCST.tex", "LibCST Documentation", author, "manual")] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "libcst", "LibCST Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "LibCST", "LibCST Documentation", author, "LibCST", "One line description of project.", "Miscellaneous", ) ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. 
epub_exclude_files = ["search.html"] # -- Extension configuration ------------------------------------------------- autodoc_member_order = "bysource" autodoc_default_options = {"members": True, "undoc-members": True} intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} # -- Options for todo extension ---------------------------------------------- # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- autodoc customization def strip_class_signature(app, what, name, obj, options, signature, return_annotation): if what == "class": return (None, return_annotation) return (signature, return_annotation) def strip_class_signature_docstring(app, what, name, obj, options, lines): if what == "class": cls_name = name.split(".")[-1] if lines and lines[0].startswith(cls_name): while lines: del lines[0] def setup(app): app.connect("autodoc-process-signature", strip_class_signature) app.connect("autodoc-process-docstring", strip_class_signature_docstring) app.add_css_file("custom.css") nbsphinx_prolog = r""" {% set docname = 'docs/source/' + env.doc2path(env.docname, base=None) %} .. only:: html .. nbinfo:: Interactive online tutorial: |notebook-badge| .. |notebook-badge| image:: https://img.shields.io/badge/notebook-run-579ACA.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAI
ZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC :target: https://mybinder.org/v2/gh/Instagram/LibCST/main?filepath={{ docname }} :alt: Notebook """ LibCST-1.2.0/docs/source/experimental.rst000066400000000000000000000007141456464173300202700ustar00rootroot00000000000000.. _libcst-experimental: ================= Experimental APIs ================= These APIs may change at any time (including in minor releases) with no notice. You probably shouldn't use them, but if you do, you should pin your application to an exact release of LibCST to avoid breakages. Reentrant Code Generation ------------------------- .. autoclass:: libcst.metadata.ExperimentalReentrantCodegenProvider .. autoclass:: libcst.metadata.CodegenPartial LibCST-1.2.0/docs/source/helpers.rst000066400000000000000000000021171456464173300172340ustar00rootroot00000000000000======= Helpers ======= Helpers are higher level functions built for reducing recurring code boilerplate. We add helpers as method of ``CSTNode`` or ``libcst.helpers`` package based on those principles: - ``CSTNode`` method: simple, read-only and only require data of the direct children of a CSTNode. - ``libcst.helpers``: node transforms or require recursively traversing the syntax tree. Construction Helpers -------------------- Functions that assist in creating a new LibCST tree. .. autofunction:: libcst.helpers.parse_template_module .. autofunction:: libcst.helpers.parse_template_expression .. autofunction:: libcst.helpers.parse_template_statement Transformation Helpers ---------------------- Functions that assist in transforming an existing LibCST node. .. autofunction:: libcst.helpers.insert_header_comments Traversing Helpers ------------------ Functions that assist in traversing an existing LibCST tree. .. autofunction:: libcst.helpers.get_full_name_for_node .. autofunction:: libcst.helpers.get_full_name_for_node_or_raise .. autofunction:: libcst.helpers.ensure_type LibCST-1.2.0/docs/source/index.rst000066400000000000000000000017601456464173300167040ustar00rootroot00000000000000.. LibCST documentation master file, created by sphinx-quickstart on Wed Jul 17 17:05:21 2019. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. ====== LibCST ====== .. include:: ../../README.rst :start-after: intro-start :end-before: intro-end .. toctree:: :maxdepth: 2 :caption: Introduction: why_libcst motivation .. toctree:: :maxdepth: 2 :caption: Tutorial: Parsing and Visitors Metadata Scope Analysis Matchers Codemodding Best Practices .. toctree:: :maxdepth: 2 :caption: Reference: parser nodes visitors metadata matchers codemods helpers experimental Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` .. include:: ../../README.rst :start-after: fb-docs-start :end-before: fb-docs-end LibCST-1.2.0/docs/source/matchers.rst000066400000000000000000000212541456464173300174030ustar00rootroot00000000000000.. _libcst-matchers: ======== Matchers ======== Matchers are provided as a way of asking whether a particular LibCST node and its children match a particular shape. It is possible to write a visitor that tracks attributes using ``visit_`` methods. It is also possible to implement manual instance checking and traversal of a node's children. However, both are cumbersome to write and hard to understand. 
Matchers offer a more concise way of defining what attributes on a node matter when matching against predefined patterns. To accomplish this, a matcher has been created which corresponds to each LibCST node documented in :ref:`libcst-nodes`. Matchers default each of their attributes to the special sentinel matcher :func:`~libcst.matchers.DoNotCare`. When constructing a matcher, you can initialize the node with only the values of attributes that you are concerned with, leaving the rest of the attributes set to :func:`~libcst.matchers.DoNotCare` in order to skip comparing against them. ------------ Matcher APIs ------------ Functions ^^^^^^^^^ Matchers can be used either by calling :func:`~libcst.matchers.matches` or :func:`~libcst.matchers.findall` directly, or by using various decorators to selectively control when LibCST calls visitor functions. .. autofunction:: libcst.matchers.matches .. autofunction:: libcst.matchers.findall .. autofunction:: libcst.matchers.extract .. autofunction:: libcst.matchers.extractall .. autofunction:: libcst.matchers.replace .. _libcst-matcher-decorators: Decorators ^^^^^^^^^^ The following decorators can be placed onto a method in a visitor or transformer in order to convert it into a visitor which is called when the provided matcher is true. .. autofunction:: libcst.matchers.visit .. autofunction:: libcst.matchers.leave The following decorators can be placed onto any existing ``visit_`` or ``leave_`` visitor, as well as any visitor created using either :func:`~libcst.matchers.visit` or :func:`~libcst.matchers.leave`. They control whether the visitor itself gets called or skipped by LibCST when traversing a tree. Note that when a visitor function is skipped, its children will still be visited based on the rules set forth in :ref:`libcst-visitors`. Namely, if you have a separate ``visit_`` visitor that returns ``False`` for a particular node, we will not traverse to its children. .. autofunction:: libcst.matchers.call_if_inside .. autofunction:: libcst.matchers.call_if_not_inside When using matcher decorators, your visitors must subclass from :class:`~libcst.matchers.MatcherDecoratableVisitor` instead of :class:`libcst.CSTVisitor`, and from :class:`~libcst.matchers.MatcherDecoratableTransformer` instead of :class:`libcst.CSTTransformer`. This is so that visitors and transformers not making use of matcher decorators do not pay the extra cost of their implementation. Note that if you do not subclass from :class:`~libcst.matchers.MatcherDecoratableVisitor` or :class:`~libcst.matchers.MatcherDecoratableTransformer`, you can still use the :func:`~libcst.matchers.matches` function. Both of these classes are strict subclasses of their corresponding LibCST base class, so they can be used anywhere that expects a LibCST base class. See :ref:`libcst-visitors` for more information. .. autoclass:: libcst.matchers.MatcherDecoratableVisitor .. autoclass:: libcst.matchers.MatcherDecoratableTransformer Traversal Order ^^^^^^^^^^^^^^^ Visit and leave functions created using :func:`~libcst.matchers.visit` or :func:`~libcst.matchers.leave` follow the traversal order rules laid out in LibCST's visitor :ref:`libcst-visitor-traversal` with one additional rule. Any visit function created using the :func:`~libcst.matchers.visit` decorator will be called **before** a ``visit_`` function if it is defined for your visitor. 
The order in which various visit functions which are created with :func:`~libcst.matchers.visit` are called is indeterminate, but all such functions will be called before calling the ``visit_`` method. Similarly, any leave function created using the :func:`~libcst.matchers.leave` decorator will be called **after** a ``leave_`` function if it is defined for your visitor. The order in which various leave functions which are created with :func:`~libcst.matchers.leave` are called is indeterminate, but all such functions will be called after calling the ``visit_`` function if it is defined for your visitor. This has a few implications. The first is that if you return ``False`` from a ``visit_`` method, we are guaranteed to call your decorated visit functions as well. Second, when modifying a node in both ``leave_`` and a visitor created with :func:`~libcst.matchers.leave`, the ``original_node`` will be unchanged for both and the ``updated_node`` available to the decorated leave method will be the node that is returned by the ``leave_`` method. Chaining modifications across multiple leave functions is supported, but must be done with care. ------------- Matcher Types ------------- Concrete Matchers ^^^^^^^^^^^^^^^^^ For each node found in :ref:`libcst-nodes`, a corresponding concrete matcher has been generated. Each matcher has attributes identical to its LibCST node counterpart. For example, :class:`libcst.Expr` includes the ``value`` and ``semicolon`` attributes, and therefore :class:`libcst.matchers.Expr` similarly includes the same attributes. Just as :class:`libcst.Expr`'s ``value`` is typed as taking a :class:`libcst.BaseExpression`, :class:`libcst.matchers.Expr`'s ``value`` is typed as taking a :class:`libcst.matchers.BaseExpression`. For every node that exists in LibCST, both concrete and abstract, a corresponding matcher has been defined. There are a few special cases to the rules laid out above. For starters, matchers don't support evaluating :class:`~libcst.MaybeSentinel`. There is no way to specify that you wish to match against a :class:`~libcst.MaybeSentinel` except with the :func:`~libcst.matchers.DoNotCare` matcher. This tends not to be an issue in practice because :class:`~libcst.MaybeSentinel` is only found on syntax nodes. While there are base classes such as :class:`libcst.matchers.BaseExpression`, you cannot match directly on them. They are provided for typing purposes only in order to exactly match the types on LibCST node attributes. If you need to match on all concrete subclasses of a base class, we recommend using the special matcher :class:`~libcst.matchers.OneOf`. .. autoclass:: libcst.matchers.BaseMatcherNode Special Matchers ^^^^^^^^^^^^^^^^ Special matchers are matchers that don't have a corresponding LibCST node. Concrete matchers only match against their corresponding LibCST node, limiting their use under certain circumstances. Special matchers fill in the gap by allowing higher-level logic constructs such as inversion. You can use any special matcher in place of a concrete matcher when specifying matcher attributes. Additionally, you can also use the :class:`~libcst.matchers.AllOf` and :class:`~libcst.matchers.OneOf` special matchers in place of a concrete matcher when calling :func:`~libcst.matchers.matches` or using decorators. .. autoclass:: libcst.matchers.OneOf .. autoclass:: libcst.matchers.AllOf .. autoclass:: libcst.matchers.TypeOf .. autofunction:: libcst.matchers.DoesNotMatch .. autoclass:: libcst.matchers.MatchIfTrue .. 
autofunction:: libcst.matchers.MatchRegex .. autoclass:: libcst.matchers.MatchMetadata .. autoclass:: libcst.matchers.MatchMetadataIfTrue .. autofunction:: libcst.matchers.SaveMatchedNode .. autofunction:: libcst.matchers.DoNotCare Sequence Wildcard Matchers ^^^^^^^^^^^^^^^^^^^^^^^^^^ Sequence wildcard matchers are matchers that only get used when constructing a sequence to match against. Not all LibCST nodes have attributes which are sequences, but for those that do, sequence wildcard matchers offer a great degree of flexibility. Unlike all other matcher types, these allow you to match against more than one LibCST node, much like wildcards in regular expressions do. LibCST does not implicitly match on partial sequences for you. So, when matching against a sequence you will need to provide a complete pattern. This often means using helpers such as :func:`~libcst.matchers.ZeroOrMore` as the first and last element of your sequence. Think of it as the difference between Python's `re.match `_ and `re.fullmatch `_ functions. LibCST matchers behave like the latter so that it is possible to specify sequences which must start with, end with or be exactly equal to some pattern. .. autoclass:: libcst.matchers.AtLeastN .. autofunction:: libcst.matchers.ZeroOrMore .. autoclass:: libcst.matchers.AtMostN .. autofunction:: libcst.matchers.ZeroOrOne LibCST-1.2.0/docs/source/matchers_tutorial.ipynb000066400000000000000000000324351456464173300216420ustar00rootroot00000000000000{ "cells": [ { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "=====================\n", "Working with Matchers\n", "=====================\n", "Matchers provide a flexible way of comparing LibCST nodes in order to build ", "more complex transforms. See :doc:`Matchers ` for the complete ", "documentation.\n", "\n", "Basic Matcher Usage\n", "===================\n", "Let's say you are visiting a LibCST :class:`~libcst.Call` node and you want ", "to know if all arguments provided are the literal ``True`` or ``False``. ", "You look at the documentation and see that ``Call.args`` is a sequence of ", ":class:`~libcst.Arg`, and each ``Arg.value`` is a :class:`~libcst.BaseExpression`. ", "In order to verify that each argument is either ``True`` or ``False`` you ", "would have to first loop over ``node.args``, and then check ", "``isinstance(arg.value, cst.Name)`` for each ``arg`` in the loop before ", "finally checking ``arg.value.value in (\"True\", \"False\")``. \n", "\n", "Here's a short example of that in action:\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "nbsphinx": "hidden" }, "outputs": [], "source": [ "import sys\n", "sys.path.append(\"../../\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import libcst as cst\n", "\n", "def is_call_with_booleans(node: cst.Call) -> bool:\n", " for arg in node.args:\n", " if not isinstance(arg.value, cst.Name):\n", " # This can't be the literal True/False, so bail early.\n", " return False\n", " if cst.ensure_type(arg.value, cst.Name).value not in (\"True\", \"False\"):\n", " # This is a Name node, but not the literal True/False, so bail.\n", " return False\n", " # We got here, so all arguments are literal boolean values.\n", " return True\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "We can see from a few examples that this does work as intended. 
", "However, it is an awful lot of boilerplate that was fairly cumbersome to write.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "call_1 = cst.Call(\n", " func=cst.Name(\"foo\"),\n", " args=(\n", " cst.Arg(cst.Name(\"True\")),\n", " ),\n", ")\n", "is_call_with_booleans(call_1)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "call_2 = cst.Call(\n", " func=cst.Name(\"foo\"),\n", " args=(\n", " cst.Arg(cst.Name(\"None\")),\n", " ),\n", ")\n", "is_call_with_booleans(call_2)\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Let's try to do a bit better with matchers. We can make a better function ", "that takes advantage of matchers to get rid of both the instance check and ", "the ``ensure_type`` call, like so:\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import libcst.matchers as m\n", "\n", "def better_is_call_with_booleans(node: cst.Call) -> bool:\n", " for arg in node.args:\n", " if not m.matches(arg.value, m.Name(\"True\") | m.Name(\"False\")):\n", " # Oops, this isn't a True/False literal!\n", " return False\n", " # We got here, so all arguments are literal boolean values.\n", " return True\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "This is a lot shorter and is easier to read as well! We made use of the ", "fact that matchers handles instance checking for us in a safe way. We also ", "made use of the fact that matchers allows us to concisely express multiple ", "match options with the use of Python's or operator. We can also see that ", "it still works on our previous examples:\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "better_is_call_with_booleans(call_1)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "better_is_call_with_booleans(call_2)\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "We still have one more trick up our sleeve though. Matchers don't just ", "allow us to specify which attributes we want to match on exactly. It ", "also allows us to specify rules for matching sequences of nodes, like ", "the list of :class:`~libcst.Arg` nodes that appears in :class:`~libcst.Call`. ", "Let's make use of that, turning our original ``is_call_with_booleans`` ", "function into a call to :func:`~libcst.matchers.matches`:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def best_is_call_with_booleans(node: cst.Call) -> bool:\n", " return m.matches(\n", " node,\n", " m.Call(\n", " args=(\n", " m.ZeroOrMore(m.Arg(m.Name(\"True\") | m.Name(\"False\"))),\n", " ),\n", " ),\n", " )\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "We've turned our original function into a single call to ", ":func:`~libcst.matchers.matches`. As an added benefit, the match node can ", "be read from left to right in a way that makes sense in english: \"match ", "any call with zero or more arguments that are the literal ``True`` or ", "``False``\". 
As we can see, it works as intended: " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "best_is_call_with_booleans(call_1)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "best_is_call_with_booleans(call_2)\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Matcher Decorators\n", "==================\n", "You can already do a lot with just :func:`~libcst.matchers.matches`. It ", "lets you define the shape of nodes you want to match and LibCST takes ", "care of the rest. However, you still need to include a lot of boilerplate ", "into your :ref:`libcst-visitors` in order to identify which nodes you care ", "about. Matcher :ref:`libcst-matcher-decorators` help reduce that boilerplate.\n", "\n", "Say you wanted to invert the boolean literals in functions which ", "match the above ``best_is_call_with_booleans``. You could build something ", "that looks like the following:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class BoolInverter(cst.CSTTransformer):\n", " def __init__(self) -> None:\n", " self.in_call: int = 0\n", "\n", " def visit_Call(self, node: cst.Call) -> None:\n", " if m.matches(node, m.Call(args=(\n", " m.ZeroOrMore(m.Arg(m.Name(\"True\") | m.Name(\"False\"))),\n", " ))):\n", " self.in_call += 1\n", "\n", " def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.Call:\n", " if m.matches(original_node, m.Call(args=(\n", " m.ZeroOrMore(m.Arg(m.Name(\"True\") | m.Name(\"False\"))),\n", " ))):\n", " self.in_call -= 1\n", " return updated_node\n", "\n", " def leave_Name(self, original_node: cst.Name, updated_node: cst.Name) -> cst.Name:\n", " if self.in_call > 0:\n", " if updated_node.value == \"True\":\n", " return updated_node.with_changes(value=\"False\")\n", " if updated_node.value == \"False\":\n", " return updated_node.with_changes(value=\"True\")\n", " return updated_node\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "We can try it out on a source file to see that it works:\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "source = \"def some_func(*params: object) -> None:\\n pass\\n\\nsome_func(True, False)\\nsome_func(1, 2, 3)\\nsome_func()\\n\"\n", "module = cst.parse_module(source)\n", "print(source)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "new_module = module.visit(BoolInverter())\n", "print(new_module.code)\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "While this works its not super elegant. We have to track where we are in ", "the tree so we know when its safe to invert boolean literals which means ", "we have to create a constructor and we have to duplicate matching logic. ", "We could refactor that into a helper like the ``best_is_call_with_booleans`` ", "above, but it only makes things so much better.\n", "\n", "So, let's try rewriting it with matcher decorators instead. 
Note that this ", "includes changing the class we inherit from to ", ":class:`~libcst.matchers.MatcherDecoratableTransformer` in order to enable ", "the matcher decorator feature:\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class BetterBoolInverter(m.MatcherDecoratableTransformer):\n", " @m.call_if_inside(m.Call(args=(\n", " m.ZeroOrMore(m.Arg(m.Name(\"True\") | m.Name(\"False\"))),\n", " )))\n", " def leave_Name(self, original_node: cst.Name, updated_node: cst.Name) -> cst.Name:\n", " if updated_node.value == \"True\":\n", " return updated_node.with_changes(value=\"False\")\n", " if updated_node.value == \"False\":\n", " return updated_node.with_changes(value=\"True\")\n", " return updated_node\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "new_module = module.visit(BetterBoolInverter())\n", "print(new_module.code)\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Using matcher decorators we successfully removed all of the boilerplate ", "around state tracking! The only thing that ``leave_Name`` needs to concern ", "itself with is the actual business logic of the transform. However, it ", "still needs to check to see if the value of the node should be inverted. ", "This is because the ``Call.func`` is a :class:`~libcst.Name` in this case. ", "Let's use another matcher decorator to make that problem go away:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class BestBoolInverter(m.MatcherDecoratableTransformer):\n", " @m.call_if_inside(m.Call(args=(\n", " m.ZeroOrMore(m.Arg(m.Name(\"True\") | m.Name(\"False\"))),\n", " )))\n", " @m.leave(m.Name(\"True\") | m.Name(\"False\"))\n", " def invert_bool_literal(self, original_node: cst.Name, updated_node: cst.Name) -> cst.Name:\n", " return updated_node.with_changes(value=\"False\" if updated_node.value == \"True\" else \"True\")\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "new_module = module.visit(BestBoolInverter())\n", "print(new_module.code)\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "That's it! Instead of using a ``leave_Name`` which modifies all ", ":class:`~libcst.Name` nodes we instead created a matcher visitor that ", "only modifies :class:`~libcst.Name` nodes with the value of ``True`` or ", "``False``. We decorate *that* with :func:`~libcst.matchers.call_if_inside` ", "to ensure we run this on :class:`~libcst.Name` nodes found inside of ", "function calls that only take boolean literals. Using two matcher ", "decorators we got rid of all of the state management as well as all of the ", "cases where we needed to handle nodes we weren't interested in." ] } ], "metadata": { "celltoolbar": "Edit Metadata", "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" } }, "nbformat": 4, "nbformat_minor": 2 } LibCST-1.2.0/docs/source/metadata.rst000066400000000000000000000237071456464173300173620ustar00rootroot00000000000000.. 
_libcst-metadata:

========
Metadata
========

-------------
Metadata APIs
-------------

LibCST ships with a metadata interface that defines a standardized way to
associate nodes in a CST with arbitrary metadata while maintaining the
immutability of the tree. The metadata interface is designed to be
declarative and type safe. Here's a quick example of using the metadata
interface to get line and column numbers of nodes through the
:class:`~libcst.metadata.PositionProvider`:

.. _libcst-metadata-position-example:

.. code-block:: python

    class NamePrinter(cst.CSTVisitor):
        METADATA_DEPENDENCIES = (cst.metadata.PositionProvider,)

        def visit_Name(self, node: cst.Name) -> None:
            pos = self.get_metadata(cst.metadata.PositionProvider, node).start
            print(f"{node.value} found at line {pos.line}, column {pos.column}")

    wrapper = cst.metadata.MetadataWrapper(cst.parse_module("x = 1"))
    result = wrapper.visit(NamePrinter())  # should print "x found at line 1, column 0"

More examples of using the metadata interface can be found on the
:doc:`Metadata Tutorial `.

Accessing Metadata
------------------

To work with metadata you need to wrap a module with a
:class:`~libcst.metadata.MetadataWrapper`. The wrapper provides a
:func:`~libcst.metadata.MetadataWrapper.resolve` function and a
:func:`~libcst.metadata.MetadataWrapper.resolve_many` function to generate
metadata.

.. autoclass:: libcst.metadata.MetadataWrapper
   :special-members: __init__

If you're working with visitors, which extend
:class:`~libcst.MetadataDependent`, metadata dependencies will be
automatically computed when visited by a
:class:`~libcst.metadata.MetadataWrapper` and are accessible through
:func:`~libcst.MetadataDependent.get_metadata`.

.. autoclass:: libcst.MetadataDependent

Providing Metadata
------------------

Metadata is generated through provider classes that can be passed to
:meth:`MetadataWrapper.resolve() ` or declared as a dependency of a
:class:`~libcst.metadata.MetadataDependent`. These providers are then
resolved automatically using methods provided by
:class:`~libcst.metadata.MetadataWrapper`.

In most cases, you should extend
:class:`~libcst.metadata.BatchableMetadataProvider` when writing a provider,
unless you have a particular reason not to use a batchable visitor. Only
extend from :class:`~libcst.metadata.BaseMetadataProvider` if your provider
does not use the visitor pattern for computing metadata for a tree.

.. autoclass:: libcst.BaseMetadataProvider
.. autoclass:: libcst.metadata.BatchableMetadataProvider
.. autoclass:: libcst.metadata.VisitorMetadataProvider
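
For example, here is a minimal sketch of a batchable provider (the provider
name and the metadata it computes are ours, purely for illustration):

.. code-block:: python

    class IsParenthesizedProvider(cst.metadata.BatchableMetadataProvider[bool]):
        """Marks each name with whether it carries its own parentheses."""

        def visit_Name(self, node: cst.Name) -> None:
            # set_metadata associates a value with this node; consumers read
            # it back through get_metadata.
            self.set_metadata(node, len(node.lpar) > 0)

    wrapper = cst.metadata.MetadataWrapper(cst.parse_module("(x) + y"))
    parenthesized = wrapper.resolve(IsParenthesizedProvider)
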
Line and column numbers are available through the metadata interface by declaring one of :class:`~libcst.metadata.PositionProvider` or :class:`~libcst.metadata.WhitespaceInclusivePositionProvider`. For most cases, :class:`~libcst.metadata.PositionProvider` is what you probably want. Node positions are represented with :class:`~libcst.metadata.CodeRange` objects. See :ref:`the above example`. .. autoclass:: libcst.metadata.PositionProvider .. autoclass:: libcst.metadata.WhitespaceInclusivePositionProvider .. autoclass:: libcst.metadata.CodeRange .. autoclass:: libcst.metadata.CodePosition Byte offset and length pairs can be accessed using :class:`~libcst.metadata.ByteSpanPositionProvider`. This provider represents positions using :class:`~libcst.metadata.CodeSpan`, which contains the byte offset of a :class:`~libcst.CSTNode` from the start of the file, and its length (also in bytes). .. autoclass:: libcst.metadata.ByteSpanPositionProvider .. autoclass:: libcst.metadata.CodeSpan Expression Context Metadata --------------------------- .. autoclass:: libcst.metadata.ExpressionContextProvider :no-undoc-members: .. autoclass:: libcst.metadata.ExpressionContext .. _libcst-scope-metadata: Scope Metadata -------------- Scopes contain and separate variables from each other. Scopes enforce that a local variable name bound inside of a function is not available outside of that function. While many programming languages are "block-scoped", Python is `function-scoped `_. New scopes are created for classes, functions, and comprehensions. Other block constructs like conditional statements, loops, and try…except don't create their own scope. There are five different types of scope in Python: :class:`~libcst.metadata.BuiltinScope`, :class:`~libcst.metadata.GlobalScope`, :class:`~libcst.metadata.ClassScope`, :class:`~libcst.metadata.FunctionScope`, and :class:`~libcst.metadata.ComprehensionScope`. .. image:: _static/img/python_scopes.png :alt: Diagram showing how the above 5 scopes are nested in each other :width: 400 :align: center LibCST allows you to inspect these scopes to see what local variables are assigned or accessed within. .. note:: Import statements bring new symbols into scope that are declared in other files. As such, they are represented by :class:`~libcst.metadata.Assignment` for scope analysis purposes. Dotted imports (e.g. ``import a.b.c``) generate multiple :class:`~libcst.metadata.Assignment` objects — one for each module. When analyzing references, only the most specific access is recorded. For example, the above ``import a.b.c`` statement generates three :class:`~libcst.metadata.Assignment` objects: one for ``a``, one for ``a.b``, and one for ``a.b.c``. A reference for ``a.b.c`` records an access only for the last assignment, while a reference for ``a.d`` only records an access for the :class:`~libcst.metadata.Assignment` representing ``a``. .. autoclass:: libcst.metadata.ScopeProvider :no-undoc-members: .. autoclass:: libcst.metadata.BaseAssignment :no-undoc-members: .. autoclass:: libcst.metadata.Access .. autoclass:: libcst.metadata.Assignment .. autoclass:: libcst.metadata.BuiltinAssignment .. autoclass:: libcst.metadata.Scope :no-undoc-members: :special-members: __contains__, __getitem__, __iter__ .. autoclass:: libcst.metadata.BuiltinScope :no-undoc-members: .. autoclass:: libcst.metadata.GlobalScope :no-undoc-members: .. autoclass:: libcst.metadata.FunctionScope .. autoclass:: libcst.metadata.ClassScope .. autoclass:: libcst.metadata.ComprehensionScope ..
autoclass:: libcst.metadata.Assignments :special-members: __contains__, __getitem__, __iter__ .. autoclass:: libcst.metadata.Accesses :special-members: __contains__, __getitem__, __iter__ Qualified Name Metadata ----------------------- A qualified name provides an unambiguous way to locate the definition of a variable; it was introduced for classes and functions in `PEP-3155 `_. QualifiedNameProvider provides the possible :class:`~libcst.metadata.QualifiedName` values for a given :class:`~libcst.CSTNode`. We don't call it a `fully qualified name `_ because the name is relative to the current module and doesn't take the hierarchy of the code repository into account. For fully qualified names, there's :class:`~libcst.metadata.FullyQualifiedNameProvider`, which is similar to the above but takes the current module's location (relative to some Python root folder, usually the repository's root) into account. .. autoclass:: libcst.metadata.QualifiedNameSource .. autoclass:: libcst.metadata.QualifiedName .. autoclass:: libcst.metadata.QualifiedNameProvider :no-undoc-members: .. autoclass:: libcst.metadata.FullyQualifiedNameProvider :no-undoc-members: Parent Node Metadata -------------------- A :class:`~libcst.CSTNode` only has attributes that link to its child nodes, so only top-down tree traversal is possible. Sometimes a user may want to access the parent :class:`~libcst.CSTNode` for more information, or traverse the tree in a bottom-up manner. We provide :class:`~libcst.metadata.ParentNodeProvider` for those use cases. .. autoclass:: libcst.metadata.ParentNodeProvider :no-undoc-members: File Path Metadata ------------------ This provides the absolute file path on disk for any module being visited. Requires an active :class:`~libcst.metadata.FullRepoManager` when using this provider. .. autoclass:: libcst.metadata.FilePathProvider :no-undoc-members: Type Inference Metadata ----------------------- `Type inference `__ automatically infers the data types of expressions, enabling a deeper understanding of source code. In Python, type checkers like `Mypy `_ or `Pyre `__ analyze `type annotations `__ and infer types for expressions. :class:`~libcst.metadata.TypeInferenceProvider` is backed by the `Pyre Query API `__, which requires `watchman `_ to be set up for incremental typechecking. :class:`~libcst.metadata.FullRepoManager` is built to manage the interprocess communication with Pyre. .. autoclass:: libcst.metadata.TypeInferenceProvider :no-undoc-members: .. autoclass:: libcst.metadata.FullRepoManager :no-undoc-members: :special-members: __init__ LibCST-1.2.0/docs/source/metadata_tutorial.ipynb000066400000000000000000000123431456464173300216100ustar00rootroot00000000000000{ "cells": [ { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "=====================\n", "Working with Metadata\n", "=====================\n", "LibCST handles node metadata in a somewhat unusual manner in order to maintain the immutability of the tree. See :doc:`Metadata ` for the complete documentation. \n", "\n", "Providing Metadata\n", "==================\n", "While it's possible to write visitors that gather metadata from a tree ad hoc, using the provider interface gives you the advantages of type safety and of dependency declaration, which automatically runs your providers in other visitors. 
For most cases, you'll want to extend :class:`~libcst.BatchableMetadataProvider` as providers that extend from that class can be resolved more efficiently in batches.\n", "\n", "Here's an example of a simple metadata provider that marks :class:`~libcst.Name` nodes that are function parameters:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "nbsphinx": "hidden" }, "outputs": [], "source": [ "import sys\n", "sys.path.append(\"../../\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import libcst as cst\n", "\n", "\n", "class IsParamProvider(cst.BatchableMetadataProvider[bool]):\n", " \"\"\"\n", " Marks Name nodes found as a parameter to a function.\n", " \"\"\"\n", " def __init__(self) -> None:\n", " super().__init__()\n", " self.is_param = False\n", " \n", " def visit_Param(self, node: cst.Param) -> None:\n", " # Mark the child Name node as a parameter \n", " self.set_metadata(node.name, True)\n", " \n", " def visit_Name(self, node: cst.Name) -> None:\n", " # Mark all other Name nodes as not parameters\n", " if not self.get_metadata(type(self), node, False):\n", " self.set_metadata(node, False)" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Line and Column Metadata\n", "------------------------\n", "LibCST ships with two built-in providers for line and column metadata. See :ref:`Position Metadata` for more information.\n", "\n", "Accessing Metadata\n", "==================\n", "Once you have a provider, the metadata interface gives you two primary ways of working with your providers. The first is using the resolve methods provided by :class:`~libcst.MetadataWrapper` and the second is through declaring metadata dependencies on a :class:`~libcst.CSTTransformer` or :class:`~libcst.CSTVisitor`.\n", "\n", "Using the :class:`~libcst.MetadataWrapper`\n", "------------------------------------------\n", "The metadata wrapper class provides a way to associate metadata with a module as well as a simple interface to run providers. Here's an example of using a wrapper with the provider we just wrote:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "module = cst.parse_module(\"x\")\n", "wrapper = cst.MetadataWrapper(module)\n", "\n", "isparam = wrapper.resolve(IsParamProvider)\n", "x_name_node = wrapper.module.body[0].body[0].value\n", "\n", "print(isparam[x_name_node]) # should print False" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Using Dependency Declaration\n", "----------------------------\n", "The visitors that ship with LibCST can declare metadata providers as dependencies that will be run automatically when visited by a wrapper. Here is a visitor that prints all names that are function parameters." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from libcst.metadata import PositionProvider\n", "\n", "class ParamPrinter(cst.CSTVisitor):\n", " METADATA_DEPENDENCIES = (IsParamProvider, PositionProvider,)\n", "\n", " def visit_Name(self, node: cst.Name) -> None:\n", " # Only print out names that are parameters\n", " if self.get_metadata(IsParamProvider, node):\n", " pos = self.get_metadata(PositionProvider, node).start\n", " print(f\"{node.value} found at line {pos.line}, column {pos.column}\")\n", "\n", "\n", "module = cst.parse_module(\"def foo(x):\\n y = 1\\n return x + y\")\n", "wrapper = cst.MetadataWrapper(module)\n", "result = wrapper.visit(ParamPrinter()) # NB: wrapper.visit not module.visit" ] } ], "metadata": { "celltoolbar": "Edit Metadata", "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" } }, "nbformat": 4, "nbformat_minor": 2 } LibCST-1.2.0/docs/source/motivation.rst000066400000000000000000000076131456464173300177710ustar00rootroot00000000000000========== Motivation ========== When designing LibCST, we used the following list of motivations. Exact Representation -------------------- * **Trees should be rewritable.** It should always be possible to take a valid python file, parse it to a CST using LibCST and then write that tree back out exactly, byte for byte. When changing nodes in the tree, changes to the original source file should be localized to the area represented by the changed portion of the tree. Effectively, for all valid python inputs, the following equation should be true:: parse_module(some_input).code == some_input * **Nodes should be constructed exactly as written in code.** No magic should happen on initialization and all construction should be explicit. Nodes should directly correlate to the code they represent and vice versa. Ease of Traversal ----------------- * **As flat as possible.** There shouldn't be an AsyncFunction wrapper containing a FunctionDef just because the grammar specifies it that way. Instead, we should make a FunctionDef node and give it an async attribute. Instead of representing parenthesis as wrapper nodes, they should be attached to the expressions that they operate on. In any scenario where we could achieve deduplication of LibCST code through extra layers in the resulting tree, we will opt for more code in order to make traversal simpler. * **As regular as possible.** A module should always have a list of statements, even if that list is empty or only has one item. Irregularity makes tree inspection more difficult. * **As high-level as possible.** The tree should be as close to the Python AST as possible. It should not be necessary to understand Python syntax in order to traverse the tree correctly. You should not have to know to ignore commas when traversing a list of parameters for a function. You should not have to use helper functions to traverse or recognize expressions wrapped in parenthesis. A LibCST node will represent its semantic operation in python with as little syntactic trivia exposed as possible. Ease of Modification -------------------- * **All nodes should be fully typed.** A module is a list of statements, not a list of untyped nodes. A function has a name, parameters and an optional return. 
It should be clear where to access various attributes of each node and what are the valid node types that can be used for that attribute. * **Additional runtime (in addition to static types) constraints.** It shouldn't be possible to construct a node that can't be serialized correctly or that would result in invalid code. You shouldn't be able to construct a Name node with a string that isn't a valid python identifier. Strong constraints here should allow us to perform multiple passes safely without serializing and re-parsing the tree after each pass. * **Sane defaults.** If I construct a node, I shouldn't have to supply whitespace, commas or other required syntax unless I want to. I should be able to treat the node in abstract, specifying only the semantics of the resulting code. * **Reasonably intelligent ownership of whitespace.** A statement should own the comments directly above it, and any trailing comments on the same line. If we delete that statement, the whitespace should disappear with it. * **It should be easy to change a single field** in an existing node without needing to modify or fix up adjacent nodes. Syntactic trivia such as commas or proper spacing between nodes should be children of the node they logically belong to so that inserting or removing a node does not require modifications to adjacent nodes. * **Reparentable.** It should be possible to move or copy a node from one part of the tree easily. Well Tested ----------- * **All nodes should be fully tested.** It should not be possible to break upstream parsing or rendering code with a change to LibCST. Parsing, rendering and verifying functionality are all tested as completely as possible for all defined nodes. LibCST-1.2.0/docs/source/nodes.rst000066400000000000000000000226401456464173300167050ustar00rootroot00000000000000.. _libcst-nodes: Nodes ===== CSTNode and its subclasses cover Python's full grammar in a whitespace-sensitive fashion, forming LibCST's concrete syntax tree. Many of these nodes are designed to `behave similarly to Python's abstract syntax tree `_. CSTNode ------- The base node type which all other nodes derive from. .. autoclass:: libcst.CSTNode Module ------ A node that represents an entire python module. .. autoclass:: libcst.Module Expressions ----------- An expression is anything that represents a value (e.g. it could be returned from a function). All expressions subclass from :class:`~libcst.BaseExpression`. Expression can be parsed with :func:`~libcst.parse_expression` or as part of a statement or module using :func:`~libcst.parse_statement` or :func:`~libcst.parse_module`. .. autoclass:: libcst.BaseExpression Names and Object Attributes ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: libcst.Name .. autoclass:: libcst.Attribute Operations and Comparisons ^^^^^^^^^^^^^^^^^^^^^^^^^^ Operation and Comparison nodes combine one or more expressions with an operator_. .. _operator: Operators_ .. autoclass:: libcst.UnaryOperation .. autoclass:: libcst.BinaryOperation .. autoclass:: libcst.BooleanOperation .. autoclass:: libcst.Comparison .. autoclass:: libcst.ComparisonTarget Control Flow ^^^^^^^^^^^^ .. autoclass:: libcst.Asynchronous .. autoclass:: libcst.Await .. autoclass:: libcst.Yield .. autoclass:: libcst.From .. autoclass:: libcst.IfExp Lambdas and Function Calls ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: libcst.Lambda .. autoclass:: libcst.Call .. autoclass:: libcst.Arg Literal Values ^^^^^^^^^^^^^^ .. autoclass:: libcst.Ellipsis Numbers ''''''' .. autoclass:: libcst.BaseNumber .. 
autoclass:: libcst.Integer .. autoclass:: libcst.Float .. autoclass:: libcst.Imaginary Strings ''''''' .. autoclass:: libcst.BaseString .. autoclass:: libcst.SimpleString .. autoclass:: libcst.ConcatenatedString Formatted Strings (f-strings) ''''''''''''''''''''''''''''' .. autoclass:: libcst.FormattedString .. autoclass:: libcst.BaseFormattedStringContent .. autoclass:: libcst.FormattedStringText .. autoclass:: libcst.FormattedStringExpression Collections ^^^^^^^^^^^ Simple Collections '''''''''''''''''' .. autoclass:: libcst.Tuple .. autoclass:: libcst.BaseList .. autoclass:: libcst.List .. autoclass:: libcst.BaseSet .. autoclass:: libcst.Set Simple Collection Elements '''''''''''''''''''''''''' .. autoclass:: libcst.BaseElement .. autoclass:: libcst.Element .. autoclass:: libcst.StarredElement Dictionaries '''''''''''' .. autoclass:: libcst.BaseDict .. autoclass:: libcst.Dict Dictionary Elements ''''''''''''''''''' .. autoclass:: libcst.BaseDictElement .. autoclass:: libcst.DictElement .. autoclass:: libcst.StarredDictElement Comprehensions ^^^^^^^^^^^^^^ .. autoclass:: libcst.BaseComp .. autoclass:: libcst.BaseSimpleComp .. autoclass:: libcst.GeneratorExp .. autoclass:: libcst.ListComp .. autoclass:: libcst.SetComp .. autoclass:: libcst.DictComp .. autoclass:: libcst.CompFor .. autoclass:: libcst.CompIf Subscripts and Slices ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: libcst.Subscript .. autoclass:: libcst.BaseSlice .. autoclass:: libcst.Index .. autoclass:: libcst.Slice .. autoclass:: libcst.SubscriptElement Parenthesis, Brackets, and Braces ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: libcst.LeftParen .. autoclass:: libcst.RightParen .. autoclass:: libcst.LeftSquareBracket .. autoclass:: libcst.RightSquareBracket .. autoclass:: libcst.LeftCurlyBrace .. autoclass:: libcst.RightCurlyBrace Statements ---------- Statements represent a "line of code" or a control structure with other lines of code, such as an :class:`~libcst.If` block. All statements subclass from :class:`~libcst.BaseSmallStatement` or :class:`~libcst.BaseCompoundStatement`. Statements can be parsed with :func:`~libcst.parse_statement` or as part of a module using :func:`~libcst.parse_module`. Simple Statements ^^^^^^^^^^^^^^^^^ Statements which at most have expressions as child attributes. .. autoclass:: libcst.BaseSmallStatement .. autoclass:: libcst.AnnAssign .. autoclass:: libcst.Assert .. autoclass:: libcst.Assign .. autoclass:: libcst.AugAssign .. autoclass:: libcst.Break .. autoclass:: libcst.Continue .. autoclass:: libcst.Del .. autoclass:: libcst.Expr .. autoclass:: libcst.Global .. autoclass:: libcst.Import .. autoclass:: libcst.ImportFrom .. autoclass:: libcst.Nonlocal .. autoclass:: libcst.Pass .. autoclass:: libcst.Raise .. autoclass:: libcst.Return Compound Statements ^^^^^^^^^^^^^^^^^^^ Statements that have one or more statement blocks as a child attribute. .. autoclass:: libcst.BaseCompoundStatement .. autoclass:: libcst.ClassDef .. autoclass:: libcst.For .. autoclass:: libcst.FunctionDef .. autoclass:: libcst.If .. autoclass:: libcst.Try .. autoclass:: libcst.While .. autoclass:: libcst.With Helper Nodes ^^^^^^^^^^^^ Nodes that are used by various statements to represent some syntax, but are not statements on their own and cannot be used outside of the statements they belong with. .. Annotation is in the expression module for import-order reasons, it's most-often used a a helper for statements (e.g. functions) .. autoclass:: libcst.Annotation .. autoclass:: libcst.AsName .. autoclass:: libcst.AssignTarget .. 
autoclass:: libcst.BaseAssignTargetExpression .. autoclass:: libcst.BaseDelTargetExpression .. autoclass:: libcst.Decorator .. autoclass:: libcst.Else .. autoclass:: libcst.ExceptHandler .. autoclass:: libcst.Finally .. autoclass:: libcst.ImportAlias .. autoclass:: libcst.NameItem .. Params are in the expression module for import-order reasons, but it makes sense to group these closer to FunctionDef than with Lambda. .. autoclass:: libcst.Parameters .. autoclass:: libcst.Param .. autoclass:: libcst.ParamSlash .. autoclass:: libcst.ParamStar .. autoclass:: libcst.WithItem Statement Blocks ^^^^^^^^^^^^^^^^ Nodes that represent some group of statements. .. autoclass:: libcst.BaseSuite .. autoclass:: libcst.SimpleStatementLine .. autoclass:: libcst.SimpleStatementSuite .. autoclass:: libcst.IndentedBlock Operators --------- Nodes that are used to signify an operation to be performed on a variable or value. Unary Operators ^^^^^^^^^^^^^^^ Nodes that are used with :class:`~libcst.UnaryOperation` to perform some unary operation. .. class:: libcst.BitInvert .. class:: libcst.Minus .. class:: libcst.Not .. autoclass:: libcst.Plus In addition, :class:`~libcst.BaseUnaryOp` is defined purely for typing and isinstance checks. .. class:: libcst.BaseUnaryOp Boolean Operators ^^^^^^^^^^^^^^^^^ Nodes that are used with :class:`~libcst.BooleanOperation` to perform some boolean operation. .. class:: libcst.And .. autoclass:: libcst.Or In addition, :class:`~libcst.BaseBooleanOp` is defined purely for typing and isinstance checks. .. class:: libcst.BaseBooleanOp Binary Operators ^^^^^^^^^^^^^^^^ Nodes that are used with :class:`~libcst.BinaryOperation` to perform some binary operation. .. class:: libcst.Add .. class:: libcst.BitAnd .. class:: libcst.BitOr .. class:: libcst.BitXor .. class:: libcst.Divide .. class:: libcst.FloorDivide .. class:: libcst.LeftShift .. class:: libcst.MatrixMultiply .. class:: libcst.Modulo .. class:: libcst.Multiply .. class:: libcst.Power .. class:: libcst.RightShift .. autoclass:: libcst.Subtract In addition, :class:`~libcst.BaseBinaryOp` is defined purely for typing and isinstance checks. .. class:: libcst.BaseBinaryOp Comparison Operators ^^^^^^^^^^^^^^^^^^^^ Nodes that are used with :class:`~libcst.Comparison` to perform some comparison operation. .. class:: libcst.Equal .. class:: libcst.GreaterThan .. class:: libcst.GreaterThanEqual .. class:: libcst.In .. class:: libcst.Is .. class:: libcst.LessThan .. autoclass:: libcst.LessThanEqual .. autoclass:: libcst.NotEqual .. class:: libcst.IsNot .. autoclass:: libcst.NotIn In addition, :class:`~libcst.BaseCompOp` is defined purely for typing and isinstance checks. .. class:: libcst.BaseCompOp Augmented Assignment Operators ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Nodes that are used with :class:`~libcst.AugAssign` to perform some augmented assignment. .. class:: libcst.AddAssign .. class:: libcst.BitAndAssign .. class:: libcst.BitOrAssign .. class:: libcst.BitXorAssign .. class:: libcst.DivideAssign .. class:: libcst.FloorDivideAssign .. class:: libcst.LeftShiftAssign .. class:: libcst.MatrixMultiplyAssign .. class:: libcst.ModuloAssign .. class:: libcst.MultiplyAssign .. class:: libcst.PowerAssign .. class:: libcst.RightShiftAssign .. autoclass:: libcst.SubtractAssign In addition, :class:`~libcst.BaseAugOp` is defined purely for typing and isinstance checks. .. class:: libcst.BaseAugOp Miscellaneous ------------- Miscellaneous nodes that are purely syntactic trivia. 
While Python requires these nodes in order to parse a module, statement or expression, they do not carry any meaning on their own. .. autoclass:: libcst.AssignEqual .. autoclass:: libcst.Colon .. autoclass:: libcst.Comma .. autoclass:: libcst.Dot .. autoclass:: libcst.ImportStar .. autoclass:: libcst.Semicolon Whitespace ---------- Nodes that encapsulate pure whitespace. .. autoclass:: libcst.Comment .. autoclass:: libcst.EmptyLine .. autoclass:: libcst.Newline .. autoclass:: libcst.ParenthesizedWhitespace .. autoclass:: libcst.SimpleWhitespace .. autoclass:: libcst.TrailingWhitespace .. autoclass:: libcst.BaseParenthesizableWhitespace Maybe Sentinel -------------- .. autoclass:: libcst.MaybeSentinel LibCST-1.2.0/docs/source/parser.rst000066400000000000000000000023441456464173300170700ustar00rootroot00000000000000Parsing ======= The parser functions accept source code and an optional configuration object, and will generate :class:`~libcst.CSTNode` objects. :func:`~libcst.parse_module` is the most useful function here, since it accepts the entire contents of a file and returns a new tree, but :func:`~libcst.parse_expression` and :func:`~libcst.parse_statement` are useful when inserting new nodes into the tree, because they're easier to use than the equivalent node constructors. >>> import libcst as cst >>> cst.parse_expression("1 + 2") BinaryOperation( left=Integer( value='1', lpar=[], rpar=[], ), operator=Add( whitespace_before=SimpleWhitespace( value=' ', ), whitespace_after=SimpleWhitespace( value=' ', ), ), right=Integer( value='2', lpar=[], rpar=[], ), lpar=[], rpar=[], ) .. autofunction:: libcst.parse_module .. autofunction:: libcst.parse_expression .. autofunction:: libcst.parse_statement .. autoclass:: libcst.PartialParserConfig Syntax Errors ------------- .. autoclass:: libcst.ParserSyntaxError :members: message, raw_line, raw_column, editor_line, editor_column :special-members: __str__ LibCST-1.2.0/docs/source/scope_tutorial.ipynb000066400000000000000000000240251456464173300211410ustar00rootroot00000000000000{ "cells": [ { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ " .. _libcst-scope-tutorial:\n", "\n", "==============\n", "Scope Analysis\n", "==============\n", "Scope analysis keeps track of assignments and accesses, which is useful for automatic code refactoring. If you're not familiar with scope analysis, see :ref:`Scope Metadata ` for more detail about scope metadata. This tutorial demonstrates some use cases of scope analysis. If you're new to metadata, see :doc:`Metadata Tutorial ` to get started.\n", "Given source code, scope analysis collects every variable :class:`~libcst.metadata.Assignment` (or :class:`~libcst.metadata.BuiltinAssignment` if it's a builtin) and :class:`~libcst.metadata.Access` and stores them in :class:`~libcst.metadata.Scope` containers.\n", "\n", ".. note::\n", " The scope analysis only handles local variable name access and cannot handle simple string type annotation forward references. See :class:`~libcst.metadata.Access`.\n", "\n", "The following example source code contains a couple of unused imports (``f``, ``i``, ``m`` and ``n``) and undefined variable references (``func_undefined`` and ``var_undefined``). Scope analysis helps us identify those unused imports and undefined variables so that we can automatically warn developers and prevent bugs while they're developing.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "nbsphinx": "hidden" }, "outputs": [], "source": [ "import sys\n", "sys.path.append(\"../../\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "source = \"\"\"\\\n", "import a, b, c as d, e as f # expect to keep: a, c as d\n", "from g import h, i, j as k, l as m # expect to keep: h, j as k\n", "from n import o # expect to be removed entirely\n", "\n", "a()\n", "\n", "def fun():\n", " d()\n", "\n", "class Cls:\n", " att = h.something\n", " \n", " def __new__(self) -> \"Cls\":\n", " var = k.method()\n", " func_undefined(var_undefined)\n", "\"\"\"" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "With a parsed :class:`~libcst.Module`, we construct a :class:`~libcst.metadata.MetadataWrapper` object, which provides a :func:`~libcst.metadata.MetadataWrapper.resolve` function to resolve metadata given a metadata provider.\n", ":class:`~libcst.metadata.ScopeProvider` is used here for analyzing scope; three types of scope (:class:`~libcst.metadata.GlobalScope`, :class:`~libcst.metadata.FunctionScope` and :class:`~libcst.metadata.ClassScope`) appear in this example.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import libcst as cst\n", "\n", "\n", "wrapper = cst.metadata.MetadataWrapper(cst.parse_module(source))\n", "scopes = set(wrapper.resolve(cst.metadata.ScopeProvider).values())\n", "for scope in scopes:\n", " print(scope)" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Warn on unused imports and undefined references\n", "===============================================\n", "To find all unused imports, we iterate through :attr:`~libcst.metadata.Scope.assignments` (we focus on :class:`~libcst.Import`/:class:`~libcst.ImportFrom` assignments); an assignment is unused when its :attr:`~libcst.metadata.BaseAssignment.references` is empty. To find all undefined references, we iterate through :attr:`~libcst.metadata.Scope.accesses`; an access is an undefined reference when its :attr:`~libcst.metadata.Access.referents` is empty. When reporting the warning to the developer, we'll want to include the line number and column offset along with the suggestion to make it clearer. 
We can get position information from :class:`~libcst.metadata.PositionProvider` and print the warnings as follows.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from collections import defaultdict\n", "from typing import Dict, Union, Set\n", "\n", "unused_imports: Dict[Union[cst.Import, cst.ImportFrom], Set[str]] = defaultdict(set)\n", "undefined_references: Dict[cst.CSTNode, Set[str]] = defaultdict(set)\n", "ranges = wrapper.resolve(cst.metadata.PositionProvider)\n", "for scope in scopes:\n", " for assignment in scope.assignments:\n", " node = assignment.node\n", " if isinstance(assignment, cst.metadata.Assignment) and isinstance(\n", " node, (cst.Import, cst.ImportFrom)\n", " ):\n", " if len(assignment.references) == 0:\n", " unused_imports[node].add(assignment.name)\n", " location = ranges[node].start\n", " print(\n", " f\"Warning on line {location.line:2d}, column {location.column:2d}: Imported name `{assignment.name}` is unused.\"\n", " )\n", "\n", " for access in scope.accesses:\n", " if len(access.referents) == 0:\n", " node = access.node\n", " location = ranges[node].start\n", " print(\n", " f\"Warning on line {location.line:2d}, column {location.column:2d}: Name reference `{node.value}` is not defined.\"\n", " )\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Automatically Remove Unused Import\n", "==================================\n", "Unused import is a common code suggestion provided by lint tools like `flake8 F401 `_ ``imported but unused``.\n", "Even though reporting unused imports is already useful, with LibCST we can provide an automatic fix that removes them. That makes the suggestion more actionable and saves developers' time.\n", "\n", "An import statement may import multiple names; we want to remove the unused names from the import statement. If none of the names in the import statement are used, we remove the entire import.\n", "To remove the unused names, we implement ``RemoveUnusedImportTransformer`` by subclassing :class:`~libcst.CSTTransformer`. We override ``leave_Import`` and ``leave_ImportFrom`` to modify the import statements.\n", "When we find the import node in the lookup table, we iterate through all ``names`` and keep the used names in ``names_to_keep``.\n", "If ``names_to_keep`` is empty, all names are unused and we remove the entire import node.\n", "Otherwise, we update the import node, keeping only the used names."
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class RemoveUnusedImportTransformer(cst.CSTTransformer):\n", " def __init__(\n", " self, unused_imports: Dict[Union[cst.Import, cst.ImportFrom], Set[str]]\n", " ) -> None:\n", " self.unused_imports = unused_imports\n", "\n", " def leave_import_alike(\n", " self,\n", " original_node: Union[cst.Import, cst.ImportFrom],\n", " updated_node: Union[cst.Import, cst.ImportFrom],\n", " ) -> Union[cst.Import, cst.ImportFrom, cst.RemovalSentinel]:\n", " if original_node not in self.unused_imports:\n", " return updated_node\n", " names_to_keep = []\n", " for name in updated_node.names:\n", " asname = name.asname\n", " if asname is not None:\n", " name_value = asname.name.value\n", " else:\n", " name_value = name.name.value\n", " if name_value not in self.unused_imports[original_node]:\n", " names_to_keep.append(name.with_changes(comma=cst.MaybeSentinel.DEFAULT))\n", " if len(names_to_keep) == 0:\n", " return cst.RemoveFromParent()\n", " else:\n", " return updated_node.with_changes(names=names_to_keep)\n", "\n", " def leave_Import(\n", " self, original_node: cst.Import, updated_node: cst.Import\n", " ) -> cst.Import:\n", " return self.leave_import_alike(original_node, updated_node)\n", "\n", " def leave_ImportFrom(\n", " self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom\n", " ) -> cst.ImportFrom:\n", " return self.leave_import_alike(original_node, updated_node)\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "After the transform, we use ``.code`` to generate fixed code and all unused names are fixed as expected! The difflib is used to show only changed part and only import lines are updated as expected." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import difflib\n", "fixed_module = wrapper.module.visit(RemoveUnusedImportTransformer(unused_imports))\n", "\n", "# Use difflib to show the changes to verify unused imports are removed as expected.\n", "print(\n", " \"\".join(\n", " difflib.unified_diff(source.splitlines(1), fixed_module.code.splitlines(1))\n", " )\n", ")" ] } ], "metadata": { "celltoolbar": "Raw Cell Format", "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" } }, "nbformat": 4, "nbformat_minor": 2 } LibCST-1.2.0/docs/source/tutorial.ipynb000066400000000000000000000232211456464173300177450ustar00rootroot00000000000000{ "cells": [ { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "====================\n", "Parsing and Visiting\n", "====================\n", "\n", "LibCST provides helpers to parse source code string as concrete syntax tree. In order to perform static analysis to identify patterns in the tree or modify the tree programmatically, we can use visitor pattern to traverse the tree. In this tutorial, we demonstrate a common three-step-workflow to build an automated refactoring (codemod) application:\n", "\n", "1. `Parse Source Code <#Parse-Source-Code>`_\n", "2. `Build Visitor or Transformer <#Build-Visitor-or-Transformer>`_\n", "3. 
`Generate Source Code <#Generate-Source-Code>`_\n", "\n", "Parse Source Code\n", "=================\n", "LibCST provides various helpers to parse source code as a concrete syntax tree: :func:`~libcst.parse_module`, :func:`~libcst.parse_expression` and :func:`~libcst.parse_statement` (see :doc:`Parsing ` for more detail). The default :class:`~libcst.CSTNode` repr provides pretty-print formatting for reading the tree easily." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "nbsphinx": "hidden" }, "outputs": [], "source": [ "import sys\n", "sys.path.append(\"../../\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import libcst as cst\n", "\n", "cst.parse_expression(\"1 + 2\")" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Example: add typing annotation from pyi stub file to Python source\n", "------------------------------------------------------------------\n", "Python `typing annotations `_ were added in Python 3.5. Some Python applications add typing annotations in separate ``pyi`` stub files in order to support old Python versions. When applications decide to stop supporting old Python versions, they'll want to automatically copy the type annotations from a pyi file to a source file. Here we demonstrate how to do that easily using LibCST. The first step is to parse the pyi stub and source files as trees." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "py_source = '''\n", "class PythonToken(Token):\n", " def __repr__(self):\n", " return ('TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)' %\n", " self._replace(type=self.type.name))\n", "\n", "def tokenize(code, version_info, start_pos=(1, 0)):\n", " \"\"\"Generate tokens from a the source code (string).\"\"\"\n", " lines = split_lines(code, keepends=True)\n", " return tokenize_lines(lines, version_info, start_pos=start_pos)\n", "'''\n", "\n", "pyi_source = '''\n", "class PythonToken(Token):\n", " def __repr__(self) -> str: ...\n", "\n", "def tokenize(\n", " code: str, version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0)\n", ") -> Generator[PythonToken, None, None]: ...\n", "'''\n", "\n", "source_tree = cst.parse_module(py_source)\n", "stub_tree = cst.parse_module(pyi_source)" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Build Visitor or Transformer\n", "============================\n", "For traversing and modifying the tree, LibCST provides Visitor and Transformer classes similar to the `ast module `_. To implement a visitor (read only) or transformer (read/write), simply implement a subclass of :class:`~libcst.CSTVisitor` or :class:`~libcst.CSTTransformer` (see :doc:`Visitors ` for more detail).\n", "In the typing example, we need to implement a visitor to collect typing annotations from the stub tree and a transformer to copy the annotations to the function signatures. In the visitor, we implement ``visit_FunctionDef`` to collect annotations. Later in the transformer, we implement ``leave_FunctionDef`` to add the collected annotations."
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from typing import List, Tuple, Dict, Optional\n", "\n", "\n", "class TypingCollector(cst.CSTVisitor):\n", " def __init__(self):\n", " # stack for storing the canonical name of the current function\n", " self.stack: List[Tuple[str, ...]] = []\n", " # store the annotations\n", " self.annotations: Dict[\n", " Tuple[str, ...], # key: tuple of canonical class/function name\n", " Tuple[cst.Parameters, Optional[cst.Annotation]], # value: (params, returns)\n", " ] = {}\n", "\n", " def visit_ClassDef(self, node: cst.ClassDef) -> Optional[bool]:\n", " self.stack.append(node.name.value)\n", "\n", " def leave_ClassDef(self, node: cst.ClassDef) -> None:\n", " self.stack.pop()\n", "\n", " def visit_FunctionDef(self, node: cst.FunctionDef) -> Optional[bool]:\n", " self.stack.append(node.name.value)\n", " self.annotations[tuple(self.stack)] = (node.params, node.returns)\n", " return (\n", " False\n", " ) # pyi files don't support inner functions, return False to stop the traversal.\n", "\n", " def leave_FunctionDef(self, node: cst.FunctionDef) -> None:\n", " self.stack.pop()\n", "\n", "\n", "class TypingTransformer(cst.CSTTransformer):\n", " def __init__(self, annotations):\n", " # stack for storing the canonical name of the current function\n", " self.stack: List[Tuple[str, ...]] = []\n", " # store the annotations\n", " self.annotations: Dict[\n", " Tuple[str, ...], # key: tuple of canonical class/function name\n", " Tuple[cst.Parameters, Optional[cst.Annotation]], # value: (params, returns)\n", " ] = annotations\n", "\n", " def visit_ClassDef(self, node: cst.ClassDef) -> Optional[bool]:\n", " self.stack.append(node.name.value)\n", "\n", " def leave_ClassDef(\n", " self, original_node: cst.ClassDef, updated_node: cst.ClassDef\n", " ) -> cst.CSTNode:\n", " self.stack.pop()\n", " return updated_node\n", "\n", " def visit_FunctionDef(self, node: cst.FunctionDef) -> Optional[bool]:\n", " self.stack.append(node.name.value)\n", " return (\n", " False\n", " ) # pyi files don't support inner functions, return False to stop the traversal.\n", "\n", " def leave_FunctionDef(\n", " self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef\n", " ) -> cst.CSTNode:\n", " key = tuple(self.stack)\n", " self.stack.pop()\n", " if key in self.annotations:\n", " annotations = self.annotations[key]\n", " return updated_node.with_changes(\n", " params=annotations[0], returns=annotations[1]\n", " )\n", " return updated_node\n", "\n", "\n", "visitor = TypingCollector()\n", "stub_tree.visit(visitor)\n", "transformer = TypingTransformer(visitor.annotations)\n", "modified_tree = source_tree.visit(transformer)" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Generate Source Code\n", "====================\n", "Generating the source code from a cst tree is as easy as accessing the :attr:`~libcst.Module.code` attribute on :class:`~libcst.Module`. After the code generation, we often use `ufmt `_ to reformate the code to keep a consistent coding style." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(modified_tree.code)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Use difflib to show the changes to verify type annotations were added as expected.\n", "import difflib\n", "\n", "print(\n", " \"\".join(\n", " difflib.unified_diff(py_source.splitlines(1), modified_tree.code.splitlines(1))\n", " )\n", ")" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "For the sake of efficiency, we don't want to re-write the file when the transformer doesn't change the source code. We can use :meth:`~libcst.CSTNode.deep_equals` to check whether two trees have the same source code. Note that ``==`` checks the identity of tree object instead of representation." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if not modified_tree.deep_equals(source_tree):\n", " ... # write to file" ] } ], "metadata": { "celltoolbar": "Edit Metadata", "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" } }, "nbformat": 4, "nbformat_minor": 2 } LibCST-1.2.0/docs/source/visitors.rst000066400000000000000000000123711456464173300174570ustar00rootroot00000000000000.. _libcst-visitors: Visitors ======== .. autoclass:: libcst.CSTVisitor .. autoclass:: libcst.CSTTransformer .. autofunction:: libcst.RemoveFromParent .. autoclass:: libcst.RemovalSentinel .. autoclass:: libcst.FlattenSentinel Visit and Leave Helper Functions -------------------------------- While it is possible to subclass from :class:`~libcst.CSTVisitor` or :class:`~libcst.CSTTransformer` and override the ``on_visit``/``on_leave``/``on_visit_attribute``/``on_leave_attribute`` functions directly, it is not recommended. The default implementation for both visitors will look up any ``visit_``, ``leave_``, ``visit__`` and ``leave__`` method on the visitor subclass and call them directly. If such a function exists for the node in question, the visitor base class will call the relevant function, respecting the above outlined semantics. If the function does not exist, the visitor base class will assume that you do not care about that node and visit its children for you without requiring a default implementation. Much like ``on_visit``, ``visit_`` return a boolean specifying whether or not LibCST should visit a node's children. As a convenience, you can return ``None`` instead of a boolean value from your ``visit_`` functions. Returning a ``None`` value is treated as a request for default behavior, which causes the visitor to traverse children. It is equivalent to returning ``True``, but requires no explicit return. For example, the below visitor will visit every function definition, traversing to its children only if the function name doesn't include the word "foo". Notice that we don't need to provide our own ``on_visit`` or ``on_leave`` function, nor do we need to provide visit and leave functions for the rest of the nodes which we do not care about. This will have the effect of visiting all strings not inside of functions that have "foo" in the name. Note that we take advantage of default behavior when we decline to return a value in ``visit_SimpleString``. .. 
code-block:: python class FooingAround(libcst.CSTVisitor): def visit_FunctionDef(self, node: libcst.FunctionDef) -> bool: return "foo" not in node.name.value def visit_SimpleString(self, node: libcst.SimpleString) -> None: print(node.value) An example Python REPL using the above visitor is as follows:: >>> import libcst >>> demo = libcst.parse_module("'abc'\n'123'\ndef foo():\n 'not printed'") >>> _ = demo.visit(FooingAround()) 'abc' '123' .. _libcst-visitor-traversal: Traversal Order --------------- Traversal of any parsed tree directly matches the order that tokens appear in the source which was parsed. LibCST will first call ``on_visit`` for the node. Then, for each of the node's child attributes, LibCST will call ``on_visit_attribute`` for the node's attribute, followed by running the same visit algorithm on each child node in the node's attribute. Then, ``on_leave_attribute`` is called. After each attribute has been fully traversed, LibCST will call ``on_leave`` for the node. Note that LibCST will only call ``on_visit_attribute`` and ``on_leave_attribute`` for attributes in which there might be a LibCST node as a child. It will not call attribute visitors for attributes which are built-in python types. For example, take the following simple tree generated by calling ``parse_expression("1+2")``. .. code-block:: python BinaryOperation( left=Integer( value='1', lpar=[], rpar=[], ), operator=Add( whitespace_before=SimpleWhitespace( value='', ), whitespace_after=SimpleWhitespace( value='', ), ), right=Integer( value='2', lpar=[], rpar=[], ), lpar=[], rpar=[], ) Assuming you have a visitor that overrides every convenience helper method available, methods will be called in this order: .. code-block:: python visit_BinaryOperation visit_BinaryOperation_lpar leave_BinaryOperation_lpar visit_BinaryOperation_left visit_Integer visit_Integer_lpar leave_Integer_lpar visit_Integer_rpar leave_Integer_rpar leave_Integer leave_BinaryOperation_left visit_BinaryOperation_operator visit_Add visit_Add_whitespace_before visit_SimpleWhitespace leave_SimpleWhitespace leave_Add_whitespace_before visit_Add_whitespace_after visit_SimpleWhitespace leave_SimpleWhitespace leave_Add_whitespace_after leave_Add leave_BinaryOperation_operator visit_BinaryOperation_right visit_Integer visit_Integer_lpar leave_Integer_lpar visit_Integer_rpar leave_Integer_rpar leave_Integer leave_BinaryOperation_right visit_BinaryOperation_rpar leave_BinaryOperation_rpar leave_BinaryOperation Batched Visitors ---------------- A batchable visitor class is provided to facilitate performing operations that can be performed in parallel in a single traversal over a CST. An example of this is :ref:`metadata computation`. .. autoclass:: libcst.BatchableCSTVisitor .. autofunction:: libcst.visit_batched LibCST-1.2.0/docs/source/why_libcst.rst000066400000000000000000000361361456464173300177510ustar00rootroot00000000000000=========== Why LibCST? =========== **Python's ast module already provides a syntax tree. Why do we need another?** .. include:: ../../README.rst :start-after: why-libcst-intro-start :end-before: why-libcst-intro-end Abstract Syntax Trees (AST) =========================== Let's look at Python's AST for the following code snippet:: fn(1, 2) # calls fn .. container:: toggle .. code-block:: python ast.Module( body=[ ast.Expr( value=ast.Call( func=ast.Name("fn", ctx=ast.Load()), args=[ast.Num(n=1), ast.Num(n=2)], keywords=[], ), ), ], ) .. 
graphviz:: digraph ast { layout=dot; rankdir=LR; splines=polyline; ranksep=.6; nodesep=.4; dpi=300; bgcolor=transparent; node [ style=filled, color="#fb8d3f", fontcolor="#4b4f54", fillcolor="#fdd2b3", fontname="Source Code Pro Semibold", penwidth="2", ]; edge [ color="#999999", fontcolor="#4b4f54", fontname="Source Code Pro Semibold", fontsize=12, penwidth=2, ]; Name [label=" Name('fn') "]; Load [label=" Load() "]; Num1 [label=" Num(n=1) "]; Num2 [label=" Num(n=2) "]; Module -> Expr [label="body[0]"] Expr -> Call [label="value"] Call -> Name [label="func"] Name -> Load [label="ctx"] Call -> Num1 [label="args[0]"] Call -> Num2 [label="args[1]"] } This syntax tree does a great job of preserving the semantics of the original code, and the structure of the tree is relatively simple. However, given only the AST, it wouldn't be possible to reprint the original source code. `Like a JPEG `_, the Abstract Syntax Tree is lossy. - The comment we left at the line is gone. - There's a newline at the end of the file, but the AST doesn't tell us that. It also doesn't tell us if it's ``\n``, ``\r``, or ``\r\n``. - We've lost some information about the whitespace between the first and second argument. Abstract Syntax Trees are good for tools like compilers and type checkers where the semantics of code is important, but the exact syntax isn't. Concrete Syntax Trees (CST) =========================== A popular CST library for Python is `lib2to3 `_, which powers tools like `2to3 `_ and `Black `_. Let's look at the syntax tree it generates for the same piece of code:: fn(1, 2) # calls fn .. container:: toggle .. code-block:: python Node( file_input, children=[ Node( simple_stmt, children=[ Node( power, children=[ Leaf(NAME, "fn", prefix=""), Node( trailer, children=[ Leaf(LPAR, "(", prefix=""), Node( arglist, children=[ Leaf(NUMBER, "1", prefix=""), Leaf(COMMA, ",", prefix=""), Leaf(NUMBER, "2", prefix=" "), ], ), Leaf(RPAR, ")", prefix=""), ], ), ], ), Leaf( NEWLINE, "\n", prefix=" # calls fn", ), ], prefix="" ), Leaf(ENDMARKER, "", prefix=""), ], prefix="", ) .. graphviz:: digraph cst { layout=dot; rankdir=TB; ordering=out; splines=line; ranksep=.3; nodesep=.3; dpi=300; bgcolor=transparent; node [ style=filled, color="#fb8d3f", fontcolor="#4b4f54", fillcolor="#fdd2b3", fontname="Source Code Pro Semibold", penwidth="2", group=main, ]; edge [ color="#999999", fontcolor="#4b4f54", fontname="Source Code Pro Semibold", fontsize=12, penwidth=2, ]; ENDMARKER [label=" ENDMARKER('') ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; NAME_fn [label=" NAME('fn') ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; LPAR [label=" LPAR('(') ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; NUMBER_1 [label=" NUMBER('1') ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; COMMA [label=" COMMA(',') ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; NUMBER_2 [label=" NUMBER('2', prefix=' ') ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; RPAR [label=" RPAR(')') ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; NEWLINE [label=" NEWLINE('\\n', prefix=' # calls fn') ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; file_input -> simple_stmt [label="0"] file_input -> ENDMARKER [label="1"] simple_stmt -> power [label="0"] simple_stmt -> NEWLINE [label="1"] power -> NAME_fn [label="0"]; power -> trailer [label="1"]; trailer -> LPAR [label="0"]; trailer -> NUMBER_1 [label="1"]; trailer -> COMMA [label="2"]; trailer -> NUMBER_2 [label="3"]; trailer -> RPAR [label="4"]; } This tree is lossless. 
It retains enough information to reprint the exact input code by storing whitespace information in ``prefix`` properties. This makes it a "Concrete" Syntax Tree, or CST. However, much of the semantics of the code is now difficult to understand and extract. lib2to3 presents a tree that closely matches `Python's grammar `_ which can be hard to manipulate for complex operations. - Adding or removing a parameter from ``fn`` requires careful preservation of ``COMMA`` nodes. - Whitespace and comment ownership is unclear. Deleting nodes could result in invalid generated code. Concrete Syntax Trees are good for operations that don't significantly change the tree and tools that do not wish to change the semantics of the code itself, such as `Black `_. LibCST ====== LibCST takes a compromise between the two formats outlined above. Like a CST, LibCST preserves all whitespace and can be reprinted exactly. Like an AST, LibCST parses source into nodes that represent the semantics of the code. .. code-block:: python fn(1, 2) # calls fn .. container:: toggle .. code-block:: python Module( body=[ SimpleStatementLine( body=[ Expr( value=Call( func=Name( value='fn', lpar=[], rpar=[], ), args=[ Arg( value=Integer( value='1', lpar=[], rpar=[], ), keyword=None, equal=MaybeSentinel.DEFAULT, comma=Comma( whitespace_before=SimpleWhitespace( value='', ), whitespace_after=SimpleWhitespace( value=' ', ), ), star='', whitespace_after_star=SimpleWhitespace( value='', ), whitespace_after_arg=SimpleWhitespace( value='', ), ), Arg( value=Integer( value='2', lpar=[], rpar=[], ), keyword=None, equal=MaybeSentinel.DEFAULT, comma=MaybeSentinel.DEFAULT, star='', whitespace_after_star=SimpleWhitespace( value='', ), whitespace_after_arg=SimpleWhitespace( value='', ), ), ], lpar=[], rpar=[], whitespace_after_func=SimpleWhitespace( value='', ), whitespace_before_args=SimpleWhitespace( value='', ), ), semicolon=MaybeSentinel.DEFAULT, ), ], leading_lines=[], trailing_whitespace=TrailingWhitespace( whitespace=SimpleWhitespace( value=' ', ), comment=Comment( value='# calls fn', ), newline=Newline( value=None, ), ), ), ], header=[], footer=[], encoding='utf-8', default_indent=' ', default_newline='\n', has_trailing_newline=True, ) .. 
graphviz:: digraph libcst { layout=dot; rankdir=TB; splines=line; ranksep=0.5; nodesep=1.0; dpi=300; bgcolor=transparent; node [ style=filled, color="#fb8d3f", fontcolor="#4b4f54", fillcolor="#fdd2b3", fontname="Source Code Pro Semibold", penwidth="2", group=main, ]; edge [ color="#999999", fontcolor="#4b4f54", fontname="Source Code Pro Semibold", fontsize=12, penwidth=2, ]; Module [label="Module"]; SimpleStatementLine [label="SimpleStatementLine"]; Expr [label="Expr"]; Call [label="Call"]; Name [label="Name"]; NameValue [label=" 'fn' ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; Arg1 [label="Arg"]; Integer1 [label="Integer"]; Integer1Value [label=" '1' ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; Comma [label="Comma"]; SimpleWhitespace2 [label="SimpleWhitespace", color="#777777", fillcolor="#eeeeee"]; SimpleWhitespace2Value [label=" ' ' ", color="#777777", fillcolor="#cccccc", shape=box]; Arg2 [label="Arg"]; Integer2 [label="Integer"]; Integer2Value [label=" '2' ", color="#3e99ed", fillcolor="#b8d9f8", shape=box]; TrailingWhitespace [label="TrailingWhitespace", color="#777777", fillcolor="#eeeeee"]; SimpleWhitespace1 [label="SimpleWhitespace", color="#777777", fillcolor="#eeeeee"]; SimpleWhitespace1Value [label=" ' ' ", color="#777777", fillcolor="#cccccc", shape=box]; Comment1 [label="Comment", color="#777777", fillcolor="#eeeeee"]; Comment1Value [label=" '# calls fn' ", color="#777777", fillcolor="#cccccc", shape=box]; Module -> SimpleStatementLine [label="body[0]"]; SimpleStatementLine -> Expr [label="body[0]"]; Expr -> Call [label="value"]; Call -> Name [label="func"]; Name -> NameValue [label="value"]; Call -> Arg1 [label="args[0]"]; Arg1 -> Integer1 [label="value"]; Integer1 -> Integer1Value [label="value"]; Arg1 -> Comma [label="comma"]; Comma -> SimpleWhitespace2 [label="whitespace_after"]; SimpleWhitespace2 -> SimpleWhitespace2Value [label="value"]; Call -> Arg2 [label="args[1]"]; Arg2 -> Integer2 [label="value"]; Integer2 -> Integer2Value [label="value"]; SimpleStatementLine -> TrailingWhitespace [label="trailing_whitespace"]; TrailingWhitespace -> SimpleWhitespace1 [label="whitespace"]; SimpleWhitespace1 -> SimpleWhitespace1Value [label="value"]; TrailingWhitespace -> Comment1 [label="comment"]; Comment1 -> Comment1Value [label="value"]; } LibCST preserves whitespace by parsing it using an internal whitespace parser and assigning it to relevant nodes. This allows for much more granular whitespace ownership and greatly reduces the amount of work necessary to perform complex manipulations. Additionally, it is fully typed. A node's children are well-defined and match the semantics of Python. However, this does come with some downsides. - It is more difficult to implement tools that focus almost exclusively on whitespace on top of LibCST instead of lib2to3. For example, `Black `_ would need to modify whitespace nodes instead of prefix strings, making its implementation much more complex. - The equivalent AST for a Python module will usually be simpler. We must preserve whitespace ownership by assigning it to nodes that make the most sense which requires us to introduce nodes such as :class:`~libcst.Comma`. - Parsing with LibCST will always be slower than Python's AST due to the extra work needed to assign whitespace correctly. Nevertheless, we think that the trade-offs made in LibCST are worthwhile and offer a great deal of flexibility and power. 
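To make that round-trip guarantee concrete, here is a minimal sketch (the transformer name ``RenameFn`` is illustrative, not part of the library) showing that an unmodified tree reprints byte-for-byte and that a targeted edit leaves surrounding trivia, including the comment, untouched:

.. code-block:: python

    import libcst as cst

    source = "fn(1, 2)  # calls fn\n"
    module = cst.parse_module(source)

    # Lossless: reprinting the unmodified tree yields the input exactly.
    assert module.code == source

    class RenameFn(cst.CSTTransformer):
        def leave_Name(
            self, original_node: cst.Name, updated_node: cst.Name
        ) -> cst.Name:
            # Only the Name node changes; whitespace and comments stay intact.
            if updated_node.value == "fn":
                return updated_node.with_changes(value="gn")
            return updated_node

    print(module.visit(RenameFn()).code)  # gn(1, 2)  # calls fn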
LibCST-1.2.0/libcst/000077500000000000000000000000001456464173300140675ustar00rootroot00000000000000LibCST-1.2.0/libcst/__init__.py000066400000000000000000000207701456464173300162060ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from libcst._batched_visitor import BatchableCSTVisitor, visit_batched from libcst._exceptions import MetadataException, ParserSyntaxError from libcst._flatten_sentinel import FlattenSentinel from libcst._maybe_sentinel import MaybeSentinel from libcst._metadata_dependent import MetadataDependent from libcst._nodes.base import CSTNode, CSTValidationError from libcst._nodes.expression import ( Annotation, Arg, Asynchronous, Attribute, Await, BaseAssignTargetExpression, BaseComp, BaseDelTargetExpression, BaseDict, BaseDictElement, BaseElement, BaseExpression, BaseFormattedStringContent, BaseList, BaseNumber, BaseSet, BaseSimpleComp, BaseSlice, BaseString, BinaryOperation, BooleanOperation, Call, Comparison, ComparisonTarget, CompFor, CompIf, ConcatenatedString, Dict, DictComp, DictElement, Element, Ellipsis, Float, FormattedString, FormattedStringExpression, FormattedStringText, From, GeneratorExp, IfExp, Imaginary, Index, Integer, Lambda, LeftCurlyBrace, LeftParen, LeftSquareBracket, List, ListComp, Name, NamedExpr, Param, Parameters, ParamSlash, ParamStar, RightCurlyBrace, RightParen, RightSquareBracket, Set, SetComp, SimpleString, Slice, StarredDictElement, StarredElement, Subscript, SubscriptElement, Tuple, UnaryOperation, Yield, ) from libcst._nodes.module import Module from libcst._nodes.op import ( Add, AddAssign, And, AssignEqual, BaseAugOp, BaseBinaryOp, BaseBooleanOp, BaseCompOp, BaseUnaryOp, BitAnd, BitAndAssign, BitInvert, BitOr, BitOrAssign, BitXor, BitXorAssign, Colon, Comma, Divide, DivideAssign, Dot, Equal, FloorDivide, FloorDivideAssign, GreaterThan, GreaterThanEqual, ImportStar, In, Is, IsNot, LeftShift, LeftShiftAssign, LessThan, LessThanEqual, MatrixMultiply, MatrixMultiplyAssign, Minus, Modulo, ModuloAssign, Multiply, MultiplyAssign, Not, NotEqual, NotIn, Or, Plus, Power, PowerAssign, RightShift, RightShiftAssign, Semicolon, Subtract, SubtractAssign, ) from libcst._nodes.statement import ( AnnAssign, AsName, Assert, Assign, AssignTarget, AugAssign, BaseCompoundStatement, BaseSmallStatement, BaseStatement, BaseSuite, Break, ClassDef, Continue, Decorator, Del, Else, ExceptHandler, ExceptStarHandler, Expr, Finally, For, FunctionDef, Global, If, Import, ImportAlias, ImportFrom, IndentedBlock, Match, MatchAs, MatchCase, MatchClass, MatchKeywordElement, MatchList, MatchMapping, MatchMappingElement, MatchOr, MatchOrElement, MatchPattern, MatchSequence, MatchSequenceElement, MatchSingleton, MatchStar, MatchTuple, MatchValue, NameItem, Nonlocal, ParamSpec, Pass, Raise, Return, SimpleStatementLine, SimpleStatementSuite, Try, TryStar, TypeAlias, TypeParam, TypeParameters, TypeVar, TypeVarTuple, While, With, WithItem, ) from libcst._nodes.whitespace import ( BaseParenthesizableWhitespace, Comment, EmptyLine, Newline, ParenthesizedWhitespace, SimpleWhitespace, TrailingWhitespace, ) from libcst._parser.entrypoints import parse_expression, parse_module, parse_statement from libcst._parser.types.config import ( KNOWN_PYTHON_VERSION_STRINGS, PartialParserConfig, ) from libcst._removal_sentinel import RemovalSentinel, RemoveFromParent from libcst._visitors import CSTNodeT, CSTTransformer, CSTVisitor, 
CSTVisitorT try: from libcst._version import version as LIBCST_VERSION except ImportError: LIBCST_VERSION = "unknown" from libcst.helpers import ( # from libcst import ensure_type is deprecated, will be removed in 0.4.0 ensure_type, ) from libcst.metadata.base_provider import ( BaseMetadataProvider, BatchableMetadataProvider, VisitorMetadataProvider, ) from libcst.metadata.wrapper import MetadataWrapper __all__ = [ "KNOWN_PYTHON_VERSION_STRINGS", "LIBCST_VERSION", "BatchableCSTVisitor", "CSTNodeT", "CSTTransformer", "CSTValidationError", "CSTVisitor", "CSTVisitorT", "FlattenSentinel", "MaybeSentinel", "MetadataException", "ParserSyntaxError", "PartialParserConfig", "RemoveFromParent", "RemovalSentinel", "ensure_type", # from libcst import ensure_type is deprecated, will be removed in 0.4.0 "visit_batched", "parse_module", "parse_expression", "parse_statement", "CSTNode", "Module", "Annotation", "Arg", "Asynchronous", "Attribute", "Await", "BaseAssignTargetExpression", "BaseComp", "BaseDelTargetExpression", "BaseDict", "BaseDictElement", "BaseElement", "BaseExpression", "BaseFormattedStringContent", "BaseList", "BaseNumber", "BaseSet", "BaseSimpleComp", "BaseSlice", "BaseString", "BinaryOperation", "BooleanOperation", "Call", "Comparison", "ComparisonTarget", "CompFor", "CompIf", "ConcatenatedString", "Dict", "DictComp", "DictElement", "Element", "Ellipsis", "Float", "FormattedString", "FormattedStringExpression", "FormattedStringText", "From", "GeneratorExp", "IfExp", "Imaginary", "Index", "Integer", "Lambda", "LeftCurlyBrace", "LeftParen", "LeftSquareBracket", "List", "ListComp", "Name", "NamedExpr", "Param", "Parameters", "ParamSlash", "ParamStar", "RightCurlyBrace", "RightParen", "RightSquareBracket", "Set", "SetComp", "SimpleString", "Slice", "StarredDictElement", "StarredElement", "Subscript", "SubscriptElement", "Tuple", "UnaryOperation", "Yield", "Add", "AddAssign", "And", "AssignEqual", "BaseAugOp", "BaseBinaryOp", "BaseBooleanOp", "BaseCompOp", "BaseUnaryOp", "BitAnd", "BitAndAssign", "BitInvert", "BitOr", "BitOrAssign", "BitXor", "BitXorAssign", "Colon", "Comma", "Divide", "DivideAssign", "Dot", "Equal", "FloorDivide", "FloorDivideAssign", "GreaterThan", "GreaterThanEqual", "ImportStar", "In", "Is", "IsNot", "LeftShift", "LeftShiftAssign", "LessThan", "LessThanEqual", "MatrixMultiply", "MatrixMultiplyAssign", "Minus", "Modulo", "ModuloAssign", "Multiply", "MultiplyAssign", "Not", "NotEqual", "NotIn", "Or", "Plus", "Power", "PowerAssign", "RightShift", "RightShiftAssign", "Semicolon", "Subtract", "SubtractAssign", "AnnAssign", "AsName", "Assert", "Assign", "AssignTarget", "AugAssign", "BaseCompoundStatement", "BaseSmallStatement", "BaseStatement", "BaseSuite", "Break", "ClassDef", "Continue", "Decorator", "Del", "Else", "ExceptHandler", "ExceptStarHandler", "Expr", "Finally", "For", "FunctionDef", "Global", "If", "Import", "ImportAlias", "ImportFrom", "IndentedBlock", "Match", "MatchCase", "MatchAs", "MatchClass", "MatchKeywordElement", "MatchList", "MatchMapping", "MatchMappingElement", "MatchOr", "MatchOrElement", "MatchPattern", "MatchSequence", "MatchSequenceElement", "MatchSingleton", "MatchStar", "MatchTuple", "MatchValue", "NameItem", "Nonlocal", "Pass", "Raise", "Return", "SimpleStatementLine", "SimpleStatementSuite", "Try", "TryStar", "While", "With", "WithItem", "BaseParenthesizableWhitespace", "Comment", "EmptyLine", "Newline", "ParenthesizedWhitespace", "SimpleWhitespace", "TrailingWhitespace", "BaseMetadataProvider", "BatchableMetadataProvider", 
"VisitorMetadataProvider", "MetadataDependent", "MetadataWrapper", "TypeVar", "TypeVarTuple", "ParamSpec", "TypeParam", "TypeParameters", "TypeAlias", ] LibCST-1.2.0/libcst/_add_slots.py000066400000000000000000000044741456464173300165650ustar00rootroot00000000000000# This file is derived from github.com/ericvsmith/dataclasses, and is Apache 2 licensed. # https://github.com/ericvsmith/dataclasses/blob/ae712dd993420d43444f188f452/LICENSE.txt # https://github.com/ericvsmith/dataclasses/blob/ae712dd993420d43444f/dataclass_tools.py # Changed: takes slots in base classes into account when creating slots import dataclasses from itertools import chain, filterfalse from typing import Any, Mapping, Type, TypeVar _T = TypeVar("_T") def add_slots(cls: Type[_T]) -> Type[_T]: # Need to create a new class, since we can't set __slots__ # after a class has been created. # Make sure __slots__ isn't already set. if "__slots__" in cls.__dict__: raise TypeError(f"{cls.__name__} already specifies __slots__") # Create a new dict for our new class. cls_dict = dict(cls.__dict__) field_names = tuple(f.name for f in dataclasses.fields(cls)) inherited_slots = set( chain.from_iterable( superclass.__dict__.get("__slots__", ()) for superclass in cls.mro() ) ) cls_dict["__slots__"] = tuple( filterfalse(inherited_slots.__contains__, field_names) ) for field_name in field_names: # Remove our attributes, if present. They'll still be # available in _MARKER. cls_dict.pop(field_name, None) # Remove __dict__ itself. cls_dict.pop("__dict__", None) # Create the class. qualname = getattr(cls, "__qualname__", None) # pyre-fixme[9]: cls has type `Type[Variable[_T]]`; used as `_T`. # pyre-fixme[19]: Expected 0 positional arguments. cls = type(cls)(cls.__name__, cls.__bases__, cls_dict) if qualname is not None: cls.__qualname__ = qualname # Set __getstate__ and __setstate__ to workaround a bug with pickling frozen # dataclasses with slots. See https://bugs.python.org/issue36424 def __getstate__(self: object) -> Mapping[str, Any]: return { field.name: getattr(self, field.name) for field in dataclasses.fields(self) if hasattr(self, field.name) } def __setstate__(self: object, state: Mapping[str, Any]) -> None: for fieldname, value in state.items(): object.__setattr__(self, fieldname, value) cls.__getstate__ = __getstate__ cls.__setstate__ = __setstate__ return cls LibCST-1.2.0/libcst/_batched_visitor.py000066400000000000000000000130421456464173300177510ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect from typing import ( Callable, cast, Iterable, List, Mapping, MutableMapping, Optional, TYPE_CHECKING, ) from libcst._metadata_dependent import MetadataDependent from libcst._typed_visitor import CSTTypedVisitorFunctions from libcst._visitors import CSTNodeT, CSTVisitor if TYPE_CHECKING: from libcst._nodes.base import CSTNode # noqa: F401 VisitorMethod = Callable[["CSTNode"], None] _VisitorMethodCollection = Mapping[str, List[VisitorMethod]] class BatchableCSTVisitor(CSTTypedVisitorFunctions, MetadataDependent): """ The low-level base visitor class for traversing a CST as part of a batched set of traversals. This should be used in conjunction with the :func:`~libcst.visit_batched` function or the :func:`~libcst.MetadataWrapper.visit_batched` method from :class:`~libcst.MetadataWrapper` to visit a tree. Instances of this class cannot modify the tree. 
""" def get_visitors(self) -> Mapping[str, VisitorMethod]: """ Returns a mapping of all the ``visit_``, ``visit__``, ``leave_`` and `leave__`` methods defined by this visitor, excluding all empty stubs. """ methods = inspect.getmembers( self, lambda m: ( inspect.ismethod(m) and (m.__name__.startswith("visit_") or m.__name__.startswith("leave_")) and not getattr(m, "_is_no_op", False) ), ) # TODO: verify all visitor methods reference valid node classes. # for name, __ in methods: # ... return dict(methods) def visit_batched( node: CSTNodeT, batchable_visitors: Iterable[BatchableCSTVisitor], before_visit: Optional[VisitorMethod] = None, after_leave: Optional[VisitorMethod] = None, ) -> CSTNodeT: """ Do a batched traversal over ``node`` with all ``visitors``. ``before_visit`` and ``after_leave`` are provided as optional hooks to execute before the ``visit_`` and after the ``leave_`` methods from each visitor in ``visitor`` are executed by the batched visitor. This function does not handle metadata dependency resolution for ``visitors``. See :func:`~libcst.MetadataWrapper.visit_batched` from :class:`~libcst.MetadataWrapper` for batched traversal with metadata dependency resolution. """ visitor_methods = _get_visitor_methods(batchable_visitors) batched_visitor = _BatchedCSTVisitor( visitor_methods, before_visit=before_visit, after_leave=after_leave ) return cast(CSTNodeT, node.visit(batched_visitor)) def _get_visitor_methods( batchable_visitors: Iterable[BatchableCSTVisitor], ) -> _VisitorMethodCollection: """ Gather all ``visit_``, ``visit__``, ``leave_`` amd `leave__`` methods from ``batchabled_visitors``. """ visitor_methods: MutableMapping[str, List[VisitorMethod]] = {} for bv in batchable_visitors: for name, fn in bv.get_visitors().items(): visitor_methods.setdefault(name, []).append(fn) return visitor_methods class _BatchedCSTVisitor(CSTVisitor): """ Internal visitor class to perform batched traversal over a tree. """ visitor_methods: _VisitorMethodCollection before_visit: Optional[VisitorMethod] after_leave: Optional[VisitorMethod] def __init__( self, visitor_methods: _VisitorMethodCollection, *, before_visit: Optional[VisitorMethod] = None, after_leave: Optional[VisitorMethod] = None, ) -> None: super().__init__() self.visitor_methods = visitor_methods self.before_visit = before_visit self.after_leave = after_leave def on_visit(self, node: "CSTNode") -> bool: """ Call appropriate visit methods on node before visiting children. """ before_visit = self.before_visit if before_visit is not None: before_visit(node) type_name = type(node).__name__ for v in self.visitor_methods.get(f"visit_{type_name}", []): v(node) return True def on_leave(self, original_node: "CSTNode") -> None: """ Call appropriate leave methods on node after visiting children. """ type_name = type(original_node).__name__ for v in self.visitor_methods.get(f"leave_{type_name}", []): v(original_node) after_leave = self.after_leave if after_leave is not None: after_leave(original_node) def on_visit_attribute(self, node: "CSTNode", attribute: str) -> None: """ Call appropriate visit attribute methods on node before visiting attribute's children. """ type_name = type(node).__name__ for v in self.visitor_methods.get(f"visit_{type_name}_{attribute}", []): v(node) def on_leave_attribute(self, original_node: "CSTNode", attribute: str) -> None: """ Call appropriate leave attribute methods on node after visiting attribute's children. 
""" type_name = type(original_node).__name__ for v in self.visitor_methods.get(f"leave_{type_name}_{attribute}", []): v(original_node) LibCST-1.2.0/libcst/_exceptions.py000066400000000000000000000175211456464173300167670ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from enum import auto, Enum from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union from typing_extensions import final from libcst._parser.parso.pgen2.generator import ReservedString from libcst._parser.parso.python.token import PythonTokenTypes, TokenType from libcst._parser.types.token import Token from libcst._tabs import expand_tabs _EOF_STR: str = "end of file (EOF)" _INDENT_STR: str = "an indent" _DEDENT_STR: str = "a dedent" _NEWLINE_CHARS: str = "\r\n" class EOFSentinel(Enum): EOF = auto() def get_expected_str( encountered: Union[Token, EOFSentinel], expected: Union[Iterable[Union[TokenType, ReservedString]], EOFSentinel], ) -> str: if ( isinstance(encountered, EOFSentinel) or encountered.type is PythonTokenTypes.ENDMARKER ): encountered_str = _EOF_STR elif encountered.type is PythonTokenTypes.INDENT: encountered_str = _INDENT_STR elif encountered.type is PythonTokenTypes.DEDENT: encountered_str = _DEDENT_STR else: encountered_str = repr(encountered.string) if isinstance(expected, EOFSentinel): expected_names = [_EOF_STR] else: expected_names = sorted( [ repr(el.name) if isinstance(el, TokenType) else repr(el.value) for el in expected ] ) if len(expected_names) > 10: # There's too many possibilities, so it's probably not useful to list them. # Instead, let's just abbreviate the message. return f"Unexpectedly encountered {encountered_str}." else: if len(expected_names) == 1: expected_str = expected_names[0] else: expected_str = f"{', '.join(expected_names[:-1])}, or {expected_names[-1]}" return f"Encountered {encountered_str}, but expected {expected_str}." # pyre-fixme[2]: 'Any' type isn't pyre-strict. def _parser_syntax_error_unpickle(kwargs: Any) -> "ParserSyntaxError": return ParserSyntaxError(**kwargs) @final class PartialParserSyntaxError(Exception): """ An internal exception that represents a partially-constructed :class:`ParserSyntaxError`. It's raised by our internal parser conversion functions, which don't always know the current line and column information. This partial object only contains a message, with the expectation that the line and column information will be filled in by :class:`libcst._base_parser.BaseParser`. This should never be visible to the end-user. """ message: str def __init__(self, message: str) -> None: self.message = message @final class ParserSyntaxError(Exception): """ Contains an error encountered while trying to parse a piece of source code. This exception shouldn't be constructed directly by the user, but instead may be raised by calls to :func:`parse_module`, :func:`parse_expression`, or :func:`parse_statement`. This does not inherit from :class:`SyntaxError` because Python's may raise a :class:`SyntaxError` for any number of reasons, potentially leading to unintended behavior. """ #: A human-readable explanation of the syntax error without information about where #: the error occurred. #: #: For a human-readable explanation of the error alongside information about where #: it occurred, use :meth:`__str__` (via ``str(ex)``) instead. 
message: str # An internal value used to compute `editor_column` and to pretty-print where the # syntax error occurred in the code. _lines: Sequence[str] #: The one-indexed line where the error occurred. raw_line: int #: The zero-indexed column as a number of characters from the start of the line #: where the error occurred. raw_column: int def __init__( self, message: str, *, lines: Sequence[str], raw_line: int, raw_column: int ) -> None: super(ParserSyntaxError, self).__init__(message) self.message = message self._lines = lines self.raw_line = raw_line self.raw_column = raw_column def __reduce__( self, ) -> Tuple[Callable[..., "ParserSyntaxError"], Tuple[object, ...]]: return ( _parser_syntax_error_unpickle, ( { "message": self.message, "lines": self._lines, "raw_line": self.raw_line, "raw_column": self.raw_column, }, ), ) def __str__(self) -> str: """ A multi-line human-readable error message of where the syntax error is in the user's code. For example:: Syntax Error @ 2:1. Incomplete input. Encountered end of file (EOF), but expected 'except', or 'finally'. try: pass ^ """ context = self.context return ( f"Syntax Error @ {self.editor_line}:{self.editor_column}.\n" + f"{self.message}" + (f"\n\n{context}" if context is not None else "") ) def __repr__(self) -> str: return ( "ParserSyntaxError(" + f"{self.message!r}, lines=[...], raw_line={self.raw_line!r}, " + f"raw_column={self.raw_column!r})" ) @property def context(self) -> Optional[str]: """ A formatted string containing the line of code with the syntax error (or a non-empty line above it) along with a caret indicating the exact column where the error occurred. Return ``None`` if there's no relevant non-empty line to show. (e.g. the file consists of only blank lines) """ displayed_line = self.editor_line displayed_column = self.editor_column # we want to avoid displaying a blank line for context. If we're on a blank line # find the nearest line above us that isn't blank. while displayed_line >= 1 and not len(self._lines[displayed_line - 1].strip()): displayed_line -= 1 displayed_column = len(self._lines[displayed_line - 1]) # only show context if we managed to find a non-empty line if len(self._lines[displayed_line - 1].strip()): formatted_source_line = expand_tabs(self._lines[displayed_line - 1]).rstrip( _NEWLINE_CHARS ) # fmt: off return ( f"{formatted_source_line}\n" + f"{' ' * (displayed_column - 1)}^" ) # fmt: on else: return None @property def editor_line(self) -> int: """ The expected one-indexed line in the user's editor. This is the same as :attr:`raw_line`. """ return self.raw_line # raw_line is already one-indexed. @property def editor_column(self) -> int: """ The expected one-indexed column that's likely to match the behavior of the user's editor, assuming tabs expand to 1-8 spaces. This is the column number shown when the syntax error is printed out with `str`. This assumes single-width characters. However, because Python doesn't ship with a wcwidth function, it's hard to handle this properly without a third-party dependency. For a raw zero-indexed character offset without tab expansion, see :attr:`raw_column`. """ prefix_str = self._lines[self.raw_line - 1][: self.raw_column] tab_adjusted_column = len(expand_tabs(prefix_str)) # Text editors use a one-indexed column, so we need to add one to our # zero-indexed column to get a human-readable result.
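        # Worked example (illustrative): for a line "\tpass" with raw_column 1,
        # prefix_str is "\t", which expand_tabs turns into 8 spaces (assuming
        # the 8-column tab stops described above), so editor_column is 8 + 1 = 9.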
return tab_adjusted_column + 1 class MetadataException(Exception): pass LibCST-1.2.0/libcst/_flatten_sentinel.py000066400000000000000000000031221456464173300201340ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys # PEP 585 if sys.version_info < (3, 9): from typing import Iterable, Sequence else: from collections.abc import Iterable, Sequence from libcst._types import CSTNodeT_co class FlattenSentinel(Sequence[CSTNodeT_co]): """ A :class:`FlattenSentinel` may be returned by a :meth:`CSTTransformer.on_leave` method when one wants to replace a node with multiple nodes. The replaced node must be contained in a `Sequence` attribute such as :attr:`~libcst.Module.body`. This is generally the case for :class:`~libcst.BaseStatement` and :class:`~libcst.BaseSmallStatement`. For example to insert a print before every return:: def leave_Return( self, original_node: cst.Return, updated_node: cst.Return ) -> Union[cst.Return, cst.RemovalSentinel, cst.FlattenSentinel[cst.BaseSmallStatement]]: log_stmt = cst.Expr(cst.parse_expression("print('returning')")) return cst.FlattenSentinel([log_stmt, updated_node]) Returning an empty :class:`FlattenSentinel` is equivalent to returning :attr:`cst.RemovalSentinel.REMOVE` and is subject to its requirements. """ nodes: Sequence[CSTNodeT_co] def __init__(self, nodes: Iterable[CSTNodeT_co]) -> None: self.nodes = tuple(nodes) def __getitem__(self, idx: int) -> CSTNodeT_co: return self.nodes[idx] def __len__(self) -> int: return len(self.nodes) LibCST-1.2.0/libcst/_maybe_sentinel.py000066400000000000000000000046131456464173300176020ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from enum import auto, Enum class MaybeSentinel(Enum): """ A :class:`MaybeSentinel` value is used as the default value for some attributes to denote that when generating code (when :attr:`Module.code` is evaluated) we should optionally include this element in order to generate valid code. :class:`MaybeSentinel` is only used for "syntactic trivia" that most users shouldn't care much about anyways, like commas, semicolons, and whitespace. For example, a function call's :attr:`Arg.comma` value defaults to :attr:`MaybeSentinel.DEFAULT`. A comma is required after every argument, except for the last one. If a comma is required and :attr:`Arg.comma` is a :class:`MaybeSentinel`, one is inserted. This makes manual node construction easier, but it also means that we safely add arguments to a preexisting function call without manually fixing the commas: >>> import libcst as cst >>> fn_call = cst.parse_expression("fn(1, 2)") >>> new_fn_call = fn_call.with_changes( ... args=[*fn_call.args, cst.Arg(cst.Integer("3"))] ... ) >>> dummy_module = cst.parse_module("") # we need to use Module.code_for_node >>> dummy_module.code_for_node(fn_call) 'fn(1, 2)' >>> dummy_module.code_for_node(new_fn_call) 'fn(1, 2, 3)' Notice that a comma was automatically inserted after the second argument. Since the original second argument had no comma, it was initialized to :attr:`MaybeSentinel.DEFAULT`. During the code generation of the second argument, a comma was inserted to ensure that the resulting code is valid. .. 
warning:: While this sentinel is used in place of nodes, it is not a :class:`CSTNode`, and will not be visited by a :class:`CSTVisitor`. Some other libraries, like `RedBaron`_, take other approaches to this problem. RedBaron's tree is mutable (LibCST's tree is immutable), and so they're able to solve this problem with `"proxy lists" `_. Both approaches come with different sets of tradeoffs. .. _RedBaron: http://redbaron.pycqa.org/en/latest/index.html """ DEFAULT = auto() def __repr__(self) -> str: return str(self) LibCST-1.2.0/libcst/_metadata_dependent.py000066400000000000000000000113401456464173300204050ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import inspect from abc import ABC from contextlib import contextmanager from typing import ( Callable, cast, ClassVar, Collection, Generic, Iterator, Mapping, Type, TYPE_CHECKING, TypeVar, Union, ) if TYPE_CHECKING: # Circular dependency for typing reasons only from libcst._nodes.base import CSTNode # noqa: F401 from libcst.metadata.base_provider import ( # noqa: F401 BaseMetadataProvider, ProviderT, ) from libcst.metadata.wrapper import MetadataWrapper # noqa: F401 _T = TypeVar("_T") class _UNDEFINED_DEFAULT: pass class LazyValue(Generic[_T]): """ The class for implementing a lazy metadata loading mechanism that improves the performance when retriving expensive metadata (e.g., qualified names). Providers including :class:`~libcst.metadata.QualifiedNameProvider` use this class to load the metadata of a certain node lazily when calling :func:`~libcst.MetadataDependent.get_metadata`. """ def __init__(self, callable: Callable[[], _T]) -> None: self.callable = callable self.return_value: Union[_T, Type[_UNDEFINED_DEFAULT]] = _UNDEFINED_DEFAULT def __call__(self) -> _T: if self.return_value is _UNDEFINED_DEFAULT: self.return_value = self.callable() return cast(_T, self.return_value) class MetadataDependent(ABC): """ The low-level base class for all classes that declare required metadata dependencies. :class:`~libcst.CSTVisitor` and :class:`~libcst.CSTTransformer` extend this class. """ #: A cached copy of metadata computed by :func:`~libcst.MetadataDependent.resolve`. #: Prefer using :func:`~libcst.MetadataDependent.get_metadata` over accessing #: this attribute directly. metadata: Mapping["ProviderT", Mapping["CSTNode", object]] #: The set of metadata dependencies declared by this class. METADATA_DEPENDENCIES: ClassVar[Collection["ProviderT"]] = () def __init__(self) -> None: self.metadata = {} @classmethod def get_inherited_dependencies(cls) -> Collection["ProviderT"]: """ Returns all metadata dependencies declared by classes in the MRO of ``cls`` that subclass this class. Recursively searches the MRO of the subclass for metadata dependencies. 
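        For example (an illustrative sketch; the visitor classes and the
        particular providers are arbitrary choices for this example)::

            from libcst.metadata import PositionProvider, QualifiedNameProvider

            class ParentVisitor(MetadataDependent):
                METADATA_DEPENDENCIES = (PositionProvider,)

            class ChildVisitor(ParentVisitor):
                METADATA_DEPENDENCIES = (QualifiedNameProvider,)

            # Dependencies declared anywhere in the MRO are included.
            assert set(ChildVisitor.get_inherited_dependencies()) == {
                PositionProvider,
                QualifiedNameProvider,
            }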
""" try: # pyre-fixme[16]: use a hidden attribute to cache the property return cls._INHERITED_METADATA_DEPENDENCIES_CACHE except AttributeError: dependencies = set() for c in inspect.getmro(cls): if issubclass(c, MetadataDependent): dependencies.update(c.METADATA_DEPENDENCIES) # pyre-fixme[16]: use a hidden attribute to cache the property cls._INHERITED_METADATA_DEPENDENCIES_CACHE = frozenset(dependencies) return cls._INHERITED_METADATA_DEPENDENCIES_CACHE @contextmanager def resolve(self, wrapper: "MetadataWrapper") -> Iterator[None]: """ Context manager that resolves all metadata dependencies declared by ``self`` (using :func:`~libcst.MetadataDependent.get_inherited_dependencies`) on ``wrapper`` and caches it on ``self`` for use with :func:`~libcst.MetadataDependent.get_metadata`. Upon exiting this context manager, the metadata cache on ``self`` is cleared. """ self.metadata = wrapper.resolve_many(self.get_inherited_dependencies()) yield self.metadata = {} def get_metadata( self, key: Type["BaseMetadataProvider[_T]"], node: "CSTNode", default: _T = _UNDEFINED_DEFAULT, ) -> _T: """ Returns the metadata provided by the ``key`` if it is accessible from this visitor. Metadata is accessible in a subclass of this class if ``key`` is declared as a dependency by any class in the MRO of this class. """ if key not in self.get_inherited_dependencies(): raise KeyError( f"{key.__name__} is not declared as a dependency in {type(self).__name__}.METADATA_DEPENDENCIES." ) if key not in self.metadata: raise KeyError( f"{key.__name__} is a dependency, but not set; did you forget a MetadataWrapper?" ) if default is not _UNDEFINED_DEFAULT: value = self.metadata[key].get(node, default) else: value = self.metadata[key][node] if isinstance(value, LazyValue): value = value() return cast(_T, value) LibCST-1.2.0/libcst/_nodes/000077500000000000000000000000001456464173300153365ustar00rootroot00000000000000LibCST-1.2.0/libcst/_nodes/__init__.py000066400000000000000000000005501456464173300174470ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This package contains CSTNode and all of the subclasses needed to express Python's full grammar in a whitespace-sensitive fashion, forming a "Concrete" Syntax Tree (CST). """ LibCST-1.2.0/libcst/_nodes/base.py000066400000000000000000000457551456464173300166420ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from abc import ABC, abstractmethod from copy import deepcopy from dataclasses import dataclass, field, fields, replace from typing import Any, cast, ClassVar, Dict, List, Mapping, Sequence, TypeVar, Union from libcst._flatten_sentinel import FlattenSentinel from libcst._nodes.internal import CodegenState from libcst._removal_sentinel import RemovalSentinel from libcst._type_enforce import is_value_of_type from libcst._types import CSTNodeT from libcst._visitors import CSTTransformer, CSTVisitor, CSTVisitorT _CSTNodeSelfT = TypeVar("_CSTNodeSelfT", bound="CSTNode") _EMPTY_SEQUENCE: Sequence["CSTNode"] = () class CSTValidationError(SyntaxError): pass class CSTCodegenError(SyntaxError): pass class _ChildrenCollectionVisitor(CSTVisitor): def __init__(self) -> None: self.children: List[CSTNode] = [] def on_visit(self, node: "CSTNode") -> bool: self.children.append(node) return False # Don't include transitive children class _ChildReplacementTransformer(CSTTransformer): def __init__( self, old_node: "CSTNode", new_node: Union["CSTNode", RemovalSentinel] ) -> None: self.old_node = old_node self.new_node = new_node def on_visit(self, node: "CSTNode") -> bool: # If the node is one we are about to replace, we shouldn't # recurse down it, that would be a waste of time. return node is not self.old_node def on_leave( self, original_node: "CSTNode", updated_node: "CSTNode" ) -> Union["CSTNode", RemovalSentinel]: if original_node is self.old_node: return self.new_node return updated_node class _ChildWithChangesTransformer(CSTTransformer): def __init__(self, old_node: "CSTNode", changes: Mapping[str, Any]) -> None: self.old_node = old_node self.changes = changes def on_visit(self, node: "CSTNode") -> bool: # If the node is one we are about to replace, we shouldn't # recurse down it, that would be a waste of time. return node is not self.old_node def on_leave(self, original_node: "CSTNode", updated_node: "CSTNode") -> "CSTNode": if original_node is self.old_node: return updated_node.with_changes(**self.changes) return updated_node class _NOOPVisitor(CSTTransformer): pass def _pretty_repr(value: object) -> str: if not isinstance(value, str) and isinstance(value, Sequence): return _pretty_repr_sequence(value) else: return repr(value) def _pretty_repr_sequence(seq: Sequence[object]) -> str: if len(seq) == 0: return "[]" else: return "\n".join(["[", *[f"{_indent(repr(el))}," for el in seq], "]"]) def _indent(value: str) -> str: return "\n".join(f" {line}" for line in value.split("\n")) def _clone(val: object) -> object: # We can't use isinstance(val, CSTNode) here due to poor performance # of isinstance checks against ABC direct subclasses. What we're trying # to do here is recursively call this functionality on subclasses, but # if the attribute isn't a CSTNode, fall back to copy.deepcopy. try: # pyre-ignore We know this might not exist, that's the point of the # attribute error and try block. return val.deep_clone() except AttributeError: return deepcopy(val) @dataclass(frozen=True) class CSTNode(ABC): __slots__: ClassVar[Sequence[str]] = () def __post_init__(self) -> None: # PERF: It might make more sense to move validation work into the visitor, which # would allow us to avoid validating the tree when parsing a file. self._validate() @classmethod def __init_subclass__(cls, **kwargs: Any) -> None: """ HACK: Add our implementation of `__repr__`, `__hash__`, and `__eq__` to the class's __dict__ to prevent dataclass from generating it's own `__repr__`, `__hash__`, and `__eq__`. 
The alternative is to require each implementation of a node to remember to add `repr=False, eq=False`, which is more error-prone. """ super().__init_subclass__(**kwargs) if "__repr__" not in cls.__dict__: cls.__repr__ = CSTNode.__repr__ if "__eq__" not in cls.__dict__: cls.__eq__ = CSTNode.__eq__ if "__hash__" not in cls.__dict__: cls.__hash__ = CSTNode.__hash__ def _validate(self) -> None: """ Override this to perform runtime validation of a newly created node. The function is called during `__init__`. It should check for possible mistakes that wouldn't be caught by a static type checker. If you can't use a static type checker, and want to perform a runtime validation of this node's types, use `validate_types` instead. """ pass def validate_types_shallow(self) -> None: """ Compares the type annotations on a node's fields with those field's actual values at runtime. Raises a TypeError is a mismatch is found. Only validates the current node, not any of it's children. For a recursive version, see :func:`validate_types_deep`. If you're using a static type checker (highly recommended), this is useless. However, if your code doesn't use a static type checker, or if you're unable to statically type your code for some reason, you can use this method to help validate your tree. Some (non-typing) validation is done unconditionally during the construction of a node. That validation does not overlap with the work that :func:`validate_types_deep` does. """ for f in fields(self): value = getattr(self, f.name) if not is_value_of_type(value, f.type): raise TypeError( f"Expected an instance of {f.type!r} on " + f"{type(self).__name__}'s '{f.name}' field, but instead got " + f"an instance of {type(value)!r}" ) def validate_types_deep(self) -> None: """ Like :func:`validate_types_shallow`, but recursively validates the whole tree. """ self.validate_types_shallow() for ch in self.children: ch.validate_types_deep() @property def children(self) -> Sequence["CSTNode"]: """ The immediate (not transitive) child CSTNodes of the current node. Various properties on the nodes, such as string values, will not be visited if they are not a subclass of CSTNode. Iterable properties of the node (e.g. an IndentedBlock's body) will be flattened into the children's sequence. The children will always be returned in the same order that they appear lexically in the code. """ # We're hooking into _visit_and_replace_children, which means that our current # implementation is slow. We may need to rethink and/or cache this if it becomes # a frequently accessed property. # # This probably won't be called frequently, because most child access will # probably through visit, or directly through named property access, not through # children. visitor = _ChildrenCollectionVisitor() self._visit_and_replace_children(visitor) return visitor.children def visit( self: _CSTNodeSelfT, visitor: CSTVisitorT ) -> Union[_CSTNodeSelfT, RemovalSentinel, FlattenSentinel[_CSTNodeSelfT]]: """ Visits the current node, its children, and all transitive children using the given visitor's callbacks. """ # visit self should_visit_children = visitor.on_visit(self) # TODO: provide traversal where children are not replaced # visit children (optionally) if should_visit_children: # It's not possible to define `_visit_and_replace_children` with the correct # return type in any sane way, so we're using this cast. See the # explanation above the declaration of `_visit_and_replace_children`. 
with_updated_children = cast( _CSTNodeSelfT, self._visit_and_replace_children(visitor) ) else: with_updated_children = self if isinstance(visitor, CSTVisitor): visitor.on_leave(self) leave_result = self else: leave_result = visitor.on_leave(self, with_updated_children) # validate return type of the user-defined `visitor.on_leave` method if not isinstance(leave_result, (CSTNode, RemovalSentinel, FlattenSentinel)): raise Exception( "Expected a node of type CSTNode or a RemovalSentinel, " + f"but got a return value of {type(leave_result).__name__}" ) # TODO: Run runtime typechecks against updated nodes return leave_result # The return type of `_visit_and_replace_children` is `CSTNode`, not # `_CSTNodeSelfT`. This is because pyre currently doesn't have a way to annotate # classes as final. https://mypy.readthedocs.io/en/latest/final_attrs.html # # The issue is that any reasonable implementation of `_visit_and_replace_children` # needs to refer to the class' own constructor: # # class While(CSTNode): # def _visit_and_replace_children(self, visitor: CSTVisitorT) -> While: # return While(...) # # You'll notice that because this implementation needs to call the `While` # constructor, the return type is also `While`. This function is a valid subtype of # `Callable[[CSTVisitorT], CSTNode]`. # # It is not a valid subtype of `Callable[[CSTVisitorT], _CSTNodeSelfT]`. That's # because the return type of this function wouldn't be valid for any subclasses. # In practice, that's not an issue, because we don't have any subclasses of `While`, # but there's no way to tell pyre that without a `@final` annotation. # # Instead, we're just relying on an unchecked call to `cast()` in the `visit` # method. @abstractmethod def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CSTNode": """ Intended to be overridden by subclasses to provide a low-level hook for the visitor API. Don't call this directly. Instead, use `visitor.visit_and_replace_node` or `visitor.visit_and_replace_module`. If you need list of children, access the `children` property instead. The general expectation is that children should be visited in the order in which they appear lexically. """ ... def _is_removable(self) -> bool: """ Intended to be overridden by nodes that will be iterated over inside Module and IndentedBlock. Returning true signifies that this node is essentially useless and can be dropped when doing a visit across it. """ return False @abstractmethod def _codegen_impl(self, state: CodegenState) -> None: ... def _codegen(self, state: CodegenState, **kwargs: Any) -> None: state.before_codegen(self) self._codegen_impl(state, **kwargs) state.after_codegen(self) def with_changes(self: _CSTNodeSelfT, **changes: Any) -> _CSTNodeSelfT: """ A convenience method for performing mutation-like operations on immutable nodes. Creates a new object of the same type, replacing fields with values from the supplied keyword arguments. For example, to update the test of an if conditional, you could do:: def leave_If(self, original_node: cst.If, updated_node: cst.If) -> cst.If: new_node = updated_node.with_changes(test=new_conditional) return new_node ``new_node`` will have the same ``body``, ``orelse``, and whitespace fields as ``updated_node``, but with the updated ``test`` field. The accepted arguments match the arguments given to ``__init__``, however there are no required or positional arguments. TODO: This API is untyped. 
There's probably no sane way to type it using pyre's current feature-set, but we should still think about ways to type this or a similar API in the future. """ return replace(self, **changes) def deep_clone(self: _CSTNodeSelfT) -> _CSTNodeSelfT: """ Recursively clone the entire tree. The created tree is a new tree has the same representation but different identity. >>> tree = cst.parse_expression("1+2") >>> tree.deep_clone() == tree False >>> tree == tree True >>> tree.deep_equals(tree.deep_clone()) True """ cloned_fields: Dict[str, object] = {} for field in fields(self): key = field.name if key[0] == "_": continue val = getattr(self, key) # Much like the comment on _clone itself, we are allergic to instance # checks against Sequence because of speed issues with ABC classes. So, # instead, first handle sequence types that we do not want to iterate on # and then just try to iterate and clone. if isinstance(val, (str, bytes)): cloned_fields[key] = _clone(val) else: try: cloned_fields[key] = tuple(_clone(v) for v in val) except TypeError: cloned_fields[key] = _clone(val) return type(self)(**cloned_fields) def deep_equals(self, other: "CSTNode") -> bool: """ Recursively inspects the entire tree under ``self`` and ``other`` to determine if the two trees are equal by representation instead of identity (``==``). """ from libcst._nodes.deep_equals import deep_equals as deep_equals_impl return deep_equals_impl(self, other) def deep_replace( self: _CSTNodeSelfT, old_node: "CSTNode", new_node: CSTNodeT ) -> Union[_CSTNodeSelfT, CSTNodeT]: """ Recursively replaces any instance of ``old_node`` with ``new_node`` by identity. Use this to avoid nested ``with_changes`` blocks when you are replacing one of a node's deep children with a new node. Note that if you have previously modified the tree in a way that ``old_node`` appears more than once as a deep child, all instances will be replaced. """ new_tree = self.visit(_ChildReplacementTransformer(old_node, new_node)) if isinstance(new_tree, (FlattenSentinel, RemovalSentinel)): # The above transform never returns *Sentinel, so this isn't possible raise Exception("Logic error, cannot get a *Sentinel here!") return new_tree def deep_remove( self: _CSTNodeSelfT, old_node: "CSTNode" ) -> Union[_CSTNodeSelfT, RemovalSentinel]: """ Recursively removes any instance of ``old_node`` by identity. Note that if you have previously modified the tree in a way that ``old_node`` appears more than once as a deep child, all instances will be removed. """ new_tree = self.visit( _ChildReplacementTransformer(old_node, RemovalSentinel.REMOVE) ) if isinstance(new_tree, FlattenSentinel): # The above transform never returns FlattenSentinel, so this isn't possible raise Exception("Logic error, cannot get a FlattenSentinel here!") return new_tree def with_deep_changes( self: _CSTNodeSelfT, old_node: "CSTNode", **changes: Any ) -> _CSTNodeSelfT: """ A convenience method for applying :attr:`with_changes` to a child node. Use this to avoid chains of :attr:`with_changes` or combinations of :attr:`deep_replace` and :attr:`with_changes`. The accepted arguments match the arguments given to the child node's ``__init__``. TODO: This API is untyped. There's probably no sane way to type it using pyre's current feature-set, but we should still think about ways to type this or a similar API in the future. """ new_tree = self.visit(_ChildWithChangesTransformer(old_node, changes)) if isinstance(new_tree, (FlattenSentinel, RemovalSentinel)): # This is impossible with the above transform. 
raise Exception("Logic error, cannot get a *Sentinel here!") return new_tree def __eq__(self: _CSTNodeSelfT, other: object) -> bool: """ CSTNodes are only treated as equal by identity. This matches the behavior of CPython's AST nodes. If you actually want to compare the value instead of the identity of the current node with another, use `node.deep_equals`. Because `deep_equals` must traverse the entire tree, it can have an unexpectedly large time complexity. We're not exposing value equality as the default behavior because of `deep_equals`'s large time complexity. """ return self is other def __hash__(self) -> int: # Equality of nodes is based on identity, so the hash should be too. return id(self) def __repr__(self) -> str: if len(fields(self)) == 0: return f"{type(self).__name__}()" lines = [f"{type(self).__name__}("] for f in fields(self): key = f.name if key[0] != "_": value = getattr(self, key) lines.append(_indent(f"{key}={_pretty_repr(value)},")) lines.append(")") return "\n".join(lines) @classmethod # pyre-fixme[3]: Return annotation cannot be `Any`. def field(cls, *args: object, **kwargs: object) -> Any: """ A helper that allows us to easily use CSTNodes in dataclass constructor defaults without accidentally aliasing nodes by identity across multiple instances. """ # pyre-ignore Pyre is complaining about CSTNode not being instantiable, # but we're only going to call this from concrete subclasses. return field(default_factory=lambda: cls(*args, **kwargs)) class BaseLeaf(CSTNode, ABC): __slots__ = () @property def children(self) -> Sequence[CSTNode]: # override this with an optimized implementation return _EMPTY_SEQUENCE def _visit_and_replace_children( self: _CSTNodeSelfT, visitor: CSTVisitorT ) -> _CSTNodeSelfT: return self class BaseValueToken(BaseLeaf, ABC): """ Represents the subset of nodes that only contain a value. Not all tokens from the tokenizer will exist as BaseValueTokens. In places where the token is always a constant value (e.g. a COLON token), the token's value will be implicitly folded into the parent CSTNode, and hard-coded into the implementation of _codegen. """ __slots__ = () value: str def _codegen_impl(self, state: CodegenState) -> None: state.add_token(self.value) LibCST-1.2.0/libcst/_nodes/deep_equals.py000066400000000000000000000032671456464173300202070ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Provides the implementation of `CSTNode.deep_equals`. """ from dataclasses import fields from typing import Sequence from libcst._nodes.base import CSTNode def deep_equals(a: object, b: object) -> bool: if isinstance(a, CSTNode) and isinstance(b, CSTNode): return _deep_equals_cst_node(a, b) elif ( isinstance(a, Sequence) and not isinstance(a, (str, bytes)) and isinstance(b, Sequence) and not isinstance(b, (str, bytes)) ): return _deep_equals_sequence(a, b) else: return a == b def _deep_equals_sequence(a: Sequence[object], b: Sequence[object]) -> bool: """ A helper function for `CSTNode.deep_equals`. Normalizes and compares sequences. Because we only ever expose `Sequence[]` types, and not `List[]`, `Tuple[]`, or `Iterable[]` values, all sequences should be treated as equal if they have the same values. 
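    For example, a ``tuple`` and a ``list`` holding equal elements are treated
    as equal here (illustrative values)::

        assert _deep_equals_sequence((1, 2), [1, 2])
        assert not _deep_equals_sequence((1, 2), [1, 2, 3])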
""" if a is b: # short-circuit return True if len(a) != len(b): return False return all(deep_equals(a_el, b_el) for (a_el, b_el) in zip(a, b)) def _deep_equals_cst_node(a: "CSTNode", b: "CSTNode") -> bool: if type(a) is not type(b): return False if a is b: # short-circuit return True # Ignore metadata and other hidden fields for field in (f for f in fields(a) if f.compare is True): a_value = getattr(a, field.name) b_value = getattr(b, field.name) if not deep_equals(a_value, b_value): return False return True LibCST-1.2.0/libcst/_nodes/expression.py000066400000000000000000004251131456464173300201150ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re from abc import ABC, abstractmethod from ast import literal_eval from contextlib import contextmanager from dataclasses import dataclass, field from enum import auto, Enum from tokenize import ( Floatnumber as FLOATNUMBER_RE, Imagnumber as IMAGNUMBER_RE, Intnumber as INTNUMBER_RE, ) from typing import Callable, Generator, Optional, Sequence, Union from typing_extensions import Literal from libcst._add_slots import add_slots from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.base import CSTCodegenError, CSTNode, CSTValidationError from libcst._nodes.internal import ( CodegenState, visit_optional, visit_required, visit_sentinel, visit_sequence, ) from libcst._nodes.op import ( AssignEqual, BaseBinaryOp, BaseBooleanOp, BaseCompOp, BaseUnaryOp, Colon, Comma, Dot, In, Is, IsNot, Not, NotIn, ) from libcst._nodes.whitespace import BaseParenthesizableWhitespace, SimpleWhitespace from libcst._visitors import CSTVisitorT @add_slots @dataclass(frozen=True) class LeftSquareBracket(CSTNode): """ Used by various nodes to denote a subscript or list section. This doesn't own the whitespace to the left of it since this is owned by the parent node. """ #: Any space that appears directly after this left square bracket. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "LeftSquareBracket": return LeftSquareBracket( whitespace_after=visit_required( self, "whitespace_after", self.whitespace_after, visitor ) ) def _codegen_impl(self, state: CodegenState) -> None: state.add_token("[") self.whitespace_after._codegen(state) @add_slots @dataclass(frozen=True) class RightSquareBracket(CSTNode): """ Used by various nodes to denote a subscript or list section. This doesn't own the whitespace to the right of it since this is owned by the parent node. """ #: Any space that appears directly before this right square bracket. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "RightSquareBracket": return RightSquareBracket( whitespace_before=visit_required( self, "whitespace_before", self.whitespace_before, visitor ) ) def _codegen_impl(self, state: CodegenState) -> None: self.whitespace_before._codegen(state) state.add_token("]") @add_slots @dataclass(frozen=True) class LeftCurlyBrace(CSTNode): """ Used by various nodes to denote a dict or set. This doesn't own the whitespace to the left of it since this is owned by the parent node. """ #: Any space that appears directly after this left curly brace. 
whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "LeftCurlyBrace": return LeftCurlyBrace( whitespace_after=visit_required( self, "whitespace_after", self.whitespace_after, visitor ) ) def _codegen_impl(self, state: CodegenState) -> None: state.add_token("{") self.whitespace_after._codegen(state) @add_slots @dataclass(frozen=True) class RightCurlyBrace(CSTNode): """ Used by various nodes to denote a dict or set. This doesn't own the whitespace to the right of it since this is owned by the parent node. """ #: Any space that appears directly before this right curly brace. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "RightCurlyBrace": return RightCurlyBrace( whitespace_before=visit_required( self, "whitespace_before", self.whitespace_before, visitor ) ) def _codegen_impl(self, state: CodegenState) -> None: self.whitespace_before._codegen(state) state.add_token("}") @add_slots @dataclass(frozen=True) class LeftParen(CSTNode): """ Used by various nodes to denote a parenthesized section. This doesn't own the whitespace to the left of it since this is owned by the parent node. """ #: Any space that appears directly after this left parenthesis. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "LeftParen": return LeftParen( whitespace_after=visit_required( self, "whitespace_after", self.whitespace_after, visitor ) ) def _codegen_impl(self, state: CodegenState) -> None: state.add_token("(") self.whitespace_after._codegen(state) @add_slots @dataclass(frozen=True) class RightParen(CSTNode): """ Used by various nodes to denote a parenthesized section. This doesn't own the whitespace to the right of it since this is owned by the parent node. """ #: Any space that appears directly after this left parenthesis. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "RightParen": return RightParen( whitespace_before=visit_required( self, "whitespace_before", self.whitespace_before, visitor ) ) def _codegen_impl(self, state: CodegenState) -> None: self.whitespace_before._codegen(state) state.add_token(")") @add_slots @dataclass(frozen=True) class Asynchronous(CSTNode): """ Used by asynchronous function definitions, as well as ``async for`` and ``async with``. """ #: Any space that appears directly after this async keyword. whitespace_after: SimpleWhitespace = SimpleWhitespace.field(" ") def _validate(self) -> None: if len(self.whitespace_after.value) < 1: raise CSTValidationError("Must have at least one space after Asynchronous.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Asynchronous": return Asynchronous( whitespace_after=visit_required( self, "whitespace_after", self.whitespace_after, visitor ) ) def _codegen_impl(self, state: CodegenState) -> None: with state.record_syntactic_position(self): state.add_token("async") self.whitespace_after._codegen(state) class _BaseParenthesizedNode(CSTNode, ABC): """ We don't want to have another level of indirection for parenthesis in our tree, since that makes us more of a CST than an AST. So, all the expressions or atoms that can be wrapped in parenthesis will subclass this to get that functionality. 
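    For example, the parentheses around a parenthesized expression are stored
    on the expression node itself (illustrative doctest):

    >>> import libcst as cst
    >>> node = cst.parse_expression("(1 + 2)")
    >>> type(node).__name__
    'BinaryOperation'
    >>> (len(node.lpar), len(node.rpar))
    (1, 1)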
""" __slots__ = () lpar: Sequence[LeftParen] = () # Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _validate(self) -> None: if self.lpar and not self.rpar: raise CSTValidationError("Cannot have left paren without right paren.") if not self.lpar and self.rpar: raise CSTValidationError("Cannot have right paren without left paren.") if len(self.lpar) != len(self.rpar): raise CSTValidationError("Cannot have unbalanced parens.") @contextmanager def _parenthesize(self, state: CodegenState) -> Generator[None, None, None]: for lpar in self.lpar: lpar._codegen(state) with state.record_syntactic_position(self): yield for rpar in self.rpar: rpar._codegen(state) class ExpressionPosition(Enum): LEFT = auto() RIGHT = auto() class BaseExpression(_BaseParenthesizedNode, ABC): """ An base class for all expressions. :class:`BaseExpression` contains no fields. """ __slots__ = () def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: """ Returns true if this expression is safe to be use with a word operator such as "not" without space between the operator an ourselves. Examples where this is true are "not(True)", "(1)in[1,2,3]", etc. This base function handles parenthesized nodes, but certain nodes such as tuples, dictionaries and lists will override this to signifiy that they're always safe. """ return len(self.lpar) > 0 and len(self.rpar) > 0 def _check_left_right_word_concatenation_safety( self, position: ExpressionPosition, left: "BaseExpression", right: "BaseExpression", ) -> bool: if position == ExpressionPosition.RIGHT: return left._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) if position == ExpressionPosition.LEFT: return right._safe_to_use_with_word_operator(ExpressionPosition.LEFT) return False class BaseAssignTargetExpression(BaseExpression, ABC): """ An expression that's valid on the left side of an assignment. That assignment may be part an :class:`Assign` node, or it may be part of a number of other control structures that perform an assignment, such as a :class:`For` loop. Python's grammar defines all expression as valid in this position, but the AST compiler further restricts the allowed types, which is what this type attempts to express. This is similar to a :class:`BaseDelTargetExpression`, but it also includes :class:`StarredElement` as a valid node. The set of valid nodes are defined as part of `CPython's AST context computation `_. """ __slots__ = () class BaseDelTargetExpression(BaseExpression, ABC): """ An expression that's valid on the right side of a :class:`Del` statement. Python's grammar defines all expression as valid in this position, but the AST compiler further restricts the allowed types, which is what this type attempts to express. This is similar to a :class:`BaseAssignTargetExpression`, but it excludes :class:`StarredElement`. The set of valid nodes are defined as part of `CPython's AST context computation `_ and as part of `CPython's bytecode compiler `_. """ __slots__ = () @add_slots @dataclass(frozen=True) class Name(BaseAssignTargetExpression, BaseDelTargetExpression): """ A simple variable name. Names are typically used in the context of a variable access, an assignment, or a deletion. Dotted variable names (``a.b.c``) are represented with :class:`Attribute` nodes, and subscripted variable names (``a[b]``) are represented with :class:`Subscript` nodes. """ #: The variable's name (or "identifier") as a string. 
value: str lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Name": return Name( lpar=visit_sequence(self, "lpar", self.lpar, visitor), value=self.value, rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _validate(self) -> None: super(Name, self)._validate() if len(self.value) == 0: raise CSTValidationError("Cannot have empty name identifier.") if not self.value.isidentifier(): raise CSTValidationError(f"Name {self.value!r} is not a valid identifier.") def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token(self.value) @add_slots @dataclass(frozen=True) class Ellipsis(BaseExpression): """ An ellipsis ``...``. When used as an expression, it evaluates to the `Ellipsis constant`_. Ellipses are often used as placeholders in code or in conjunction with :class:`SubscriptElement`. .. _Ellipsis constant: https://docs.python.org/3/library/constants.html#Ellipsis """ lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Ellipsis": return Ellipsis( lpar=visit_sequence(self, "lpar", self.lpar, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: return True def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token("...") class BaseNumber(BaseExpression, ABC): """ A type such as :class:`Integer`, :class:`Float`, or :class:`Imaginary` that can be used anywhere that you need to explicitly take any number type. """ __slots__ = () def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: """ Numbers are funny. The expression "5in [1,2,3,4,5]" is a valid expression which evaluates to "True". So, encapsulate that here by allowing zero spacing with the left hand side of an expression with a comparison operator. """ if position == ExpressionPosition.LEFT: return True return super(BaseNumber, self)._safe_to_use_with_word_operator(position) @add_slots @dataclass(frozen=True) class Integer(BaseNumber): #: A string representation of the integer, such as ``"100000"`` or ``"100_000"``. #: #: To convert this string representation to an ``int``, use the calculated #: property :attr:`~Integer.evaluated_value`. value: str lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Integer": return Integer( lpar=visit_sequence(self, "lpar", self.lpar, visitor), value=self.value, rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _validate(self) -> None: super(Integer, self)._validate() if not re.fullmatch(INTNUMBER_RE, self.value): raise CSTValidationError("Number is not a valid integer.") def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token(self.value) @property def evaluated_value(self) -> int: """ Return an :func:`ast.literal_eval` evaluated int of :py:attr:`value`. """ return literal_eval(self.value) @add_slots @dataclass(frozen=True) class Float(BaseNumber): #: A string representation of the floating point number, such as ``"0.05"``, #: ``".050"``, or ``"5e-2"``.
#: #: To convert this string representation to a ``float``, use the calculated #: property :attr:`~Float.evaluated_value`. value: str lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Float": return Float( lpar=visit_sequence(self, "lpar", self.lpar, visitor), value=self.value, rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _validate(self) -> None: super(Float, self)._validate() if not re.fullmatch(FLOATNUMBER_RE, self.value): raise CSTValidationError("Number is not a valid float.") def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token(self.value) @property def evaluated_value(self) -> float: """ Return an :func:`ast.literal_eval` evaluated float of :py:attr:`value`. """ return literal_eval(self.value) @add_slots @dataclass(frozen=True) class Imaginary(BaseNumber): #: A string representation of the imaginary (complex) number, such as ``"2j"``. #: #: To convert this string representation to a ``complex``, use the calculated #: property :attr:`~Imaginary.evaluated_value`. value: str lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Imaginary": return Imaginary( lpar=visit_sequence(self, "lpar", self.lpar, visitor), value=self.value, rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _validate(self) -> None: super(Imaginary, self)._validate() if not re.fullmatch(IMAGNUMBER_RE, self.value): raise CSTValidationError("Number is not a valid imaginary.") def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token(self.value) @property def evaluated_value(self) -> complex: """ Return an :func:`ast.literal_eval` evaluated complex of :py:attr:`value`. """ return literal_eval(self.value) class BaseString(BaseExpression, ABC): """ A type that can be used anywhere that you need to take any string. This includes :class:`SimpleString`, :class:`ConcatenatedString`, and :class:`FormattedString`. """ __slots__ = () StringQuoteLiteral = Literal['"', "'", '"""', "'''"] class _BasePrefixedString(BaseString, ABC): __slots__ = () @property def prefix(self) -> str: """ Returns the string's prefix, if any exists. See `String and Bytes literals `_ for more information. """ ... @property def quote(self) -> StringQuoteLiteral: """ Returns the quotation used to denote the string. Can be either ``'``, ``"``, ``'''`` or ``\"\"\"``. """ ... def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: """ ``"a"in"abc"`` is okay, but if you add a prefix (e.g. ``b"a"inb"abc"``), the string is no longer valid on the RHS of the word operator, because it's not clear where the keyword ends and the prefix begins, unless it's parenthesized. """ if position == ExpressionPosition.LEFT: return True elif self.prefix == "": # and position == ExpressionPosition.RIGHT return True else: return super(_BasePrefixedString, self)._safe_to_use_with_word_operator( position ) @add_slots @dataclass(frozen=True) class SimpleString(_BasePrefixedString): """ Any sort of literal string expression that is not a :class:`FormattedString` (f-string), including triple-quoted multi-line strings.
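    For example (an illustrative sketch using the properties defined below)::

        s = SimpleString('b"data"')
        s.prefix     # "b"
        s.quote      # '"'
        s.raw_value  # "data"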
""" #: The texual representation of the string, including quotes, prefix characters, and #: any escape characters present in the original source code , such as #: ``r"my string\n"``. To remove the quotes and interpret any escape characters, #: use the calculated property :attr:`~SimpleString.evaluated_value`. value: str lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precidence dictation. rpar: Sequence[RightParen] = () def _validate(self) -> None: super(SimpleString, self)._validate() # Validate any prefix prefix = self.prefix if prefix not in ("", "r", "u", "b", "br", "rb"): raise CSTValidationError("Invalid string prefix.") prefixlen = len(prefix) # Validate wrapping quotes if len(self.value) < (prefixlen + 2): raise CSTValidationError("String must have enclosing quotes.") if ( self.value[prefixlen] not in ['"', "'"] or self.value[prefixlen] != self.value[-1] ): raise CSTValidationError("String must have matching enclosing quotes.") # Check validity of triple-quoted strings if len(self.value) >= (prefixlen + 6): if self.value[prefixlen] == self.value[prefixlen + 1]: # We know this isn't an empty string, so there needs to be a third # identical enclosing token. if ( self.value[prefixlen] != self.value[prefixlen + 2] or self.value[prefixlen] != self.value[-2] or self.value[prefixlen] != self.value[-3] ): raise CSTValidationError( "String must have matching enclosing quotes." ) # We should check the contents as well, but this is pretty complicated, # partially due to triple-quoted strings. @property def prefix(self) -> str: """ Returns the string's prefix, if any exists. The prefix can be ``r``, ``u``, ``b``, ``br`` or ``rb``. """ prefix: str = "" for c in self.value: if c in ['"', "'"]: break prefix += c return prefix.lower() @property def quote(self) -> StringQuoteLiteral: """ Returns the quotation used to denote the string. Can be either ``'``, ``"``, ``'''`` or ``\"\"\"``. """ quote: str = "" for char in self.value[len(self.prefix) :]: if char not in {"'", '"'}: break if quote and char != quote[0]: # This is no longer the same string quote break quote += char if len(quote) == 2: # Let's assume this is an empty string. quote = quote[:1] elif 3 < len(quote) <= 6: # Let's assume this can be one of the following: # >>> """"foo""" # '"foo' # >>> """""bar""" # '""bar' # >>> """""" # '' quote = quote[:3] if len(quote) not in {1, 3}: # We shouldn't get here due to construction validation logic, # but handle the case anyway. raise Exception(f"Invalid string {self.value}") # pyre-ignore We know via the above validation that we will only # ever return one of the four string literals. return quote @property def raw_value(self) -> str: """ Returns the raw value of the string as it appears in source, without the beginning or end quotes and without the prefix. This is often useful when constructing transforms which need to manipulate strings in source code. """ prefix_len = len(self.prefix) quote_len = len(self.quote) return self.value[(prefix_len + quote_len) : (-quote_len)] def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "SimpleString": return SimpleString( lpar=visit_sequence(self, "lpar", self.lpar, visitor), value=self.value, rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token(self.value) @property def evaluated_value(self) -> Union[str, bytes]: """ Return an :func:`ast.literal_eval` evaluated str of :py:attr:`value`. 
""" return literal_eval(self.value) class BaseFormattedStringContent(CSTNode, ABC): """ The base type for :class:`FormattedStringText` and :class:`FormattedStringExpression`. A :class:`FormattedString` is composed of a sequence of :class:`BaseFormattedStringContent` parts. """ __slots__ = () @add_slots @dataclass(frozen=True) class FormattedStringText(BaseFormattedStringContent): """ Part of a :class:`FormattedString` that is not inside curly braces (``{`` or ``}``). For example, in:: f"ab{cd}ef" ``ab`` and ``ef`` are :class:`FormattedStringText` nodes, but ``{cd}`` is a :class:`FormattedStringExpression`. """ #: The raw string value, including any escape characters present in the source #: code, not including any enclosing quotes. value: str def _visit_and_replace_children( self, visitor: CSTVisitorT ) -> "FormattedStringText": return FormattedStringText(value=self.value) def _codegen_impl(self, state: CodegenState) -> None: state.add_token(self.value) @add_slots @dataclass(frozen=True) class FormattedStringExpression(BaseFormattedStringContent): """ Part of a :class:`FormattedString` that is inside curly braces (``{`` or ``}``), including the surrounding curly braces. For example, in:: f"ab{cd}ef" ``{cd}`` is a :class:`FormattedStringExpression`, but ``ab`` and ``ef`` are :class:`FormattedStringText` nodes. An f-string expression may contain ``conversion`` and ``format_spec`` suffixes that control how the expression is converted to a string. See `Python's language reference `__ for details. """ #: The expression we will evaluate and render when generating the string. expression: BaseExpression #: An optional conversion specifier, such as ``!s``, ``!r`` or ``!a``. conversion: Optional[str] = None #: An optional format specifier following the `format specification mini-language #: `_. format_spec: Optional[Sequence[BaseFormattedStringContent]] = None #: Whitespace after the opening curly brace (``{``), but before the ``expression``. whitespace_before_expression: BaseParenthesizableWhitespace = ( SimpleWhitespace.field("") ) #: Whitespace after the ``expression``, but before the ``conversion``, #: ``format_spec`` and the closing curly brace (``}``). Python does not #: allow whitespace inside or after a ``conversion`` or ``format_spec``. whitespace_after_expression: BaseParenthesizableWhitespace = SimpleWhitespace.field( "" ) #: Equal sign for formatted string expression uses self-documenting expressions, #: such as ``f"{x=}"``. See the `Python 3.8 release notes #: `_. 
equal: Optional[AssignEqual] = None def _validate(self) -> None: if self.conversion is not None and self.conversion not in ("s", "r", "a"): raise CSTValidationError("Invalid f-string conversion.") def _visit_and_replace_children( self, visitor: CSTVisitorT ) -> "FormattedStringExpression": format_spec = self.format_spec return FormattedStringExpression( whitespace_before_expression=visit_required( self, "whitespace_before_expression", self.whitespace_before_expression, visitor, ), expression=visit_required(self, "expression", self.expression, visitor), equal=visit_optional(self, "equal", self.equal, visitor), whitespace_after_expression=visit_required( self, "whitespace_after_expression", self.whitespace_after_expression, visitor, ), conversion=self.conversion, format_spec=( visit_sequence(self, "format_spec", format_spec, visitor) if format_spec is not None else None ), ) def _codegen_impl(self, state: CodegenState) -> None: state.add_token("{") self.whitespace_before_expression._codegen(state) self.expression._codegen(state) equal = self.equal if equal is not None: equal._codegen(state) self.whitespace_after_expression._codegen(state) conversion = self.conversion if conversion is not None: state.add_token("!") state.add_token(conversion) format_spec = self.format_spec if format_spec is not None: state.add_token(":") for spec in format_spec: spec._codegen(state) state.add_token("}") @add_slots @dataclass(frozen=True) class FormattedString(_BasePrefixedString): """ An "f-string". These formatted strings are string literals prefixed by the letter "f". An f-string may contain interpolated expressions inside curly braces (``{`` and ``}``). F-strings are defined in `PEP 498`_ and documented in `Python's language reference `__. >>> import libcst as cst >>> cst.parse_expression('f"ab{cd}ef"') FormattedString( parts=[ FormattedStringText( value='ab', ), FormattedStringExpression( expression=Name( value='cd', lpar=[], rpar=[], ), conversion=None, format_spec=None, whitespace_before_expression=SimpleWhitespace( value='', ), whitespace_after_expression=SimpleWhitespace( value='', ), ), FormattedStringText( value='ef', ), ], start='f"', end='"', lpar=[], rpar=[], ) .. _PEP 498: https://www.python.org/dev/peps/pep-0498/#specification """ #: A formatted string is composed as a series of :class:`FormattedStringText` and #: :class:`FormattedStringExpression` parts. parts: Sequence[BaseFormattedStringContent] #: The string prefix and the leading quote, such as ``f"``, ``F'``, ``fr"``, or #: ``f"""``. start: str = 'f"' #: The trailing quote. This must match the type of quote used in ``start``. end: Literal['"', "'", '"""', "'''"] = '"' lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _validate(self) -> None: super(FormattedString, self)._validate() # Validate any prefix prefix = self.prefix if prefix not in ("f", "fr", "rf"): raise CSTValidationError("Invalid f-string prefix.") # Validate wrapping quotes starttoken = self.start[len(prefix) :] if starttoken != self.end: raise CSTValidationError("f-string must have matching enclosing quotes.") # Validate valid wrapping quote usage if starttoken not in ('"', "'", '"""', "'''"): raise CSTValidationError("Invalid f-string enclosing quotes.") @property def prefix(self) -> str: """ Returns the string's prefix, if any exists. The prefix can be ``f``, ``fr``, or ``rf``.
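        For example (illustrative), a formatted string written ``rf"{x}"`` has
        ``start == 'rf"'`` and therefore ``prefix == "rf"``.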
""" prefix = "" for c in self.start: if c in ['"', "'"]: break prefix += c return prefix.lower() @property def quote(self) -> StringQuoteLiteral: """ Returns the quotation used to denote the string. Can be either ``'``, ``"``, ``'''`` or ``\"\"\"``. """ return self.end def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "FormattedString": return FormattedString( lpar=visit_sequence(self, "lpar", self.lpar, visitor), start=self.start, parts=visit_sequence(self, "parts", self.parts, visitor), end=self.end, rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token(self.start) for part in self.parts: part._codegen(state) state.add_token(self.end) @add_slots @dataclass(frozen=True) class ConcatenatedString(BaseString): """ Represents an implicitly concatenated string, such as:: "abc" "def" == "abcdef" .. warning:: This is different from two strings joined in a :class:`BinaryOperation` with an :class:`Add` operator, and is `sometimes viewed as an antifeature of Python `_. """ #: String on the left of the concatenation. left: Union[SimpleString, FormattedString] #: String on the right of the concatenation. right: Union[SimpleString, FormattedString, "ConcatenatedString"] lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precidence dictation. rpar: Sequence[RightParen] = () #: Whitespace between the ``left`` and ``right`` substrings. whitespace_between: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if super(ConcatenatedString, self)._safe_to_use_with_word_operator(position): # if we have parenthesis, we're safe. return True return self._check_left_right_word_concatenation_safety( position, self.left, self.right ) def _validate(self) -> None: super(ConcatenatedString, self)._validate() # Strings that are concatenated cannot have parens. 
if bool(self.left.lpar) or bool(self.left.rpar): raise CSTValidationError("Cannot concatenate parenthesized strings.") if bool(self.right.lpar) or bool(self.right.rpar): raise CSTValidationError("Cannot concatenate parenthesized strings.") # Cannot concatenate str and bytes leftbytes = "b" in self.left.prefix right = self.right if isinstance(right, ConcatenatedString): rightbytes = "b" in right.left.prefix elif isinstance(right, SimpleString): rightbytes = "b" in right.prefix elif isinstance(right, FormattedString): rightbytes = "b" in right.prefix else: raise Exception("Logic error!") if leftbytes != rightbytes: raise CSTValidationError("Cannot concatenate string and bytes.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ConcatenatedString": return ConcatenatedString( lpar=visit_sequence(self, "lpar", self.lpar, visitor), left=visit_required(self, "left", self.left, visitor), whitespace_between=visit_required( self, "whitespace_between", self.whitespace_between, visitor ), right=visit_required(self, "right", self.right, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.left._codegen(state) self.whitespace_between._codegen(state) self.right._codegen(state) @property def evaluated_value(self) -> Union[str, bytes, None]: """ Return an :func:`ast.literal_eval` evaluated str of recursively concatenated :py:attr:`left` and :py:attr:`right` if and only if both :py:attr:`left` and :py:attr:`right` are composed of :class:`SimpleString` or :class:`ConcatenatedString` (:class:`FormattedString` cannot be evaluated). """ left = self.left right = self.right if isinstance(left, FormattedString) or isinstance(right, FormattedString): return None left_val = left.evaluated_value right_val = right.evaluated_value if right_val is None: return None if isinstance(left_val, bytes) and isinstance(right_val, bytes): return left_val + right_val if isinstance(left_val, str) and isinstance(right_val, str): return left_val + right_val return None @add_slots @dataclass(frozen=True) class ComparisonTarget(CSTNode): """ A target for a :class:`Comparison`. Owns the comparison operator and the value to the right of the operator. """ #: A comparison operator such as ``<``, ``>=``, ``==``, ``is``, or ``in``. operator: BaseCompOp #: The right hand side of the comparison operation. comparator: BaseExpression def _validate(self) -> None: # Validate operator spacing rules operator = self.operator if ( isinstance(operator, (In, NotIn, Is, IsNot)) and operator.whitespace_after.empty and not self.comparator._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ) ): raise CSTValidationError( "Must have at least one space around comparison operator." ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ComparisonTarget": return ComparisonTarget( operator=visit_required(self, "operator", self.operator, visitor), comparator=visit_required(self, "comparator", self.comparator, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: self.operator._codegen(state) self.comparator._codegen(state) @add_slots @dataclass(frozen=True) class Comparison(BaseExpression): """ A comparison between multiple values such as ``x < y``, ``x < y < z``, or ``x in [y, z]``. These comparisons typically result in boolean values. Unlike :class:`BinaryOperation` and :class:`BooleanOperation`, comparisons are not restricted to a left and right child.
Instead they can contain an arbitrary number of :class:`ComparisonTarget` children. ``x < y < z`` is not equivalent to ``(x < y) < z`` or ``x < (y < z)``. Instead, it's roughly equivalent to ``x < y and y < z``. For more details, see `Python's documentation on comparisons `_. :: # x < y < z Comparison( Name("x"), [ ComparisonTarget(LessThan(), Name("y")), ComparisonTarget(LessThan(), Name("z")), ], ) """ #: The first value in the full sequence of values to compare. This value will be #: compared against the first value in ``comparisons``. left: BaseExpression #: Pairs of :class:`BaseCompOp` operators and expression values to compare. These #: come after ``left``. Each value is compared against the value before and after #: itself in the sequence. comparisons: Sequence[ComparisonTarget] lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if super(Comparison, self)._safe_to_use_with_word_operator(position): # we have parenthesis return True return self._check_left_right_word_concatenation_safety( position, self.left, self.comparisons[-1].comparator ) def _validate(self) -> None: # Perform any validation on base type super(Comparison, self)._validate() if len(self.comparisons) == 0: raise CSTValidationError("Must have at least one ComparisonTarget.") # Validate operator spacing rules previous_comparator = self.left for target in self.comparisons: operator = target.operator if ( isinstance(operator, (In, NotIn, Is, IsNot)) and operator.whitespace_before.empty and not previous_comparator._safe_to_use_with_word_operator( ExpressionPosition.LEFT ) ): raise CSTValidationError( "Must have at least one space around comparison operator." ) previous_comparator = target.comparator def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Comparison": return Comparison( lpar=visit_sequence(self, "lpar", self.lpar, visitor), left=visit_required(self, "left", self.left, visitor), comparisons=visit_sequence(self, "comparisons", self.comparisons, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.left._codegen(state) for comp in self.comparisons: comp._codegen(state) @add_slots @dataclass(frozen=True) class UnaryOperation(BaseExpression): """ Any generic unary expression, such as ``not x`` or ``-x``. :class:`UnaryOperation` nodes apply a :class:`BaseUnaryOp` to an expression. """ #: The unary operator that applies some operation (e.g. negation) to the #: ``expression``. operator: BaseUnaryOp #: The expression that should be transformed (e.g. negated) by the operator to #: create a new value. expression: BaseExpression lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation.
rpar: Sequence[RightParen] = () def _validate(self) -> None: # Perform any validation on base type super(UnaryOperation, self)._validate() if ( isinstance(self.operator, Not) and self.operator.whitespace_after.empty and not self.expression._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ) ): raise CSTValidationError("Must have at least one space after not operator.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "UnaryOperation": return UnaryOperation( lpar=visit_sequence(self, "lpar", self.lpar, visitor), operator=visit_required(self, "operator", self.operator, visitor), expression=visit_required(self, "expression", self.expression, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: """ As long as our operator is not the ``Not`` unary operator, we are safe to use without a space. """ if super(UnaryOperation, self)._safe_to_use_with_word_operator(position): return True if position == ExpressionPosition.RIGHT: return not isinstance(self.operator, Not) if position == ExpressionPosition.LEFT: return self.expression._safe_to_use_with_word_operator( ExpressionPosition.LEFT ) return False def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.operator._codegen(state) self.expression._codegen(state) @add_slots @dataclass(frozen=True) class BinaryOperation(BaseExpression): """ An operation that combines two expressions such as ``x << y`` or ``y + z``. :class:`BinaryOperation` nodes apply a :class:`BaseBinaryOp` to an expression. Binary operations do not include operations performed with :class:`BaseBooleanOp` nodes, such as ``and`` or ``or``. Instead, those operations are provided by :class:`BooleanOperation`. It also does not include support for comparison operators performed with :class:`BaseCompOp`, such as ``<``, ``>=``, ``==``, ``is``, or ``in``. Instead, those operations are provided by :class:`Comparison`. """ #: The left hand side of the operation. left: BaseExpression #: The actual operator such as ``<<`` or ``+`` that combines the ``left`` and #: ``right`` expressions. operator: BaseBinaryOp #: The right hand side of the operation. right: BaseExpression lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "BinaryOperation": return BinaryOperation( lpar=visit_sequence(self, "lpar", self.lpar, visitor), left=visit_required(self, "left", self.left, visitor), operator=visit_required(self, "operator", self.operator, visitor), right=visit_required(self, "right", self.right, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if super(BinaryOperation, self)._safe_to_use_with_word_operator(position): return True return self._check_left_right_word_concatenation_safety( position, self.left, self.right ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.left._codegen(state) self.operator._codegen(state) self.right._codegen(state) @add_slots @dataclass(frozen=True) class BooleanOperation(BaseExpression): """ An operation that combines two booleans such as ``x or y`` or ``z and w``. :class:`BooleanOperation` nodes apply a :class:`BaseBooleanOp` to an expression. Boolean operations do not include operations performed with :class:`BaseBinaryOp` nodes, such as ``+`` or ``<<``.
Instead, those operations are provided by :class:`BinaryOperation`. It also does not include support for comparison operators performed with :class:`BaseCompOp`, such as ``<``, ``>=``, ``==``, ``is``, or ``in``. Instead, those operations are provided by :class:`Comparison`. """ #: The left hand side of the operation. left: BaseExpression #: The actual operator such as ``and`` or ``or`` that combines the ``left`` and #: ``right`` expressions. operator: BaseBooleanOp #: The right hand side of the operation. right: BaseExpression lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _validate(self) -> None: # Paren validation and such super(BooleanOperation, self)._validate() # Validate spacing rules if ( self.operator.whitespace_before.empty and not self.left._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): raise CSTValidationError( "Must have at least one space around boolean operator." ) if ( self.operator.whitespace_after.empty and not self.right._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) ): raise CSTValidationError( "Must have at least one space around boolean operator." ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "BooleanOperation": return BooleanOperation( lpar=visit_sequence(self, "lpar", self.lpar, visitor), left=visit_required(self, "left", self.left, visitor), operator=visit_required(self, "operator", self.operator, visitor), right=visit_required(self, "right", self.right, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if super(BooleanOperation, self)._safe_to_use_with_word_operator(position): return True return self._check_left_right_word_concatenation_safety( position, self.left, self.right ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.left._codegen(state) self.operator._codegen(state) self.right._codegen(state) @add_slots @dataclass(frozen=True) class Attribute(BaseAssignTargetExpression, BaseDelTargetExpression): """ An attribute reference, such as ``x.y``. Note that in the case of ``x.y.z``, the outer attribute will have an attr of ``z`` and the value will be another :class:`Attribute` referencing the ``y`` attribute on ``x``:: Attribute( value=Attribute( value=Name("x"), attr=Name("y"), ), attr=Name("z"), ) """ #: An expression which, when evaluated, will produce an object with ``attr`` as an #: attribute. value: BaseExpression #: The name of the attribute being accessed on the ``value`` object. attr: Name #: A separating dot. If there's whitespace between the ``value`` and ``attr``, this #: dot owns it. dot: Dot = Dot() lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation.
rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Attribute": return Attribute( lpar=visit_sequence(self, "lpar", self.lpar, visitor), value=visit_required(self, "value", self.value, visitor), dot=visit_required(self, "dot", self.dot, visitor), attr=visit_required(self, "attr", self.attr, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if super(Attribute, self)._safe_to_use_with_word_operator(position): return True return self._check_left_right_word_concatenation_safety( position, self.value, self.attr ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.value._codegen(state) self.dot._codegen(state) self.attr._codegen(state) class BaseSlice(CSTNode, ABC): """ Any slice type that can slot into a :class:`SubscriptElement`. This node is purely for typing. """ __slots__ = () @add_slots @dataclass(frozen=True) class Index(BaseSlice): """ Any index as passed to a :class:`Subscript`. In ``x[2]``, this would be the ``2`` value. """ #: The index value itself. value: BaseExpression #: An optional string with an asterisk appearing before the value. This is #: expanded into a variable number of positional arguments. See PEP-646. star: Optional[Literal["*"]] = None #: Whitespace after the ``star`` (if it exists), but before the ``value``. whitespace_after_star: Optional[BaseParenthesizableWhitespace] = None def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Index": return Index( star=self.star, whitespace_after_star=visit_optional( self, "whitespace_after_star", self.whitespace_after_star, visitor ), value=visit_required(self, "value", self.value, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: star = self.star if star is not None: state.add_token(star) ws = self.whitespace_after_star if ws is not None: ws._codegen(state) self.value._codegen(state) @add_slots @dataclass(frozen=True) class Slice(BaseSlice): """ Any slice operation in a :class:`Subscript`, such as ``1:``, ``2:3:4``, etc. Note that the grammar does NOT allow parenthesis around a slice so they are not supported here.
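    For example (a rough sketch), ``x[1:2:3]`` contains::

        Slice(
            lower=Integer("1"),
            upper=Integer("2"),
            step=Integer("3"),
        )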
""" #: The lower bound in the slice, if present lower: Optional[BaseExpression] #: The upper bound in the slice, if present upper: Optional[BaseExpression] #: The step in the slice, if present step: Optional[BaseExpression] = None #: The first slice operator first_colon: Colon = Colon.field() #: The second slice operator, usually omitted second_colon: Union[Colon, MaybeSentinel] = MaybeSentinel.DEFAULT def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Slice": return Slice( lower=visit_optional(self, "lower", self.lower, visitor), first_colon=visit_required(self, "first_colon", self.first_colon, visitor), upper=visit_optional(self, "upper", self.upper, visitor), second_colon=visit_sentinel( self, "second_colon", self.second_colon, visitor ), step=visit_optional(self, "step", self.step, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: lower = self.lower if lower is not None: lower._codegen(state) self.first_colon._codegen(state) upper = self.upper if upper is not None: upper._codegen(state) second_colon = self.second_colon if second_colon is MaybeSentinel.DEFAULT and self.step is not None: state.add_token(":") elif isinstance(second_colon, Colon): second_colon._codegen(state) step = self.step if step is not None: step._codegen(state) @add_slots @dataclass(frozen=True) class SubscriptElement(CSTNode): """ Part of a sequence of slices in a :class:`Subscript`, such as ``1:2, 3``. This is not used in Python's standard library, but it is used in some third-party libraries. For example, `NumPy uses it to select values and ranges from multi-dimensional arrays `_. """ #: A slice or index that is part of a subscript. slice: BaseSlice #: A separating comma, with any whitespace it owns. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "SubscriptElement": return SubscriptElement( slice=visit_required(self, "slice", self.slice, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), ) def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: with state.record_syntactic_position(self): self.slice._codegen(state) comma = self.comma if comma is MaybeSentinel.DEFAULT and default_comma: state.add_token(", ") elif isinstance(comma, Comma): comma._codegen(state) @add_slots @dataclass(frozen=True) class Subscript(BaseAssignTargetExpression, BaseDelTargetExpression): """ A indexed subscript reference (:class:`Index`) such as ``x[2]``, a :class:`Slice` such as ``x[1:-1]``, or an extended slice (:class:`SubscriptElement`) such as ``x[1:2, 3]``. """ #: The left-hand expression which, when evaluated, will be subscripted, such as #: ``x`` in ``x[2]``. value: BaseExpression #: The :class:`SubscriptElement` to extract from the ``value``. slice: Sequence[SubscriptElement] lbracket: LeftSquareBracket = LeftSquareBracket.field() #: Brackets after the ``value`` surrounding the ``slice``. rbracket: RightSquareBracket = RightSquareBracket.field() lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () #: Whitespace after the ``value``, but before the ``lbracket``. 
whitespace_after_value: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _validate(self) -> None: super(Subscript, self)._validate() # Validate valid commas if len(self.slice) < 1: raise CSTValidationError("Cannot have empty SubscriptElement.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Subscript": return Subscript( lpar=visit_sequence(self, "lpar", self.lpar, visitor), value=visit_required(self, "value", self.value, visitor), whitespace_after_value=visit_required( self, "whitespace_after_value", self.whitespace_after_value, visitor ), lbracket=visit_required(self, "lbracket", self.lbracket, visitor), slice=visit_sequence(self, "slice", self.slice, visitor), rbracket=visit_required(self, "rbracket", self.rbracket, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if position == ExpressionPosition.LEFT: return True if super(Subscript, self)._safe_to_use_with_word_operator(position): return True if position == ExpressionPosition.RIGHT: return self.value._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) return False def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.value._codegen(state) self.whitespace_after_value._codegen(state) self.lbracket._codegen(state) lastslice = len(self.slice) - 1 for i, slice in enumerate(self.slice): slice._codegen(state, default_comma=(i != lastslice)) self.rbracket._codegen(state) @add_slots @dataclass(frozen=True) class Annotation(CSTNode): """ An annotation for a function (`PEP 3107`_) or on a variable (`PEP 526`_). Typically these are used in the context of type hints (`PEP 484`_), such as:: # a variable with a type good_ideas: List[str] = [] # a function with type annotations def concat(substrings: Sequence[str]) -> str: ... .. _PEP 3107: https://www.python.org/dev/peps/pep-3107/ .. _PEP 526: https://www.python.org/dev/peps/pep-0526/ .. _PEP 484: https://www.python.org/dev/peps/pep-0484/ """ #: The annotation's value itself. This is the part of the annotation after the #: colon or arrow. annotation: BaseExpression whitespace_before_indicator: Union[ BaseParenthesizableWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT whitespace_after_indicator: BaseParenthesizableWhitespace = SimpleWhitespace.field( " " ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Annotation": return Annotation( whitespace_before_indicator=visit_sentinel( self, "whitespace_before_indicator", self.whitespace_before_indicator, visitor, ), whitespace_after_indicator=visit_required( self, "whitespace_after_indicator", self.whitespace_after_indicator, visitor, ), annotation=visit_required(self, "annotation", self.annotation, visitor), ) def _codegen_impl( self, state: CodegenState, default_indicator: Optional[str] = None ) -> None: # First, figure out the indicator which tells us default whitespace. if default_indicator is None: raise CSTCodegenError( "Must specify a concrete default_indicator if default used on indicator." 
) # Now, output the whitespace whitespace_before_indicator = self.whitespace_before_indicator if isinstance(whitespace_before_indicator, BaseParenthesizableWhitespace): whitespace_before_indicator._codegen(state) elif isinstance(whitespace_before_indicator, MaybeSentinel): if default_indicator == "->": state.add_token(" ") else: raise Exception("Logic error!") # Now, output the indicator and the rest of the annotation state.add_token(default_indicator) self.whitespace_after_indicator._codegen(state) with state.record_syntactic_position(self): self.annotation._codegen(state) @add_slots @dataclass(frozen=True) class ParamStar(CSTNode): """ A sentinel indicator on a :class:`Parameters` list to denote that the subsequent params are keyword-only args. This syntax is described in `PEP 3102`_. .. _PEP 3102: https://www.python.org/dev/peps/pep-3102/#specification """ # Comma that comes after the star. comma: Comma = Comma.field(whitespace_after=SimpleWhitespace(" ")) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ParamStar": return ParamStar(comma=visit_required(self, "comma", self.comma, visitor)) def _codegen_impl(self, state: CodegenState) -> None: state.add_token("*") self.comma._codegen(state) @add_slots @dataclass(frozen=True) class ParamSlash(CSTNode): """ A sentinel indicator on a :class:`Parameters` list to denote that the previous params are positional-only args. This syntax is described in `PEP 570`_. .. _PEP 570: https://www.python.org/dev/peps/pep-0570/#specification """ #: Optional comma that comes after the slash. This comma doesn't own the whitespace #: between ``/`` and ``,``. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT #: Whitespace after the ``/`` character. This is captured here in case there is a #: comma. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ParamSlash": return ParamSlash( comma=visit_sentinel(self, "comma", self.comma, visitor), whitespace_after=visit_required( self, "whitespace_after", self.whitespace_after, visitor ), ) def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: state.add_token("/") self.whitespace_after._codegen(state) comma = self.comma if comma is MaybeSentinel.DEFAULT and default_comma: state.add_token(", ") elif isinstance(comma, Comma): comma._codegen(state) @add_slots @dataclass(frozen=True) class Param(CSTNode): """ A positional or keyword argument in a :class:`Parameters` list. May contain an :class:`Annotation` and, in some cases, a ``default``. """ #: The parameter name itself. name: Name #: Any optional :class:`Annotation`. These annotations are usually used as type #: hints. annotation: Optional[Annotation] = None #: The equal sign used to denote assignment if there is a default. equal: Union[AssignEqual, MaybeSentinel] = MaybeSentinel.DEFAULT #: Any optional default value, used when the argument is not supplied. default: Optional[BaseExpression] = None #: A trailing comma. If one is not provided, :class:`MaybeSentinel` will be #: replaced with a comma only if a comma is required. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT #: Zero, one, or two asterisks appearing before name for :class:`Param`'s #: ``star_arg`` and ``star_kwarg``. star: Union[str, MaybeSentinel] = MaybeSentinel.DEFAULT #: The whitespace before ``name``. It will appear after ``star`` when a star #: exists. 
whitespace_after_star: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: The whitespace after this entire node. whitespace_after_param: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _validate(self) -> None: if self.default is None and isinstance(self.equal, AssignEqual): raise CSTValidationError( "Must have a default when specifying an AssignEqual." ) if isinstance(self.star, str) and self.star not in ("", "*", "**"): raise CSTValidationError("Must specify either '', '*' or '**' for star.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Param": return Param( star=self.star, whitespace_after_star=visit_required( self, "whitespace_after_star", self.whitespace_after_star, visitor ), name=visit_required(self, "name", self.name, visitor), annotation=visit_optional(self, "annotation", self.annotation, visitor), equal=visit_sentinel(self, "equal", self.equal, visitor), default=visit_optional(self, "default", self.default, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), whitespace_after_param=visit_required( self, "whitespace_after_param", self.whitespace_after_param, visitor ), ) def _codegen_impl( self, state: CodegenState, default_star: Optional[str] = None, default_comma: bool = False, ) -> None: with state.record_syntactic_position(self): star = self.star if isinstance(star, MaybeSentinel): if default_star is None: raise CSTCodegenError( "Must specify a concrete default_star if default used on star." ) star = default_star if isinstance(star, str): state.add_token(star) self.whitespace_after_star._codegen(state) self.name._codegen(state) annotation = self.annotation if annotation is not None: annotation._codegen(state, default_indicator=":") equal = self.equal if equal is MaybeSentinel.DEFAULT and self.default is not None: state.add_token(" = ") elif isinstance(equal, AssignEqual): equal._codegen(state) default = self.default if default is not None: default._codegen(state) comma = self.comma if comma is MaybeSentinel.DEFAULT and default_comma: state.add_token(", ") elif isinstance(comma, Comma): comma._codegen(state) self.whitespace_after_param._codegen(state) @add_slots @dataclass(frozen=True) class Parameters(CSTNode): """ A function or lambda parameter list. """ #: Positional parameters, with or without defaults. Positional parameters #: with defaults must all be after those without defaults. params: Sequence[Param] = () # Optional parameter that captures unspecified positional arguments, or a sentinel # star that dictates that the parameters following it are keyword-only args. star_arg: Union[Param, ParamStar, MaybeSentinel] = MaybeSentinel.DEFAULT #: Keyword-only params that may or may not have defaults. kwonly_params: Sequence[Param] = () #: Optional parameter that captures unspecified kwargs. star_kwarg: Optional[Param] = None #: Positional-only parameters, with or without defaults. Positional-only #: parameters with defaults must all be after those without defaults. posonly_params: Sequence[Param] = () #: Optional sentinel that dictates that the preceding parameters are positional-only #: args. posonly_ind: Union[ParamSlash, MaybeSentinel] = MaybeSentinel.DEFAULT def _validate_stars_sequence(self, vals: Sequence[Param], *, section: str) -> None: if len(vals) == 0: return for val in vals: if isinstance(val.star, str) and val.star != "": raise CSTValidationError( f"Expecting a star prefix of '' for {section} Param."
) def _validate_posonly_ind(self) -> None: if isinstance(self.posonly_ind, ParamSlash) and len(self.posonly_params) == 0: raise CSTValidationError( "Must have at least one posonly param if ParamSlash is used." ) def _validate_kwonly_star(self) -> None: if isinstance(self.star_arg, ParamStar) and len(self.kwonly_params) == 0: raise CSTValidationError( "Must have at least one kwonly param if ParamStar is used." ) def _validate_defaults(self) -> None: seen_default = False # pyre-fixme[60]: Concatenation not yet support for multiple variadic # tuples: `*self.posonly_params, *self.params`. for param in (*self.posonly_params, *self.params): if param.default: # Mark that we've moved onto defaults if not seen_default: seen_default = True else: if seen_default: # We accidentally included a non-default after a default arg! raise CSTValidationError( "Cannot have param without defaults following a param with defaults." ) star_arg = self.star_arg if isinstance(star_arg, Param) and star_arg.default is not None: raise CSTValidationError("Cannot have default for star_arg.") star_kwarg = self.star_kwarg if star_kwarg is not None and star_kwarg.default is not None: raise CSTValidationError("Cannot have default for star_kwarg.") def _validate_stars(self) -> None: if len(self.params) > 0: self._validate_stars_sequence(self.params, section="params") if len(self.posonly_params) > 0: self._validate_stars_sequence(self.posonly_params, section="posonly_params") star_arg = self.star_arg if ( isinstance(star_arg, Param) and isinstance(star_arg.star, str) and star_arg.star != "*" ): raise CSTValidationError( "Expecting a star prefix of '*' for star_arg Param." ) if len(self.kwonly_params) > 0: self._validate_stars_sequence(self.kwonly_params, section="kwonly_params") star_kwarg = self.star_kwarg if ( star_kwarg is not None and isinstance(star_kwarg.star, str) and star_kwarg.star != "**" ): raise CSTValidationError( "Expecting a star prefix of '**' for star_kwarg Param." ) def _validate(self) -> None: # Validate posonly_params slash placement semantics. self._validate_posonly_ind() # Validate kwonly_param star placement semantics. self._validate_kwonly_star() # Validate defaults semantics for params and star_arg/star_kwarg. self._validate_defaults() # Validate that we don't have random stars on non star_kwarg. self._validate_stars() def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Parameters": return Parameters( posonly_params=visit_sequence( self, "posonly_params", self.posonly_params, visitor ), posonly_ind=visit_sentinel(self, "posonly_ind", self.posonly_ind, visitor), params=visit_sequence(self, "params", self.params, visitor), star_arg=visit_sentinel(self, "star_arg", self.star_arg, visitor), kwonly_params=visit_sequence( self, "kwonly_params", self.kwonly_params, visitor ), star_kwarg=visit_optional(self, "star_kwarg", self.star_kwarg, visitor), ) def _safe_to_join_with_lambda(self) -> bool: """ Determine if Parameters need a space after the `lambda` keyword. Returns True iff it's safe to omit the space between `lambda` and these Parameters. See also `BaseExpression._safe_to_use_with_word_operator`. For example: `lambda*_: None` """ if len(self.posonly_params) != 0: return False # posonly_ind can't appear if above condition is false if len(self.params) > 0 and self.params[0].star not in {"*", "**"}: return False return True def _codegen_impl(self, state: CodegenState) -> None: # noqa: C901 # Compute the star existence first so we can ask about whether # each element is the last in the list or not.
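        # The sections render in source order: posonly_params, "/", params,
        # then "*" or "*args", kwonly_params, and "**kwargs". An illustrative
        # rendering: (a, /, b, *, c, **kw).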
star_arg = self.star_arg if isinstance(star_arg, MaybeSentinel): starincluded = len(self.kwonly_params) > 0 elif isinstance(star_arg, (Param, ParamStar)): starincluded = True else: starincluded = False # Render out the positional-only params first. They will always have trailing # commas because in order to have positional-only params, there must be a # slash afterwards. for i, param in enumerate(self.posonly_params): param._codegen(state, default_star="", default_comma=True) # Render out the positional-only indicator if necessary. more_values = ( starincluded or len(self.params) > 0 or len(self.kwonly_params) > 0 or self.star_kwarg is not None ) posonly_ind = self.posonly_ind if isinstance(posonly_ind, ParamSlash): # It's explicitly included, so render the version we have here which # might have spacing applied to its comma. posonly_ind._codegen(state, default_comma=more_values) elif len(self.posonly_params) > 0: if more_values: state.add_token("/, ") else: state.add_token("/") # Render out the params next, computing necessary trailing commas. lastparam = len(self.params) - 1 more_values = ( starincluded or len(self.kwonly_params) > 0 or self.star_kwarg is not None ) for i, param in enumerate(self.params): param._codegen( state, default_star="", default_comma=(i < lastparam or more_values) ) # Render out optional star sentinel if it's explicitly included or # if we are inferring it from kwonly_params. Otherwise, render out the # optional star_arg. if isinstance(star_arg, MaybeSentinel): if starincluded: state.add_token("*, ") elif isinstance(star_arg, Param): more_values = len(self.kwonly_params) > 0 or self.star_kwarg is not None star_arg._codegen(state, default_star="*", default_comma=more_values) elif isinstance(star_arg, ParamStar): star_arg._codegen(state) # Render out the kwonly_args next, computing necessary trailing commas. lastparam = len(self.kwonly_params) - 1 more_values = self.star_kwarg is not None for i, param in enumerate(self.kwonly_params): param._codegen( state, default_star="", default_comma=(i < lastparam or more_values) ) # Finally, render out any optional star_kwarg star_kwarg = self.star_kwarg if star_kwarg is not None: star_kwarg._codegen(state, default_star="**", default_comma=False) @add_slots @dataclass(frozen=True) class Lambda(BaseExpression): """ A lambda expression that creates an anonymous function. :: Lambda( params=Parameters([Param(Name("arg"))]), body=Ellipsis(), ) Represents the following code:: lambda arg: ... Named function statements are provided by :class:`FunctionDef`. """ #: The arguments to the lambda. This is similar to the arguments on a #: :class:`FunctionDef`, however lambda arguments are not allowed to have an #: :class:`Annotation`. params: Parameters #: The value that the lambda computes and returns when called. body: BaseExpression #: The colon separating the parameters from the body. colon: Colon = Colon.field(whitespace_after=SimpleWhitespace(" ")) lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () #: Whitespace after the lambda keyword, but before any argument or the colon.
whitespace_after_lambda: Union[ BaseParenthesizableWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if position == ExpressionPosition.LEFT: return len(self.rpar) > 0 or self.body._safe_to_use_with_word_operator( position ) return super()._safe_to_use_with_word_operator(position) def _validate(self) -> None: # Validate parents super(Lambda, self)._validate() # Sum up all parameters all_params = [ *self.params.posonly_params, *self.params.params, *self.params.kwonly_params, ] star_arg = self.params.star_arg if isinstance(star_arg, Param): all_params.append(star_arg) star_kwarg = self.params.star_kwarg if star_kwarg is not None: all_params.append(star_kwarg) # Check for nonzero parameters because several checks care # about this. if len(all_params) > 0: for param in all_params: if param.annotation is not None: raise CSTValidationError( "Lambda params cannot have type annotations." ) whitespace_after_lambda = self.whitespace_after_lambda if ( isinstance(whitespace_after_lambda, BaseParenthesizableWhitespace) and whitespace_after_lambda.empty and not self.params._safe_to_join_with_lambda() ): raise CSTValidationError( "Must have at least one space after lambda when specifying params" ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Lambda": return Lambda( lpar=visit_sequence(self, "lpar", self.lpar, visitor), whitespace_after_lambda=visit_sentinel( self, "whitespace_after_lambda", self.whitespace_after_lambda, visitor ), params=visit_required(self, "params", self.params, visitor), colon=visit_required(self, "colon", self.colon, visitor), body=visit_required(self, "body", self.body, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token("lambda") whitespace_after_lambda = self.whitespace_after_lambda if isinstance(whitespace_after_lambda, MaybeSentinel): if not ( len(self.params.posonly_params) == 0 and len(self.params.params) == 0 and not isinstance(self.params.star_arg, Param) and len(self.params.kwonly_params) == 0 and self.params.star_kwarg is None ): # We have one or more params, provide a space state.add_token(" ") elif isinstance(whitespace_after_lambda, BaseParenthesizableWhitespace): whitespace_after_lambda._codegen(state) self.params._codegen(state) self.colon._codegen(state) self.body._codegen(state) @add_slots @dataclass(frozen=True) class Arg(CSTNode): """ A single argument to a :class:`Call`. This supports named keyword arguments in the form of ``keyword=value`` and variable argument expansion using ``*args`` or ``**kwargs`` syntax. """ #: The argument expression itself, not including a preceding keyword, or any of #: the syntax surrounding the value, like a comma or asterisks. value: BaseExpression #: Optional keyword for the argument. keyword: Optional[Name] = None #: The equal sign used to denote assignment if there is a keyword. equal: Union[AssignEqual, MaybeSentinel] = MaybeSentinel.DEFAULT #: Any trailing comma. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT #: A string with zero, one, or two asterisks appearing before the name. These are #: expanded into a variable number of positional or keyword arguments. star: Literal["", "*", "**"] = "" #: Whitespace after the ``star`` (if it exists), but before the ``keyword`` or #: ``value`` (if no keyword is provided).
whitespace_after_star: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Whitespace after this entire node. The :class:`Comma` node (if it exists) may #: also store some trailing whitespace. whitespace_after_arg: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _validate(self) -> None: if self.keyword is None and isinstance(self.equal, AssignEqual): raise CSTValidationError( "Must have a keyword when specifying an AssignEqual." ) if self.star not in ("", "*", "**"): raise CSTValidationError("Must specify either '', '*' or '**' for star.") if self.star in ("*", "**") and self.keyword is not None: raise CSTValidationError("Cannot specify a star and a keyword together.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Arg": return Arg( star=self.star, whitespace_after_star=visit_required( self, "whitespace_after_star", self.whitespace_after_star, visitor ), keyword=visit_optional(self, "keyword", self.keyword, visitor), equal=visit_sentinel(self, "equal", self.equal, visitor), value=visit_required(self, "value", self.value, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), whitespace_after_arg=visit_required( self, "whitespace_after_arg", self.whitespace_after_arg, visitor ), ) def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: with state.record_syntactic_position(self): state.add_token(self.star) self.whitespace_after_star._codegen(state) keyword = self.keyword if keyword is not None: keyword._codegen(state) equal = self.equal if equal is MaybeSentinel.DEFAULT and self.keyword is not None: state.add_token(" = ") elif isinstance(equal, AssignEqual): equal._codegen(state) self.value._codegen(state) comma = self.comma if comma is MaybeSentinel.DEFAULT and default_comma: state.add_token(", ") elif isinstance(comma, Comma): comma._codegen(state) self.whitespace_after_arg._codegen(state) class _BaseExpressionWithArgs(BaseExpression, ABC): """ Arguments are complicated enough that we can't represent them easily in typing. So, we have common validation functions here. """ __slots__ = () #: Sequence of arguments that will be passed to the function call. args: Sequence[Arg] = () def _check_kwargs_or_keywords(self, arg: Arg) -> None: """ Validates that we only have a mix of "keyword=arg" and "**arg" expansion. """ if arg.keyword is not None: # Valid, keyword argument return None elif arg.star == "**": # Valid, kwargs return None elif arg.star == "*": # Invalid, cannot have "*" follow "**" raise CSTValidationError( "Cannot have iterable argument unpacking after keyword argument unpacking." ) else: # Invalid, cannot have positional argument follow **/keyword raise CSTValidationError( "Cannot have positional argument after keyword argument unpacking." ) def _check_starred_or_keywords( self, arg: Arg ) -> Optional[Callable[[Arg], Callable[[Arg], None]]]: """ Validates that we only have a mix of "*arg" expansion and "keyword=arg". """ if arg.keyword is not None: # Valid, keyword argument return None elif arg.star == "**": # Valid, but we now no longer allow "*" args # pyre-fixme[7]: Expected `Optional[Callable[[Arg], Callable[..., # Any]]]` but got `Callable[[Arg], Optional[Callable[[Arg], Callable[..., # Any]]]]`. return self._check_kwargs_or_keywords elif arg.star == "*": # Valid, iterable unpacking return None else: # Invalid, cannot have positional argument follow **/keyword raise CSTValidationError( "Cannot have positional argument after keyword argument." 
) def _check_positional( self, arg: Arg ) -> Optional[Callable[[Arg], Callable[[Arg], Callable[[Arg], None]]]]: """ Validates that we only have a mix of positional args and "*arg" expansion. """ if arg.keyword is not None: # Valid, but this puts us into starred/keyword state # pyre-fixme[7]: Expected `Optional[Callable[[Arg], Callable[..., # Any]]]` but got `Callable[[Arg], Optional[Callable[[Arg], Callable[..., # Any]]]]`. return self._check_starred_or_keywords elif arg.star == "**": # Valid, but we skip states to kwargs/keywords # pyre-fixme[7]: Expected `Optional[Callable[[Arg], Callable[..., # Any]]]` but got `Callable[[Arg], Optional[Callable[[Arg], Callable[..., # Any]]]]`. return self._check_kwargs_or_keywords elif arg.star == "*": # Valid, iterator expansion return None else: # Valid, allowed to have positional arguments here return None # pyre-fixme[30]: Pyre gave up inferring some types - function `_validate` was # too complex. def _validate(self) -> None: # Validate any super-class stuff, whatever it may be. super()._validate() # Now, validate the weird intermingling rules for arguments by running # a small validator state machine. This works by passing each argument # to a validator function which can either raise an exception if it # detects an invalid sequence, return a new validator to be used for the # next arg, or return None to use the same validator. We could enforce # always returning ourselves instead of None but it ends up making the # functions themselves less readable. In this way, the current validator # function encodes the state we're in (positional state, iterable # expansion state, or dictionary expansion state). validator = self._check_positional for arg in self.args: validator = validator(arg) or validator @add_slots @dataclass(frozen=True) class Call(_BaseExpressionWithArgs): """ An expression representing a function call, such as ``do_math(1, 2)`` or ``picture.post_on_instagram()``. Function calls consist of a function name and a sequence of arguments wrapped in :class:`Arg` nodes. """ #: The expression resulting in a callable that we are to call. Often a :class:`Name` #: or :class:`Attribute`. func: BaseExpression #: The arguments to pass to the resulting callable. These may be a mix of #: positional arguments, keyword arguments, or "starred" arguments. args: Sequence[Arg] = () lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. These are not the parenthesis #: before and after the list of ``args``, but rather arguments around the entire #: call expression, such as ``(( do_math(1, 2) ))``. rpar: Sequence[RightParen] = () #: Whitespace after the ``func`` name, but before the opening parenthesis. whitespace_after_func: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Whitespace after the opening parenthesis but before the first argument (if there #: are any). Whitespace after the last argument but before the closing parenthesis #: is owned by the last :class:`Arg` if it exists. whitespace_before_args: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: """ Calls have a close paren on the right side regardless of whether they're parenthesized as a whole. As a result, they are safe to use directly against an adjacent node to the right. 
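
        For example, ``foo(x)if y else z`` parses, because the call's closing
        parenthesis already separates the call from the ``if`` keyword that
        follows it.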
""" if position == ExpressionPosition.LEFT: return True if super(Call, self)._safe_to_use_with_word_operator(position): return True if position == ExpressionPosition.RIGHT: return self.func._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) return False def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Call": return Call( lpar=visit_sequence(self, "lpar", self.lpar, visitor), func=visit_required(self, "func", self.func, visitor), whitespace_after_func=visit_required( self, "whitespace_after_func", self.whitespace_after_func, visitor ), whitespace_before_args=visit_required( self, "whitespace_before_args", self.whitespace_before_args, visitor ), args=visit_sequence(self, "args", self.args, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.func._codegen(state) self.whitespace_after_func._codegen(state) state.add_token("(") self.whitespace_before_args._codegen(state) lastarg = len(self.args) - 1 for i, arg in enumerate(self.args): arg._codegen(state, default_comma=(i != lastarg)) state.add_token(")") @add_slots @dataclass(frozen=True) class Await(BaseExpression): """ An await expression. Await expressions are only valid inside the body of an asynchronous :class:`FunctionDef` or (as of Python 3.7) inside of an asynchronous :class:`GeneratorExp` nodes. """ #: The actual expression we need to wait for. expression: BaseExpression lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () #: Whitespace that appears after the ``async`` keyword, but before the inner #: ``expression``. whitespace_after_await: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _validate(self) -> None: # Validate any super-class stuff, whatever it may be. super(Await, self)._validate() # Make sure we don't run identifiers together. if ( self.whitespace_after_await.empty and not self.expression._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ) ): raise CSTValidationError("Must have at least one space after await") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Await": return Await( lpar=visit_sequence(self, "lpar", self.lpar, visitor), whitespace_after_await=visit_required( self, "whitespace_after_await", self.whitespace_after_await, visitor ), expression=visit_required(self, "expression", self.expression, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token("await") self.whitespace_after_await._codegen(state) self.expression._codegen(state) @add_slots @dataclass(frozen=True) class IfExp(BaseExpression): """ An if expression of the form ``body if test else orelse``. If statements are provided by :class:`If` and :class:`Else` nodes. """ #: The test to perform. test: BaseExpression #: The expression to evaluate when the test is true. body: BaseExpression #: The expression to evaluate when the test is false. orelse: BaseExpression lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () #: Whitespace after the ``body`` expression, but before the ``if`` keyword. whitespace_before_if: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the ``if`` keyword, but before the ``test`` clause. 
whitespace_after_if: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the ``test`` expression, but before the ``else`` keyword. whitespace_before_else: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the ``else`` keyword, but before the ``orelse`` expression. whitespace_after_else: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if position == ExpressionPosition.RIGHT: return self.body._safe_to_use_with_word_operator(position) else: return self.orelse._safe_to_use_with_word_operator(position) def _validate(self) -> None: # Paren validation and such super(IfExp, self)._validate() # Validate spacing rules if ( self.whitespace_before_if.empty and not self.body._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): raise CSTValidationError( "Must have at least one space before 'if' keyword." ) if ( self.whitespace_after_if.empty and not self.test._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) ): raise CSTValidationError("Must have at least one space after 'if' keyword.") if ( self.whitespace_before_else.empty and not self.test._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): raise CSTValidationError( "Must have at least one space before 'else' keyword." ) if ( self.whitespace_after_else.empty and not self.orelse._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ) ): raise CSTValidationError( "Must have at least one space after 'else' keyword." ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "IfExp": return IfExp( lpar=visit_sequence(self, "lpar", self.lpar, visitor), body=visit_required(self, "body", self.body, visitor), whitespace_before_if=visit_required( self, "whitespace_before_if", self.whitespace_before_if, visitor ), whitespace_after_if=visit_required( self, "whitespace_after_if", self.whitespace_after_if, visitor ), test=visit_required(self, "test", self.test, visitor), whitespace_before_else=visit_required( self, "whitespace_before_else", self.whitespace_before_else, visitor ), whitespace_after_else=visit_required( self, "whitespace_after_else", self.whitespace_after_else, visitor ), orelse=visit_required(self, "orelse", self.orelse, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.body._codegen(state) self.whitespace_before_if._codegen(state) state.add_token("if") self.whitespace_after_if._codegen(state) self.test._codegen(state) self.whitespace_before_else._codegen(state) state.add_token("else") self.whitespace_after_else._codegen(state) self.orelse._codegen(state) @add_slots @dataclass(frozen=True) class From(CSTNode): """ A ``from x`` stanza in a :class:`Yield` or :class:`Raise`. """ #: The expression that we are yielding/raising from. item: BaseExpression #: The whitespace at the very start of this node. whitespace_before_from: Union[ BaseParenthesizableWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT #: The whitespace after the ``from`` keyword, but before the ``item``. whitespace_after_from: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _validate(self) -> None: if ( isinstance(self.whitespace_after_from, BaseParenthesizableWhitespace) and self.whitespace_after_from.empty and not self.item._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) ): raise CSTValidationError( "Must have at least one space after 'from' keyword." 
) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "From": return From( whitespace_before_from=visit_sentinel( self, "whitespace_before_from", self.whitespace_before_from, visitor ), whitespace_after_from=visit_required( self, "whitespace_after_from", self.whitespace_after_from, visitor ), item=visit_required(self, "item", self.item, visitor), ) def _codegen_impl(self, state: CodegenState, default_space: str = "") -> None: whitespace_before_from = self.whitespace_before_from if isinstance(whitespace_before_from, BaseParenthesizableWhitespace): whitespace_before_from._codegen(state) else: state.add_token(default_space) with state.record_syntactic_position(self): state.add_token("from") self.whitespace_after_from._codegen(state) self.item._codegen(state) @add_slots @dataclass(frozen=True) class Yield(BaseExpression): """ A yield expression similar to ``yield x`` or ``yield from fun()``. To learn more about the ways that yield can be used in generators, refer to `Python's language reference `__. """ #: The value yielded from the generator, in the case of a :class:`From` clause, a #: sub-generator to iterate over. value: Optional[Union[BaseExpression, From]] = None lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () #: Whitespace after the ``yield`` keyword, but before the ``value``. whitespace_after_yield: Union[ BaseParenthesizableWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT def _validate(self) -> None: # Paren rules and such super(Yield, self)._validate() # Our own rules whitespace_after_yield = self.whitespace_after_yield if ( isinstance(whitespace_after_yield, BaseParenthesizableWhitespace) and whitespace_after_yield.empty ): value = self.value if isinstance(value, From): raise CSTValidationError( "Must have at least one space after 'yield' keyword." ) if isinstance( value, BaseExpression ) and not value._safe_to_use_with_word_operator(ExpressionPosition.RIGHT): raise CSTValidationError( "Must have at least one space after 'yield' keyword." ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Yield": return Yield( lpar=visit_sequence(self, "lpar", self.lpar, visitor), whitespace_after_yield=visit_sentinel( self, "whitespace_after_yield", self.whitespace_after_yield, visitor ), value=visit_optional(self, "value", self.value, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): state.add_token("yield") whitespace_after_yield = self.whitespace_after_yield if isinstance(whitespace_after_yield, BaseParenthesizableWhitespace): whitespace_after_yield._codegen(state) else: # Only need a space after yield if there is a value to yield. if self.value is not None: state.add_token(" ") value = self.value if isinstance(value, From): value._codegen(state, default_space="") elif value is not None: value._codegen(state) class _BaseElementImpl(CSTNode, ABC): """ An internal base class for :class:`Element` and :class:`DictElement`. """ __slots__ = () value: BaseExpression comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT def _codegen_comma( self, state: CodegenState, default_comma: bool = False, default_comma_whitespace: bool = False, # False for a single-item collection ) -> None: """ Called by `_codegen_impl` in subclasses to generate the comma. 
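
        When ``comma`` is ``MaybeSentinel.DEFAULT``, a comma is emitted only if
        ``default_comma`` is true, followed by a trailing space if
        ``default_comma_whitespace`` is also true.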
""" comma = self.comma if comma is MaybeSentinel.DEFAULT and default_comma: if default_comma_whitespace: state.add_token(", ") else: state.add_token(",") elif isinstance(comma, Comma): comma._codegen(state) @abstractmethod def _codegen_impl( self, state: CodegenState, default_comma: bool = False, default_comma_whitespace: bool = False, # False for a single-item collection ) -> None: ... class BaseElement(_BaseElementImpl, ABC): """ An element of a literal list, tuple, or set. For elements of a literal dict, see BaseDictElement. """ __slots__ = () class BaseDictElement(_BaseElementImpl, ABC): """ An element of a literal dict. For elements of a list, tuple, or set, see BaseElement. """ __slots__ = () @add_slots @dataclass(frozen=True) class Element(BaseElement): """ A simple value in a literal :class:`List`, :class:`Tuple`, or :class:`Set`. These a literal collection may also contain a :class:`StarredElement`. If you're using a literal :class:`Dict`, see :class:`DictElement` instead. """ value: BaseExpression #: A trailing comma. By default, we'll only insert a comma if one is required. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Element": return Element( value=visit_required(self, "value", self.value, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), ) def _codegen_impl( self, state: CodegenState, default_comma: bool = False, default_comma_whitespace: bool = False, ) -> None: with state.record_syntactic_position(self): self.value._codegen(state) self._codegen_comma(state, default_comma, default_comma_whitespace) @add_slots @dataclass(frozen=True) class DictElement(BaseDictElement): """ A simple ``key: value`` pair that represents a single entry in a literal :class:`Dict`. :class:`Dict` nodes may also contain a :class:`StarredDictElement`. If you're using a literal :class:`List`, :class:`Tuple`, or :class:`Set`, see :class:`Element` instead. """ key: BaseExpression value: BaseExpression #: A trailing comma. By default, we'll only insert a comma if one is required. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT #: Whitespace after the key, but before the colon in ``key : value``. whitespace_before_colon: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Whitespace after the colon, but before the value in ``key : value``. 
whitespace_after_colon: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "DictElement": return DictElement( key=visit_required(self, "key", self.key, visitor), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), whitespace_after_colon=visit_required( self, "whitespace_after_colon", self.whitespace_after_colon, visitor ), value=visit_required(self, "value", self.value, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), ) def _codegen_impl( self, state: CodegenState, default_comma: bool = False, default_comma_whitespace: bool = False, ) -> None: with state.record_syntactic_position(self): self.key._codegen(state) self.whitespace_before_colon._codegen(state) state.add_token(":") self.whitespace_after_colon._codegen(state) self.value._codegen(state) self._codegen_comma(state, default_comma, default_comma_whitespace) @add_slots @dataclass(frozen=True) class StarredElement(BaseElement, BaseExpression, _BaseParenthesizedNode): """ A starred ``*value`` element that expands to represent multiple values in a literal :class:`List`, :class:`Tuple`, or :class:`Set`. If you're using a literal :class:`Dict`, see :class:`StarredDictElement` instead. If this node owns parenthesis, those parenthesis wrap the leading asterisk, but not the trailing comma. For example:: StarredElement( cst.Name("el"), comma=cst.Comma(), lpar=[cst.LeftParen()], rpar=[cst.RightParen()], ) will generate:: (*el), """ value: BaseExpression #: A trailing comma. By default, we'll only insert a comma if one is required. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT #: Parenthesis at the beginning of the node, before the leading asterisk. lpar: Sequence[LeftParen] = () #: Parentheses after the value, but before a comma (if there is one). rpar: Sequence[RightParen] = () #: Whitespace between the leading asterisk and the value expression. whitespace_before_value: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "StarredElement": return StarredElement( lpar=visit_sequence(self, "lpar", self.lpar, visitor), whitespace_before_value=visit_required( self, "whitespace_before_value", self.whitespace_before_value, visitor ), value=visit_required(self, "value", self.value, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), ) def _codegen_impl( self, state: CodegenState, default_comma: bool = False, default_comma_whitespace: bool = False, ) -> None: with self._parenthesize(state): state.add_token("*") self.whitespace_before_value._codegen(state) self.value._codegen(state) self._codegen_comma(state, default_comma, default_comma_whitespace) @add_slots @dataclass(frozen=True) class StarredDictElement(BaseDictElement): """ A starred ``**value`` element that expands to represent multiple values in a literal :class:`Dict`. If you're using a literal :class:`List`, :class:`Tuple`, or :class:`Set`, see :class:`StarredElement` instead. Unlike :class:`StarredElement`, this node does not own left or right parenthesis, but the ``value`` field may still contain parenthesis. This is due to some asymmetry in Python's grammar. """ value: BaseExpression #: A trailing comma. By default, we'll only insert a comma if one is required. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT #: Whitespace between the leading asterisks and the value expression. 
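    #: Note that ``{**  d}`` is legal Python; any space between ``**`` and the
    #: value ends up in this field.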
whitespace_before_value: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "StarredDictElement": return StarredDictElement( whitespace_before_value=visit_required( self, "whitespace_before_value", self.whitespace_before_value, visitor ), value=visit_required(self, "value", self.value, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), ) def _codegen_impl( self, state: CodegenState, default_comma: bool = False, default_comma_whitespace: bool = False, ) -> None: with state.record_syntactic_position(self): state.add_token("**") self.whitespace_before_value._codegen(state) self.value._codegen(state) self._codegen_comma(state, default_comma, default_comma_whitespace) @add_slots @dataclass(frozen=True) class Tuple(BaseAssignTargetExpression, BaseDelTargetExpression): """ An immutable literal tuple. Tuples are often (but not always) parenthesized. :: Tuple([ Element(Integer("1")), Element(Integer("2")), StarredElement(Name("others")), ]) generates the following code:: (1, 2, *others) """ #: A sequence containing all the :class:`Element` and :class:`StarredElement` nodes #: in the tuple. elements: Sequence[BaseElement] lpar: Sequence[LeftParen] = field(default_factory=lambda: (LeftParen(),)) #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = field(default_factory=lambda: (RightParen(),)) def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if super(Tuple, self)._safe_to_use_with_word_operator(position): # if we have parenthesis, we're safe. return True # elements[-1] and elements[0] must exist past this point, because # we're not parenthesized, meaning we must have at least one element. elements = self.elements if position == ExpressionPosition.LEFT: last_element = elements[-1] return ( isinstance(last_element.comma, Comma) or ( isinstance(last_element, StarredElement) and len(last_element.rpar) > 0 ) or last_element.value._safe_to_use_with_word_operator(position) ) else: # ExpressionPosition.RIGHT first_element = elements[0] # starred elements are always safe because they begin with ( or * return isinstance( first_element, StarredElement ) or first_element.value._safe_to_use_with_word_operator(position) def _validate(self) -> None: # Paren validation and such super(Tuple, self)._validate() if len(self.elements) == 0: if len(self.lpar) == 0: # assumes len(lpar) == len(rpar), via superclass raise CSTValidationError( "A zero-length tuple must be wrapped in parentheses." ) # Invalid commas aren't possible, because MaybeSentinel will ensure that there # is a comma where required. def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Tuple": return Tuple( lpar=visit_sequence(self, "lpar", self.lpar, visitor), elements=visit_sequence(self, "elements", self.elements, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): elements = self.elements if len(elements) == 1: elements[0]._codegen( state, default_comma=True, default_comma_whitespace=False ) else: for idx, el in enumerate(elements): el._codegen( state, default_comma=(idx < len(elements) - 1), default_comma_whitespace=True, ) class BaseList(BaseExpression, ABC): """ A base class for :class:`List` and :class:`ListComp`, which both result in a list object when evaluated. """ __slots__ = () lbracket: LeftSquareBracket = LeftSquareBracket.field() #: Brackets surrounding the list. 
rbracket: RightSquareBracket = RightSquareBracket.field() lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: return True @contextmanager def _bracketize(self, state: CodegenState) -> Generator[None, None, None]: self.lbracket._codegen(state) yield self.rbracket._codegen(state) @add_slots @dataclass(frozen=True) class List(BaseList, BaseAssignTargetExpression, BaseDelTargetExpression): """ A mutable literal list. :: List([ Element(Integer("1")), Element(Integer("2")), StarredElement(Name("others")), ]) generates the following code:: [1, 2, *others] List comprehensions are represented with a :class:`ListComp` node. """ #: A sequence containing all the :class:`Element` and :class:`StarredElement` nodes #: in the list. elements: Sequence[BaseElement] lbracket: LeftSquareBracket = LeftSquareBracket.field() #: Brackets surrounding the list. rbracket: RightSquareBracket = RightSquareBracket.field() lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "List": return List( lpar=visit_sequence(self, "lpar", self.lpar, visitor), lbracket=visit_required(self, "lbracket", self.lbracket, visitor), elements=visit_sequence(self, "elements", self.elements, visitor), rbracket=visit_required(self, "rbracket", self.rbracket, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state), self._bracketize(state): elements = self.elements for idx, el in enumerate(elements): el._codegen( state, default_comma=(idx < len(elements) - 1), default_comma_whitespace=True, ) class _BaseSetOrDict(BaseExpression, ABC): """ An abstract base class for :class:`BaseSet` and :class:`BaseDict`. Literal sets and dicts are syntactically similar (hence this shared base class), but are semantically different. This base class is an implementation detail and shouldn't be exported. """ __slots__ = () lbrace: LeftCurlyBrace = LeftCurlyBrace.field() #: Braces surrounding the set or dict. rbrace: RightCurlyBrace = RightCurlyBrace.field() lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: return True # brace-ize seems like a very made-up word. And it is! @contextmanager def _braceize(self, state: CodegenState) -> Generator[None, None, None]: self.lbrace._codegen(state) yield self.rbrace._codegen(state) class BaseSet(_BaseSetOrDict, ABC): """ An abstract base class for :class:`Set` and :class:`SetComp`, which both result in a set object when evaluated. """ __slots__ = () @add_slots @dataclass(frozen=True) class Set(BaseSet): """ A mutable literal set. :: Set([ Element(Integer("1")), Element(Integer("2")), StarredElement(Name("others")), ]) generates the following code:: {1, 2, *others} Set comprehensions are represented with a :class:`SetComp` node. """ #: A sequence containing all the :class:`Element` and :class:`StarredElement` nodes #: in the set. elements: Sequence[BaseElement] lbrace: LeftCurlyBrace = LeftCurlyBrace.field() #: Braces surrounding the set. rbrace: RightCurlyBrace = RightCurlyBrace.field() lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. 
    rpar: Sequence[RightParen] = ()

    def _validate(self) -> None:
        super(Set, self)._validate()

        if len(self.elements) == 0:
            raise CSTValidationError(
                "A literal set must have at least one element. A zero-element set "
                + "would be syntactically ambiguous with an empty dict, `{}`."
            )

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Set":
        return Set(
            lpar=visit_sequence(self, "lpar", self.lpar, visitor),
            lbrace=visit_required(self, "lbrace", self.lbrace, visitor),
            elements=visit_sequence(self, "elements", self.elements, visitor),
            rbrace=visit_required(self, "rbrace", self.rbrace, visitor),
            rpar=visit_sequence(self, "rpar", self.rpar, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        with self._parenthesize(state), self._braceize(state):
            elements = self.elements
            for idx, el in enumerate(elements):
                el._codegen(
                    state,
                    default_comma=(idx < len(elements) - 1),
                    default_comma_whitespace=True,
                )


class BaseDict(_BaseSetOrDict, ABC):
    """
    An abstract base class for :class:`Dict` and :class:`DictComp`, which both
    result in a dict object when evaluated.
    """

    __slots__ = ()


@add_slots
@dataclass(frozen=True)
class Dict(BaseDict):
    """
    A literal dictionary. Key-value pairs are stored in ``elements`` using
    :class:`DictElement` nodes.

    It's possible to expand one dictionary into another, as in ``{k: v, **expanded}``.
    Expanded elements are stored as :class:`StarredDictElement` nodes.

    ::

        Dict([
            DictElement(Name("k1"), Name("v1")),
            DictElement(Name("k2"), Name("v2")),
            StarredDictElement(Name("expanded")),
        ])

    generates the following code::

        {k1: v1, k2: v2, **expanded}
    """

    elements: Sequence[BaseDictElement]
    lbrace: LeftCurlyBrace = LeftCurlyBrace.field()
    rbrace: RightCurlyBrace = RightCurlyBrace.field()
    lpar: Sequence[LeftParen] = ()
    rpar: Sequence[RightParen] = ()

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Dict":
        return Dict(
            lpar=visit_sequence(self, "lpar", self.lpar, visitor),
            lbrace=visit_required(self, "lbrace", self.lbrace, visitor),
            elements=visit_sequence(self, "elements", self.elements, visitor),
            rbrace=visit_required(self, "rbrace", self.rbrace, visitor),
            rpar=visit_sequence(self, "rpar", self.rpar, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        with self._parenthesize(state), self._braceize(state):
            elements = self.elements
            for idx, el in enumerate(elements):
                el._codegen(
                    state,
                    default_comma=(idx < len(elements) - 1),
                    default_comma_whitespace=True,
                )


@add_slots
@dataclass(frozen=True)
class CompFor(CSTNode):
    """
    One ``for`` clause in a :class:`BaseComp`, or a nested hierarchy of
    ``for`` clauses.

    Nested loops in comprehensions are difficult to get right, but they can be
    thought of as a flat representation of nested clauses.

    ``elt for a in b for c in d if e`` can be thought of as::

        for a in b:
            for c in d:
                if e:
                    yield elt

    And that would form the following CST::

        ListComp(
            elt=Name("elt"),
            for_in=CompFor(
                target=Name("a"),
                iter=Name("b"),
                ifs=[],
                inner_for_in=CompFor(
                    target=Name("c"),
                    iter=Name("d"),
                    ifs=[
                        CompIf(
                            test=Name("e"),
                        ),
                    ],
                ),
            ),
        )

    Normal ``for`` statements are provided by :class:`For`.
    """

    #: The target to assign a value to in each iteration of the loop. This is
    #: different from :attr:`GeneratorExp.elt`, :attr:`ListComp.elt`,
    #: :attr:`SetComp.elt`, and ``key`` and ``value`` in :class:`DictComp`, because
    #: it doesn't directly affect the value of the resulting generator, list, set,
    #: or dict.
    target: BaseAssignTargetExpression

    #: The value to iterate over. Every value in ``iter`` is stored in ``target``.
iter: BaseExpression #: Zero or more conditional clauses that control this loop. If any of these tests #: fail, the ``target`` item is skipped. #: #: :: #: #: if a if b if c #: #: has similar semantics to:: #: #: if a and b and c ifs: Sequence["CompIf"] = () #: Another :class:`CompFor` node used to form nested loops. Nested comprehensions #: can be useful, but they tend to be difficult to read and write. As a result they #: are uncommon. inner_for_in: Optional["CompFor"] = None #: An optional async modifier that appears before the ``for`` keyword. asynchronous: Optional[Asynchronous] = None #: Whitespace that appears at the beginning of this node, before the ``for`` and #: ``async`` keywords. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Whitespace appearing after the ``for`` keyword, but before the ``target``. whitespace_after_for: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Whitespace appearing after the ``target``, but before the ``in`` keyword. whitespace_before_in: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Whitespace appearing after the ``in`` keyword, but before the ``iter``. whitespace_after_in: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _validate(self) -> None: if ( self.whitespace_after_for.empty and not self.target._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ) ): raise CSTValidationError( "Must have at least one space after 'for' keyword." ) if ( self.whitespace_before_in.empty and not self.target._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): raise CSTValidationError( "Must have at least one space before 'in' keyword." ) if ( self.whitespace_after_in.empty and not self.iter._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) ): raise CSTValidationError("Must have at least one space after 'in' keyword.") prev_expr = self.iter for if_clause in self.ifs: if ( if_clause.whitespace_before.empty and not prev_expr._safe_to_use_with_word_operator( ExpressionPosition.LEFT ) ): raise CSTValidationError( "Must have at least one space before 'if' keyword." ) prev_expr = if_clause.test inner_for_in = self.inner_for_in if ( inner_for_in is not None and inner_for_in.whitespace_before.empty and not prev_expr._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): keyword = "async" if inner_for_in.asynchronous else "for" raise CSTValidationError( f"Must have at least one space before '{keyword}' keyword." 
) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CompFor": return CompFor( whitespace_before=visit_required( self, "whitespace_before", self.whitespace_before, visitor ), asynchronous=visit_optional( self, "asynchronous", self.asynchronous, visitor ), whitespace_after_for=visit_required( self, "whitespace_after_for", self.whitespace_after_for, visitor ), target=visit_required(self, "target", self.target, visitor), whitespace_before_in=visit_required( self, "whitespace_before_in", self.whitespace_before_in, visitor ), whitespace_after_in=visit_required( self, "whitespace_after_in", self.whitespace_after_in, visitor ), iter=visit_required(self, "iter", self.iter, visitor), ifs=visit_sequence(self, "ifs", self.ifs, visitor), inner_for_in=visit_optional( self, "inner_for_in", self.inner_for_in, visitor ), ) def _codegen_impl(self, state: CodegenState) -> None: self.whitespace_before._codegen(state) asynchronous = self.asynchronous if asynchronous is not None: asynchronous._codegen(state) state.add_token("for") self.whitespace_after_for._codegen(state) self.target._codegen(state) self.whitespace_before_in._codegen(state) state.add_token("in") self.whitespace_after_in._codegen(state) self.iter._codegen(state) ifs = self.ifs for if_clause in ifs: if_clause._codegen(state) inner_for_in = self.inner_for_in if inner_for_in is not None: inner_for_in._codegen(state) @add_slots @dataclass(frozen=True) class CompIf(CSTNode): """ A conditional clause in a :class:`CompFor`, used as part of a generator or comprehension expression. If the ``test`` fails, the current element in the :class:`CompFor` will be skipped. """ #: An expression to evaluate. When interpreted, Python will coerce it to a boolean. test: BaseExpression #: Whitespace before the ``if`` keyword. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the ``if`` keyword, but before the ``test`` expression. whitespace_before_test: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _validate(self) -> None: if ( self.whitespace_before_test.empty and not self.test._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) ): raise CSTValidationError("Must have at least one space after 'if' keyword.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CompIf": return CompIf( whitespace_before=visit_required( self, "whitespace_before", self.whitespace_before, visitor ), whitespace_before_test=visit_required( self, "whitespace_before_test", self.whitespace_before_test, visitor ), test=visit_required(self, "test", self.test, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: self.whitespace_before._codegen(state) state.add_token("if") self.whitespace_before_test._codegen(state) self.test._codegen(state) class BaseComp(BaseExpression, ABC): """ A base class for all comprehension and generator expressions, including :class:`GeneratorExp`, :class:`ListComp`, :class:`SetComp`, and :class:`DictComp`. """ __slots__ = () for_in: CompFor class BaseSimpleComp(BaseComp, ABC): """ The base class for :class:`ListComp`, :class:`SetComp`, and :class:`GeneratorExp`. :class:`DictComp` is not a :class:`BaseSimpleComp`, because it uses ``key`` and ``value``. """ __slots__ = () #: The expression evaluated during each iteration of the comprehension. This #: lexically comes before the ``for_in`` clause, but it is semantically the #: inner-most element, evaluated inside the ``for_in`` clause. elt: BaseExpression #: The ``for ... in ... 
if ...`` clause that lexically comes after ``elt``. This may #: be a nested structure for nested comprehensions. See :class:`CompFor` for #: details. for_in: CompFor def _validate(self) -> None: super(BaseSimpleComp, self)._validate() for_in = self.for_in if ( for_in.whitespace_before.empty and not self.elt._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): keyword = "async" if for_in.asynchronous else "for" raise CSTValidationError( f"Must have at least one space before '{keyword}' keyword." ) @add_slots @dataclass(frozen=True) class GeneratorExp(BaseSimpleComp): """ A generator expression. ``elt`` represents the value yielded for each item in :attr:`CompFor.iter`. All ``for ... in ...`` and ``if ...`` clauses are stored as a recursive :class:`CompFor` data structure inside ``for_in``. """ #: The expression evaluated and yielded during each iteration of the generator. elt: BaseExpression #: The ``for ... in ... if ...`` clause that comes after ``elt``. This may be a #: nested structure for nested comprehensions. See :class:`CompFor` for details. for_in: CompFor lpar: Sequence[LeftParen] = field(default_factory=lambda: (LeftParen(),)) #: Sequence of parentheses for precedence dictation. Generator expressions must #: always be parenthesized. However, if a generator expression is the only argument #: inside a function call, the enclosing :class:`Call` node may own the parentheses #: instead. rpar: Sequence[RightParen] = field(default_factory=lambda: (RightParen(),)) def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: # Generators are always parenthesized return True # A note about validation: Generators must always be parenthesized, but it's # possible that this Generator node doesn't own those parenthesis (in the case of a # function call with a single generator argument). # # Therefore, there's no useful validation we can do here. In theory, our parent # could do the validation, but there's a ton of potential parents to a Generator, so # it's not worth the effort. def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "GeneratorExp": return GeneratorExp( lpar=visit_sequence(self, "lpar", self.lpar, visitor), elt=visit_required(self, "elt", self.elt, visitor), for_in=visit_required(self, "for_in", self.for_in, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.elt._codegen(state) self.for_in._codegen(state) @add_slots @dataclass(frozen=True) class ListComp(BaseList, BaseSimpleComp): """ A list comprehension. ``elt`` represents the value stored for each item in :attr:`CompFor.iter`. All ``for ... in ...`` and ``if ...`` clauses are stored as a recursive :class:`CompFor` data structure inside ``for_in``. """ #: The expression evaluated and stored during each iteration of the comprehension. elt: BaseExpression #: The ``for ... in ... if ...`` clause that comes after ``elt``. This may be a #: nested structure for nested comprehensions. See :class:`CompFor` for details. for_in: CompFor lbracket: LeftSquareBracket = LeftSquareBracket.field() #: Brackets surrounding the list comprehension. rbracket: RightSquareBracket = RightSquareBracket.field() lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. 
rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ListComp": return ListComp( lpar=visit_sequence(self, "lpar", self.lpar, visitor), lbracket=visit_required(self, "lbracket", self.lbracket, visitor), elt=visit_required(self, "elt", self.elt, visitor), for_in=visit_required(self, "for_in", self.for_in, visitor), rbracket=visit_required(self, "rbracket", self.rbracket, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state), self._bracketize(state): self.elt._codegen(state) self.for_in._codegen(state) @add_slots @dataclass(frozen=True) class SetComp(BaseSet, BaseSimpleComp): """ A set comprehension. ``elt`` represents the value stored for each item in :attr:`CompFor.iter`. All ``for ... in ...`` and ``if ...`` clauses are stored as a recursive :class:`CompFor` data structure inside ``for_in``. """ #: The expression evaluated and stored during each iteration of the comprehension. elt: BaseExpression #: The ``for ... in ... if ...`` clause that comes after ``elt``. This may be a #: nested structure for nested comprehensions. See :class:`CompFor` for details. for_in: CompFor lbrace: LeftCurlyBrace = LeftCurlyBrace.field() #: Braces surrounding the set comprehension. rbrace: RightCurlyBrace = RightCurlyBrace.field() lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "SetComp": return SetComp( lpar=visit_sequence(self, "lpar", self.lpar, visitor), lbrace=visit_required(self, "lbrace", self.lbrace, visitor), elt=visit_required(self, "elt", self.elt, visitor), for_in=visit_required(self, "for_in", self.for_in, visitor), rbrace=visit_required(self, "rbrace", self.rbrace, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state), self._braceize(state): self.elt._codegen(state) self.for_in._codegen(state) @add_slots @dataclass(frozen=True) class DictComp(BaseDict, BaseComp): """ A dictionary comprehension. ``key`` and ``value`` represent the dictionary entry evaluated for each item. All ``for ... in ...`` and ``if ...`` clauses are stored as a recursive :class:`CompFor` data structure inside ``for_in``. """ #: The key inserted into the dictionary during each iteration of the comprehension. key: BaseExpression #: The value associated with the ``key`` inserted into the dictionary during each #: iteration of the comprehension. value: BaseExpression #: The ``for ... in ... if ...`` clause that lexically comes after ``key`` and #: ``value``. This may be a nested structure for nested comprehensions. See #: :class:`CompFor` for details. for_in: CompFor lbrace: LeftCurlyBrace = LeftCurlyBrace.field() #: Braces surrounding the dict comprehension. rbrace: RightCurlyBrace = RightCurlyBrace.field() lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () #: Whitespace after the key, but before the colon in ``key : value``. whitespace_before_colon: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Whitespace after the colon, but before the value in ``key : value``. 
whitespace_after_colon: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _validate(self) -> None: super(DictComp, self)._validate() for_in = self.for_in if ( for_in.whitespace_before.empty and not self.value._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): keyword = "async" if for_in.asynchronous else "for" raise CSTValidationError( f"Must have at least one space before '{keyword}' keyword." ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "DictComp": return DictComp( lpar=visit_sequence(self, "lpar", self.lpar, visitor), lbrace=visit_required(self, "lbrace", self.lbrace, visitor), key=visit_required(self, "key", self.key, visitor), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), whitespace_after_colon=visit_required( self, "whitespace_after_colon", self.whitespace_after_colon, visitor ), value=visit_required(self, "value", self.value, visitor), for_in=visit_required(self, "for_in", self.for_in, visitor), rbrace=visit_required(self, "rbrace", self.rbrace, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state), self._braceize(state): self.key._codegen(state) self.whitespace_before_colon._codegen(state) state.add_token(":") self.whitespace_after_colon._codegen(state) self.value._codegen(state) self.for_in._codegen(state) @add_slots @dataclass(frozen=True) class NamedExpr(BaseExpression): """ An expression that is also an assignment, such as ``x := y + z``. Affectionately known as the walrus operator, this expression allows you to make an assignment inside an expression. This greatly simplifies loops:: while line := read_some_line_or_none(): do_thing_with_line(line) """ #: The target that is being assigned to. target: BaseExpression #: The expression being assigned to the target. value: BaseExpression #: Sequence of parenthesis for precedence dictation. lpar: Sequence[LeftParen] = () #: Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () #: Whitespace after the target, but before the walrus operator. whitespace_before_walrus: BaseParenthesizableWhitespace = SimpleWhitespace.field( " " ) #: Whitespace after the walrus operator, but before the value. 
whitespace_after_walrus: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "NamedExpr": return NamedExpr( lpar=visit_sequence(self, "lpar", self.lpar, visitor), target=visit_required(self, "target", self.target, visitor), whitespace_before_walrus=visit_required( self, "whitespace_before_walrus", self.whitespace_before_walrus, visitor ), whitespace_after_walrus=visit_required( self, "whitespace_after_walrus", self.whitespace_after_walrus, visitor ), value=visit_required(self, "value", self.value, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: if position == ExpressionPosition.LEFT: return len(self.rpar) > 0 or self.value._safe_to_use_with_word_operator( position ) return len(self.lpar) > 0 or self.target._safe_to_use_with_word_operator( position ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.target._codegen(state) self.whitespace_before_walrus._codegen(state) state.add_token(":=") self.whitespace_after_walrus._codegen(state) self.value._codegen(state) LibCST-1.2.0/libcst/_nodes/internal.py000066400000000000000000000167561456464173300175430ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from contextlib import contextmanager from dataclasses import dataclass, field from typing import Iterable, Iterator, List, Optional, Sequence, TYPE_CHECKING, Union from libcst._add_slots import add_slots from libcst._flatten_sentinel import FlattenSentinel from libcst._maybe_sentinel import MaybeSentinel from libcst._removal_sentinel import RemovalSentinel from libcst._types import CSTNodeT if TYPE_CHECKING: # These are circular dependencies only used for typing purposes from libcst._nodes.base import CSTNode # noqa: F401 from libcst._visitors import CSTVisitorT @add_slots @dataclass(frozen=False) class CodegenState: # These are derived from a Module default_indent: str default_newline: str provider: object = None # overridden by libcst.metadata.position_provider indent_tokens: List[str] = field(default_factory=list) tokens: List[str] = field(default_factory=list) def increase_indent(self, value: str) -> None: self.indent_tokens.append(value) def decrease_indent(self) -> None: self.indent_tokens.pop() def add_indent_tokens(self) -> None: self.tokens.extend(self.indent_tokens) def add_token(self, value: str) -> None: self.tokens.append(value) def before_codegen(self, node: "CSTNode") -> None: pass def after_codegen(self, node: "CSTNode") -> None: pass def pop_trailing_newline(self) -> None: """ Called by :meth:`libcst.Module._codegen_impl` at the end of the file to remove the last token (a trailing newline), assuming the file isn't empty. """ if len(self.tokens) > 0: # EmptyLine and all statements generate newlines, so we can be sure that the # last token (if we're not an empty file) is a newline. self.tokens.pop() @contextmanager def record_syntactic_position( self, node: "CSTNode", *, start_node: Optional["CSTNode"] = None, end_node: Optional["CSTNode"] = None, ) -> Iterator[None]: yield def visit_required( parent: "CSTNode", fieldname: str, node: CSTNodeT, visitor: "CSTVisitorT" ) -> CSTNodeT: """ Given a node, visits the node using `visitor`. If removal is attempted by the visitor, an exception is raised. 
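
    Implementations of ``_visit_and_replace_children`` use this helper for each
    required child, e.g. ``visit_required(self, "value", self.value, visitor)``.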
""" visitor.on_visit_attribute(parent, fieldname) result = node.visit(visitor) if isinstance(result, RemovalSentinel): raise TypeError( f"We got a RemovalSentinel while visiting a {type(node).__name__}. This " + "node's parent does not allow it to be removed." ) elif isinstance(result, FlattenSentinel): raise TypeError( f"We got a FlattenSentinel while visiting a {type(node).__name__}. This " + "node's parent does not allow for it to be it to be replaced with a " + "sequence." ) visitor.on_leave_attribute(parent, fieldname) return result def visit_optional( parent: "CSTNode", fieldname: str, node: Optional[CSTNodeT], visitor: "CSTVisitorT" ) -> Optional[CSTNodeT]: """ Given an optional node, visits the node if it exists with `visitor`. If the node is removed, returns None. """ if node is None: visitor.on_visit_attribute(parent, fieldname) visitor.on_leave_attribute(parent, fieldname) return None visitor.on_visit_attribute(parent, fieldname) result = node.visit(visitor) if isinstance(result, FlattenSentinel): raise TypeError( f"We got a FlattenSentinel while visiting a {type(node).__name__}. This " + "node's parent does not allow for it to be it to be replaced with a " + "sequence." ) visitor.on_leave_attribute(parent, fieldname) return None if isinstance(result, RemovalSentinel) else result def visit_sentinel( parent: "CSTNode", fieldname: str, node: Union[CSTNodeT, MaybeSentinel], visitor: "CSTVisitorT", ) -> Union[CSTNodeT, MaybeSentinel]: """ Given a node that can be a real value or a sentinel value, visits the node if it is real with `visitor`. If the node is removed, returns MaybeSentinel. """ if isinstance(node, MaybeSentinel): visitor.on_visit_attribute(parent, fieldname) visitor.on_leave_attribute(parent, fieldname) return MaybeSentinel.DEFAULT visitor.on_visit_attribute(parent, fieldname) result = node.visit(visitor) if isinstance(result, FlattenSentinel): raise TypeError( f"We got a FlattenSentinel while visiting a {type(node).__name__}. This " + "node's parent does not allow for it to be it to be replaced with a " + "sequence." ) visitor.on_leave_attribute(parent, fieldname) return MaybeSentinel.DEFAULT if isinstance(result, RemovalSentinel) else result def visit_iterable( parent: "CSTNode", fieldname: str, children: Iterable[CSTNodeT], visitor: "CSTVisitorT", ) -> Iterable[CSTNodeT]: """ Given an iterable of children, visits each child with `visitor`, and yields the new children with any `RemovalSentinel` values removed. """ visitor.on_visit_attribute(parent, fieldname) for child in children: new_child = child.visit(visitor) if isinstance(new_child, FlattenSentinel): yield from new_child elif not isinstance(new_child, RemovalSentinel): yield new_child visitor.on_leave_attribute(parent, fieldname) def visit_sequence( parent: "CSTNode", fieldname: str, children: Sequence[CSTNodeT], visitor: "CSTVisitorT", ) -> Sequence[CSTNodeT]: """ A convenience wrapper for `visit_iterable` that returns a sequence instead of an iterable. """ return tuple(visit_iterable(parent, fieldname, children, visitor)) def visit_body_iterable( parent: "CSTNode", fieldname: str, children: Sequence[CSTNodeT], visitor: "CSTVisitorT", ) -> Iterable[CSTNodeT]: """ Similar to visit_iterable above, but capable of discarding empty SimpleStatementLine nodes in order to preserve correct pass insertion behavior. """ visitor.on_visit_attribute(parent, fieldname) for child in children: new_child = child.visit(visitor) # Don't yield a child if we removed it. 
if isinstance(new_child, RemovalSentinel): continue # Don't yield a child if the old child wasn't empty # and the new child is. This means a RemovalSentinel # caused a child of this node to be dropped, and it # is now useless. if isinstance(new_child, FlattenSentinel): for child_ in new_child: if (not child._is_removable()) and child_._is_removable(): continue yield child_ else: if (not child._is_removable()) and new_child._is_removable(): continue # Safe to yield child in this case. yield new_child visitor.on_leave_attribute(parent, fieldname) def visit_body_sequence( parent: "CSTNode", fieldname: str, children: Sequence[CSTNodeT], visitor: "CSTVisitorT", ) -> Sequence[CSTNodeT]: """ A convenience wrapper for `visit_body_iterable` that returns a sequence instead of an iterable. """ return tuple(visit_body_iterable(parent, fieldname, children, visitor)) LibCST-1.2.0/libcst/_nodes/module.py000066400000000000000000000146071456464173300172050ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass from typing import cast, Optional, Sequence, TYPE_CHECKING, TypeVar, Union from libcst._add_slots import add_slots from libcst._nodes.base import CSTNode from libcst._nodes.internal import CodegenState, visit_body_sequence, visit_sequence from libcst._nodes.statement import ( BaseCompoundStatement, get_docstring_impl, SimpleStatementLine, ) from libcst._nodes.whitespace import EmptyLine from libcst._removal_sentinel import RemovalSentinel from libcst._visitors import CSTVisitorT if TYPE_CHECKING: # This is circular, so import the type only in type checking from libcst._parser.types.config import PartialParserConfig _ModuleSelfT = TypeVar("_ModuleSelfT", bound="Module") # type alias needed for scope overlap in type definition builtin_bytes = bytes @add_slots @dataclass(frozen=True) class Module(CSTNode): """ Contains some top-level information inferred from the file letting us set correct defaults when printing the tree about global formatting rules. All code parsed with :func:`parse_module` will be encapsulated in a module. """ #: A list of zero or more statements that make up this module. body: Sequence[Union[SimpleStatementLine, BaseCompoundStatement]] #: Normally any whitespace/comments are assigned to the next node visited, but #: :class:`Module` is a special case, and comments at the top of the file tend #: to refer to the module itself, so we assign them to the :class:`Module` #: instead of the first statement in the body. header: Sequence[EmptyLine] = () #: Any trailing whitespace/comments found after the last statement. footer: Sequence[EmptyLine] = () #: The file's encoding format. When parsing a ``bytes`` object, this value may be #: inferred from the contents of the parsed source code. When parsing a ``str``, #: this value defaults to ``"utf-8"``. #: #: This value affects how :attr:`bytes` encodes the source code. encoding: str = "utf-8" #: The indentation of the file, expressed as a series of tabs and/or spaces. This #: value is inferred from the contents of the parsed source code by default. default_indent: str = " " * 4 #: The newline of the file, expressed as ``\n``, ``\r\n``, or ``\r``. This value is #: inferred from the contents of the parsed source code by default. default_newline: str = "\n" #: Whether the module has a trailing newline or not. 
has_trailing_newline: bool = True def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Module": return Module( header=visit_sequence(self, "header", self.header, visitor), body=visit_body_sequence(self, "body", self.body, visitor), footer=visit_sequence(self, "footer", self.footer, visitor), encoding=self.encoding, default_indent=self.default_indent, default_newline=self.default_newline, has_trailing_newline=self.has_trailing_newline, ) def visit(self: _ModuleSelfT, visitor: CSTVisitorT) -> _ModuleSelfT: """ Returns the result of running a visitor over this module. :class:`Module` overrides the default visitor entry point to resolve metadata dependencies declared by 'visitor'. """ result = super(Module, self).visit(visitor) if isinstance(result, RemovalSentinel): return self.with_changes(body=(), header=(), footer=()) else: # is a Module return cast(_ModuleSelfT, result) def _codegen_impl(self, state: CodegenState) -> None: for h in self.header: h._codegen(state) for stmt in self.body: stmt._codegen(state) for f in self.footer: f._codegen(state) if self.has_trailing_newline: if len(state.tokens) == 0: # There was nothing in the header, footer, or body. Just add a newline # to preserve the trailing newline. state.add_token(state.default_newline) else: # has_trailing_newline is false state.pop_trailing_newline() @property def code(self) -> str: """ The string representation of this module, respecting the inferred indentation and newline type. """ return self.code_for_node(self) @property def bytes(self) -> builtin_bytes: """ The bytes representation of this module, respecting the inferred indentation and newline type, using the current encoding. """ return self.code.encode(self.encoding) def code_for_node(self, node: CSTNode) -> str: """ Generates the code for the given node in the context of this module. This is a method of Module, not CSTNode, because we need to know the module's default indentation and newline formats. """ state = CodegenState( default_indent=self.default_indent, default_newline=self.default_newline ) node._codegen(state) return "".join(state.tokens) @property def config_for_parsing(self) -> "PartialParserConfig": """ Generates a parser config appropriate for passing to a :func:`parse_expression` or :func:`parse_statement` call. This is useful when using either parser function to generate code from a string template. By using a generated parser config instead of the default, you can guarantee that trees generated from both statement and expression strings have the same inferred defaults for things like newlines, indents and similar:: module = cst.parse_module("pass\\n") expression = cst.parse_expression("1 + 2", config=module.config_for_parsing) """ from libcst._parser.types.config import PartialParserConfig return PartialParserConfig( encoding=self.encoding, default_indent=self.default_indent, default_newline=self.default_newline, ) def get_docstring(self, clean: bool = True) -> Optional[str]: """ Returns a :func:`inspect.cleandoc` cleaned docstring if the docstring is available, ``None`` otherwise. """ return get_docstring_impl(self.body, clean) LibCST-1.2.0/libcst/_nodes/op.py000066400000000000000000000747311456464173300163420ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Tuple

from libcst._add_slots import add_slots
from libcst._nodes.base import BaseLeaf, CSTNode, CSTValidationError
from libcst._nodes.internal import CodegenState, visit_required
from libcst._nodes.whitespace import BaseParenthesizableWhitespace, SimpleWhitespace
from libcst._visitors import CSTVisitorT


class _BaseOneTokenOp(CSTNode, ABC):
    """
    Any node that has a static value and needs to own whitespace on both sides.
    """

    __slots__ = ()

    whitespace_before: BaseParenthesizableWhitespace

    whitespace_after: BaseParenthesizableWhitespace

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "_BaseOneTokenOp":
        # pyre-ignore Pyre thinks that self.__class__ is CSTNode, not _BaseOneTokenOp
        return self.__class__(
            whitespace_before=visit_required(
                self, "whitespace_before", self.whitespace_before, visitor
            ),
            whitespace_after=visit_required(
                self, "whitespace_after", self.whitespace_after, visitor
            ),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        self.whitespace_before._codegen(state)
        with state.record_syntactic_position(self):
            state.add_token(self._get_token())
        self.whitespace_after._codegen(state)

    @abstractmethod
    def _get_token(self) -> str:
        ...


class _BaseTwoTokenOp(CSTNode, ABC):
    """
    Any node that ends up as two tokens, so we must preserve the whitespace
    between them.
    """

    __slots__ = ()

    whitespace_before: BaseParenthesizableWhitespace

    whitespace_between: BaseParenthesizableWhitespace

    whitespace_after: BaseParenthesizableWhitespace

    def _validate(self) -> None:
        if self.whitespace_between.empty:
            raise CSTValidationError(
                "Must have at least one space between the two tokens."
            )

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "_BaseTwoTokenOp":
        # pyre-ignore Pyre thinks that self.__class__ is CSTNode, not _BaseTwoTokenOp
        return self.__class__(
            whitespace_before=visit_required(
                self, "whitespace_before", self.whitespace_before, visitor
            ),
            whitespace_between=visit_required(
                self, "whitespace_between", self.whitespace_between, visitor
            ),
            whitespace_after=visit_required(
                self, "whitespace_after", self.whitespace_after, visitor
            ),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        self.whitespace_before._codegen(state)
        with state.record_syntactic_position(self):
            state.add_token(self._get_tokens()[0])
            self.whitespace_between._codegen(state)
            state.add_token(self._get_tokens()[1])
        self.whitespace_after._codegen(state)

    @abstractmethod
    def _get_tokens(self) -> Tuple[str, str]:
        ...


class BaseUnaryOp(CSTNode, ABC):
    """
    Any node that has a static value used in a :class:`UnaryOperation` expression.
    """

    __slots__ = ()

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "BaseUnaryOp":
        # pyre-ignore Pyre thinks that self.__class__ is CSTNode, not BaseUnaryOp
        return self.__class__(
            whitespace_after=visit_required(
                self, "whitespace_after", self.whitespace_after, visitor
            )
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        state.add_token(self._get_token())
        self.whitespace_after._codegen(state)

    @abstractmethod
    def _get_token(self) -> str:
        ...


class BaseBooleanOp(_BaseOneTokenOp, ABC):
    """
    Any node that has a static value used in a :class:`BooleanOperation` expression.
    This node is purely for typing.
    """

    __slots__ = ()


class BaseBinaryOp(CSTNode, ABC):
    """
    Any node that has a static value used in a :class:`BinaryOperation` expression.
    This node is purely for typing.
""" __slots__ = () class BaseCompOp(CSTNode, ABC): """ Any node that has a static value used in a :class:`Comparison` expression. This node is purely for typing. """ __slots__ = () class BaseAugOp(CSTNode, ABC): """ Any node that has a static value used in an :class:`AugAssign` assignment. This node is purely for typing. """ __slots__ = () @add_slots @dataclass(frozen=True) class Semicolon(_BaseOneTokenOp): """ Used by any small statement (any subclass of :class:`BaseSmallStatement` such as :class:`Pass`) as a separator between subsequent nodes contained within a :class:`SimpleStatementLine` or :class:`SimpleStatementSuite`. """ #: Any space that appears directly before this semicolon. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Any space that appears directly after this semicolon. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _get_token(self) -> str: return ";" @add_slots @dataclass(frozen=True) class Colon(_BaseOneTokenOp): """ Used by :class:`Slice` as a separator between subsequent expressions, and in :class:`Lambda` to separate arguments and body. """ #: Any space that appears directly before this colon. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Any space that appears directly after this colon. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _get_token(self) -> str: return ":" @add_slots @dataclass(frozen=True) class Comma(_BaseOneTokenOp): """ Syntactic trivia used as a separator between subsequent items in various parts of the grammar. Some use-cases are: * :class:`Import` or :class:`ImportFrom`. * :class:`FunctionDef` arguments. * :class:`Tuple`/:class:`List`/:class:`Set`/:class:`Dict` elements. """ #: Any space that appears directly before this comma. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Any space that appears directly after this comma. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _get_token(self) -> str: return "," @add_slots @dataclass(frozen=True) class Dot(_BaseOneTokenOp): """ Used by :class:`Attribute` as a separator between subsequent :class:`Name` nodes. """ #: Any space that appears directly before this dot. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Any space that appears directly after this dot. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _get_token(self) -> str: return "." @add_slots @dataclass(frozen=True) class ImportStar(BaseLeaf): """ Used by :class:`ImportFrom` to denote a star import instead of a list of importable objects. """ def _codegen_impl(self, state: CodegenState) -> None: state.add_token("*") @add_slots @dataclass(frozen=True) class AssignEqual(_BaseOneTokenOp): """ Used by :class:`AnnAssign` to denote a single equal character when doing an assignment on top of a type annotation. Also used by :class:`Param` and :class:`Arg` to denote assignment of a default value, and by :class:`FormattedStringExpression` to denote usage of self-documenting expressions. """ #: Any space that appears directly before this equal sign. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this equal sign. 
whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "=" @add_slots @dataclass(frozen=True) class Plus(BaseUnaryOp): """ A unary operator that can be used in a :class:`UnaryOperation` expression. """ #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _get_token(self) -> str: return "+" @add_slots @dataclass(frozen=True) class Minus(BaseUnaryOp): """ A unary operator that can be used in a :class:`UnaryOperation` expression. """ #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _get_token(self) -> str: return "-" @add_slots @dataclass(frozen=True) class BitInvert(BaseUnaryOp): """ A unary operator that can be used in a :class:`UnaryOperation` expression. """ #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _get_token(self) -> str: return "~" @add_slots @dataclass(frozen=True) class Not(BaseUnaryOp): """ A unary operator that can be used in a :class:`UnaryOperation` expression. """ #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "not" @add_slots @dataclass(frozen=True) class And(BaseBooleanOp): """ A boolean operator that can be used in a :class:`BooleanOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "and" @add_slots @dataclass(frozen=True) class Or(BaseBooleanOp): """ A boolean operator that can be used in a :class:`BooleanOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "or" @add_slots @dataclass(frozen=True) class Add(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "+" @add_slots @dataclass(frozen=True) class Subtract(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "-" @add_slots @dataclass(frozen=True) class Multiply(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. 
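    # A small sketch of the operator defaults above (assumes ``libcst`` is
    # importable as ``cst``): ``Not`` defaults to one trailing space so the
    # keyword cannot fuse with its operand, and ``And`` pads both sides.
    import libcst as cst

    _not = cst.UnaryOperation(operator=cst.Not(), expression=cst.Name("ready"))
    assert cst.Module([]).code_for_node(_not) == "not ready"

    _and = cst.BooleanOperation(
        left=cst.Name("a"), operator=cst.And(), right=cst.Name("b")
    )
    assert cst.Module([]).code_for_node(_and) == "a and b"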
whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "*" @add_slots @dataclass(frozen=True) class Divide(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "/" @add_slots @dataclass(frozen=True) class FloorDivide(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "//" @add_slots @dataclass(frozen=True) class Modulo(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "%" @add_slots @dataclass(frozen=True) class Power(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "**" @add_slots @dataclass(frozen=True) class LeftShift(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "<<" @add_slots @dataclass(frozen=True) class RightShift(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return ">>" @add_slots @dataclass(frozen=True) class BitOr(BaseBinaryOp, _BaseOneTokenOp): """ A binary operator that can be used in a :class:`BinaryOperation` expression. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. 
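    # Sketch (assumes ``libcst`` is importable as ``cst``): because each
    # binary operator owns its surrounding whitespace, even unconventional
    # spacing round-trips losslessly through parse and codegen.
    import libcst as cst

    _pow = cst.parse_expression("a  **  b")
    assert isinstance(_pow.operator, cst.Power)
    assert cst.Module([]).code_for_node(_pow) == "a  **  b"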
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "|"


@add_slots
@dataclass(frozen=True)
class BitAnd(BaseBinaryOp, _BaseOneTokenOp):
    """
    A binary operator that can be used in a :class:`BinaryOperation` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "&"


@add_slots
@dataclass(frozen=True)
class BitXor(BaseBinaryOp, _BaseOneTokenOp):
    """
    A binary operator that can be used in a :class:`BinaryOperation` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "^"


@add_slots
@dataclass(frozen=True)
class MatrixMultiply(BaseBinaryOp, _BaseOneTokenOp):
    """
    A binary operator that can be used in a :class:`BinaryOperation` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "@"


@add_slots
@dataclass(frozen=True)
class LessThan(BaseCompOp, _BaseOneTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "<"


@add_slots
@dataclass(frozen=True)
class GreaterThan(BaseCompOp, _BaseOneTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return ">"


@add_slots
@dataclass(frozen=True)
class Equal(BaseCompOp, _BaseOneTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "=="


@add_slots
@dataclass(frozen=True)
class LessThanEqual(BaseCompOp, _BaseOneTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")
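    # Sketch (assumes ``libcst`` is importable as ``cst``): comparison
    # operators attach to ``ComparisonTarget`` entries, which lets comparisons
    # chain the way Python's grammar does.
    import libcst as cst

    _cmp = cst.Comparison(
        left=cst.Name("a"),
        comparisons=[
            cst.ComparisonTarget(operator=cst.LessThan(), comparator=cst.Name("b")),
            cst.ComparisonTarget(
                operator=cst.LessThanEqual(), comparator=cst.Name("c")
            ),
        ],
    )
    assert cst.Module([]).code_for_node(_cmp) == "a < b <= c"

    #: Any space that appears directly after this operator.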
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "<="


@add_slots
@dataclass(frozen=True)
class GreaterThanEqual(BaseCompOp, _BaseOneTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return ">="


@add_slots
@dataclass(frozen=True)
class NotEqual(BaseCompOp, _BaseOneTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.

    This node defines a static value for convenience, but in reality due to PEP 401
    it can be one of two values, both of which should be a :class:`NotEqual`
    :class:`Comparison` operator.
    """

    #: The actual text value of this operator. Can be either ``!=`` or ``<>``.
    value: str = "!="

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _validate(self) -> None:
        if self.value not in ["!=", "<>"]:
            raise CSTValidationError("Invalid value for NotEqual node.")

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "NotEqual":
        return self.__class__(
            whitespace_before=visit_required(
                self, "whitespace_before", self.whitespace_before, visitor
            ),
            value=self.value,
            whitespace_after=visit_required(
                self, "whitespace_after", self.whitespace_after, visitor
            ),
        )

    def _get_token(self) -> str:
        return self.value


@add_slots
@dataclass(frozen=True)
class In(BaseCompOp, _BaseOneTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "in"


@add_slots
@dataclass(frozen=True)
class NotIn(BaseCompOp, _BaseTwoTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.

    This operator spans two tokens that must be separated by at least one space, so
    there is a third whitespace attribute to represent this.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears between the ``not`` and ``in`` tokens.
    whitespace_between: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_tokens(self) -> Tuple[str, str]:
        return ("not", "in")


@add_slots
@dataclass(frozen=True)
class Is(BaseCompOp, _BaseOneTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")
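    # Sketch (assumes ``libcst`` is importable as ``cst``): ``NotEqual``
    # accepts only its two legal spellings, and ``NotIn`` renders as two
    # tokens joined by ``whitespace_between``.
    import libcst as cst

    cst.NotEqual(value="<>")  # legal PEP 401 spelling
    try:
        cst.NotEqual(value="=/=")
    except cst.CSTValidationError:
        pass

    _target = cst.ComparisonTarget(operator=cst.NotIn(), comparator=cst.Name("xs"))
    _code = cst.Module([]).code_for_node(cst.Comparison(cst.Name("x"), [_target]))
    assert _code == "x not in xs"

    #: Any space that appears directly after this operator.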
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "is"


@add_slots
@dataclass(frozen=True)
class IsNot(BaseCompOp, _BaseTwoTokenOp):
    """
    A comparison operator that can be used in a :class:`Comparison` expression.

    This operator spans two tokens that must be separated by at least one space, so
    there is a third whitespace attribute to represent this.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears between the ``is`` and ``not`` tokens.
    whitespace_between: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_tokens(self) -> Tuple[str, str]:
        return ("is", "not")


@add_slots
@dataclass(frozen=True)
class AddAssign(BaseAugOp, _BaseOneTokenOp):
    """
    An augmented assignment operator that can be used in a :class:`AugAssign`
    statement.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "+="


@add_slots
@dataclass(frozen=True)
class SubtractAssign(BaseAugOp, _BaseOneTokenOp):
    """
    An augmented assignment operator that can be used in a :class:`AugAssign`
    statement.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "-="


@add_slots
@dataclass(frozen=True)
class MultiplyAssign(BaseAugOp, _BaseOneTokenOp):
    """
    An augmented assignment operator that can be used in a :class:`AugAssign`
    statement.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "*="


@add_slots
@dataclass(frozen=True)
class MatrixMultiplyAssign(BaseAugOp, _BaseOneTokenOp):
    """
    An augmented assignment operator that can be used in a :class:`AugAssign`
    statement.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "@="


@add_slots
@dataclass(frozen=True)
class DivideAssign(BaseAugOp, _BaseOneTokenOp):
    """
    An augmented assignment operator that can be used in a :class:`AugAssign`
    statement.
    """

    #: Any space that appears directly before this operator.
    whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Any space that appears directly after this operator.
    whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _get_token(self) -> str:
        return "/="


@add_slots
@dataclass(frozen=True)
class ModuloAssign(BaseAugOp, _BaseOneTokenOp):
    """
    An augmented assignment operator that can be used in a :class:`AugAssign`
    statement.
""" #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "%=" @add_slots @dataclass(frozen=True) class BitAndAssign(BaseAugOp, _BaseOneTokenOp): """ An augmented assignment operator that can be used in a :class:`AugAssign` statement. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "&=" @add_slots @dataclass(frozen=True) class BitOrAssign(BaseAugOp, _BaseOneTokenOp): """ An augmented assignment operator that can be used in a :class:`AugAssign` statement. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "|=" @add_slots @dataclass(frozen=True) class BitXorAssign(BaseAugOp, _BaseOneTokenOp): """ An augmented assignment operator that can be used in a :class:`AugAssign` statement. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "^=" @add_slots @dataclass(frozen=True) class LeftShiftAssign(BaseAugOp, _BaseOneTokenOp): """ An augmented assignment operator that can be used in a :class:`AugAssign` statement. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "<<=" @add_slots @dataclass(frozen=True) class RightShiftAssign(BaseAugOp, _BaseOneTokenOp): """ An augmented assignment operator that can be used in a :class:`AugAssign` statement. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return ">>=" @add_slots @dataclass(frozen=True) class PowerAssign(BaseAugOp, _BaseOneTokenOp): """ An augmented assignment operator that can be used in a :class:`AugAssign` statement. """ #: Any space that appears directly before this operator. whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "**=" @add_slots @dataclass(frozen=True) class FloorDivideAssign(BaseAugOp, _BaseOneTokenOp): """ An augmented assignment operator that can be used in a :class:`AugAssign` statement. """ #: Any space that appears directly before this operator. 
whitespace_before: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _get_token(self) -> str: return "//=" LibCST-1.2.0/libcst/_nodes/statement.py000066400000000000000000004307761456464173300177350ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect import re from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import Optional, Pattern, Sequence, Union from libcst._add_slots import add_slots from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.base import CSTNode, CSTValidationError from libcst._nodes.expression import ( _BaseParenthesizedNode, Annotation, Arg, Asynchronous, Attribute, BaseAssignTargetExpression, BaseDelTargetExpression, BaseExpression, ConcatenatedString, ExpressionPosition, From, LeftCurlyBrace, LeftParen, LeftSquareBracket, List, Name, Parameters, RightCurlyBrace, RightParen, RightSquareBracket, SimpleString, Tuple, ) from libcst._nodes.internal import ( CodegenState, visit_body_sequence, visit_optional, visit_required, visit_sentinel, visit_sequence, ) from libcst._nodes.op import ( AssignEqual, BaseAugOp, BitOr, Colon, Comma, Dot, ImportStar, Semicolon, ) from libcst._nodes.whitespace import ( BaseParenthesizableWhitespace, EmptyLine, ParenthesizedWhitespace, SimpleWhitespace, TrailingWhitespace, ) from libcst._visitors import CSTVisitorT _INDENT_WHITESPACE_RE: Pattern[str] = re.compile(r"[ \f\t]+", re.UNICODE) class BaseSuite(CSTNode, ABC): """ A dummy base-class for both :class:`SimpleStatementSuite` and :class:`IndentedBlock`. This exists to simplify type definitions and isinstance checks. A suite is a group of statements controlled by a clause. A suite can be one or more semicolon-separated simple statements on the same line as the header, following the header’s colon, or it can be one or more indented statements on subsequent lines. -- https://docs.python.org/3/reference/compound_stmts.html """ __slots__ = () body: Union[Sequence["BaseStatement"], Sequence["BaseSmallStatement"]] class BaseStatement(CSTNode, ABC): """ A class that exists to allow for typing to specify that any statement is allowed in a particular location. """ __slots__ = () class BaseSmallStatement(CSTNode, ABC): """ Encapsulates a small statement, like ``del`` or ``pass``, and optionally adds a trailing semicolon. A small statement is always contained inside a :class:`SimpleStatementLine` or :class:`SimpleStatementSuite`. This exists to simplify type definitions and isinstance checks. """ __slots__ = () #: An optional semicolon that appears after a small statement. This is optional #: for the last small statement in a :class:`SimpleStatementLine` or #: :class:`SimpleStatementSuite`, but all other small statements inside a simple #: statement must contain a semicolon to disambiguate multiple small statements #: on the same line. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT @abstractmethod def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: ... @add_slots @dataclass(frozen=True) class Del(BaseSmallStatement): """ Represents a ``del`` statement. ``del`` is always followed by a target. """ #: The target expression will be deleted. 
This can be a name, a tuple, #: an item of a list, an item of a dictionary, or an attribute. target: BaseDelTargetExpression #: The whitespace after the ``del`` keyword. whitespace_after_del: SimpleWhitespace = SimpleWhitespace.field(" ") #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _validate(self) -> None: if ( self.whitespace_after_del.empty and not self.target._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ) ): raise CSTValidationError("Must have at least one space after 'del'.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Del": return Del( target=visit_required(self, "target", self.target, visitor), whitespace_after_del=visit_required( self, "whitespace_after_del", self.whitespace_after_del, visitor ), semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): state.add_token("del") self.whitespace_after_del._codegen(state) self.target._codegen(state) semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class Pass(BaseSmallStatement): """ Represents a ``pass`` statement. """ #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Pass": return Pass( semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor) ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): state.add_token("pass") semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class Break(BaseSmallStatement): """ Represents a ``break`` statement, which is used to break out of a :class:`For` or :class:`While` loop early. """ #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Break": return Break( semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor) ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): state.add_token("break") semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class Continue(BaseSmallStatement): """ Represents a ``continue`` statement, which is used to skip to the next iteration in a :class:`For` or :class:`While` loop. """ #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. 
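    # Sketch (assumes ``libcst`` is importable as ``cst``): with the default
    # ``MaybeSentinel``, codegen inserts "; " between small statements that
    # share a line and omits it after the last one.
    import libcst as cst

    _line = cst.SimpleStatementLine(body=[cst.Pass(), cst.Pass()])
    assert cst.Module(body=[_line]).code == "pass; pass\n"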
semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Continue": return Continue( semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor) ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): state.add_token("continue") semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class Return(BaseSmallStatement): """ Represents a ``return`` or a ``return x`` statement. """ #: The optional expression that will be evaluated and returned. value: Optional[BaseExpression] = None #: Optional whitespace after the ``return`` keyword before the optional #: value expression. whitespace_after_return: Union[ SimpleWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _validate(self) -> None: value = self.value if value is not None: whitespace_after_return = self.whitespace_after_return has_no_gap = ( not isinstance(whitespace_after_return, MaybeSentinel) and whitespace_after_return.empty ) if has_no_gap and not value._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ): raise CSTValidationError("Must have at least one space after 'return'.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Return": return Return( whitespace_after_return=visit_sentinel( self, "whitespace_after_return", self.whitespace_after_return, visitor ), value=visit_optional(self, "value", self.value, visitor), semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): state.add_token("return") whitespace_after_return = self.whitespace_after_return value = self.value if isinstance(whitespace_after_return, MaybeSentinel): if value is not None: state.add_token(" ") else: whitespace_after_return._codegen(state) if value is not None: value._codegen(state) semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class Expr(BaseSmallStatement): """ An expression used as a statement, where the result is unused and unassigned. The most common place you will find this is in function calls where the return value is unneeded. """ #: The expression itself. Python will evaluate the expression but not assign #: the result anywhere. value: BaseExpression #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. 
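    # Sketch (assumes ``libcst`` is importable as ``cst``): with the default
    # ``MaybeSentinel`` whitespace, ``return`` only emits a space when there
    # is a value that needs separating.
    import libcst as cst

    assert cst.Module([]).code_for_node(cst.Return()) == "return"
    assert cst.Module([]).code_for_node(cst.Return(cst.Name("x"))) == "return x"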
    semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Expr":
        return Expr(
            value=visit_required(self, "value", self.value, visitor),
            semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor),
        )

    def _codegen_impl(
        self, state: CodegenState, default_semicolon: bool = False
    ) -> None:
        with state.record_syntactic_position(self):
            self.value._codegen(state)

        semicolon = self.semicolon
        if isinstance(semicolon, MaybeSentinel):
            if default_semicolon:
                state.add_token("; ")
        elif isinstance(semicolon, Semicolon):
            semicolon._codegen(state)


class _BaseSimpleStatement(CSTNode, ABC):
    """
    A simple statement is a series of small statements joined together by semicolons.

        simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE

    Whitespace between each small statement is owned by the small statements
    themselves. It can be found on the required semicolon that will be attached
    to each non-terminal small statement.
    """

    __slots__ = ()

    #: Sequence of small statements. All but the last statement are required to have
    #: a semicolon.
    body: Sequence[BaseSmallStatement]

    #: Any trailing comment and the final ``NEWLINE``, which is part of the small
    #: statement's grammar.
    trailing_whitespace: TrailingWhitespace

    def _validate(self) -> None:
        body = self.body
        for small_stmt in body[:-1]:
            if small_stmt.semicolon is None:
                raise CSTValidationError(
                    "All but the last SmallStatement in a SimpleStatementLine or "
                    + "SimpleStatementSuite must have a trailing semicolon. Otherwise, "
                    + "there's no way to syntactically disambiguate each SmallStatement "
                    + "on the same line."
                )

    def _codegen_impl(self, state: CodegenState) -> None:
        body = self.body
        if body:
            laststmt = len(body) - 1
            with state.record_syntactic_position(self, end_node=body[laststmt]):
                for idx, stmt in enumerate(body):
                    stmt._codegen(state, default_semicolon=(idx != laststmt))
        else:
            # Empty simple statement blocks are not syntactically valid in Python
            # unless they contain a 'pass' statement, so add one here.
            with state.record_syntactic_position(self):
                state.add_token("pass")
        self.trailing_whitespace._codegen(state)


@add_slots
@dataclass(frozen=True)
class SimpleStatementLine(_BaseSimpleStatement, BaseStatement):
    """
    A simple statement that's part of an IndentedBlock or Module. A simple
    statement is a series of small statements joined together by semicolons.

    This isn't differentiated from a :class:`SimpleStatementSuite` in the grammar,
    but because a :class:`SimpleStatementLine` can own additional whitespace that
    a :class:`SimpleStatementSuite` doesn't have, we're differentiating it in the
    CST.
    """

    #: Sequence of small statements. All but the last statement are required to have
    #: a semicolon.
    body: Sequence[BaseSmallStatement]

    #: Sequence of empty lines appearing before this simple statement line.
    leading_lines: Sequence[EmptyLine] = ()

    #: Any optional trailing comment and the final ``NEWLINE`` at the end of the line.
    trailing_whitespace: TrailingWhitespace = TrailingWhitespace.field()

    def _visit_and_replace_children(
        self, visitor: CSTVisitorT
    ) -> "SimpleStatementLine":
        return SimpleStatementLine(
            leading_lines=visit_sequence(
                self, "leading_lines", self.leading_lines, visitor
            ),
            body=visit_sequence(self, "body", self.body, visitor),
            trailing_whitespace=visit_required(
                self, "trailing_whitespace", self.trailing_whitespace, visitor
            ),
        )

    def _is_removable(self) -> bool:
        # If we have an empty body, we are removable since we don't represent
        # anything concrete.
        return not self.body

    def _codegen_impl(self, state: CodegenState) -> None:
        for ll in self.leading_lines:
            ll._codegen(state)
        state.add_indent_tokens()
        _BaseSimpleStatement._codegen_impl(self, state)


@add_slots
@dataclass(frozen=True)
class SimpleStatementSuite(_BaseSimpleStatement, BaseSuite):
    """
    A simple statement that's used as a suite. A simple statement is a series of
    small statements joined together by semicolons. A suite is the thing that
    follows the colon in a compound statement.

    .. code-block::

        if test: <SimpleStatementSuite>

    This isn't differentiated from a :class:`SimpleStatementLine` in the grammar,
    but because the two classes need to track different whitespace, we're
    differentiating it in the CST.
    """

    #: Sequence of small statements. All but the last statement are required to have
    #: a semicolon.
    body: Sequence[BaseSmallStatement]

    #: The whitespace between the colon in the parent statement and the body.
    leading_whitespace: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: Any optional trailing comment and the final ``NEWLINE`` at the end of the line.
    trailing_whitespace: TrailingWhitespace = TrailingWhitespace.field()

    def _visit_and_replace_children(
        self, visitor: CSTVisitorT
    ) -> "SimpleStatementSuite":
        return SimpleStatementSuite(
            leading_whitespace=visit_required(
                self, "leading_whitespace", self.leading_whitespace, visitor
            ),
            body=visit_sequence(self, "body", self.body, visitor),
            trailing_whitespace=visit_required(
                self, "trailing_whitespace", self.trailing_whitespace, visitor
            ),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        self.leading_whitespace._codegen(state)
        _BaseSimpleStatement._codegen_impl(self, state)


@add_slots
@dataclass(frozen=True)
class Else(CSTNode):
    """
    An ``else`` clause that appears optionally after an :class:`If`, :class:`While`,
    :class:`Try`, or :class:`For` statement.

    This node does not match ``elif`` clauses in :class:`If` statements. It also
    does not match the required ``else`` clause in an :class:`IfExp` expression
    (``a = b if c else d``).
    """

    #: The body of the else clause.
    body: BaseSuite

    #: Sequence of empty lines appearing before this compound statement line.
    leading_lines: Sequence[EmptyLine] = ()

    #: The whitespace appearing after the ``else`` keyword but before the colon.
    whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("")

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Else":
        return Else(
            leading_lines=visit_sequence(
                self, "leading_lines", self.leading_lines, visitor
            ),
            whitespace_before_colon=visit_required(
                self, "whitespace_before_colon", self.whitespace_before_colon, visitor
            ),
            body=visit_required(self, "body", self.body, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        for ll in self.leading_lines:
            ll._codegen(state)
        state.add_indent_tokens()

        with state.record_syntactic_position(self, end_node=self.body):
            state.add_token("else")
            self.whitespace_before_colon._codegen(state)
            state.add_token(":")
            self.body._codegen(state)


class BaseCompoundStatement(BaseStatement, ABC):
    """
    Encapsulates a compound statement, like ``if True: pass`` or ``while True: pass``.
    This exists to simplify type definitions and isinstance checks.

        Compound statements contain (groups of) other statements; they affect or
        control the execution of those other statements in some way. In general,
        compound statements span multiple lines, although in simple incarnations
        a whole compound statement may be contained in one line.

        -- https://docs.python.org/3/reference/compound_stmts.html
    """

    __slots__ = ()
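    # Sketch (assumes ``libcst`` is importable as ``cst``): the same compound
    # statement can carry either suite form, and parsing records which one.
    import libcst as cst

    _inline = cst.parse_statement("if test: pass\n")
    assert isinstance(_inline.body, cst.SimpleStatementSuite)

    _block = cst.parse_statement("if test:\n    pass\n")
    assert isinstance(_block.body, cst.IndentedBlock)

    #: The body of this compound statement.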
    body: BaseSuite

    #: Any empty lines or comments appearing before this statement.
    leading_lines: Sequence[EmptyLine]


@add_slots
@dataclass(frozen=True)
class If(BaseCompoundStatement):
    """
    An ``if`` statement. ``test`` holds a single test expression.

    ``elif`` clauses don’t have a special representation in the AST, but rather
    appear as extra :class:`If` nodes within the ``orelse`` section of the
    previous one.
    """

    #: The expression that, when evaluated, should give us a truthy/falsey value.
    test: BaseExpression  # TODO: should be a test_nocond

    #: The body of this compound statement.
    body: BaseSuite

    #: An optional ``elif`` or ``else`` clause. :class:`If` signifies an ``elif``
    #: block. :class:`Else` signifies an ``else`` block. ``None`` signifies no
    #: ``else`` or ``elif`` block.
    orelse: Union["If", Else, None] = None

    #: Sequence of empty lines appearing before this compound statement line.
    leading_lines: Sequence[EmptyLine] = ()

    #: The whitespace appearing after the ``if`` keyword but before the test
    #: expression.
    whitespace_before_test: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: The whitespace appearing after the test expression but before the colon.
    whitespace_after_test: SimpleWhitespace = SimpleWhitespace.field("")

    # TODO: _validate

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "If":
        return If(
            leading_lines=visit_sequence(
                self, "leading_lines", self.leading_lines, visitor
            ),
            whitespace_before_test=visit_required(
                self, "whitespace_before_test", self.whitespace_before_test, visitor
            ),
            test=visit_required(self, "test", self.test, visitor),
            whitespace_after_test=visit_required(
                self, "whitespace_after_test", self.whitespace_after_test, visitor
            ),
            body=visit_required(self, "body", self.body, visitor),
            orelse=visit_optional(self, "orelse", self.orelse, visitor),
        )

    def _codegen_impl(self, state: CodegenState, is_elif: bool = False) -> None:
        for ll in self.leading_lines:
            ll._codegen(state)
        state.add_indent_tokens()

        end_node = self.body if self.orelse is None else self.orelse
        with state.record_syntactic_position(self, end_node=end_node):
            state.add_token("elif" if is_elif else "if")
            self.whitespace_before_test._codegen(state)
            self.test._codegen(state)
            self.whitespace_after_test._codegen(state)
            state.add_token(":")
            self.body._codegen(state)
            orelse = self.orelse
            if orelse is not None:
                if isinstance(orelse, If):  # special-case elif
                    orelse._codegen(state, is_elif=True)
                else:  # is an Else clause
                    orelse._codegen(state)


@add_slots
@dataclass(frozen=True)
class IndentedBlock(BaseSuite):
    """
    Represents a block of statements beginning with an ``INDENT`` token and ending
    in a ``DEDENT`` token. Used as the body of compound statements, such as an if
    statement's body.

    A common alternative to an :class:`IndentedBlock` is a
    :class:`SimpleStatementSuite`, which can also be used as a :class:`BaseSuite`,
    meaning that it can be used as the body of many compound statements.

    An :class:`IndentedBlock` always occurs after a colon in a
    :class:`BaseCompoundStatement`, so it owns the trailing whitespace for the
    compound statement's clause.

    .. code-block::

        if test: # IndentedBlock's header
            body
    """

    #: Sequence of statements belonging to this indented block.
    body: Sequence[BaseStatement]

    #: Any optional trailing comment and the final ``NEWLINE`` at the end of the line.
    header: TrailingWhitespace = TrailingWhitespace.field()

    #: A string representing a specific indentation. A ``None`` value uses the
    #: module's default indentation.
    #: This is included because indentation is allowed to be inconsistent across
    #: a file, just not ambiguously.
    indent: Optional[str] = None

    #: Any trailing comments or lines after the dedent that are owned by this
    #: indented block. Statements own preceding and same-line trailing comments,
    #: but not trailing lines, so it falls on :class:`IndentedBlock` to own it. In
    #: the case that a statement follows an :class:`IndentedBlock`, that statement
    #: will own the comments and lines that are at the same indent as the
    #: statement, and this :class:`IndentedBlock` will own the comments and lines
    #: that are indented further.
    footer: Sequence[EmptyLine] = ()

    def _validate(self) -> None:
        indent = self.indent
        if indent is not None:
            if len(indent) == 0:
                raise CSTValidationError(
                    "An indented block must have a non-zero width indent."
                )
            if _INDENT_WHITESPACE_RE.fullmatch(indent) is None:
                raise CSTValidationError(
                    "An indent must be composed of only whitespace characters."
                )

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "IndentedBlock":
        return IndentedBlock(
            header=visit_required(self, "header", self.header, visitor),
            indent=self.indent,
            body=visit_body_sequence(self, "body", self.body, visitor),
            footer=visit_sequence(self, "footer", self.footer, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        self.header._codegen(state)

        indent = self.indent
        state.increase_indent(state.default_indent if indent is None else indent)
        if self.body:
            with state.record_syntactic_position(
                self, start_node=self.body[0], end_node=self.body[-1]
            ):
                for stmt in self.body:
                    # IndentedBlock is responsible for adjusting the current
                    # indentation level, but its children are responsible for
                    # actually adding that indentation to the token list.
                    stmt._codegen(state)
        else:
            # Empty indented blocks are not syntactically valid in Python unless
            # they contain a 'pass' statement, so add one here.
            state.add_indent_tokens()
            with state.record_syntactic_position(self):
                state.add_token("pass")
            state.add_token(state.default_newline)
        for f in self.footer:
            f._codegen(state)
        state.decrease_indent()


@add_slots
@dataclass(frozen=True)
class AsName(CSTNode):
    """
    An ``as name`` clause inside an :class:`ExceptHandler`, :class:`ImportAlias`
    or :class:`WithItem` node.
    """

    #: Identifier that the parent node will be aliased to.
    name: Union[Name, Tuple, List]

    #: Whitespace between the parent node and the ``as`` keyword.
    whitespace_before_as: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    #: Whitespace between the ``as`` keyword and the name.
    whitespace_after_as: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ")

    def _validate(self) -> None:
        if (
            self.whitespace_after_as.empty
            and not self.name._safe_to_use_with_word_operator(
                ExpressionPosition.RIGHT
            )
        ):
            raise CSTValidationError(
                "There must be at least one space between 'as' and name."
            )

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "AsName":
        return AsName(
            whitespace_before_as=visit_required(
                self, "whitespace_before_as", self.whitespace_before_as, visitor
            ),
            name=visit_required(self, "name", self.name, visitor),
            whitespace_after_as=visit_required(
                self, "whitespace_after_as", self.whitespace_after_as, visitor
            ),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        self.whitespace_before_as._codegen(state)
        state.add_token("as")
        self.whitespace_after_as._codegen(state)
        self.name._codegen(state)


@add_slots
@dataclass(frozen=True)
class ExceptHandler(CSTNode):
    """
    An ``except`` clause that appears optionally after a :class:`Try` statement.
""" #: The body of the except. body: BaseSuite #: The type of exception this catches. Can be a tuple in some cases, #: or ``None`` if the code is catching all exceptions. type: Optional[BaseExpression] = None #: The optional name that a caught exception is assigned to. name: Optional[AsName] = None #: Sequence of empty lines appearing before this compound statement line. leading_lines: Sequence[EmptyLine] = () #: The whitespace between the ``except`` keyword and the type attribute. whitespace_after_except: SimpleWhitespace = SimpleWhitespace.field(" ") #: The whitespace after any type or name node (whichever comes last) and #: the colon. whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") def _validate(self) -> None: name = self.name if self.type is None and name is not None: raise CSTValidationError("Cannot have a name for an empty type.") if name is not None and not isinstance(name.name, Name): raise CSTValidationError( "Must use a Name node for AsName name inside ExceptHandler." ) type_ = self.type if type_ is not None and self.whitespace_after_except.empty: # Space is only required when the first char in `type` could start # an identifier. In the most common cases, we want to allow # grouping or tuple parens. if isinstance(type_, Name) and not type_.lpar: raise CSTValidationError( "Must have at least one space after except when ExceptHandler has a type." ) name = self.name if ( type_ is not None and name is not None and name.whitespace_before_as.empty and not type_._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): raise CSTValidationError( "Must have at least one space before as keyword in an except handler." ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ExceptHandler": return ExceptHandler( leading_lines=visit_sequence( self, "leading_lines", self.leading_lines, visitor ), whitespace_after_except=visit_required( self, "whitespace_after_except", self.whitespace_after_except, visitor ), type=visit_optional(self, "type", self.type, visitor), name=visit_optional(self, "name", self.name, visitor), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), body=visit_required(self, "body", self.body, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: for ll in self.leading_lines: ll._codegen(state) state.add_indent_tokens() with state.record_syntactic_position(self, end_node=self.body): state.add_token("except") self.whitespace_after_except._codegen(state) typenode = self.type if typenode is not None: typenode._codegen(state) namenode = self.name if namenode is not None: namenode._codegen(state) self.whitespace_before_colon._codegen(state) state.add_token(":") self.body._codegen(state) @add_slots @dataclass(frozen=True) class ExceptStarHandler(CSTNode): """ An ``except*`` clause that appears after a :class:`TryStar` statement. """ #: The body of the except. body: BaseSuite #: The type of exception this catches. Can be a tuple in some cases. type: BaseExpression #: The optional name that a caught exception is assigned to. name: Optional[AsName] = None #: Sequence of empty lines appearing before this compound statement line. leading_lines: Sequence[EmptyLine] = () #: The whitespace between the ``except`` keyword and the star. whitespace_after_except: SimpleWhitespace = SimpleWhitespace.field("") #: The whitespace between the star and the type. 
    whitespace_after_star: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: The whitespace after any type or name node (whichever comes last) and
    #: the colon.
    whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("")

    def _validate(self) -> None:
        name = self.name
        if name is not None and not isinstance(name.name, Name):
            raise CSTValidationError(
                "Must use a Name node for AsName name inside ExceptStarHandler."
            )

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ExceptStarHandler":
        return ExceptStarHandler(
            leading_lines=visit_sequence(
                self, "leading_lines", self.leading_lines, visitor
            ),
            whitespace_after_except=visit_required(
                self, "whitespace_after_except", self.whitespace_after_except, visitor
            ),
            whitespace_after_star=visit_required(
                self, "whitespace_after_star", self.whitespace_after_star, visitor
            ),
            type=visit_required(self, "type", self.type, visitor),
            name=visit_optional(self, "name", self.name, visitor),
            whitespace_before_colon=visit_required(
                self, "whitespace_before_colon", self.whitespace_before_colon, visitor
            ),
            body=visit_required(self, "body", self.body, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        for ll in self.leading_lines:
            ll._codegen(state)
        state.add_indent_tokens()

        with state.record_syntactic_position(self, end_node=self.body):
            state.add_token("except")
            self.whitespace_after_except._codegen(state)
            state.add_token("*")
            self.whitespace_after_star._codegen(state)
            typenode = self.type
            if typenode is not None:
                typenode._codegen(state)
            namenode = self.name
            if namenode is not None:
                namenode._codegen(state)
            self.whitespace_before_colon._codegen(state)
            state.add_token(":")
            self.body._codegen(state)


@add_slots
@dataclass(frozen=True)
class Finally(CSTNode):
    """
    A ``finally`` clause that appears optionally after a :class:`Try` statement.
    """

    #: The body of the finally clause.
    body: BaseSuite

    #: Sequence of empty lines appearing before this compound statement line.
    leading_lines: Sequence[EmptyLine] = ()

    #: The whitespace that appears after the ``finally`` keyword but before
    #: the colon.
    whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("")

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Finally":
        return Finally(
            leading_lines=visit_sequence(
                self, "leading_lines", self.leading_lines, visitor
            ),
            whitespace_before_colon=visit_required(
                self, "whitespace_before_colon", self.whitespace_before_colon, visitor
            ),
            body=visit_required(self, "body", self.body, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        for ll in self.leading_lines:
            ll._codegen(state)
        state.add_indent_tokens()

        with state.record_syntactic_position(self, end_node=self.body):
            state.add_token("finally")
            self.whitespace_before_colon._codegen(state)
            state.add_token(":")
            self.body._codegen(state)


@add_slots
@dataclass(frozen=True)
class Try(BaseCompoundStatement):
    """
    A regular ``try`` statement that cannot contain :class:`ExceptStarHandler`
    blocks. For ``try`` statements that can contain :class:`ExceptStarHandler`
    blocks, see :class:`TryStar`.
    """

    #: The suite that is wrapped with a try statement.
    body: BaseSuite

    #: A list of zero or more exception handlers.
    handlers: Sequence[ExceptHandler] = ()

    #: An optional else case.
    orelse: Optional[Else] = None

    #: An optional finally case.
    finalbody: Optional[Finally] = None

    #: Sequence of empty lines appearing before this compound statement line.
    leading_lines: Sequence[EmptyLine] = ()
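    # Sketch (assumes ``libcst`` is importable as ``cst``; ``_suite`` is an
    # illustrative name): ``Try`` enforces its structural rules at
    # construction time.
    import libcst as cst

    _suite = cst.IndentedBlock(body=[cst.SimpleStatementLine(body=[cst.Pass()])])
    try:
        cst.Try(body=_suite)  # neither handlers nor a finally: invalid
    except cst.CSTValidationError:
        pass
    cst.Try(body=_suite, finalbody=cst.Finally(body=_suite))  # valid

    #: The whitespace that appears after the ``try`` keyword but before
    #: the colon.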
whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") def _validate(self) -> None: if len(self.handlers) == 0 and self.finalbody is None: raise CSTValidationError( "A Try statement must have at least one ExceptHandler or Finally" ) if len(self.handlers) == 0 and self.orelse is not None: raise CSTValidationError( "A Try statement must have at least one ExceptHandler in order " + "to have an Else." ) # Check bare excepts are always at the last position if any(handler.type is None for handler in self.handlers[:-1]): raise CSTValidationError("The bare except: handler must be the last one.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Try": return Try( leading_lines=visit_sequence( self, "leading_lines", self.leading_lines, visitor ), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), body=visit_required(self, "body", self.body, visitor), handlers=visit_sequence(self, "handlers", self.handlers, visitor), orelse=visit_optional(self, "orelse", self.orelse, visitor), finalbody=visit_optional(self, "finalbody", self.finalbody, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: for ll in self.leading_lines: ll._codegen(state) state.add_indent_tokens() end_node = self.body if len(self.handlers) > 0: end_node = self.handlers[-1] orelse = self.orelse end_node = end_node if orelse is None else orelse finalbody = self.finalbody end_node = end_node if finalbody is None else finalbody with state.record_syntactic_position(self, end_node=end_node): state.add_token("try") self.whitespace_before_colon._codegen(state) state.add_token(":") self.body._codegen(state) for handler in self.handlers: handler._codegen(state) if orelse is not None: orelse._codegen(state) if finalbody is not None: finalbody._codegen(state) @add_slots @dataclass(frozen=True) class TryStar(BaseCompoundStatement): """ A ``try`` statement with ``except*`` blocks. """ #: The suite that is wrapped with a try statement. body: BaseSuite #: A list of one or more exception handlers. handlers: Sequence[ExceptStarHandler] #: An optional else case. orelse: Optional[Else] = None #: An optional finally case. finalbody: Optional[Finally] = None #: Sequence of empty lines appearing before this compound statement line. leading_lines: Sequence[EmptyLine] = () #: The whitespace that appears after the ``try`` keyword but before #: the colon. 
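
# A sketch of the ``Try`` validation rules above: constructing a bare ``Try``
# with no handlers and no ``finally`` is rejected eagerly, with
# ``CSTValidationError`` re-exported from the top-level ``libcst`` package.
#
#   >>> import libcst as cst
#   >>> body = cst.IndentedBlock(body=[cst.SimpleStatementLine([cst.Pass()])])
#   >>> try:
#   ...     cst.Try(body=body)
#   ... except cst.CSTValidationError as e:
#   ...     print(e)
#   A Try statement must have at least one ExceptHandler or Finally
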
whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") def _validate(self) -> None: if len(self.handlers) == 0: raise CSTValidationError( "A TryStar statement must have at least one ExceptHandler" ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TryStar": return TryStar( leading_lines=visit_sequence( self, "leading_lines", self.leading_lines, visitor ), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), body=visit_required(self, "body", self.body, visitor), handlers=visit_sequence(self, "handlers", self.handlers, visitor), orelse=visit_optional(self, "orelse", self.orelse, visitor), finalbody=visit_optional(self, "finalbody", self.finalbody, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: for ll in self.leading_lines: ll._codegen(state) state.add_indent_tokens() end_node = self.handlers[-1] orelse = self.orelse end_node = end_node if orelse is None else orelse finalbody = self.finalbody end_node = end_node if finalbody is None else finalbody with state.record_syntactic_position(self, end_node=end_node): state.add_token("try") self.whitespace_before_colon._codegen(state) state.add_token(":") self.body._codegen(state) for handler in self.handlers: handler._codegen(state) if orelse is not None: orelse._codegen(state) if finalbody is not None: finalbody._codegen(state) @add_slots @dataclass(frozen=True) class ImportAlias(CSTNode): """ An import, with an optional :class:`AsName`. Used in both :class:`Import` and :class:`ImportFrom` to specify a single import out of another module. """ #: Name or Attribute node representing the object we are importing. name: Union[Attribute, Name] #: Local alias we will import the above object as. asname: Optional[AsName] = None #: Any trailing comma that appears after this import. This is optional for the #: last :class:`ImportAlias` in a :class:`Import` or :class:`ImportFrom`, but all #: other import aliases inside an import must contain a comma to disambiguate #: multiple imports. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT def _validate(self) -> None: asname = self.asname if asname is not None: if not isinstance(asname.name, Name): raise CSTValidationError( "Must use a Name node for AsName name inside ImportAlias." ) if asname.whitespace_before_as.empty: raise CSTValidationError( "Must have at least one space before as keyword in an ImportAlias." ) try: self.evaluated_name except Exception as e: if str(e) == "Logic error!": raise CSTValidationError( "The imported name must be a valid qualified name." ) raise e def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ImportAlias": return ImportAlias( name=visit_required(self, "name", self.name, visitor), asname=visit_optional(self, "asname", self.asname, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), ) def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: with state.record_syntactic_position(self): self.name._codegen(state) asname = self.asname if asname is not None: asname._codegen(state) comma = self.comma if comma is MaybeSentinel.DEFAULT and default_comma: state.add_token(", ") elif isinstance(comma, Comma): comma._codegen(state) def _name(self, node: CSTNode) -> str: # Unrolled version of get_full_name_for_node to avoid circular imports. 
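
# Round-trip sketch for the ``TryStar``/``ExceptStarHandler`` pair defined
# above, assuming a parser build that supports the Python 3.11 ``except*``
# grammar:
#
#   >>> import libcst as cst
#   >>> code = "try:\n    pass\nexcept* TypeError:\n    pass\n"
#   >>> cst.parse_module(code).code == code
#   True
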
if isinstance(node, Name): return node.value elif isinstance(node, Attribute): return f"{self._name(node.value)}.{node.attr.value}" else: raise Exception("Logic error!") @property def evaluated_name(self) -> str: """ Returns the string name this :class:`ImportAlias` represents. """ return self._name(self.name) @property def evaluated_alias(self) -> Optional[str]: """ Returns the string name for any alias that this :class:`ImportAlias` has. If there is no ``asname`` attribute, this returns ``None``. """ asname = self.asname if asname is not None: return self._name(asname.name) return None @add_slots @dataclass(frozen=True) class Import(BaseSmallStatement): """ An ``import`` statement. """ #: One or more names that are being imported, with optional local aliases. names: Sequence[ImportAlias] #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT #: The whitespace that appears after the ``import`` keyword but before #: the first import alias. whitespace_after_import: SimpleWhitespace = SimpleWhitespace.field(" ") def _validate(self) -> None: if len(self.names) == 0: raise CSTValidationError( "An ImportStatement must have at least one ImportAlias" ) if isinstance(self.names[-1].comma, Comma): raise CSTValidationError( "An ImportStatement does not allow a trailing comma" ) if self.whitespace_after_import.empty: raise CSTValidationError("Must have at least one space after import.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Import": return Import( whitespace_after_import=visit_required( self, "whitespace_after_import", self.whitespace_after_import, visitor ), names=visit_sequence(self, "names", self.names, visitor), semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): state.add_token("import") self.whitespace_after_import._codegen(state) lastname = len(self.names) - 1 for i, name in enumerate(self.names): name._codegen(state, default_comma=(i != lastname)) semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class ImportFrom(BaseSmallStatement): """ A ``from x import y`` statement. """ #: Name or Attribute node representing the module we're importing from. #: This is optional as :class:`ImportFrom` allows purely relative imports. module: Optional[Union[Attribute, Name]] #: One or more names that are being imported from the specified module, #: with optional local aliases. names: Union[Sequence[ImportAlias], ImportStar] #: Sequence of :class:`Dot` nodes indicating relative import level. relative: Sequence[Dot] = () #: Optional open parenthesis for multi-line import continuation. lpar: Optional[LeftParen] = None #: Optional close parenthesis for multi-line import continuation. rpar: Optional[RightParen] = None #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT #: The whitespace that appears after the ``from`` keyword but before #: the module and any relative import dots. 
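
# Usage sketch for ``ImportAlias`` and its ``evaluated_name`` /
# ``evaluated_alias`` helpers above:
#
#   >>> import libcst as cst
#   >>> alias = cst.ImportAlias(
#   ...     name=cst.Attribute(value=cst.Name("os"), attr=cst.Name("path")),
#   ...     asname=cst.AsName(name=cst.Name("p")),
#   ... )
#   >>> (alias.evaluated_name, alias.evaluated_alias)
#   ('os.path', 'p')
#   >>> cst.Module(body=[cst.SimpleStatementLine([cst.Import(names=[alias])])]).code
#   'import os.path as p\n'
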
whitespace_after_from: SimpleWhitespace = SimpleWhitespace.field(" ") #: The whitespace that appears after the module but before the #: ``import`` keyword. whitespace_before_import: SimpleWhitespace = SimpleWhitespace.field(" ") #: The whitespace that appears after the ``import`` keyword but #: before the first import name or optional left paren. whitespace_after_import: SimpleWhitespace = SimpleWhitespace.field(" ") def _validate_module(self) -> None: if self.module is None and len(self.relative) == 0: raise CSTValidationError( "Must have a module specified if there is no relative import." ) def _validate_names(self) -> None: names = self.names if isinstance(names, Sequence): if len(names) == 0: raise CSTValidationError( "An ImportFrom must have at least one ImportAlias" ) for name in names[:-1]: if name.comma is None: raise CSTValidationError("Non-final ImportAliases require a comma") if self.lpar is not None and self.rpar is None: raise CSTValidationError("Cannot have left paren without right paren.") if self.lpar is None and self.rpar is not None: raise CSTValidationError("Cannot have right paren without left paren.") if isinstance(names, ImportStar): if self.lpar is not None or self.rpar is not None: raise CSTValidationError( "An ImportFrom using ImportStar cannot have parens" ) def _validate_whitespace(self) -> None: if self.whitespace_after_from.empty and not self.relative: raise CSTValidationError("Must have at least one space after from.") if self.whitespace_before_import.empty and not ( self.relative and self.module is None ): raise CSTValidationError("Must have at least one space before import.") if ( self.whitespace_after_import.empty and self.lpar is None and not isinstance(self.names, ImportStar) ): raise CSTValidationError("Must have at least one space after import.") def _validate(self) -> None: self._validate_module() self._validate_names() self._validate_whitespace() def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ImportFrom": names = self.names return ImportFrom( whitespace_after_from=visit_required( self, "whitespace_after_from", self.whitespace_after_from, visitor ), relative=visit_sequence(self, "relative", self.relative, visitor), module=visit_optional(self, "module", self.module, visitor), whitespace_before_import=visit_required( self, "whitespace_before_import", self.whitespace_before_import, visitor ), whitespace_after_import=visit_required( self, "whitespace_after_import", self.whitespace_after_import, visitor ), lpar=visit_optional(self, "lpar", self.lpar, visitor), names=( visit_required(self, "names", names, visitor) if isinstance(names, ImportStar) else visit_sequence(self, "names", names, visitor) ), rpar=visit_optional(self, "rpar", self.rpar, visitor), semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: names = self.names end_node = names[-1] if isinstance(names, Sequence) else names end_node = end_node if self.rpar is None else self.rpar with state.record_syntactic_position(self, end_node=end_node): state.add_token("from") self.whitespace_after_from._codegen(state) for dot in self.relative: dot._codegen(state) module = self.module if module is not None: module._codegen(state) self.whitespace_before_import._codegen(state) state.add_token("import") self.whitespace_after_import._codegen(state) lpar = self.lpar if lpar is not None: lpar._codegen(state) if isinstance(names, Sequence): lastname = len(names) - 1 for i, name in 
enumerate(names): name._codegen(state, default_comma=(i != lastname)) if isinstance(names, ImportStar): names._codegen(state) rpar = self.rpar if rpar is not None: rpar._codegen(state) semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class AssignTarget(CSTNode): """ A target for an :class:`Assign`. Owns the equals sign and the whitespace around it. """ #: The target expression being assigned to. target: BaseAssignTargetExpression #: The whitespace appearing before the equals sign. whitespace_before_equal: SimpleWhitespace = SimpleWhitespace.field(" ") #: The whitespace appearing after the equals sign. whitespace_after_equal: SimpleWhitespace = SimpleWhitespace.field(" ") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "AssignTarget": return AssignTarget( target=visit_required(self, "target", self.target, visitor), whitespace_before_equal=visit_required( self, "whitespace_before_equal", self.whitespace_before_equal, visitor ), whitespace_after_equal=visit_required( self, "whitespace_after_equal", self.whitespace_after_equal, visitor ), ) def _codegen_impl(self, state: CodegenState) -> None: with state.record_syntactic_position(self): self.target._codegen(state) self.whitespace_before_equal._codegen(state) state.add_token("=") self.whitespace_after_equal._codegen(state) @add_slots @dataclass(frozen=True) class Assign(BaseSmallStatement): """ An assignment statement such as ``x = y`` or ``x = y = z``. Unlike :class:`AnnAssign`, this does not allow type annotations but does allow for multiple targets. """ #: One or more targets that are being assigned to. targets: Sequence[AssignTarget] #: The expression being assigned to the targets. value: BaseExpression #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _validate(self) -> None: if len(self.targets) == 0: raise CSTValidationError( "An Assign statement must have at least one AssignTarget" ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Assign": return Assign( targets=visit_sequence(self, "targets", self.targets, visitor), value=visit_required(self, "value", self.value, visitor), semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): for target in self.targets: target._codegen(state) self.value._codegen(state) semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class AnnAssign(BaseSmallStatement): """ An assignment statement such as ``x: int = 5`` or ``x: int``. This only allows for one assignment target unlike :class:`Assign` but it includes a variable annotation. Also unlike :class:`Assign`, the assignment target is optional, as it is possible to annotate an expression without assigning to it. """ #: The target that is being annotated and possibly assigned to. target: BaseAssignTargetExpression #: The annotation for the target. annotation: Annotation #: The optional expression being assigned to the target. 
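
# A sketch of the structures above: relative imports are modeled as ``Dot``
# nodes on ``ImportFrom``, and chained assignments as multiple
# ``AssignTarget`` nodes, each owning its own ``=`` and surrounding spaces.
#
#   >>> import libcst as cst
#   >>> imp = cst.parse_statement("from ..pkg import mod as m").body[0]
#   >>> (type(imp).__name__, len(imp.relative))
#   ('ImportFrom', 2)
#   >>> assign = cst.parse_statement("x = y = 5").body[0]
#   >>> len(assign.targets)
#   2
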
value: Optional[BaseExpression] = None #: The equals sign used to denote assignment if there is a value. equal: Union[AssignEqual, MaybeSentinel] = MaybeSentinel.DEFAULT #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _validate(self) -> None: if self.value is None and isinstance(self.equal, AssignEqual): raise CSTValidationError( "Must have a value when specifying an AssignEqual." ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "AnnAssign": return AnnAssign( target=visit_required(self, "target", self.target, visitor), annotation=visit_required(self, "annotation", self.annotation, visitor), equal=visit_sentinel(self, "equal", self.equal, visitor), value=visit_optional(self, "value", self.value, visitor), semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): self.target._codegen(state) self.annotation._codegen(state, default_indicator=":") equal = self.equal if equal is MaybeSentinel.DEFAULT and self.value is not None: state.add_token(" = ") elif isinstance(equal, AssignEqual): equal._codegen(state) value = self.value if value is not None: value._codegen(state) semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class AugAssign(BaseSmallStatement): """ An augmented assignment statement, such as ``x += 5``. """ #: Target that is being operated on and assigned to. target: BaseAssignTargetExpression #: The augmented assignment operation being performed. operator: BaseAugOp #: The value used with the above operator to calculate the new assignment. value: BaseExpression #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "AugAssign": return AugAssign( target=visit_required(self, "target", self.target, visitor), operator=visit_required(self, "operator", self.operator, visitor), value=visit_required(self, "value", self.value, visitor), semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): self.target._codegen(state) self.operator._codegen(state) self.value._codegen(state) semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class Decorator(CSTNode): """ A single decorator that decorates a :class:`FunctionDef` or a :class:`ClassDef`. """ #: The decorator that will return a new function wrapping the parent #: of this decorator. decorator: BaseExpression #: Line comments and empty lines before this decorator. The parent #: :class:`FunctionDef` or :class:`ClassDef` node owns leading lines before #: the first decorator so that if the first decorator is removed, spacing is preserved. leading_lines: Sequence[EmptyLine] = () #: Whitespace after the ``@`` and before the decorator expression itself. 
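
# A sketch of the ``MaybeSentinel`` behavior above: when ``equal`` is left at
# its default and a value is present, codegen inserts ``" = "`` automatically.
#
#   >>> import libcst as cst
#   >>> node = cst.AnnAssign(
#   ...     target=cst.Name("x"),
#   ...     annotation=cst.Annotation(cst.Name("int")),
#   ...     value=cst.Integer("5"),
#   ... )
#   >>> cst.Module(body=[cst.SimpleStatementLine([node])]).code
#   'x: int = 5\n'
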
    whitespace_after_at: SimpleWhitespace = SimpleWhitespace.field("")

    #: Optional trailing comment and newline following the decorator before the next line.
    trailing_whitespace: TrailingWhitespace = TrailingWhitespace.field()

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Decorator":
        return Decorator(
            leading_lines=visit_sequence(
                self, "leading_lines", self.leading_lines, visitor
            ),
            whitespace_after_at=visit_required(
                self, "whitespace_after_at", self.whitespace_after_at, visitor
            ),
            decorator=visit_required(self, "decorator", self.decorator, visitor),
            trailing_whitespace=visit_required(
                self, "trailing_whitespace", self.trailing_whitespace, visitor
            ),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        for ll in self.leading_lines:
            ll._codegen(state)
        state.add_indent_tokens()

        with state.record_syntactic_position(self):
            state.add_token("@")
            self.whitespace_after_at._codegen(state)
            self.decorator._codegen(state)

        self.trailing_whitespace._codegen(state)


def get_docstring_impl(
    body: Union[BaseSuite, Sequence[Union[SimpleStatementLine, BaseCompoundStatement]]],
    clean: bool,
) -> Optional[str]:
    """
    Implementation Reference:

    - :func:`ast.get_docstring`
      https://docs.python.org/3/library/ast.html#ast.get_docstring and
      https://github.com/python/cpython/blob/89aa4694fc8c6d190325ef8ed6ce6a6b8efb3e50/Lib/ast.py#L254
    - PEP 257 https://www.python.org/dev/peps/pep-0257/
    """
    if isinstance(body, Sequence):
        if body:
            expr = body[0]
        else:
            return None
    else:
        expr = body
    while isinstance(expr, (BaseSuite, SimpleStatementLine)):
        if len(expr.body) == 0:
            return None
        expr = expr.body[0]
    if not isinstance(expr, Expr):
        return None
    val = expr.value
    if isinstance(val, (SimpleString, ConcatenatedString)):
        evaluated_value = val.evaluated_value
    else:
        return None

    if isinstance(evaluated_value, bytes):
        return None

    if evaluated_value is not None and clean:
        return inspect.cleandoc(evaluated_value)
    return evaluated_value


@add_slots
@dataclass(frozen=True)
class FunctionDef(BaseCompoundStatement):
    """
    A function definition.
    """

    #: The function name.
    name: Name

    #: The function parameters. Present even if there are no params.
    params: Parameters

    #: The function body.
    body: BaseSuite

    #: Sequence of decorators applied to this function. Decorators are listed in
    #: the order that they appear in source (top to bottom), as opposed to the
    #: order in which they are applied to the function at runtime.
    decorators: Sequence[Decorator] = ()

    #: An optional return annotation, if the function is annotated.
    returns: Optional[Annotation] = None

    #: Optional async modifier, if this is an async function.
    asynchronous: Optional[Asynchronous] = None

    #: Leading empty lines and comments before the first decorator. We
    #: assume any comments before the first decorator are owned by the
    #: function definition itself. If there are no decorators, this will
    #: still contain all of the empty lines and comments before the
    #: function definition.
    leading_lines: Sequence[EmptyLine] = ()

    #: Empty lines and comments between the final decorator and the
    #: :class:`FunctionDef` node. In the case of no decorators, this will be empty.
    lines_after_decorators: Sequence[EmptyLine] = ()

    #: Whitespace after the ``def`` keyword and before the function name.
    whitespace_after_def: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: Whitespace after the function name and before the type parameters or the opening
    #: parenthesis for the parameters.
whitespace_after_name: SimpleWhitespace = SimpleWhitespace.field("") #: Whitespace after the opening parenthesis for the parameters but before #: the first param itself. whitespace_before_params: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Whitespace after the closing parenthesis or return annotation and before #: the colon. whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") #: An optional declaration of type parameters. type_parameters: Optional["TypeParameters"] = None #: Whitespace between the type parameters and the opening parenthesis for the #: (non-type) parameters. whitespace_after_type_parameters: SimpleWhitespace = SimpleWhitespace.field("") def _validate(self) -> None: if len(self.name.lpar) > 0 or len(self.name.rpar) > 0: raise CSTValidationError("Cannot have parens around Name in a FunctionDef.") if self.whitespace_after_def.empty: raise CSTValidationError( "There must be at least one space between 'def' and name." ) if ( self.type_parameters is None and not self.whitespace_after_type_parameters.empty ): raise CSTValidationError( "whitespace_after_type_parameters must be empty if there are no type " "parameters in FunctionDef" ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "FunctionDef": return FunctionDef( leading_lines=visit_sequence( self, "leading_lines", self.leading_lines, visitor ), decorators=visit_sequence(self, "decorators", self.decorators, visitor), lines_after_decorators=visit_sequence( self, "lines_after_decorators", self.lines_after_decorators, visitor ), asynchronous=visit_optional( self, "asynchronous", self.asynchronous, visitor ), whitespace_after_def=visit_required( self, "whitespace_after_def", self.whitespace_after_def, visitor ), name=visit_required(self, "name", self.name, visitor), whitespace_after_name=visit_required( self, "whitespace_after_name", self.whitespace_after_name, visitor ), type_parameters=visit_optional( self, "type_parameters", self.type_parameters, visitor ), whitespace_after_type_parameters=visit_required( self, "whitespace_after_type_parameters", self.whitespace_after_type_parameters, visitor, ), whitespace_before_params=visit_required( self, "whitespace_before_params", self.whitespace_before_params, visitor ), params=visit_required(self, "params", self.params, visitor), returns=visit_optional(self, "returns", self.returns, visitor), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), body=visit_required(self, "body", self.body, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: for ll in self.leading_lines: ll._codegen(state) for decorator in self.decorators: decorator._codegen(state) for lad in self.lines_after_decorators: lad._codegen(state) state.add_indent_tokens() with state.record_syntactic_position(self, end_node=self.body): asynchronous = self.asynchronous if asynchronous is not None: asynchronous._codegen(state) state.add_token("def") self.whitespace_after_def._codegen(state) self.name._codegen(state) self.whitespace_after_name._codegen(state) type_params = self.type_parameters if type_params is not None: type_params._codegen(state) self.whitespace_after_type_parameters._codegen(state) state.add_token("(") self.whitespace_before_params._codegen(state) self.params._codegen(state) state.add_token(")") returns = self.returns if returns is not None: returns._codegen(state, default_indicator="->") self.whitespace_before_colon._codegen(state) state.add_token(":") self.body._codegen(state) def 
get_docstring(self, clean: bool = True) -> Optional[str]:
        """
        When a docstring is available, returns a :func:`inspect.cleandoc` cleaned
        docstring. Otherwise, returns ``None``.
        """
        return get_docstring_impl(self.body, clean)


@add_slots
@dataclass(frozen=True)
class ClassDef(BaseCompoundStatement):
    """
    A class definition.
    """

    #: The class name.
    name: Name

    #: The class body.
    body: BaseSuite

    #: Sequence of base classes this class inherits from.
    bases: Sequence[Arg] = ()

    #: Sequence of keywords, such as "metaclass".
    keywords: Sequence[Arg] = ()

    #: Sequence of decorators applied to this class.
    decorators: Sequence[Decorator] = ()

    #: Optional open parenthesis used when there are bases or keywords.
    lpar: Union[LeftParen, MaybeSentinel] = MaybeSentinel.DEFAULT

    #: Optional close parenthesis used when there are bases or keywords.
    rpar: Union[RightParen, MaybeSentinel] = MaybeSentinel.DEFAULT

    #: Leading empty lines and comments before the first decorator. We
    #: assume any comments before the first decorator are owned by the
    #: class definition itself. If there are no decorators, this will
    #: still contain all of the empty lines and comments before the
    #: class definition.
    leading_lines: Sequence[EmptyLine] = ()

    #: Empty lines and comments between the final decorator and the
    #: :class:`ClassDef` node. In the case of no decorators, this will be empty.
    lines_after_decorators: Sequence[EmptyLine] = ()

    #: Whitespace after the ``class`` keyword and before the class name.
    whitespace_after_class: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: Whitespace after the class name and before the type parameters or the opening
    #: parenthesis for the bases and keywords.
    whitespace_after_name: SimpleWhitespace = SimpleWhitespace.field("")

    #: Whitespace after the closing parenthesis or class name and before
    #: the colon.
    whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("")

    #: An optional declaration of type parameters.
    type_parameters: Optional["TypeParameters"] = None

    #: Whitespace between type parameters and opening parenthesis for the bases and
    #: keywords.
    whitespace_after_type_parameters: SimpleWhitespace = SimpleWhitespace.field("")

    def _validate_whitespace(self) -> None:
        if self.whitespace_after_class.empty:
            raise CSTValidationError(
                "There must be at least one space between 'class' and name."
            )
        if (
            self.type_parameters is None
            and not self.whitespace_after_type_parameters.empty
        ):
            raise CSTValidationError(
                "whitespace_after_type_parameters must be empty if there are no type "
                "parameters in a ClassDef"
            )

    def _validate_parens(self) -> None:
        if len(self.name.lpar) > 0 or len(self.name.rpar) > 0:
            raise CSTValidationError("Cannot have parens around Name in a ClassDef.")
        if isinstance(self.lpar, MaybeSentinel) and isinstance(self.rpar, RightParen):
            raise CSTValidationError(
                "Do not mix concrete LeftParen/RightParen with MaybeSentinel."
            )
        if isinstance(self.lpar, LeftParen) and isinstance(self.rpar, MaybeSentinel):
            raise CSTValidationError(
                "Do not mix concrete LeftParen/RightParen with MaybeSentinel."
            )

    def _validate_args(self) -> None:
        if any((arg.keyword is not None) for arg in self.bases):
            raise CSTValidationError("Bases must be arguments without keywords.")
        if any((arg.keyword is None and arg.star != "**") for arg in self.keywords):
            raise CSTValidationError(
                "Keywords must be arguments with keywords or dictionary expansions."
) def _validate(self) -> None: self._validate_whitespace() self._validate_parens() self._validate_args() def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ClassDef": return ClassDef( leading_lines=visit_sequence( self, "leading_lines", self.leading_lines, visitor ), decorators=visit_sequence(self, "decorators", self.decorators, visitor), lines_after_decorators=visit_sequence( self, "lines_after_decorators", self.lines_after_decorators, visitor ), whitespace_after_class=visit_required( self, "whitespace_after_class", self.whitespace_after_class, visitor ), name=visit_required(self, "name", self.name, visitor), whitespace_after_name=visit_required( self, "whitespace_after_name", self.whitespace_after_name, visitor ), type_parameters=visit_optional( self, "type_parameters", self.type_parameters, visitor ), whitespace_after_type_parameters=visit_required( self, "whitespace_after_type_parameters", self.whitespace_after_type_parameters, visitor, ), lpar=visit_sentinel(self, "lpar", self.lpar, visitor), bases=visit_sequence(self, "bases", self.bases, visitor), keywords=visit_sequence(self, "keywords", self.keywords, visitor), rpar=visit_sentinel(self, "rpar", self.rpar, visitor), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), body=visit_required(self, "body", self.body, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: # noqa: C901 for ll in self.leading_lines: ll._codegen(state) for decorator in self.decorators: decorator._codegen(state) for lad in self.lines_after_decorators: lad._codegen(state) state.add_indent_tokens() with state.record_syntactic_position(self, end_node=self.body): state.add_token("class") self.whitespace_after_class._codegen(state) self.name._codegen(state) self.whitespace_after_name._codegen(state) type_params = self.type_parameters if type_params is not None: type_params._codegen(state) self.whitespace_after_type_parameters._codegen(state) lpar = self.lpar if isinstance(lpar, MaybeSentinel): if self.bases or self.keywords: state.add_token("(") elif isinstance(lpar, LeftParen): lpar._codegen(state) args = [*self.bases, *self.keywords] last_arg = len(args) - 1 for i, arg in enumerate(args): arg._codegen(state, default_comma=(i != last_arg)) rpar = self.rpar if isinstance(rpar, MaybeSentinel): if self.bases or self.keywords: state.add_token(")") elif isinstance(rpar, RightParen): rpar._codegen(state) self.whitespace_before_colon._codegen(state) state.add_token(":") self.body._codegen(state) def get_docstring(self, clean: bool = True) -> Optional[str]: """ Returns a :func:`inspect.cleandoc` cleaned docstring if the docstring is available, ``None`` otherwise. """ return get_docstring_impl(self.body, clean) @add_slots @dataclass(frozen=True) class WithItem(CSTNode): """ A single context manager in a :class:`With` block, with an optional variable name. """ #: Expression that evaluates to a context manager. item: BaseExpression #: Variable to assign the context manager to, if it is needed in the #: :class:`With` body. asname: Optional[AsName] = None #: This is forbidden for the last :class:`WithItem` in a :class:`With`, but all #: other items inside a with block must contain a comma to separate them. 
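
# A sketch for the ``get_docstring`` helpers on ``FunctionDef`` and
# ``ClassDef`` above, which share ``get_docstring_impl``:
#
#   >>> import libcst as cst
#   >>> mod = cst.parse_module('class C:\n    """Doc  string."""\n')
#   >>> mod.body[0].get_docstring()
#   'Doc  string.'
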
comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT def _validate(self) -> None: asname = self.asname if ( asname is not None and asname.whitespace_before_as.empty and not self.item._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): raise CSTValidationError("Must have at least one space before as keyword.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "WithItem": return WithItem( item=visit_required(self, "item", self.item, visitor), asname=visit_optional(self, "asname", self.asname, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), ) def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: with state.record_syntactic_position(self): self.item._codegen(state) asname = self.asname if asname is not None: asname._codegen(state) comma = self.comma if comma is MaybeSentinel.DEFAULT and default_comma: state.add_token(", ") elif isinstance(comma, Comma): comma._codegen(state) @add_slots @dataclass(frozen=True) class With(BaseCompoundStatement): """ A ``with`` statement. """ #: A sequence of one or more items that evaluate to context managers. items: Sequence[WithItem] #: The suite that is wrapped with this statement. body: BaseSuite #: Optional async modifier if this is an ``async with`` statement. asynchronous: Optional[Asynchronous] = None #: Sequence of empty lines appearing before this with statement. leading_lines: Sequence[EmptyLine] = () #: Optional open parenthesis for multi-line with bindings lpar: Union[LeftParen, MaybeSentinel] = MaybeSentinel.DEFAULT #: Optional close parenthesis for multi-line with bindings rpar: Union[RightParen, MaybeSentinel] = MaybeSentinel.DEFAULT #: Whitespace after the ``with`` keyword and before the first item. whitespace_after_with: SimpleWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the last item and before the colon. whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") def _validate_parens(self) -> None: if isinstance(self.lpar, MaybeSentinel) and isinstance(self.rpar, RightParen): raise CSTValidationError( "Do not mix concrete LeftParen/RightParen with MaybeSentinel." ) if isinstance(self.lpar, LeftParen) and isinstance(self.rpar, MaybeSentinel): raise CSTValidationError( "Do not mix concrete LeftParen/RightParen with MaybeSentinel." ) def _validate(self) -> None: self._validate_parens() if len(self.items) == 0: raise CSTValidationError( "A With statement must have at least one WithItem." ) if ( isinstance(self.rpar, MaybeSentinel) and self.items[-1].comma != MaybeSentinel.DEFAULT ): raise CSTValidationError( "The last WithItem in an unparenthesized With cannot have a trailing comma." 
) if self.whitespace_after_with.empty and not ( isinstance(self.lpar, LeftParen) or self.items[0].item._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ) ): raise CSTValidationError("Must have at least one space after with keyword.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "With": return With( leading_lines=visit_sequence( self, "leading_lines", self.leading_lines, visitor ), asynchronous=visit_optional( self, "asynchronous", self.asynchronous, visitor ), whitespace_after_with=visit_required( self, "whitespace_after_with", self.whitespace_after_with, visitor ), lpar=visit_sentinel(self, "lpar", self.lpar, visitor), items=visit_sequence(self, "items", self.items, visitor), rpar=visit_sentinel(self, "rpar", self.rpar, visitor), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), body=visit_required(self, "body", self.body, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: for ll in self.leading_lines: ll._codegen(state) state.add_indent_tokens() needs_paren = False for item in self.items: comma = item.comma if isinstance(comma, Comma): if isinstance( comma.whitespace_after, (EmptyLine, TrailingWhitespace, ParenthesizedWhitespace), ): needs_paren = True break with state.record_syntactic_position(self, end_node=self.body): asynchronous = self.asynchronous if asynchronous is not None: asynchronous._codegen(state) state.add_token("with") self.whitespace_after_with._codegen(state) lpar = self.lpar if isinstance(lpar, LeftParen): lpar._codegen(state) elif needs_paren: state.add_token("(") last_item = len(self.items) - 1 for i, item in enumerate(self.items): item._codegen(state, default_comma=(i != last_item)) rpar = self.rpar if isinstance(rpar, RightParen): rpar._codegen(state) elif needs_paren: state.add_token(")") self.whitespace_before_colon._codegen(state) state.add_token(":") self.body._codegen(state) @add_slots @dataclass(frozen=True) class For(BaseCompoundStatement): """ A ``for target in iter`` statement. """ #: The target of the iterator in the for statement. target: BaseAssignTargetExpression #: The iterable expression we will loop over. iter: BaseExpression #: The suite that is wrapped with this statement. body: BaseSuite #: An optional else case which will be executed if there is no #: :class:`Break` statement encountered while looping. orelse: Optional[Else] = None #: Optional async modifier, if this is an `async for` statement. asynchronous: Optional[Asynchronous] = None #: Sequence of empty lines appearing before this for statement. leading_lines: Sequence[EmptyLine] = () #: Whitespace after the ``for`` keyword and before the target. whitespace_after_for: SimpleWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the target and before the ``in`` keyword. whitespace_before_in: SimpleWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the ``in`` keyword and before the iter. whitespace_after_in: SimpleWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the iter and before the colon. whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") def _validate(self) -> None: if ( self.whitespace_after_for.empty and not self.target._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ) ): raise CSTValidationError( "Must have at least one space after 'for' keyword." 
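
# A round-trip sketch for the parenthesization logic in ``With`` above,
# assuming a parser that supports parenthesized with-items (Python 3.9+
# grammar): explicit parens and per-item commas are preserved exactly.
#
#   >>> import libcst as cst
#   >>> code = "with (\n    open(f) as a,\n    open(g) as b,\n):\n    pass\n"
#   >>> cst.parse_module(code).code == code
#   True
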
) if ( self.whitespace_before_in.empty and not self.target._safe_to_use_with_word_operator(ExpressionPosition.LEFT) ): raise CSTValidationError( "Must have at least one space before 'in' keyword." ) if ( self.whitespace_after_in.empty and not self.iter._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) ): raise CSTValidationError("Must have at least one space after 'in' keyword.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "For": return For( leading_lines=visit_sequence( self, "leading_lines", self.leading_lines, visitor ), asynchronous=visit_optional( self, "asynchronous", self.asynchronous, visitor ), whitespace_after_for=visit_required( self, "whitespace_after_for", self.whitespace_after_for, visitor ), target=visit_required(self, "target", self.target, visitor), whitespace_before_in=visit_required( self, "whitespace_before_in", self.whitespace_before_in, visitor ), whitespace_after_in=visit_required( self, "whitespace_after_in", self.whitespace_after_in, visitor ), iter=visit_required(self, "iter", self.iter, visitor), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), body=visit_required(self, "body", self.body, visitor), orelse=visit_optional(self, "orelse", self.orelse, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: for ll in self.leading_lines: ll._codegen(state) state.add_indent_tokens() end_node = self.body if self.orelse is None else self.orelse with state.record_syntactic_position(self, end_node=end_node): asynchronous = self.asynchronous if asynchronous is not None: asynchronous._codegen(state) state.add_token("for") self.whitespace_after_for._codegen(state) self.target._codegen(state) self.whitespace_before_in._codegen(state) state.add_token("in") self.whitespace_after_in._codegen(state) self.iter._codegen(state) self.whitespace_before_colon._codegen(state) state.add_token(":") self.body._codegen(state) orelse = self.orelse if orelse is not None: orelse._codegen(state) @add_slots @dataclass(frozen=True) class While(BaseCompoundStatement): """ A ``while`` statement. """ #: The test we will loop against. test: BaseExpression #: The suite that is wrapped with this statement. body: BaseSuite #: An optional else case which will be executed if there is no #: :class:`Break` statement encountered while looping. orelse: Optional[Else] = None #: Sequence of empty lines appearing before this while statement. leading_lines: Sequence[EmptyLine] = () #: Whitespace after the ``while`` keyword and before the test. whitespace_after_while: SimpleWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the test and before the colon. whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") def _validate(self) -> None: if ( self.whitespace_after_while.empty and not self.test._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) ): raise CSTValidationError( "Must have at least one space after 'while' keyword." 
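
# A sketch of the whitespace validation above: no space is required after
# ``for``/``in``/``while`` when the neighboring expression is parenthesized,
# since the keyword cannot run into an identifier.
#
#   >>> import libcst as cst
#   >>> code = "for(x)in(y):\n    pass\n"
#   >>> cst.parse_module(code).code == code
#   True
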
) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "While": return While( leading_lines=visit_sequence( self, "leading_lines", self.leading_lines, visitor ), whitespace_after_while=visit_required( self, "whitespace_after_while", self.whitespace_after_while, visitor ), test=visit_required(self, "test", self.test, visitor), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), body=visit_required(self, "body", self.body, visitor), orelse=visit_optional(self, "orelse", self.orelse, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: for ll in self.leading_lines: ll._codegen(state) state.add_indent_tokens() end_node = self.body if self.orelse is None else self.orelse with state.record_syntactic_position(self, end_node=end_node): state.add_token("while") self.whitespace_after_while._codegen(state) self.test._codegen(state) self.whitespace_before_colon._codegen(state) state.add_token(":") self.body._codegen(state) orelse = self.orelse if orelse is not None: orelse._codegen(state) @add_slots @dataclass(frozen=True) class Raise(BaseSmallStatement): """ A ``raise exc`` or ``raise exc from cause`` statement. """ #: The exception that we should raise. exc: Optional[BaseExpression] = None #: Optionally, a ``from cause`` clause to allow us to raise an exception #: out of another exception's context. cause: Optional[From] = None #: Any whitespace appearing between the ``raise`` keyword and the exception. whitespace_after_raise: Union[ SimpleWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _validate(self) -> None: # Validate correct construction if self.exc is None and self.cause is not None: raise CSTValidationError( "Must have an 'exc' when specifying 'clause'. on Raise." 
) # Validate spacing between "raise" and "exc" exc = self.exc if exc is not None: whitespace_after_raise = self.whitespace_after_raise has_no_gap = ( not isinstance(whitespace_after_raise, MaybeSentinel) and whitespace_after_raise.empty ) if has_no_gap and not exc._safe_to_use_with_word_operator( ExpressionPosition.RIGHT ): raise CSTValidationError("Must have at least one space after 'raise'.") # Validate spacing between "exc" and "from" cause = self.cause if exc is not None and cause is not None: whitespace_before_from = cause.whitespace_before_from has_no_gap = ( not isinstance(whitespace_before_from, MaybeSentinel) and whitespace_before_from.empty ) if has_no_gap and not exc._safe_to_use_with_word_operator( ExpressionPosition.LEFT ): raise CSTValidationError("Must have at least one space before 'from'.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Raise": return Raise( whitespace_after_raise=visit_sentinel( self, "whitespace_after_raise", self.whitespace_after_raise, visitor ), exc=visit_optional(self, "exc", self.exc, visitor), cause=visit_optional(self, "cause", self.cause, visitor), semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): exc = self.exc cause = self.cause state.add_token("raise") whitespace_after_raise = self.whitespace_after_raise if isinstance(whitespace_after_raise, MaybeSentinel): if exc is not None: state.add_token(" ") else: whitespace_after_raise._codegen(state) if exc is not None: exc._codegen(state) if cause is not None: cause._codegen(state, default_space=" ") semicolon = self.semicolon if isinstance(semicolon, MaybeSentinel): if default_semicolon: state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) @add_slots @dataclass(frozen=True) class Assert(BaseSmallStatement): """ An assert statement such as ``assert x > 5`` or ``assert x > 5, 'Uh oh!'`` """ #: The test we are going to assert on. test: BaseExpression #: The optional message to display if the test evaluates to a falsey value. msg: Optional[BaseExpression] = None #: A comma separating test and message, if there is a message. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT #: Whitespace appearing after the ``assert`` keyword and before the test. whitespace_after_assert: SimpleWhitespace = SimpleWhitespace.field(" ") #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. 
    semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT

    def _validate(self) -> None:
        # Validate whitespace
        if (
            self.whitespace_after_assert.empty
            and not self.test._safe_to_use_with_word_operator(ExpressionPosition.RIGHT)
        ):
            raise CSTValidationError("Must have at least one space after 'assert'.")

        # Validate comma rules
        if self.msg is None and isinstance(self.comma, Comma):
            raise CSTValidationError("Cannot have trailing comma after 'test'.")

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Assert":
        return Assert(
            whitespace_after_assert=visit_required(
                self, "whitespace_after_assert", self.whitespace_after_assert, visitor
            ),
            test=visit_required(self, "test", self.test, visitor),
            comma=visit_sentinel(self, "comma", self.comma, visitor),
            msg=visit_optional(self, "msg", self.msg, visitor),
            semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor),
        )

    def _codegen_impl(
        self, state: CodegenState, default_semicolon: bool = False
    ) -> None:
        with state.record_syntactic_position(self):
            state.add_token("assert")
            self.whitespace_after_assert._codegen(state)
            self.test._codegen(state)
            comma = self.comma
            msg = self.msg
            if isinstance(comma, MaybeSentinel):
                if msg is not None:
                    state.add_token(", ")
            else:
                comma._codegen(state)
            if msg is not None:
                msg._codegen(state)

        semicolon = self.semicolon
        if isinstance(semicolon, MaybeSentinel):
            if default_semicolon:
                state.add_token("; ")
        elif isinstance(semicolon, Semicolon):
            semicolon._codegen(state)


@add_slots
@dataclass(frozen=True)
class NameItem(CSTNode):
    """
    A single identifier name inside a :class:`Global` or :class:`Nonlocal`
    statement.

    This exists because a list of names in a ``global`` or ``nonlocal`` statement
    needs to be separated by commas, each of which ends up owned by a
    :class:`NameItem` node.
    """

    #: Identifier name.
    name: Name

    #: This is forbidden for the last :class:`NameItem` in a
    #: :class:`Global`/:class:`Nonlocal`, but all other items inside a ``global`` or
    #: ``nonlocal`` statement must contain a comma to separate them.
    comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT

    def _validate(self) -> None:
        # No parens around names here
        if len(self.name.lpar) > 0 or len(self.name.rpar) > 0:
            raise CSTValidationError("Cannot have parens around names in NameItem.")

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "NameItem":
        return NameItem(
            name=visit_required(self, "name", self.name, visitor),
            comma=visit_sentinel(self, "comma", self.comma, visitor),
        )

    def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None:
        with state.record_syntactic_position(self):
            self.name._codegen(state)
        comma = self.comma
        if comma is MaybeSentinel.DEFAULT and default_comma:
            state.add_token(", ")
        elif isinstance(comma, Comma):
            comma._codegen(state)


@add_slots
@dataclass(frozen=True)
class Global(BaseSmallStatement):
    """
    A ``global`` statement.
    """

    #: A list of one or more names.
    names: Sequence[NameItem]

    #: Whitespace appearing after the ``global`` keyword and before the first name.
    whitespace_after_global: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: Optional semicolon when this is used in a statement line. This semicolon
    #: owns the whitespace on both sides of it when it is used.
    semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT

    def _validate(self) -> None:
        if len(self.names) == 0:
            raise CSTValidationError(
                "A Global statement must have at least one NameItem."
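# A sketch for ``Assert`` above: a default ``", "`` is emitted between
# ``test`` and ``msg`` when ``comma`` is left as ``MaybeSentinel.DEFAULT``.
#
#   >>> import libcst as cst
#   >>> node = cst.Assert(test=cst.Name("ok"), msg=cst.SimpleString('"Uh oh!"'))
#   >>> cst.Module(body=[cst.SimpleStatementLine([node])]).code
#   'assert ok, "Uh oh!"\n'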
            )
        if self.names[-1].comma != MaybeSentinel.DEFAULT:
            raise CSTValidationError(
                "The last NameItem in a Global cannot have a trailing comma."
            )
        if self.whitespace_after_global.empty:
            raise CSTValidationError(
                "Must have at least one space after 'global' keyword."
            )

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Global":
        return Global(
            whitespace_after_global=visit_required(
                self, "whitespace_after_global", self.whitespace_after_global, visitor
            ),
            names=visit_sequence(self, "names", self.names, visitor),
            semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor),
        )

    def _codegen_impl(
        self, state: CodegenState, default_semicolon: bool = False
    ) -> None:
        with state.record_syntactic_position(self):
            state.add_token("global")
            self.whitespace_after_global._codegen(state)
            last_name = len(self.names) - 1
            for i, name in enumerate(self.names):
                name._codegen(state, default_comma=(i != last_name))

        semicolon = self.semicolon
        if isinstance(semicolon, MaybeSentinel):
            if default_semicolon:
                state.add_token("; ")
        elif isinstance(semicolon, Semicolon):
            semicolon._codegen(state)


@add_slots
@dataclass(frozen=True)
class Nonlocal(BaseSmallStatement):
    """
    A ``nonlocal`` statement.
    """

    #: A list of one or more names.
    names: Sequence[NameItem]

    #: Whitespace appearing after the ``nonlocal`` keyword and before the first name.
    whitespace_after_nonlocal: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: Optional semicolon when this is used in a statement line. This semicolon
    #: owns the whitespace on both sides of it when it is used.
    semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT

    def _validate(self) -> None:
        if len(self.names) == 0:
            raise CSTValidationError(
                "A Nonlocal statement must have at least one NameItem."
            )
        if self.names[-1].comma != MaybeSentinel.DEFAULT:
            raise CSTValidationError(
                "The last NameItem in a Nonlocal cannot have a trailing comma."
            )
        if self.whitespace_after_nonlocal.empty:
            raise CSTValidationError(
                "Must have at least one space after 'nonlocal' keyword."
            )

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Nonlocal":
        return Nonlocal(
            whitespace_after_nonlocal=visit_required(
                self,
                "whitespace_after_nonlocal",
                self.whitespace_after_nonlocal,
                visitor,
            ),
            names=visit_sequence(self, "names", self.names, visitor),
            semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor),
        )

    def _codegen_impl(
        self, state: CodegenState, default_semicolon: bool = False
    ) -> None:
        with state.record_syntactic_position(self):
            state.add_token("nonlocal")
            self.whitespace_after_nonlocal._codegen(state)
            last_name = len(self.names) - 1
            for i, name in enumerate(self.names):
                name._codegen(state, default_comma=(i != last_name))

        semicolon = self.semicolon
        if isinstance(semicolon, MaybeSentinel):
            if default_semicolon:
                state.add_token("; ")
        elif isinstance(semicolon, Semicolon):
            semicolon._codegen(state)


class MatchPattern(_BaseParenthesizedNode, ABC):
    """
    A base class for anything that can appear as a pattern in a :class:`Match`
    statement.
    """

    __slots__ = ()


@add_slots
@dataclass(frozen=True)
# pyre-fixme[13]: Attribute `body` is never initialized.
class Match(BaseCompoundStatement):
    """
    A ``match`` statement.
    """

    #: The subject of the match.
    subject: BaseExpression

    #: A non-empty list of match cases.
    cases: Sequence["MatchCase"]

    #: Sequence of empty lines appearing before this compound statement line.
    leading_lines: Sequence[EmptyLine] = ()

    #: Whitespace between the ``match`` keyword and the subject.
    whitespace_after_match: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: Whitespace after the subject but before the colon.
    whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("")

    #: Any optional trailing comment and the final ``NEWLINE`` at the end of the line.
    whitespace_after_colon: TrailingWhitespace = TrailingWhitespace.field()

    #: A string representing a specific indentation. A ``None`` value uses the
    #: module's default indentation. This is included because indentation is
    #: allowed to be inconsistent across a file, just not ambiguously.
    indent: Optional[str] = None

    #: Any trailing comments or lines after the dedent that are owned by this match
    #: block. Statements own preceding and same-line trailing comments, but not
    #: trailing lines, so it falls on :class:`Match` to own it. In the case
    #: that a statement follows a :class:`Match` block, that statement will own the
    #: comments and lines that are at the same indent as the statement, and this
    #: :class:`Match` will own the comments and lines that are indented further.
    footer: Sequence[EmptyLine] = ()

    def _validate(self) -> None:
        if len(self.cases) == 0:
            raise CSTValidationError("A match statement must have at least one case.")

        indent = self.indent
        if indent is not None:
            if len(indent) == 0:
                raise CSTValidationError(
                    "A match statement must have a non-zero width indent."
                )
            if _INDENT_WHITESPACE_RE.fullmatch(indent) is None:
                raise CSTValidationError(
                    "An indent must be composed of only whitespace characters."
                )

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Match":
        return Match(
            leading_lines=visit_sequence(
                self, "leading_lines", self.leading_lines, visitor
            ),
            whitespace_after_match=visit_required(
                self, "whitespace_after_match", self.whitespace_after_match, visitor
            ),
            subject=visit_required(self, "subject", self.subject, visitor),
            whitespace_before_colon=visit_required(
                self, "whitespace_before_colon", self.whitespace_before_colon, visitor
            ),
            whitespace_after_colon=visit_required(
                self, "whitespace_after_colon", self.whitespace_after_colon, visitor
            ),
            indent=self.indent,
            cases=visit_sequence(self, "cases", self.cases, visitor),
            footer=visit_sequence(self, "footer", self.footer, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        for ll in self.leading_lines:
            ll._codegen(state)
        state.add_indent_tokens()

        with state.record_syntactic_position(self, end_node=self.cases[-1]):
            state.add_token("match")
            self.whitespace_after_match._codegen(state)
            self.subject._codegen(state)
            self.whitespace_before_colon._codegen(state)
            state.add_token(":")
            self.whitespace_after_colon._codegen(state)

            indent = self.indent
            state.increase_indent(state.default_indent if indent is None else indent)
            for c in self.cases:
                c._codegen(state)

        for f in self.footer:
            f._codegen(state)
        state.decrease_indent()


@add_slots
@dataclass(frozen=True)
class MatchCase(CSTNode):
    """
    A single ``case`` block of a :class:`Match` statement.
    """

    #: The pattern that ``subject`` will be matched against.
    pattern: MatchPattern

    #: The body of this case block, to be evaluated if ``pattern`` matches ``subject``
    #: and ``guard`` evaluates to a truthy value.
    body: BaseSuite

    #: Optional expression that will be evaluated if ``pattern`` matches ``subject``.
    guard: Optional[BaseExpression] = None

    #: Sequence of empty lines appearing before this case block.
    leading_lines: Sequence[EmptyLine] = ()

    #: Whitespace directly after the ``case`` keyword.
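
# A round-trip sketch for ``Match`` above, assuming a parser with Python 3.10
# ``match`` grammar support: like other block statements, ``Match`` owns its
# ``indent`` and ``footer`` lines, so layout is preserved exactly.
#
#   >>> import libcst as cst
#   >>> code = "match p:\n    case 0:\n        pass\n"
#   >>> cst.parse_module(code).code == code
#   True
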
    whitespace_after_case: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: Whitespace before the ``if`` keyword in case there's a guard expression.
    whitespace_before_if: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: Whitespace after the ``if`` keyword in case there's a guard expression.
    whitespace_after_if: SimpleWhitespace = SimpleWhitespace.field(" ")

    #: Whitespace before the colon.
    whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("")

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CSTNode":
        return MatchCase(
            leading_lines=visit_sequence(
                self, "leading_lines", self.leading_lines, visitor
            ),
            whitespace_after_case=visit_required(
                self, "whitespace_after_case", self.whitespace_after_case, visitor
            ),
            pattern=visit_required(self, "pattern", self.pattern, visitor),
            # pyre-fixme[6]: Expected `SimpleWhitespace` for 4th param but got
            # `Optional[SimpleWhitespace]`.
            whitespace_before_if=visit_optional(
                self, "whitespace_before_if", self.whitespace_before_if, visitor
            ),
            # pyre-fixme[6]: Expected `SimpleWhitespace` for 5th param but got
            # `Optional[SimpleWhitespace]`.
            whitespace_after_if=visit_optional(
                self, "whitespace_after_if", self.whitespace_after_if, visitor
            ),
            guard=visit_optional(self, "guard", self.guard, visitor),
            body=visit_required(self, "body", self.body, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        for ll in self.leading_lines:
            ll._codegen(state)
        state.add_indent_tokens()

        with state.record_syntactic_position(self, end_node=self.body):
            state.add_token("case")
            self.whitespace_after_case._codegen(state)
            self.pattern._codegen(state)

            guard = self.guard
            if guard is not None:
                self.whitespace_before_if._codegen(state)
                state.add_token("if")
                self.whitespace_after_if._codegen(state)
                guard._codegen(state)

            self.whitespace_before_colon._codegen(state)
            state.add_token(":")
            self.body._codegen(state)


@add_slots
@dataclass(frozen=True)
class MatchValue(MatchPattern):
    """
    A match literal or value pattern that compares by equality.
    """

    #: an expression to compare to
    value: BaseExpression

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CSTNode":
        return MatchValue(value=visit_required(self, "value", self.value, visitor))

    def _codegen_impl(self, state: CodegenState) -> None:
        with state.record_syntactic_position(self):
            self.value._codegen(state)

    @property
    def lpar(self) -> Sequence[LeftParen]:
        return self.value.lpar

    @lpar.setter
    def lpar(self, value: Sequence[LeftParen]) -> None:
        self.value.lpar = value

    @property
    def rpar(self) -> Sequence[RightParen]:
        return self.value.rpar

    @rpar.setter
    def rpar(self, value: Sequence[RightParen]) -> None:
        self.value.rpar = value


@add_slots
@dataclass(frozen=True)
class MatchSingleton(MatchPattern):
    """
    A match literal pattern that compares by identity.
    """

    #: a literal to compare to
    value: Name

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CSTNode":
        return MatchSingleton(value=visit_required(self, "value", self.value, visitor))

    def _validate(self) -> None:
        if self.value.value not in {"True", "False", "None"}:
            raise CSTValidationError(
                "A match singleton can only be True, False, or None"
            )

    def _codegen_impl(self, state: CodegenState) -> None:
        with state.record_syntactic_position(self):
            self.value._codegen(state)

    @property
    def lpar(self) -> Sequence[LeftParen]:
        return self.value.lpar

    @lpar.setter
    def lpar(self, value: Sequence[LeftParen]) -> None:
        # pyre-fixme[41]: Cannot reassign final attribute `lpar`.
        self.value.lpar = value

    @property
    def rpar(self) -> Sequence[RightParen]:
        return self.value.rpar

    @rpar.setter
    def rpar(self, value: Sequence[RightParen]) -> None:
        # pyre-fixme[41]: Cannot reassign final attribute `rpar`.
        self.value.rpar = value


@add_slots
@dataclass(frozen=True)
class MatchSequenceElement(CSTNode):
    """
    An element in a sequence match pattern.
    """

    value: MatchPattern

    #: An optional trailing comma.
    comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT

    def _visit_and_replace_children(
        self, visitor: CSTVisitorT
    ) -> "MatchSequenceElement":
        return MatchSequenceElement(
            value=visit_required(self, "value", self.value, visitor),
            comma=visit_sentinel(self, "comma", self.comma, visitor),
        )

    def _codegen_impl(
        self,
        state: CodegenState,
        default_comma: bool = False,
        default_comma_whitespace: bool = True,
    ) -> None:
        with state.record_syntactic_position(self):
            self.value._codegen(state)
            comma = self.comma
            if comma is MaybeSentinel.DEFAULT and default_comma:
                state.add_token(", " if default_comma_whitespace else ",")
            elif isinstance(comma, Comma):
                comma._codegen(state)


@add_slots
@dataclass(frozen=True)
class MatchStar(CSTNode):
    """
    A starred element in a sequence match pattern. Matches the rest of the sequence.
    """

    #: The name of the pattern binding. A ``None`` value represents ``*_``.
    name: Optional[Name] = None

    #: An optional trailing comma.
    comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT

    #: Optional whitespace between the star and the name.
    whitespace_before_name: BaseParenthesizableWhitespace = SimpleWhitespace.field("")

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchStar":
        return MatchStar(
            whitespace_before_name=visit_required(
                self, "whitespace_before_name", self.whitespace_before_name, visitor
            ),
            name=visit_optional(self, "name", self.name, visitor),
            comma=visit_sentinel(self, "comma", self.comma, visitor),
        )

    def _codegen_impl(
        self,
        state: CodegenState,
        default_comma: bool = False,
        default_comma_whitespace: bool = True,
    ) -> None:
        with state.record_syntactic_position(self):
            state.add_token("*")
            self.whitespace_before_name._codegen(state)
            name = self.name
            if name is None:
                state.add_token("_")
            else:
                name._codegen(state)
            comma = self.comma
            if comma is MaybeSentinel.DEFAULT and default_comma:
                state.add_token(", " if default_comma_whitespace else ",")
            elif isinstance(comma, Comma):
                comma._codegen(state)


class MatchSequence(MatchPattern, ABC):
    """
    A match sequence pattern. It's either a :class:`MatchList` or a
    :class:`MatchTuple`. Matches a variable length sequence if one of the patterns
    is a :class:`MatchStar`, otherwise matches a fixed length sequence.
    """

    __slots__ = ()

    #: Patterns to be matched against the subject elements if it is a sequence.
    patterns: Sequence[Union[MatchSequenceElement, MatchStar]]


@add_slots
@dataclass(frozen=True)
class MatchList(MatchSequence):
    """
    A list match pattern. It's either an "open sequence pattern" (without brackets)
    or a regular list literal (with brackets).
    """

    #: Patterns to be matched against the subject elements if it is a sequence.
    patterns: Sequence[Union[MatchSequenceElement, MatchStar]]

    #: An optional left bracket. If missing, this is an open sequence pattern.
    lbracket: Optional[LeftSquareBracket] = None

    #: An optional right bracket. If missing, this is an open sequence pattern.
    rbracket: Optional[RightSquareBracket] = None

    #: Parenthesis at the beginning of the node
    lpar: Sequence[LeftParen] = ()

    #: Parentheses after the pattern, but before a comma (if there is one).
rpar: Sequence[RightParen] = () def _validate(self) -> None: if self.lbracket and not self.rbracket: raise CSTValidationError("Cannot have left bracket without right bracket") if self.rbracket and not self.lbracket: raise CSTValidationError("Cannot have right bracket without left bracket") if not self.patterns and not self.lbracket: raise CSTValidationError( "Must have brackets if matching against empty list" ) super(MatchList, self)._validate() def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchList": return MatchList( lpar=visit_sequence(self, "lpar", self.lpar, visitor), lbracket=visit_optional(self, "lbracket", self.lbracket, visitor), patterns=visit_sequence(self, "patterns", self.patterns, visitor), rbracket=visit_optional(self, "rbracket", self.rbracket, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): lbracket = self.lbracket if lbracket is not None: lbracket._codegen(state) pats = self.patterns for idx, pat in enumerate(pats): pat._codegen(state, default_comma=(idx < len(pats) - 1)) rbracket = self.rbracket if rbracket is not None: rbracket._codegen(state) @add_slots @dataclass(frozen=True) class MatchTuple(MatchSequence): """ A tuple match pattern. """ #: Patterns to be matched against the subject elements if it is a sequence. patterns: Sequence[Union[MatchSequenceElement, MatchStar]] #: Parenthesis at the beginning of the node lpar: Sequence[LeftParen] = field(default_factory=lambda: (LeftParen(),)) #: Parentheses after the pattern, but before a comma (if there is one). rpar: Sequence[RightParen] = field(default_factory=lambda: (RightParen(),)) def _validate(self) -> None: if len(self.lpar) < 1: raise CSTValidationError( "Tuple patterns must have at least pair of parenthesis" ) super(MatchTuple, self)._validate() def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchTuple": return MatchTuple( lpar=visit_sequence(self, "lpar", self.lpar, visitor), patterns=visit_sequence(self, "patterns", self.patterns, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): pats = self.patterns patlen = len(pats) for idx, pat in enumerate(pats): pat._codegen( state, default_comma=patlen == 1 or (idx < patlen - 1), default_comma_whitespace=patlen != 1, ) @add_slots @dataclass(frozen=True) class MatchMappingElement(CSTNode): """ A ``key: value`` pair in a match mapping pattern. """ key: BaseExpression #: The pattern to be matched corresponding to ``key``. pattern: MatchPattern #: An optional trailing comma. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT #: Whitespace between ``key`` and the colon. whitespace_before_colon: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Whitespace between the colon and ``pattern``. 
    whitespace_after_colon: BaseParenthesizableWhitespace = SimpleWhitespace.field(
        " "
    )

    def _visit_and_replace_children(
        self, visitor: CSTVisitorT
    ) -> "MatchMappingElement":
        return MatchMappingElement(
            key=visit_required(self, "key", self.key, visitor),
            whitespace_before_colon=visit_required(
                self, "whitespace_before_colon", self.whitespace_before_colon, visitor
            ),
            whitespace_after_colon=visit_required(
                self, "whitespace_after_colon", self.whitespace_after_colon, visitor
            ),
            pattern=visit_required(self, "pattern", self.pattern, visitor),
            comma=visit_sentinel(self, "comma", self.comma, visitor),
        )

    def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None:
        with state.record_syntactic_position(self):
            self.key._codegen(state)
            self.whitespace_before_colon._codegen(state)
            state.add_token(":")
            self.whitespace_after_colon._codegen(state)
            self.pattern._codegen(state)
            comma = self.comma
            if comma is MaybeSentinel.DEFAULT and default_comma:
                state.add_token(", ")
            elif isinstance(comma, Comma):
                comma._codegen(state)


@add_slots
@dataclass(frozen=True)
class MatchMapping(MatchPattern):
    """
    A match mapping pattern.
    """

    #: A sequence of mapping elements.
    elements: Sequence[MatchMappingElement] = ()

    #: Left curly brace at the beginning of the pattern.
    lbrace: LeftCurlyBrace = LeftCurlyBrace.field()

    #: Right curly brace at the end of the pattern.
    rbrace: RightCurlyBrace = RightCurlyBrace.field()

    #: An optional name to capture the remaining elements of the mapping.
    rest: Optional[Name] = None

    #: Optional whitespace between stars and ``rest``.
    whitespace_before_rest: SimpleWhitespace = SimpleWhitespace.field("")

    #: An optional trailing comma attached to ``rest``.
    trailing_comma: Optional[Comma] = None

    #: Parenthesis at the beginning of the node
    lpar: Sequence[LeftParen] = ()

    #: Parentheses after the pattern
    rpar: Sequence[RightParen] = ()

    def _validate(self) -> None:
        if isinstance(self.trailing_comma, Comma) and self.rest is None:
            raise CSTValidationError("Cannot have a trailing comma without **rest")
        super(MatchMapping, self)._validate()

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchMapping":
        return MatchMapping(
            lpar=visit_sequence(self, "lpar", self.lpar, visitor),
            lbrace=visit_required(self, "lbrace", self.lbrace, visitor),
            elements=visit_sequence(self, "elements", self.elements, visitor),
            whitespace_before_rest=visit_required(
                self, "whitespace_before_rest", self.whitespace_before_rest, visitor
            ),
            rest=visit_optional(self, "rest", self.rest, visitor),
            trailing_comma=visit_optional(
                self, "trailing_comma", self.trailing_comma, visitor
            ),
            rbrace=visit_required(self, "rbrace", self.rbrace, visitor),
            rpar=visit_sequence(self, "rpar", self.rpar, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        with self._parenthesize(state):
            self.lbrace._codegen(state)
            elems = self.elements
            rest = self.rest
            for idx, el in enumerate(elems):
                el._codegen(
                    state, default_comma=rest is not None or idx < len(elems) - 1
                )
            if rest is not None:
                state.add_token("**")
                self.whitespace_before_rest._codegen(state)
                rest._codegen(state)
                comma = self.trailing_comma
                if comma is not None:
                    comma._codegen(state)
            self.rbrace._codegen(state)


@add_slots
@dataclass(frozen=True)
class MatchKeywordElement(CSTNode):
    """
    A key=value pair in a :class:`MatchClass`.
    """

    key: Name

    #: The pattern to be matched against the attribute named ``key``.
    pattern: MatchPattern

    #: An optional trailing comma.
comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT #: Whitespace between ``key`` and the equals sign. whitespace_before_equal: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Whitespace between the equals sign and ``pattern``. whitespace_after_equal: BaseParenthesizableWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children( self, visitor: CSTVisitorT ) -> "MatchKeywordElement": return MatchKeywordElement( key=visit_required(self, "key", self.key, visitor), whitespace_before_equal=visit_required( self, "whitespace_before_equal", self.whitespace_before_equal, visitor ), whitespace_after_equal=visit_required( self, "whitespace_after_equal", self.whitespace_after_equal, visitor ), pattern=visit_required(self, "pattern", self.pattern, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), ) def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: with state.record_syntactic_position(self): self.key._codegen(state) self.whitespace_before_equal._codegen(state) state.add_token("=") self.whitespace_after_equal._codegen(state) self.pattern._codegen(state) comma = self.comma if comma is MaybeSentinel.DEFAULT and default_comma: state.add_token(", ") elif isinstance(comma, Comma): comma._codegen(state) @add_slots @dataclass(frozen=True) class MatchClass(MatchPattern): """ A match class pattern. """ #: An expression giving the nominal class to be matched. cls: BaseExpression #: A sequence of patterns to be matched against the class defined sequence of #: pattern matching attributes. patterns: Sequence[MatchSequenceElement] = () #: A sequence of additional attribute names and corresponding patterns to be #: matched. kwds: Sequence[MatchKeywordElement] = () #: Whitespace between the class name and the left parenthesis. whitespace_after_cls: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Whitespace between the left parenthesis and the first pattern. whitespace_before_patterns: BaseParenthesizableWhitespace = SimpleWhitespace.field( "" ) #: Whitespace between the last pattern and the right parenthesis. 
whitespace_after_kwds: BaseParenthesizableWhitespace = SimpleWhitespace.field("") #: Parenthesis at the beginning of the node lpar: Sequence[LeftParen] = () #: Parentheses after the pattern rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchClass": return MatchClass( lpar=visit_sequence(self, "lpar", self.lpar, visitor), cls=visit_required(self, "cls", self.cls, visitor), whitespace_after_cls=visit_required( self, "whitespace_after_cls", self.whitespace_after_cls, visitor ), whitespace_before_patterns=visit_required( self, "whitespace_before_patterns", self.whitespace_before_patterns, visitor, ), patterns=visit_sequence(self, "patterns", self.patterns, visitor), kwds=visit_sequence(self, "kwds", self.kwds, visitor), whitespace_after_kwds=visit_required( self, "whitespace_after_kwds", self.whitespace_after_kwds, visitor ), ) def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.cls._codegen(state) self.whitespace_after_cls._codegen(state) state.add_token("(") self.whitespace_before_patterns._codegen(state) pats = self.patterns kwds = self.kwds for idx, pat in enumerate(pats): pat._codegen(state, default_comma=idx + 1 < len(pats) + len(kwds)) for idx, kwd in enumerate(kwds): kwd._codegen(state, default_comma=idx + 1 < len(kwds)) self.whitespace_after_kwds._codegen(state) state.add_token(")") @add_slots @dataclass(frozen=True) class MatchAs(MatchPattern): """ A match "as-pattern", capture pattern, or wildcard pattern. """ #: The match pattern that the subject will be matched against. If this is ``None``, #: the node represents a capture pattern (i.e. a bare name) and will always succeed. pattern: Optional[MatchPattern] = None #: The name that will be bound if the pattern is successful. If this is ``None``, #: ``pattern`` must also be ``None`` and the node represents the wildcard pattern #: (i.e. ``_``). 
name: Optional[Name] = None #: Whitespace between ``pattern`` and the ``as`` keyword (if ``pattern`` is not #: ``None``) whitespace_before_as: Union[ BaseParenthesizableWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT #: Whitespace between the ``as`` keyword and ``name`` (if ``pattern`` is not #: ``None``) whitespace_after_as: Union[ BaseParenthesizableWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT #: Parenthesis at the beginning of the node lpar: Sequence[LeftParen] = () #: Parentheses after the pattern rpar: Sequence[RightParen] = () def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchAs": return MatchAs( lpar=visit_sequence(self, "lpar", self.lpar, visitor), pattern=visit_optional(self, "pattern", self.pattern, visitor), whitespace_before_as=visit_sentinel( self, "whitespace_before_as", self.whitespace_before_as, visitor ), whitespace_after_as=visit_sentinel( self, "whitespace_after_as", self.whitespace_after_as, visitor ), name=visit_optional(self, "name", self.name, visitor), rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) def _validate(self) -> None: if self.name is None and self.pattern is not None: raise CSTValidationError("Pattern must be None if name is None") super(MatchAs, self)._validate() def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): pat = self.pattern name = self.name if pat is not None: pat._codegen(state) ws_before = self.whitespace_before_as if ws_before is MaybeSentinel.DEFAULT: state.add_token(" ") elif isinstance(ws_before, BaseParenthesizableWhitespace): ws_before._codegen(state) state.add_token("as") ws_after = self.whitespace_after_as if ws_after is MaybeSentinel.DEFAULT: state.add_token(" ") elif isinstance(ws_after, BaseParenthesizableWhitespace): ws_after._codegen(state) if name is None: state.add_token("_") else: name._codegen(state) @add_slots @dataclass(frozen=True) class MatchOrElement(CSTNode): """ An element in a :class:`MatchOr` node. """ pattern: MatchPattern #: An optional ``|`` separator. separator: Union[BitOr, MaybeSentinel] = MaybeSentinel.DEFAULT def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchOrElement": return MatchOrElement( pattern=visit_required(self, "pattern", self.pattern, visitor), separator=visit_sentinel(self, "separator", self.separator, visitor), ) def _codegen_impl( self, state: CodegenState, default_separator: bool = False ) -> None: with state.record_syntactic_position(self): self.pattern._codegen(state) sep = self.separator if sep is MaybeSentinel.DEFAULT and default_separator: state.add_token(" | ") elif isinstance(sep, BitOr): sep._codegen(state) @add_slots @dataclass(frozen=True) class MatchOr(MatchPattern): """ A match "or-pattern". It matches each of its subpatterns in turn to the subject, until one succeeds. The or-pattern is then deemed to succeed. If none of the subpatterns succeed the or-pattern fails. """ #: The subpatterns to be tried in turn. 
    patterns: Sequence[MatchOrElement]

    #: Parenthesis at the beginning of the node
    lpar: Sequence[LeftParen] = ()

    #: Parentheses after the pattern
    rpar: Sequence[RightParen] = ()

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchOr":
        return MatchOr(
            lpar=visit_sequence(self, "lpar", self.lpar, visitor),
            patterns=visit_sequence(self, "patterns", self.patterns, visitor),
            rpar=visit_sequence(self, "rpar", self.rpar, visitor),
        )

    def _codegen_impl(self, state: CodegenState) -> None:
        with self._parenthesize(state):
            pats = self.patterns
            for idx, pat in enumerate(pats):
                pat._codegen(state, default_separator=idx + 1 < len(pats))


@add_slots
@dataclass(frozen=True)
class TypeVar(CSTNode):
    """
    A simple (non-variadic) type variable.

    Note: this node represents a type variable when declared using PEP-695 syntax.
    """

    #: The name of the type variable.
    name: Name

    #: An optional bound on the type.
    bound: Optional[BaseExpression] = None

    #: The colon used to separate the name and bound. If not specified,
    #: :class:`MaybeSentinel` will be replaced with a colon if there is a bound,
    #: otherwise will be left empty.
    colon: Union[Colon, MaybeSentinel] = MaybeSentinel.DEFAULT

    def _codegen_impl(self, state: CodegenState) -> None:
        with state.record_syntactic_position(self):
            self.name._codegen(state)
            bound = self.bound
            colon = self.colon
            if not isinstance(colon, MaybeSentinel):
                colon._codegen(state)
            else:
                if bound is not None:
                    state.add_token(": ")
            if bound is not None:
                bound._codegen(state)

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeVar":
        return TypeVar(
            name=visit_required(self, "name", self.name, visitor),
            colon=visit_sentinel(self, "colon", self.colon, visitor),
            bound=visit_optional(self, "bound", self.bound, visitor),
        )


@add_slots
@dataclass(frozen=True)
class TypeVarTuple(CSTNode):
    """
    A variadic type variable.
    """

    #: The name of this type variable.
    name: Name

    #: The (optional) whitespace between the star declaring this type variable as
    #: variadic, and the variable's name.
    whitespace_after_star: SimpleWhitespace = SimpleWhitespace.field("")

    def _codegen_impl(self, state: CodegenState) -> None:
        with state.record_syntactic_position(self):
            state.add_token("*")
            self.whitespace_after_star._codegen(state)
            self.name._codegen(state)

    def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeVarTuple":
        return TypeVarTuple(
            name=visit_required(self, "name", self.name, visitor),
            whitespace_after_star=visit_required(
                self, "whitespace_after_star", self.whitespace_after_star, visitor
            ),
        )


@add_slots
@dataclass(frozen=True)
class ParamSpec(CSTNode):
    """
    A parameter specification.

    Note: this node represents a parameter specification when declared using
    PEP-695 syntax.
    """

    #: The name of this parameter specification.
    name: Name

    #: The (optional) whitespace between the double star declaring this type
    #: variable as a parameter specification, and the name.
whitespace_after_star: SimpleWhitespace = SimpleWhitespace.field("") def _codegen_impl(self, state: CodegenState) -> None: with state.record_syntactic_position(self): state.add_token("**") self.whitespace_after_star._codegen(state) self.name._codegen(state) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ParamSpec": return ParamSpec( name=visit_required(self, "name", self.name, visitor), whitespace_after_star=visit_required( self, "whitespace_after_star", self.whitespace_after_star, visitor ), ) @add_slots @dataclass(frozen=True) class TypeParam(CSTNode): """ A single type parameter that is contained in a :class:`TypeParameters` list. """ #: The actual parameter. param: Union[TypeVar, TypeVarTuple, ParamSpec] #: A trailing comma. If one is not provided, :class:`MaybeSentinel` will be replaced #: with a comma only if a comma is required. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: self.param._codegen(state) comma = self.comma if isinstance(comma, MaybeSentinel): if default_comma: state.add_token(", ") else: comma._codegen(state) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeParam": return TypeParam( param=visit_required(self, "param", self.param, visitor), comma=visit_sentinel(self, "comma", self.comma, visitor), ) @add_slots @dataclass(frozen=True) class TypeParameters(CSTNode): """ Type parameters when specified with PEP-695 syntax. This node captures all specified parameters that are enclosed with square brackets. """ #: The parameters within the square brackets. params: Sequence[TypeParam] = () #: Opening square bracket that marks the start of these parameters. lbracket: LeftSquareBracket = LeftSquareBracket.field() #: Closing square bracket that marks the end of these parameters. rbracket: RightSquareBracket = RightSquareBracket.field() def _codegen_impl(self, state: CodegenState) -> None: self.lbracket._codegen(state) params_len = len(self.params) for idx, param in enumerate(self.params): param._codegen(state, default_comma=idx + 1 < params_len) self.rbracket._codegen(state) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeParameters": return TypeParameters( lbracket=visit_required(self, "lbracket", self.lbracket, visitor), params=visit_sequence(self, "params", self.params, visitor), rbracket=visit_required(self, "rbracket", self.rbracket, visitor), ) @add_slots @dataclass(frozen=True) class TypeAlias(BaseSmallStatement): """ A type alias statement. This node represents the ``type`` statement as specified initially by PEP-695. Example: ``type ListOrSet[T] = list[T] | set[T]``. """ #: The name being introduced in this statement. name: Name #: Everything on the right hand side of the ``=``. value: BaseExpression #: An optional list of type parameters, specified after the name. type_parameters: Optional[TypeParameters] = None #: Whitespace between the ``type`` soft keyword and the name. whitespace_after_type: SimpleWhitespace = SimpleWhitespace.field(" ") #: Whitespace between the name and the type parameters (if they exist) or the ``=``. #: If not specified, :class:`MaybeSentinel` will be replaced with a single space if #: there are no type parameters, otherwise no spaces. whitespace_after_name: Union[ SimpleWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT #: Whitespace between the type parameters and the ``=``. Always empty if there are #: no type parameters. 
If not specified, :class:`MaybeSentinel` will be replaced #: with a single space if there are type parameters. whitespace_after_type_parameters: Union[ SimpleWhitespace, MaybeSentinel ] = MaybeSentinel.DEFAULT #: Whitespace between the ``=`` and the value. whitespace_after_equals: SimpleWhitespace = SimpleWhitespace.field(" ") #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT def _validate(self) -> None: if ( self.type_parameters is None and self.whitespace_after_type_parameters not in { SimpleWhitespace(""), MaybeSentinel.DEFAULT, } ): raise CSTValidationError( "whitespace_after_type_parameters must be empty when there are no type parameters in a TypeAlias" ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeAlias": return TypeAlias( whitespace_after_type=visit_required( self, "whitespace_after_type", self.whitespace_after_type, visitor ), name=visit_required(self, "name", self.name, visitor), whitespace_after_name=visit_sentinel( self, "whitespace_after_name", self.whitespace_after_name, visitor ), type_parameters=visit_optional( self, "type_parameters", self.type_parameters, visitor ), whitespace_after_type_parameters=visit_sentinel( self, "whitespace_after_type_parameters", self.whitespace_after_type_parameters, visitor, ), whitespace_after_equals=visit_required( self, "whitespace_after_equals", self.whitespace_after_equals, visitor ), value=visit_required(self, "value", self.value, visitor), semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), ) def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False ) -> None: with state.record_syntactic_position(self): state.add_token("type") self.whitespace_after_type._codegen(state) self.name._codegen(state) ws_after_name = self.whitespace_after_name if isinstance(ws_after_name, MaybeSentinel): if self.type_parameters is None: state.add_token(" ") else: ws_after_name._codegen(state) ws_after_type_params = self.whitespace_after_type_parameters if self.type_parameters is not None: self.type_parameters._codegen(state) if isinstance(ws_after_type_params, MaybeSentinel): state.add_token(" ") else: ws_after_type_params._codegen(state) state.add_token("=") self.whitespace_after_equals._codegen(state) self.value._codegen(state) semi = self.semicolon if isinstance(semi, MaybeSentinel): if default_semicolon: state.add_token("; ") else: semi._codegen(state) LibCST-1.2.0/libcst/_nodes/tests/000077500000000000000000000000001456464173300165005ustar00rootroot00000000000000LibCST-1.2.0/libcst/_nodes/tests/__init__.py000066400000000000000000000002631456464173300206120ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/_nodes/tests/base.py000066400000000000000000000241561456464173300177740ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
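# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): how the ``Match`` and
# ``TypeAlias`` nodes defined in libcst/_nodes/statement.py behave through the
# public API. Only ``libcst.parse_module`` and lossless code generation are
# assumed; the variable names are hypothetical, and PEP-695 ``type`` aliases
# require a parser that understands 3.12 syntax.
#
#     import libcst
#
#     source = (
#         "match command:\n"
#         "    case [action, obj]:\n"
#         "        run(action, obj)\n"
#     )
#     module = libcst.parse_module(source)
#     # module.body[0] is a Match node; its single MatchCase holds a
#     # MatchList pattern (with brackets) of two MatchSequenceElement children.
#     assert isinstance(module.body[0], libcst.Match)
#     assert module.code == source  # libcst round-trips losslessly
#
#     alias = libcst.parse_module("type ListOrSet[T] = list[T] | set[T]\n")
#     assert isinstance(alias.body[0].body[0], libcst.TypeAlias)
# ---------------------------------------------------------------------------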
import dataclasses from contextlib import ExitStack from dataclasses import dataclass from typing import Any, Callable, Iterable, List, Optional, Sequence, Type from unittest.mock import patch import libcst as cst from libcst._nodes.internal import CodegenState, visit_required from libcst._types import CSTNodeT from libcst._visitors import CSTTransformer, CSTVisitorT from libcst.metadata import CodeRange, PositionProvider from libcst.metadata.position_provider import PositionProvidingCodegenState from libcst.testing.utils import UnitTest @dataclass(frozen=True) class _CSTCodegenPatchTarget: type: Type[cst.CSTNode] name: str old_codegen: Callable[..., None] class _NOOPVisitor(CSTTransformer): pass def _cst_node_equality_func( a: cst.CSTNode, b: cst.CSTNode, msg: Optional[str] = None ) -> None: """ For use with addTypeEqualityFunc. """ if not a.deep_equals(b): suffix = "" if msg is None else f"\n{msg}" raise AssertionError(f"\n{a!r}\nis not deeply equal to \n{b!r}{suffix}") def parse_expression_as(**config: Any) -> Callable[[str], cst.BaseExpression]: def inner(code: str) -> cst.BaseExpression: return cst.parse_expression(code, config=cst.PartialParserConfig(**config)) return inner def parse_statement_as(**config: Any) -> Callable[[str], cst.BaseStatement]: def inner(code: str) -> cst.BaseStatement: return cst.parse_statement(code, config=cst.PartialParserConfig(**config)) return inner # We can't use an ABCMeta here, because of metaclass conflicts class CSTNodeTest(UnitTest): def setUp(self) -> None: # Fix `self.assertEqual` for CSTNode subclasses. We should compare equality by # value instead of identity (what `CSTNode.__eq__` does) for tests. # # The time complexity of CSTNode.deep_equals doesn't matter much inside tests. for v in cst.__dict__.values(): if isinstance(v, type) and issubclass(v, cst.CSTNode): self.addTypeEqualityFunc(v, _cst_node_equality_func) self.addTypeEqualityFunc(DummyIndentedBlock, _cst_node_equality_func) def validate_node( self, node: CSTNodeT, code: str, parser: Optional[Callable[[str], CSTNodeT]] = None, expected_position: Optional[CodeRange] = None, ) -> None: node.validate_types_deep() self.__assert_codegen(node, code, expected_position) if parser is not None: parsed_node = parser(code) self.assertEqual(parsed_node, node) # Tests of children should unwrap DummyIndentedBlock first, because we don't # want to test DummyIndentedBlock's behavior. unwrapped_node = node while isinstance(unwrapped_node, DummyIndentedBlock): unwrapped_node = unwrapped_node.child self.__assert_children_match_codegen(unwrapped_node) self.__assert_children_match_fields(unwrapped_node) self.__assert_visit_returns_identity(unwrapped_node) def assert_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: with self.assertRaisesRegex(cst.CSTValidationError, expected_re): get_node() def assert_invalid_types( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: with self.assertRaisesRegex(TypeError, expected_re): get_node().validate_types_shallow() def __assert_codegen( self, node: cst.CSTNode, expected: str, expected_position: Optional[CodeRange] = None, ) -> None: """ Verifies that the given node's `_codegen` method is correct. """ module = cst.Module([]) self.assertEqual(module.code_for_node(node), expected) if expected_position is not None: # This is using some internal APIs, because we only want to compute # position for the node being tested, not a whole module. 
            #
            # Normally, this is a nonsense operation (how can a node have a position if
            # it's not in a module?), which is why it's not supported, but it makes
            # sense in the context of these node tests.
            provider = PositionProvider()
            state = PositionProvidingCodegenState(
                default_indent=module.default_indent,
                default_newline=module.default_newline,
                provider=provider,
            )
            node._codegen(state)
            self.assertEqual(provider._computed[node], expected_position)

    def __assert_children_match_codegen(self, node: cst.CSTNode) -> None:
        children = node.children
        codegen_children = self.__derive_children_from_codegen(node)
        self.assertSequenceEqual(
            children,
            codegen_children,
            msg=(
                "The list of children we got from `node.children` differs from the "
                + "children that were visited by `node._codegen`. This is probably "
                + "due to a mismatch between _visit_and_replace_children and "
                + "_codegen_impl."
            ),
        )

    def __derive_children_from_codegen(
        self, node: cst.CSTNode
    ) -> Sequence[cst.CSTNode]:
        """
        Patches all subclasses of `CSTNode` exported by the `cst` module to track
        which `_codegen` methods get called, generating a list of children.

        Because all children must be rendered out into lexical order, this should be
        equivalent to `node.children`.

        `node.children` uses `_visit_and_replace_children` under the hood, not
        `_codegen`, so this helps us verify that both of those methods' behaviors
        are in sync.
        """
        patch_targets: Iterable[_CSTCodegenPatchTarget] = [
            _CSTCodegenPatchTarget(type=v, name=k, old_codegen=v._codegen)
            for (k, v) in cst.__dict__.items()
            if isinstance(v, type)
            and issubclass(v, cst.CSTNode)
            and hasattr(v, "_codegen")
        ]

        children: List[cst.CSTNode] = []
        codegen_stack: List[cst.CSTNode] = []

        def _get_codegen_override(
            target: _CSTCodegenPatchTarget,
        ) -> Callable[..., None]:
            def _codegen_impl(self: CSTNodeT, *args: Any, **kwargs: Any) -> None:
                should_pop = False
                # Don't stick duplicates in the stack. This is needed so that we don't
                # track calls to `super()._codegen()`.
                if len(codegen_stack) == 0 or codegen_stack[-1] is not self:
                    # Check the stack to see that we're a direct child, not the root or
                    # a transitive child.
                    if len(codegen_stack) == 1:
                        children.append(self)
                    codegen_stack.append(self)
                    should_pop = True
                target.old_codegen(self, *args, **kwargs)
                # only pop if we pushed something to the stack earlier
                if should_pop:
                    codegen_stack.pop()

            return _codegen_impl

        with ExitStack() as patch_stack:
            for t in patch_targets:
                patch_stack.enter_context(
                    patch(f"libcst.{t.name}._codegen", _get_codegen_override(t))
                )
            # Execute `node._codegen()`
            cst.Module([]).code_for_node(node)

        return children

    def __assert_children_match_fields(self, node: cst.CSTNode) -> None:
        """
        We expect `node.children` to match everything we can extract from the node's
        fields, but maybe in a different order. This asserts that those things match.
        If you want to verify order as well, use `assert_children_ordered`.
""" node_children_ids = {id(child) for child in node.children} fields = dataclasses.fields(node) field_child_ids = set() for f in fields: value = getattr(node, f.name) if isinstance(value, cst.CSTNode): field_child_ids.add(id(value)) elif isinstance(value, Iterable): field_child_ids.update( id(el) for el in value if isinstance(el, cst.CSTNode) ) # order doesn't matter self.assertSetEqual( node_children_ids, field_child_ids, msg="`node.children` doesn't match what we found through introspection", ) def __assert_visit_returns_identity(self, node: cst.CSTNode) -> None: """ When visit is called with a visitor that acts as a no-op, the visit method should return the same node it started with. """ # TODO: We're only checking equality right now, because visit currently clones # the node, since that was easier to implement. We should fix that behavior in a # later version and tighten this check. self.assertEqual(node, node.visit(_NOOPVisitor())) def assert_parses( self, code: str, parser: Callable[[str], cst.CSTNode], expect_success: bool, ) -> None: if not expect_success: with self.assertRaises(cst.ParserSyntaxError): parser(code) else: parser(code) @dataclass(frozen=True) class DummyIndentedBlock(cst.CSTNode): """ A stripped-down version of cst.IndentedBlock that only sets/clears the indentation state for the purpose of testing cst.IndentWhitespace in isolation. """ value: str child: cst.CSTNode def _codegen_impl(self, state: CodegenState) -> None: state.increase_indent(self.value) with state.record_syntactic_position( self, start_node=self.child, end_node=self.child ): self.child._codegen(state) state.decrease_indent() def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "DummyIndentedBlock": return DummyIndentedBlock( value=self.value, child=visit_required(self, "child", self.child, visitor) ) LibCST-1.2.0/libcst/_nodes/tests/test_assert.py000066400000000000000000000120141456464173300214100ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst.helpers import ensure_type from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class AssertConstructionTest(CSTNodeTest): @data_provider( ( # Simple assert { "node": cst.Assert(cst.Name("True")), "code": "assert True", "parser": None, "expected_position": None, }, # Assert with message { "node": cst.Assert( cst.Name("True"), cst.SimpleString('"Value should be true"') ), "code": 'assert True, "Value should be true"', "parser": None, "expected_position": None, }, # Whitespace oddities test { "node": cst.Assert( cst.Name("True", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)), whitespace_after_assert=cst.SimpleWhitespace(""), ), "code": "assert(True)", "parser": None, "expected_position": CodeRange((1, 0), (1, 12)), }, # Whitespace rendering test { "node": cst.Assert( whitespace_after_assert=cst.SimpleWhitespace(" "), test=cst.Name("True"), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), msg=cst.SimpleString('"Value should be true"'), ), "code": 'assert True , "Value should be true"', "parser": None, "expected_position": CodeRange((1, 0), (1, 39)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( # Validate whitespace handling { "get_node": ( lambda: cst.Assert( cst.Name("True"), whitespace_after_assert=cst.SimpleWhitespace(""), ) ), "expected_re": "Must have at least one space after 'assert'", }, # Validate comma handling { "get_node": ( lambda: cst.Assert(test=cst.Name("True"), comma=cst.Comma()) ), "expected_re": "Cannot have trailing comma after 'test'", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) def _assert_parser(code: str) -> cst.Assert: return ensure_type( ensure_type(parse_statement(code), cst.SimpleStatementLine).body[0], cst.Assert ) class AssertParsingTest(CSTNodeTest): @data_provider( ( # Simple assert { "node": cst.Assert(cst.Name("True")), "code": "assert True", "parser": _assert_parser, "expected_position": None, }, # Assert with message { "node": cst.Assert( cst.Name("True"), cst.SimpleString('"Value should be true"'), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), "code": 'assert True, "Value should be true"', "parser": _assert_parser, "expected_position": None, }, # Whitespace oddities test { "node": cst.Assert( cst.Name("True", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)), whitespace_after_assert=cst.SimpleWhitespace(""), ), "code": "assert(True)", "parser": _assert_parser, "expected_position": None, }, # Whitespace rendering test { "node": cst.Assert( whitespace_after_assert=cst.SimpleWhitespace(" "), test=cst.Name("True"), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), msg=cst.SimpleString('"Value should be true"'), ), "code": 'assert True , "Value should be true"', "parser": _assert_parser, "expected_position": None, }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_assign.py000066400000000000000000000372631456464173300214100ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
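# Illustrative sketch (not part of the test suite): the construction/rendering
# round trip the assignment cases below exercise, using only public libcst
# APIs.
#
#     import libcst as cst
#
#     node = cst.Assign(
#         targets=(cst.AssignTarget(target=cst.Name("foo")),),
#         value=cst.Integer("5"),
#     )
#     # AssignTarget's default whitespace puts one space around the "=".
#     assert cst.Module([]).code_for_node(node) == "foo = 5"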
from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class AssignTest(CSTNodeTest): @data_provider( ( # Simple assignment creation case. { "node": cst.Assign( (cst.AssignTarget(cst.Name("foo")),), cst.Integer("5") ), "code": "foo = 5", "parser": None, "expected_position": CodeRange((1, 0), (1, 7)), }, # Multiple targets creation { "node": cst.Assign( ( cst.AssignTarget(cst.Name("foo")), cst.AssignTarget(cst.Name("bar")), ), cst.Integer("5"), ), "code": "foo = bar = 5", "parser": None, "expected_position": CodeRange((1, 0), (1, 13)), }, # Whitespace test for creating nodes { "node": cst.Assign( ( cst.AssignTarget( cst.Name("foo"), whitespace_before_equal=cst.SimpleWhitespace(""), whitespace_after_equal=cst.SimpleWhitespace(""), ), ), cst.Integer("5"), ), "code": "foo=5", "parser": None, "expected_position": CodeRange((1, 0), (1, 5)), }, # Simple assignment parser case. { "node": cst.SimpleStatementLine( ( cst.Assign( (cst.AssignTarget(cst.Name("foo")),), cst.Integer("5") ), ) ), "code": "foo = 5\n", "parser": parse_statement, "expected_position": None, }, # Multiple targets parser { "node": cst.SimpleStatementLine( ( cst.Assign( ( cst.AssignTarget(cst.Name("foo")), cst.AssignTarget(cst.Name("bar")), ), cst.Integer("5"), ), ) ), "code": "foo = bar = 5\n", "parser": parse_statement, "expected_position": None, }, # Whitespace test parser { "node": cst.SimpleStatementLine( ( cst.Assign( ( cst.AssignTarget( cst.Name("foo"), whitespace_before_equal=cst.SimpleWhitespace(""), whitespace_after_equal=cst.SimpleWhitespace(""), ), ), cst.Integer("5"), ), ) ), "code": "foo=5\n", "parser": parse_statement, "expected_position": None, }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": (lambda: cst.Assign(targets=(), value=cst.Integer("5"))), "expected_re": "at least one AssignTarget", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) @data_provider( ( { "get_node": ( lambda: cst.Assign( # pyre-ignore: Incompatible parameter type [6] targets=[ cst.BinaryOperation( left=cst.Name("x"), operator=cst.Add(), right=cst.Integer("1"), ), ], value=cst.Name("y"), ) ), "expected_re": "Expected an instance of .*statement.AssignTarget.*", }, ) ) def test_invalid_types(self, **kwargs: Any) -> None: self.assert_invalid_types(**kwargs) class AnnAssignTest(CSTNodeTest): @data_provider( ( # Simple assignment creation case. { "node": cst.AnnAssign( cst.Name("foo"), cst.Annotation(cst.Name("str")), cst.Integer("5") ), "code": "foo: str = 5", "parser": None, "expected_position": CodeRange((1, 0), (1, 12)), }, # Annotation creation without assignment { "node": cst.AnnAssign(cst.Name("foo"), cst.Annotation(cst.Name("str"))), "code": "foo: str", "parser": None, "expected_position": CodeRange((1, 0), (1, 8)), }, # Complex annotation creation { "node": cst.AnnAssign( cst.Name("foo"), cst.Annotation( cst.Subscript( cst.Name("Optional"), (cst.SubscriptElement(cst.Index(cst.Name("str"))),), ) ), cst.Integer("5"), ), "code": "foo: Optional[str] = 5", "parser": None, "expected_position": CodeRange((1, 0), (1, 22)), }, # Simple assignment parser case. 
{ "node": cst.SimpleStatementLine( ( cst.AnnAssign( target=cst.Name("foo"), annotation=cst.Annotation( annotation=cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace(""), ), equal=cst.AssignEqual(), value=cst.Integer("5"), ), ) ), "code": "foo: str = 5\n", "parser": parse_statement, "expected_position": None, }, # Annotation without assignment { "node": cst.SimpleStatementLine( ( cst.AnnAssign( target=cst.Name("foo"), annotation=cst.Annotation( annotation=cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace(""), ), value=None, ), ) ), "code": "foo: str\n", "parser": parse_statement, "expected_position": None, }, # Complex annotation { "node": cst.SimpleStatementLine( ( cst.AnnAssign( target=cst.Name("foo"), annotation=cst.Annotation( annotation=cst.Subscript( cst.Name("Optional"), (cst.SubscriptElement(cst.Index(cst.Name("str"))),), ), whitespace_before_indicator=cst.SimpleWhitespace(""), ), equal=cst.AssignEqual(), value=cst.Integer("5"), ), ) ), "code": "foo: Optional[str] = 5\n", "parser": parse_statement, "expected_position": None, }, # Whitespace test { "node": cst.AnnAssign( target=cst.Name("foo"), annotation=cst.Annotation( annotation=cst.Subscript( cst.Name("Optional"), (cst.SubscriptElement(cst.Index(cst.Name("str"))),), ), whitespace_before_indicator=cst.SimpleWhitespace(" "), whitespace_after_indicator=cst.SimpleWhitespace(" "), ), equal=cst.AssignEqual( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), value=cst.Integer("5"), ), "code": "foo : Optional[str] = 5", "parser": None, "expected_position": CodeRange((1, 0), (1, 26)), }, { "node": cst.SimpleStatementLine( ( cst.AnnAssign( target=cst.Name("foo"), annotation=cst.Annotation( annotation=cst.Subscript( cst.Name("Optional"), (cst.SubscriptElement(cst.Index(cst.Name("str"))),), ), whitespace_before_indicator=cst.SimpleWhitespace(" "), whitespace_after_indicator=cst.SimpleWhitespace(" "), ), equal=cst.AssignEqual( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), value=cst.Integer("5"), ), ) ), "code": "foo : Optional[str] = 5\n", "parser": parse_statement, "expected_position": None, }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": ( lambda: cst.AnnAssign( target=cst.Name("foo"), annotation=cst.Annotation(cst.Name("str")), equal=cst.AssignEqual(), value=None, ) ), "expected_re": "Must have a value when specifying an AssignEqual.", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) @data_provider( ( { "get_node": ( lambda: cst.AnnAssign( # pyre-ignore: Incompatible parameter type [6] target=cst.BinaryOperation( left=cst.Name("x"), operator=cst.Add(), right=cst.Integer("1"), ), annotation=cst.Annotation(cst.Name("int")), equal=cst.AssignEqual(), value=cst.Name("y"), ) ), "expected_re": ( "Expected an instance of .*BaseAssignTargetExpression.*" ), }, ) ) def test_invalid_types(self, **kwargs: Any) -> None: self.assert_invalid_types(**kwargs) class AugAssignTest(CSTNodeTest): @data_provider( ( # Simple assignment constructor case. 
{ "node": cst.AugAssign( cst.Name("foo"), cst.AddAssign(), cst.Integer("5") ), "code": "foo += 5", "parser": None, "expected_position": CodeRange((1, 0), (1, 8)), }, { "node": cst.AugAssign( cst.Name("bar"), cst.MultiplyAssign(), cst.Name("foo") ), "code": "bar *= foo", "parser": None, "expected_position": None, }, # Whitespace constructor test { "node": cst.AugAssign( target=cst.Name("foo"), operator=cst.LeftShiftAssign( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), value=cst.Integer("5"), ), "code": "foo <<= 5", "parser": None, "expected_position": CodeRange((1, 0), (1, 11)), }, # Simple assignment parser case. { "node": cst.SimpleStatementLine( (cst.AugAssign(cst.Name("foo"), cst.AddAssign(), cst.Integer("5")),) ), "code": "foo += 5\n", "parser": parse_statement, "expected_position": None, }, { "node": cst.SimpleStatementLine( ( cst.AugAssign( cst.Name("bar"), cst.MultiplyAssign(), cst.Name("foo") ), ) ), "code": "bar *= foo\n", "parser": parse_statement, "expected_position": None, }, # Whitespace parser test { "node": cst.SimpleStatementLine( ( cst.AugAssign( target=cst.Name("foo"), operator=cst.LeftShiftAssign( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), value=cst.Integer("5"), ), ) ), "code": "foo <<= 5\n", "parser": parse_statement, "expected_position": None, }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": ( lambda: cst.AugAssign( # pyre-ignore: Incompatible parameter type [6] target=cst.BinaryOperation( left=cst.Name("x"), operator=cst.Add(), right=cst.Integer("1"), ), operator=cst.AddAssign(), value=cst.Name("y"), ) ), "expected_re": ( "Expected an instance of .*BaseAssignTargetExpression.*" ), }, ) ) def test_invalid_types(self, **kwargs: Any) -> None: self.assert_invalid_types(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_atom.py000066400000000000000000001312341456464173300210550ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import Any import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest, parse_expression_as from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider def _parse_expression_force_38(code: str) -> cst.BaseExpression: return cst.parse_expression( code, config=cst.PartialParserConfig(python_version="3.8") ) class AtomTest(CSTNodeTest): @data_provider( ( # Simple identifier { "node": cst.Name("test"), "code": "test", "parser": parse_expression, "expected_position": None, }, # Parenthesized identifier { "node": cst.Name( "test", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), "code": "(test)", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 5)), }, # Decimal integers { "node": cst.Integer("12345"), "code": "12345", "parser": parse_expression, "expected_position": None, }, { "node": cst.Integer("0000"), "code": "0000", "parser": parse_expression, "expected_position": None, }, { "node": cst.Integer("1_234_567"), "code": "1_234_567", "parser": parse_expression, "expected_position": None, }, { "node": cst.Integer("0_000"), "code": "0_000", "parser": parse_expression, "expected_position": None, }, # Binary integers { "node": cst.Integer("0b0000"), "code": "0b0000", "parser": parse_expression, "expected_position": None, }, { "node": cst.Integer("0B1011_0100"), "code": "0B1011_0100", "parser": parse_expression, "expected_position": None, }, # Octal integers { "node": cst.Integer("0o12345"), "code": "0o12345", "parser": parse_expression, "expected_position": None, }, { "node": cst.Integer("0O12_345"), "code": "0O12_345", "parser": parse_expression, "expected_position": None, }, # Hex numbers { "node": cst.Integer("0x123abc"), "code": "0x123abc", "parser": parse_expression, "expected_position": None, }, { "node": cst.Integer("0X12_3ABC"), "code": "0X12_3ABC", "parser": parse_expression, "expected_position": None, }, # Parenthesized integers { "node": cst.Integer( "123", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), "code": "(123)", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 4)), }, # Non-exponent floats { "node": cst.Float("12345."), "code": "12345.", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float("00.00"), "code": "00.00", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float("12.21"), "code": "12.21", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float(".321"), "code": ".321", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float("1_234_567."), "code": "1_234_567.", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float("0.000_000"), "code": "0.000_000", "parser": parse_expression, "expected_position": None, }, # Exponent floats { "node": cst.Float("12345.e10"), "code": "12345.e10", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float("00.00e10"), "code": "00.00e10", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float("12.21e10"), "code": "12.21e10", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float(".321e10"), "code": ".321e10", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float("1_234_567.e10"), "code": "1_234_567.e10", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float("0.000_000e10"), "code": "0.000_000e10", "parser": parse_expression, 
"expected_position": None, }, { "node": cst.Float("1e+10"), "code": "1e+10", "parser": parse_expression, "expected_position": None, }, { "node": cst.Float("1e-10"), "code": "1e-10", "parser": parse_expression, "expected_position": None, }, # Parenthesized floats { "node": cst.Float( "123.4", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), "code": "(123.4)", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 6)), }, # Imaginary numbers { "node": cst.Imaginary("12345j"), "code": "12345j", "parser": parse_expression, "expected_position": None, }, { "node": cst.Imaginary("1_234_567J"), "code": "1_234_567J", "parser": parse_expression, "expected_position": None, }, { "node": cst.Imaginary("12345.e10j"), "code": "12345.e10j", "parser": parse_expression, "expected_position": None, }, { "node": cst.Imaginary(".321J"), "code": ".321J", "parser": parse_expression, "expected_position": None, }, # Parenthesized imaginary { "node": cst.Imaginary( "123.4j", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), "code": "(123.4j)", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 7)), }, # Simple elipses { "node": cst.Ellipsis(), "code": "...", "parser": parse_expression, "expected_position": None, }, # Parenthesized elipses { "node": cst.Ellipsis(lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)), "code": "(...)", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 4)), }, # Simple strings { "node": cst.SimpleString('""'), "code": '""', "parser": parse_expression, "expected_position": None, }, { "node": cst.SimpleString("''"), "code": "''", "parser": parse_expression, "expected_position": None, }, { "node": cst.SimpleString('"test"'), "code": '"test"', "parser": parse_expression, "expected_position": None, }, { "node": cst.SimpleString('b"test"'), "code": 'b"test"', "parser": parse_expression, "expected_position": None, }, { "node": cst.SimpleString('r"test"'), "code": 'r"test"', "parser": parse_expression, "expected_position": None, }, { "node": cst.SimpleString('"""test"""'), "code": '"""test"""', "parser": parse_expression, "expected_position": None, }, # Validate parens { "node": cst.SimpleString( '"test"', lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), "code": '("test")', "parser": parse_expression, "expected_position": None, }, { "node": cst.SimpleString( 'rb"test"', lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), "code": '(rb"test")', "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 9)), }, # Test that _safe_to_use_with_word_operator allows no space around quotes { "node": cst.Comparison( cst.SimpleString('"a"'), [ cst.ComparisonTarget( cst.In( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), cst.SimpleString('"abc"'), ) ], ), "code": '"a"in"abc"', "parser": parse_expression, }, { "node": cst.Comparison( cst.SimpleString('"a"'), [ cst.ComparisonTarget( cst.In( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), cst.ConcatenatedString( cst.SimpleString('"a"'), cst.SimpleString('"bc"') ), ) ], ), "code": '"a"in"a""bc"', "parser": parse_expression, }, # Parenthesis make no spaces around a prefix okay { "node": cst.Comparison( cst.SimpleString('b"a"'), [ cst.ComparisonTarget( cst.In( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), cst.SimpleString( 'b"abc"', lpar=[cst.LeftParen()], rpar=[cst.RightParen()], ), ) ], ), "code": 'b"a"in(b"abc")', "parser": parse_expression, }, { "node": 
cst.Comparison( cst.SimpleString('b"a"'), [ cst.ComparisonTarget( cst.In( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), cst.ConcatenatedString( cst.SimpleString('b"a"'), cst.SimpleString('b"bc"'), lpar=[cst.LeftParen()], rpar=[cst.RightParen()], ), ) ], ), "code": 'b"a"in(b"a"b"bc")', "parser": parse_expression, }, # Empty formatted strings { "node": cst.FormattedString(start='f"', parts=(), end='"'), "code": 'f""', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString(start="f'", parts=(), end="'"), "code": "f''", "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString(start='f"""', parts=(), end='"""'), "code": 'f""""""', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString(start="f'''", parts=(), end="'''"), "code": "f''''''", "parser": parse_expression, "expected_position": None, }, # Non-empty formatted strings { "node": cst.FormattedString(parts=(cst.FormattedStringText("foo"),)), "code": 'f"foo"', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString( parts=(cst.FormattedStringExpression(cst.Name("foo")),) ), "code": 'f"{foo}"', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringText("foo "), cst.FormattedStringExpression(cst.Name("bar")), cst.FormattedStringText(" baz"), ) ), "code": 'f"foo {bar} baz"', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringText("foo "), cst.FormattedStringExpression(cst.Call(cst.Name("bar"))), cst.FormattedStringText(" baz"), ) ), "code": 'f"foo {bar()} baz"', "parser": parse_expression, "expected_position": None, }, # Formatted strings with conversions and format specifiers { "node": cst.FormattedString( parts=( cst.FormattedStringExpression(cst.Name("foo"), conversion="s"), ) ), "code": 'f"{foo!s}"', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression(cst.Name("foo"), format_spec=()), ) ), "code": 'f"{foo:}"', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Name("today"), format_spec=(cst.FormattedStringText("%B %d, %Y"),), ), ) ), "code": 'f"{today:%B %d, %Y}"', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Name("foo"), format_spec=( cst.FormattedStringExpression(cst.Name("bar")), ), ), ) ), "code": 'f"{foo:{bar}}"', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Name("foo"), format_spec=( cst.FormattedStringExpression(cst.Name("bar")), cst.FormattedStringText("."), cst.FormattedStringExpression(cst.Name("baz")), ), ), ) ), "code": 'f"{foo:{bar}.{baz}}"', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Name("foo"), conversion="s", format_spec=( cst.FormattedStringExpression(cst.Name("bar")), ), ), ) ), "code": 'f"{foo!s:{bar}}"', "parser": parse_expression, "expected_position": None, }, # Test equality expression added in 3.8. 
{ "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Name("foo"), equal=cst.AssignEqual( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), ), ), ), "code": 'f"{foo=}"', "parser": _parse_expression_force_38, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Name("foo"), equal=cst.AssignEqual( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), conversion="s", ), ), ), "code": 'f"{foo=!s}"', "parser": _parse_expression_force_38, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Name("foo"), equal=cst.AssignEqual( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), conversion="s", format_spec=( cst.FormattedStringExpression(cst.Name("bar")), ), ), ), ), "code": 'f"{foo=!s:{bar}}"', "parser": _parse_expression_force_38, "expected_position": None, }, # Test that equality support doesn't break existing support { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Comparison( left=cst.Name( value="a", ), comparisons=[ cst.ComparisonTarget( operator=cst.Equal(), comparator=cst.Name( value="b", ), ), ], ), ), ), ), "code": 'f"{a == b}"', "parser": _parse_expression_force_38, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Comparison( left=cst.Name( value="a", ), comparisons=[ cst.ComparisonTarget( operator=cst.NotEqual(), comparator=cst.Name( value="b", ), ), ], ), ), ), ), "code": 'f"{a != b}"', "parser": _parse_expression_force_38, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.NamedExpr( target=cst.Name( value="a", ), value=cst.Integer( value="5", ), lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), ), ), ), "code": 'f"{(a := 5)}"', "parser": _parse_expression_force_38, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringExpression( cst.Yield( value=cst.Integer("1"), whitespace_after_yield=cst.SimpleWhitespace(" "), ), ), ), ), "code": 'f"{yield 1}"', "parser": _parse_expression_force_38, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringText("\\N{X Y}"), cst.FormattedStringExpression( cst.Name(value="Z"), ), ), ), "code": 'f"\\N{X Y}{Z}"', "parser": parse_expression, "expected_position": None, }, { "node": cst.FormattedString( parts=( cst.FormattedStringText("\\"), cst.FormattedStringExpression( cst.Name(value="a"), ), ), start='fr"', ), "code": 'fr"\\{a}"', "parser": parse_expression, "expected_position": None, }, # Validate parens { "node": cst.FormattedString( start='f"', parts=(), end='"', lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), "code": '(f"")', "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 4)), }, # Generator expression (doesn't make sense, but legal syntax) { "node": cst.FormattedString( start='f"', parts=[ cst.FormattedStringExpression( expression=cst.GeneratorExp( elt=cst.Name( value="x", ), for_in=cst.CompFor( target=cst.Name( value="x", ), iter=cst.Name( value="y", ), ), lpar=[], rpar=[], ), ), ], end='"', ), "code": 'f"{x for x in y}"', "parser": parse_expression, "expected_position": None, }, # Unpacked tuple { "node": cst.FormattedString( parts=[ cst.FormattedStringExpression( expression=cst.Tuple( elements=[ cst.Element( value=cst.Name( value="a", ), comma=cst.Comma( 
whitespace_before=cst.SimpleWhitespace( value="", ), whitespace_after=cst.SimpleWhitespace( value=" ", ), ), ), cst.Element( value=cst.Name( value="b", ), ), ], lpar=[], rpar=[], ), ), ], start="f'", end="'", ), "code": "f'{a, b}'", "parser": parse_expression, "expected_position": None, }, # Conditional expression { "node": cst.FormattedString( parts=[ cst.FormattedStringExpression( expression=cst.IfExp( test=cst.Name( value="b", ), body=cst.Name( value="a", ), orelse=cst.Name( value="c", ), ), ), ], start="f'", end="'", ), "code": "f'{a if b else c}'", "parser": parse_expression, "expected_position": None, }, # Concatenated strings { "node": cst.ConcatenatedString( cst.SimpleString('"ab"'), cst.SimpleString('"c"') ), "code": '"ab""c"', "parser": parse_expression, "expected_position": None, }, { "node": cst.ConcatenatedString( cst.SimpleString('"ab"'), cst.ConcatenatedString( cst.SimpleString('"c"'), cst.SimpleString('"d"') ), ), "code": '"ab""c""d"', "parser": parse_expression, "expected_position": None, }, # mixed SimpleString and FormattedString { "node": cst.ConcatenatedString( cst.FormattedString([cst.FormattedStringText("ab")]), cst.SimpleString('"c"'), ), "code": 'f"ab""c"', "parser": parse_expression, "expected_position": None, }, { "node": cst.ConcatenatedString( cst.SimpleString('"ab"'), cst.FormattedString([cst.FormattedStringText("c")]), ), "code": '"ab"f"c"', "parser": parse_expression, "expected_position": None, }, # Concatenated parenthesized strings { "node": cst.ConcatenatedString( lpar=(cst.LeftParen(),), left=cst.SimpleString('"ab"'), right=cst.SimpleString('"c"'), rpar=(cst.RightParen(),), ), "code": '("ab""c")', "parser": parse_expression, "expected_position": None, }, # Validate spacing { "node": cst.ConcatenatedString( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), left=cst.SimpleString('"ab"'), whitespace_between=cst.SimpleWhitespace(" "), right=cst.SimpleString('"c"'), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "code": '( "ab" "c" )', "parser": parse_expression, "expected_position": CodeRange((1, 2), (1, 10)), }, ) ) def test_valid(self, **kwargs: Any) -> None: # We don't have sentinel nodes for atoms, so we know that 100% of atoms # can be parsed identically to their creation. 
self.validate_node(**kwargs) @data_provider( ( { "node": cst.FormattedStringExpression( cst.Name("today"), format_spec=(cst.FormattedStringText("%B %d, %Y"),), ), "code": "{today:%B %d, %Y}", "parser": None, "expected_position": CodeRange((1, 0), (1, 17)), }, ) ) def test_valid_no_parse(self, **kwargs: Any) -> None: # Test some nodes that aren't valid source code by themselves self.validate_node(**kwargs) @data_provider( ( # Expression wrapping parenthesis rules { "get_node": (lambda: cst.Name("foo", lpar=(cst.LeftParen(),))), "expected_re": "left paren without right paren", }, { "get_node": lambda: cst.Name("foo", rpar=(cst.RightParen(),)), "expected_re": "right paren without left paren", }, { "get_node": lambda: cst.Ellipsis(lpar=(cst.LeftParen(),)), "expected_re": "left paren without right paren", }, { "get_node": lambda: cst.Ellipsis(rpar=(cst.RightParen(),)), "expected_re": "right paren without left paren", }, { "get_node": lambda: cst.Integer("5", lpar=(cst.LeftParen(),)), "expected_re": "left paren without right paren", }, { "get_node": lambda: cst.Integer("5", rpar=(cst.RightParen(),)), "expected_re": "right paren without left paren", }, { "get_node": lambda: cst.Float("5.5", lpar=(cst.LeftParen(),)), "expected_re": "left paren without right paren", }, { "get_node": lambda: cst.Float("5.5", rpar=(cst.RightParen(),)), "expected_re": "right paren without left paren", }, { "get_node": (lambda: cst.Imaginary("5j", lpar=(cst.LeftParen(),))), "expected_re": "left paren without right paren", }, { "get_node": (lambda: cst.Imaginary("5j", rpar=(cst.RightParen(),))), "expected_re": "right paren without left paren", }, { "get_node": (lambda: cst.Integer("5", lpar=(cst.LeftParen(),))), "expected_re": "left paren without right paren", }, { "get_node": (lambda: cst.Integer("5", rpar=(cst.RightParen(),))), "expected_re": "right paren without left paren", }, { "get_node": ( lambda: cst.SimpleString("'foo'", lpar=(cst.LeftParen(),)) ), "expected_re": "left paren without right paren", }, { "get_node": ( lambda: cst.SimpleString("'foo'", rpar=(cst.RightParen(),)) ), "expected_re": "right paren without left paren", }, { "get_node": ( lambda: cst.FormattedString(parts=(), lpar=(cst.LeftParen(),)) ), "expected_re": "left paren without right paren", }, { "get_node": ( lambda: cst.FormattedString(parts=(), rpar=(cst.RightParen(),)) ), "expected_re": "right paren without left paren", }, { "get_node": ( lambda: cst.ConcatenatedString( cst.SimpleString("'foo'"), cst.SimpleString("'foo'"), lpar=(cst.LeftParen(),), ) ), "expected_re": "left paren without right paren", }, { "get_node": ( lambda: cst.ConcatenatedString( cst.SimpleString("'foo'"), cst.SimpleString("'foo'"), rpar=(cst.RightParen(),), ) ), "expected_re": "right paren without left paren", }, # Node-specific rules { "get_node": (lambda: cst.Name("")), "expected_re": "empty name identifier", }, { "get_node": (lambda: cst.Name(r"\/")), "expected_re": "not a valid identifier", }, { "get_node": (lambda: cst.Integer("")), "expected_re": "not a valid integer", }, { "get_node": (lambda: cst.Integer("012345")), "expected_re": "not a valid integer", }, { "get_node": (lambda: cst.Integer("012345")), "expected_re": "not a valid integer", }, { "get_node": (lambda: cst.Integer("_12345")), "expected_re": "not a valid integer", }, { "get_node": (lambda: cst.Integer("0b2")), "expected_re": "not a valid integer", }, { "get_node": (lambda: cst.Integer("0o8")), "expected_re": "not a valid integer", }, { "get_node": (lambda: cst.Integer("0xg")), "expected_re": "not a 
valid integer", }, { "get_node": (lambda: cst.Integer("123.45")), "expected_re": "not a valid integer", }, { "get_node": (lambda: cst.Integer("12345j")), "expected_re": "not a valid integer", }, { "get_node": (lambda: cst.Float("12.3.45")), "expected_re": "not a valid float", }, {"get_node": (lambda: cst.Float("12")), "expected_re": "not a valid float"}, { "get_node": (lambda: cst.Float("12.3j")), "expected_re": "not a valid float", }, { "get_node": (lambda: cst.Imaginary("_12345j")), "expected_re": "not a valid imaginary", }, { "get_node": (lambda: cst.Imaginary("0b0j")), "expected_re": "not a valid imaginary", }, { "get_node": (lambda: cst.Imaginary("0o0j")), "expected_re": "not a valid imaginary", }, { "get_node": (lambda: cst.Imaginary("0x0j")), "expected_re": "not a valid imaginary", }, { "get_node": (lambda: cst.SimpleString('wee""')), "expected_re": "Invalid string prefix", }, { "get_node": (lambda: cst.SimpleString("'")), "expected_re": "must have enclosing quotes", }, { "get_node": (lambda: cst.SimpleString('"')), "expected_re": "must have enclosing quotes", }, { "get_node": (lambda: cst.SimpleString("\"'")), "expected_re": "must have matching enclosing quotes", }, { "get_node": (lambda: cst.SimpleString("")), "expected_re": "must have enclosing quotes", }, { "get_node": (lambda: cst.SimpleString("'bla")), "expected_re": "must have matching enclosing quotes", }, { "get_node": (lambda: cst.SimpleString("f''")), "expected_re": "Invalid string prefix", }, { "get_node": (lambda: cst.SimpleString("'''bla''")), "expected_re": "must have matching enclosing quotes", }, { "get_node": (lambda: cst.SimpleString("'''bla\"\"\"")), "expected_re": "must have matching enclosing quotes", }, { "get_node": (lambda: cst.FormattedString(start="'", parts=(), end="'")), "expected_re": "Invalid f-string prefix", }, { "get_node": ( lambda: cst.FormattedString(start="f'", parts=(), end='"') ), "expected_re": "must have matching enclosing quotes", }, { "get_node": ( lambda: cst.ConcatenatedString( cst.SimpleString( '"ab"', lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), cst.SimpleString('"c"'), ) ), "expected_re": "Cannot concatenate parenthesized", }, { "get_node": ( lambda: cst.ConcatenatedString( cst.SimpleString('"ab"'), cst.SimpleString( '"c"', lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), ) ), "expected_re": "Cannot concatenate parenthesized", }, { "get_node": ( lambda: cst.ConcatenatedString( cst.SimpleString('"ab"'), cst.SimpleString('b"c"') ) ), "expected_re": "Cannot concatenate string and bytes", }, # This isn't valid code: `"a" inb"abc"` { "get_node": ( lambda: cst.Comparison( cst.SimpleString('"a"'), [ cst.ComparisonTarget( cst.In(whitespace_after=cst.SimpleWhitespace("")), cst.SimpleString('b"abc"'), ) ], ) ), "expected_re": "Must have at least one space around comparison operator.", }, # Also not valid: `"a" in b"a"b"bc"` { "get_node": ( lambda: cst.Comparison( cst.SimpleString('"a"'), [ cst.ComparisonTarget( cst.In(whitespace_after=cst.SimpleWhitespace("")), cst.ConcatenatedString( cst.SimpleString('b"a"'), cst.SimpleString('b"bc"') ), ) ], ) ), "expected_re": "Must have at least one space around comparison operator.", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) @data_provider( ( { "code": "u'x'", "parser": parse_expression_as(python_version="3.3"), "expect_success": True, }, { "code": "u'x'", "parser": parse_expression_as(python_version="3.1"), "expect_success": False, }, ) ) def test_versions(self, **kwargs: Any) -> None: if is_native() 
and not kwargs.get("expect_success", True): self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) class StringHelperTest(CSTNodeTest): def test_string_prefix_and_quotes(self) -> None: """ Test our helpers out for various strings. """ emptybytestring = cst.ensure_type(parse_expression('b""'), cst.SimpleString) bytestring = cst.ensure_type(parse_expression('b"abc"'), cst.SimpleString) multilinestring = cst.ensure_type(parse_expression('""""""'), cst.SimpleString) formatstring = cst.ensure_type(parse_expression('f""""""'), cst.FormattedString) self.assertEqual(emptybytestring.prefix, "b") self.assertEqual(emptybytestring.quote, '"') self.assertEqual(emptybytestring.raw_value, "") self.assertEqual(bytestring.prefix, "b") self.assertEqual(bytestring.quote, '"') self.assertEqual(bytestring.raw_value, "abc") self.assertEqual(multilinestring.prefix, "") self.assertEqual(multilinestring.quote, '"""') self.assertEqual(multilinestring.raw_value, "") self.assertEqual(formatstring.prefix, "f") self.assertEqual(formatstring.quote, '"""') LibCST-1.2.0/libcst/_nodes/tests/test_attribute.py000066400000000000000000000052761456464173300221260ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class AttributeTest(CSTNodeTest): @data_provider( ( # Simple attribute access { "node": cst.Attribute(cst.Name("foo"), cst.Name("bar")), "code": "foo.bar", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 7)), }, # Parenthesized attribute access { "node": cst.Attribute( lpar=(cst.LeftParen(),), value=cst.Name("foo"), attr=cst.Name("bar"), rpar=(cst.RightParen(),), ), "code": "(foo.bar)", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 8)), }, # Make sure that spacing works { "node": cst.Attribute( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), value=cst.Name("foo"), dot=cst.Dot( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), attr=cst.Name("bar"), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "code": "( foo . bar )", "parser": parse_expression, "expected_position": CodeRange((1, 2), (1, 11)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": ( lambda: cst.Attribute( cst.Name("foo"), cst.Name("bar"), lpar=(cst.LeftParen(),) ) ), "expected_re": "left paren without right paren", }, { "get_node": ( lambda: cst.Attribute( cst.Name("foo"), cst.Name("bar"), rpar=(cst.RightParen(),) ) ), "expected_re": "right paren without left paren", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_await.py000066400000000000000000000157421456464173300212270ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
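# A minimal round-trip sketch of the behavior the Await tests below exercise
# (a hedged example, not part of the test suite; the name `fetch` is
# illustrative only): build an `Await` node by hand, render it, and check that
# re-parsing the rendered code yields a structurally equal tree.
import libcst as cst

_node = cst.Await(cst.Call(cst.Name("fetch")))
_code = cst.Module(body=[]).code_for_node(_node)  # renders "await fetch()"
assert _code == "await fetch()"
# Mirror the tests' use of a 3.7 parser config, where `await` is a keyword.
_parsed = cst.parse_expression(
    _code, config=cst.PartialParserConfig(python_version="3.7")
)
assert _parsed.deep_equals(_node)  # structural equality, not identity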
from typing import Any import libcst as cst from libcst import parse_expression, parse_statement, PartialParserConfig from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class AwaitTest(CSTNodeTest): @data_provider( ( # Some simple calls { "node": cst.Await(cst.Name("test")), "code": "await test", "parser": lambda code: parse_expression( code, config=PartialParserConfig(python_version="3.7") ), "expected_position": None, }, { "node": cst.Await(cst.Call(cst.Name("test"))), "code": "await test()", "parser": lambda code: parse_expression( code, config=PartialParserConfig(python_version="3.7") ), "expected_position": None, }, # Whitespace { "node": cst.Await( cst.Name("test"), whitespace_after_await=cst.SimpleWhitespace(" "), lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "code": "( await test )", "parser": lambda code: parse_expression( code, config=PartialParserConfig(python_version="3.7") ), "expected_position": CodeRange((1, 2), (1, 13)), }, # Whitespace after await { "node": cst.Await( cst.Name("foo", lpar=[cst.LeftParen()], rpar=[cst.RightParen()]), whitespace_after_await=cst.SimpleWhitespace(""), ), "code": "await(foo)", }, ) ) def test_valid_py37(self, **kwargs: Any) -> None: # We don't have sentinel nodes for atoms, so we know that 100% of atoms # can be parsed identically to their creation. self.validate_node(**kwargs) @data_provider( ( # Some simple calls { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.IndentedBlock( ( cst.SimpleStatementLine( (cst.Expr(cst.Await(cst.Name("test"))),) ), ) ), asynchronous=cst.Asynchronous(), ), "code": "async def foo():\n await test\n", "parser": lambda code: parse_statement( code, config=PartialParserConfig(python_version="3.6") ), "expected_position": None, }, { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.IndentedBlock( ( cst.SimpleStatementLine( (cst.Expr(cst.Await(cst.Call(cst.Name("test")))),) ), ) ), asynchronous=cst.Asynchronous(), ), "code": "async def foo():\n await test()\n", "parser": lambda code: parse_statement( code, config=PartialParserConfig(python_version="3.6") ), "expected_position": None, }, # Whitespace { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.IndentedBlock( ( cst.SimpleStatementLine( ( cst.Expr( cst.Await( cst.Name("test"), whitespace_after_await=cst.SimpleWhitespace( " " ), lpar=( cst.LeftParen( whitespace_after=cst.SimpleWhitespace( " " ) ), ), rpar=( cst.RightParen( whitespace_before=cst.SimpleWhitespace( " " ) ), ), ) ), ) ), ) ), asynchronous=cst.Asynchronous(), ), "code": "async def foo():\n ( await test )\n", "parser": lambda code: parse_statement( code, config=PartialParserConfig(python_version="3.6") ), "expected_position": None, }, ) ) def test_valid_py36(self, **kwargs: Any) -> None: # We don't have sentinel nodes for atoms, so we know that 100% of atoms # can be parsed identically to their creation. 
self.validate_node(**kwargs) @data_provider( ( # Expression wrapping parenthesis rules { "get_node": ( lambda: cst.Await(cst.Name("foo"), lpar=(cst.LeftParen(),)) ), "expected_re": "left paren without right paren", }, { "get_node": ( lambda: cst.Await(cst.Name("foo"), rpar=(cst.RightParen(),)) ), "expected_re": "right paren without left paren", }, { "get_node": ( lambda: cst.Await( cst.Name("foo"), whitespace_after_await=cst.SimpleWhitespace("") ) ), "expected_re": "at least one space after await", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_binary_op.py000066400000000000000000000147641456464173300221070ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class BinaryOperationTest(CSTNodeTest): @data_provider( ( # Simple binary operations { "node": cst.BinaryOperation( cst.Name("foo"), cst.Add(), cst.Float("5.5") ), "code": "foo + 5.5", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.Subtract(), cst.Float("5.5") ), "code": "foo - 5.5", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.LeftShift(), cst.Integer("5") ), "code": "foo << 5", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.RightShift(), cst.Integer("5") ), "code": "foo >> 5", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.BitAnd(), cst.Name("bar") ), "code": "foo & bar", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.BitXor(), cst.Name("bar") ), "code": "foo ^ bar", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.BitOr(), cst.Name("bar") ), "code": "foo | bar", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.Multiply(), cst.Float("5.5") ), "code": "foo * 5.5", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.MatrixMultiply(), cst.Float("5.5") ), "code": "foo @ 5.5", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.Divide(), cst.Float("5.5") ), "code": "foo / 5.5", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.Modulo(), cst.Float("5.5") ), "code": "foo % 5.5", "parser": parse_expression, "expected_position": None, }, { "node": cst.BinaryOperation( cst.Name("foo"), cst.FloorDivide(), cst.Float("5.5") ), "code": "foo // 5.5", "parser": parse_expression, "expected_position": None, }, # Parenthesized binary operation { "node": cst.BinaryOperation( lpar=(cst.LeftParen(),), left=cst.Name("foo"), operator=cst.LeftShift(), right=cst.Integer("5"), rpar=(cst.RightParen(),), ), "code": "(foo << 5)", "parser": parse_expression, "expected_position": None, }, # Make sure that spacing works { "node": cst.BinaryOperation( 
lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), left=cst.Name("foo"), operator=cst.Multiply( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), right=cst.Name("bar"), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "code": "( foo * bar )", "parser": parse_expression, "expected_position": CodeRange((1, 2), (1, 13)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": ( lambda: cst.BinaryOperation( cst.Name("foo"), cst.Add(), cst.Name("bar"), lpar=(cst.LeftParen(),), ) ), "expected_re": "left paren without right paren", }, { "get_node": ( lambda: cst.BinaryOperation( cst.Name("foo"), cst.Add(), cst.Name("bar"), rpar=(cst.RightParen(),), ) ), "expected_re": "right paren without left paren", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) @data_provider( ( { "code": '"a"' * 6000, "parser": parse_expression, }, { "code": "[_" + " for _ in _" * 6000 + "]", "parser": parse_expression, }, ) ) def test_parse_error(self, **kwargs: Any) -> None: self.assert_parses(**kwargs, expect_success=not is_native()) LibCST-1.2.0/libcst/_nodes/tests/test_boolean_op.py000066400000000000000000000102761456464173300222340ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class BooleanOperationTest(CSTNodeTest): @data_provider( ( # Simple boolean operations { "node": cst.BooleanOperation( cst.Name("foo"), cst.And(), cst.Name("bar") ), "code": "foo and bar", "parser": parse_expression, "expected_position": None, }, { "node": cst.BooleanOperation( cst.Name("foo"), cst.Or(), cst.Name("bar") ), "code": "foo or bar", "parser": parse_expression, "expected_position": None, }, # Parenthesized boolean operation { "node": cst.BooleanOperation( lpar=(cst.LeftParen(),), left=cst.Name("foo"), operator=cst.Or(), right=cst.Name("bar"), rpar=(cst.RightParen(),), ), "code": "(foo or bar)", "parser": parse_expression, "expected_position": None, }, { "node": cst.BooleanOperation( left=cst.Name( "foo", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), operator=cst.Or( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), right=cst.Name( "bar", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), ), "code": "(foo)or(bar)", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 12)), }, # Make sure that spacing works { "node": cst.BooleanOperation( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), left=cst.Name("foo"), operator=cst.And( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), right=cst.Name("bar"), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "code": "( foo and bar )", "parser": parse_expression, "expected_position": None, }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": lambda: cst.BooleanOperation( cst.Name("foo"), cst.And(), cst.Name("bar"), lpar=(cst.LeftParen(),) ), "expected_re": "left paren without right paren", }, { "get_node": lambda: cst.BooleanOperation( cst.Name("foo"), 
cst.And(), cst.Name("bar"), rpar=(cst.RightParen(),), ), "expected_re": "right paren without left paren", }, { "get_node": lambda: cst.BooleanOperation( left=cst.Name("foo"), operator=cst.Or( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), right=cst.Name("bar"), ), "expected_re": "at least one space around boolean operator", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_call.py000066400000000000000000000542521456464173300210340ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class CallTest(CSTNodeTest): @data_provider( ( # Simple call { "node": cst.Call(cst.Name("foo")), "code": "foo()", "parser": parse_expression, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), whitespace_before_args=cst.SimpleWhitespace(" ") ), "code": "foo( )", "parser": parse_expression, "expected_position": None, }, # Call with attribute dereference { "node": cst.Call(cst.Attribute(cst.Name("foo"), cst.Name("bar"))), "code": "foo.bar()", "parser": parse_expression, "expected_position": None, }, # Positional arguments render test { "node": cst.Call(cst.Name("foo"), (cst.Arg(cst.Integer("1")),)), "code": "foo(1)", "parser": None, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), "code": "foo(1, 2, 3)", "parser": None, "expected_position": None, }, # Positional arguments parse test { "node": cst.Call(cst.Name("foo"), (cst.Arg(value=cst.Integer("1")),)), "code": "foo(1)", "parser": parse_expression, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg( value=cst.Integer("1"), whitespace_after_arg=cst.SimpleWhitespace(" "), ), ), whitespace_after_func=cst.SimpleWhitespace(" "), whitespace_before_args=cst.SimpleWhitespace(" "), ), "code": "foo ( 1 )", "parser": parse_expression, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg( value=cst.Integer("1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), ), whitespace_after_func=cst.SimpleWhitespace(" "), whitespace_before_args=cst.SimpleWhitespace(" "), ), "code": "foo ( 1, )", "parser": parse_expression, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg( value=cst.Integer("1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( value=cst.Integer("2"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg(value=cst.Integer("3")), ), ), "code": "foo(1, 2, 3)", "parser": parse_expression, "expected_position": None, }, # Keyword arguments render test { "node": cst.Call( cst.Name("foo"), (cst.Arg(keyword=cst.Name("one"), value=cst.Integer("1")),), ), "code": "foo(one = 1)", "parser": None, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg(keyword=cst.Name("one"), value=cst.Integer("1")), cst.Arg(keyword=cst.Name("two"), value=cst.Integer("2")), cst.Arg(keyword=cst.Name("three"), value=cst.Integer("3")), ), ), "code": "foo(one = 1, two = 2, three = 3)", "parser": None, "expected_position": None, }, # Keyword arguments 
parser test { "node": cst.Call( cst.Name("foo"), ( cst.Arg( keyword=cst.Name("one"), equal=cst.AssignEqual(), value=cst.Integer("1"), ), ), ), "code": "foo(one = 1)", "parser": parse_expression, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg( keyword=cst.Name("one"), equal=cst.AssignEqual(), value=cst.Integer("1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( keyword=cst.Name("two"), equal=cst.AssignEqual(), value=cst.Integer("2"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( keyword=cst.Name("three"), equal=cst.AssignEqual(), value=cst.Integer("3"), ), ), ), "code": "foo(one = 1, two = 2, three = 3)", "parser": parse_expression, "expected_position": None, }, # Iterator expansion render test { "node": cst.Call( cst.Name("foo"), (cst.Arg(star="*", value=cst.Name("one")),) ), "code": "foo(*one)", "parser": None, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg(star="*", value=cst.Name("one")), cst.Arg(star="*", value=cst.Name("two")), cst.Arg(star="*", value=cst.Name("three")), ), ), "code": "foo(*one, *two, *three)", "parser": None, "expected_position": None, }, # Iterator expansion parser test { "node": cst.Call( cst.Name("foo"), (cst.Arg(star="*", value=cst.Name("one")),) ), "code": "foo(*one)", "parser": parse_expression, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg( star="*", value=cst.Name("one"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="*", value=cst.Name("two"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg(star="*", value=cst.Name("three")), ), ), "code": "foo(*one, *two, *three)", "parser": parse_expression, "expected_position": None, }, # Dictionary expansion render test { "node": cst.Call( cst.Name("foo"), (cst.Arg(star="**", value=cst.Name("one")),) ), "code": "foo(**one)", "parser": None, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg(star="**", value=cst.Name("one")), cst.Arg(star="**", value=cst.Name("two")), cst.Arg(star="**", value=cst.Name("three")), ), ), "code": "foo(**one, **two, **three)", "parser": None, "expected_position": None, }, # Dictionary expansion parser test { "node": cst.Call( cst.Name("foo"), (cst.Arg(star="**", value=cst.Name("one")),) ), "code": "foo(**one)", "parser": parse_expression, "expected_position": None, }, { "node": cst.Call( cst.Name("foo"), ( cst.Arg( star="**", value=cst.Name("one"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="**", value=cst.Name("two"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg(star="**", value=cst.Name("three")), ), ), "code": "foo(**one, **two, **three)", "parser": parse_expression, "expected_position": None, }, # Complicated mingling rules render test { "node": cst.Call( cst.Name("foo"), ( cst.Arg(value=cst.Name("pos1")), cst.Arg(star="*", value=cst.Name("list1")), cst.Arg(value=cst.Name("pos2")), cst.Arg(value=cst.Name("pos3")), cst.Arg(star="*", value=cst.Name("list2")), cst.Arg(value=cst.Name("pos4")), cst.Arg(star="*", value=cst.Name("list3")), cst.Arg(keyword=cst.Name("kw1"), value=cst.Integer("1")), cst.Arg(star="*", value=cst.Name("list4")), cst.Arg(keyword=cst.Name("kw2"), value=cst.Integer("2")), cst.Arg(star="*", value=cst.Name("list5")), cst.Arg(keyword=cst.Name("kw3"), value=cst.Integer("3")), cst.Arg(star="**", value=cst.Name("dict1")), cst.Arg(keyword=cst.Name("kw4"), value=cst.Integer("4")), 
cst.Arg(star="**", value=cst.Name("dict2")), ), ), "code": "foo(pos1, *list1, pos2, pos3, *list2, pos4, *list3, kw1 = 1, *list4, kw2 = 2, *list5, kw3 = 3, **dict1, kw4 = 4, **dict2)", "parser": None, "expected_position": None, }, # Complicated mingling rules parser test { "node": cst.Call( cst.Name("foo"), ( cst.Arg( value=cst.Name("pos1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="*", value=cst.Name("list1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( value=cst.Name("pos2"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( value=cst.Name("pos3"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="*", value=cst.Name("list2"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( value=cst.Name("pos4"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="*", value=cst.Name("list3"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( keyword=cst.Name("kw1"), equal=cst.AssignEqual(), value=cst.Integer("1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="*", value=cst.Name("list4"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( keyword=cst.Name("kw2"), equal=cst.AssignEqual(), value=cst.Integer("2"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="*", value=cst.Name("list5"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( keyword=cst.Name("kw3"), equal=cst.AssignEqual(), value=cst.Integer("3"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="**", value=cst.Name("dict1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( keyword=cst.Name("kw4"), equal=cst.AssignEqual(), value=cst.Integer("4"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg(star="**", value=cst.Name("dict2")), ), ), "code": "foo(pos1, *list1, pos2, pos3, *list2, pos4, *list3, kw1 = 1, *list4, kw2 = 2, *list5, kw3 = 3, **dict1, kw4 = 4, **dict2)", "parser": parse_expression, "expected_position": None, }, # Test whitespace { "node": cst.Call( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), func=cst.Name("foo"), whitespace_after_func=cst.SimpleWhitespace(" "), whitespace_before_args=cst.SimpleWhitespace(" "), args=( cst.Arg( keyword=None, value=cst.Name("pos1"), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), cst.Arg( star="*", whitespace_after_star=cst.SimpleWhitespace(" "), keyword=None, value=cst.Name("list1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( keyword=cst.Name("kw1"), equal=cst.AssignEqual( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), value=cst.Integer("1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="**", keyword=None, whitespace_after_star=cst.SimpleWhitespace(" "), value=cst.Name("dict1"), whitespace_after_arg=cst.SimpleWhitespace(" "), ), ), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "code": "( foo ( pos1 , * list1, kw1=1, ** dict1 ) )", "parser": parse_expression, "expected_position": CodeRange((1, 2), (1, 43)), }, # Test args { "node": cst.Arg( star="*", whitespace_after_star=cst.SimpleWhitespace(" "), keyword=None, value=cst.Name("list1"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), "code": "* list1, 
", "parser": None, "expected_position": CodeRange((1, 0), (1, 8)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( # Basic expression parenthesizing tests. { "get_node": lambda: cst.Call( func=cst.Name("foo"), lpar=(cst.LeftParen(),) ), "expected_re": "left paren without right paren", }, { "get_node": lambda: cst.Call( func=cst.Name("foo"), rpar=(cst.RightParen(),) ), "expected_re": "right paren without left paren", }, # Test that we handle keyword stuff correctly. { "get_node": lambda: cst.Call( func=cst.Name("foo"), args=( cst.Arg( equal=cst.AssignEqual(), value=cst.SimpleString("'baz'") ), ), ), "expected_re": "Must have a keyword when specifying an AssignEqual", }, # Test that we separate *, ** and keyword args correctly { "get_node": lambda: cst.Call( func=cst.Name("foo"), args=( cst.Arg( star="*", keyword=cst.Name("bar"), value=cst.SimpleString("'baz'"), ), ), ), "expected_re": "Cannot specify a star and a keyword together", }, # Test for expected star inputs only { "get_node": lambda: cst.Call( func=cst.Name("foo"), # pyre-ignore: Ignore type on 'star' since we're testing behavior # when somebody isn't using a type checker. args=(cst.Arg(star="***", value=cst.SimpleString("'baz'")),), ), "expected_re": r"Must specify either '', '\*' or '\*\*' for star", }, # Test ordering exceptions { "get_node": lambda: cst.Call( func=cst.Name("foo"), args=( cst.Arg(star="**", value=cst.Name("bar")), cst.Arg(star="*", value=cst.Name("baz")), ), ), "expected_re": "Cannot have iterable argument unpacking after keyword argument unpacking", }, { "get_node": lambda: cst.Call( func=cst.Name("foo"), args=( cst.Arg(star="**", value=cst.Name("bar")), cst.Arg(value=cst.Name("baz")), ), ), "expected_re": "Cannot have positional argument after keyword argument unpacking", }, { "get_node": lambda: cst.Call( func=cst.Name("foo"), args=( cst.Arg( keyword=cst.Name("arg"), value=cst.SimpleString("'baz'") ), cst.Arg(value=cst.SimpleString("'bar'")), ), ), "expected_re": "Cannot have positional argument after keyword argument", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_classdef.py000066400000000000000000000423761456464173300217110ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import Any, Callable import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class ClassDefCreationTest(CSTNodeTest): @data_provider( ( # Simple classdef { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)) ), "code": "class Foo: pass\n", "expected_position": CodeRange((1, 0), (1, 15)), }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), rpar=cst.RightParen(), ), "code": "class Foo(): pass\n", }, # Positional arguments render test { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), bases=(cst.Arg(cst.Name("obj")),), ), "code": "class Foo(obj): pass\n", }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), bases=( cst.Arg(cst.Name("Bar")), cst.Arg(cst.Name("Baz")), cst.Arg(cst.Name("object")), ), ), "code": "class Foo(Bar, Baz, object): pass\n", }, # Keyword arguments render test { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), keywords=( cst.Arg(keyword=cst.Name("metaclass"), value=cst.Name("Bar")), ), ), "code": "class Foo(metaclass = Bar): pass\n", "expected_position": CodeRange((1, 0), (1, 32)), }, # Iterator expansion render test { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), bases=(cst.Arg(star="*", value=cst.Name("one")),), ), "code": "class Foo(*one): pass\n", }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), bases=( cst.Arg(star="*", value=cst.Name("one")), cst.Arg(star="*", value=cst.Name("two")), cst.Arg(star="*", value=cst.Name("three")), ), ), "code": "class Foo(*one, *two, *three): pass\n", }, # Dictionary expansion render test { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), keywords=(cst.Arg(star="**", value=cst.Name("one")),), ), "code": "class Foo(**one): pass\n", }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), keywords=( cst.Arg(star="**", value=cst.Name("one")), cst.Arg(star="**", value=cst.Name("two")), cst.Arg(star="**", value=cst.Name("three")), ), ), "code": "class Foo(**one, **two, **three): pass\n", }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), type_parameters=cst.TypeParameters( ( cst.TypeParam( cst.TypeVar( cst.Name("T"), bound=cst.Name("int"), colon=cst.Colon( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.TypeParam( cst.TypeVarTuple(cst.Name("Ts")), cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.TypeParam(cst.ParamSpec(cst.Name("KW"))), ) ), ), "code": "class Foo[T: int, *Ts, **KW]: pass\n", }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), type_parameters=cst.TypeParameters( params=( cst.TypeParam( param=cst.TypeVar( cst.Name("T"), bound=cst.Name("str"), colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.ParenthesizedWhitespace( empty_lines=(cst.EmptyLine(),), indent=True, ), ), ), comma=cst.Comma(cst.SimpleWhitespace(" ")), ), cst.TypeParam( cst.ParamSpec( cst.Name("PS"), cst.SimpleWhitespace(" ") ), cst.Comma(cst.SimpleWhitespace(" ")), ), ) ), 
whitespace_after_type_parameters=cst.SimpleWhitespace(" "), ), "code": "class Foo[T :\n\nstr ,** PS ,] : pass\n", }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), type_parameters=cst.TypeParameters( params=( cst.TypeParam( param=cst.TypeVar( cst.Name("T"), bound=cst.Name("str"), colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.ParenthesizedWhitespace( empty_lines=(cst.EmptyLine(),), indent=True, ), ), ), comma=cst.Comma(cst.SimpleWhitespace(" ")), ), cst.TypeParam( cst.ParamSpec( cst.Name("PS"), cst.SimpleWhitespace(" ") ), cst.Comma(cst.SimpleWhitespace(" ")), ), ) ), lpar=cst.LeftParen(), rpar=cst.RightParen(), whitespace_after_type_parameters=cst.SimpleWhitespace(" "), ), "code": "class Foo[T :\n\nstr ,** PS ,] (): pass\n", }, ) ) def test_valid_native(self, **kwargs: Any) -> None: if not is_native(): self.skipTest("Disabled for pure python parser") self.validate_node(**kwargs) @data_provider( ( # Basic parenthesis tests. ( lambda: cst.ClassDef( name=cst.Name("Foo"), body=cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), ), "Do not mix concrete LeftParen/RightParen with MaybeSentinel", ), ( lambda: cst.ClassDef( name=cst.Name("Foo"), body=cst.SimpleStatementSuite((cst.Pass(),)), rpar=cst.RightParen(), ), "Do not mix concrete LeftParen/RightParen with MaybeSentinel", ), # Whitespace validation ( lambda: cst.ClassDef( name=cst.Name("Foo"), body=cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_class=cst.SimpleWhitespace(""), ), "at least one space between 'class' and name", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) class ClassDefParserTest(CSTNodeTest): @data_provider( ( # Simple classdef { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)) ), "code": "class Foo: pass\n", }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), rpar=cst.RightParen(), ), "code": "class Foo(): pass\n", }, # Positional arguments render test { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), bases=(cst.Arg(cst.Name("obj")),), rpar=cst.RightParen(), ), "code": "class Foo(obj): pass\n", }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), bases=( cst.Arg( cst.Name("Bar"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( cst.Name("Baz"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg(cst.Name("object")), ), rpar=cst.RightParen(), ), "code": "class Foo(Bar, Baz, object): pass\n", }, # Keyword arguments render test { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), keywords=( cst.Arg( keyword=cst.Name("metaclass"), equal=cst.AssignEqual(), value=cst.Name("Bar"), ), ), rpar=cst.RightParen(), ), "code": "class Foo(metaclass = Bar): pass\n", }, # Iterator expansion render test { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), bases=(cst.Arg(star="*", value=cst.Name("one")),), rpar=cst.RightParen(), ), "code": "class Foo(*one): pass\n", }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), bases=( cst.Arg( star="*", value=cst.Name("one"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="*", value=cst.Name("two"), 
comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg(star="*", value=cst.Name("three")), ), rpar=cst.RightParen(), ), "code": "class Foo(*one, *two, *three): pass\n", }, # Dictionary expansion render test { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), keywords=(cst.Arg(star="**", value=cst.Name("one")),), rpar=cst.RightParen(), ), "code": "class Foo(**one): pass\n", }, { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), keywords=( cst.Arg( star="**", value=cst.Name("one"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg( star="**", value=cst.Name("two"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.Arg(star="**", value=cst.Name("three")), ), rpar=cst.RightParen(), ), "code": "class Foo(**one, **two, **three): pass\n", }, # Decorator render tests { "node": cst.ClassDef( cst.Name("Foo"), cst.SimpleStatementSuite((cst.Pass(),)), decorators=(cst.Decorator(cst.Name("foo")),), lpar=cst.LeftParen(), rpar=cst.RightParen(), ), "code": "@foo\nclass Foo(): pass\n", "expected_position": CodeRange((2, 0), (2, 17)), }, { "node": cst.ClassDef( leading_lines=( cst.EmptyLine(), cst.EmptyLine(comment=cst.Comment("# leading comment 1")), ), decorators=( cst.Decorator(cst.Name("foo"), leading_lines=()), cst.Decorator( cst.Name("bar"), leading_lines=( cst.EmptyLine( comment=cst.Comment("# leading comment 2") ), ), ), cst.Decorator( cst.Name("baz"), leading_lines=( cst.EmptyLine( comment=cst.Comment("# leading comment 3") ), ), ), ), lines_after_decorators=( cst.EmptyLine(comment=cst.Comment("# class comment")), ), name=cst.Name("Foo"), body=cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(), rpar=cst.RightParen(), ), "code": "\n# leading comment 1\n@foo\n# leading comment 2\n@bar\n# leading comment 3\n@baz\n# class comment\nclass Foo(): pass\n", "expected_position": CodeRange((9, 0), (9, 17)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs, parser=parse_statement) LibCST-1.2.0/libcst/_nodes/tests/test_comment.py000066400000000000000000000021131456464173300215500ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable import libcst as cst from libcst._nodes.tests.base import CSTNodeTest from libcst.testing.utils import data_provider class CommentTest(CSTNodeTest): @data_provider( ( (cst.Comment("#"), "#"), (cst.Comment("#comment text"), "#comment text"), (cst.Comment("# comment text"), "# comment text"), ) ) def test_valid(self, node: cst.CSTNode, code: str) -> None: self.validate_node(node, code) @data_provider( ( (lambda: cst.Comment(" bad input"), "non-comment"), (lambda: cst.Comment("# newline shouldn't be here\n"), "non-comment"), (lambda: cst.Comment(" # Leading space is wrong"), "non-comment"), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) LibCST-1.2.0/libcst/_nodes/tests/test_comparison.py000066400000000000000000000325101456464173300222640ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
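# A minimal sketch of the node shape the Comparison tests below assert on
# (a hedged example, not part of the test suite): a chained comparison such as
# `a < b <= c` parses to a single `Comparison` node carrying one
# `ComparisonTarget` per operator/comparator pair.
import libcst as cst

_expr = cst.ensure_type(cst.parse_expression("a < b <= c"), cst.Comparison)
assert _expr.left.deep_equals(cst.Name("a"))
assert len(_expr.comparisons) == 2
assert isinstance(_expr.comparisons[0].operator, cst.LessThan)
assert isinstance(_expr.comparisons[1].operator, cst.LessThanEqual)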
from typing import Callable, Optional import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class ComparisonTest(CSTNodeTest): @data_provider( ( # Simple comparison statements ( cst.Comparison( cst.Name("foo"), (cst.ComparisonTarget(cst.LessThan(), cst.Integer("5")),), ), "foo < 5", ), ( cst.Comparison( cst.Name("foo"), (cst.ComparisonTarget(cst.NotEqual(), cst.Integer("5")),), ), "foo != 5", ), ( cst.Comparison( cst.Name("foo"), (cst.ComparisonTarget(cst.Is(), cst.Name("True")),) ), "foo is True", ), ( cst.Comparison( cst.Name("foo"), (cst.ComparisonTarget(cst.IsNot(), cst.Name("False")),), ), "foo is not False", ), ( cst.Comparison( cst.Name("foo"), (cst.ComparisonTarget(cst.In(), cst.Name("bar")),) ), "foo in bar", ), ( cst.Comparison( cst.Name("foo"), (cst.ComparisonTarget(cst.NotIn(), cst.Name("bar")),), ), "foo not in bar", ), # Comparison with parens ( cst.Comparison( lpar=(cst.LeftParen(),), left=cst.Name("foo"), comparisons=( cst.ComparisonTarget( operator=cst.NotIn(), comparator=cst.Name("bar") ), ), rpar=(cst.RightParen(),), ), "(foo not in bar)", ), ( cst.Comparison( left=cst.Name( "a", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), comparisons=( cst.ComparisonTarget( operator=cst.Is( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), comparator=cst.Name( "b", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), ), cst.ComparisonTarget( operator=cst.Is( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), comparator=cst.Name( "c", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), ), ), ), "(a)is(b)is(c)", ), # Valid expressions that look like they shouldn't parse ( cst.Comparison( left=cst.Integer("5"), comparisons=( cst.ComparisonTarget( operator=cst.NotIn( whitespace_before=cst.SimpleWhitespace("") ), comparator=cst.Name("bar"), ), ), ), "5not in bar", ), # Validate that spacing works properly ( cst.Comparison( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), left=cst.Name("foo"), comparisons=( cst.ComparisonTarget( operator=cst.NotIn( whitespace_before=cst.SimpleWhitespace(" "), whitespace_between=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), comparator=cst.Name("bar"), ), ), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "( foo not in bar )", ), # Do some complex nodes ( cst.Comparison( left=cst.Name("baz"), comparisons=( cst.ComparisonTarget( operator=cst.Equal(), comparator=cst.Comparison( lpar=(cst.LeftParen(),), left=cst.Name("foo"), comparisons=( cst.ComparisonTarget( operator=cst.NotIn(), comparator=cst.Name("bar") ), ), rpar=(cst.RightParen(),), ), ), ), ), "baz == (foo not in bar)", CodeRange((1, 0), (1, 23)), ), ( cst.Comparison( left=cst.Name("a"), comparisons=( cst.ComparisonTarget( operator=cst.GreaterThan(), comparator=cst.Name("b") ), cst.ComparisonTarget( operator=cst.GreaterThan(), comparator=cst.Name("c") ), ), ), "a > b > c", CodeRange((1, 0), (1, 9)), ), # Is safe to use with word operators if it's leading/trailing children are ( cst.IfExp( body=cst.Comparison( left=cst.Name("a"), comparisons=( cst.ComparisonTarget( operator=cst.GreaterThan(), comparator=cst.Name( "b", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), ), ), ), test=cst.Comparison( left=cst.Name( "c", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), comparisons=( cst.ComparisonTarget( 
operator=cst.GreaterThan(), comparator=cst.Name("d") ), ), ), orelse=cst.Name("e"), whitespace_before_if=cst.SimpleWhitespace(""), whitespace_after_if=cst.SimpleWhitespace(""), ), "a > (b)if(c) > d else e", ), # is safe to use with word operators if entirely surrounded in parentheses ( cst.IfExp( body=cst.Name("a"), test=cst.Comparison( left=cst.Name("b"), comparisons=( cst.ComparisonTarget( operator=cst.GreaterThan(), comparator=cst.Name("c") ), ), lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), orelse=cst.Name("d"), whitespace_after_if=cst.SimpleWhitespace(""), whitespace_before_else=cst.SimpleWhitespace(""), ), "a if(b > c)else d", ), ) ) def test_valid( self, node: cst.CSTNode, code: str, position: Optional[CodeRange] = None ) -> None: self.validate_node(node, code, parse_expression, expected_position=position) @data_provider( ( ( lambda: cst.Comparison( cst.Name("foo"), (cst.ComparisonTarget(cst.LessThan(), cst.Integer("5")),), lpar=(cst.LeftParen(),), ), "left paren without right paren", ), ( lambda: cst.Comparison( cst.Name("foo"), (cst.ComparisonTarget(cst.LessThan(), cst.Integer("5")),), rpar=(cst.RightParen(),), ), "right paren without left paren", ), ( lambda: cst.Comparison(cst.Name("foo"), ()), "at least one ComparisonTarget", ), ( lambda: cst.Comparison( left=cst.Name("foo"), comparisons=( cst.ComparisonTarget( operator=cst.NotIn( whitespace_before=cst.SimpleWhitespace("") ), comparator=cst.Name("bar"), ), ), ), "at least one space around comparison operator", ), ( lambda: cst.Comparison( left=cst.Name("foo"), comparisons=( cst.ComparisonTarget( operator=cst.NotIn( whitespace_after=cst.SimpleWhitespace("") ), comparator=cst.Name("bar"), ), ), ), "at least one space around comparison operator", ), # multi-target comparisons ( lambda: cst.Comparison( left=cst.Name("a"), comparisons=( cst.ComparisonTarget( operator=cst.Is(), comparator=cst.Name("b") ), cst.ComparisonTarget( operator=cst.Is(whitespace_before=cst.SimpleWhitespace("")), comparator=cst.Name("c"), ), ), ), "at least one space around comparison operator", ), ( lambda: cst.Comparison( left=cst.Name("a"), comparisons=( cst.ComparisonTarget( operator=cst.Is(), comparator=cst.Name("b") ), cst.ComparisonTarget( operator=cst.Is(whitespace_after=cst.SimpleWhitespace("")), comparator=cst.Name("c"), ), ), ), "at least one space around comparison operator", ), # whitespace around the comparison itself # a ifb > c else d ( lambda: cst.IfExp( body=cst.Name("a"), test=cst.Comparison( left=cst.Name("b"), comparisons=( cst.ComparisonTarget( operator=cst.GreaterThan(), comparator=cst.Name("c") ), ), ), orelse=cst.Name("d"), whitespace_after_if=cst.SimpleWhitespace(""), ), "Must have at least one space after 'if' keyword.", ), # a if b > celse d ( lambda: cst.IfExp( body=cst.Name("a"), test=cst.Comparison( left=cst.Name("b"), comparisons=( cst.ComparisonTarget( operator=cst.GreaterThan(), comparator=cst.Name("c") ), ), ), orelse=cst.Name("d"), whitespace_before_else=cst.SimpleWhitespace(""), ), "Must have at least one space before 'else' keyword.", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) LibCST-1.2.0/libcst/_nodes/tests/test_cst_node.py000066400000000000000000000162331456464173300217140ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
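# --- Editor's illustrative sketch; not part of the original LibCST sources. ---
# The tests below exercise CSTNode's value semantics: `==` is identity-based,
# structural comparison goes through `deep_equals`, and `with_changes` returns
# a modified copy instead of mutating. A hedged sketch of those behaviors
# (the helper name `_demo_node_value_semantics` is hypothetical):
def _demo_node_value_semantics() -> None:
    import libcst as cst

    a = cst.SimpleWhitespace("")
    b = cst.SimpleWhitespace("")
    assert a != b            # distinct node objects are never `==` ...
    assert a.deep_equals(b)  # ... but they can still be structurally equal
    c = a.with_changes(value=" ")
    assert (c.value, a.value) == (" ", "")  # `a` itself is left untouched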
from textwrap import dedent from typing import Union import libcst as cst from libcst._removal_sentinel import RemovalSentinel from libcst._types import CSTNodeT from libcst._visitors import CSTTransformer from libcst.testing.utils import data_provider, none_throws, UnitTest _EMPTY_SIMPLE_WHITESPACE = cst.SimpleWhitespace("") class _TestVisitor(CSTTransformer): def __init__(self, test: UnitTest) -> None: self.counter = 0 self.test = test def assert_counter(self, expected: int) -> None: self.test.assertEqual(self.counter, expected) self.counter += 1 def on_visit(self, node: cst.CSTNode) -> bool: if isinstance(node, cst.Module): self.assert_counter(0) elif isinstance(node, cst.SimpleStatementLine): self.assert_counter(1) elif isinstance(node, cst.Pass): self.assert_counter(2) elif isinstance(node, cst.Newline): self.assert_counter(4) return True def on_leave( self, original_node: CSTNodeT, updated_node: CSTNodeT ) -> Union[CSTNodeT, RemovalSentinel]: self.test.assertTrue(original_node.deep_equals(updated_node)) # Don't allow type checkers to accidentally refine our return type. return_node = updated_node if isinstance(updated_node, cst.Pass): self.assert_counter(3) elif isinstance(updated_node, cst.Newline): self.assert_counter(5) elif isinstance(updated_node, cst.SimpleStatementLine): self.assert_counter(6) elif isinstance(updated_node, cst.Module): self.assert_counter(7) return return_node class CSTNodeTest(UnitTest): def test_with_changes(self) -> None: initial = cst.TrailingWhitespace( whitespace=cst.SimpleWhitespace(" \\\n "), comment=cst.Comment("# initial"), newline=cst.Newline("\r\n"), ) changed = initial.with_changes(comment=cst.Comment("# new comment")) # see that we have the updated fields self.assertEqual(none_throws(changed.comment).value, "# new comment") # and that the old fields are still there self.assertEqual(changed.whitespace.value, " \\\n ") self.assertEqual(changed.newline.value, "\r\n") # ensure no mutation actually happened self.assertEqual(none_throws(initial.comment).value, "# initial") def test_default_eq(self) -> None: sw1 = cst.SimpleWhitespace("") sw2 = cst.SimpleWhitespace("") self.assertNotEqual(sw1, sw2) self.assertEqual(sw1, sw1) self.assertEqual(sw2, sw2) self.assertTrue(sw1.deep_equals(sw2)) self.assertTrue(sw2.deep_equals(sw1)) def test_hash(self) -> None: sw1 = cst.SimpleWhitespace("") sw2 = cst.SimpleWhitespace("") self.assertNotEqual(hash(sw1), hash(sw2)) self.assertEqual(hash(sw1), hash(sw1)) self.assertEqual(hash(sw2), hash(sw2)) @data_provider( { "simple": (cst.SimpleWhitespace(""), cst.SimpleWhitespace("")), "identity": (_EMPTY_SIMPLE_WHITESPACE, _EMPTY_SIMPLE_WHITESPACE), "nested": ( cst.EmptyLine(whitespace=cst.SimpleWhitespace("")), cst.EmptyLine(whitespace=cst.SimpleWhitespace("")), ), "tuple_versus_list": ( cst.SimpleStatementLine(body=[cst.Pass()]), cst.SimpleStatementLine(body=(cst.Pass(),)), ), } ) def test_deep_equals_success(self, a: cst.CSTNode, b: cst.CSTNode) -> None: self.assertTrue(a.deep_equals(b)) @data_provider( { "simple": (cst.SimpleWhitespace(" "), cst.SimpleWhitespace("  ")), "nested": ( cst.EmptyLine(whitespace=cst.SimpleWhitespace(" ")), cst.EmptyLine(whitespace=cst.SimpleWhitespace("  ")), ), "list": ( cst.SimpleStatementLine(body=[cst.Pass(semicolon=cst.Semicolon())]), cst.SimpleStatementLine(body=[cst.Pass(semicolon=cst.Semicolon())] * 2), ), } ) def test_deep_equals_fails(self, a: cst.CSTNode, b: cst.CSTNode) -> None: self.assertFalse(a.deep_equals(b)) def test_repr(self) -> None: self.assertEqual( repr(
cst.SimpleStatementLine( body=[cst.Pass()], # tuple with multiple items leading_lines=( cst.EmptyLine( indent=True, whitespace=cst.SimpleWhitespace(""), comment=None, newline=cst.Newline(), ), cst.EmptyLine( indent=True, whitespace=cst.SimpleWhitespace(""), comment=None, newline=cst.Newline(), ), ), trailing_whitespace=cst.TrailingWhitespace( whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# comment"), newline=cst.Newline(), ), ) ), dedent( """ SimpleStatementLine( body=[ Pass( semicolon=MaybeSentinel.DEFAULT, ), ], leading_lines=[ EmptyLine( indent=True, whitespace=SimpleWhitespace( value='', ), comment=None, newline=Newline( value=None, ), ), EmptyLine( indent=True, whitespace=SimpleWhitespace( value='', ), comment=None, newline=Newline( value=None, ), ), ], trailing_whitespace=TrailingWhitespace( whitespace=SimpleWhitespace( value=' ', ), comment=Comment( value='# comment', ), newline=Newline( value=None, ), ), ) """ ).strip(), ) def test_visit(self) -> None: tree = cst.Module((cst.SimpleStatementLine((cst.Pass(),)),)) tree.visit(_TestVisitor(self)) LibCST-1.2.0/libcst/_nodes/tests/test_del.py000066400000000000000000000047731456464173300206660ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class DelTest(CSTNodeTest): @data_provider( ( { "node": cst.SimpleStatementLine([cst.Del(cst.Name("abc"))]), "code": "del abc\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 7)), }, { "node": cst.SimpleStatementLine( [ cst.Del( cst.Name("abc"), whitespace_after_del=cst.SimpleWhitespace("   "), ) ] ), "code": "del   abc\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 9)), }, { "node": cst.SimpleStatementLine( [ cst.Del( cst.Name( "abc", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ), whitespace_after_del=cst.SimpleWhitespace(""), ) ] ), "code": "del(abc)\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 8)), }, { "node": cst.SimpleStatementLine( [cst.Del(cst.Name("abc"), semicolon=cst.Semicolon())] ), "code": "del abc;\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 7)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": lambda: cst.Del( cst.Name("abc"), whitespace_after_del=cst.SimpleWhitespace("") ), "expected_re": "Must have at least one space after 'del'.", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_dict.py000066400000000000000000000157351456464173300210470ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
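# --- Editor's illustrative sketch; not part of the original LibCST sources. ---
# The dict tests below pair hand-built nodes with their rendered code. The same
# round trip also works in the other direction, since LibCST parsing is
# lossless (helper name `_demo_dict_round_trip` is hypothetical):
def _demo_dict_round_trip() -> None:
    import libcst as cst

    node = cst.parse_expression("{**one, **two}")
    assert isinstance(node, cst.Dict)
    # Rendering the parsed node reproduces the original source exactly.
    assert cst.Module([]).code_for_node(node) == "{**one, **two}"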
from typing import Any import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest, parse_expression_as from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class DictTest(CSTNodeTest): @data_provider( [ # zero-element dict { "node": cst.Dict([]), "code": "{}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 2)), }, # one-element dict, sentinel comma value { "node": cst.Dict([cst.DictElement(cst.Name("k"), cst.Name("v"))]), "code": "{k: v}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 6)), }, { "node": cst.Dict([cst.StarredDictElement(cst.Name("expanded"))]), "code": "{**expanded}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 12)), }, # two-element dict, sentinel comma value { "node": cst.Dict( [ cst.DictElement(cst.Name("k1"), cst.Name("v1")), cst.DictElement(cst.Name("k2"), cst.Name("v2")), ] ), "code": "{k1: v1, k2: v2}", "parser": None, "expected_position": CodeRange((1, 0), (1, 16)), }, # custom whitespace between brackets { "node": cst.Dict( [cst.DictElement(cst.Name("k"), cst.Name("v"))], lbrace=cst.LeftCurlyBrace( whitespace_after=cst.SimpleWhitespace("\t") ), rbrace=cst.RightCurlyBrace( whitespace_before=cst.SimpleWhitespace("\t\t") ), ), "code": "{\tk: v\t\t}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 9)), }, # with parentheses { "node": cst.Dict( [cst.DictElement(cst.Name("k"), cst.Name("v"))], lpar=[cst.LeftParen()], rpar=[cst.RightParen()], ), "code": "({k: v})", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 7)), }, # starred element { "node": cst.Dict( [ cst.StarredDictElement(cst.Name("one")), cst.StarredDictElement(cst.Name("two")), ] ), "code": "{**one, **two}", "parser": None, "expected_position": CodeRange((1, 0), (1, 14)), }, # custom comma on DictElement { "node": cst.Dict( [cst.DictElement(cst.Name("k"), cst.Name("v"), comma=cst.Comma())] ), "code": "{k: v,}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 7)), }, # custom comma on StarredDictElement { "node": cst.Dict( [cst.StarredDictElement(cst.Name("expanded"), comma=cst.Comma())] ), "code": "{**expanded,}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 13)), }, # custom whitespace on DictElement { "node": cst.Dict( [ cst.DictElement( cst.Name("k"), cst.Name("v"), whitespace_before_colon=cst.SimpleWhitespace("\t"), whitespace_after_colon=cst.SimpleWhitespace("\t\t"), ) ] ), "code": "{k\t:\t\tv}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 8)), }, # custom whitespace on StarredDictElement { "node": cst.Dict( [ cst.DictElement( cst.Name("k"), cst.Name("v"), comma=cst.Comma() ), cst.StarredDictElement( cst.Name("expanded"), whitespace_before_value=cst.SimpleWhitespace("  "), ), ] ), "code": "{k: v,**  expanded}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 19)), }, # missing spaces around dict is always okay { "node": cst.GeneratorExp( cst.Name("a"), cst.CompFor( cst.Name("b"), cst.Dict([cst.DictElement(cst.Name("k"), cst.Name("v"))]), ifs=[ cst.CompIf( cst.Name("c"), whitespace_before=cst.SimpleWhitespace(""), ) ], whitespace_after_in=cst.SimpleWhitespace(""), ), ), "parser": parse_expression, "code": "(a for b in{k: v}if c)", }, ] ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( [ # unbalanced Dict { "get_node":
lambda: cst.Dict([], lpar=[cst.LeftParen()]), "expected_re": "left paren without right paren", } ] ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) @data_provider( ( { "code": "{**{}}", "parser": parse_expression_as(python_version="3.5"), "expect_success": True, }, { "code": "{**{}}", "parser": parse_expression_as(python_version="3.3"), "expect_success": False, }, ) ) def test_versions(self, **kwargs: Any) -> None: if is_native() and not kwargs.get("expect_success", True): self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_dict_comp.py000066400000000000000000000146401456464173300220570ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class DictCompTest(CSTNodeTest): @data_provider( [ # simple DictComp { "node": cst.DictComp( cst.Name("k"), cst.Name("v"), cst.CompFor(target=cst.Name("a"), iter=cst.Name("b")), ), "code": "{k: v for a in b}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 17)), }, # non-trivial keys & values in DictComp { "node": cst.DictComp( cst.BinaryOperation(cst.Name("k1"), cst.Add(), cst.Name("k2")), cst.BinaryOperation(cst.Name("v1"), cst.Add(), cst.Name("v2")), cst.CompFor(target=cst.Name("a"), iter=cst.Name("b")), ), "code": "{k1 + k2: v1 + v2 for a in b}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 29)), }, # custom whitespace around colon { "node": cst.DictComp( cst.Name("k"), cst.Name("v"), cst.CompFor(target=cst.Name("a"), iter=cst.Name("b")), whitespace_before_colon=cst.SimpleWhitespace("\t"), whitespace_after_colon=cst.SimpleWhitespace("\t\t"), ), "code": "{k\t:\t\tv for a in b}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 19)), }, # custom whitespace inside braces { "node": cst.DictComp( cst.Name("k"), cst.Name("v"), cst.CompFor(target=cst.Name("a"), iter=cst.Name("b")), lbrace=cst.LeftCurlyBrace( whitespace_after=cst.SimpleWhitespace("\t") ), rbrace=cst.RightCurlyBrace( whitespace_before=cst.SimpleWhitespace("\t\t") ), ), "code": "{\tk: v for a in b\t\t}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 20)), }, # parenthesis { "node": cst.DictComp( cst.Name("k"), cst.Name("v"), cst.CompFor(target=cst.Name("a"), iter=cst.Name("b")), lpar=[cst.LeftParen()], rpar=[cst.RightParen()], ), "code": "({k: v for a in b})", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 18)), }, # missing spaces around DictComp is always okay { "node": cst.DictComp( cst.Name("a"), cst.Name("b"), cst.CompFor( target=cst.Name("c"), iter=cst.DictComp( cst.Name("d"), cst.Name("e"), cst.CompFor(target=cst.Name("f"), iter=cst.Name("g")), ), ifs=[ cst.CompIf( cst.Name("h"), whitespace_before=cst.SimpleWhitespace(""), ) ], whitespace_after_in=cst.SimpleWhitespace(""), ), ), "code": "{a: b for c in{d: e for f in g}if h}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 36)), }, # no whitespace before `for` clause { "node": cst.DictComp( cst.Name("k"), cst.Name("v", lpar=[cst.LeftParen()], rpar=[cst.RightParen()]), cst.CompFor( target=cst.Name("a"), iter=cst.Name("b"), 
whitespace_before=cst.SimpleWhitespace(""), ), ), "code": "{k: (v)for a in b}", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 18)), }, ] ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( [ # unbalanced DictComp { "get_node": lambda: cst.DictComp( cst.Name("k"), cst.Name("v"), cst.CompFor(target=cst.Name("a"), iter=cst.Name("b")), lpar=[cst.LeftParen()], ), "expected_re": "left paren without right paren", }, # invalid whitespace before for/async { "get_node": lambda: cst.DictComp( cst.Name("k"), cst.Name("v"), cst.CompFor( target=cst.Name("a"), iter=cst.Name("b"), whitespace_before=cst.SimpleWhitespace(""), ), ), "expected_re": "Must have at least one space before 'for' keyword.", }, { "get_node": lambda: cst.DictComp( cst.Name("k"), cst.Name("v"), cst.CompFor( target=cst.Name("a"), iter=cst.Name("b"), asynchronous=cst.Asynchronous(), whitespace_before=cst.SimpleWhitespace(""), ), ), "expected_re": "Must have at least one space before 'async' keyword.", }, ] ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_docstring.py000066400000000000000000000077551456464173300221230ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from textwrap import dedent from typing import Optional import libcst as cst from libcst.helpers import ensure_type from libcst.testing.utils import data_provider, UnitTest class DocstringTest(UnitTest): @data_provider( ( ("", None), ('""', ""), ("# comment is not docstring", None), ( ''' # comment """docstring in triple quotes.""" ''', "docstring in triple quotes.", ), ( '''"docstring in single quotes."''', "docstring in single quotes.", ), ( ''' # comment """docstring in """ "concatenated strings." ''', "docstring in concatenated strings.", ), ) ) def test_module_docstring(self, code: str, docstring: Optional[str]) -> None: self.assertEqual(cst.parse_module(dedent(code)).get_docstring(), docstring) @data_provider( ( ( """ def f(): # comment" pass """, None, ), ('def f():"docstring"', "docstring"), ( ''' def f(): """ This function has no input and always returns None. """ ''', "This function has no input\nand always returns None.", ), ( """ def fn(): # comment 1 # comment 2 "docstring" """, "docstring", ), ( """ def fn(): ("docstring") """, "docstring", ), ) ) def test_function_docstring(self, code: str, docstring: Optional[str]) -> None: self.assertEqual( ensure_type( cst.parse_statement(dedent(code)), cst.FunctionDef ).get_docstring(), docstring, ) @data_provider( ( ( """ class C: # comment" pass """, None, ), ('class C(Base):"docstring"', "docstring"), ( ''' class C(Base): # a comment """ This class has a multi- line docstring. """ ''', "This class has a multi-\nline docstring.", ), ( """ class C(A, B): # comment 1 # comment 2 "docstring" """, "docstring", ), ) ) def test_class_docstring(self, code: str, docstring: Optional[str]) -> None: self.assertEqual( ensure_type( cst.parse_statement(dedent(code)), cst.ClassDef ).get_docstring(), docstring, ) def test_clean_docstring(self) -> None: code = ''' """ A docstring with indentation one first line and the second line. 
""" ''' self.assertEqual( cst.parse_module(dedent(code)).get_docstring(), "A docstring with indentation one first line\nand the second line.", ) self.assertEqual( cst.parse_module(dedent(code)).get_docstring(clean=False), " A docstring with indentation one first line\n and the second line.\n", ) LibCST-1.2.0/libcst/_nodes/tests/test_else.py000066400000000000000000000020351456464173300210410ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class ElseTest(CSTNodeTest): @data_provider( ( { "node": cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), "code": "else: pass\n", "expected_position": CodeRange((1, 0), (1, 10)), }, { "node": cst.Else( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_before_colon=cst.SimpleWhitespace(" "), ), "code": "else : pass\n", "expected_position": CodeRange((1, 0), (1, 12)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_empty_line.py000066400000000000000000000024061456464173300222600ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import libcst as cst from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock from libcst.testing.utils import data_provider class EmptyLineTest(CSTNodeTest): @data_provider( ( (cst.EmptyLine(), "\n"), (cst.EmptyLine(whitespace=cst.SimpleWhitespace(" ")), " \n"), (cst.EmptyLine(comment=cst.Comment("# comment")), "# comment\n"), (cst.EmptyLine(newline=cst.Newline("\r\n")), "\r\n"), (DummyIndentedBlock(" ", cst.EmptyLine()), " \n"), (DummyIndentedBlock(" ", cst.EmptyLine(indent=False)), "\n"), ( DummyIndentedBlock( "\t", cst.EmptyLine( whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# comment"), newline=cst.Newline("\r\n"), ), ), "\t # comment\r\n", ), ) ) def test_valid(self, node: cst.CSTNode, code: str) -> None: self.validate_node(node, code) LibCST-1.2.0/libcst/_nodes/tests/test_flatten_behavior.py000066400000000000000000000052601456464173300234300ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import Type, Union import libcst as cst from libcst import FlattenSentinel, parse_expression, parse_module, RemovalSentinel from libcst._nodes.tests.base import CSTNodeTest from libcst._types import CSTNodeT from libcst._visitors import CSTTransformer from libcst.testing.utils import data_provider class InsertPrintBeforeReturn(CSTTransformer): def leave_Return( self, original_node: cst.Return, updated_node: cst.Return ) -> Union[cst.Return, RemovalSentinel, FlattenSentinel[cst.BaseSmallStatement]]: return FlattenSentinel( [ cst.Expr(parse_expression("print('returning')")), updated_node, ] ) class FlattenLines(CSTTransformer): def on_leave( self, original_node: CSTNodeT, updated_node: CSTNodeT ) -> Union[CSTNodeT, RemovalSentinel, FlattenSentinel[cst.SimpleStatementLine]]: if isinstance(updated_node, cst.SimpleStatementLine): return FlattenSentinel( [ cst.SimpleStatementLine( [stmt.with_changes(semicolon=cst.MaybeSentinel.DEFAULT)] ) for stmt in updated_node.body ] ) else: return updated_node class RemoveReturnWithEmpty(CSTTransformer): def leave_Return( self, original_node: cst.Return, updated_node: cst.Return ) -> Union[cst.Return, RemovalSentinel, FlattenSentinel[cst.BaseSmallStatement]]: return FlattenSentinel([]) class FlattenBehavior(CSTNodeTest): @data_provider( ( ("return", "print('returning'); return", InsertPrintBeforeReturn), ( "print('returning'); return", "print('returning')\nreturn", FlattenLines, ), ( "print('returning')\nreturn", "print('returning')", RemoveReturnWithEmpty, ), ) ) def test_flatten_pass_behavior( self, before: str, after: str, visitor: Type[CSTTransformer] ) -> None: # Test doesn't have newline termination case before_module = parse_module(before) after_module = before_module.visit(visitor()) self.assertEqual(after, after_module.code) # Test does have newline termination case before_module = parse_module(before + "\n") after_module = before_module.visit(visitor()) self.assertEqual(after + "\n", after_module.code) LibCST-1.2.0/libcst/_nodes/tests/test_for.py000066400000000000000000000167611456464173300207120ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
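# --- Editor's illustrative sketch; not part of the original LibCST sources. ---
# The tests below build `cst.For` nodes and compare their rendered code. A
# small self-contained example with default whitespace (the helper name
# `_demo_for_construction` is hypothetical):
def _demo_for_construction() -> None:
    import libcst as cst

    node = cst.For(
        target=cst.Name("i"),
        iter=cst.Name("xs"),
        body=cst.SimpleStatementSuite((cst.Pass(),)),
    )
    # Defaults give single spaces around `for`/`in` and no space before `:`.
    assert cst.Module([]).code_for_node(node) == "for i in xs: pass\n"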
from typing import Any import libcst as cst from libcst import parse_statement, PartialParserConfig from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class ForTest(CSTNodeTest): @data_provider( ( # Simple for block { "node": cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "for target in iter(): pass\n", "parser": parse_statement, }, # Simple async for block { "node": cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), asynchronous=cst.Asynchronous(), ), "code": "async for target in iter(): pass\n", "parser": lambda code: parse_statement( code, config=PartialParserConfig(python_version="3.7") ), }, # Python 3.6 async for block { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.IndentedBlock( ( cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), asynchronous=cst.Asynchronous(), ), ) ), asynchronous=cst.Asynchronous(), ), "code": "async def foo():\n    async for target in iter(): pass\n", "parser": lambda code: parse_statement( code, config=PartialParserConfig(python_version="3.6") ), }, # For block with else { "node": cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "for target in iter(): pass\nelse: pass\n", "parser": parse_statement, }, # indentation { "node": DummyIndentedBlock( "    ", cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), ), ), "code": "    for target in iter(): pass\n", "parser": None, }, # for an indented body { "node": DummyIndentedBlock( "    ", cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), ), ), "code": "    for target in iter():\n        pass\n", "parser": None, "expected_position": CodeRange((1, 4), (2, 12)), }, # leading_lines { "node": cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), cst.Else( cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), leading_lines=( cst.EmptyLine(comment=cst.Comment("# else comment")), ), ), leading_lines=( cst.EmptyLine(comment=cst.Comment("# leading comment")), ), ), "code": "# leading comment\nfor target in iter():\n    pass\n# else comment\nelse:\n    pass\n", "parser": None, "expected_position": CodeRange((2, 0), (6, 8)), }, # Weird spacing rules { "node": cst.For( cst.Name( "target", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), cst.Call( cst.Name("iter"), lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_for=cst.SimpleWhitespace(""), whitespace_before_in=cst.SimpleWhitespace(""), whitespace_after_in=cst.SimpleWhitespace(""), ), "code": "for(target)in(iter()): pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 27)), }, # Whitespace { "node": cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_for=cst.SimpleWhitespace("  "), whitespace_before_in=cst.SimpleWhitespace("  "), whitespace_after_in=cst.SimpleWhitespace("  "), whitespace_before_colon=cst.SimpleWhitespace("  "), ), "code": "for  target  in  iter()  : pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 31)), }, ) ) def test_valid(self, **kwargs: Any) -> 
None: self.validate_node(**kwargs) @data_provider( ( { "get_node": lambda: cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_for=cst.SimpleWhitespace(""), ), "expected_re": "Must have at least one space after 'for' keyword", }, { "get_node": lambda: cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_before_in=cst.SimpleWhitespace(""), ), "expected_re": "Must have at least one space before 'in' keyword", }, { "get_node": lambda: cst.For( cst.Name("target"), cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_in=cst.SimpleWhitespace(""), ), "expected_re": "Must have at least one space after 'in' keyword", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_funcdef.py000066400000000000000000002765761456464173300215520ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Callable import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock, parse_statement_as from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class FunctionDefCreationTest(CSTNodeTest): @data_provider( ( # Simple function definition without any arguments or return { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(): pass\n", }, # Functiondef with a return annotation { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), returns=cst.Annotation(cst.Name("str")), ), "code": "def foo() -> str: pass\n", "expected_position": CodeRange((1, 0), (1, 22)), }, # Async function definition. { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), asynchronous=cst.Asynchronous(), ), "code": "async def foo(): pass\n", }, # Async function definition with annotation. 
{ "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), asynchronous=cst.Asynchronous(), returns=cst.Annotation(cst.Name("int")), ), "code": "async def foo() -> int: pass\n", "expected_position": CodeRange((1, 0), (1, 28)), }, # Test basic positional params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=(cst.Param(cst.Name("bar")), cst.Param(cst.Name("baz"))) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, baz): pass\n", }, # Typed positional params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param(cst.Name("bar"), cst.Annotation(cst.Name("str"))), cst.Param(cst.Name("baz"), cst.Annotation(cst.Name("int"))), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar: str, baz: int): pass\n", }, # Test basic positional default params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz"), default=cst.Integer("5")), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(bar = "one", baz = 5): pass\n', }, # Typed positional default params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("bar"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"one"'), ), cst.Param( cst.Name("baz"), cst.Annotation(cst.Name("int")), default=cst.Integer("5"), ), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(bar: str = "one", baz: int = 5): pass\n', }, # Test basic positional only params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param(cst.Name("bar")), cst.Param(cst.Name("baz")), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, baz, /): pass\n", }, # Typed positional only params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param(cst.Name("bar"), cst.Annotation(cst.Name("str"))), cst.Param(cst.Name("baz"), cst.Annotation(cst.Name("int"))), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar: str, baz: int, /): pass\n", }, # Test basic positional only default params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz"), default=cst.Integer("5")), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(bar = "one", baz = 5, /): pass\n', }, # Typed positional only default params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"one"'), ), cst.Param( cst.Name("baz"), cst.Annotation(cst.Name("int")), default=cst.Integer("5"), ), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(bar: str = "one", baz: int = 5, /): pass\n', }, # Mixed positional and default params. 
{ "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param(cst.Name("bar"), cst.Annotation(cst.Name("str"))), cst.Param( cst.Name("baz"), cst.Annotation(cst.Name("int")), default=cst.Integer("5"), ), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar: str, baz: int = 5): pass\n", }, # Test kwonly params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz")), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(*, bar = "one", baz): pass\n', }, # Typed kwonly params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"one"'), ), cst.Param(cst.Name("baz"), cst.Annotation(cst.Name("int"))), cst.Param( cst.Name("biz"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"two"'), ), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(*, bar: str = "one", baz: int, biz: str = "two"): pass\n', }, # Mixed params and kwonly_params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param(cst.Name("first")), cst.Param(cst.Name("second")), ), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"one"'), ), cst.Param(cst.Name("baz"), cst.Annotation(cst.Name("int"))), cst.Param( cst.Name("biz"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"two"'), ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(first, second, *, bar: str = "one", baz: int, biz: str = "two"): pass\n', }, # Mixed params and kwonly_params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param(cst.Name("first"), default=cst.Float("1.0")), cst.Param(cst.Name("second"), default=cst.Float("1.5")), ), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"one"'), ), cst.Param(cst.Name("baz"), cst.Annotation(cst.Name("int"))), cst.Param( cst.Name("biz"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"two"'), ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(first = 1.0, second = 1.5, *, bar: str = "one", baz: int, biz: str = "two"): pass\n', }, # Mixed params and kwonly_params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param(cst.Name("first")), cst.Param(cst.Name("second")), cst.Param(cst.Name("third"), default=cst.Float("1.0")), cst.Param(cst.Name("fourth"), default=cst.Float("1.5")), ), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"one"'), ), cst.Param(cst.Name("baz"), cst.Annotation(cst.Name("int"))), cst.Param( cst.Name("biz"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"two"'), ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(first, second, third = 1.0, fourth = 1.5, *, bar: str = "one", baz: int, biz: str = "two"): pass\n', }, # Test star_arg { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(star_arg=cst.Param(cst.Name("params"))), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(*params): pass\n", }, # Typed star_arg, include kwonly_params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_arg=cst.Param( cst.Name("params"), cst.Annotation(cst.Name("str")) ), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"one"'), ), 
cst.Param(cst.Name("baz"), cst.Annotation(cst.Name("int"))), cst.Param( cst.Name("biz"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"two"'), ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(*params: str, bar: str = "one", baz: int, biz: str = "two"): pass\n', }, # Mixed params star_arg and kwonly_params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param(cst.Name("first")), cst.Param(cst.Name("second")), cst.Param(cst.Name("third"), default=cst.Float("1.0")), cst.Param(cst.Name("fourth"), default=cst.Float("1.5")), ), star_arg=cst.Param( cst.Name("params"), cst.Annotation(cst.Name("str")) ), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"one"'), ), cst.Param(cst.Name("baz"), cst.Annotation(cst.Name("int"))), cst.Param( cst.Name("biz"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"two"'), ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(first, second, third = 1.0, fourth = 1.5, *params: str, bar: str = "one", baz: int, biz: str = "two"): pass\n', }, # Test star_arg and star_kwarg { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(star_kwarg=cst.Param(cst.Name("kwparams"))), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(**kwparams): pass\n", }, # Test star_arg and kwarg { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_arg=cst.Param(cst.Name("params")), star_kwarg=cst.Param(cst.Name("kwparams")), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(*params, **kwparams): pass\n", }, # Test typed star_arg and star_kwarg { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_arg=cst.Param( cst.Name("params"), cst.Annotation(cst.Name("str")) ), star_kwarg=cst.Param( cst.Name("kwparams"), cst.Annotation(cst.Name("int")) ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(*params: str, **kwparams: int): pass\n", }, # Test positional only params and positional params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=(cst.Param(cst.Name("bar")),), params=(cst.Param(cst.Name("baz")),), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, /, baz): pass\n", }, # Test positional only params and star args { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=(cst.Param(cst.Name("bar")),), star_arg=cst.Param(cst.Name("baz")), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, /, *baz): pass\n", }, # Test positional only params and kwonly params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=(cst.Param(cst.Name("bar")),), kwonly_params=(cst.Param(cst.Name("baz")),), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, /, *, baz): pass\n", }, # Test positional only params and star kwargs { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=(cst.Param(cst.Name("bar")),), star_kwarg=cst.Param(cst.Name("baz")), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, /, **baz): pass\n", }, # Test decorators { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), (cst.Decorator(cst.Name("bar")),), ), "code": "@bar\ndef foo(): pass\n", }, { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), ( cst.Decorator( cst.Call( cst.Name("bar"), ( cst.Arg(cst.Name("baz")), cst.Arg(cst.SimpleString("'123'")), ), ) ), ), ), "code": "@bar(baz, 
'123')\ndef foo(): pass\n", }, { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), ( cst.Decorator( cst.Call( cst.Name("bar"), (cst.Arg(cst.SimpleString("'123'")),) ) ), cst.Decorator( cst.Call( cst.Name("baz"), (cst.Arg(cst.SimpleString("'456'")),) ) ), ), ), "code": "@bar('123')\n@baz('456')\ndef foo(): pass\n", "expected_position": CodeRange((3, 0), (3, 15)), }, # Test indentation { "node": DummyIndentedBlock( " ", cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), (cst.Decorator(cst.Name("bar")),), ), ), "code": " @bar\n def foo(): pass\n", }, # With an indented body { "node": DummyIndentedBlock( " ", cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), (cst.Decorator(cst.Name("bar")),), ), ), "code": " @bar\n def foo():\n pass\n", "expected_position": CodeRange((2, 4), (3, 12)), }, # Leading lines { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), leading_lines=( cst.EmptyLine(comment=cst.Comment("# leading comment")), ), ), "code": "# leading comment\ndef foo(): pass\n", "expected_position": CodeRange((2, 0), (2, 15)), }, # Inner whitespace { "node": cst.FunctionDef( leading_lines=( cst.EmptyLine(), cst.EmptyLine( comment=cst.Comment("# What an amazing decorator") ), ), decorators=( cst.Decorator( whitespace_after_at=cst.SimpleWhitespace(" "), decorator=cst.Call( func=cst.Name("bar"), whitespace_after_func=cst.SimpleWhitespace(" "), whitespace_before_args=cst.SimpleWhitespace(" "), ), ), ), lines_after_decorators=( cst.EmptyLine(comment=cst.Comment("# What a great function")), ), asynchronous=cst.Asynchronous( whitespace_after=cst.SimpleWhitespace(" ") ), whitespace_after_def=cst.SimpleWhitespace(" "), name=cst.Name("foo"), whitespace_after_name=cst.SimpleWhitespace(" "), whitespace_before_params=cst.SimpleWhitespace(" "), params=cst.Parameters(), returns=cst.Annotation( whitespace_before_indicator=cst.SimpleWhitespace(" "), whitespace_after_indicator=cst.SimpleWhitespace(" "), annotation=cst.Name("str"), ), whitespace_before_colon=cst.SimpleWhitespace(" "), body=cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "\n# What an amazing decorator\n@ bar ( )\n# What a great function\nasync def foo ( ) -> str : pass\n", "expected_position": CodeRange((5, 0), (5, 37)), }, # Decorators and annotations { "node": cst.Decorator( whitespace_after_at=cst.SimpleWhitespace(" "), decorator=cst.Call( func=cst.Name("bar"), whitespace_after_func=cst.SimpleWhitespace(" "), whitespace_before_args=cst.SimpleWhitespace(" "), ), ), "code": "@ bar ( )\n", "expected_position": CodeRange((1, 0), (1, 10)), }, # Allow nested calls on decorator { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), (cst.Decorator(cst.Call(func=cst.Call(func=cst.Name("bar")))),), ), "code": "@bar()()\ndef foo(): pass\n", }, # Allow any expression in decorator { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), ( cst.Decorator( cst.BinaryOperation(cst.Name("a"), cst.Add(), cst.Name("b")) ), ), ), "code": "@a + b\ndef foo(): pass\n", }, # Allow parentheses around decorator { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), ( cst.Decorator( cst.Name( "bar", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ) ), ), ), "code": "@(bar)\ndef foo(): pass\n", }, # Parameters { "node": 
cst.Parameters( params=( cst.Param(cst.Name("first")), cst.Param(cst.Name("second")), cst.Param(cst.Name("third"), default=cst.Float("1.0")), cst.Param(cst.Name("fourth"), default=cst.Float("1.5")), ), star_arg=cst.Param( cst.Name("params"), cst.Annotation(cst.Name("str")) ), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"one"'), ), cst.Param(cst.Name("baz"), cst.Annotation(cst.Name("int"))), cst.Param( cst.Name("biz"), cst.Annotation(cst.Name("str")), default=cst.SimpleString('"two"'), ), ), ), "code": 'first, second, third = 1.0, fourth = 1.5, *params: str, bar: str = "one", baz: int, biz: str = "two"', "expected_position": CodeRange((1, 0), (1, 100)), }, { "node": cst.Param(cst.Name("third"), star="", default=cst.Float("1.0")), "code": "third = 1.0", "expected_position": CodeRange((1, 0), (1, 5)), }, { "node": cst.Param( cst.Name("third"), star="*", whitespace_after_star=cst.SimpleWhitespace(" "), ), "code": "* third", "expected_position": CodeRange((1, 0), (1, 7)), }, { "node": cst.FunctionDef( name=cst.Name( value="foo", ), params=cst.Parameters( params=[ cst.Param( name=cst.Name( value="param1", ), ), ], ), body=cst.IndentedBlock( body=[ cst.SimpleStatementLine( body=[ cst.Pass(), ], ), ], ), whitespace_before_params=cst.ParenthesizedWhitespace( last_line=cst.SimpleWhitespace( value=" ", ), ), ), "code": "def foo(\n param1):\n pass\n", "expected_position": CodeRange((1, 0), (3, 8)), }, ) ) def test_valid(self, **kwargs: Any) -> None: if not is_native() and kwargs.get("native_only", False): self.skipTest("Disabled for native parser") if "native_only" in kwargs: kwargs.pop("native_only") self.validate_node(**kwargs) @data_provider( ( # PEP 646 { "node": cst.FunctionDef( name=cst.Name(value="foo"), params=cst.Parameters( params=[], star_arg=cst.Param( star="*", name=cst.Name("a"), annotation=cst.Annotation( cst.StarredElement(value=cst.Name("b")), whitespace_before_indicator=cst.SimpleWhitespace(""), ), ), ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), "parser": parse_statement, "code": "def foo(*a: *b): pass\n", }, { "node": cst.FunctionDef( name=cst.Name(value="foo"), params=cst.Parameters( params=[], star_arg=cst.Param( star="*", name=cst.Name("a"), annotation=cst.Annotation( cst.StarredElement( value=cst.Subscript( value=cst.Name("tuple"), slice=[ cst.SubscriptElement( cst.Index(cst.Name("int")), comma=cst.Comma(), ), cst.SubscriptElement( cst.Index( value=cst.Name("Ts"), star="*", whitespace_after_star=cst.SimpleWhitespace( "" ), ), comma=cst.Comma(), ), cst.SubscriptElement( cst.Index(cst.Ellipsis()) ), ], ) ), whitespace_before_indicator=cst.SimpleWhitespace(""), ), ), ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), "parser": parse_statement, "code": "def foo(*a: *tuple[int,*Ts,...]): pass\n", }, # Single type variable { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), type_parameters=cst.TypeParameters( (cst.TypeParam(cst.TypeVar(cst.Name("T"))),) ), ), "code": "def foo[T](): pass\n", "parser": parse_statement, }, # All the type parameters { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), type_parameters=cst.TypeParameters( ( cst.TypeParam( cst.TypeVar( cst.Name("T"), bound=cst.Name("int"), colon=cst.Colon( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.TypeParam( cst.TypeVarTuple(cst.Name("Ts")), cst.Comma(whitespace_after=cst.SimpleWhitespace(" 
")), ), cst.TypeParam(cst.ParamSpec(cst.Name("KW"))), ) ), ), "code": "def foo[T: int, *Ts, **KW](): pass\n", "parser": parse_statement, }, # Type parameters with whitespace { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), type_parameters=cst.TypeParameters( params=( cst.TypeParam( param=cst.TypeVar( cst.Name("T"), bound=cst.Name("str"), colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.ParenthesizedWhitespace( empty_lines=(cst.EmptyLine(),), indent=True, ), ), ), comma=cst.Comma(cst.SimpleWhitespace(" ")), ), cst.TypeParam( cst.ParamSpec( cst.Name("PS"), cst.SimpleWhitespace(" ") ), cst.Comma(cst.SimpleWhitespace(" ")), ), ) ), whitespace_after_type_parameters=cst.SimpleWhitespace(" "), ), "code": "def foo[T :\n\nstr ,** PS ,] (): pass\n", "parser": parse_statement, }, ) ) def test_valid_native(self, **kwargs: Any) -> None: if not is_native(): self.skipTest("Disabled for pure python parser") self.validate_node(**kwargs) @data_provider( ( ( lambda: cst.FunctionDef( cst.Name("foo", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), ), "Cannot have parens around Name", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), asynchronous=cst.Asynchronous( whitespace_after=cst.SimpleWhitespace("") ), ), "one space after Asynchronous", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_def=cst.SimpleWhitespace(""), ), "one space between 'def' and name", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_kwarg=cst.Param(cst.Name("bar"), equal=cst.AssignEqual()) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "Must have a default when specifying an AssignEqual.", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters(star_kwarg=cst.Param(cst.Name("bar"), star="***")), cst.SimpleStatementSuite((cst.Pass(),)), ), r"Must specify either '', '\*' or '\*\*' for star.", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("bar")), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "Cannot have param without defaults following a param with defaults.", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), ), params=(cst.Param(cst.Name("bar")),), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "Cannot have param without defaults following a param with defaults.", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters(star_arg=cst.ParamStar()), cst.SimpleStatementSuite((cst.Pass(),)), ), "Must have at least one kwonly param if ParamStar is used.", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters(posonly_ind=cst.ParamSlash()), cst.SimpleStatementSuite((cst.Pass(),)), ), "Must have at least one posonly param if ParamSlash is used.", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters(params=(cst.Param(cst.Name("bar"), star="*"),)), cst.SimpleStatementSuite((cst.Pass(),)), ), "Expecting a star prefix of ''", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"'), star="*", ), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "Expecting a star prefix of ''", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters( 
kwonly_params=(cst.Param(cst.Name("bar"), star="*"),) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "Expecting a star prefix of ''", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters(star_arg=cst.Param(cst.Name("bar"), star="**")), cst.SimpleStatementSuite((cst.Pass(),)), ), r"Expecting a star prefix of '\*'", ), ( lambda: cst.FunctionDef( cst.Name("foo"), cst.Parameters(star_kwarg=cst.Param(cst.Name("bar"), star="*")), cst.SimpleStatementSuite((cst.Pass(),)), ), r"Expecting a star prefix of '\*\*'", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) def _parse_statement_force_38(code: str) -> cst.BaseCompoundStatement: statement = cst.parse_statement( code, config=cst.PartialParserConfig(python_version="3.8") ) if not isinstance(statement, cst.BaseCompoundStatement): raise Exception("This function is expecting to parse compound statements only!") return statement class FunctionDefParserTest(CSTNodeTest): @data_provider( ( # Simple function definition without any arguments or return ( cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), ), "def foo(): pass\n", ), # Functiondef with a return annotation ( cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), returns=cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace(" "), ), ), "def foo() -> str: pass\n", ), # Async function definition. ( cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), asynchronous=cst.Asynchronous(), ), "async def foo(): pass\n", ), # Async function definition with annotation. ( cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), asynchronous=cst.Asynchronous(), returns=cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace(" "), ), ), "async def foo() -> int: pass\n", ), # Test basic positional params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param(cst.Name("baz"), star=""), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "def foo(bar, baz): pass\n", ), # Typed positional params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), star="", ), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "def foo(bar: str, baz: int): pass\n", ), # Test basic positional default params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("bar"), equal=cst.AssignEqual(), default=cst.SimpleString('"one"'), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), equal=cst.AssignEqual(), default=cst.Integer("5"), star="", ), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), 'def foo(bar = "one", baz = 5): pass\n', ), # Typed positional default params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"one"'), star="", 
comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.Integer("5"), star="", ), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), 'def foo(bar: str = "one", baz: int = 5): pass\n', ), # Mixed positional and default params. ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.Integer("5"), star="", ), ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "def foo(bar: str, baz: int = 5): pass\n", ), # Test kwonly params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_arg=cst.ParamStar(), kwonly_params=( cst.Param( cst.Name("bar"), equal=cst.AssignEqual(), default=cst.SimpleString('"one"'), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param(cst.Name("baz"), default=None, star=""), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), 'def foo(*, bar = "one", baz): pass\n', ), # Typed kwonly params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_arg=cst.ParamStar(), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"one"'), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"two"'), star="", ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), 'def foo(*, bar: str = "one", baz: int, biz: str = "two"): pass\n', ), # Mixed params and kwonly_params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("first"), annotation=None, default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("second"), annotation=None, default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), star_arg=cst.ParamStar(), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"one"'), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"two"'), star="", ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), 'def foo(first, second, *, bar: str = "one", baz: int, biz: str = "two"): pass\n', ), # Mixed params and kwonly_params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( 
cst.Param( cst.Name("first"), annotation=None, equal=cst.AssignEqual(), default=cst.Float("1.0"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("second"), annotation=None, equal=cst.AssignEqual(), default=cst.Float("1.5"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), star_arg=cst.ParamStar(), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"one"'), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"two"'), star="", ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), 'def foo(first = 1.0, second = 1.5, *, bar: str = "one", baz: int, biz: str = "two"): pass\n', ), # Mixed params, and kwonly_params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("first"), annotation=None, default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("second"), annotation=None, default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("third"), annotation=None, equal=cst.AssignEqual(), default=cst.Float("1.0"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("fourth"), annotation=None, equal=cst.AssignEqual(), default=cst.Float("1.5"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), star_arg=cst.ParamStar(), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"one"'), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"two"'), star="", ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), 'def foo(first, second, third = 1.0, fourth = 1.5, *, bar: str = "one", baz: int, biz: str = "two"): pass\n', ), # Test star_arg ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_arg=cst.Param( cst.Name("params"), annotation=None, default=None, star="*" ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "def foo(*params): pass\n", ), # Typed star_arg, include kwonly_params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_arg=cst.Param( cst.Name("params"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace(""), ), default=None, star="*", comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"one"'), star="", comma=cst.Comma( 
whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"two"'), star="", ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), 'def foo(*params: str, bar: str = "one", baz: int, biz: str = "two"): pass\n', ), # Mixed params star_arg and kwonly_params ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( params=( cst.Param( cst.Name("first"), annotation=None, default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("second"), annotation=None, default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("third"), annotation=None, equal=cst.AssignEqual(), default=cst.Float("1.0"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("fourth"), annotation=None, equal=cst.AssignEqual(), default=cst.Float("1.5"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), star_arg=cst.Param( cst.Name("params"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace(""), ), default=None, star="*", comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), kwonly_params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"one"'), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), default=None, star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), equal=cst.AssignEqual(), default=cst.SimpleString('"two"'), star="", ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), 'def foo(first, second, third = 1.0, fourth = 1.5, *params: str, bar: str = "one", baz: int, biz: str = "two"): pass\n', ), # Test star_arg and star_kwarg ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_kwarg=cst.Param( cst.Name("kwparams"), annotation=None, default=None, star="**", ) ), cst.SimpleStatementSuite((cst.Pass(),)), ), "def foo(**kwparams): pass\n", ), # Test star_arg and kwarg ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_arg=cst.Param( cst.Name("params"), annotation=None, default=None, star="*", comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), star_kwarg=cst.Param( cst.Name("kwparams"), annotation=None, default=None, star="**", ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "def foo(*params, **kwparams): pass\n", ), # Test typed star_arg and star_kwarg ( cst.FunctionDef( cst.Name("foo"), cst.Parameters( star_arg=cst.Param( cst.Name("params"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace(""), ), default=None, star="*", comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), star_kwarg=cst.Param( cst.Name("kwparams"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace(""), ), default=None, star="**", ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "def 
foo(*params: str, **kwparams: int): pass\n", ), # Test decorators ( cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), (cst.Decorator(cst.Name("bar")),), ), "@bar\ndef foo(): pass\n", ), ( cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), ( cst.Decorator( cst.Call( cst.Name("bar"), ( cst.Arg( cst.Name("baz"), keyword=None, comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Arg(cst.SimpleString("'123'"), keyword=None), ), ) ), ), ), "@bar(baz, '123')\ndef foo(): pass\n", ), ( cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), ( cst.Decorator( cst.Call( cst.Name("bar"), (cst.Arg(cst.SimpleString("'123'"), keyword=None),), ) ), cst.Decorator( cst.Call( cst.Name("baz"), (cst.Arg(cst.SimpleString("'456'"), keyword=None),), ) ), ), ), "@bar('123')\n@baz('456')\ndef foo(): pass\n", ), # Leading lines ( cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.SimpleStatementSuite((cst.Pass(),)), leading_lines=( cst.EmptyLine(comment=cst.Comment("# leading comment")), ), ), "# leading comment\ndef foo(): pass\n", ), # Inner whitespace ( cst.FunctionDef( leading_lines=( cst.EmptyLine(), cst.EmptyLine( comment=cst.Comment("# What an amazing decorator") ), ), decorators=( cst.Decorator( whitespace_after_at=cst.SimpleWhitespace(" "), decorator=cst.Call( func=cst.Name("bar"), whitespace_after_func=cst.SimpleWhitespace(" "), whitespace_before_args=cst.SimpleWhitespace(" "), ), ), ), lines_after_decorators=( cst.EmptyLine(comment=cst.Comment("# What a great function")), ), asynchronous=cst.Asynchronous( whitespace_after=cst.SimpleWhitespace(" ") ), whitespace_after_def=cst.SimpleWhitespace(" "), name=cst.Name("foo"), whitespace_after_name=cst.SimpleWhitespace(" "), whitespace_before_params=cst.SimpleWhitespace(" "), params=cst.Parameters(), returns=cst.Annotation( whitespace_before_indicator=cst.SimpleWhitespace(" "), whitespace_after_indicator=cst.SimpleWhitespace(" "), annotation=cst.Name("str"), ), whitespace_before_colon=cst.SimpleWhitespace(" "), body=cst.SimpleStatementSuite((cst.Pass(),)), ), "\n# What an amazing decorator\n@ bar ( )\n# What a great function\nasync def foo ( ) -> str : pass\n", ), ) ) def test_valid(self, node: cst.CSTNode, code: str) -> None: self.validate_node(node, code, parse_statement) @data_provider( ( # Test basic positional only params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash(), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, baz, /): pass\n", }, # Positional only params with whitespace after but no comma { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, baz, / ): pass\n", "native_only": True, }, # Typed positional only params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), 
whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash(), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar: str, baz: int, /): pass\n", }, # Test basic positional only default params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"'), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), default=cst.Integer("5"), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash(), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(bar = "one", baz = 5, /): pass\n', }, # Typed positional only default params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), cst.Annotation( cst.Name("str"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), default=cst.SimpleString('"one"'), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), cst.Annotation( cst.Name("int"), whitespace_before_indicator=cst.SimpleWhitespace( "" ), ), default=cst.Integer("5"), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash(), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": 'def foo(bar: str = "one", baz: int = 5, /): pass\n', }, # Test positional only params and positional params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash( comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), params=( cst.Param( cst.Name("baz"), star="", ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, /, baz): pass\n", }, # Test positional only params and star args { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash( comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), star_arg=cst.Param(cst.Name("baz"), star="*"), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, /, *baz): pass\n", }, # Test positional only params and kwonly params { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash( comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), star_arg=cst.ParamStar( comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), kwonly_params=(cst.Param(cst.Name("baz"), star=""),), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, /, *, baz): pass\n", }, # Test positional only params and star kwargs { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash( 
comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), star_kwarg=cst.Param(cst.Name("baz"), star="**"), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "def foo(bar, /, **baz): pass\n", }, ) ) def test_valid_38(self, node: cst.CSTNode, code: str, **kwargs: Any) -> None: if not is_native() and kwargs.get("native_only", False): self.skipTest("disabled for pure python parser") self.validate_node(node, code, _parse_statement_force_38) @data_provider( ( { "code": "async def foo(): pass", "parser": parse_statement_as(python_version="3.7"), "expect_success": True, }, { "code": "async def foo(): pass", "parser": parse_statement_as(python_version="3.6"), "expect_success": True, }, { "code": "async def foo(): pass", "parser": parse_statement_as(python_version="3.5"), "expect_success": True, }, { "code": "async def foo(): pass", "parser": parse_statement_as(python_version="3.3"), "expect_success": False, }, ) ) def test_versions(self, **kwargs: Any) -> None: if is_native() and not kwargs.get("expect_success", True): self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) @data_provider( ( {"code": "A[:*b]"}, {"code": "A[*b:]"}, {"code": "A[*b:*b]"}, {"code": "A[*(1:2)]"}, {"code": "A[*:]"}, {"code": "A[:*]"}, {"code": "A[**b]"}, {"code": "def f(x: *b): pass"}, {"code": "def f(**x: *b): pass"}, {"code": "x: *b"}, ) ) def test_parse_error(self, **kwargs: Any) -> None: if not is_native(): self.skipTest("Skipped for non-native parser") self.assert_parses(**kwargs, expect_success=False, parser=parse_statement) LibCST-1.2.0/libcst/_nodes/tests/test_global.py000066400000000000000000000112541456464173300213540ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
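# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original test suite.
# It shows the two behaviors the tests below exercise -- rendering a
# constructed ``cst.Global`` node and its construction-time validation --
# assuming only the public LibCST API already used elsewhere in this file.
def _global_example_sketch() -> None:
    import libcst as cst

    # Rendering: Global is a small statement, so wrap it in a
    # SimpleStatementLine and a Module to generate code from it.
    stmt = cst.SimpleStatementLine([cst.Global((cst.NameItem(cst.Name("a")),))])
    assert cst.Module(body=[stmt]).code == "global a\n"

    # Validation: constructing a Global with no names raises immediately.
    try:
        cst.Global(())
    except cst.CSTValidationError:
        pass
# ---------------------------------------------------------------------------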
from typing import Any

import libcst as cst
from libcst import parse_statement
from libcst._nodes.tests.base import CSTNodeTest
from libcst.helpers import ensure_type
from libcst.metadata import CodeRange
from libcst.testing.utils import data_provider


class GlobalConstructionTest(CSTNodeTest):
    @data_provider(
        (
            # Single global statement
            {"node": cst.Global((cst.NameItem(cst.Name("a")),)), "code": "global a"},
            # Multiple entries in global statement
            {
                "node": cst.Global(
                    (cst.NameItem(cst.Name("a")), cst.NameItem(cst.Name("b")))
                ),
                "code": "global a, b",
            },
            # Whitespace rendering test
            {
                "node": cst.Global(
                    (
                        cst.NameItem(
                            cst.Name("a"),
                            comma=cst.Comma(
                                whitespace_before=cst.SimpleWhitespace("  "),
                                whitespace_after=cst.SimpleWhitespace("  "),
                            ),
                        ),
                        cst.NameItem(cst.Name("b")),
                    ),
                    whitespace_after_global=cst.SimpleWhitespace("  "),
                ),
                "code": "global  a  ,  b",
                "expected_position": CodeRange((1, 0), (1, 15)),
            },
        )
    )
    def test_valid(self, **kwargs: Any) -> None:
        self.validate_node(**kwargs)

    @data_provider(
        (
            # Validate construction
            {
                "get_node": lambda: cst.Global(()),
                "expected_re": "A Global statement must have at least one NameItem",
            },
            # Validate whitespace handling
            {
                "get_node": lambda: cst.Global(
                    (cst.NameItem(cst.Name("a")),),
                    whitespace_after_global=cst.SimpleWhitespace(""),
                ),
                "expected_re": "Must have at least one space after 'global' keyword",
            },
            # Validate comma handling
            {
                "get_node": lambda: cst.Global(
                    (cst.NameItem(cst.Name("a"), comma=cst.Comma()),)
                ),
                "expected_re": "The last NameItem in a Global cannot have a trailing comma",
            },
            # Validate paren handling
            {
                "get_node": lambda: cst.Global(
                    (
                        cst.NameItem(
                            cst.Name(
                                "a", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)
                            )
                        ),
                    )
                ),
                "expected_re": "Cannot have parens around names in NameItem",
            },
        )
    )
    def test_invalid(self, **kwargs: Any) -> None:
        self.assert_invalid(**kwargs)


class GlobalParsingTest(CSTNodeTest):
    @data_provider(
        (
            # Single global statement
            {"node": cst.Global((cst.NameItem(cst.Name("a")),)), "code": "global a"},
            # Multiple entries in global statement
            {
                "node": cst.Global(
                    (
                        cst.NameItem(
                            cst.Name("a"),
                            comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")),
                        ),
                        cst.NameItem(cst.Name("b")),
                    )
                ),
                "code": "global a, b",
            },
            # Whitespace rendering test
            {
                "node": cst.Global(
                    (
                        cst.NameItem(
                            cst.Name("a"),
                            comma=cst.Comma(
                                whitespace_before=cst.SimpleWhitespace("  "),
                                whitespace_after=cst.SimpleWhitespace("  "),
                            ),
                        ),
                        cst.NameItem(cst.Name("b")),
                    ),
                    whitespace_after_global=cst.SimpleWhitespace("  "),
                ),
                "code": "global  a  ,  b",
            },
        )
    )
    def test_valid(self, **kwargs: Any) -> None:
        self.validate_node(
            parser=lambda code: ensure_type(
                parse_statement(code), cst.SimpleStatementLine
            ).body[0],
            **kwargs,
        )

LibCST-1.2.0/libcst/_nodes/tests/test_if.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
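# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original test suite.
# As the tests below rely on, LibCST models an ``elif`` clause as a nested
# ``cst.If`` in the ``orelse`` field rather than as a separate node type.
# A minimal demonstration, assuming the public parsing API only:
def _elif_example_sketch() -> None:
    import libcst as cst

    node = cst.parse_statement("if a: pass\nelif b: pass\nelse: pass\n")
    assert isinstance(node, cst.If)
    assert isinstance(node.orelse, cst.If)  # the elif clause
    assert isinstance(node.orelse.orelse, cst.Else)  # the final else clause
# ---------------------------------------------------------------------------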
from typing import Any

import libcst as cst
from libcst import parse_statement
from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock
from libcst.metadata import CodeRange
from libcst.testing.utils import data_provider


class IfTest(CSTNodeTest):
    @data_provider(
        (
            # Simple if without elif or else
            {
                "node": cst.If(
                    cst.Name("conditional"), cst.SimpleStatementSuite((cst.Pass(),))
                ),
                "code": "if conditional: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (1, 20)),
            },
            # else clause
            {
                "node": cst.If(
                    cst.Name("conditional"),
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))),
                ),
                "code": "if conditional: pass\nelse: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (2, 10)),
            },
            # elif clause
            {
                "node": cst.If(
                    cst.Name("conditional"),
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    orelse=cst.If(
                        cst.Name("other_conditional"),
                        cst.SimpleStatementSuite((cst.Pass(),)),
                        orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))),
                    ),
                ),
                "code": "if conditional: pass\nelif other_conditional: pass\nelse: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (3, 10)),
            },
            # indentation
            {
                "node": DummyIndentedBlock(
                    "    ",
                    cst.If(
                        cst.Name("conditional"),
                        cst.SimpleStatementSuite((cst.Pass(),)),
                        orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))),
                    ),
                ),
                "code": "    if conditional: pass\n    else: pass\n",
                "parser": None,
                "expected_position": CodeRange((1, 4), (2, 14)),
            },
            # with an indented body
            {
                "node": DummyIndentedBlock(
                    "    ",
                    cst.If(
                        cst.Name("conditional"),
                        cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)),
                    ),
                ),
                "code": "    if conditional:\n        pass\n",
                "parser": None,
                "expected_position": CodeRange((1, 4), (2, 12)),
            },
            # leading_lines
            {
                "node": cst.If(
                    cst.Name("conditional"),
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    leading_lines=(
                        cst.EmptyLine(comment=cst.Comment("# leading comment")),
                    ),
                ),
                "code": "# leading comment\nif conditional: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((2, 0), (2, 20)),
            },
            # whitespace before/after test and else
            {
                "node": cst.If(
                    cst.Name("conditional"),
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    whitespace_before_test=cst.SimpleWhitespace(" "),
                    whitespace_after_test=cst.SimpleWhitespace(" "),
                    orelse=cst.Else(
                        cst.SimpleStatementSuite((cst.Pass(),)),
                        whitespace_before_colon=cst.SimpleWhitespace(" "),
                    ),
                ),
                "code": "if conditional : pass\nelse : pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (2, 11)),
            },
            # empty lines between if/elif/else clauses, not captured by the suite.
            {
                "node": cst.If(
                    cst.Name("test_a"),
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    orelse=cst.If(
                        cst.Name("test_b"),
                        cst.SimpleStatementSuite((cst.Pass(),)),
                        leading_lines=(cst.EmptyLine(),),
                        orelse=cst.Else(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            leading_lines=(cst.EmptyLine(),),
                        ),
                    ),
                ),
                "code": "if test_a: pass\n\nelif test_b: pass\n\nelse: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (5, 10)),
            },
        )
    )
    def test_valid(self, **kwargs: Any) -> None:
        self.validate_node(**kwargs)

LibCST-1.2.0/libcst/_nodes/tests/test_ifexp.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
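# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original test suite.
# In a ``cst.IfExp`` the "true" branch is ``body``, the condition is ``test``,
# and the "false" branch is ``orelse`` -- mirroring source order in
# ``body if test else orelse``. A minimal demonstration:
def _ifexp_example_sketch() -> None:
    import libcst as cst

    node = cst.parse_expression("foo if bar else baz")
    assert isinstance(node, cst.IfExp)
    assert node.body.value == "foo"  # value when the condition holds
    assert node.test.value == "bar"  # the condition itself
    assert node.orelse.value == "baz"  # value otherwise
# ---------------------------------------------------------------------------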
from typing import Callable, Optional

import libcst as cst
from libcst import parse_expression
from libcst._nodes.tests.base import CSTNodeTest
from libcst.metadata import CodeRange
from libcst.testing.utils import data_provider


class IfExpTest(CSTNodeTest):
    @data_provider(
        (
            # Simple if expressions
            (
                cst.IfExp(
                    body=cst.Name("foo"), test=cst.Name("bar"), orelse=cst.Name("baz")
                ),
                "foo if bar else baz",
            ),
            # Parenthesized if expressions
            (
                cst.IfExp(
                    lpar=(cst.LeftParen(),),
                    body=cst.Name("foo"),
                    test=cst.Name("bar"),
                    orelse=cst.Name("baz"),
                    rpar=(cst.RightParen(),),
                ),
                "(foo if bar else baz)",
            ),
            (
                cst.IfExp(
                    body=cst.Name(
                        "foo", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)
                    ),
                    whitespace_before_if=cst.SimpleWhitespace(""),
                    whitespace_after_if=cst.SimpleWhitespace(""),
                    test=cst.Name(
                        "bar", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)
                    ),
                    whitespace_before_else=cst.SimpleWhitespace(""),
                    whitespace_after_else=cst.SimpleWhitespace(""),
                    orelse=cst.Name(
                        "baz", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)
                    ),
                ),
                "(foo)if(bar)else(baz)",
                CodeRange((1, 0), (1, 21)),
            ),
            (
                cst.IfExp(
                    body=cst.Name("foo"),
                    whitespace_before_if=cst.SimpleWhitespace(" "),
                    whitespace_after_if=cst.SimpleWhitespace(" "),
                    test=cst.Name("bar"),
                    whitespace_before_else=cst.SimpleWhitespace(" "),
                    whitespace_after_else=cst.SimpleWhitespace(""),
                    orelse=cst.IfExp(
                        body=cst.SimpleString("''"),
                        whitespace_before_if=cst.SimpleWhitespace(""),
                        test=cst.Name("bar"),
                        orelse=cst.Name("baz"),
                    ),
                ),
                "foo if bar else''if bar else baz",
                CodeRange((1, 0), (1, 32)),
            ),
            (
                cst.GeneratorExp(
                    elt=cst.IfExp(
                        body=cst.Name("foo"),
                        test=cst.Name("bar"),
                        orelse=cst.SimpleString("''"),
                        whitespace_after_else=cst.SimpleWhitespace(""),
                    ),
                    for_in=cst.CompFor(
                        target=cst.Name("_"),
                        iter=cst.Name("_"),
                        whitespace_before=cst.SimpleWhitespace(""),
                    ),
                ),
                "(foo if bar else''for _ in _)",
                CodeRange((1, 1), (1, 28)),
            ),
            # Make sure that spacing works
            (
                cst.IfExp(
                    lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),),
                    body=cst.Name("foo"),
                    whitespace_before_if=cst.SimpleWhitespace("  "),
                    whitespace_after_if=cst.SimpleWhitespace("  "),
                    test=cst.Name("bar"),
                    whitespace_before_else=cst.SimpleWhitespace("  "),
                    whitespace_after_else=cst.SimpleWhitespace("  "),
                    orelse=cst.Name("baz"),
                    rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),),
                ),
                "( foo  if  bar  else  baz )",
                CodeRange((1, 2), (1, 25)),
            ),
        )
    )
    def test_valid(
        self, node: cst.CSTNode, code: str, position: Optional[CodeRange] = None
    ) -> None:
        self.validate_node(node, code, parse_expression, expected_position=position)

    @data_provider(
        (
            (
                lambda: cst.IfExp(
                    cst.Name("bar"),
                    cst.Name("foo"),
                    cst.Name("baz"),
                    lpar=(cst.LeftParen(),),
                ),
                "left paren without right paren",
            ),
            (
                lambda: cst.IfExp(
                    cst.Name("bar"),
                    cst.Name("foo"),
                    cst.Name("baz"),
                    rpar=(cst.RightParen(),),
                ),
                "right paren without left paren",
            ),
        )
    )
    def test_invalid(
        self, get_node: Callable[[], cst.CSTNode], expected_re: str
    ) -> None:
        self.assert_invalid(get_node, expected_re)

LibCST-1.2.0/libcst/_nodes/tests/test_import.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
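# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original test suite.
# ``import``/``from ... import`` statements are small statements, so parsing
# one yields a ``SimpleStatementLine`` whose first body element is the
# ``cst.Import``/``cst.ImportFrom`` node -- exactly what the parsing tests
# below unwrap via ``ensure_type``. A minimal demonstration:
def _import_example_sketch() -> None:
    import libcst as cst
    from libcst.helpers import ensure_type

    line = ensure_type(
        cst.parse_statement("import foo.bar as baz"), cst.SimpleStatementLine
    )
    imp = ensure_type(line.body[0], cst.Import)
    alias = imp.names[0]
    assert alias.asname is not None
    assert ensure_type(alias.asname.name, cst.Name).value == "baz"
# ---------------------------------------------------------------------------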
from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst.helpers import ensure_type from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class ImportCreateTest(CSTNodeTest): @data_provider( ( # Simple import statement { "node": cst.Import(names=(cst.ImportAlias(cst.Name("foo")),)), "code": "import foo", }, { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")) ), ) ), "code": "import foo.bar", }, { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")) ), ) ), "code": "import foo.bar", }, # Comma-separated list of imports { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")) ), cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("baz")) ), ) ), "code": "import foo.bar, foo.baz", "expected_position": CodeRange((1, 0), (1, 23)), }, # Import with an alias { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")), asname=cst.AsName(cst.Name("baz")), ), ) ), "code": "import foo.bar as baz", }, # Import with an alias, comma separated { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")), asname=cst.AsName(cst.Name("baz")), ), cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("baz")), asname=cst.AsName(cst.Name("bar")), ), ) ), "code": "import foo.bar as baz, foo.baz as bar", }, # Combine for fun and profit { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")), asname=cst.AsName(cst.Name("baz")), ), cst.ImportAlias( cst.Attribute(cst.Name("insta"), cst.Name("gram")) ), cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("baz")) ), cst.ImportAlias( cst.Name("unittest"), asname=cst.AsName(cst.Name("ut")) ), ) ), "code": "import foo.bar as baz, insta.gram, foo.baz, unittest as ut", }, # Verify whitespace works everywhere. { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute( cst.Name("foo"), cst.Name("bar"), dot=cst.Dot( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), asname=cst.AsName( cst.Name("baz"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), cst.ImportAlias( cst.Name("unittest"), asname=cst.AsName( cst.Name("ut"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), ), ), whitespace_after_import=cst.SimpleWhitespace(" "), ), "code": "import foo . 
bar as baz , unittest as ut", "expected_position": CodeRange((1, 0), (1, 46)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": lambda: cst.Import(names=()), "expected_re": "at least one ImportAlias", }, { "get_node": lambda: cst.Import(names=(cst.ImportAlias(cst.Name("")),)), "expected_re": "empty name identifier", }, { "get_node": lambda: cst.Import( names=( cst.ImportAlias(cst.Attribute(cst.Name(""), cst.Name("bla"))), ) ), "expected_re": "empty name identifier", }, { "get_node": lambda: cst.Import( names=( cst.ImportAlias(cst.Attribute(cst.Name("bla"), cst.Name(""))), ) ), "expected_re": "empty name identifier", }, { "get_node": lambda: cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")), comma=cst.Comma(), ), ) ), "expected_re": "trailing comma", }, { "get_node": lambda: cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")) ), ), whitespace_after_import=cst.SimpleWhitespace(""), ), "expected_re": "at least one space", }, { "get_node": lambda: cst.Import( names=( cst.ImportAlias( cst.Name("foo"), asname=cst.AsName( cst.Name("bar"), whitespace_before_as=cst.SimpleWhitespace(""), ), ), ), ), "expected_re": "at least one space", }, { "get_node": lambda: cst.Import( names=[ cst.ImportAlias( name=cst.Attribute( value=cst.Float(value="0."), attr=cst.Name(value="A") ) ) ] ), "expected_re": "imported name must be a valid qualified name.", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) class ImportParseTest(CSTNodeTest): @data_provider( ( # Simple import statement { "node": cst.Import(names=(cst.ImportAlias(cst.Name("foo")),)), "code": "import foo", }, { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")) ), ) ), "code": "import foo.bar", }, { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")) ), ) ), "code": "import foo.bar", }, # Comma-separated list of imports { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("baz")) ), ) ), "code": "import foo.bar, foo.baz", }, # Import with an alias { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")), asname=cst.AsName(cst.Name("baz")), ), ) ), "code": "import foo.bar as baz", }, # Import with an alias, comma separated { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")), asname=cst.AsName(cst.Name("baz")), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("baz")), asname=cst.AsName(cst.Name("bar")), ), ) ), "code": "import foo.bar as baz, foo.baz as bar", }, # Combine for fun and profit { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("bar")), asname=cst.AsName(cst.Name("baz")), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.ImportAlias( cst.Attribute(cst.Name("insta"), cst.Name("gram")), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.ImportAlias( cst.Attribute(cst.Name("foo"), cst.Name("baz")), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.ImportAlias( cst.Name("unittest"), asname=cst.AsName(cst.Name("ut")) ), ) ), "code": "import foo.bar as baz, insta.gram, foo.baz, unittest as ut", }, # 
Verify whitespace works everywhere. { "node": cst.Import( names=( cst.ImportAlias( cst.Attribute( cst.Name("foo"), cst.Name("bar"), dot=cst.Dot( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), asname=cst.AsName( cst.Name("baz"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), cst.ImportAlias( cst.Name("unittest"), asname=cst.AsName( cst.Name("ut"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), ), ), whitespace_after_import=cst.SimpleWhitespace(" "), ), "code": "import foo . bar as baz , unittest as ut", }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node( parser=lambda code: ensure_type( parse_statement(code), cst.SimpleStatementLine ).body[0], **kwargs, ) class ImportFromCreateTest(CSTNodeTest): @data_provider( ( # Simple from import statement { "node": cst.ImportFrom( module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),) ), "code": "from foo import bar", }, # From import statement with alias { "node": cst.ImportFrom( module=cst.Name("foo"), names=( cst.ImportAlias( cst.Name("bar"), asname=cst.AsName(cst.Name("baz")) ), ), ), "code": "from foo import bar as baz", }, # Multiple imports { "node": cst.ImportFrom( module=cst.Name("foo"), names=( cst.ImportAlias(cst.Name("bar")), cst.ImportAlias(cst.Name("baz")), ), ), "code": "from foo import bar, baz", }, # Trailing comma { "node": cst.ImportFrom( module=cst.Name("foo"), names=( cst.ImportAlias(cst.Name("bar"), comma=cst.Comma()), cst.ImportAlias(cst.Name("baz"), comma=cst.Comma()), ), ), "code": "from foo import bar,baz,", "expected_position": CodeRange((1, 0), (1, 23)), }, # Star import statement { "node": cst.ImportFrom(module=cst.Name("foo"), names=cst.ImportStar()), "code": "from foo import *", "expected_position": CodeRange((1, 0), (1, 17)), }, # Simple relative import statement { "node": cst.ImportFrom( relative=(cst.Dot(),), module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),), ), "code": "from .foo import bar", }, { "node": cst.ImportFrom( relative=(cst.Dot(), cst.Dot()), module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),), ), "code": "from ..foo import bar", }, # Relative only import { "node": cst.ImportFrom( relative=(cst.Dot(), cst.Dot()), module=None, names=(cst.ImportAlias(cst.Name("bar")),), ), "code": "from .. import bar", }, # Parenthesis { "node": cst.ImportFrom( module=cst.Name("foo"), lpar=cst.LeftParen(), names=( cst.ImportAlias( cst.Name("bar"), asname=cst.AsName(cst.Name("baz")) ), ), rpar=cst.RightParen(), ), "code": "from foo import (bar as baz)", "expected_position": CodeRange((1, 0), (1, 28)), }, # Verify whitespace works everywhere. 
{ "node": cst.ImportFrom( relative=( cst.Dot( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), cst.Dot( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), module=cst.Name("foo"), lpar=cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")), names=( cst.ImportAlias( cst.Name("bar"), asname=cst.AsName( cst.Name("baz"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), cst.ImportAlias( cst.Name("unittest"), asname=cst.AsName( cst.Name("ut"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), ), ), rpar=cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")), whitespace_after_from=cst.SimpleWhitespace(" "), whitespace_before_import=cst.SimpleWhitespace(" "), whitespace_after_import=cst.SimpleWhitespace(" "), ), "code": "from . . foo import ( bar as baz , unittest as ut )", "expected_position": CodeRange((1, 0), (1, 61)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": lambda: cst.ImportFrom( module=None, names=(cst.ImportAlias(cst.Name("bar")),) ), "expected_re": "Must have a module specified", }, { "get_node": lambda: cst.ImportFrom(module=cst.Name("foo"), names=()), "expected_re": "at least one ImportAlias", }, { "get_node": lambda: cst.ImportFrom( module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),), lpar=cst.LeftParen(), ), "expected_re": "left paren without right paren", }, { "get_node": lambda: cst.ImportFrom( module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),), rpar=cst.RightParen(), ), "expected_re": "right paren without left paren", }, { "get_node": lambda: cst.ImportFrom( module=cst.Name("foo"), names=cst.ImportStar(), lpar=cst.LeftParen() ), "expected_re": "cannot have parens", }, { "get_node": lambda: cst.ImportFrom( module=cst.Name("foo"), names=cst.ImportStar(), rpar=cst.RightParen(), ), "expected_re": "cannot have parens", }, { "get_node": lambda: cst.ImportFrom( module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),), whitespace_after_from=cst.SimpleWhitespace(""), ), "expected_re": "one space after from", }, { "get_node": lambda: cst.ImportFrom( module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),), whitespace_before_import=cst.SimpleWhitespace(""), ), "expected_re": "one space before import", }, { "get_node": lambda: cst.ImportFrom( module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),), whitespace_after_import=cst.SimpleWhitespace(""), ), "expected_re": "one space after import", }, { "get_node": lambda: cst.ImportFrom( module=cst.Name("foo"), names=( cst.ImportAlias( cst.Name("bar"), asname=cst.AsName( cst.Name( "baz", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), whitespace_before_as=cst.SimpleWhitespace(""), ), ), ), ), "expected_re": "one space before as keyword", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) class ImportFromParseTest(CSTNodeTest): @data_provider( ( # Simple from import statement { "node": cst.ImportFrom( module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),) ), "code": "from foo import bar", }, # From import statement with alias { "node": cst.ImportFrom( module=cst.Name("foo"), names=( cst.ImportAlias( cst.Name("bar"), asname=cst.AsName(cst.Name("baz")) ), ), ), "code": "from foo 
import bar as baz", }, # Multiple imports { "node": cst.ImportFrom( module=cst.Name("foo"), names=( cst.ImportAlias( cst.Name("bar"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.ImportAlias(cst.Name("baz")), ), ), "code": "from foo import bar, baz", }, # Trailing comma { "node": cst.ImportFrom( module=cst.Name("foo"), names=( cst.ImportAlias( cst.Name("bar"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.ImportAlias(cst.Name("baz"), comma=cst.Comma()), ), lpar=cst.LeftParen(), rpar=cst.RightParen(), ), "code": "from foo import (bar, baz,)", }, # Star import statement { "node": cst.ImportFrom(module=cst.Name("foo"), names=cst.ImportStar()), "code": "from foo import *", }, # Simple relative import statement { "node": cst.ImportFrom( relative=(cst.Dot(),), module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),), ), "code": "from .foo import bar", }, { "node": cst.ImportFrom( relative=(cst.Dot(), cst.Dot()), module=cst.Name("foo"), names=(cst.ImportAlias(cst.Name("bar")),), ), "code": "from ..foo import bar", }, # Relative only import { "node": cst.ImportFrom( relative=(cst.Dot(), cst.Dot()), module=None, names=(cst.ImportAlias(cst.Name("bar")),), ), "code": "from .. import bar", }, # Parenthesis { "node": cst.ImportFrom( module=cst.Name("foo"), lpar=cst.LeftParen(), names=( cst.ImportAlias( cst.Name("bar"), asname=cst.AsName(cst.Name("baz")) ), ), rpar=cst.RightParen(), ), "code": "from foo import (bar as baz)", }, # Verify whitespace works everywhere. { "node": cst.ImportFrom( relative=( cst.Dot( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(" "), ), cst.Dot( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(" "), ), ), module=cst.Name("foo"), lpar=cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")), names=( cst.ImportAlias( cst.Name("bar"), asname=cst.AsName( cst.Name("baz"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), cst.ImportAlias( cst.Name("unittest"), asname=cst.AsName( cst.Name("ut"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), ), ), rpar=cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")), whitespace_after_from=cst.SimpleWhitespace(" "), whitespace_before_import=cst.SimpleWhitespace(" "), whitespace_after_import=cst.SimpleWhitespace(" "), ), "code": "from . . foo import ( bar as baz , unittest as ut )", }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node( parser=lambda code: ensure_type( parse_statement(code), cst.SimpleStatementLine ).body[0], **kwargs, ) LibCST-1.2.0/libcst/_nodes/tests/test_indented_block.py000066400000000000000000000124701456464173300230610ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import Callable, Optional import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst.testing.utils import data_provider class IndentedBlockTest(CSTNodeTest): @data_provider( ( # Standard render ( cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), "\n pass\n", None, ), # Render with empty (cst.IndentedBlock(()), "\n pass\n", None), # Render with empty subnodes (cst.IndentedBlock((cst.SimpleStatementLine(()),)), "\n pass\n", None), # Test render with custom indent ( cst.IndentedBlock( (cst.SimpleStatementLine((cst.Pass(),)),), indent="\t" ), "\n\tpass\n", None, ), # Test comments ( cst.IndentedBlock( (cst.SimpleStatementLine((cst.Pass(),)),), header=cst.TrailingWhitespace( whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# header comment"), ), ), " # header comment\n pass\n", None, ), ( cst.IndentedBlock( (cst.SimpleStatementLine((cst.Pass(),)),), footer=(cst.EmptyLine(comment=cst.Comment("# footer comment")),), ), "\n pass\n # footer comment\n", None, ), ( cst.IndentedBlock( (cst.SimpleStatementLine((cst.Pass(),)),), footer=( cst.EmptyLine( whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# footer comment"), ), ), ), "\n pass\n # footer comment\n", None, ), ( cst.IndentedBlock( ( cst.SimpleStatementLine((cst.Continue(),)), cst.SimpleStatementLine((cst.Pass(),)), ) ), "\n continue\n pass\n", None, ), # Basic parsing test ( cst.If( cst.Name("conditional"), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), ), "if conditional:\n pass\n", parse_statement, ), # Multi-level parsing test ( cst.If( cst.Name("conditional"), cst.IndentedBlock( ( cst.SimpleStatementLine((cst.Pass(),)), cst.If( cst.Name("other_conditional"), cst.IndentedBlock( (cst.SimpleStatementLine((cst.Pass(),)),) ), ), ) ), ), "if conditional:\n pass\n if other_conditional:\n pass\n", parse_statement, ), # Inconsistent indentation parsing test ( cst.If( cst.Name("conditional"), cst.IndentedBlock( ( cst.SimpleStatementLine((cst.Pass(),)), cst.If( cst.Name("other_conditional"), cst.IndentedBlock( (cst.SimpleStatementLine((cst.Pass(),)),), indent=" ", ), ), ) ), ), "if conditional:\n pass\n if other_conditional:\n pass\n", parse_statement, ), ) ) def test_valid( self, node: cst.CSTNode, code: str, parser: Optional[Callable[[str], cst.CSTNode]], ) -> None: self.validate_node(node, code, parser) @data_provider( ( ( lambda: cst.IndentedBlock( (cst.SimpleStatementLine((cst.Pass(),)),), indent="" ), "non-zero width indent", ), ( lambda: cst.IndentedBlock( (cst.SimpleStatementLine((cst.Pass(),)),), indent="this isn't valid whitespace!", ), "only whitespace", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) LibCST-1.2.0/libcst/_nodes/tests/test_lambda.py000066400000000000000000001177101456464173300213400ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
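# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original test suite.
# Lambdas reuse the same ``cst.Parameters`` node as ``FunctionDef``, with the
# extra restriction -- exercised by the invalid cases below -- that no
# parameter may carry a type annotation. A minimal demonstration:
def _lambda_example_sketch() -> None:
    import libcst as cst

    node = cst.parse_expression("lambda bar, baz=5: 5")
    assert isinstance(node, cst.Lambda)
    assert len(node.params.params) == 2

    try:  # annotations on lambda params fail validation at construction time
        cst.Lambda(
            cst.Parameters(
                params=(
                    cst.Param(
                        cst.Name("x"), annotation=cst.Annotation(cst.Name("int"))
                    ),
                )
            ),
            cst.Integer("5"),
        )
    except cst.CSTValidationError:
        pass
# ---------------------------------------------------------------------------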
from typing import Callable, Optional import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class LambdaCreationTest(CSTNodeTest): @data_provider( ( # Simple lambda (cst.Lambda(cst.Parameters(), cst.Integer("5")), "lambda: 5"), # Test basic positional only params { "node": cst.Lambda( cst.Parameters( posonly_params=( cst.Param(cst.Name("bar")), cst.Param(cst.Name("baz")), ) ), cst.Integer("5"), ), "code": "lambda bar, baz, /: 5", }, # Test basic positional only params with extra trailing whitespace { "node": cst.Lambda( cst.Parameters( posonly_params=( cst.Param(cst.Name("bar")), cst.Param(cst.Name("baz")), ), posonly_ind=cst.ParamSlash( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Integer("5"), ), "code": "lambda bar, baz, / : 5", }, # Test basic positional params ( cst.Lambda( cst.Parameters( params=(cst.Param(cst.Name("bar")), cst.Param(cst.Name("baz"))) ), cst.Integer("5"), ), "lambda bar, baz: 5", ), # Test basic positional default params ( cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz"), default=cst.Integer("5")), ) ), cst.Integer("5"), ), 'lambda bar = "one", baz = 5: 5', ), # Mixed positional and default params. ( cst.Lambda( cst.Parameters( params=( cst.Param(cst.Name("bar")), cst.Param(cst.Name("baz"), default=cst.Integer("5")), ) ), cst.Integer("5"), ), "lambda bar, baz = 5: 5", ), # Test kwonly params ( cst.Lambda( cst.Parameters( kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz")), ) ), cst.Integer("5"), ), 'lambda *, bar = "one", baz: 5', ), # Mixed params and kwonly_params ( cst.Lambda( cst.Parameters( params=( cst.Param(cst.Name("first")), cst.Param(cst.Name("second")), ), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz")), cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"') ), ), ), cst.Integer("5"), ), 'lambda first, second, *, bar = "one", baz, biz = "two": 5', ), # Mixed params and kwonly_params ( cst.Lambda( cst.Parameters( params=( cst.Param(cst.Name("first"), default=cst.Float("1.0")), cst.Param(cst.Name("second"), default=cst.Float("1.5")), ), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz")), cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"') ), ), ), cst.Integer("5"), ), 'lambda first = 1.0, second = 1.5, *, bar = "one", baz, biz = "two": 5', ), # Mixed params and kwonly_params ( cst.Lambda( cst.Parameters( params=( cst.Param(cst.Name("first")), cst.Param(cst.Name("second")), cst.Param(cst.Name("third"), default=cst.Float("1.0")), cst.Param(cst.Name("fourth"), default=cst.Float("1.5")), ), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz")), cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"') ), ), ), cst.Integer("5"), ), 'lambda first, second, third = 1.0, fourth = 1.5, *, bar = "one", baz, biz = "two": 5', CodeRange((1, 0), (1, 84)), ), # Test star_arg ( cst.Lambda( cst.Parameters(star_arg=cst.Param(cst.Name("params"))), cst.Integer("5"), ), "lambda *params: 5", ), # Typed star_arg, include kwonly_params ( cst.Lambda( cst.Parameters( star_arg=cst.Param(cst.Name("params")), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz")), 
cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"') ), ), ), cst.Integer("5"), ), 'lambda *params, bar = "one", baz, biz = "two": 5', ), # Mixed params, star_arg and kwonly_params ( cst.Lambda( cst.Parameters( params=( cst.Param(cst.Name("first")), cst.Param(cst.Name("second")), cst.Param(cst.Name("third"), default=cst.Float("1.0")), cst.Param(cst.Name("fourth"), default=cst.Float("1.5")), ), star_arg=cst.Param(cst.Name("params")), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("baz")), cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"') ), ), ), cst.Integer("5"), ), 'lambda first, second, third = 1.0, fourth = 1.5, *params, bar = "one", baz, biz = "two": 5', ), # Test star_arg and star_kwarg ( cst.Lambda( cst.Parameters(star_kwarg=cst.Param(cst.Name("kwparams"))), cst.Integer("5"), ), "lambda **kwparams: 5", ), # Test star_arg and kwarg ( cst.Lambda( cst.Parameters( star_arg=cst.Param(cst.Name("params")), star_kwarg=cst.Param(cst.Name("kwparams")), ), cst.Integer("5"), ), "lambda *params, **kwparams: 5", ), # Inner whitespace ( cst.Lambda( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), whitespace_after_lambda=cst.SimpleWhitespace(" "), params=cst.Parameters(), colon=cst.Colon(whitespace_after=cst.SimpleWhitespace(" ")), body=cst.Integer("5"), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "( lambda : 5 )", CodeRange((1, 2), (1, 13)), ), ) ) def test_valid( self, node: cst.CSTNode, code: str, position: Optional[CodeRange] = None ) -> None: self.validate_node(node, code, expected_position=position) @data_provider( ( ( lambda: cst.Lambda( cst.Parameters(params=(cst.Param(cst.Name("arg")),)), cst.Integer("5"), lpar=(cst.LeftParen(),), ), "left paren without right paren", ), ( lambda: cst.Lambda( cst.Parameters(params=(cst.Param(cst.Name("arg")),)), cst.Integer("5"), rpar=(cst.RightParen(),), ), "right paren without left paren", ), ( lambda: cst.Lambda( cst.Parameters(posonly_params=(cst.Param(cst.Name("arg")),)), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "at least one space after lambda", ), ( lambda: cst.Lambda( cst.Parameters(params=(cst.Param(cst.Name("arg")),)), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "at least one space after lambda", ), ( lambda: cst.Lambda( cst.Parameters( params=(cst.Param(cst.Name("arg"), default=cst.Integer("5")),) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "at least one space after lambda", ), ( lambda: cst.Lambda( cst.Parameters( star_kwarg=cst.Param(cst.Name("bar"), equal=cst.AssignEqual()) ), cst.Integer("5"), ), "Must have a default when specifying an AssignEqual.", ), ( lambda: cst.Lambda( cst.Parameters(star_kwarg=cst.Param(cst.Name("bar"), star="***")), cst.Integer("5"), ), r"Must specify either '', '\*' or '\*\*' for star.", ), ( lambda: cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"') ), cst.Param(cst.Name("bar")), ) ), cst.Integer("5"), ), "Cannot have param without defaults following a param with defaults.", ), ( lambda: cst.Lambda( cst.Parameters(star_arg=cst.ParamStar()), cst.Integer("5") ), "Must have at least one kwonly param if ParamStar is used.", ), ( lambda: cst.Lambda( cst.Parameters(params=(cst.Param(cst.Name("bar"), star="*"),)), cst.Integer("5"), ), "Expecting a star prefix of ''", ), ( lambda: cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("bar"), 
default=cst.SimpleString('"one"'), star="*", ), ) ), cst.Integer("5"), ), "Expecting a star prefix of ''", ), ( lambda: cst.Lambda( cst.Parameters( kwonly_params=(cst.Param(cst.Name("bar"), star="*"),) ), cst.Integer("5"), ), "Expecting a star prefix of ''", ), ( lambda: cst.Lambda( cst.Parameters(star_arg=cst.Param(cst.Name("bar"), star="**")), cst.Integer("5"), ), r"Expecting a star prefix of '\*'", ), ( lambda: cst.Lambda( cst.Parameters(star_kwarg=cst.Param(cst.Name("bar"), star="*")), cst.Integer("5"), ), r"Expecting a star prefix of '\*\*'", ), ( lambda: cst.Lambda( cst.Parameters( posonly_params=( cst.Param( cst.Name("arg"), annotation=cst.Annotation(cst.Name("str")), ), ) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "Lambda params cannot have type annotations", ), ( lambda: cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("arg"), annotation=cst.Annotation(cst.Name("str")), ), ) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "Lambda params cannot have type annotations", ), ( lambda: cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("arg"), default=cst.Integer("5"), annotation=cst.Annotation(cst.Name("str")), ), ) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "Lambda params cannot have type annotations", ), ( lambda: cst.Lambda( cst.Parameters( star_arg=cst.Param( cst.Name("arg"), annotation=cst.Annotation(cst.Name("str")) ) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "Lambda params cannot have type annotations", ), ( lambda: cst.Lambda( cst.Parameters( kwonly_params=( cst.Param( cst.Name("arg"), annotation=cst.Annotation(cst.Name("str")), ), ) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "Lambda params cannot have type annotations", ), ( lambda: cst.Lambda( cst.Parameters( star_kwarg=cst.Param( cst.Name("arg"), annotation=cst.Annotation(cst.Name("str")) ) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "Lambda params cannot have type annotations", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) def _parse_expression_force_38(code: str) -> cst.BaseExpression: return cst.parse_expression( code, config=cst.PartialParserConfig(python_version="3.8") ) class LambdaParserTest(CSTNodeTest): @data_provider( ( # Simple lambda (cst.Lambda(cst.Parameters(), cst.Integer("5")), "lambda: 5"), # Test basic positional params ( cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param(cst.Name("baz"), star=""), ) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), "lambda bar, baz: 5", ), # Test basic positional default params ( cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"'), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), default=cst.Integer("5"), equal=cst.AssignEqual(), star="", ), ) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), 'lambda bar = "one", baz = 5: 5', ), # Mixed positional and default params. 
( cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), default=cst.Integer("5"), equal=cst.AssignEqual(), star="", ), ) ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), "lambda bar, baz = 5: 5", ), # Test kwonly params ( cst.Lambda( cst.Parameters( star_arg=cst.ParamStar(), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"'), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param(cst.Name("baz"), star=""), ), ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), 'lambda *, bar = "one", baz: 5', ), # Mixed params and kwonly_params ( cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("first"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("second"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), star_arg=cst.ParamStar(), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"'), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"'), equal=cst.AssignEqual(), star="", ), ), ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), 'lambda first, second, *, bar = "one", baz, biz = "two": 5', ), # Mixed params and kwonly_params ( cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("first"), default=cst.Float("1.0"), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("second"), default=cst.Float("1.5"), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), star_arg=cst.ParamStar(), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"'), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"'), equal=cst.AssignEqual(), star="", ), ), ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), 'lambda first = 1.0, second = 1.5, *, bar = "one", baz, biz = "two": 5', ), # Mixed params and kwonly_params ( cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("first"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("second"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("third"), default=cst.Float("1.0"), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("fourth"), default=cst.Float("1.5"), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), star_arg=cst.ParamStar(), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"'), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"'), equal=cst.AssignEqual(), star="", ), ), ), 
cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), 'lambda first, second, third = 1.0, fourth = 1.5, *, bar = "one", baz, biz = "two": 5', ), # Test star_arg ( cst.Lambda( cst.Parameters(star_arg=cst.Param(cst.Name("params"), star="*")), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), "lambda *params: 5", ), # Typed star_arg, include kwonly_params ( cst.Lambda( cst.Parameters( star_arg=cst.Param( cst.Name("params"), star="*", comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"'), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"'), equal=cst.AssignEqual(), star="", ), ), ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), 'lambda *params, bar = "one", baz, biz = "two": 5', ), # Mixed params, star_arg and kwonly_params ( cst.Lambda( cst.Parameters( params=( cst.Param( cst.Name("first"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("second"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("third"), default=cst.Float("1.0"), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("fourth"), default=cst.Float("1.5"), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), star_arg=cst.Param( cst.Name("params"), star="*", comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), kwonly_params=( cst.Param( cst.Name("bar"), default=cst.SimpleString('"one"'), equal=cst.AssignEqual(), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("biz"), default=cst.SimpleString('"two"'), equal=cst.AssignEqual(), star="", ), ), ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), 'lambda first, second, third = 1.0, fourth = 1.5, *params, bar = "one", baz, biz = "two": 5', ), # Test star_arg and star_kwarg ( cst.Lambda( cst.Parameters( star_kwarg=cst.Param(cst.Name("kwparams"), star="**") ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), "lambda **kwparams: 5", ), # Test star_arg and kwarg ( cst.Lambda( cst.Parameters( star_arg=cst.Param( cst.Name("params"), star="*", comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), star_kwarg=cst.Param(cst.Name("kwparams"), star="**"), ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), "lambda *params, **kwparams: 5", ), # Inner whitespace ( cst.Lambda( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), params=cst.Parameters(), colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), body=cst.Integer("5"), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "( lambda : 5 )", ), # No space between lambda and params ( cst.Lambda( cst.Parameters(star_arg=cst.Param(cst.Name("args"), star="*")), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "lambda*args: 5", ), ( cst.Lambda( cst.Parameters(star_kwarg=cst.Param(cst.Name("kwargs"), star="**")), cst.Integer("5"), 
whitespace_after_lambda=cst.SimpleWhitespace(""), ), "lambda**kwargs: 5", ), ( cst.Lambda( cst.Parameters( star_arg=cst.ParamStar( comma=cst.Comma( cst.SimpleWhitespace(""), cst.SimpleWhitespace("") ) ), kwonly_params=[cst.Param(cst.Name("args"), star="")], ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(""), ), "lambda*,args: 5", ), ( cst.ListComp( elt=cst.Lambda( params=cst.Parameters(), body=cst.Tuple(()), colon=cst.Colon(), ), for_in=cst.CompFor( target=cst.Name("_"), iter=cst.Name("_"), whitespace_before=cst.SimpleWhitespace(""), ), ), "[lambda:()for _ in _]", ), ) ) def test_valid( self, node: cst.CSTNode, code: str, position: Optional[CodeRange] = None ) -> None: self.validate_node(node, code, parse_expression, position) @data_provider( ( # Test basic positional only params { "node": cst.Lambda( cst.Parameters( posonly_params=( cst.Param( cst.Name("bar"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Param( cst.Name("baz"), star="", comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), ), posonly_ind=cst.ParamSlash(), ), cst.Integer("5"), whitespace_after_lambda=cst.SimpleWhitespace(" "), ), "code": "lambda bar, baz, /: 5", }, ) ) def test_valid_38( self, node: cst.CSTNode, code: str, position: Optional[CodeRange] = None ) -> None: self.validate_node(node, code, _parse_expression_force_38, position) LibCST-1.2.0/libcst/_nodes/tests/test_leaf_small_statements.py000066400000000000000000000010551456464173300244600ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import libcst as cst from libcst._nodes.tests.base import CSTNodeTest from libcst.testing.utils import data_provider class LeafSmallStatementsTest(CSTNodeTest): @data_provider( ((cst.Pass(), "pass"), (cst.Break(), "break"), (cst.Continue(), "continue")) ) def test_valid(self, node: cst.CSTNode, code: str) -> None: self.validate_node(node, code) LibCST-1.2.0/libcst/_nodes/tests/test_list.py000066400000000000000000000111261456464173300210650ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Callable import libcst as cst from libcst import parse_expression, parse_statement from libcst._nodes.tests.base import CSTNodeTest, parse_expression_as from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class ListTest(CSTNodeTest): # A lot of Element/StarredElement tests are provided by the tests for Tuple, so we # we don't need to duplicate them here. 
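    # Illustrative sketch (drawn from the cases below, not an additional
    # test): a cst.List wraps Element nodes and renders back to source via
    # Module.code_for_node, e.g.
    #
    #     import libcst as cst
    #     node = cst.List([cst.Element(cst.Name("single_element"))])
    #     cst.Module([]).code_for_node(node)  # -> "[single_element]"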
@data_provider( [ # zero-element list {"node": cst.List([]), "code": "[]", "parser": parse_expression}, # one-element list, sentinel comma value { "node": cst.List([cst.Element(cst.Name("single_element"))]), "code": "[single_element]", "parser": parse_expression, }, # custom whitespace between brackets { "node": cst.List( [cst.Element(cst.Name("single_element"))], lbracket=cst.LeftSquareBracket( whitespace_after=cst.SimpleWhitespace("\t") ), rbracket=cst.RightSquareBracket( whitespace_before=cst.SimpleWhitespace(" ") ), ), "code": "[\tsingle_element ]", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 21)), }, # two-element list, sentinel comma value { "node": cst.List( [cst.Element(cst.Name("one")), cst.Element(cst.Name("two"))] ), "code": "[one, two]", "parser": None, }, # with parenthesis { "node": cst.List( [cst.Element(cst.Name("one"))], lpar=[cst.LeftParen()], rpar=[cst.RightParen()], ), "code": "([one])", "parser": None, "expected_position": CodeRange((1, 1), (1, 6)), }, # starred element { "node": cst.List( [ cst.StarredElement(cst.Name("one")), cst.StarredElement(cst.Name("two")), ] ), "code": "[*one, *two]", "parser": None, "expected_position": CodeRange((1, 0), (1, 12)), }, # missing spaces around list, always okay { "node": cst.For( target=cst.List( [ cst.Element(cst.Name("k"), comma=cst.Comma()), cst.Element(cst.Name("v")), ] ), iter=cst.Name("abc"), body=cst.SimpleStatementSuite([cst.Pass()]), whitespace_after_for=cst.SimpleWhitespace(""), whitespace_before_in=cst.SimpleWhitespace(""), ), "code": "for[k,v]in abc: pass\n", "parser": parse_statement, }, ] ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( ( lambda: cst.List( [cst.Element(cst.Name("mismatched"))], lpar=[cst.LeftParen(), cst.LeftParen()], rpar=[cst.RightParen()], ), "unbalanced parens", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) @data_provider( ( { "code": "[a, *b]", "parser": parse_expression_as(python_version="3.5"), "expect_success": True, }, { "code": "[a, *b]", "parser": parse_expression_as(python_version="3.3"), "expect_success": False, }, ) ) def test_versions(self, **kwargs: Any) -> None: if is_native() and not kwargs.get("expect_success", True): self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_match.py000066400000000000000000000534361456464173300212200ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
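# These tests exercise the structural pattern matching (PEP 634, Python 3.10)
# nodes. `parser` below is set only under the native parser, since the
# pure-Python parser predates `match` statements. An illustrative
# construction, mirroring the first case below:
#
#     import libcst as cst
#     case = cst.MatchCase(
#         pattern=cst.MatchSingleton(cst.Name("None")),
#         body=cst.SimpleStatementSuite((cst.Pass(),)),
#     )
#     node = cst.Match(subject=cst.Name("x"), cases=[case])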
from typing import Any, Callable, Optional import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst._parser.entrypoints import is_native from libcst.testing.utils import data_provider parser: Optional[Callable[[str], cst.CSTNode]] = ( parse_statement if is_native() else None ) class MatchTest(CSTNodeTest): @data_provider( ( # Values and singletons { "node": cst.Match( subject=cst.Name("x"), cases=[ cst.MatchCase( pattern=cst.MatchSingleton(cst.Name("None")), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( pattern=cst.MatchValue(cst.SimpleString('"foo"')), body=cst.SimpleStatementSuite((cst.Pass(),)), ), ], ), "code": "match x:\n" + " case None: pass\n" + ' case "foo": pass\n', "parser": parser, }, # Parenthesized value { "node": cst.Match( subject=cst.Name( value="x", ), cases=[ cst.MatchCase( pattern=cst.MatchAs( pattern=cst.MatchValue( value=cst.Integer( value="1", lpar=[ cst.LeftParen(), ], rpar=[ cst.RightParen(), ], ), ), name=cst.Name( value="z", ), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), body=cst.SimpleStatementSuite([cst.Pass()]), ), ], ), "code": "match x:\n case (1) as z: pass\n", "parser": parser, }, # List patterns { "node": cst.Match( subject=cst.Name("x"), cases=[ cst.MatchCase( # empty list pattern=cst.MatchList( [], lbracket=cst.LeftSquareBracket(), rbracket=cst.RightSquareBracket(), ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # single element list pattern=cst.MatchList( [ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")) ) ], lbracket=cst.LeftSquareBracket(), rbracket=cst.RightSquareBracket(), ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # single element list with trailing comma pattern=cst.MatchList( [ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")), cst.Comma(), ) ], lbracket=cst.LeftSquareBracket(), rbracket=cst.RightSquareBracket(), ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), ], ), "code": ( "match x:\n" + " case []: pass\n" + " case [None]: pass\n" + " case [None,]: pass\n" ), "parser": parser, }, # Tuple patterns { "node": cst.Match( subject=cst.Name("x"), cases=[ cst.MatchCase( # empty tuple pattern=cst.MatchTuple( [], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # two element tuple pattern=cst.MatchTuple( [ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")), cst.Comma(), ), cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")), ), ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # single element tuple with trailing comma pattern=cst.MatchTuple( [ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")), cst.Comma(), ) ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # two element tuple pattern=cst.MatchTuple( [ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")), cst.Comma(), ), cst.MatchStar( comma=cst.Comma(), ), cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")), ), ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), ], ), "code": ( "match x:\n" + " case (): pass\n" + " case (None,None): pass\n" + " case (None,): pass\n" + " case (None,*_,None): pass\n" ), "parser": parser, }, # Mapping patterns { "node": cst.Match( subject=cst.Name("x"), cases=[ cst.MatchCase( # empty mapping pattern=cst.MatchMapping( [], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # two element mapping pattern=cst.MatchMapping( [ 
cst.MatchMappingElement( key=cst.SimpleString('"a"'), pattern=cst.MatchSingleton(cst.Name("None")), comma=cst.Comma(), ), cst.MatchMappingElement( key=cst.SimpleString('"b"'), pattern=cst.MatchSingleton(cst.Name("None")), ), ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # single element mapping with trailing comma pattern=cst.MatchMapping( [ cst.MatchMappingElement( key=cst.SimpleString('"a"'), pattern=cst.MatchSingleton(cst.Name("None")), comma=cst.Comma(), ) ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # rest pattern=cst.MatchMapping( rest=cst.Name("rest"), ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), ], ), "code": ( "match x:\n" + " case {}: pass\n" + ' case {"a": None,"b": None}: pass\n' + ' case {"a": None,}: pass\n' + " case {**rest}: pass\n" ), "parser": parser, }, # Class patterns { "node": cst.Match( subject=cst.Name("x"), cases=[ cst.MatchCase( # empty class pattern=cst.MatchClass( cls=cst.Attribute(cst.Name("a"), cst.Name("b")), ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # single pattern class pattern=cst.MatchClass( cls=cst.Attribute(cst.Name("a"), cst.Name("b")), patterns=[ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")) ) ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # single pattern class with trailing comma pattern=cst.MatchClass( cls=cst.Attribute(cst.Name("a"), cst.Name("b")), patterns=[ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")), comma=cst.Comma(), ) ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # single keyword pattern class pattern=cst.MatchClass( cls=cst.Attribute(cst.Name("a"), cst.Name("b")), kwds=[ cst.MatchKeywordElement( key=cst.Name("foo"), pattern=cst.MatchSingleton(cst.Name("None")), ) ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # single keyword pattern class with trailing comma pattern=cst.MatchClass( cls=cst.Attribute(cst.Name("a"), cst.Name("b")), kwds=[ cst.MatchKeywordElement( key=cst.Name("foo"), pattern=cst.MatchSingleton(cst.Name("None")), comma=cst.Comma(), ) ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( # now all at once pattern=cst.MatchClass( cls=cst.Attribute(cst.Name("a"), cst.Name("b")), patterns=[ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")), cst.Comma(), ), cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")), cst.Comma(), ), ], kwds=[ cst.MatchKeywordElement( key=cst.Name("foo"), pattern=cst.MatchSingleton(cst.Name("None")), comma=cst.Comma(), ), cst.MatchKeywordElement( key=cst.Name("bar"), pattern=cst.MatchSingleton(cst.Name("None")), comma=cst.Comma(), ), ], ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), ], ), "code": ( "match x:\n" + " case a.b(): pass\n" + " case a.b(None): pass\n" + " case a.b(None,): pass\n" + " case a.b(foo=None): pass\n" + " case a.b(foo=None,): pass\n" + " case a.b(None,None,foo=None,bar=None,): pass\n" ), "parser": parser, }, # as pattern { "node": cst.Match( subject=cst.Name("x"), cases=[ cst.MatchCase( pattern=cst.MatchAs(), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( pattern=cst.MatchAs(name=cst.Name("foo")), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( pattern=cst.MatchAs( pattern=cst.MatchSingleton(cst.Name("None")), name=cst.Name("bar"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), ], ), "code": "match x:\n" + " case _: pass\n" + 
" case foo: pass\n" + " case None as bar: pass\n", "parser": parser, }, # or pattern { "node": cst.Match( subject=cst.Name("x"), cases=[ cst.MatchCase( pattern=cst.MatchOr( [ cst.MatchOrElement( cst.MatchSingleton(cst.Name("None")), cst.BitOr(), ), cst.MatchOrElement( cst.MatchSingleton(cst.Name("False")), cst.BitOr(), ), cst.MatchOrElement( cst.MatchSingleton(cst.Name("True")) ), ] ), body=cst.SimpleStatementSuite((cst.Pass(),)), ) ], ), "code": "match x:\n case None | False | True: pass\n", "parser": parser, }, { # exercise sentinels "node": cst.Match( subject=cst.Name("x"), cases=[ cst.MatchCase( pattern=cst.MatchList( [cst.MatchStar(), cst.MatchStar()], lbracket=None, rbracket=None, ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( pattern=cst.MatchTuple( [ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")) ) ] ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( pattern=cst.MatchAs( pattern=cst.MatchTuple( [ cst.MatchSequenceElement( cst.MatchSingleton(cst.Name("None")) ) ] ), name=cst.Name("bar"), ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), cst.MatchCase( pattern=cst.MatchOr( [ cst.MatchOrElement( cst.MatchSingleton(cst.Name("None")), ), cst.MatchOrElement( cst.MatchSingleton(cst.Name("False")), ), cst.MatchOrElement( cst.MatchSingleton(cst.Name("True")) ), ] ), body=cst.SimpleStatementSuite((cst.Pass(),)), ), ], ), "code": "match x:\n" + " case *_, *_: pass\n" + " case (None,): pass\n" + " case (None,) as bar: pass\n" + " case None | False | True: pass\n", "parser": None, }, # Match without whitespace between keyword and the expr { "node": cst.Match( subject=cst.Name( "x", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ), cases=[ cst.MatchCase( pattern=cst.MatchSingleton( cst.Name( "None", lpar=[cst.LeftParen()], rpar=[cst.RightParen()], ) ), body=cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_case=cst.SimpleWhitespace( value="", ), ), ], whitespace_after_match=cst.SimpleWhitespace( value="", ), ), "code": "match(x):\n case(None): pass\n", "parser": parser, }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_matrix_multiply.py000066400000000000000000000044761456464173300233670ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import Any import libcst as cst from libcst._nodes.tests.base import ( CSTNodeTest, parse_expression_as, parse_statement_as, ) from libcst._parser.entrypoints import is_native from libcst.testing.utils import data_provider class NamedExprTest(CSTNodeTest): @data_provider( ( { "node": cst.BinaryOperation( left=cst.Name("a"), operator=cst.MatrixMultiply(), right=cst.Name("b"), ), "code": "a @ b", "parser": parse_expression_as(python_version="3.8"), }, { "node": cst.SimpleStatementLine( body=( cst.AugAssign( target=cst.Name("a"), operator=cst.MatrixMultiplyAssign(), value=cst.Name("b"), ), ), ), "code": "a @= b\n", "parser": parse_statement_as(python_version="3.8"), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "code": "a @ b", "parser": parse_expression_as(python_version="3.6"), "expect_success": True, }, { "code": "a @ b", "parser": parse_expression_as(python_version="3.3"), "expect_success": False, }, { "code": "a @= b", "parser": parse_statement_as(python_version="3.6"), "expect_success": True, }, { "code": "a @= b", "parser": parse_statement_as(python_version="3.3"), "expect_success": False, }, ) ) def test_versions(self, **kwargs: Any) -> None: if is_native() and not kwargs.get("expect_success", True): self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_module.py000066400000000000000000000201421456464173300213750ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import cast, Tuple import libcst as cst from libcst import parse_module, parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange, MetadataWrapper, PositionProvider from libcst.testing.utils import data_provider class ModuleTest(CSTNodeTest): @data_provider( ( # simplest possible program (cst.Module((cst.SimpleStatementLine((cst.Pass(),)),)), "pass\n"), # test default_newline ( cst.Module( (cst.SimpleStatementLine((cst.Pass(),)),), default_newline="\r" ), "pass\r", ), # test header/footer ( cst.Module( (cst.SimpleStatementLine((cst.Pass(),)),), header=(cst.EmptyLine(comment=cst.Comment("# header")),), footer=(cst.EmptyLine(comment=cst.Comment("# footer")),), ), "# header\npass\n# footer\n", ), # test has_trailing_newline ( cst.Module( (cst.SimpleStatementLine((cst.Pass(),)),), has_trailing_newline=False, ), "pass", ), # an empty file (cst.Module((), has_trailing_newline=False), ""), # a file with only comments ( cst.Module( (), header=( cst.EmptyLine(comment=cst.Comment("# nothing to see here")), ), ), "# nothing to see here\n", ), # TODO: test default_indent ) ) def test_code_and_bytes_properties(self, module: cst.Module, expected: str) -> None: self.assertEqual(module.code, expected) self.assertEqual(module.bytes, expected.encode("utf-8")) @data_provider( ( (cst.Module(()), cst.Newline(), "\n"), (cst.Module((), default_newline="\r\n"), cst.Newline(), "\r\n"), # has_trailing_newline has no effect on code_for_node (cst.Module((), has_trailing_newline=False), cst.Newline(), "\n"), # TODO: test default_indent ) ) def test_code_for_node( self, module: cst.Module, node: cst.CSTNode, expected: str ) -> None: self.assertEqual(module.code_for_node(node), expected) @data_provider( { "empty_program": { "code": "", "expected": 
cst.Module([], has_trailing_newline=False), }, "empty_program_with_newline": { "code": "\n", "expected": cst.Module([], has_trailing_newline=True), "enabled_for_native": False, }, "empty_program_with_comments": { "code": "# some comment\n", "expected": cst.Module( [], header=[cst.EmptyLine(comment=cst.Comment("# some comment"))] ), }, "simple_pass": { "code": "pass\n", "expected": cst.Module([cst.SimpleStatementLine([cst.Pass()])]), }, "simple_pass_with_header_footer": { "code": "# header\npass # trailing\n# footer\n", "expected": cst.Module( [ cst.SimpleStatementLine( [cst.Pass()], trailing_whitespace=cst.TrailingWhitespace( whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# trailing"), ), ) ], header=[cst.EmptyLine(comment=cst.Comment("# header"))], footer=[cst.EmptyLine(comment=cst.Comment("# footer"))], ), }, } ) def test_parser( self, *, code: str, expected: cst.Module, enabled_for_native: bool = True ) -> None: if is_native() and not enabled_for_native: self.skipTest("Disabled for native parser") self.assertEqual(parse_module(code), expected) @data_provider( { "empty": {"code": "", "expected": CodeRange((1, 0), (1, 0))}, "empty_with_newline": {"code": "\n", "expected": CodeRange((1, 0), (2, 0))}, "empty_program_with_comments": { "code": "# 2345", "expected": CodeRange((1, 0), (2, 0)), }, "simple_pass": {"code": "pass\n", "expected": CodeRange((1, 0), (2, 0))}, "simple_pass_with_header_footer": { "code": "# header\npass # trailing\n# footer\n", "expected": CodeRange((1, 0), (4, 0)), }, } ) def test_module_position(self, *, code: str, expected: CodeRange) -> None: wrapper = MetadataWrapper(parse_module(code)) positions = wrapper.resolve(PositionProvider) self.assertEqual(positions[wrapper.module], expected) def cmp_position( self, actual: CodeRange, start: Tuple[int, int], end: Tuple[int, int] ) -> None: self.assertEqual(actual, CodeRange(start, end)) def test_function_position(self) -> None: wrapper = MetadataWrapper(parse_module("def foo():\n pass")) module = wrapper.module positions = wrapper.resolve(PositionProvider) fn = cast(cst.FunctionDef, module.body[0]) stmt = cast(cst.SimpleStatementLine, fn.body.body[0]) pass_stmt = cast(cst.Pass, stmt.body[0]) self.cmp_position(positions[stmt], (2, 4), (2, 8)) self.cmp_position(positions[pass_stmt], (2, 4), (2, 8)) def test_nested_indent_position(self) -> None: wrapper = MetadataWrapper( parse_module("if True:\n if False:\n x = 1\nelse:\n return") ) module = wrapper.module positions = wrapper.resolve(PositionProvider) outer_if = cast(cst.If, module.body[0]) inner_if = cast(cst.If, outer_if.body.body[0]) assign = cast(cst.SimpleStatementLine, inner_if.body.body[0]).body[0] outer_else = cast(cst.Else, outer_if.orelse) return_stmt = cast(cst.SimpleStatementLine, outer_else.body.body[0]).body[0] self.cmp_position(positions[outer_if], (1, 0), (5, 10)) self.cmp_position(positions[inner_if], (2, 4), (3, 13)) self.cmp_position(positions[assign], (3, 8), (3, 13)) self.cmp_position(positions[outer_else], (4, 0), (5, 10)) self.cmp_position(positions[return_stmt], (5, 4), (5, 10)) def test_multiline_string_position(self) -> None: wrapper = MetadataWrapper(parse_module('"abc"\\\n"def"')) module = wrapper.module positions = wrapper.resolve(PositionProvider) stmt = cast(cst.SimpleStatementLine, module.body[0]) expr = cast(cst.Expr, stmt.body[0]) string = expr.value self.cmp_position(positions[stmt], (1, 0), (2, 5)) self.cmp_position(positions[expr], (1, 0), (2, 5)) self.cmp_position(positions[string], (1, 0), (2, 5)) def 
test_module_config_for_parsing(self) -> None: module = parse_module("pass\r") statement = parse_statement( "if True:\r pass", config=module.config_for_parsing ) self.assertEqual( statement, cst.If( test=cst.Name(value="True"), body=cst.IndentedBlock( body=[cst.SimpleStatementLine(body=[cst.Pass()])], header=cst.TrailingWhitespace( newline=cst.Newline( # This would be "\r" if we didn't pass the module config forward. value=None ) ), ), ), ) LibCST-1.2.0/libcst/_nodes/tests/test_namedexpr.py000066400000000000000000000203201456464173300220710ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider def _parse_expression_force_38(code: str) -> cst.BaseExpression: return cst.parse_expression( code, config=cst.PartialParserConfig(python_version="3.8") ) def _parse_statement_force_38(code: str) -> cst.BaseCompoundStatement: statement = cst.parse_statement( code, config=cst.PartialParserConfig(python_version="3.8") ) if not isinstance(statement, cst.BaseCompoundStatement): raise Exception("This function is expecting to parse compound statements only!") return statement class NamedExprTest(CSTNodeTest): @data_provider( ( # Simple named expression { "node": cst.NamedExpr(cst.Name("x"), cst.Float("5.5")), "code": "x := 5.5", "parser": None, # Walrus operator is illegal as top-level statement "expected_position": None, }, # Parenthesized named expression { "node": cst.NamedExpr( lpar=(cst.LeftParen(),), target=cst.Name("foo"), value=cst.Integer("5"), rpar=(cst.RightParen(),), ), "code": "(foo := 5)", "parser": _parse_expression_force_38, "expected_position": CodeRange((1, 1), (1, 9)), }, # Make sure that spacing works { "node": cst.NamedExpr( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), target=cst.Name("foo"), whitespace_before_walrus=cst.SimpleWhitespace(" "), whitespace_after_walrus=cst.SimpleWhitespace(" "), value=cst.Name("bar"), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "code": "( foo := bar )", "parser": _parse_expression_force_38, "expected_position": CodeRange((1, 2), (1, 14)), }, # Make sure we can use these where allowed in if/while statements { "node": cst.While( test=cst.NamedExpr( target=cst.Name(value="x"), value=cst.Call(func=cst.Name(value="some_input")), ), body=cst.SimpleStatementSuite(body=[cst.Pass()]), ), "code": "while x := some_input(): pass\n", "parser": _parse_statement_force_38, "expected_position": None, }, { "node": cst.If( test=cst.NamedExpr( target=cst.Name(value="x"), value=cst.Call(func=cst.Name(value="some_input")), ), body=cst.SimpleStatementSuite(body=[cst.Pass()]), ), "code": "if x := some_input(): pass\n", "parser": _parse_statement_force_38, "expected_position": None, }, { "node": cst.If( test=cst.NamedExpr( target=cst.Name(value="x"), value=cst.Integer(value="1"), whitespace_before_walrus=cst.SimpleWhitespace(""), whitespace_after_walrus=cst.SimpleWhitespace(""), ), body=cst.SimpleStatementSuite(body=[cst.Pass()]), ), "code": "if x:=1: pass\n", "parser": _parse_statement_force_38, "expected_position": None, }, # Function args { "node": cst.Call( func=cst.Name(value="f"), args=[ cst.Arg( value=cst.NamedExpr( target=cst.Name(value="y"), value=cst.Integer(value="1"), 
whitespace_before_walrus=cst.SimpleWhitespace(""), whitespace_after_walrus=cst.SimpleWhitespace(""), ) ), ], ), "code": "f(y:=1)", "parser": _parse_expression_force_38, "expected_position": None, }, # Whitespace handling on args is fragile { "node": cst.Call( func=cst.Name(value="f"), args=[ cst.Arg( value=cst.Name(value="x"), comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Arg( value=cst.NamedExpr( target=cst.Name(value="y"), value=cst.Integer(value="1"), whitespace_before_walrus=cst.SimpleWhitespace(" "), whitespace_after_walrus=cst.SimpleWhitespace(" "), ), whitespace_after_arg=cst.SimpleWhitespace(" "), ), ], ), "code": "f(x, y := 1 )", "parser": _parse_expression_force_38, "expected_position": None, }, { "node": cst.Call( func=cst.Name(value="f"), args=[ cst.Arg( value=cst.NamedExpr( target=cst.Name(value="y"), value=cst.Integer(value="1"), whitespace_before_walrus=cst.SimpleWhitespace(" "), whitespace_after_walrus=cst.SimpleWhitespace(" "), ), whitespace_after_arg=cst.SimpleWhitespace(" "), ), ], whitespace_before_args=cst.SimpleWhitespace(" "), ), "code": "f( y := 1 )", "parser": _parse_expression_force_38, "expected_position": None, }, { "node": cst.ListComp( elt=cst.NamedExpr( cst.Name("_"), cst.SimpleString("''"), whitespace_after_walrus=cst.SimpleWhitespace(""), whitespace_before_walrus=cst.SimpleWhitespace(""), ), for_in=cst.CompFor( target=cst.Name("_"), iter=cst.Name("_"), whitespace_before=cst.SimpleWhitespace(""), ), ), "code": "[_:=''for _ in _]", }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": ( lambda: cst.NamedExpr( cst.Name("foo"), cst.Name("bar"), lpar=(cst.LeftParen(),) ) ), "expected_re": "left paren without right paren", }, { "get_node": ( lambda: cst.NamedExpr( cst.Name("foo"), cst.Name("bar"), rpar=(cst.RightParen(),) ) ), "expected_re": "right paren without left paren", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_newline.py000066400000000000000000000020031456464173300215450ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable import libcst as cst from libcst._nodes.tests.base import CSTNodeTest from libcst.testing.utils import data_provider class NewlineTest(CSTNodeTest): @data_provider( ( (cst.Newline("\r\n"), "\r\n"), (cst.Newline("\r"), "\r"), (cst.Newline("\n"), "\n"), ) ) def test_valid(self, node: cst.CSTNode, code: str) -> None: self.validate_node(node, code) @data_provider( ( (lambda: cst.Newline("bad input"), "invalid value"), (lambda: cst.Newline("\nbad input\n"), "invalid value"), (lambda: cst.Newline("\n\n"), "invalid value"), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) LibCST-1.2.0/libcst/_nodes/tests/test_nonlocal.py000066400000000000000000000116031456464173300217170ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
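# Construction sketch (mirrors the first case below; illustrative only):
#
#     import libcst as cst
#     node = cst.Nonlocal((cst.NameItem(cst.Name("a")),))
#     cst.Module([]).code_for_node(node)  # -> "nonlocal a"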
from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst.helpers import ensure_type from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class NonlocalConstructionTest(CSTNodeTest): @data_provider( ( # Single nonlocal statement { "node": cst.Nonlocal((cst.NameItem(cst.Name("a")),)), "code": "nonlocal a", }, # Multiple entries in nonlocal statement { "node": cst.Nonlocal( (cst.NameItem(cst.Name("a")), cst.NameItem(cst.Name("b"))) ), "code": "nonlocal a, b", "expected_position": CodeRange((1, 0), (1, 13)), }, # Whitespace rendering test { "node": cst.Nonlocal( ( cst.NameItem( cst.Name("a"), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), cst.NameItem(cst.Name("b")), ), whitespace_after_nonlocal=cst.SimpleWhitespace(" "), ), "code": "nonlocal a , b", "expected_position": CodeRange((1, 0), (1, 17)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( # Validate construction { "get_node": lambda: cst.Nonlocal(()), "expected_re": "A Nonlocal statement must have at least one NameItem", }, # Validate whitespace handling { "get_node": lambda: cst.Nonlocal( (cst.NameItem(cst.Name("a")),), whitespace_after_nonlocal=cst.SimpleWhitespace(""), ), "expected_re": "Must have at least one space after 'nonlocal' keyword", }, # Validate comma handling { "get_node": lambda: cst.Nonlocal( (cst.NameItem(cst.Name("a"), comma=cst.Comma()),) ), "expected_re": "The last NameItem in a Nonlocal cannot have a trailing comma", }, # Validate paren handling { "get_node": lambda: cst.Nonlocal( ( cst.NameItem( cst.Name( "a", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ) ), ) ), "expected_re": "Cannot have parens around names in NameItem", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) class NonlocalParsingTest(CSTNodeTest): @data_provider( ( # Single nonlocal statement { "node": cst.Nonlocal((cst.NameItem(cst.Name("a")),)), "code": "nonlocal a", }, # Multiple entries in nonlocal statement { "node": cst.Nonlocal( ( cst.NameItem( cst.Name("a"), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.NameItem(cst.Name("b")), ) ), "code": "nonlocal a, b", }, # Whitespace rendering test { "node": cst.Nonlocal( ( cst.NameItem( cst.Name("a"), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), cst.NameItem(cst.Name("b")), ), whitespace_after_nonlocal=cst.SimpleWhitespace(" "), ), "code": "nonlocal a , b", }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node( parser=lambda code: ensure_type( parse_statement(code), cst.SimpleStatementLine ).body[0], **kwargs, ) LibCST-1.2.0/libcst/_nodes/tests/test_number.py000066400000000000000000000103521456464173300214020ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
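# Parenthesization sketch (mirrors the nested-parens cases below): numeric
# literals own their surrounding parentheses via lpar/rpar, so
#
#     import libcst as cst
#     node = cst.Integer(
#         "5",
#         lpar=(cst.LeftParen(), cst.LeftParen()),
#         rpar=(cst.RightParen(), cst.RightParen()),
#     )
#     cst.Module([]).code_for_node(node)  # -> "((5))"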
from typing import Callable, Optional import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class NumberTest(CSTNodeTest): @data_provider( ( # Simple number (cst.Integer("5"), "5", parse_expression), # Negted number ( cst.UnaryOperation(operator=cst.Minus(), expression=cst.Integer("5")), "-5", parse_expression, CodeRange((1, 0), (1, 2)), ), # In parenthesis ( cst.UnaryOperation( lpar=(cst.LeftParen(),), operator=cst.Minus(), expression=cst.Integer("5"), rpar=(cst.RightParen(),), ), "(-5)", parse_expression, CodeRange((1, 1), (1, 3)), ), ( cst.UnaryOperation( lpar=(cst.LeftParen(),), operator=cst.Minus(), expression=cst.Integer( "5", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), rpar=(cst.RightParen(),), ), "(-(5))", parse_expression, CodeRange((1, 1), (1, 5)), ), ( cst.UnaryOperation( operator=cst.Minus(), expression=cst.UnaryOperation( operator=cst.Minus(), expression=cst.Integer("5") ), ), "--5", parse_expression, CodeRange((1, 0), (1, 3)), ), # multiple nested parenthesis ( cst.Integer( "5", lpar=(cst.LeftParen(), cst.LeftParen()), rpar=(cst.RightParen(), cst.RightParen()), ), "((5))", parse_expression, CodeRange((1, 2), (1, 3)), ), ( cst.UnaryOperation( lpar=(cst.LeftParen(),), operator=cst.Plus(), expression=cst.Integer( "5", lpar=(cst.LeftParen(), cst.LeftParen()), rpar=(cst.RightParen(), cst.RightParen()), ), rpar=(cst.RightParen(),), ), "(+((5)))", parse_expression, CodeRange((1, 1), (1, 7)), ), ) ) def test_valid( self, node: cst.CSTNode, code: str, parser: Optional[Callable[[str], cst.CSTNode]], position: Optional[CodeRange] = None, ) -> None: self.validate_node(node, code, parser, expected_position=position) @data_provider( ( ( lambda: cst.Integer("5", lpar=(cst.LeftParen(),)), "left paren without right paren", ), ( lambda: cst.Integer("5", rpar=(cst.RightParen(),)), "right paren without left paren", ), ( lambda: cst.Float("5.5", lpar=(cst.LeftParen(),)), "left paren without right paren", ), ( lambda: cst.Float("5.5", rpar=(cst.RightParen(),)), "right paren without left paren", ), ( lambda: cst.Imaginary("5i", lpar=(cst.LeftParen(),)), "left paren without right paren", ), ( lambda: cst.Imaginary("5i", rpar=(cst.RightParen(),)), "right paren without left paren", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) LibCST-1.2.0/libcst/_nodes/tests/test_raise.py000066400000000000000000000167351456464173300212300ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
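# Construction sketch (mirrors the cases below; illustrative only):
#
#     import libcst as cst
#     node = cst.Raise(
#         cst.Call(cst.Name("Exception")), cst.From(cst.Name("cause"))
#     )
#     cst.Module([]).code_for_node(node)  # -> "raise Exception() from cause"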
from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst.helpers import ensure_type from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class RaiseConstructionTest(CSTNodeTest): @data_provider( ( # Simple raise {"node": cst.Raise(), "code": "raise"}, # Raise exception { "node": cst.Raise(cst.Call(cst.Name("Exception"))), "code": "raise Exception()", "expected_position": CodeRange((1, 0), (1, 17)), }, # Raise exception from cause { "node": cst.Raise( cst.Call(cst.Name("Exception")), cst.From(cst.Name("cause")) ), "code": "raise Exception() from cause", }, # Whitespace oddities test { "node": cst.Raise( cst.Call( cst.Name("Exception"), lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), cst.From( cst.Name( "cause", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), whitespace_before_from=cst.SimpleWhitespace(""), whitespace_after_from=cst.SimpleWhitespace(""), ), whitespace_after_raise=cst.SimpleWhitespace(""), ), "code": "raise(Exception())from(cause)", "expected_position": CodeRange((1, 0), (1, 29)), }, { "node": cst.Raise( cst.Call(cst.Name("Exception")), cst.From( cst.Name("cause"), whitespace_before_from=cst.SimpleWhitespace(""), ), ), "code": "raise Exception()from cause", "expected_position": CodeRange((1, 0), (1, 27)), }, # Whitespace rendering test { "node": cst.Raise( exc=cst.Call(cst.Name("Exception")), cause=cst.From( cst.Name("cause"), whitespace_before_from=cst.SimpleWhitespace(" "), whitespace_after_from=cst.SimpleWhitespace(" "), ), whitespace_after_raise=cst.SimpleWhitespace(" "), ), "code": "raise Exception() from cause", "expected_position": CodeRange((1, 0), (1, 31)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( # Validate construction { "get_node": lambda: cst.Raise(cause=cst.From(cst.Name("cause"))), "expected_re": "Must have an 'exc' when specifying 'clause'. 
on Raise", }, # Validate whitespace handling { "get_node": lambda: cst.Raise( cst.Call(cst.Name("Exception")), whitespace_after_raise=cst.SimpleWhitespace(""), ), "expected_re": "Must have at least one space after 'raise'", }, { "get_node": lambda: cst.Raise( cst.Name("exc"), cst.From( cst.Name("cause"), whitespace_before_from=cst.SimpleWhitespace(""), ), ), "expected_re": "Must have at least one space before 'from'", }, { "get_node": lambda: cst.Raise( cst.Name("exc"), cst.From( cst.Name("cause"), whitespace_after_from=cst.SimpleWhitespace(""), ), ), "expected_re": "Must have at least one space after 'from'", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) class RaiseParsingTest(CSTNodeTest): @data_provider( ( # Simple raise {"node": cst.Raise(), "code": "raise"}, # Raise exception { "node": cst.Raise( cst.Call(cst.Name("Exception")), whitespace_after_raise=cst.SimpleWhitespace(" "), ), "code": "raise Exception()", }, # Raise exception from cause { "node": cst.Raise( cst.Call(cst.Name("Exception")), cst.From( cst.Name("cause"), whitespace_before_from=cst.SimpleWhitespace(" "), whitespace_after_from=cst.SimpleWhitespace(" "), ), whitespace_after_raise=cst.SimpleWhitespace(" "), ), "code": "raise Exception() from cause", }, # Whitespace oddities test { "node": cst.Raise( cst.Call( cst.Name("Exception"), lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), cst.From( cst.Name( "cause", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), whitespace_before_from=cst.SimpleWhitespace(""), whitespace_after_from=cst.SimpleWhitespace(""), ), whitespace_after_raise=cst.SimpleWhitespace(""), ), "code": "raise(Exception())from(cause)", }, { "node": cst.Raise( cst.Call(cst.Name("Exception")), cst.From( cst.Name("cause"), whitespace_before_from=cst.SimpleWhitespace(""), whitespace_after_from=cst.SimpleWhitespace(" "), ), whitespace_after_raise=cst.SimpleWhitespace(" "), ), "code": "raise Exception()from cause", }, # Whitespace rendering test { "node": cst.Raise( exc=cst.Call(cst.Name("Exception")), cause=cst.From( cst.Name("cause"), whitespace_before_from=cst.SimpleWhitespace(" "), whitespace_after_from=cst.SimpleWhitespace(" "), ), whitespace_after_raise=cst.SimpleWhitespace(" "), ), "code": "raise Exception() from cause", }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node( parser=lambda code: ensure_type( parse_statement(code), cst.SimpleStatementLine ).body[0], **kwargs, ) LibCST-1.2.0/libcst/_nodes/tests/test_removal_behavior.py000066400000000000000000000076771456464173300234560ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
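# Usage sketch (mirrors the visitors below): returning cst.RemoveFromParent()
# from a leave_* method deletes the node, and a removal that would leave an
# empty block gets a `pass` inserted in its place, per the assertions below:
#
#     import libcst as cst
#
#     class DropContinue(cst.CSTTransformer):
#         def leave_Continue(self, original_node, updated_node):
#             return cst.RemoveFromParent()
#
#     cst.parse_module("while True:\n    continue\n").visit(DropContinue()).code
#     # -> "while True:\n    pass\n"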
from typing import Type, Union import libcst as cst from libcst import parse_module, RemovalSentinel from libcst._nodes.tests.base import CSTNodeTest from libcst._types import CSTNodeT from libcst._visitors import CSTTransformer from libcst.testing.utils import data_provider class IfStatementRemovalVisitor(CSTTransformer): def on_leave( self, original_node: CSTNodeT, updated_node: CSTNodeT ) -> Union[CSTNodeT, RemovalSentinel]: if isinstance(updated_node, cst.If): return cst.RemoveFromParent() else: return updated_node class ContinueStatementRemovalVisitor(CSTTransformer): def on_leave( self, original_node: CSTNodeT, updated_node: CSTNodeT ) -> Union[CSTNodeT, RemovalSentinel]: if isinstance(updated_node, cst.Continue): return cst.RemoveFromParent() else: return updated_node class SpecificImportRemovalVisitor(CSTTransformer): def on_leave( self, original_node: CSTNodeT, updated_node: CSTNodeT ) -> Union[cst.Import, cst.ImportFrom, CSTNodeT, RemovalSentinel]: if isinstance(updated_node, cst.Import): for alias in updated_node.names: name = alias.name if isinstance(name, cst.Name) and name.value == "b": return cst.RemoveFromParent() elif isinstance(updated_node, cst.ImportFrom): module = updated_node.module if isinstance(module, cst.Name) and module.value == "e": return cst.RemoveFromParent() return updated_node class RemovalBehavior(CSTNodeTest): @data_provider( ( # Top of module doesn't require a pass, empty code is valid. ("continue", "", ContinueStatementRemovalVisitor), ("if condition: print('hello world')", "", IfStatementRemovalVisitor), # Verify behavior within an indented block. ( "while True:\n continue", "while True:\n pass", ContinueStatementRemovalVisitor, ), ( "while True:\n if condition: print('hello world')", "while True:\n pass", IfStatementRemovalVisitor, ), # Verify behavior within a simple statement suite. ( "while True: continue", "while True: pass", ContinueStatementRemovalVisitor, ), # Verify with some imports ( "import a\nimport b\n\nfrom c import d\nfrom e import f", "import a\n\nfrom c import d", SpecificImportRemovalVisitor, ), # Verify only one pass is generated even if we remove multiple statements ( "while True:\n continue\ncontinue", "while True:\n pass", ContinueStatementRemovalVisitor, ), ( "while True: continue ; continue", "while True: pass", ContinueStatementRemovalVisitor, ), ) ) def test_removal_pass_behavior( self, before: str, after: str, visitor: Type[CSTTransformer] ) -> None: if before.endswith("\n") or after.endswith("\n"): raise Exception("Test cases should not be newline-terminated!") # Test doesn't have newline termination case before_module = parse_module(before) after_module = before_module.visit(visitor()) self.assertEqual(after, after_module.code) # Test does have newline termination case before_module = parse_module(before + "\n") after_module = before_module.visit(visitor()) self.assertEqual(after + "\n", after_module.code) LibCST-1.2.0/libcst/_nodes/tests/test_return.py000066400000000000000000000070321456464173300214320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
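# Construction sketch (mirrors the cases below; illustrative only):
#
#     import libcst as cst
#     line = cst.SimpleStatementLine([cst.Return(cst.Name("abc"))])
#     cst.Module([]).code_for_node(line)  # -> "return abc\n"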
from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class ReturnCreateTest(CSTNodeTest): @data_provider( ( { "node": cst.SimpleStatementLine([cst.Return()]), "code": "return\n", "expected_position": CodeRange((1, 0), (1, 6)), }, { "node": cst.SimpleStatementLine([cst.Return(cst.Name("abc"))]), "code": "return abc\n", "expected_position": CodeRange((1, 0), (1, 10)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": lambda: cst.Return( cst.Name("abc"), whitespace_after_return=cst.SimpleWhitespace("") ), "expected_re": "Must have at least one space after 'return'.", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) class ReturnParseTest(CSTNodeTest): @data_provider( ( { "node": cst.SimpleStatementLine( [cst.Return(whitespace_after_return=cst.SimpleWhitespace(""))] ), "code": "return\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine( [ cst.Return( cst.Name("abc"), whitespace_after_return=cst.SimpleWhitespace(" "), ) ] ), "code": "return abc\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine( [ cst.Return( cst.Name("abc"), whitespace_after_return=cst.SimpleWhitespace(" "), ) ] ), "code": "return abc\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine( [ cst.Return( cst.Name( "abc", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ), whitespace_after_return=cst.SimpleWhitespace(""), ) ] ), "code": "return(abc)\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine( [ cst.Return( cst.Name("abc"), whitespace_after_return=cst.SimpleWhitespace(" "), semicolon=cst.Semicolon(), ) ] ), "code": "return abc;\n", "parser": parse_statement, }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_set.py000066400000000000000000000114031456464173300207030ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Callable import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest, parse_expression_as from libcst._parser.entrypoints import is_native from libcst.testing.utils import data_provider class ListTest(CSTNodeTest): # A lot of Element/StarredElement tests are provided by the tests for Tuple, so we # we don't need to duplicate them here. 
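    # Illustrative sketch (drawn from the cases below): unlike cst.List, an
    # empty cst.Set is invalid ("at least one element"), since "{}" renders
    # as a dict literal, e.g.
    #
    #     import libcst as cst
    #     node = cst.Set([cst.Element(cst.Name("single_element"))])
    #     cst.Module([]).code_for_node(node)  # -> "{single_element}"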
@data_provider( [ # one-element list, sentinel comma value { "node": cst.Set([cst.Element(cst.Name("single_element"))]), "code": "{single_element}", "parser": parse_expression, }, # custom whitespace between brackets { "node": cst.Set( [cst.Element(cst.Name("single_element"))], lbrace=cst.LeftCurlyBrace( whitespace_after=cst.SimpleWhitespace("\t") ), rbrace=cst.RightCurlyBrace( whitespace_before=cst.SimpleWhitespace(" ") ), ), "code": "{\tsingle_element }", "parser": parse_expression, }, # two-element list, sentinel comma value { "node": cst.Set( [cst.Element(cst.Name("one")), cst.Element(cst.Name("two"))] ), "code": "{one, two}", "parser": None, }, # with parenthesis { "node": cst.Set( [cst.Element(cst.Name("one"))], lpar=[cst.LeftParen()], rpar=[cst.RightParen()], ), "code": "({one})", "parser": None, }, # starred element { "node": cst.Set( [ cst.StarredElement(cst.Name("one")), cst.StarredElement(cst.Name("two")), ] ), "code": "{*one, *two}", "parser": None, }, # missing spaces around set, always okay { "node": cst.GeneratorExp( cst.Name("elt"), cst.CompFor( target=cst.Name("elt"), iter=cst.Set( [ cst.Element( cst.Name("one"), cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Element(cst.Name("two")), ] ), ifs=[ cst.CompIf( cst.Name("test"), whitespace_before=cst.SimpleWhitespace(""), ) ], whitespace_after_in=cst.SimpleWhitespace(""), ), ), "code": "(elt for elt in{one, two}if test)", "parser": parse_expression, }, ] ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( ( lambda: cst.Set( [cst.Element(cst.Name("mismatched"))], lpar=[cst.LeftParen(), cst.LeftParen()], rpar=[cst.RightParen()], ), "unbalanced parens", ), (lambda: cst.Set([]), "at least one element"), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) @data_provider( ( { "code": "{*x, 2}", "parser": parse_expression_as(python_version="3.5"), "expect_success": True, }, { "code": "{*x, 2}", "parser": parse_expression_as(python_version="3.3"), "expect_success": False, }, ) ) def test_versions(self, **kwargs: Any) -> None: if is_native() and not kwargs.get("expect_success", True): self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_simple_comp.py000066400000000000000000000513651456464173300224320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
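# Construction sketch (mirrors the first cases below): GeneratorExp, ListComp
# and SetComp all pair an `elt` expression with a CompFor clause, e.g.
#
#     import libcst as cst
#     node = cst.ListComp(
#         cst.Name("a"),
#         cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")),
#     )
#     cst.Module([]).code_for_node(node)  # -> "[a for b in c]"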
from typing import Any, Callable import libcst as cst from libcst import parse_expression, parse_statement, PartialParserConfig from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class SimpleCompTest(CSTNodeTest): @data_provider( [ # simple GeneratorExp { "node": cst.GeneratorExp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")) ), "code": "(a for b in c)", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 13)), }, # simple ListComp { "node": cst.ListComp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")) ), "code": "[a for b in c]", "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 14)), }, # simple SetComp { "node": cst.SetComp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")) ), "code": "{a for b in c}", "parser": parse_expression, }, # non-trivial elt in GeneratorExp { "node": cst.GeneratorExp( cst.BinaryOperation(cst.Name("a1"), cst.Add(), cst.Name("a2")), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), ), "code": "(a1 + a2 for b in c)", "parser": parse_expression, }, # non-trivial elt in ListComp { "node": cst.ListComp( cst.BinaryOperation(cst.Name("a1"), cst.Add(), cst.Name("a2")), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), ), "code": "[a1 + a2 for b in c]", "parser": parse_expression, }, # non-trivial elt in SetComp { "node": cst.SetComp( cst.BinaryOperation(cst.Name("a1"), cst.Add(), cst.Name("a2")), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), ), "code": "{a1 + a2 for b in c}", "parser": parse_expression, }, # async GeneratorExp { "node": cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), asynchronous=cst.Asynchronous(), ), ), "code": "(a async for b in c)", "parser": lambda code: parse_expression( code, config=PartialParserConfig(python_version="3.7") ), }, # Python 3.6 async GeneratorExp { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.IndentedBlock( ( cst.SimpleStatementLine( ( cst.Expr( cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), asynchronous=cst.Asynchronous(), ), ) ), ) ), ) ), asynchronous=cst.Asynchronous(), ), "code": "async def foo():\n (a async for b in c)\n", "parser": lambda code: parse_statement( code, config=PartialParserConfig(python_version="3.6") ), }, # a generator doesn't have to own it's own parenthesis { "node": cst.Call( cst.Name("func"), [ cst.Arg( cst.GeneratorExp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), lpar=[], rpar=[], ) ) ], ), "code": "func(a for b in c)", "parser": parse_expression, }, # add a few 'if' clauses { "node": cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), ifs=[ cst.CompIf(cst.Name("d")), cst.CompIf(cst.Name("e")), cst.CompIf(cst.Name("f")), ], ), ), "code": "(a for b in c if d if e if f)", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 28)), }, # nested/inner for-in clause { "node": cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), inner_for_in=cst.CompFor( target=cst.Name("d"), iter=cst.Name("e") ), ), ), "code": "(a for b in c for d in e)", "parser": parse_expression, }, # nested/inner for-in clause with an 'if' clause { "node": cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), ifs=[cst.CompIf(cst.Name("d"))], inner_for_in=cst.CompFor( target=cst.Name("e"), 
iter=cst.Name("f") ), ), ), "code": "(a for b in c if d for e in f)", "parser": parse_expression, }, # custom whitespace { "node": cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), ifs=[ cst.CompIf( cst.Name("d"), whitespace_before=cst.SimpleWhitespace("\t"), whitespace_before_test=cst.SimpleWhitespace("\t\t"), ) ], whitespace_before=cst.SimpleWhitespace(" "), whitespace_after_for=cst.SimpleWhitespace(" "), whitespace_before_in=cst.SimpleWhitespace(" "), whitespace_after_in=cst.SimpleWhitespace(" "), ), lpar=[cst.LeftParen(whitespace_after=cst.SimpleWhitespace("\f"))], rpar=[ cst.RightParen(whitespace_before=cst.SimpleWhitespace("\f\f")) ], ), "code": "(\fa for b in c\tif\t\td\f\f)", "parser": parse_expression, "expected_position": CodeRange((1, 2), (1, 30)), }, # custom whitespace around ListComp's brackets { "node": cst.ListComp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), lbracket=cst.LeftSquareBracket( whitespace_after=cst.SimpleWhitespace("\t") ), rbracket=cst.RightSquareBracket( whitespace_before=cst.SimpleWhitespace("\t\t") ), lpar=[cst.LeftParen(whitespace_after=cst.SimpleWhitespace("\f"))], rpar=[ cst.RightParen(whitespace_before=cst.SimpleWhitespace("\f\f")) ], ), "code": "(\f[\ta for b in c\t\t]\f\f)", "parser": parse_expression, "expected_position": CodeRange((1, 2), (1, 19)), }, # custom whitespace around SetComp's braces { "node": cst.SetComp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), lbrace=cst.LeftCurlyBrace( whitespace_after=cst.SimpleWhitespace("\t") ), rbrace=cst.RightCurlyBrace( whitespace_before=cst.SimpleWhitespace("\t\t") ), lpar=[cst.LeftParen(whitespace_after=cst.SimpleWhitespace("\f"))], rpar=[ cst.RightParen(whitespace_before=cst.SimpleWhitespace("\f\f")) ], ), "code": "(\f{\ta for b in c\t\t}\f\f)", "parser": parse_expression, }, # no whitespace between elements { "node": cst.GeneratorExp( cst.Name("a", lpar=[cst.LeftParen()], rpar=[cst.RightParen()]), cst.CompFor( target=cst.Name( "b", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ), iter=cst.Name( "c", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ), ifs=[ cst.CompIf( cst.Name( "d", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ), whitespace_before=cst.SimpleWhitespace(""), whitespace_before_test=cst.SimpleWhitespace(""), ) ], inner_for_in=cst.CompFor( target=cst.Name( "e", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ), iter=cst.Name( "f", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ), whitespace_before=cst.SimpleWhitespace(""), whitespace_after_for=cst.SimpleWhitespace(""), whitespace_before_in=cst.SimpleWhitespace(""), whitespace_after_in=cst.SimpleWhitespace(""), ), whitespace_before=cst.SimpleWhitespace(""), whitespace_after_for=cst.SimpleWhitespace(""), whitespace_before_in=cst.SimpleWhitespace(""), whitespace_after_in=cst.SimpleWhitespace(""), ), lpar=[cst.LeftParen()], rpar=[cst.RightParen()], ), "code": "((a)for(b)in(c)if(d)for(e)in(f))", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 31)), }, # no whitespace before/after GeneratorExp is valid { "node": cst.Comparison( cst.GeneratorExp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), ), [ cst.ComparisonTarget( cst.Is( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), cst.GeneratorExp( cst.Name("d"), cst.CompFor(target=cst.Name("e"), iter=cst.Name("f")), ), ) ], ), "code": "(a for b in c)is(d for e in f)", "parser": parse_expression, }, # no whitespace before/after 
ListComp is valid { "node": cst.Comparison( cst.ListComp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), ), [ cst.ComparisonTarget( cst.Is( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), cst.ListComp( cst.Name("d"), cst.CompFor(target=cst.Name("e"), iter=cst.Name("f")), ), ) ], ), "code": "[a for b in c]is[d for e in f]", "parser": parse_expression, }, # no whitespace before/after SetComp is valid { "node": cst.Comparison( cst.SetComp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), ), [ cst.ComparisonTarget( cst.Is( whitespace_before=cst.SimpleWhitespace(""), whitespace_after=cst.SimpleWhitespace(""), ), cst.SetComp( cst.Name("d"), cst.CompFor(target=cst.Name("e"), iter=cst.Name("f")), ), ) ], ), "code": "{a for b in c}is{d for e in f}", "parser": parse_expression, }, ] ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), lpar=[cst.LeftParen(), cst.LeftParen()], rpar=[cst.RightParen()], ), "unbalanced parens", ), ( lambda: cst.ListComp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), lpar=[cst.LeftParen(), cst.LeftParen()], rpar=[cst.RightParen()], ), "unbalanced parens", ), ( lambda: cst.SetComp( cst.Name("a"), cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), lpar=[cst.LeftParen(), cst.LeftParen()], rpar=[cst.RightParen()], ), "unbalanced parens", ), ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), whitespace_before=cst.SimpleWhitespace(""), ), ), "Must have at least one space before 'for' keyword.", ), ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), asynchronous=cst.Asynchronous(), whitespace_before=cst.SimpleWhitespace(""), ), ), "Must have at least one space before 'async' keyword.", ), ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), whitespace_after_for=cst.SimpleWhitespace(""), ), ), "Must have at least one space after 'for' keyword.", ), ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), whitespace_before_in=cst.SimpleWhitespace(""), ), ), "Must have at least one space before 'in' keyword.", ), ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), whitespace_after_in=cst.SimpleWhitespace(""), ), ), "Must have at least one space after 'in' keyword.", ), ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), ifs=[ cst.CompIf( cst.Name("d"), whitespace_before=cst.SimpleWhitespace(""), ) ], ), ), "Must have at least one space before 'if' keyword.", ), ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), ifs=[ cst.CompIf( cst.Name("d"), whitespace_before_test=cst.SimpleWhitespace(""), ) ], ), ), "Must have at least one space after 'if' keyword.", ), ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), inner_for_in=cst.CompFor( target=cst.Name("d"), iter=cst.Name("e"), whitespace_before=cst.SimpleWhitespace(""), ), ), ), "Must have at least one space before 'for' keyword.", ), ( lambda: cst.GeneratorExp( cst.Name("a"), cst.CompFor( target=cst.Name("b"), iter=cst.Name("c"), inner_for_in=cst.CompFor( target=cst.Name("d"), iter=cst.Name("e"), asynchronous=cst.Asynchronous(), 
whitespace_before=cst.SimpleWhitespace(""), ), ), ), "Must have at least one space before 'async' keyword.", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) LibCST-1.2.0/libcst/_nodes/tests/test_simple_statement.py000066400000000000000000000332451456464173300234750ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class SimpleStatementTest(CSTNodeTest): @data_provider( ( # a single-element SimpleStatementLine { "node": cst.SimpleStatementLine((cst.Pass(),)), "code": "pass\n", "parser": parse_statement, }, # a multi-element SimpleStatementLine { "node": cst.SimpleStatementLine( (cst.Pass(semicolon=cst.Semicolon()), cst.Continue()) ), "code": "pass;continue\n", "parser": parse_statement, }, # a multi-element SimpleStatementLine with whitespace { "node": cst.SimpleStatementLine( ( cst.Pass( semicolon=cst.Semicolon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ) ), cst.Continue(), ) ), "code": "pass ; continue\n", "parser": parse_statement, }, # A more complicated SimpleStatementLine { "node": cst.SimpleStatementLine( ( cst.Pass(semicolon=cst.Semicolon()), cst.Continue(semicolon=cst.Semicolon()), cst.Break(), ) ), "code": "pass;continue;break\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 19)), }, # a multi-element SimpleStatementLine, inferred semicolons { "node": cst.SimpleStatementLine( (cst.Pass(), cst.Continue(), cst.Break()) ), "code": "pass; continue; break\n", "parser": None, # No test for parsing, since we are using sentinels. 
}, # some expression statements { "node": cst.SimpleStatementLine((cst.Expr(cst.Name("None")),)), "code": "None\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine((cst.Expr(cst.Name("True")),)), "code": "True\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine((cst.Expr(cst.Name("False")),)), "code": "False\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine((cst.Expr(cst.Ellipsis()),)), "code": "...\n", "parser": parse_statement, }, # Test some numbers { "node": cst.SimpleStatementLine((cst.Expr(cst.Integer("5")),)), "code": "5\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine((cst.Expr(cst.Float("5.5")),)), "code": "5.5\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine((cst.Expr(cst.Imaginary("5j")),)), "code": "5j\n", "parser": parse_statement, }, # Test some numbers with parens { "node": cst.SimpleStatementLine( ( cst.Expr( cst.Integer( "5", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ) ), ) ), "code": "(5)\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 3)), }, { "node": cst.SimpleStatementLine( ( cst.Expr( cst.Float( "5.5", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ) ), ) ), "code": "(5.5)\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine( ( cst.Expr( cst.Imaginary( "5j", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ) ), ) ), "code": "(5j)\n", "parser": parse_statement, }, # Test some strings { "node": cst.SimpleStatementLine((cst.Expr(cst.SimpleString('"abc"')),)), "code": '"abc"\n', "parser": parse_statement, }, { "node": cst.SimpleStatementLine( ( cst.Expr( cst.ConcatenatedString( cst.SimpleString('"abc"'), cst.SimpleString('"def"') ) ), ) ), "code": '"abc""def"\n', "parser": parse_statement, }, { "node": cst.SimpleStatementLine( ( cst.Expr( cst.ConcatenatedString( left=cst.SimpleString('"abc"'), whitespace_between=cst.SimpleWhitespace(" "), right=cst.ConcatenatedString( left=cst.SimpleString('"def"'), whitespace_between=cst.SimpleWhitespace(" "), right=cst.SimpleString('"ghi"'), ), ) ), ) ), "code": '"abc" "def" "ghi"\n', "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 17)), }, # Test parenthesis rules { "node": cst.SimpleStatementLine( ( cst.Expr( cst.Ellipsis( lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ) ), ) ), "code": "(...)\n", "parser": parse_statement, }, # Test parenthesis with whitespace ownership { "node": cst.SimpleStatementLine( ( cst.Expr( cst.Ellipsis( lpar=( cst.LeftParen( whitespace_after=cst.SimpleWhitespace(" ") ), ), rpar=( cst.RightParen( whitespace_before=cst.SimpleWhitespace(" ") ), ), ) ), ) ), "code": "( ... )\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine( ( cst.Expr( cst.Ellipsis( lpar=( cst.LeftParen( whitespace_after=cst.SimpleWhitespace(" ") ), cst.LeftParen( whitespace_after=cst.SimpleWhitespace(" ") ), cst.LeftParen( whitespace_after=cst.SimpleWhitespace(" ") ), ), rpar=( cst.RightParen( whitespace_before=cst.SimpleWhitespace(" ") ), cst.RightParen( whitespace_before=cst.SimpleWhitespace(" ") ), cst.RightParen( whitespace_before=cst.SimpleWhitespace(" ") ), ), ) ), ) ), "code": "( ( ( ... ) ) )\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 21)), }, # Test parenthesis rules with expressions { "node": cst.SimpleStatementLine( ( cst.Expr( cst.Ellipsis( lpar=( cst.LeftParen( whitespace_after=cst.ParenthesizedWhitespace( first_line=cst.TrailingWhitespace(), empty_lines=( cst.EmptyLine( comment=cst.Comment( "# Wow, a comment!" 
) ), ), indent=True, last_line=cst.SimpleWhitespace(" "), ) ), ), rpar=( cst.RightParen( whitespace_before=cst.ParenthesizedWhitespace( first_line=cst.TrailingWhitespace(), empty_lines=(), indent=True, last_line=cst.SimpleWhitespace(""), ) ), ), ) ), ) ), "code": "(\n# Wow, a comment!\n ...\n)\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (4, 1)), }, # test trailing whitespace { "node": cst.SimpleStatementLine( (cst.Pass(),), trailing_whitespace=cst.TrailingWhitespace( whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# trailing comment"), ), ), "code": "pass # trailing comment\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 4)), }, # test leading comment { "node": cst.SimpleStatementLine( (cst.Pass(),), leading_lines=(cst.EmptyLine(comment=cst.Comment("# comment")),), ), "code": "# comment\npass\n", "parser": parse_statement, "expected_position": CodeRange((2, 0), (2, 4)), }, # test indentation { "node": DummyIndentedBlock( " ", cst.SimpleStatementLine( (cst.Pass(),), leading_lines=( cst.EmptyLine(comment=cst.Comment("# comment")), ), ), ), "code": " # comment\n pass\n", "expected_position": CodeRange((2, 4), (2, 8)), }, # test suite variant { "node": cst.SimpleStatementSuite((cst.Pass(),)), "code": " pass\n", "expected_position": CodeRange((1, 1), (1, 5)), }, { "node": cst.SimpleStatementSuite( (cst.Pass(),), leading_whitespace=cst.SimpleWhitespace("") ), "code": "pass\n", "expected_position": CodeRange((1, 0), (1, 4)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_simple_string.py000066400000000000000000000015601456464173300227720ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import libcst as cst class TestSimpleString(unittest.TestCase): def test_quote(self) -> None: test_cases = [ ('"a"', '"'), ("'b'", "'"), ('""', '"'), ("''", "'"), ('"""c"""', '"""'), ("'''d'''", "'''"), ('""""e"""', '"""'), ("''''f'''", "'''"), ('"""""g"""', '"""'), ("'''''h'''", "'''"), ('""""""', '"""'), ("''''''", "'''"), ] for s, expected_quote in test_cases: simple_string = cst.SimpleString(s) actual = simple_string.quote self.assertEqual(expected_quote, actual) LibCST-1.2.0/libcst/_nodes/tests/test_simple_whitespace.py000066400000000000000000000073631456464173300236270ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
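# Illustrative sketch (an addition, not part of the original test file):
# TestSimpleString above exercises SimpleString.quote, which returns the
# opening quote characters of a string literal, for one, two, or three
# quote characters of either kind.
import libcst as cst

assert cst.SimpleString('"abc"').quote == '"'
assert cst.SimpleString("'''d'''").quote == "'''"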
from typing import Callable import libcst as cst from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock from libcst.testing.utils import data_provider class SimpleWhitespaceTest(CSTNodeTest): @data_provider( ( (cst.SimpleWhitespace(""), ""), (cst.SimpleWhitespace(" "), " "), (cst.SimpleWhitespace(" \t\f"), " \t\f"), (cst.SimpleWhitespace("\\\n "), "\\\n "), (cst.SimpleWhitespace("\\\r\n "), "\\\r\n "), ) ) def test_valid(self, node: cst.CSTNode, code: str) -> None: self.validate_node(node, code) @data_provider( ( (lambda: cst.SimpleWhitespace(" bad input"), "non-whitespace"), (lambda: cst.SimpleWhitespace("\\"), "non-whitespace"), (lambda: cst.SimpleWhitespace("\\\n\n "), "non-whitespace"), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) class ParenthesizedWhitespaceTest(CSTNodeTest): @data_provider( ( (cst.ParenthesizedWhitespace(), "\n"), ( cst.ParenthesizedWhitespace( first_line=cst.TrailingWhitespace( cst.SimpleWhitespace(" "), cst.Comment("# This is a comment") ) ), " # This is a comment\n", ), ( cst.ParenthesizedWhitespace( first_line=cst.TrailingWhitespace( cst.SimpleWhitespace(" "), cst.Comment("# This is a comment") ), empty_lines=(cst.EmptyLine(), cst.EmptyLine(), cst.EmptyLine()), ), " # This is a comment\n\n\n\n", ), ( cst.ParenthesizedWhitespace( first_line=cst.TrailingWhitespace( cst.SimpleWhitespace(" "), cst.Comment("# This is a comment") ), empty_lines=(cst.EmptyLine(), cst.EmptyLine(), cst.EmptyLine()), indent=False, last_line=cst.SimpleWhitespace(" "), ), " # This is a comment\n\n\n\n ", ), ( DummyIndentedBlock( " ", cst.ParenthesizedWhitespace( first_line=cst.TrailingWhitespace( cst.SimpleWhitespace(" "), cst.Comment("# This is a comment"), ), empty_lines=(cst.EmptyLine(), cst.EmptyLine(), cst.EmptyLine()), indent=True, last_line=cst.SimpleWhitespace(" "), ), ), " # This is a comment\n \n \n \n ", ), ( DummyIndentedBlock( " ", cst.ParenthesizedWhitespace( first_line=cst.TrailingWhitespace( cst.SimpleWhitespace(" "), cst.Comment("# This is a comment"), ), indent=True, last_line=cst.SimpleWhitespace(""), ), ), " # This is a comment\n ", ), ) ) def test_valid(self, node: cst.CSTNode, code: str) -> None: self.validate_node(node, code) LibCST-1.2.0/libcst/_nodes/tests/test_small_statement.py000066400000000000000000000057351456464173300233170ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
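# Illustrative sketch (an addition, not part of the original test file):
# rendering a whitespace node directly with Module.code_for_node makes
# visible what validate_node asserts in ParenthesizedWhitespaceTest above.
import libcst as cst

ws = cst.ParenthesizedWhitespace(
    first_line=cst.TrailingWhitespace(
        cst.SimpleWhitespace(" "), cst.Comment("# This is a comment")
    )
)
assert cst.Module([]).code_for_node(ws) == " # This is a comment\n"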
from typing import Any import libcst as cst from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class SmallStatementTest(CSTNodeTest): @data_provider( ( {"node": cst.Pass(), "code": "pass"}, {"node": cst.Pass(semicolon=cst.Semicolon()), "code": "pass;"}, { "node": cst.Pass( semicolon=cst.Semicolon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ) ), "code": "pass ; ", "expected_position": CodeRange((1, 0), (1, 4)), }, {"node": cst.Continue(), "code": "continue"}, {"node": cst.Continue(semicolon=cst.Semicolon()), "code": "continue;"}, { "node": cst.Continue( semicolon=cst.Semicolon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ) ), "code": "continue ; ", "expected_position": CodeRange((1, 0), (1, 8)), }, {"node": cst.Break(), "code": "break"}, {"node": cst.Break(semicolon=cst.Semicolon()), "code": "break;"}, { "node": cst.Break( semicolon=cst.Semicolon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ) ), "code": "break ; ", "expected_position": CodeRange((1, 0), (1, 5)), }, { "node": cst.Expr( cst.BinaryOperation(cst.Name("x"), cst.Add(), cst.Name("y")) ), "code": "x + y", }, { "node": cst.Expr( cst.BinaryOperation(cst.Name("x"), cst.Add(), cst.Name("y")), semicolon=cst.Semicolon(), ), "code": "x + y;", }, { "node": cst.Expr( cst.BinaryOperation(cst.Name("x"), cst.Add(), cst.Name("y")), semicolon=cst.Semicolon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), "code": "x + y ; ", "expected_position": CodeRange((1, 0), (1, 5)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_subscript.py000066400000000000000000000405661456464173300221420ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, Optional import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class SubscriptTest(CSTNodeTest): @data_provider( ( # Simple subscript expression ( cst.Subscript( cst.Name("foo"), (cst.SubscriptElement(cst.Index(cst.Integer("5"))),), ), "foo[5]", True, ), # Test creation of subscript with slice/extslice. ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice( lower=cst.Integer("1"), upper=cst.Integer("2"), step=cst.Integer("3"), ) ), ), ), "foo[1:2:3]", False, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice( lower=cst.Integer("1"), upper=cst.Integer("2"), step=cst.Integer("3"), ) ), cst.SubscriptElement(cst.Index(cst.Integer("5"))), ), ), "foo[1:2:3, 5]", False, CodeRange((1, 0), (1, 13)), ), # Test parsing of subscript with slice/extslice. 
( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice( lower=cst.Integer("1"), first_colon=cst.Colon(), upper=cst.Integer("2"), second_colon=cst.Colon(), step=cst.Integer("3"), ) ), ), ), "foo[1:2:3]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice( lower=cst.Integer("1"), first_colon=cst.Colon(), upper=cst.Integer("2"), second_colon=cst.Colon(), step=cst.Integer("3"), ), comma=cst.Comma(), ), cst.SubscriptElement(cst.Index(cst.Integer("5"))), ), ), "foo[1:2:3,5]", True, ), # Some more wild slice creations ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice(lower=cst.Integer("1"), upper=cst.Integer("2")) ), ), ), "foo[1:2]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice(lower=cst.Integer("1"), upper=None) ), ), ), "foo[1:]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice(lower=None, upper=cst.Integer("2")) ), ), ), "foo[:2]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice( lower=cst.Integer("1"), upper=None, step=cst.Integer("3"), ) ), ), ), "foo[1::3]", False, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice(lower=None, upper=None, step=cst.Integer("3")) ), ), ), "foo[::3]", False, CodeRange((1, 0), (1, 8)), ), # Some more wild slice parsings ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice(lower=cst.Integer("1"), upper=cst.Integer("2")) ), ), ), "foo[1:2]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice(lower=cst.Integer("1"), upper=None) ), ), ), "foo[1:]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice(lower=None, upper=cst.Integer("2")) ), ), ), "foo[:2]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice( lower=cst.Integer("1"), upper=None, second_colon=cst.Colon(), step=cst.Integer("3"), ) ), ), ), "foo[1::3]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice( lower=None, upper=None, second_colon=cst.Colon(), step=cst.Integer("3"), ) ), ), ), "foo[::3]", True, ), # Valid list clone operations rendering ( cst.Subscript( cst.Name("foo"), (cst.SubscriptElement(cst.Slice(lower=None, upper=None)),), ), "foo[:]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice( lower=None, upper=None, second_colon=cst.Colon(), step=None, ) ), ), ), "foo[::]", True, ), # Valid list clone operations parsing ( cst.Subscript( cst.Name("foo"), (cst.SubscriptElement(cst.Slice(lower=None, upper=None)),), ), "foo[:]", True, ), ( cst.Subscript( cst.Name("foo"), ( cst.SubscriptElement( cst.Slice( lower=None, upper=None, second_colon=cst.Colon(), step=None, ) ), ), ), "foo[::]", True, ), # In parenthesis ( cst.Subscript( lpar=(cst.LeftParen(),), value=cst.Name("foo"), slice=(cst.SubscriptElement(cst.Index(cst.Integer("5"))),), rpar=(cst.RightParen(),), ), "(foo[5])", True, ), # Verify spacing ( cst.Subscript( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), value=cst.Name("foo"), lbracket=cst.LeftSquareBracket( whitespace_after=cst.SimpleWhitespace(" ") ), slice=(cst.SubscriptElement(cst.Index(cst.Integer("5"))),), rbracket=cst.RightSquareBracket( whitespace_before=cst.SimpleWhitespace(" ") ), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), whitespace_after_value=cst.SimpleWhitespace(" "), ), "( foo [ 5 ] )", True, ), ( cst.Subscript( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), value=cst.Name("foo"), 
lbracket=cst.LeftSquareBracket( whitespace_after=cst.SimpleWhitespace(" ") ), slice=( cst.SubscriptElement( cst.Slice( lower=cst.Integer("1"), first_colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), upper=cst.Integer("2"), second_colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), step=cst.Integer("3"), ) ), ), rbracket=cst.RightSquareBracket( whitespace_before=cst.SimpleWhitespace(" ") ), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), whitespace_after_value=cst.SimpleWhitespace(" "), ), "( foo [ 1 : 2 : 3 ] )", True, ), ( cst.Subscript( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), value=cst.Name("foo"), lbracket=cst.LeftSquareBracket( whitespace_after=cst.SimpleWhitespace(" ") ), slice=( cst.SubscriptElement( slice=cst.Slice( lower=cst.Integer("1"), first_colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), upper=cst.Integer("2"), second_colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), step=cst.Integer("3"), ), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), cst.SubscriptElement(slice=cst.Index(cst.Integer("5"))), ), rbracket=cst.RightSquareBracket( whitespace_before=cst.SimpleWhitespace(" ") ), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), whitespace_after_value=cst.SimpleWhitespace(" "), ), "( foo [ 1 : 2 : 3 , 5 ] )", True, CodeRange((1, 2), (1, 24)), ), # Test Index, Slice, SubscriptElement (cst.Index(cst.Integer("5")), "5", False, CodeRange((1, 0), (1, 1))), ( cst.Slice(lower=None, upper=None, second_colon=cst.Colon(), step=None), "::", False, CodeRange((1, 0), (1, 2)), ), ( cst.SubscriptElement( slice=cst.Slice( lower=cst.Integer("1"), first_colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), upper=cst.Integer("2"), second_colon=cst.Colon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), step=cst.Integer("3"), ), comma=cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), "1 : 2 : 3 , ", False, CodeRange((1, 0), (1, 9)), ), ) ) def test_valid( self, node: cst.CSTNode, code: str, check_parsing: bool, position: Optional[CodeRange] = None, ) -> None: if check_parsing: self.validate_node(node, code, parse_expression, expected_position=position) else: self.validate_node(node, code, expected_position=position) @data_provider( ( ( lambda: cst.Subscript( cst.Name("foo"), (cst.SubscriptElement(cst.Index(cst.Integer("5"))),), lpar=(cst.LeftParen(),), ), "left paren without right paren", ), ( lambda: cst.Subscript( cst.Name("foo"), (cst.SubscriptElement(cst.Index(cst.Integer("5"))),), rpar=(cst.RightParen(),), ), "right paren without left paren", ), (lambda: cst.Subscript(cst.Name("foo"), ()), "empty SubscriptElement"), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) LibCST-1.2.0/libcst/_nodes/tests/test_trailing_whitespace.py000066400000000000000000000020651456464173300241410ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
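# Illustrative sketch (an addition, not part of the original test file):
# the SubscriptTest cases above round-trip slice syntax through the
# parser; parsing yields a Subscript whose generated code matches the
# input exactly.
import libcst as cst

expr = cst.parse_expression("foo[1:2:3]")
assert isinstance(expr, cst.Subscript)
assert cst.Module([]).code_for_node(expr) == "foo[1:2:3]"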
import libcst as cst from libcst._nodes.tests.base import CSTNodeTest from libcst.testing.utils import data_provider class TrailingWhitespaceTest(CSTNodeTest): @data_provider( ( (cst.TrailingWhitespace(), "\n"), (cst.TrailingWhitespace(whitespace=cst.SimpleWhitespace(" ")), " \n"), (cst.TrailingWhitespace(comment=cst.Comment("# comment")), "# comment\n"), (cst.TrailingWhitespace(newline=cst.Newline("\r\n")), "\r\n"), ( cst.TrailingWhitespace( whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# comment"), newline=cst.Newline("\r\n"), ), " # comment\r\n", ), ) ) def test_valid(self, node: cst.CSTNode, code: str) -> None: self.validate_node(node, code) LibCST-1.2.0/libcst/_nodes/tests/test_try.py000066400000000000000000000623001456464173300207300ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Callable, Optional import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider native_parse_statement: Optional[Callable[[str], cst.CSTNode]] = ( parse_statement if is_native() else None ) class TryTest(CSTNodeTest): @data_provider( ( # Simple try/except block { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(""), ), ), ), "code": "try: pass\nexcept: pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (2, 12)), }, # Try/except with a class { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("Exception"), ), ), ), "code": "try: pass\nexcept Exception: pass\n", "parser": parse_statement, }, # Try/except with a named class { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("Exception"), name=cst.AsName(cst.Name("exc")), ), ), ), "code": "try: pass\nexcept Exception as exc: pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (2, 29)), }, # Try/except with multiple clauses { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("TypeError"), name=cst.AsName(cst.Name("e")), ), cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("KeyError"), name=cst.AsName(cst.Name("e")), ), cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(""), ), ), ), "code": "try: pass\n" + "except TypeError as e: pass\n" + "except KeyError as e: pass\n" + "except: pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (4, 12)), }, # Simple try/finally block { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "try: pass\nfinally: pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (2, 13)), }, # Simple try/except/finally block { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(""), ), ), 
finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "try: pass\nexcept: pass\nfinally: pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (3, 13)), }, # Simple try/except/else block { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(""), ), ), orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "try: pass\nexcept: pass\nelse: pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (3, 10)), }, # Simple try/except/else block/finally { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(""), ), ), orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "try: pass\nexcept: pass\nelse: pass\nfinally: pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (4, 13)), }, # Verify whitespace in various locations { "node": cst.Try( leading_lines=(cst.EmptyLine(comment=cst.Comment("# 1")),), body=cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( leading_lines=(cst.EmptyLine(comment=cst.Comment("# 2")),), type=cst.Name("TypeError"), name=cst.AsName( cst.Name("e"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), whitespace_after_except=cst.SimpleWhitespace(" "), whitespace_before_colon=cst.SimpleWhitespace(" "), body=cst.SimpleStatementSuite((cst.Pass(),)), ), ), orelse=cst.Else( leading_lines=(cst.EmptyLine(comment=cst.Comment("# 3")),), body=cst.SimpleStatementSuite((cst.Pass(),)), whitespace_before_colon=cst.SimpleWhitespace(" "), ), finalbody=cst.Finally( leading_lines=(cst.EmptyLine(comment=cst.Comment("# 4")),), body=cst.SimpleStatementSuite((cst.Pass(),)), whitespace_before_colon=cst.SimpleWhitespace(" "), ), whitespace_before_colon=cst.SimpleWhitespace(" "), ), "code": "# 1\ntry : pass\n# 2\nexcept TypeError as e : pass\n# 3\nelse : pass\n# 4\nfinally : pass\n", "parser": parse_statement, "expected_position": CodeRange((2, 0), (8, 14)), }, # Please don't write code like this { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("TypeError"), name=cst.AsName(cst.Name("e")), ), cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("KeyError"), name=cst.AsName(cst.Name("e")), ), cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(""), ), ), orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "try: pass\n" + "except TypeError as e: pass\n" + "except KeyError as e: pass\n" + "except: pass\n" + "else: pass\n" + "finally: pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (6, 13)), }, # Verify indentation { "node": DummyIndentedBlock( " ", cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("TypeError"), name=cst.AsName(cst.Name("e")), ), cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("KeyError"), name=cst.AsName(cst.Name("e")), ), cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(""), 
), ), orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), ), ), "code": " try: pass\n" + " except TypeError as e: pass\n" + " except KeyError as e: pass\n" + " except: pass\n" + " else: pass\n" + " finally: pass\n", "parser": None, }, # Verify indentation in bodies { "node": DummyIndentedBlock( " ", cst.Try( cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), handlers=( cst.ExceptHandler( cst.IndentedBlock( (cst.SimpleStatementLine((cst.Pass(),)),) ), whitespace_after_except=cst.SimpleWhitespace(""), ), ), orelse=cst.Else( cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)) ), finalbody=cst.Finally( cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)) ), ), ), "code": " try:\n" + " pass\n" + " except:\n" + " pass\n" + " else:\n" + " pass\n" + " finally:\n" + " pass\n", "parser": None, }, # No space when using grouping parens { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(""), type=cst.Name( "Exception", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), ), ), ), "code": "try: pass\nexcept(Exception): pass\n", "parser": parse_statement, }, # No space when using tuple { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(""), type=cst.Tuple( [ cst.Element( cst.Name("IOError"), comma=cst.Comma( whitespace_after=cst.SimpleWhitespace(" ") ), ), cst.Element(cst.Name("ImportError")), ] ), ), ), ), "code": "try: pass\nexcept(IOError, ImportError): pass\n", "parser": parse_statement, }, # No space before as { "node": cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), handlers=[ cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_except=cst.SimpleWhitespace(" "), type=cst.Call(cst.Name("foo")), name=cst.AsName( whitespace_before_as=cst.SimpleWhitespace(""), name=cst.Name("bar"), ), ) ], ), "code": "try: pass\nexcept foo()as bar: pass\n", }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": lambda: cst.AsName(cst.Name("")), "expected_re": "empty name identifier", }, { "get_node": lambda: cst.AsName( cst.Name("bla"), whitespace_after_as=cst.SimpleWhitespace("") ), "expected_re": "between 'as'", }, { "get_node": lambda: cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), name=cst.AsName(cst.Name("bla")), ), "expected_re": "name for an empty type", }, { "get_node": lambda: cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("TypeError"), whitespace_after_except=cst.SimpleWhitespace(""), ), "expected_re": "at least one space after except", }, { "get_node": lambda: cst.Try(cst.SimpleStatementSuite((cst.Pass(),))), "expected_re": "at least one ExceptHandler or Finally", }, { "get_node": lambda: cst.Try( cst.SimpleStatementSuite((cst.Pass(),)), orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), ), "expected_re": "at least one ExceptHandler in order to have an Else", }, { "get_node": lambda: cst.Try( body=cst.SimpleStatementSuite(body=[cst.Pass()]), handlers=( cst.ExceptHandler( body=cst.SimpleStatementSuite(body=[cst.Pass()]), ), cst.ExceptHandler( body=cst.SimpleStatementSuite(body=[cst.Pass()]), ), ), ), "expected_re": "The bare except: handler must be the last one.", }, { 
"get_node": lambda: cst.Try( body=cst.SimpleStatementSuite(body=[cst.Pass()]), handlers=( cst.ExceptHandler( body=cst.SimpleStatementSuite(body=[cst.Pass()]), ), cst.ExceptHandler( body=cst.SimpleStatementSuite(body=[cst.Pass()]), type=cst.Name("Exception"), ), ), ), "expected_re": "The bare except: handler must be the last one.", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) class TryStarTest(CSTNodeTest): @data_provider( ( # Try/except with a class { "node": cst.TryStar( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptStarHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("Exception"), ), ), ), "code": "try: pass\nexcept* Exception: pass\n", "parser": native_parse_statement, }, # Try/except with a named class { "node": cst.TryStar( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptStarHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("Exception"), name=cst.AsName(cst.Name("exc")), ), ), ), "code": "try: pass\nexcept* Exception as exc: pass\n", "parser": native_parse_statement, "expected_position": CodeRange((1, 0), (2, 30)), }, # Try/except with multiple clauses { "node": cst.TryStar( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptStarHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("TypeError"), name=cst.AsName(cst.Name("e")), ), cst.ExceptStarHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("KeyError"), name=cst.AsName(cst.Name("e")), ), ), ), "code": "try: pass\n" + "except* TypeError as e: pass\n" + "except* KeyError as e: pass\n", "parser": native_parse_statement, "expected_position": CodeRange((1, 0), (3, 27)), }, # Simple try/except/finally block { "node": cst.TryStar( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptStarHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("KeyError"), whitespace_after_except=cst.SimpleWhitespace(""), ), ), finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "try: pass\nexcept* KeyError: pass\nfinally: pass\n", "parser": native_parse_statement, "expected_position": CodeRange((1, 0), (3, 13)), }, # Simple try/except/else block { "node": cst.TryStar( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptStarHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("KeyError"), whitespace_after_except=cst.SimpleWhitespace(""), ), ), orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "try: pass\nexcept* KeyError: pass\nelse: pass\n", "parser": native_parse_statement, "expected_position": CodeRange((1, 0), (3, 10)), }, # Verify whitespace in various locations { "node": cst.TryStar( leading_lines=(cst.EmptyLine(comment=cst.Comment("# 1")),), body=cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptStarHandler( leading_lines=(cst.EmptyLine(comment=cst.Comment("# 2")),), type=cst.Name("TypeError"), name=cst.AsName( cst.Name("e"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), whitespace_after_except=cst.SimpleWhitespace(" "), whitespace_after_star=cst.SimpleWhitespace(""), whitespace_before_colon=cst.SimpleWhitespace(" "), body=cst.SimpleStatementSuite((cst.Pass(),)), ), ), orelse=cst.Else( leading_lines=(cst.EmptyLine(comment=cst.Comment("# 3")),), body=cst.SimpleStatementSuite((cst.Pass(),)), whitespace_before_colon=cst.SimpleWhitespace(" "), ), finalbody=cst.Finally( leading_lines=(cst.EmptyLine(comment=cst.Comment("# 4")),), body=cst.SimpleStatementSuite((cst.Pass(),)), 
whitespace_before_colon=cst.SimpleWhitespace(" "), ), whitespace_before_colon=cst.SimpleWhitespace(" "), ), "code": "# 1\ntry : pass\n# 2\nexcept *TypeError as e : pass\n# 3\nelse : pass\n# 4\nfinally : pass\n", "parser": native_parse_statement, "expected_position": CodeRange((2, 0), (8, 14)), }, # Now all together { "node": cst.TryStar( cst.SimpleStatementSuite((cst.Pass(),)), handlers=( cst.ExceptStarHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("TypeError"), name=cst.AsName(cst.Name("e")), ), cst.ExceptStarHandler( cst.SimpleStatementSuite((cst.Pass(),)), type=cst.Name("KeyError"), name=cst.AsName(cst.Name("e")), ), ), orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "try: pass\n" + "except* TypeError as e: pass\n" + "except* KeyError as e: pass\n" + "else: pass\n" + "finally: pass\n", "parser": native_parse_statement, "expected_position": CodeRange((1, 0), (5, 13)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_tuple.py000066400000000000000000000257451456464173300212570ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Callable import libcst as cst from libcst import parse_expression, parse_statement from libcst._nodes.tests.base import CSTNodeTest, parse_expression_as from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class TupleTest(CSTNodeTest): @data_provider( [ # zero-element tuple {"node": cst.Tuple([]), "code": "()", "parser": parse_expression}, # one-element tuple, sentinel comma value { "node": cst.Tuple([cst.Element(cst.Name("single_element"))]), "code": "(single_element,)", "parser": None, }, { "node": cst.Tuple([cst.StarredElement(cst.Name("single_element"))]), "code": "(*single_element,)", "parser": None, }, # two-element tuple, sentinel comma value { "node": cst.Tuple( [cst.Element(cst.Name("one")), cst.Element(cst.Name("two"))] ), "code": "(one, two)", "parser": None, }, # remove parenthesis { "node": cst.Tuple( [cst.Element(cst.Name("one")), cst.Element(cst.Name("two"))], lpar=[], rpar=[], ), "code": "one, two", "parser": None, }, # add extra parenthesis { "node": cst.Tuple( [cst.Element(cst.Name("one")), cst.Element(cst.Name("two"))], lpar=[cst.LeftParen(), cst.LeftParen()], rpar=[cst.RightParen(), cst.RightParen()], ), "code": "((one, two))", "parser": None, }, # starred element { "node": cst.Tuple( [ cst.StarredElement(cst.Name("one")), cst.StarredElement(cst.Name("two")), ] ), "code": "(*one, *two)", "parser": None, }, # custom comma on Element { "node": cst.Tuple( [ cst.Element(cst.Name("one"), comma=cst.Comma()), cst.Element(cst.Name("two"), comma=cst.Comma()), ] ), "code": "(one,two,)", "parser": parse_expression, }, # custom comma on StarredElement { "node": cst.Tuple( [ cst.StarredElement(cst.Name("one"), comma=cst.Comma()), cst.StarredElement(cst.Name("two"), comma=cst.Comma()), ] ), "code": "(*one,*two,)", "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 11)), }, # top-level two-element tuple, with one being starred { "node": cst.SimpleStatementLine( body=[ cst.Expr( value=cst.Tuple( [ cst.Element(cst.Name("one"), comma=cst.Comma()), cst.StarredElement(cst.Name("two")), ], lpar=[], rpar=[], ) ) ] ), "code": 
"one,*two\n", "parser": parse_statement, }, # top-level three-element tuple, start/end is starred { "node": cst.SimpleStatementLine( body=[ cst.Expr( value=cst.Tuple( [ cst.StarredElement( cst.Name("one"), comma=cst.Comma() ), cst.Element(cst.Name("two"), comma=cst.Comma()), cst.StarredElement(cst.Name("three")), ], lpar=[], rpar=[], ) ) ] ), "code": "*one,two,*three\n", "parser": parse_statement, }, # missing spaces around tuple, okay with parenthesis { "node": cst.For( target=cst.Tuple( [ cst.Element(cst.Name("k"), comma=cst.Comma()), cst.Element(cst.Name("v")), ] ), iter=cst.Name("abc"), body=cst.SimpleStatementSuite([cst.Pass()]), whitespace_after_for=cst.SimpleWhitespace(""), whitespace_before_in=cst.SimpleWhitespace(""), ), "code": "for(k,v)in abc: pass\n", "parser": parse_statement, }, # no spaces around tuple, but using values that are parenthesized { "node": cst.For( target=cst.Tuple( [ cst.Element( cst.Name( "k", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ), comma=cst.Comma(), ), cst.Element( cst.Name( "v", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] ) ), ], lpar=[], rpar=[], ), iter=cst.Name("abc"), body=cst.SimpleStatementSuite([cst.Pass()]), whitespace_after_for=cst.SimpleWhitespace(""), whitespace_before_in=cst.SimpleWhitespace(""), ), "code": "for(k),(v)in abc: pass\n", "parser": parse_statement, }, # starred elements are safe to use without a space before them { "node": cst.For( target=cst.Tuple( [cst.StarredElement(cst.Name("foo"), comma=cst.Comma())], lpar=[], rpar=[], ), iter=cst.Name("bar"), body=cst.SimpleStatementSuite([cst.Pass()]), whitespace_after_for=cst.SimpleWhitespace(""), ), "code": "for*foo, in bar: pass\n", "parser": parse_statement, }, # a trailing comma doesn't mess up TrailingWhitespace { "node": cst.SimpleStatementLine( [ cst.Expr( cst.Tuple( [ cst.Element(cst.Name("one"), comma=cst.Comma()), cst.Element(cst.Name("two"), comma=cst.Comma()), ], lpar=[], rpar=[], ) ) ], trailing_whitespace=cst.TrailingWhitespace( whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# comment"), ), ), "code": "one,two, # comment\n", "parser": parse_statement, }, ] ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( ( lambda: cst.Tuple([], lpar=[], rpar=[]), "A zero-length tuple must be wrapped in parentheses.", ), ( lambda: cst.Tuple( [cst.Element(cst.Name("mismatched"))], lpar=[cst.LeftParen(), cst.LeftParen()], rpar=[cst.RightParen()], ), "unbalanced parens", ), ( lambda: cst.For( target=cst.Tuple([cst.Element(cst.Name("el"))], lpar=[], rpar=[]), iter=cst.Name("it"), body=cst.SimpleStatementSuite([cst.Pass()]), whitespace_after_for=cst.SimpleWhitespace(""), ), "Must have at least one space after 'for' keyword.", ), ( lambda: cst.For( target=cst.Tuple([cst.Element(cst.Name("el"))], lpar=[], rpar=[]), iter=cst.Name("it"), body=cst.SimpleStatementSuite([cst.Pass()]), whitespace_before_in=cst.SimpleWhitespace(""), ), "Must have at least one space before 'in' keyword.", ), # an additional check for StarredElement, since it's a separate codepath ( lambda: cst.For( target=cst.Tuple( [cst.StarredElement(cst.Name("el"))], lpar=[], rpar=[] ), iter=cst.Name("it"), body=cst.SimpleStatementSuite([cst.Pass()]), whitespace_before_in=cst.SimpleWhitespace(""), ), "Must have at least one space before 'in' keyword.", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) @data_provider( ( { "code": "(a, *b)", "parser": 
parse_expression_as(python_version="3.5"), "expect_success": True, }, { "code": "(a, *b)", "parser": parse_expression_as(python_version="3.3"), "expect_success": False, }, ) ) def test_versions(self, **kwargs: Any) -> None: if is_native() and not kwargs.get("expect_success", True): self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_type_alias.py000066400000000000000000000126151456464173300222500ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class TypeAliasCreationTest(CSTNodeTest): @data_provider( ( { "node": cst.TypeAlias( cst.Name("foo"), cst.Name("bar"), ), "code": "type foo = bar", "expected_position": CodeRange((1, 0), (1, 14)), }, { "node": cst.TypeAlias( cst.Name("foo"), type_parameters=cst.TypeParameters( [cst.TypeParam(cst.TypeVar(cst.Name("T")))] ), value=cst.BinaryOperation( cst.Name("bar"), cst.BitOr(), cst.Name("baz") ), ), "code": "type foo[T] = bar | baz", "expected_position": CodeRange((1, 0), (1, 23)), }, { "node": cst.TypeAlias( cst.Name("foo"), type_parameters=cst.TypeParameters( [ cst.TypeParam( cst.TypeVar(cst.Name("T"), bound=cst.Name("str")) ), cst.TypeParam(cst.TypeVarTuple(cst.Name("Ts"))), cst.TypeParam(cst.ParamSpec(cst.Name("KW"))), ] ), value=cst.BinaryOperation( cst.Name("bar"), cst.BitOr(), cst.Name("baz") ), ), "code": "type foo[T: str, *Ts, **KW] = bar | baz", "expected_position": CodeRange((1, 0), (1, 39)), }, ) ) def test_valid(self, **kwargs: Any) -> None: if not is_native(): self.skipTest("Disabled in the old parser") self.validate_node(**kwargs) class TypeAliasParserTest(CSTNodeTest): @data_provider( ( { "node": cst.SimpleStatementLine( [ cst.TypeAlias( cst.Name("foo"), cst.Name("bar"), whitespace_after_name=cst.SimpleWhitespace(" "), ) ] ), "code": "type foo = bar\n", "parser": parse_statement, }, { "node": cst.SimpleStatementLine( [ cst.TypeAlias( cst.Name("foo"), cst.Name("bar"), type_parameters=cst.TypeParameters( params=[ cst.TypeParam( cst.TypeVar( cst.Name("T"), cst.Name("str"), cst.Colon() ), cst.Comma(), ), cst.TypeParam( cst.ParamSpec( cst.Name("KW"), whitespace_after_star=cst.SimpleWhitespace( " " ), ), cst.Comma( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ), ], rbracket=cst.RightSquareBracket( cst.SimpleWhitespace("") ), ), whitespace_after_name=cst.SimpleWhitespace(" "), whitespace_after_type=cst.SimpleWhitespace(" "), whitespace_after_equals=cst.SimpleWhitespace(" "), whitespace_after_type_parameters=cst.SimpleWhitespace(" "), semicolon=cst.Semicolon( whitespace_before=cst.SimpleWhitespace(" "), whitespace_after=cst.SimpleWhitespace(" "), ), ) ] ), "code": "type foo [T:str,** KW , ] = bar ; \n", "parser": parse_statement, }, ) ) def test_valid(self, **kwargs: Any) -> None: if not is_native(): self.skipTest("Disabled in the old parser") self.validate_node(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_unary_op.py000066400000000000000000000062121456464173300217460ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, Optional import libcst as cst from libcst import parse_expression from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class UnaryOperationTest(CSTNodeTest): @data_provider( ( # Simple unary operations (cst.UnaryOperation(cst.Plus(), cst.Name("foo")), "+foo"), (cst.UnaryOperation(cst.Minus(), cst.Name("foo")), "-foo"), (cst.UnaryOperation(cst.BitInvert(), cst.Name("foo")), "~foo"), (cst.UnaryOperation(cst.Not(), cst.Name("foo")), "not foo"), # Parenthesized unary operation ( cst.UnaryOperation( lpar=(cst.LeftParen(),), operator=cst.Not(), expression=cst.Name("foo"), rpar=(cst.RightParen(),), ), "(not foo)", CodeRange((1, 1), (1, 8)), ), ( cst.UnaryOperation( operator=cst.Not(whitespace_after=cst.SimpleWhitespace("")), expression=cst.Name( "foo", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) ), ), "not(foo)", CodeRange((1, 0), (1, 8)), ), # Make sure that spacing works ( cst.UnaryOperation( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), operator=cst.Not(whitespace_after=cst.SimpleWhitespace(" ")), expression=cst.Name("foo"), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "( not foo )", CodeRange((1, 2), (1, 10)), ), ) ) def test_valid( self, node: cst.CSTNode, code: str, position: Optional[CodeRange] = None ) -> None: self.validate_node(node, code, parse_expression, expected_position=position) @data_provider( ( ( lambda: cst.UnaryOperation( cst.Plus(), cst.Name("foo"), lpar=(cst.LeftParen(),) ), "left paren without right paren", ), ( lambda: cst.UnaryOperation( cst.Plus(), cst.Name("foo"), rpar=(cst.RightParen(),) ), "right paren without left paren", ), ( lambda: cst.UnaryOperation( operator=cst.Not(whitespace_after=cst.SimpleWhitespace("")), expression=cst.Name("foo"), ), "at least one space after not operator", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) LibCST-1.2.0/libcst/_nodes/tests/test_while.py000066400000000000000000000122151456464173300212220ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
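# Illustrative sketch (an addition, not part of the original test file):
# as UnaryOperationTest above validates, `not` renders with a trailing
# space by default, which code_for_node makes visible directly.
import libcst as cst

node = cst.UnaryOperation(cst.Not(), cst.Name("foo"))
assert cst.Module([]).code_for_node(node) == "not foo"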
from typing import Any import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class WhileTest(CSTNodeTest): @data_provider( ( # Simple while block { "node": cst.While( cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)) ), "code": "while iter(): pass\n", "parser": parse_statement, }, # While block with else { "node": cst.While( cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), ), "code": "while iter(): pass\nelse: pass\n", "parser": parse_statement, }, # indentation { "node": DummyIndentedBlock( " ", cst.While( cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), ), ), "code": " while iter(): pass\n", "parser": None, "expected_position": CodeRange((1, 4), (1, 22)), }, # while an indented body { "node": DummyIndentedBlock( " ", cst.While( cst.Call(cst.Name("iter")), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), ), ), "code": " while iter():\n pass\n", "parser": None, "expected_position": CodeRange((1, 4), (2, 12)), }, # leading_lines { "node": cst.While( cst.Call(cst.Name("iter")), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), leading_lines=( cst.EmptyLine(comment=cst.Comment("# leading comment")), ), ), "code": "# leading comment\nwhile iter():\n pass\n", "parser": parse_statement, "expected_position": CodeRange((2, 0), (3, 8)), }, { "node": cst.While( cst.Call(cst.Name("iter")), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), cst.Else( cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), leading_lines=( cst.EmptyLine(comment=cst.Comment("# else comment")), ), ), leading_lines=( cst.EmptyLine(comment=cst.Comment("# leading comment")), ), ), "code": "# leading comment\nwhile iter():\n pass\n# else comment\nelse:\n pass\n", "parser": None, "expected_position": CodeRange((2, 0), (6, 8)), }, # Weird spacing rules { "node": cst.While( cst.Call( cst.Name("iter"), lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_while=cst.SimpleWhitespace(""), ), "code": "while(iter()): pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 19)), }, # Whitespace { "node": cst.While( cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_while=cst.SimpleWhitespace(" "), whitespace_before_colon=cst.SimpleWhitespace(" "), ), "code": "while iter() : pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 21)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": lambda: cst.While( cst.Call(cst.Name("iter")), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_while=cst.SimpleWhitespace(""), ), "expected_re": "Must have at least one space after 'while' keyword", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) LibCST-1.2.0/libcst/_nodes/tests/test_with.py000066400000000000000000000330321456464173300210650ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
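# Illustrative sketch (an addition, not part of the original test file):
# WhileTest above round-trips while statements; parse_statement returns
# the cst.While node directly, and it regenerates the exact source.
import libcst as cst

stmt = cst.parse_statement("while iter(): pass\n")
assert isinstance(stmt, cst.While)
assert cst.Module([]).code_for_node(stmt) == "while iter(): pass\n"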
from typing import Any import libcst as cst from libcst import parse_statement, PartialParserConfig from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock, parse_statement_as from libcst._parser.entrypoints import is_native from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class WithTest(CSTNodeTest): maxDiff: int = 2000 @data_provider( ( # Simple with block { "node": cst.With( (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "with context_mgr(): pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 24)), }, # Simple async with block { "node": cst.With( (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), cst.SimpleStatementSuite((cst.Pass(),)), asynchronous=cst.Asynchronous(), ), "code": "async with context_mgr(): pass\n", "parser": lambda code: parse_statement( code, config=PartialParserConfig(python_version="3.7") ), }, # Python 3.6 async with block { "node": cst.FunctionDef( cst.Name("foo"), cst.Parameters(), cst.IndentedBlock( ( cst.With( (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), cst.SimpleStatementSuite((cst.Pass(),)), asynchronous=cst.Asynchronous(), ), ) ), asynchronous=cst.Asynchronous(), ), "code": "async def foo():\n async with context_mgr(): pass\n", "parser": lambda code: parse_statement( code, config=PartialParserConfig(python_version="3.6") ), }, # Multiple context managers { "node": cst.With( ( cst.WithItem(cst.Call(cst.Name("foo"))), cst.WithItem(cst.Call(cst.Name("bar"))), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "with foo(), bar(): pass\n", "parser": None, }, { "node": cst.With( ( cst.WithItem( cst.Call(cst.Name("foo")), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), cst.WithItem(cst.Call(cst.Name("bar"))), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "with foo(), bar(): pass\n", "parser": parse_statement, }, # With block containing variable for context manager. 
{ "node": cst.With( ( cst.WithItem( cst.Call(cst.Name("context_mgr")), cst.AsName(cst.Name("ctx")), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "with context_mgr() as ctx: pass\n", "parser": parse_statement, }, { "node": cst.With( ( cst.WithItem( cst.Call(cst.Name("context_mgr")), cst.AsName( cst.Tuple(()), whitespace_after_as=cst.SimpleWhitespace(""), whitespace_before_as=cst.SimpleWhitespace(""), ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), ), "code": "with context_mgr()as(): pass\n", "parser": parse_statement, }, # indentation { "node": DummyIndentedBlock( " ", cst.With( (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), cst.SimpleStatementSuite((cst.Pass(),)), ), ), "code": " with context_mgr(): pass\n", "parser": None, "expected_position": CodeRange((1, 4), (1, 28)), }, # with an indented body { "node": DummyIndentedBlock( " ", cst.With( (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), ), ), "code": " with context_mgr():\n pass\n", "parser": None, "expected_position": CodeRange((1, 4), (2, 12)), }, # leading_lines { "node": cst.With( (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), cst.SimpleStatementSuite((cst.Pass(),)), leading_lines=( cst.EmptyLine(comment=cst.Comment("# leading comment")), ), ), "code": "# leading comment\nwith context_mgr(): pass\n", "parser": parse_statement, "expected_position": CodeRange((2, 0), (2, 24)), }, # Whitespace { "node": cst.With( ( cst.WithItem( cst.Call(cst.Name("context_mgr")), cst.AsName( cst.Name("ctx"), whitespace_before_as=cst.SimpleWhitespace(" "), whitespace_after_as=cst.SimpleWhitespace(" "), ), ), ), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_with=cst.SimpleWhitespace(" "), whitespace_before_colon=cst.SimpleWhitespace(" "), ), "code": "with context_mgr() as ctx : pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 36)), }, # Weird spacing rules, that parse differently depending on whether # we are using a grammar that included parenthesized with statements. { "node": cst.With( ( cst.WithItem( cst.Call( cst.Name("context_mgr"), lpar=() if is_native() else (cst.LeftParen(),), rpar=() if is_native() else (cst.RightParen(),), ) ), ), cst.SimpleStatementSuite((cst.Pass(),)), lpar=(cst.LeftParen() if is_native() else MaybeSentinel.DEFAULT), rpar=(cst.RightParen() if is_native() else MaybeSentinel.DEFAULT), whitespace_after_with=cst.SimpleWhitespace(""), ), "code": "with(context_mgr()): pass\n", "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 25)), }, # Multi-line parenthesized with. 
{ "node": cst.With( ( cst.WithItem( cst.Call(cst.Name("foo")), comma=cst.Comma( whitespace_after=cst.ParenthesizedWhitespace( first_line=cst.TrailingWhitespace( whitespace=cst.SimpleWhitespace( value="", ), comment=None, newline=cst.Newline( value=None, ), ), empty_lines=[], indent=True, last_line=cst.SimpleWhitespace( value=" ", ), ) ), ), cst.WithItem(cst.Call(cst.Name("bar")), comma=cst.Comma()), ), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")), rpar=cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")), ), "code": ("with ( foo(),\n" " bar(), ): pass\n"), # noqa "parser": parse_statement if is_native() else None, "expected_position": CodeRange((1, 0), (2, 21)), }, ) ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( ( { "get_node": lambda: cst.With( (), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)) ), "expected_re": "A With statement must have at least one WithItem", }, { "get_node": lambda: cst.With( ( cst.WithItem( cst.Call(cst.Name("foo")), comma=cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), ), ), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), ), "expected_re": "The last WithItem in an unparenthesized With cannot " + "have a trailing comma.", }, { "get_node": lambda: cst.With( (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_with=cst.SimpleWhitespace(""), ), "expected_re": "Must have at least one space after with keyword", }, { "get_node": lambda: cst.With( (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_with=cst.SimpleWhitespace(""), lpar=cst.LeftParen(), ), "expected_re": "Do not mix concrete LeftParen/RightParen with " + "MaybeSentinel", }, { "get_node": lambda: cst.With( (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), cst.SimpleStatementSuite((cst.Pass(),)), whitespace_after_with=cst.SimpleWhitespace(""), rpar=cst.RightParen(), ), "expected_re": "Do not mix concrete LeftParen/RightParen with " + "MaybeSentinel", }, ) ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) @data_provider( ( { "code": "with a, b: pass", "parser": parse_statement_as(python_version="3.1"), "expect_success": True, }, { "code": "with a, b: pass", "parser": parse_statement_as(python_version="3.0"), "expect_success": False, }, ) ) def test_versions(self, **kwargs: Any) -> None: if is_native() and not kwargs.get("expect_success", True): self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) def test_adding_parens(self) -> None: node = cst.With( ( cst.WithItem( cst.Call(cst.Name("foo")), comma=cst.Comma( whitespace_after=cst.ParenthesizedWhitespace(), ), ), cst.WithItem(cst.Call(cst.Name("bar")), comma=cst.Comma()), ), cst.SimpleStatementSuite((cst.Pass(),)), lpar=cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")), rpar=cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")), ) module = cst.Module([]) self.assertEqual( module.code_for_node(node), ("with ( foo(),\n" "bar(), ): pass\n") # noqa ) LibCST-1.2.0/libcst/_nodes/tests/test_yield.py000066400000000000000000000211321456464173300212160ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import Any, Callable, Optional import libcst as cst from libcst import parse_statement from libcst._nodes.tests.base import CSTNodeTest, parse_statement_as from libcst._parser.entrypoints import is_native from libcst.helpers import ensure_type from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class YieldConstructionTest(CSTNodeTest): @data_provider( ( # Simple yield (cst.Yield(), "yield"), # yield expression (cst.Yield(cst.Name("a")), "yield a"), # yield from expression (cst.Yield(cst.From(cst.Call(cst.Name("a")))), "yield from a()"), # Parenthesizing tests ( cst.Yield( lpar=(cst.LeftParen(),), value=cst.Integer("5"), rpar=(cst.RightParen(),), ), "(yield 5)", ), # Whitespace oddities tests ( cst.Yield( cst.Name("a", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)), whitespace_after_yield=cst.SimpleWhitespace(""), ), "yield(a)", CodeRange((1, 0), (1, 8)), ), ( cst.Yield( cst.From( cst.Call( cst.Name("a"), lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), whitespace_after_from=cst.SimpleWhitespace(""), ) ), "yield from(a())", ), # Whitespace rendering/parsing tests ( cst.Yield( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), value=cst.Integer("5"), whitespace_after_yield=cst.SimpleWhitespace(" "), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "( yield 5 )", ), ( cst.Yield( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), value=cst.From( cst.Call(cst.Name("bla")), whitespace_after_from=cst.SimpleWhitespace(" "), ), whitespace_after_yield=cst.SimpleWhitespace(" "), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "( yield from bla() )", CodeRange((1, 2), (1, 20)), ), # From expression position tests ( cst.From( cst.Integer("5"), whitespace_after_from=cst.SimpleWhitespace(" ") ), "from 5", CodeRange((1, 0), (1, 6)), ), ) ) def test_valid( self, node: cst.CSTNode, code: str, position: Optional[CodeRange] = None ) -> None: self.validate_node(node, code, expected_position=position) @data_provider( ( # Paren validation ( lambda: cst.Yield(lpar=(cst.LeftParen(),)), "left paren without right paren", ), ( lambda: cst.Yield(rpar=(cst.RightParen(),)), "right paren without left paren", ), # Make sure we have adequate space after yield ( lambda: cst.Yield( cst.Name("a"), whitespace_after_yield=cst.SimpleWhitespace("") ), "Must have at least one space after 'yield' keyword", ), ( lambda: cst.Yield( cst.From(cst.Call(cst.Name("a"))), whitespace_after_yield=cst.SimpleWhitespace(""), ), "Must have at least one space after 'yield' keyword", ), # Make sure we have adequate space after from ( lambda: cst.Yield( cst.From( cst.Call(cst.Name("a")), whitespace_after_from=cst.SimpleWhitespace(""), ) ), "Must have at least one space after 'from' keyword", ), ) ) def test_invalid( self, get_node: Callable[[], cst.CSTNode], expected_re: str ) -> None: self.assert_invalid(get_node, expected_re) class YieldParsingTest(CSTNodeTest): @data_provider( ( # Simple yield (cst.Yield(), "yield"), # yield expression ( cst.Yield( cst.Name("a"), whitespace_after_yield=cst.SimpleWhitespace(" ") ), "yield a", ), # yield from expression ( cst.Yield( cst.From( cst.Call(cst.Name("a")), whitespace_after_from=cst.SimpleWhitespace(" "), ), whitespace_after_yield=cst.SimpleWhitespace(" "), ), "yield from a()", ), # Parenthesizing tests ( cst.Yield( lpar=(cst.LeftParen(),), whitespace_after_yield=cst.SimpleWhitespace(" "), value=cst.Integer("5"), rpar=(cst.RightParen(),), ), "(yield 5)", ), # Whitespace
oddities tests ( cst.Yield( cst.Name("a", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),)), whitespace_after_yield=cst.SimpleWhitespace(""), ), "yield(a)", ), ( cst.Yield( cst.From( cst.Call( cst.Name("a"), lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),), ), whitespace_after_from=cst.SimpleWhitespace(""), ), whitespace_after_yield=cst.SimpleWhitespace(" "), ), "yield from(a())", ), # Whitespace rendering/parsing tests ( cst.Yield( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), value=cst.Integer("5"), whitespace_after_yield=cst.SimpleWhitespace(" "), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "( yield 5 )", ), ( cst.Yield( lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), value=cst.From( cst.Call(cst.Name("bla")), whitespace_after_from=cst.SimpleWhitespace(" "), ), whitespace_after_yield=cst.SimpleWhitespace(" "), rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), ), "( yield from bla() )", ), ) ) def test_valid( self, node: cst.CSTNode, code: str, position: Optional[CodeRange] = None ) -> None: self.validate_node( node, code, lambda code: ensure_type( ensure_type(parse_statement(code), cst.SimpleStatementLine).body[0], cst.Expr, ).value, ) @data_provider( ( { "code": "yield from x", "parser": parse_statement_as(python_version="3.3"), "expect_success": True, }, { "code": "yield from x", "parser": parse_statement_as(python_version="3.1"), "expect_success": False, }, ) ) def test_versions(self, **kwargs: Any) -> None: if is_native() and not kwargs.get("expect_success", True): self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) LibCST-1.2.0/libcst/_nodes/whitespace.py000066400000000000000000000257501456464173300200550ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Optional, Pattern, Sequence from libcst._add_slots import add_slots from libcst._nodes.base import BaseLeaf, BaseValueToken, CSTNode, CSTValidationError from libcst._nodes.internal import ( CodegenState, visit_optional, visit_required, visit_sequence, ) from libcst._visitors import CSTVisitorT # SimpleWhitespace includes continuation characters, which must be followed immediately # by a newline. SimpleWhitespace does not include other kinds of newlines, because those # may have semantic significance. SIMPLE_WHITESPACE_RE: Pattern[str] = re.compile(r"([ \f\t]|\\(\r\n?|\n))*", re.UNICODE) NEWLINE_RE: Pattern[str] = re.compile(r"\r\n?|\n", re.UNICODE) COMMENT_RE: Pattern[str] = re.compile(r"#[^\r\n]*", re.UNICODE) class BaseParenthesizableWhitespace(CSTNode, ABC): """ This is the kind of whitespace you might see inside the body of a statement or expression between two tokens. This is the most common type of whitespace. The list of allowed characters in a whitespace depends on whether it is found inside a parenthesized expression or not. This class allows nodes which can be found inside or outside a ``()``, ``[]`` or ``{}`` section to accept either whitespace form. https://docs.python.org/3/reference/lexical_analysis.html#implicit-line-joining Parenthesizable whitespace may contain a backslash character (``\\``), when used as a line-continuation character. While the continuation character isn't technically "whitespace", it serves the same purpose. 
Parenthesizable whitespace is often non-semantic (optional), but in cases where whitespace solves a grammar ambiguity between tokens (e.g. ``if test``, versus ``iftest``), it has some semantic value. """ __slots__ = () # TODO: Should we somehow differentiate places where we require non-zero whitespace # with a separate type? @property @abstractmethod def empty(self) -> bool: """ Indicates that this node is empty (zero whitespace characters). """ ... @add_slots @dataclass(frozen=True) class SimpleWhitespace(BaseParenthesizableWhitespace, BaseValueToken): """ This is the kind of whitespace you might see inside the body of a statement or expression between two tokens. This is the most common type of whitespace. A simple whitespace cannot contain a newline character unless it is directly preceded by a line continuation character (``\\``). It can contain zero or more spaces or tabs. If you need a newline character without a line continuation character, use :class:`ParenthesizedWhitespace` instead. Simple whitespace is often non-semantic (optional), but in cases where whitespace solves a grammar ambiguity between tokens (e.g. ``if test``, versus ``iftest``), it has some semantic value. An example :class:`SimpleWhitespace` containing a space, a line continuation, a newline and another space is as follows:: SimpleWhitespace(r" \\\\n ") """ #: Actual string value of the simple whitespace. A legal value contains only #: space, ``\f`` and ``\t`` characters, and optionally a continuation #: (``\``) followed by a newline (``\n`` or ``\r\n``). value: str def _validate(self) -> None: if SIMPLE_WHITESPACE_RE.fullmatch(self.value) is None: raise CSTValidationError( f"Got non-whitespace value for whitespace node: {repr(self.value)}" ) @property def empty(self) -> bool: """ Indicates that this node is empty (zero whitespace characters). """ return len(self.value) == 0 @add_slots @dataclass(frozen=True) class Newline(BaseLeaf): """ Represents the newline that ends an :class:`EmptyLine` or a statement (as part of :class:`TrailingWhitespace`). Other newlines may occur in the document after continuation characters (the backslash, ``\\``), but those newlines are treated as part of the :class:`SimpleWhitespace`. Optionally, a value can be specified in order to overwrite the module's default newline. In general, this should be left as the default, which is ``None``. This is allowed because Python modules are permitted to mix multiple unambiguous newline markers. """ #: A value of ``None`` indicates that the module's default newline sequence should #: be used. A value of ``\n`` or ``\r\n`` indicates that the exact value specified #: will be used for this newline. value: Optional[str] = None def _validate(self) -> None: value = self.value if value and NEWLINE_RE.fullmatch(value) is None: raise CSTValidationError( f"Got an invalid value for newline node: {repr(value)}" ) def _codegen_impl(self, state: CodegenState) -> None: value = self.value state.add_token(state.default_newline if value is None else value) @add_slots @dataclass(frozen=True) class Comment(BaseValueToken): """ A comment including the leading pound (``#``) character. The leading pound character is included in the 'value' property (instead of being stripped) to help reinforce the idea that whitespace immediately after the pound character may be significant.
E.g:: # comment with whitespace at the start (usually preferred) #comment without whitespace at the start (usually not desirable) Usually wrapped in a :class:`TrailingWhitespace` or :class:`EmptyLine` node. """ #: The comment itself. Valid values start with the pound (``#``) character followed #: by zero or more non-newline characters. Comments cannot include newlines. value: str def _validate(self) -> None: if COMMENT_RE.fullmatch(self.value) is None: raise CSTValidationError( f"Got non-comment value for comment node: {repr(self.value)}" ) @add_slots @dataclass(frozen=True) class TrailingWhitespace(CSTNode): """ The whitespace at the end of a line after a statement. If a line contains only whitespace, :class:`EmptyLine` should be used instead. """ #: Any simple whitespace before any comment or newline. whitespace: SimpleWhitespace = SimpleWhitespace.field("") #: An optional comment appearing after any simple whitespace. comment: Optional[Comment] = None #: The newline character that terminates this trailing whitespace. newline: Newline = Newline.field() def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TrailingWhitespace": return TrailingWhitespace( whitespace=visit_required(self, "whitespace", self.whitespace, visitor), comment=visit_optional(self, "comment", self.comment, visitor), newline=visit_required(self, "newline", self.newline, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: self.whitespace._codegen(state) comment = self.comment if comment is not None: comment._codegen(state) self.newline._codegen(state) @add_slots @dataclass(frozen=True) class EmptyLine(CSTNode): """ Represents a line with only whitespace/comments. Usually statements will own any :class:`EmptyLine` nodes above themselves, and a :class:`Module` will own the document's header/footer :class:`EmptyLine` nodes. """ #: An empty line doesn't have to correspond to the current indentation level. For #: example, this happens when all trailing whitespace is stripped and there is #: an empty line between two statements. indent: bool = True #: Extra whitespace after the indent, but before the comment. whitespace: SimpleWhitespace = SimpleWhitespace.field("") #: An optional comment appearing after the indent and extra whitespace. comment: Optional[Comment] = None #: The newline character that terminates this empty line. newline: Newline = Newline.field() def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "EmptyLine": return EmptyLine( indent=self.indent, whitespace=visit_required(self, "whitespace", self.whitespace, visitor), comment=visit_optional(self, "comment", self.comment, visitor), newline=visit_required(self, "newline", self.newline, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: if self.indent: state.add_indent_tokens() self.whitespace._codegen(state) comment = self.comment if comment is not None: comment._codegen(state) self.newline._codegen(state) @add_slots @dataclass(frozen=True) class ParenthesizedWhitespace(BaseParenthesizableWhitespace): """ This is the kind of whitespace you might see inside a parenthesized expression or statement between two tokens when there is a newline without a line continuation (``\\``) character. https://docs.python.org/3/reference/lexical_analysis.html#implicit-line-joining A parenthesized whitespace cannot be empty since it requires at least one :class:`TrailingWhitespace`. If you have whitespace that does not contain comments or newlines, use :class:`SimpleWhitespace` instead. 
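An illustrative example (a sketch, not the only valid construction), breaking a line directly after the previous token and resuming at the current indentation::

    ParenthesizedWhitespace(
        first_line=TrailingWhitespace(newline=Newline()),
        indent=True,
        last_line=SimpleWhitespace(""),
    )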
""" #: The whitespace that comes after the previous node, up to and including #: the end-of-line comment and newline. first_line: TrailingWhitespace = TrailingWhitespace.field() #: Any lines after the first that contain only indentation and/or comments. empty_lines: Sequence[EmptyLine] = () #: Whether or not the final simple whitespace is indented regularly. indent: bool = False #: Extra whitespace after the indent, but before the next node. last_line: SimpleWhitespace = SimpleWhitespace.field("") def _visit_and_replace_children( self, visitor: CSTVisitorT ) -> "ParenthesizedWhitespace": return ParenthesizedWhitespace( first_line=visit_required(self, "first_line", self.first_line, visitor), empty_lines=visit_sequence(self, "empty_lines", self.empty_lines, visitor), indent=self.indent, last_line=visit_required(self, "last_line", self.last_line, visitor), ) def _codegen_impl(self, state: CodegenState) -> None: self.first_line._codegen(state) for line in self.empty_lines: line._codegen(state) if self.indent: state.add_indent_tokens() self.last_line._codegen(state) @property def empty(self) -> bool: """ Indicates that this node is empty (zero whitespace characters). For :class:`ParenthesizedWhitespace` this will always be ``False``. """ # Its not possible to have a ParenthesizedWhitespace with zero characers. # If we did, the TrailingWhitespace would not have parsed. return False LibCST-1.2.0/libcst/_parser/000077500000000000000000000000001456464173300155225ustar00rootroot00000000000000LibCST-1.2.0/libcst/_parser/__init__.py000066400000000000000000000002631456464173300176340ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/_parser/base_parser.py000066400000000000000000000207221456464173300203650ustar00rootroot00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Modifications: # Copyright David Halter and Contributors # Modifications are dual-licensed: MIT and PSF. # 99% of the code is different from pgen2, now. # A fork of `parso.parser`. # https://github.com/davidhalter/parso/blob/v0.3.4/parso/parser.py # # The following changes were made: # - Typing was added. # - Error recovery is removed. # - The Jedi-specific _allowed_transition_names_and_token_types API is removed. # - Improved error messages by using our exceptions module. # - node_map/leaf_map were removed in favor of just calling convert_*. # - convert_node/convert_leaf were renamed to convert_nonterminal/convert_terminal # - convert_nonterminal is called regardless of the number of children. Parso avoids # calling it in some cases to avoid creating extra nodes. # - The parser is constructed with the tokens to allow us to track a bit more state. As # As a consequence parser may only be used once. # - Supports our custom Token class, instead of `parso.python.tokenize.Token`. 
from dataclasses import dataclass, field from typing import Generic, Iterable, List, Sequence, TypeVar, Union from libcst._exceptions import ( EOFSentinel, get_expected_str, ParserSyntaxError, PartialParserSyntaxError, ) from libcst._parser.parso.pgen2.generator import DFAState, Grammar, ReservedString from libcst._parser.parso.python.token import TokenType from libcst._parser.types.token import Token _NodeT = TypeVar("_NodeT") _TokenTypeT = TypeVar("_TokenTypeT", bound=TokenType) _TokenT = TypeVar("_TokenT", bound=Token) @dataclass(frozen=False) class StackNode(Generic[_TokenTypeT, _NodeT]): dfa: "DFAState[_TokenTypeT]" nodes: List[_NodeT] = field(default_factory=list) @property def nonterminal(self) -> str: return self.dfa.from_rule def _token_to_transition( grammar: "Grammar[_TokenTypeT]", type_: _TokenTypeT, value: str ) -> Union[ReservedString, _TokenTypeT]: # Map from token to label if type_.contains_syntax: # Check for reserved words (keywords) try: return grammar.reserved_syntax_strings[value] except KeyError: pass return type_ # TODO: This should be an ABC, but there's a metaclass conflict between Generic and ABC # that's fixed in Python 3.7. class BaseParser(Generic[_TokenT, _TokenTypeT, _NodeT]): """Parser engine. A Parser instance contains state pertaining to the current token sequence, and should not be used concurrently by different threads to parse separate token sequences. See python/tokenize.py for how to get input tokens by a string. """ tokens: Iterable[_TokenT] lines: Sequence[str] # used when generating parse errors _pgen_grammar: "Grammar[_TokenTypeT]" stack: List[StackNode[_TokenTypeT, _NodeT]] # Keep track of if parse was called. Because a parser may keep global mutable state, # each BaseParser instance should only be used once. __was_parse_called: bool def __init__( self, *, tokens: Iterable[_TokenT], lines: Sequence[str], pgen_grammar: "Grammar[_TokenTypeT]", start_nonterminal: str, ) -> None: self.tokens = tokens self.lines = lines self._pgen_grammar = pgen_grammar first_dfa = pgen_grammar.nonterminal_to_dfas[start_nonterminal][0] self.stack = [StackNode(first_dfa)] self.__was_parse_called = False def parse(self) -> _NodeT: # Ensure that we don't re-use parsers. if self.__was_parse_called: raise Exception("Each parser object may only be used to parse once.") self.__was_parse_called = True for token in self.tokens: self._add_token(token) while True: tos = self.stack[-1] if not tos.dfa.is_final: expected_str = get_expected_str( EOFSentinel.EOF, tos.dfa.transitions.keys() ) raise ParserSyntaxError( f"Incomplete input. {expected_str}", lines=self.lines, raw_line=len(self.lines), raw_column=len(self.lines[-1]), ) if len(self.stack) > 1: self._pop() else: return self.convert_nonterminal(tos.nonterminal, tos.nodes) def convert_nonterminal( self, nonterminal: str, children: Sequence[_NodeT] ) -> _NodeT: ... def convert_terminal(self, token: _TokenT) -> _NodeT: ... def _add_token(self, token: _TokenT) -> None: """ This is the only core function for parsing. Here happens basically everything. Everything is well prepared by the parser generator and we only apply the necessary steps here. """ grammar = self._pgen_grammar stack = self.stack # pyre-fixme[6]: Expected `_TokenTypeT` for 2nd param but got `TokenType`. 
transition = _token_to_transition(grammar, token.type, token.string) while True: try: plan = stack[-1].dfa.transitions[transition] break except KeyError: if stack[-1].dfa.is_final: try: self._pop() except PartialParserSyntaxError as ex: # Upconvert the PartialParserSyntaxError to a ParserSyntaxError # by backfilling the line/column information. raise ParserSyntaxError( ex.message, lines=self.lines, raw_line=token.start_pos[0], raw_column=token.start_pos[1], ) except Exception as ex: # convert_nonterminal may fail due to a bug in our code. Try to # recover enough to at least tell us where in the file it # failed. raise ParserSyntaxError( f"Internal error: {ex}", lines=self.lines, raw_line=token.start_pos[0], raw_column=token.start_pos[1], ) else: # We never broke out -- EOF is too soon -- Unfinished statement. # # BUG: The `expected_str` may not be complete because we already # popped the other possibilities off the stack at this point, but # it still seems useful to list some of the possibilities that we # could've expected. expected_str = get_expected_str( token, stack[-1].dfa.transitions.keys() ) raise ParserSyntaxError( f"Incomplete input. {expected_str}", lines=self.lines, raw_line=token.start_pos[0], raw_column=token.start_pos[1], ) except IndexError: # I don't think this will ever happen with Python's grammar, because if # there are any extra tokens at the end of the input, we'll instead # complain that we expected ENDMARKER. # # However, let's leave it just in case. expected_str = get_expected_str(token, EOFSentinel.EOF) raise ParserSyntaxError( f"Too much input. {expected_str}", lines=self.lines, raw_line=token.start_pos[0], raw_column=token.start_pos[1], ) # Logically, `plan` is always defined, but pyre can't reasonably determine that. stack[-1].dfa = plan.next_dfa for push in plan.dfa_pushes: stack.append(StackNode(push)) leaf = self.convert_terminal(token) stack[-1].nodes.append(leaf) def _pop(self) -> None: tos = self.stack.pop() # Unlike parso and lib2to3, we call `convert_nonterminal` unconditionally # instead of only when we have more than one child. This allows us to create a # far more consistent and predictable tree. new_node = self.convert_nonterminal(tos.dfa.from_rule, tos.nodes) self.stack[-1].nodes.append(new_node) LibCST-1.2.0/libcst/_parser/conversions/000077500000000000000000000000001456464173300200725ustar00rootroot00000000000000LibCST-1.2.0/libcst/_parser/conversions/README.md000066400000000000000000000201551456464173300213540ustar00rootroot00000000000000# Parser Conversions Developer Guide Parser conversions take grammar productions and convert them to CST nodes, or to some "partial" value that will later be converted to a CST node. The grammar production that parser conversions are associated with is co-located alongside the conversion function using our `@with_production` decorator. This is similar to the API that [rply](https://github.com/alex/rply/) uses. Grammar productions are collected when the parser is first called, and converted into a state machine by Parso's pgen2 fork. Unlike rply's API, productions are not automatically gathered, because that would be dependent on implicit import-time side-effects. Instead all conversion functions must be listed in `_grammar.py`. # What's a production? A production is a line in our BNF-like grammar definition. A production has a name (the first argument of `@with_production`), and a sequence of children (the second argument of `@with_production`). 
Python's full grammar is here: https://docs.python.org/3/reference/grammar.html We use Parso's fork of pgen2, and therefore support the same BNF-like syntax that Python's documentation uses. # Why is everything `Any`-typed? Isn't that bad? Yes, `Any` types indicate a gap in static type coverage. Unfortunately, this isn't easily solved. The value of `children` given to a conversion function is dependent on textual grammar representation and pgen2's implementation, which the type system is unaware of. Unless we extend the type system to support pgen2 (unlikely) or add a layer of machine-generated code (possible, but we're not there), there's no way for the type system to validate any annotations on `children`. We could add annotations to `children`, but they're usually complicated types (so they wouldn't be very human-readable), and they wouldn't actually provide any type safety because the type checker doesn't know about them. Similarly, we could annotate return type annotations, but that's just duplicating the type we're already expressing in our return statement (so it doesn't improve readability much), and it's not providing any static type safety. We do perform runtime type checks inside tests, and we hope that this test coverage will help compensate for the lack of static type safety. # Where's the whitespace? The most important differentiation between an Abstract Syntax Tree and a Concrete Syntax Tree (for our purposes) is that the CST contains enough information to exactly reproduce the original program. This means that we must somehow capture and store whitespace. The grammar does not contain whitespace information, and there are no explicit tokens for whitespace. If the grammar did contain whitespace information, the grammar likely wouldn't be LL(1), and while we could use another context free grammar parsing algorithm, it would add complexity and likely wouldn't be as efficient. Instead, we have a hand-written re-entrant recursive-descent parser for whitespace. It's the responsibility of conversion functions to call into this parser given whitespace states before and after a token. # Token and WhitespaceState Data Structures A token is defined as: ``` class Token: type: TokenType string: str # The start of where `string` is in the source, not including leading whitespace. start_pos: Tuple[int, int] # The end of where `string` is in the source, not including trailing whitespace. end_pos: Tuple[int, int] whitespace_before: WhitespaceState whitespace_after: WhitespaceState ``` Or, in the order that these pieces appear lexically in a parsed program: ``` +-------------------+--------+-------------------+ | whitespace_before | string | whitespace_after | | (WhitespaceState) | (str) | (WhitespaceState) | +-------------------+--------+-------------------+ ``` Tokens are immutable, but only shallowly, because their whitespace fields are mutable WhitespaceState objects. WhitespaceStates are opaque objects that the whitespace parser consumes and mutates. WhitespaceState nodes are shared across multiple tokens, so `whitespace_after` is the same object as `whitespace_before` in the next token. # Parser Execution Order The parser generator we use (`pgen2`) is bottom-up, meaning that children productions are called before their parents. In contrast, our hand written whitespace parser is top-down. Inside each production, child conversion functions are called from left to right. 
As an example, assume we're given the following simple grammar and program: ``` add_expr: NUMBER ['+' add_expr] ``` ``` 1 + 2 + 3 ``` which forms the parse tree: ``` [H] add_expr / | \ [A] 1 [B] '+' [G] add_expr / | \ [C] 2 [D] '+' [F] add_expr | [E] 3 ``` The conversion functions would be called in the labeled alphabetical order, with `A` converted first, and `H` converted last. # Who owns whitespace? There are a lot of holes between you and a correct whitespace representation, but these can be divided into a few categories of potential mistakes: ## Forgetting to Parse Whitespace Fortunately, the inverse (parsing the same whitespace twice) should not be possible, because whitespace is "consumed" by the whitespace parser. This kind of mistake is easily caught with tests. ## Assigning Whitespace to the Wrong Owner This is probably the easiest mistake to make. In a bottom-up parser like ours, children are parsed before their parents, so a child gets the first chance to consume whitespace. However, the best owner for whitespace in our tree when there are multiple possible owners is usually the top-most node. As an example, assume we have the following grammar and program: ``` simple_stmt: (pass_stmt ';')* NEWLINE ``` ``` pass; # comment ``` Since `cst.Semicolon` and `cst.SimpleStatement` can both store some whitespace after themselves, there's some ambiguity about who should own the space character before the comment. However, since `cst.SimpleStatement` is the parent, the convention is that it should own it. Unfortunately, since nodes are processed bottom-to-top and left-to-right, the semicolon under `simple_stmt` will get processed before `simple_stmt` is. This means that in a naive implementation, the semicolon's conversion function would have a chance to consume the whitespace before `simple_stmt` can. To solve this problem, you must "fix" the whitespace in the parent node's conversion function or grammar. This can be done in a number of ways. In order of preference: 1. Split the child's grammar production into two separate productions, one that consumes its leading or trailing whitespace, and one that doesn't. Depending on the parent, use the appropriate version of the child. 2. Construct a "partial" node in the child that doesn't consume the whitespace, and then consume the correct whitespace in the parent. Be careful about what whitespace a node's siblings consume. 3. "Steal" the whitespace from the child by replacing the child with a new version that doesn't have the whitespace. This mistake is probably hard to catch with tests, because the CST will still reprint correctly, but it creates ergonomic issues for tools consuming the CST. ## Consuming Whitespace in the Wrong Order This mistake is probably the hardest to make by accident, but it could still happen, and may be hard to catch with tests. Given the following piece of code: ``` pass # trailing # empty line pass ``` The first statement should own `# trailing` (parsed using `parse_trailing_whitespace`). The second statement should then own `# empty line` (parsed using `parse_empty_lines`). However, it's possible that if you somehow called `parse_empty_lines` on the second statement before calling `parse_trailing_whitespace` on the first statement, `parse_empty_lines` could accidentally end up consuming the `# trailing` comment, because `parse_trailing_whitespace` hasn't yet consumed it.
However, this circumstance is unlikely, because you'd explicitly have to handle the children out-of-order, and we have assertions inside the whitespace parser to prevent some possible mistakes, like the one described above. LibCST-1.2.0/libcst/_parser/conversions/__init__.py000066400000000000000000000002631456464173300222040ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/_parser/conversions/expression.py000066400000000000000000001475261456464173300226620ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe import re import typing from tokenize import ( Floatnumber as FLOATNUMBER_RE, Imagnumber as IMAGNUMBER_RE, Intnumber as INTNUMBER_RE, ) from libcst._exceptions import PartialParserSyntaxError from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.expression import ( Arg, Asynchronous, Attribute, Await, BinaryOperation, BooleanOperation, Call, Comparison, ComparisonTarget, CompFor, CompIf, ConcatenatedString, Dict, DictComp, DictElement, Element, Ellipsis, Float, FormattedString, FormattedStringExpression, FormattedStringText, From, GeneratorExp, IfExp, Imaginary, Index, Integer, Lambda, LeftCurlyBrace, LeftParen, LeftSquareBracket, List, ListComp, Name, NamedExpr, Param, Parameters, RightCurlyBrace, RightParen, RightSquareBracket, Set, SetComp, Slice, StarredDictElement, StarredElement, Subscript, SubscriptElement, Tuple, UnaryOperation, Yield, ) from libcst._nodes.op import ( Add, And, AssignEqual, BaseBinaryOp, BaseBooleanOp, BaseCompOp, BitAnd, BitInvert, BitOr, BitXor, Colon, Comma, Divide, Dot, Equal, FloorDivide, GreaterThan, GreaterThanEqual, In, Is, IsNot, LeftShift, LessThan, LessThanEqual, MatrixMultiply, Minus, Modulo, Multiply, Not, NotEqual, NotIn, Or, Plus, Power, RightShift, Subtract, ) from libcst._nodes.whitespace import SimpleWhitespace from libcst._parser.custom_itertools import grouper from libcst._parser.production_decorator import with_production from libcst._parser.types.config import ParserConfig from libcst._parser.types.partials import ( ArglistPartial, AttributePartial, CallPartial, FormattedStringConversionPartial, FormattedStringFormatSpecPartial, SlicePartial, SubscriptPartial, WithLeadingWhitespace, ) from libcst._parser.types.token import Token from libcst._parser.whitespace_parser import parse_parenthesizable_whitespace BINOP_TOKEN_LUT: typing.Dict[str, typing.Type[BaseBinaryOp]] = { "*": Multiply, "@": MatrixMultiply, "/": Divide, "%": Modulo, "//": FloorDivide, "+": Add, "-": Subtract, "<<": LeftShift, ">>": RightShift, "&": BitAnd, "^": BitXor, "|": BitOr, } BOOLOP_TOKEN_LUT: typing.Dict[str, typing.Type[BaseBooleanOp]] = {"and": And, "or": Or} COMPOP_TOKEN_LUT: typing.Dict[str, typing.Type[BaseCompOp]] = { "<": LessThan, ">": GreaterThan, "==": Equal, "<=": LessThanEqual, ">=": GreaterThanEqual, "in": In, "is": Is, } # N.B. This uses a `testlist | star_expr`, not a `testlist_star_expr` because # `testlist_star_expr` may not always be representable by a non-partial node, since it's # only used as part of `expr_stmt`. 
@with_production("expression_input", "(testlist | star_expr) ENDMARKER") def convert_expression_input( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (child, endmarker) = children # HACK: UGLY! REMOVE THIS SOON! # Unwrap WithLeadingWhitespace if it exists. It shouldn't exist by this point, but # testlist isn't fully implemented, and we currently leak these partial objects. if isinstance(child, WithLeadingWhitespace): child = child.value return child @with_production("namedexpr_test", "test [':=' test]", version=">=3.8") def convert_namedexpr_test( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: test, *assignment = children if len(assignment) == 0: return test # Convert all of the operations that have no precedence in a loop (walrus, value) = assignment return WithLeadingWhitespace( NamedExpr( target=test.value, whitespace_before_walrus=parse_parenthesizable_whitespace( config, walrus.whitespace_before ), whitespace_after_walrus=parse_parenthesizable_whitespace( config, walrus.whitespace_after ), value=value.value, ), test.whitespace_before, ) @with_production("test", "or_test ['if' or_test 'else' test] | lambdef") def convert_test( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: (child,) = children return child else: (body, if_token, test, else_token, orelse) = children return WithLeadingWhitespace( IfExp( body=body.value, test=test.value, orelse=orelse.value, whitespace_before_if=parse_parenthesizable_whitespace( config, if_token.whitespace_before ), whitespace_after_if=parse_parenthesizable_whitespace( config, if_token.whitespace_after ), whitespace_before_else=parse_parenthesizable_whitespace( config, else_token.whitespace_before ), whitespace_after_else=parse_parenthesizable_whitespace( config, else_token.whitespace_after ), ), body.whitespace_before, ) @with_production("test_nocond", "or_test | lambdef_nocond") def convert_test_nocond( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (child,) = children return child @with_production("lambdef", "'lambda' [varargslist] ':' test") @with_production("lambdef_nocond", "'lambda' [varargslist] ':' test_nocond") def convert_lambda( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: lambdatoken, *params, colontoken, test = children # Grab the whitespace around the colon. If there are no params, then # the colon owns the whitespace before and after it. If there are # any params, then the last param owns the whitespace before the colon. # We handle the parameter movement below. 
colon = Colon( whitespace_before=parse_parenthesizable_whitespace( config, colontoken.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, colontoken.whitespace_after ), ) # Unpack optional parameters if len(params) == 0: parameters = Parameters() whitespace_after_lambda = MaybeSentinel.DEFAULT else: (parameters,) = params whitespace_after_lambda = parse_parenthesizable_whitespace( config, lambdatoken.whitespace_after ) # Handle pre-colon whitespace if parameters.star_kwarg is not None: if parameters.star_kwarg.comma == MaybeSentinel.DEFAULT: parameters = parameters.with_changes( star_kwarg=parameters.star_kwarg.with_changes( whitespace_after_param=colon.whitespace_before ) ) elif parameters.kwonly_params: if parameters.kwonly_params[-1].comma == MaybeSentinel.DEFAULT: parameters = parameters.with_changes( kwonly_params=( *parameters.kwonly_params[:-1], parameters.kwonly_params[-1].with_changes( whitespace_after_param=colon.whitespace_before ), ) ) elif isinstance(parameters.star_arg, Param): if parameters.star_arg.comma == MaybeSentinel.DEFAULT: parameters = parameters.with_changes( star_arg=parameters.star_arg.with_changes( whitespace_after_param=colon.whitespace_before ) ) elif parameters.params: if parameters.params[-1].comma == MaybeSentinel.DEFAULT: parameters = parameters.with_changes( params=( *parameters.params[:-1], parameters.params[-1].with_changes( whitespace_after_param=colon.whitespace_before ), ) ) # Colon doesn't own its own pre-whitespace now. colon = colon.with_changes(whitespace_before=SimpleWhitespace("")) # Return a lambda return WithLeadingWhitespace( Lambda( whitespace_after_lambda=whitespace_after_lambda, params=parameters, body=test.value, colon=colon, ), lambdatoken.whitespace_before, ) @with_production("or_test", "and_test ('or' and_test)*") @with_production("and_test", "not_test ('and' not_test)*") def convert_boolop( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: leftexpr, *rightexprs = children if len(rightexprs) == 0: return leftexpr whitespace_before = leftexpr.whitespace_before leftexpr = leftexpr.value # Convert all of the operations that have no precedence in a loop for op, rightexpr in grouper(rightexprs, 2): if op.string not in BOOLOP_TOKEN_LUT: raise Exception(f"Unexpected token '{op.string}'!") leftexpr = BooleanOperation( left=leftexpr, # pyre-ignore Pyre thinks that the type of the LUT is CSTNode. 
operator=BOOLOP_TOKEN_LUT[op.string]( whitespace_before=parse_parenthesizable_whitespace( config, op.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, op.whitespace_after ), ), right=rightexpr.value, ) return WithLeadingWhitespace(leftexpr, whitespace_before) @with_production("not_test", "'not' not_test | comparison") def convert_not_test( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: (child,) = children return child else: nottoken, nottest = children return WithLeadingWhitespace( UnaryOperation( operator=Not( whitespace_after=parse_parenthesizable_whitespace( config, nottoken.whitespace_after ) ), expression=nottest.value, ), nottoken.whitespace_before, ) @with_production("comparison", "expr (comp_op expr)*") def convert_comparison( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: (child,) = children return child lhs, *rest = children comparisons: typing.List[ComparisonTarget] = [] for operator, comparator in grouper(rest, 2): comparisons.append( ComparisonTarget(operator=operator, comparator=comparator.value) ) return WithLeadingWhitespace( Comparison(left=lhs.value, comparisons=tuple(comparisons)), lhs.whitespace_before, ) @with_production( "comp_op", "('<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not')" ) def convert_comp_op( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: (op,) = children if op.string in COMPOP_TOKEN_LUT: # A regular comparison containing one token # pyre-ignore Pyre thinks that the type of the LUT is CSTNode. return COMPOP_TOKEN_LUT[op.string]( whitespace_before=parse_parenthesizable_whitespace( config, op.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, op.whitespace_after ), ) elif op.string in ["!=", "<>"]: # Not equal, which can take two forms in some cases return NotEqual( whitespace_before=parse_parenthesizable_whitespace( config, op.whitespace_before ), value=op.string, whitespace_after=parse_parenthesizable_whitespace( config, op.whitespace_after ), ) else: # this should be unreachable raise Exception(f"Unexpected token '{op.string}'!") else: # A two-token comparison leftcomp, rightcomp = children if leftcomp.string == "not" and rightcomp.string == "in": return NotIn( whitespace_before=parse_parenthesizable_whitespace( config, leftcomp.whitespace_before ), whitespace_between=parse_parenthesizable_whitespace( config, leftcomp.whitespace_after ), whitespace_after=parse_parenthesizable_whitespace( config, rightcomp.whitespace_after ), ) elif leftcomp.string == "is" and rightcomp.string == "not": return IsNot( whitespace_before=parse_parenthesizable_whitespace( config, leftcomp.whitespace_before ), whitespace_between=parse_parenthesizable_whitespace( config, leftcomp.whitespace_after ), whitespace_after=parse_parenthesizable_whitespace( config, rightcomp.whitespace_after ), ) else: # this should be unreachable raise Exception(f"Unexpected token '{leftcomp.string} {rightcomp.string}'!") @with_production("star_expr", "'*' expr") def convert_star_expr( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: star, expr = children return WithLeadingWhitespace( StarredElement( expr.value, whitespace_before_value=parse_parenthesizable_whitespace( config, expr.whitespace_before ), # atom is responsible for parenthesis and trailing_whitespace if they exist # testlist_comp, exprlist, dictorsetmaker, etc are responsible for 
the comma # if it exists. ), whitespace_before=star.whitespace_before, ) @with_production("expr", "xor_expr ('|' xor_expr)*") @with_production("xor_expr", "and_expr ('^' and_expr)*") @with_production("and_expr", "shift_expr ('&' shift_expr)*") @with_production("shift_expr", "arith_expr (('<<'|'>>') arith_expr)*") @with_production("arith_expr", "term (('+'|'-') term)*") @with_production("term", "factor (('*'|'@'|'/'|'%'|'//') factor)*", version=">=3.5") @with_production("term", "factor (('*'|'/'|'%'|'//') factor)*", version="<3.5") def convert_binop( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: leftexpr, *rightexprs = children if len(rightexprs) == 0: return leftexpr whitespace_before = leftexpr.whitespace_before leftexpr = leftexpr.value # Convert all of the operations that have no precedence in a loop for op, rightexpr in grouper(rightexprs, 2): if op.string not in BINOP_TOKEN_LUT: raise Exception(f"Unexpected token '{op.string}'!") leftexpr = BinaryOperation( left=leftexpr, # pyre-ignore Pyre thinks that the type of the LUT is CSTNode. operator=BINOP_TOKEN_LUT[op.string]( whitespace_before=parse_parenthesizable_whitespace( config, op.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, op.whitespace_after ), ), right=rightexpr.value, ) return WithLeadingWhitespace(leftexpr, whitespace_before) @with_production("factor", "('+'|'-'|'~') factor | power") def convert_factor( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: (child,) = children return child op, factor = children # First, tokenize the unary operator if op.string == "+": opnode = Plus( whitespace_after=parse_parenthesizable_whitespace( config, op.whitespace_after ) ) elif op.string == "-": opnode = Minus( whitespace_after=parse_parenthesizable_whitespace( config, op.whitespace_after ) ) elif op.string == "~": opnode = BitInvert( whitespace_after=parse_parenthesizable_whitespace( config, op.whitespace_after ) ) else: raise Exception(f"Unexpected token '{op.string}'!") return WithLeadingWhitespace( UnaryOperation(operator=opnode, expression=factor.value), op.whitespace_before ) @with_production("power", "atom_expr ['**' factor]") def convert_power( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: (child,) = children return child left, power, right = children return WithLeadingWhitespace( BinaryOperation( left=left.value, operator=Power( whitespace_before=parse_parenthesizable_whitespace( config, power.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, power.whitespace_after ), ), right=right.value, ), left.whitespace_before, ) @with_production("atom_expr", "atom_expr_await | atom_expr_trailer") def convert_atom_expr( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (child,) = children return child @with_production("atom_expr_await", "AWAIT atom_expr_trailer") def convert_atom_expr_await( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: keyword, expr = children return WithLeadingWhitespace( Await( whitespace_after_await=parse_parenthesizable_whitespace( config, keyword.whitespace_after ), expression=expr.value, ), keyword.whitespace_before, ) @with_production("atom_expr_trailer", "atom trailer*") def convert_atom_expr_trailer( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: atom, *trailers = children whitespace_before = atom.whitespace_before atom = atom.value # 
Need to walk through all trailers from left to right and construct # a series of nodes based on each partial type. We can't do this with # left recursion due to limits in the parser. for trailer in trailers: if isinstance(trailer, SubscriptPartial): atom = Subscript( value=atom, whitespace_after_value=parse_parenthesizable_whitespace( config, trailer.whitespace_before ), lbracket=trailer.lbracket, # pyre-fixme[6]: Expected `Sequence[SubscriptElement]` for 4th param # but got `Union[typing.Sequence[SubscriptElement], Index, Slice]`. slice=trailer.slice, rbracket=trailer.rbracket, ) elif isinstance(trailer, AttributePartial): atom = Attribute(value=atom, dot=trailer.dot, attr=trailer.attr) elif isinstance(trailer, CallPartial): # If the trailing argument doesn't have a comma, then it owns the # trailing whitespace before the rpar. Otherwise, the comma owns # it. if ( len(trailer.args) > 0 and trailer.args[-1].comma == MaybeSentinel.DEFAULT ): args = ( *trailer.args[:-1], trailer.args[-1].with_changes( whitespace_after_arg=trailer.rpar.whitespace_before ), ) else: args = trailer.args atom = Call( func=atom, whitespace_after_func=parse_parenthesizable_whitespace( config, trailer.lpar.whitespace_before ), whitespace_before_args=trailer.lpar.value.whitespace_after, # pyre-fixme[6]: Expected `Sequence[Arg]` for 4th param but got # `Tuple[object, ...]`. args=tuple(args), ) else: # This is an invalid trailer, so lets give up raise Exception("Logic error!") return WithLeadingWhitespace(atom, whitespace_before) @with_production( "trailer", "trailer_arglist | trailer_subscriptlist | trailer_attribute" ) def convert_trailer( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (child,) = children return child @with_production("trailer_arglist", "'(' [arglist] ')'") def convert_trailer_arglist( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: lpar, *arglist, rpar = children return CallPartial( lpar=WithLeadingWhitespace( LeftParen( whitespace_after=parse_parenthesizable_whitespace( config, lpar.whitespace_after ) ), lpar.whitespace_before, ), args=() if not arglist else arglist[0].args, rpar=RightParen( whitespace_before=parse_parenthesizable_whitespace( config, rpar.whitespace_before ) ), ) @with_production("trailer_subscriptlist", "'[' subscriptlist ']'") def convert_trailer_subscriptlist( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (lbracket, subscriptlist, rbracket) = children return SubscriptPartial( lbracket=LeftSquareBracket( whitespace_after=parse_parenthesizable_whitespace( config, lbracket.whitespace_after ) ), slice=subscriptlist.value, rbracket=RightSquareBracket( whitespace_before=parse_parenthesizable_whitespace( config, rbracket.whitespace_before ) ), whitespace_before=lbracket.whitespace_before, ) @with_production("subscriptlist", "subscript (',' subscript)* [',']") def convert_subscriptlist( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: # This is a list of SubscriptElement, so construct as such by grouping every # subscript with an optional comma and adding to a list. 
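# For example, `x[1:2, 3]` yields two SubscriptElements: the first holds the
# Slice plus the Comma that separates it from the second, which holds an Index.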
elements = [] for slice, comma in grouper(children, 2): if comma is None: elements.append(SubscriptElement(slice=slice.value)) else: elements.append( SubscriptElement( slice=slice.value, comma=Comma( whitespace_before=parse_parenthesizable_whitespace( config, comma.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, comma.whitespace_after ), ), ) ) return WithLeadingWhitespace(elements, children[0].whitespace_before) @with_production("subscript", "test | [test] ':' [test] [sliceop]") def convert_subscript( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1 and not isinstance(children[0], Token): # This is just an index node (test,) = children return WithLeadingWhitespace(Index(test.value), test.whitespace_before) if isinstance(children[-1], SlicePartial): # We got a partial slice as the final param. Extract the final # bits of the full subscript. *others, sliceop = children whitespace_before = others[0].whitespace_before second_colon = sliceop.second_colon step = sliceop.step else: # We can just parse this below, without taking extras from the # partial child. others = children whitespace_before = others[0].whitespace_before second_colon = MaybeSentinel.DEFAULT step = None # We need to create a partial slice to pass up. So, align so we have # a list that's always [Optional[Test], Colon, Optional[Test]]. if isinstance(others[0], Token): # First token is a colon, so insert an empty test on the LHS. We # know the RHS is a test since it's not a sliceop. slicechildren = [None, *others] else: # First token is non-colon, so it's a test. slicechildren = [*others] if len(slicechildren) < 3: # Now, we have to fill in the RHS. We know it's two long # at this point if it's not already 3. slicechildren = [*slicechildren, None] lower, first_colon, upper = slicechildren return WithLeadingWhitespace( Slice( lower=lower.value if lower is not None else None, first_colon=Colon( whitespace_before=parse_parenthesizable_whitespace( config, first_colon.whitespace_before, ), whitespace_after=parse_parenthesizable_whitespace( config, first_colon.whitespace_after, ), ), upper=upper.value if upper is not None else None, second_colon=second_colon, step=step, ), whitespace_before=whitespace_before, ) @with_production("sliceop", "':' [test]") def convert_sliceop( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 2: colon, test = children step = test.value else: (colon,) = children step = None return SlicePartial( second_colon=Colon( whitespace_before=parse_parenthesizable_whitespace( config, colon.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, colon.whitespace_after ), ), step=step, ) @with_production("trailer_attribute", "'.' 
NAME") def convert_trailer_attribute( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: dot, name = children return AttributePartial( dot=Dot( whitespace_before=parse_parenthesizable_whitespace( config, dot.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, dot.whitespace_after ), ), attr=Name(name.string), ) @with_production( "atom", "atom_parens | atom_squarebrackets | atom_curlybraces | atom_string | atom_basic | atom_ellipses", ) def convert_atom( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (child,) = children return child @with_production("atom_basic", "NAME | NUMBER | 'None' | 'True' | 'False'") def convert_atom_basic( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (child,) = children if child.type.name == "NAME": # This also handles 'None', 'True', and 'False' directly, but we # keep it in the grammar to be more correct. return WithLeadingWhitespace(Name(child.string), child.whitespace_before) elif child.type.name == "NUMBER": # We must determine what type of number it is since we split node # types up this way. if re.fullmatch(INTNUMBER_RE, child.string): return WithLeadingWhitespace(Integer(child.string), child.whitespace_before) elif re.fullmatch(FLOATNUMBER_RE, child.string): return WithLeadingWhitespace(Float(child.string), child.whitespace_before) elif re.fullmatch(IMAGNUMBER_RE, child.string): return WithLeadingWhitespace( Imaginary(child.string), child.whitespace_before ) else: raise Exception(f"Unparseable number {child.string}") else: raise Exception(f"Logic error, unexpected token {child.type.name}") @with_production("atom_squarebrackets", "'[' [testlist_comp_list] ']'") def convert_atom_squarebrackets( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: lbracket_tok, *body, rbracket_tok = children lbracket = LeftSquareBracket( whitespace_after=parse_parenthesizable_whitespace( config, lbracket_tok.whitespace_after ) ) rbracket = RightSquareBracket( whitespace_before=parse_parenthesizable_whitespace( config, rbracket_tok.whitespace_before ) ) if len(body) == 0: list_node = List((), lbracket=lbracket, rbracket=rbracket) else: # len(body) == 1 # body[0] is a List or ListComp list_node = body[0].value.with_changes(lbracket=lbracket, rbracket=rbracket) return WithLeadingWhitespace(list_node, lbracket_tok.whitespace_before) @with_production("atom_curlybraces", "'{' [dictorsetmaker] '}'") def convert_atom_curlybraces( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: lbrace_tok, *body, rbrace_tok = children lbrace = LeftCurlyBrace( whitespace_after=parse_parenthesizable_whitespace( config, lbrace_tok.whitespace_after ) ) rbrace = RightCurlyBrace( whitespace_before=parse_parenthesizable_whitespace( config, rbrace_tok.whitespace_before ) ) if len(body) == 0: dict_or_set_node = Dict((), lbrace=lbrace, rbrace=rbrace) else: # len(body) == 1 dict_or_set_node = body[0].value.with_changes(lbrace=lbrace, rbrace=rbrace) return WithLeadingWhitespace(dict_or_set_node, lbrace_tok.whitespace_before) @with_production("atom_parens", "'(' [yield_expr|testlist_comp_tuple] ')'") def convert_atom_parens( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: lpar_tok, *atoms, rpar_tok = children lpar = LeftParen( whitespace_after=parse_parenthesizable_whitespace( config, lpar_tok.whitespace_after ) ) rpar = RightParen( whitespace_before=parse_parenthesizable_whitespace( config, rpar_tok.whitespace_before ) ) 
if len(atoms) == 1: # inner_atom is a _BaseParenthesizedNode inner_atom = atoms[0].value return WithLeadingWhitespace( inner_atom.with_changes( # pyre-fixme[60]: Expected to unpack an iterable, but got `unknown`. lpar=(lpar, *inner_atom.lpar), # pyre-fixme[60]: Expected to unpack an iterable, but got `unknown`. rpar=(*inner_atom.rpar, rpar), ), lpar_tok.whitespace_before, ) else: return WithLeadingWhitespace( Tuple((), lpar=(lpar,), rpar=(rpar,)), lpar_tok.whitespace_before ) @with_production("atom_ellipses", "'...'") def convert_atom_ellipses( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (token,) = children return WithLeadingWhitespace(Ellipsis(), token.whitespace_before) @with_production("atom_string", "(STRING | fstring) [atom_string]") def convert_atom_string( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: return children[0] else: left, right = children return WithLeadingWhitespace( ConcatenatedString( left=left.value, whitespace_between=parse_parenthesizable_whitespace( config, right.whitespace_before ), right=right.value, ), left.whitespace_before, ) @with_production("fstring", "FSTRING_START fstring_content* FSTRING_END") def convert_fstring( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: start, *content, end = children return WithLeadingWhitespace( FormattedString(start=start.string, parts=tuple(content), end=end.string), start.whitespace_before, ) @with_production("fstring_content", "FSTRING_STRING | fstring_expr") def convert_fstring_content( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (child,) = children if isinstance(child, Token): # Construct and return a raw string portion. return FormattedStringText(child.string) else: # Pass the expression up one production. return child @with_production("fstring_conversion", "'!' NAME") def convert_fstring_conversion( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: exclaim, name = children # There cannot be a space between the two tokens, so no need to preserve this. 
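# For example, in f"{x!r}" the NAME token here is "r"; convert_fstring_expr # later unpacks this partial into the FormattedStringExpression's conversion.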
return FormattedStringConversionPartial(name.string, exclaim.whitespace_before) @with_production("fstring_equality", "'='", version=">=3.8") def convert_fstring_equality( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (equal,) = children return AssignEqual( whitespace_before=parse_parenthesizable_whitespace( config, equal.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, equal.whitespace_after ), ) @with_production( "fstring_expr", "'{' (testlist_comp_tuple | yield_expr) [ fstring_equality ] [ fstring_conversion ] [ fstring_format_spec ] '}'", version=">=3.8", ) @with_production( "fstring_expr", "'{' (testlist_comp_tuple | yield_expr) [ fstring_conversion ] [ fstring_format_spec ] '}'", version="<3.8", ) def convert_fstring_expr( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: openbrkt, testlist, *conversions, closebrkt = children # Extract any optional equality (self-debugging expressions) if len(conversions) > 0 and isinstance(conversions[0], AssignEqual): equal = conversions[0] conversions = conversions[1:] else: equal = None # Extract any optional conversion if len(conversions) > 0 and isinstance( conversions[0], FormattedStringConversionPartial ): conversion = conversions[0].value conversions = conversions[1:] else: conversion = None # Extract any optional format spec if len(conversions) > 0: format_spec = conversions[0].values else: format_spec = None # Fix up any spacing issue we find due to the fact that the equal can # have whitespace and is also at the end of the expression. if equal is not None: whitespace_after_expression = SimpleWhitespace("") else: whitespace_after_expression = parse_parenthesizable_whitespace( config, children[2].whitespace_before ) return FormattedStringExpression( whitespace_before_expression=parse_parenthesizable_whitespace( config, testlist.whitespace_before ), expression=testlist.value, equal=equal, whitespace_after_expression=whitespace_after_expression, conversion=conversion, format_spec=format_spec, ) @with_production("fstring_format_spec", "':' fstring_content*") def convert_fstring_format_spec( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: colon, *content = children return FormattedStringFormatSpecPartial(tuple(content), colon.whitespace_before) @with_production( "testlist_comp_tuple", "(namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )", version=">=3.8", ) @with_production( "testlist_comp_tuple", "(test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )", version=">=3.5,<3.8", ) @with_production( "testlist_comp_tuple", "(test) ( comp_for | (',' (test))* [','] )", version="<3.5", ) def convert_testlist_comp_tuple( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: return _convert_testlist_comp( config, children, single_child_is_sequence=False, sequence_type=Tuple, comprehension_type=GeneratorExp, ) @with_production( "testlist_comp_list", "(namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )", version=">=3.8", ) @with_production( "testlist_comp_list", "(test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )", version=">=3.5,<3.8", ) @with_production( "testlist_comp_list", "(test) ( comp_for | (',' (test))* [','] )", version="<3.5", ) def convert_testlist_comp_list( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: return _convert_testlist_comp( config, children, single_child_is_sequence=True, 
sequence_type=List, comprehension_type=ListComp, ) def _convert_testlist_comp( config: ParserConfig, children: typing.Sequence[typing.Any], single_child_is_sequence: bool, sequence_type: typing.Union[ typing.Type[Tuple], typing.Type[List], typing.Type[Set] ], comprehension_type: typing.Union[ typing.Type[GeneratorExp], typing.Type[ListComp], typing.Type[SetComp] ], ) -> typing.Any: # This is either a single-element list, or the second token is a comma, so we're not # in a generator. if len(children) == 1 or isinstance(children[1], Token): return _convert_sequencelike( config, children, single_child_is_sequence, sequence_type ) else: # N.B. The parent node (e.g. atom) is responsible for computing and attaching # whitespace information on any parenthesis, square brackets, or curly braces elt, for_in = children return WithLeadingWhitespace( comprehension_type(elt=elt.value, for_in=for_in, lpar=(), rpar=()), elt.whitespace_before, ) @with_production("testlist_star_expr", "(test|star_expr) (',' (test|star_expr))* [',']") @with_production("testlist", "test (',' test)* [',']") @with_production("exprlist", "(expr|star_expr) (',' (expr|star_expr))* [',']") def convert_test_or_expr_list( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: # Used by expression statements and assignments. Neither of these cases want to # treat a single child as a sequence. return _convert_sequencelike( config, children, single_child_is_sequence=False, sequence_type=Tuple ) def _convert_sequencelike( config: ParserConfig, children: typing.Sequence[typing.Any], single_child_is_sequence: bool, sequence_type: typing.Union[ typing.Type[Tuple], typing.Type[List], typing.Type[Set] ], ) -> typing.Any: if not single_child_is_sequence and len(children) == 1: return children[0] # N.B. The parent node (e.g. atom) is responsible for computing and attaching # whitespace information on any parenthesis, square brackets, or curly braces elements = [] for wrapped_expr_or_starred_element, comma_token in grouper(children, 2): expr_or_starred_element = wrapped_expr_or_starred_element.value if comma_token is None: comma = MaybeSentinel.DEFAULT else: comma = Comma( whitespace_before=parse_parenthesizable_whitespace( config, comma_token.whitespace_before ), # Only compute whitespace_after if we're not a trailing comma. # If we're a trailing comma, that whitespace should be consumed by the # TrailingWhitespace, parenthesis, etc. 
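# For example, in `(a, b, )` the trailing comma keeps an empty # whitespace_after so that the space before `)` is owned by the RightParen # instead.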
whitespace_after=( parse_parenthesizable_whitespace( config, comma_token.whitespace_after ) if comma_token is not children[-1] else SimpleWhitespace("") ), ) if isinstance(expr_or_starred_element, StarredElement): starred_element = expr_or_starred_element elements.append(starred_element.with_changes(comma=comma)) else: expr = expr_or_starred_element elements.append(Element(value=expr, comma=comma)) # lpar/rpar are the responsibility of our parent return WithLeadingWhitespace( sequence_type(elements, lpar=(), rpar=()), children[0].whitespace_before, ) @with_production( "dictorsetmaker", ( "( ((test ':' test | '**' expr)" + " (comp_for | (',' (test ':' test | '**' expr))* [','])) |" + "((test | star_expr) " + " (comp_for | (',' (test | star_expr))* [','])) )" ), version=">=3.5", ) @with_production( "dictorsetmaker", ( "( ((test ':' test)" + " (comp_for | (',' (test ':' test))* [','])) |" + "((test) " + " (comp_for | (',' (test))* [','])) )" ), version="<3.5", ) def convert_dictorsetmaker( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: # We'll always have at least one child. `atom_curlybraces` handles empty # dicts. if len(children) > 1 and ( (isinstance(children[1], Token) and children[1].string == ":") or (isinstance(children[0], Token) and children[0].string == "**") ): return _convert_dict(config, children) else: return _convert_set(config, children) def _convert_dict_element( config: ParserConfig, children_iter: typing.Iterator[typing.Any], last_child: typing.Any, ) -> typing.Union[DictElement, StarredDictElement]: first = next(children_iter) if isinstance(first, Token) and first.string == "**": expr = next(children_iter) element = StarredDictElement( expr.value, whitespace_before_value=parse_parenthesizable_whitespace( config, expr.whitespace_before ), ) else: key = first colon_tok = next(children_iter) value = next(children_iter) element = DictElement( key.value, value.value, whitespace_before_colon=parse_parenthesizable_whitespace( config, colon_tok.whitespace_before ), whitespace_after_colon=parse_parenthesizable_whitespace( config, colon_tok.whitespace_after ), ) # Handle the trailing comma (if there is one) try: comma_token = next(children_iter) element = element.with_changes( comma=Comma( whitespace_before=parse_parenthesizable_whitespace( config, comma_token.whitespace_before ), # Only compute whitespace_after if we're not a trailing comma. # If we're a trailing comma, that whitespace should be consumed by the # RightBracket. 
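# For example, in `{1: 2, }` the trailing comma keeps an empty # whitespace_after so that the space before `}` belongs to the closing brace # instead.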
whitespace_after=( parse_parenthesizable_whitespace( config, comma_token.whitespace_after ) if comma_token is not last_child else SimpleWhitespace("") ), ) ) except StopIteration: pass return element def _convert_dict( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: is_first_starred = isinstance(children[0], Token) and children[0].string == "**" if is_first_starred: possible_comp_for = None if len(children) < 3 else children[2] else: possible_comp_for = None if len(children) < 4 else children[3] if isinstance(possible_comp_for, CompFor): if is_first_starred: raise PartialParserSyntaxError( "dict unpacking cannot be used in dict comprehension" ) return _convert_dict_comp(config, children) children_iter = iter(children) last_child = children[-1] elements = [] while True: try: elements.append(_convert_dict_element(config, children_iter, last_child)) except StopIteration: break # lbrace, rbrace, lpar, and rpar will be attached as-needed by the atom grammar return WithLeadingWhitespace(Dict(tuple(elements)), children[0].whitespace_before) def _convert_dict_comp(config, children: typing.Sequence[typing.Any]) -> typing.Any: key, colon_token, value, comp_for = children return WithLeadingWhitespace( DictComp( key.value, value.value, comp_for, # lbrace, rbrace, lpar, and rpar will be attached as-needed by the atom grammar whitespace_before_colon=parse_parenthesizable_whitespace( config, colon_token.whitespace_before ), whitespace_after_colon=parse_parenthesizable_whitespace( config, colon_token.whitespace_after ), ), key.whitespace_before, ) def _convert_set( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: return _convert_testlist_comp( config, children, single_child_is_sequence=True, sequence_type=Set, comprehension_type=SetComp, ) @with_production("arglist", "argument (',' argument)* [',']") def convert_arglist( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: args = [] for argument, comma in grouper(children, 2): if comma is None: args.append(argument) else: args.append( argument.with_changes( comma=Comma( whitespace_before=parse_parenthesizable_whitespace( config, comma.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, comma.whitespace_after ), ) ) ) return ArglistPartial(args) @with_production("argument", "arg_assign_comp_for | star_arg") def convert_argument( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: (child,) = children return child @with_production( "arg_assign_comp_for", "test [comp_for] | test '=' test", version="<=3.7" ) @with_production( "arg_assign_comp_for", "test [comp_for] | test ':=' test | test '=' test", version=">=3.8", ) def convert_arg_assign_comp_for( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: # Simple test (child,) = children return Arg(value=child.value) elif len(children) == 2: elt, for_in = children return Arg(value=GeneratorExp(elt.value, for_in, lpar=(), rpar=())) else: lhs, equal, rhs = children # "key := value" assignment; positional if equal.string == ":=": val = convert_namedexpr_test(config, children) if not isinstance(val, WithLeadingWhitespace): raise Exception( f"convert_namedexpr_test returned {val!r}, not WithLeadingWhitespace" ) return Arg(value=val.value) # "key = value" assignment; keyword argument return Arg( keyword=lhs.value, equal=AssignEqual( whitespace_before=parse_parenthesizable_whitespace( config, equal.whitespace_before ), 
whitespace_after=parse_parenthesizable_whitespace( config, equal.whitespace_after ), ), value=rhs.value, ) @with_production("star_arg", "'**' test | '*' test") def convert_star_arg( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: star, test = children return Arg( star=star.string, whitespace_after_star=parse_parenthesizable_whitespace( config, star.whitespace_after ), value=test.value, ) @with_production("sync_comp_for", "'for' exprlist 'in' or_test comp_if* [comp_for]") def convert_sync_comp_for( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: # unpack for_tok, target, in_tok, iter, *trailing = children if len(trailing) and isinstance(trailing[-1], CompFor): *ifs, inner_for_in = trailing else: ifs, inner_for_in = trailing, None return CompFor( target=target.value, iter=iter.value, ifs=ifs, inner_for_in=inner_for_in, whitespace_before=parse_parenthesizable_whitespace( config, for_tok.whitespace_before ), whitespace_after_for=parse_parenthesizable_whitespace( config, for_tok.whitespace_after ), whitespace_before_in=parse_parenthesizable_whitespace( config, in_tok.whitespace_before ), whitespace_after_in=parse_parenthesizable_whitespace( config, in_tok.whitespace_after ), ) @with_production("comp_for", "[ASYNC] sync_comp_for", version=">=3.6") @with_production("comp_for", "sync_comp_for", version="<=3.5") def convert_comp_for( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: (sync_comp_for,) = children return sync_comp_for else: (async_tok, sync_comp_for) = children return sync_comp_for.with_changes( # asynchronous steals the `CompFor`'s `whitespace_before`. asynchronous=Asynchronous(whitespace_after=sync_comp_for.whitespace_before), # But, in exchange, `CompFor` gets to keep `async_tok`'s leading # whitespace, because that's now the beginning of the `CompFor`. 
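# For example, in `[x async for x in xs]` the space between `async` and # `for` ends up as the Asynchronous node's whitespace_after.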
whitespace_before=parse_parenthesizable_whitespace( config, async_tok.whitespace_before ), ) @with_production("comp_if", "'if' test_nocond") def convert_comp_if( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if_tok, test = children return CompIf( test.value, whitespace_before=parse_parenthesizable_whitespace( config, if_tok.whitespace_before ), whitespace_before_test=parse_parenthesizable_whitespace( config, test.whitespace_before ), ) @with_production("yield_expr", "'yield' [yield_arg]") def convert_yield_expr( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: # Yielding an implicit None (yield_token,) = children yield_node = Yield(value=None) else: # Yielding explicit value (yield_token, yield_arg) = children yield_node = Yield( value=yield_arg.value, whitespace_after_yield=parse_parenthesizable_whitespace( config, yield_arg.whitespace_before ), ) return WithLeadingWhitespace(yield_node, yield_token.whitespace_before) @with_production("yield_arg", "testlist", version="<3.3") @with_production("yield_arg", "'from' test | testlist", version=">=3.3,<3.8") @with_production("yield_arg", "'from' test | testlist_star_expr", version=">=3.8") def convert_yield_arg( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: if len(children) == 1: # Just a regular testlist, pass it up (child,) = children return child else: # It's a yield from (from_token, test) = children return WithLeadingWhitespace( From( item=test.value, whitespace_after_from=parse_parenthesizable_whitespace( config, test.whitespace_before ), ), from_token.whitespace_before, ) LibCST-1.2.0/libcst/_parser/conversions/module.py000066400000000000000000000032761456464173300217410ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe from typing import Any, Sequence from libcst._nodes.module import Module from libcst._nodes.whitespace import NEWLINE_RE from libcst._parser.production_decorator import with_production from libcst._parser.types.config import ParserConfig @with_production("file_input", "(NEWLINE | stmt)* ENDMARKER") def convert_file_input(config: ParserConfig, children: Sequence[Any]) -> Any: *body, footer = children if len(body) == 0: # If there's no body, the header and footer are ambiguous. The header is more # important, and should own the EmptyLine nodes instead of the footer. header = footer footer = () if ( len(config.lines) == 2 and NEWLINE_RE.fullmatch(config.lines[0]) and config.lines[1] == "" ): # This is an empty file (not even a comment), so special-case this to an # empty list instead of a single dummy EmptyLine (which is what we'd # normally parse). header = () else: # Steal the leading lines from the first statement, and move them into the # header. first_stmt = body[0] header = first_stmt.leading_lines body[0] = first_stmt.with_changes(leading_lines=()) return Module( header=header, body=body, footer=footer, encoding=config.encoding, default_indent=config.default_indent, default_newline=config.default_newline, has_trailing_newline=config.has_trailing_newline, ) LibCST-1.2.0/libcst/_parser/conversions/params.py000066400000000000000000000320651456464173300217350ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe from typing import Any, List, Optional, Sequence, Union from libcst._exceptions import PartialParserSyntaxError from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.expression import ( Annotation, Name, Param, Parameters, ParamSlash, ParamStar, ) from libcst._nodes.op import AssignEqual, Comma from libcst._parser.custom_itertools import grouper from libcst._parser.production_decorator import with_production from libcst._parser.types.config import ParserConfig from libcst._parser.types.partials import ParamStarPartial from libcst._parser.whitespace_parser import parse_parenthesizable_whitespace @with_production( # noqa: C901: too complex "typedargslist", """( (tfpdef_assign (',' tfpdef_assign)* ',' tfpdef_posind [',' [ tfpdef_assign ( ',' tfpdef_assign)* [',' [ tfpdef_star (',' tfpdef_assign)* [',' [tfpdef_starstar [',']]] | tfpdef_starstar [',']]] | tfpdef_star (',' tfpdef_assign)* [',' [tfpdef_starstar [',']]] | tfpdef_starstar [',']]] ) | (tfpdef_assign (',' tfpdef_assign)* [',' [ tfpdef_star (',' tfpdef_assign)* [',' [tfpdef_starstar [',']]] | tfpdef_starstar [',']]] | tfpdef_star (',' tfpdef_assign)* [',' [tfpdef_starstar [',']]] | tfpdef_starstar [',']) )""", version=">=3.8", ) @with_production( # noqa: C901: too complex "typedargslist", ( "(tfpdef_assign (',' tfpdef_assign)* " + "[',' [tfpdef_star (',' tfpdef_assign)* [',' [tfpdef_starstar [',']]] | tfpdef_starstar [',']]]" + "| tfpdef_star (',' tfpdef_assign)* [',' [tfpdef_starstar [',']]] | tfpdef_starstar [','])" ), version=">=3.6,<=3.7", ) @with_production( # noqa: C901: too complex "typedargslist", ( "(tfpdef_assign (',' tfpdef_assign)* " + "[',' [tfpdef_star (',' tfpdef_assign)* [',' tfpdef_starstar] | tfpdef_starstar]]" + "| tfpdef_star (',' tfpdef_assign)* [',' tfpdef_starstar] | tfpdef_starstar)" ), version="<=3.5", ) @with_production( "varargslist", """vfpdef_assign (',' vfpdef_assign)* ',' vfpdef_posind [',' [ (vfpdef_assign (',' vfpdef_assign)* [',' [ vfpdef_star (',' vfpdef_assign)* [',' [vfpdef_starstar [',']]] | vfpdef_starstar [',']]] | vfpdef_star (',' vfpdef_assign)* [',' [vfpdef_starstar [',']]] | vfpdef_starstar [',']) ]] | (vfpdef_assign (',' vfpdef_assign)* [',' [ vfpdef_star (',' vfpdef_assign)* [',' [vfpdef_starstar [',']]] | vfpdef_starstar [',']]] | vfpdef_star (',' vfpdef_assign)* [',' [vfpdef_starstar [',']]] | vfpdef_starstar [','] )""", version=">=3.8", ) @with_production( "varargslist", ( "(vfpdef_assign (',' vfpdef_assign)* " + "[',' [vfpdef_star (',' vfpdef_assign)* [',' [vfpdef_starstar [',']]] | vfpdef_starstar [',']]]" + "| vfpdef_star (',' vfpdef_assign)* [',' [vfpdef_starstar [',']]] | vfpdef_starstar [','])" ), version=">=3.6,<=3.7", ) @with_production( "varargslist", ( "(vfpdef_assign (',' vfpdef_assign)* " + "[',' [vfpdef_star (',' vfpdef_assign)* [',' vfpdef_starstar] | vfpdef_starstar]]" + "| vfpdef_star (',' vfpdef_assign)* [',' vfpdef_starstar] | vfpdef_starstar)" ), version="<=3.5", ) def convert_argslist( # noqa: C901 config: ParserConfig, children: Sequence[Any] ) -> Any: posonly_params: List[Param] = [] posonly_ind: Union[ParamSlash, MaybeSentinel] = MaybeSentinel.DEFAULT params: List[Param] = [] seen_default: bool = False star_arg: Union[Param, ParamStar, MaybeSentinel] = MaybeSentinel.DEFAULT kwonly_params: List[Param] = [] star_kwarg: Optional[Param] = None def add_param( current_param: Optional[List[Param]], param: Union[Param, 
ParamStar] ) -> Optional[List[Param]]: nonlocal star_arg nonlocal star_kwarg nonlocal seen_default nonlocal posonly_params nonlocal posonly_ind nonlocal params if isinstance(param, ParamStar): # We can only add this if we don't already have a "*" or a "*param". if current_param is params: star_arg = param current_param = kwonly_params else: # Example code: # def fn(*abc, *): ... # This should be unreachable, the grammar already disallows it. raise Exception( "Cannot have multiple star ('*') markers in a single argument " + "list." ) elif isinstance(param, ParamSlash): # We can only add this if we don't already have a "/" or a "*" or a "*param". if current_param is params and len(posonly_params) == 0: posonly_ind = param posonly_params = params params = [] current_param = params else: # Example code: # def fn(foo, /, *, /, bar): ... # This should be unreachable, the grammar already disallows it. raise Exception( "Cannot have multiple slash ('/') markers in a single argument " + "list." ) elif isinstance(param.star, str) and param.star == "" and param.default is None: # Can only add this if we're in the params or kwonly_params section if current_param is params and not seen_default: params.append(param) elif current_param is kwonly_params: kwonly_params.append(param) else: # Example code: # def fn(first=None, second): ... # This code is reachable, so we should use a PartialParserSyntaxError. raise PartialParserSyntaxError( "Cannot have a non-default argument following a default argument." ) elif ( isinstance(param.star, str) and param.star == "" and param.default is not None ): # Can only add this if we're not yet at star args. if current_param is params: seen_default = True params.append(param) elif current_param is kwonly_params: kwonly_params.append(param) else: # Example code: # def fn(**kwargs, trailing=None) # This should be unreachable, the grammar already disallows it. raise Exception("Cannot have any arguments after a kwargs expansion.") elif ( isinstance(param.star, str) and param.star == "*" and param.default is None ): # Can only add this if we're in params, since we only allow one of # "*" or "*param". if current_param is params: star_arg = param current_param = kwonly_params else: # Example code: # def fn(*first, *second): ... # This should be unreachable, the grammar already disallows it. raise Exception( "Expected a keyword argument but found a starred positional " + "argument expansion." ) elif ( isinstance(param.star, str) and param.star == "**" and param.default is None ): # Can add this in all cases where we don't have a star_kwarg # yet. if current_param is not None: star_kwarg = param current_param = None else: # Example code: # def fn(**first, **second) # This should be unreachable, the grammar already disallows it. raise Exception( "Multiple starred keyword argument expansions are not allowed in a " + "single argument list" ) else: # The state machine should never end up here. raise Exception("Logic error!") return current_param # The parameter list we are adding to current: Optional[List[Param]] = params # We should have every other item in the group as a param or a comma by now, # so split them up, add commas and then put them in the appropriate group. for parameter, comma in grouper(children, 2): if comma is None: if isinstance(parameter, ParamStarPartial): # Example: # def fn(abc, *): ... # # There's also the case where we have bare * with a trailing comma. # That's handled later. 
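# For example, `def fn(a, *):` lands here, while the trailing-comma form # `def fn(a, *,):` is rejected by the bare-star check after this loop.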
# # It's not valid to construct a ParamStar object without a comma, so we # need to catch the non-comma case separately. raise PartialParserSyntaxError( "Named (keyword) arguments must follow a bare *." ) else: current = add_param(current, parameter) else: comma = Comma( whitespace_before=parse_parenthesizable_whitespace( config, comma.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, comma.whitespace_after ), ) if isinstance(parameter, ParamStarPartial): current = add_param(current, ParamStar(comma=comma)) else: current = add_param(current, parameter.with_changes(comma=comma)) if isinstance(star_arg, ParamStar) and len(kwonly_params) == 0: # Example: # def fn(abc, *,): ... # # This will raise a validation error, but we want to make sure to raise a syntax # error instead. # # The case where there's no trailing comma is already handled by this point, so # this conditional is only for the case where we have a trailing comma. raise PartialParserSyntaxError( "Named (keyword) arguments must follow a bare *." ) return Parameters( posonly_params=tuple(posonly_params), posonly_ind=posonly_ind, params=tuple(params), star_arg=star_arg, kwonly_params=tuple(kwonly_params), star_kwarg=star_kwarg, ) @with_production("tfpdef_star", "'*' [tfpdef]") @with_production("vfpdef_star", "'*' [vfpdef]") def convert_fpdef_star(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 1: (star,) = children return ParamStarPartial() else: star, param = children return param.with_changes( star=star.string, whitespace_after_star=parse_parenthesizable_whitespace( config, star.whitespace_after ), ) @with_production("tfpdef_starstar", "'**' tfpdef") @with_production("vfpdef_starstar", "'**' vfpdef") def convert_fpdef_starstar(config: ParserConfig, children: Sequence[Any]) -> Any: starstar, param = children return param.with_changes( star=starstar.string, whitespace_after_star=parse_parenthesizable_whitespace( config, starstar.whitespace_after ), ) @with_production("tfpdef_assign", "tfpdef ['=' test]") @with_production("vfpdef_assign", "vfpdef ['=' test]") def convert_fpdef_assign(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 1: (child,) = children return child param, equal, default = children return param.with_changes( equal=AssignEqual( whitespace_before=parse_parenthesizable_whitespace( config, equal.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, equal.whitespace_after ), ), default=default.value, ) @with_production("tfpdef", "NAME [':' test]") @with_production("vfpdef", "NAME") def convert_fpdef(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 1: # This is just a parameter (child,) = children namenode = Name(child.string) annotation = None else: # This is a parameter with a type hint name, colon, typehint = children namenode = Name(name.string) annotation = Annotation( whitespace_before_indicator=parse_parenthesizable_whitespace( config, colon.whitespace_before ), whitespace_after_indicator=parse_parenthesizable_whitespace( config, colon.whitespace_after ), annotation=typehint.value, ) return Param(star="", name=namenode, annotation=annotation, default=None) @with_production("tfpdef_posind", "'/'") @with_production("vfpdef_posind", "'/'") def convert_fpdef_slash(config: ParserConfig, children: Sequence[Any]) -> Any: return ParamSlash() LibCST-1.2.0/libcst/_parser/conversions/statement.py000066400000000000000000001343051456464173300224560ustar00rootroot00000000000000# Copyright (c) Meta 
Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe from typing import Any, Dict, List, Optional, Sequence, Tuple, Type from libcst._exceptions import PartialParserSyntaxError from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.expression import ( Annotation, Arg, Asynchronous, Attribute, Call, From, LeftParen, Name, Param, Parameters, RightParen, ) from libcst._nodes.op import ( AddAssign, AssignEqual, BaseAugOp, BitAndAssign, BitOrAssign, BitXorAssign, Comma, DivideAssign, Dot, FloorDivideAssign, ImportStar, LeftShiftAssign, MatrixMultiplyAssign, ModuloAssign, MultiplyAssign, PowerAssign, RightShiftAssign, Semicolon, SubtractAssign, ) from libcst._nodes.statement import ( AnnAssign, AsName, Assert, Assign, AssignTarget, AugAssign, Break, ClassDef, Continue, Decorator, Del, Else, ExceptHandler, Expr, Finally, For, FunctionDef, Global, If, Import, ImportAlias, ImportFrom, IndentedBlock, NameItem, Nonlocal, Pass, Raise, Return, SimpleStatementLine, SimpleStatementSuite, Try, While, With, WithItem, ) from libcst._nodes.whitespace import EmptyLine, SimpleWhitespace from libcst._parser.custom_itertools import grouper from libcst._parser.production_decorator import with_production from libcst._parser.types.config import ParserConfig from libcst._parser.types.partials import ( AnnAssignPartial, AssignPartial, AugAssignPartial, DecoratorPartial, ExceptClausePartial, FuncdefPartial, ImportPartial, ImportRelativePartial, SimpleStatementPartial, WithLeadingWhitespace, ) from libcst._parser.types.token import Token from libcst._parser.whitespace_parser import ( parse_empty_lines, parse_parenthesizable_whitespace, parse_simple_whitespace, ) AUGOP_TOKEN_LUT: Dict[str, Type[BaseAugOp]] = { "+=": AddAssign, "-=": SubtractAssign, "*=": MultiplyAssign, "@=": MatrixMultiplyAssign, "/=": DivideAssign, "%=": ModuloAssign, "&=": BitAndAssign, "|=": BitOrAssign, "^=": BitXorAssign, "<<=": LeftShiftAssign, ">>=": RightShiftAssign, "**=": PowerAssign, "//=": FloorDivideAssign, } @with_production("stmt_input", "stmt ENDMARKER") def convert_stmt_input(config: ParserConfig, children: Sequence[Any]) -> Any: (child, endmarker) = children return child @with_production("stmt", "simple_stmt_line | compound_stmt") def convert_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (child,) = children return child @with_production("simple_stmt_partial", "small_stmt (';' small_stmt)* [';'] NEWLINE") def convert_simple_stmt_partial(config: ParserConfig, children: Sequence[Any]) -> Any: *statements, trailing_whitespace = children last_stmt = len(statements) / 2 body = [] for i, (stmt_body, semi) in enumerate(grouper(statements, 2)): if semi is not None: if i == (last_stmt - 1): # Trailing semicolons only own the whitespace before. semi = Semicolon( whitespace_before=parse_simple_whitespace( config, semi.whitespace_before ), whitespace_after=SimpleWhitespace(""), ) else: # Middle semicolons own the whitespace before and after. 
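# For example, in `a ; b` this `;` owns the space on both sides, whereas a # trailing `;` (handled above) gives up its whitespace_after.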
semi = Semicolon( whitespace_before=parse_simple_whitespace( config, semi.whitespace_before ), whitespace_after=parse_simple_whitespace( config, semi.whitespace_after ), ) else: semi = MaybeSentinel.DEFAULT body.append(stmt_body.value.with_changes(semicolon=semi)) return SimpleStatementPartial( body, whitespace_before=statements[0].whitespace_before, trailing_whitespace=trailing_whitespace, ) @with_production("simple_stmt_line", "simple_stmt_partial") def convert_simple_stmt_line(config: ParserConfig, children: Sequence[Any]) -> Any: """ This function is similar to convert_simple_stmt_suite, but yields a different type """ (partial,) = children return SimpleStatementLine( partial.body, leading_lines=parse_empty_lines(config, partial.whitespace_before), trailing_whitespace=partial.trailing_whitespace, ) @with_production("simple_stmt_suite", "simple_stmt_partial") def convert_simple_stmt_suite(config: ParserConfig, children: Sequence[Any]) -> Any: """ This function is similar to convert_simple_stmt_line, but yields a different type """ (partial,) = children return SimpleStatementSuite( partial.body, leading_whitespace=parse_simple_whitespace(config, partial.whitespace_before), trailing_whitespace=partial.trailing_whitespace, ) @with_production( "small_stmt", ( "expr_stmt | del_stmt | pass_stmt | break_stmt | continue_stmt | return_stmt" + "| raise_stmt | yield_stmt | import_stmt | global_stmt | nonlocal_stmt" + "| assert_stmt" ), ) def convert_small_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: # Doesn't construct SmallStatement, because we don't know about semicolons yet. # convert_simple_stmt will construct the SmallStatement nodes. (small_stmt_body,) = children return small_stmt_body @with_production( "expr_stmt", "testlist_star_expr (annassign | augassign | assign* )", version=">=3.6", ) @with_production( "expr_stmt", "testlist_star_expr (augassign | assign* )", version="<=3.5" ) @with_production("yield_stmt", "yield_expr") def convert_expr_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 1: # This is an unassigned expr statement (like a function call) (test_node,) = children return WithLeadingWhitespace( Expr(value=test_node.value), test_node.whitespace_before ) elif len(children) == 2: lhs, rhs = children if isinstance(rhs, AnnAssignPartial): return WithLeadingWhitespace( AnnAssign( target=lhs.value, annotation=rhs.annotation, equal=MaybeSentinel.DEFAULT if rhs.equal is None else rhs.equal, value=rhs.value, ), lhs.whitespace_before, ) elif isinstance(rhs, AugAssignPartial): return WithLeadingWhitespace( AugAssign(target=lhs.value, operator=rhs.operator, value=rhs.value), lhs.whitespace_before, ) # The only thing it could be at this point is an assign with one or more targets. # So, walk the children moving the equals ownership back one and constructing a # list of AssignTargets. 
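# For example, `a = b = c` arrives as [a, ('=', b), ('=', c)]: targets `a` # and `b` each claim the equal sign that follows them, and `c` becomes the # assigned value.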
targets = [] for i in range(len(children) - 1): target = children[i].value equal = children[i + 1].equal targets.append( AssignTarget( target=target, whitespace_before_equal=equal.whitespace_before, whitespace_after_equal=equal.whitespace_after, ) ) return WithLeadingWhitespace( Assign(targets=tuple(targets), value=children[-1].value), children[0].whitespace_before, ) @with_production("annassign", "':' test ['=' test]", version=">=3.6,<3.8") @with_production( "annassign", "':' test ['=' (yield_expr|testlist_star_expr)]", version=">=3.8" ) def convert_annassign(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 2: # Variable annotation only colon, annotation = children annotation = annotation.value equal = None value = None elif len(children) == 4: # Variable annotation and assignment colon, annotation, equal, value = children annotation = annotation.value value = value.value equal = AssignEqual( whitespace_before=parse_simple_whitespace(config, equal.whitespace_before), whitespace_after=parse_simple_whitespace(config, equal.whitespace_after), ) else: raise Exception("Invalid parser state!") return AnnAssignPartial( annotation=Annotation( whitespace_before_indicator=parse_simple_whitespace( config, colon.whitespace_before ), whitespace_after_indicator=parse_simple_whitespace( config, colon.whitespace_after ), annotation=annotation, ), equal=equal, value=value, ) @with_production( "augassign", ( "('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | " + "'>>=' | '**=' | '//=') (yield_expr | testlist)" ), version=">=3.5", ) @with_production( "augassign", ( "('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | " + "'>>=' | '**=' | '//=') (yield_expr | testlist)" ), version="<3.5", ) def convert_augassign(config: ParserConfig, children: Sequence[Any]) -> Any: op, expr = children if op.string not in AUGOP_TOKEN_LUT: raise Exception(f"Unexpected token '{op.string}'!") return AugAssignPartial( # pyre-ignore Pyre seems to think that the value of this LUT is CSTNode operator=AUGOP_TOKEN_LUT[op.string]( whitespace_before=parse_simple_whitespace(config, op.whitespace_before), whitespace_after=parse_simple_whitespace(config, op.whitespace_after), ), value=expr.value, ) @with_production("assign", "'=' (yield_expr|testlist_star_expr)") def convert_assign(config: ParserConfig, children: Sequence[Any]) -> Any: equal, expr = children return AssignPartial( equal=AssignEqual( whitespace_before=parse_simple_whitespace(config, equal.whitespace_before), whitespace_after=parse_simple_whitespace(config, equal.whitespace_after), ), value=expr.value, ) @with_production("pass_stmt", "'pass'") def convert_pass_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (name,) = children return WithLeadingWhitespace(Pass(), name.whitespace_before) @with_production("del_stmt", "'del' exprlist") def convert_del_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (del_name, exprlist) = children return WithLeadingWhitespace( Del( target=exprlist.value, whitespace_after_del=parse_simple_whitespace( config, del_name.whitespace_after ), ), del_name.whitespace_before, ) @with_production("continue_stmt", "'continue'") def convert_continue_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (name,) = children return WithLeadingWhitespace(Continue(), name.whitespace_before) @with_production("break_stmt", "'break'") def convert_break_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (name,) = children return WithLeadingWhitespace(Break(), 
name.whitespace_before) @with_production("return_stmt", "'return' [testlist]", version="<=3.7") @with_production("return_stmt", "'return' [testlist_star_expr]", version=">=3.8") def convert_return_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 1: (keyword,) = children return WithLeadingWhitespace( Return(whitespace_after_return=SimpleWhitespace("")), keyword.whitespace_before, ) else: (keyword, testlist) = children return WithLeadingWhitespace( Return( value=testlist.value, whitespace_after_return=parse_simple_whitespace( config, keyword.whitespace_after ), ), keyword.whitespace_before, ) @with_production("import_stmt", "import_name | import_from") def convert_import_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (child,) = children return child @with_production("import_name", "'import' dotted_as_names") def convert_import_name(config: ParserConfig, children: Sequence[Any]) -> Any: importtoken, names = children return WithLeadingWhitespace( Import( names=names.names, whitespace_after_import=parse_simple_whitespace( config, importtoken.whitespace_after ), ), importtoken.whitespace_before, ) @with_production("import_relative", "('.' | '...')* dotted_name | ('.' | '...')+") def convert_import_relative(config: ParserConfig, children: Sequence[Any]) -> Any: dots = [] dotted_name = None for child in children: if isinstance(child, Token): # Special case for "...", which is part of the grammar if child.string == "...": dots.extend( [ Dot(), Dot(), Dot( whitespace_after=parse_simple_whitespace( config, child.whitespace_after ) ), ] ) else: dots.append( Dot( whitespace_after=parse_simple_whitespace( config, child.whitespace_after ) ) ) else: # This should be the dotted name, and we can't get more than # one, but let's be sure anyway if dotted_name is not None: raise Exception("Logic error!") dotted_name = child return ImportRelativePartial(relative=tuple(dots), module=dotted_name) @with_production( "import_from", "'from' import_relative 'import' ('*' | '(' import_as_names ')' | import_as_names)", ) def convert_import_from(config: ParserConfig, children: Sequence[Any]) -> Any: fromtoken, import_relative, importtoken, *importlist = children if len(importlist) == 1: (possible_star,) = importlist if isinstance(possible_star, Token): # It's a "*" import, so we must construct this node. names = ImportStar() else: # It's an import as names partial, grab the names from that. names = possible_star.names lpar = None rpar = None else: # It's an import as names partial with parens lpartoken, namespartial, rpartoken = importlist lpar = LeftParen( whitespace_after=parse_parenthesizable_whitespace( config, lpartoken.whitespace_after ) ) names = namespartial.names rpar = RightParen( whitespace_before=parse_parenthesizable_whitespace( config, rpartoken.whitespace_before ) ) # If we have a relative-only import, then we need to relocate the space # after the final dot to be owned by the import token. 
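# For example, `from .. import x` has no module, so the space between `..` # and `import` is moved off the final Dot and onto whitespace_before_import.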
if len(import_relative.relative) > 0 and import_relative.module is None: whitespace_before_import = import_relative.relative[-1].whitespace_after relative = ( *import_relative.relative[:-1], import_relative.relative[-1].with_changes( whitespace_after=SimpleWhitespace("") ), ) else: whitespace_before_import = parse_simple_whitespace( config, importtoken.whitespace_before ) relative = import_relative.relative return WithLeadingWhitespace( ImportFrom( whitespace_after_from=parse_simple_whitespace( config, fromtoken.whitespace_after ), relative=relative, module=import_relative.module, whitespace_before_import=whitespace_before_import, whitespace_after_import=parse_simple_whitespace( config, importtoken.whitespace_after ), lpar=lpar, names=names, rpar=rpar, ), fromtoken.whitespace_before, ) @with_production("import_as_name", "NAME ['as' NAME]") def convert_import_as_name(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 1: (dotted_name,) = children return ImportAlias(name=Name(dotted_name.string), asname=None) else: dotted_name, astoken, name = children return ImportAlias( name=Name(dotted_name.string), asname=AsName( whitespace_before_as=parse_simple_whitespace( config, astoken.whitespace_before ), whitespace_after_as=parse_simple_whitespace( config, astoken.whitespace_after ), name=Name(name.string), ), ) @with_production("dotted_as_name", "dotted_name ['as' NAME]") def convert_dotted_as_name(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 1: (dotted_name,) = children return ImportAlias(name=dotted_name, asname=None) else: dotted_name, astoken, name = children return ImportAlias( name=dotted_name, asname=AsName( whitespace_before_as=parse_parenthesizable_whitespace( config, astoken.whitespace_before ), whitespace_after_as=parse_parenthesizable_whitespace( config, astoken.whitespace_after ), name=Name(name.string), ), ) @with_production("import_as_names", "import_as_name (',' import_as_name)* [',']") def convert_import_as_names(config: ParserConfig, children: Sequence[Any]) -> Any: return _gather_import_names(config, children) @with_production("dotted_as_names", "dotted_as_name (',' dotted_as_name)*") def convert_dotted_as_names(config: ParserConfig, children: Sequence[Any]) -> Any: return _gather_import_names(config, children) def _gather_import_names( config: ParserConfig, children: Sequence[Any] ) -> ImportPartial: names = [] for name, comma in grouper(children, 2): if comma is None: names.append(name) else: names.append( name.with_changes( comma=Comma( whitespace_before=parse_parenthesizable_whitespace( config, comma.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, comma.whitespace_after ), ) ) ) return ImportPartial(names=names) @with_production("dotted_name", "NAME ('.' 
NAME)*") def convert_dotted_name(config: ParserConfig, children: Sequence[Any]) -> Any: left, *rest = children node = Name(left.string) for dot, right in grouper(rest, 2): node = Attribute( value=node, dot=Dot( whitespace_before=parse_parenthesizable_whitespace( config, dot.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, dot.whitespace_after ), ), attr=Name(right.string), ) return node @with_production("raise_stmt", "'raise' [test ['from' test]]") def convert_raise_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 1: (raise_token,) = children whitespace_after_raise = MaybeSentinel.DEFAULT exc = None cause = None elif len(children) == 2: (raise_token, test) = children whitespace_after_raise = parse_simple_whitespace(config, test.whitespace_before) exc = test.value cause = None elif len(children) == 4: (raise_token, test, from_token, source) = children whitespace_after_raise = parse_simple_whitespace(config, test.whitespace_before) exc = test.value cause = From( whitespace_before_from=parse_simple_whitespace( config, from_token.whitespace_before ), whitespace_after_from=parse_simple_whitespace( config, source.whitespace_before ), item=source.value, ) else: raise Exception("Logic error!") return WithLeadingWhitespace( Raise(whitespace_after_raise=whitespace_after_raise, exc=exc, cause=cause), raise_token.whitespace_before, ) def _construct_nameitems(config: ParserConfig, names: Sequence[Any]) -> List[NameItem]: nameitems: List[NameItem] = [] for name, maybe_comma in grouper(names, 2): if maybe_comma is None: nameitems.append(NameItem(Name(name.string))) else: nameitems.append( NameItem( Name(name.string), comma=Comma( whitespace_before=parse_simple_whitespace( config, maybe_comma.whitespace_before ), whitespace_after=parse_simple_whitespace( config, maybe_comma.whitespace_after ), ), ) ) return nameitems @with_production("global_stmt", "'global' NAME (',' NAME)*") def convert_global_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (global_token, *names) = children return WithLeadingWhitespace( Global( names=tuple(_construct_nameitems(config, names)), whitespace_after_global=parse_simple_whitespace( config, names[0].whitespace_before ), ), global_token.whitespace_before, ) @with_production("nonlocal_stmt", "'nonlocal' NAME (',' NAME)*") def convert_nonlocal_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (nonlocal_token, *names) = children return WithLeadingWhitespace( Nonlocal( names=tuple(_construct_nameitems(config, names)), whitespace_after_nonlocal=parse_simple_whitespace( config, names[0].whitespace_before ), ), nonlocal_token.whitespace_before, ) @with_production("assert_stmt", "'assert' test [',' test]") def convert_assert_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 2: (assert_token, test) = children assert_node = Assert( whitespace_after_assert=parse_simple_whitespace( config, test.whitespace_before ), test=test.value, msg=None, ) else: (assert_token, test, comma_token, msg) = children assert_node = Assert( whitespace_after_assert=parse_simple_whitespace( config, test.whitespace_before ), test=test.value, comma=Comma( whitespace_before=parse_simple_whitespace( config, comma_token.whitespace_before ), whitespace_after=parse_simple_whitespace(config, msg.whitespace_before), ), msg=msg.value, ) return WithLeadingWhitespace(assert_node, assert_token.whitespace_before) @with_production( "compound_stmt", ("if_stmt | while_stmt | asyncable_stmt | try_stmt | classdef | 
decorated"), ) def convert_compound_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (stmt,) = children return stmt @with_production( "if_stmt", "'if' test ':' suite [if_stmt_elif|if_stmt_else]", version="<=3.7" ) @with_production( "if_stmt", "'if' namedexpr_test ':' suite [if_stmt_elif|if_stmt_else]", version=">=3.8", ) def convert_if_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: if_tok, test, colon_tok, suite, *tail = children if len(tail) > 0: (orelse,) = tail else: orelse = None return If( leading_lines=parse_empty_lines(config, if_tok.whitespace_before), whitespace_before_test=parse_simple_whitespace(config, if_tok.whitespace_after), test=test.value, whitespace_after_test=parse_simple_whitespace( config, colon_tok.whitespace_before ), body=suite, orelse=orelse, ) @with_production( "if_stmt_elif", "'elif' test ':' suite [if_stmt_elif|if_stmt_else]", version="<=3.7" ) @with_production( "if_stmt_elif", "'elif' namedexpr_test ':' suite [if_stmt_elif|if_stmt_else]", version=">=3.8", ) def convert_if_stmt_elif(config: ParserConfig, children: Sequence[Any]) -> Any: # this behaves exactly the same as `convert_if_stmt`, except that the leading token # has a different string value. return convert_if_stmt(config, children) @with_production("if_stmt_else", "'else' ':' suite") def convert_if_stmt_else(config: ParserConfig, children: Sequence[Any]) -> Any: else_tok, colon_tok, suite = children return Else( leading_lines=parse_empty_lines(config, else_tok.whitespace_before), whitespace_before_colon=parse_simple_whitespace( config, colon_tok.whitespace_before ), body=suite, ) @with_production( "while_stmt", "'while' test ':' suite ['else' ':' suite]", version="<=3.7" ) @with_production( "while_stmt", "'while' namedexpr_test ':' suite ['else' ':' suite]", version=">=3.8" ) def convert_while_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: while_token, test, while_colon_token, while_suite, *else_block = children if len(else_block) > 0: (else_token, else_colon_token, else_suite) = else_block orelse = Else( leading_lines=parse_empty_lines(config, else_token.whitespace_before), whitespace_before_colon=parse_simple_whitespace( config, else_colon_token.whitespace_before ), body=else_suite, ) else: orelse = None return While( leading_lines=parse_empty_lines(config, while_token.whitespace_before), whitespace_after_while=parse_simple_whitespace( config, while_token.whitespace_after ), test=test.value, whitespace_before_colon=parse_simple_whitespace( config, while_colon_token.whitespace_before ), body=while_suite, orelse=orelse, ) @with_production( "for_stmt", "'for' exprlist 'in' testlist ':' suite ['else' ':' suite]" ) def convert_for_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: ( for_token, expr, in_token, test, for_colon_token, for_suite, *else_block, ) = children if len(else_block) > 0: (else_token, else_colon_token, else_suite) = else_block orelse = Else( leading_lines=parse_empty_lines(config, else_token.whitespace_before), whitespace_before_colon=parse_simple_whitespace( config, else_colon_token.whitespace_before ), body=else_suite, ) else: orelse = None return WithLeadingWhitespace( For( whitespace_after_for=parse_simple_whitespace( config, for_token.whitespace_after ), target=expr.value, whitespace_before_in=parse_simple_whitespace( config, in_token.whitespace_before ), whitespace_after_in=parse_simple_whitespace( config, in_token.whitespace_after ), iter=test.value, whitespace_before_colon=parse_simple_whitespace( config, 
for_colon_token.whitespace_before ), body=for_suite, orelse=orelse, ), for_token.whitespace_before, ) @with_production( "try_stmt", "('try' ':' suite ((except_clause ':' suite)+ ['else' ':' suite] ['finally' ':' suite] | 'finally' ':' suite))", ) def convert_try_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: trytoken, try_colon_token, try_suite, *rest = children handlers: List[ExceptHandler] = [] orelse: Optional[Else] = None finalbody: Optional[Finally] = None for clause, colon_token, suite in grouper(rest, 3): if isinstance(clause, Token): if clause.string == "else": if orelse is not None: raise Exception("Logic error!") orelse = Else( leading_lines=parse_empty_lines(config, clause.whitespace_before), whitespace_before_colon=parse_simple_whitespace( config, colon_token.whitespace_before ), body=suite, ) elif clause.string == "finally": if finalbody is not None: raise Exception("Logic error!") finalbody = Finally( leading_lines=parse_empty_lines(config, clause.whitespace_before), whitespace_before_colon=parse_simple_whitespace( config, colon_token.whitespace_before ), body=suite, ) else: raise Exception("Logic error!") elif isinstance(clause, ExceptClausePartial): handlers.append( ExceptHandler( body=suite, type=clause.type, name=clause.name, leading_lines=clause.leading_lines, whitespace_after_except=clause.whitespace_after_except, whitespace_before_colon=parse_simple_whitespace( config, colon_token.whitespace_before ), ) ) else: raise Exception("Logic error!") return Try( leading_lines=parse_empty_lines(config, trytoken.whitespace_before), whitespace_before_colon=parse_simple_whitespace( config, try_colon_token.whitespace_before ), body=try_suite, handlers=tuple(handlers), orelse=orelse, finalbody=finalbody, ) @with_production("except_clause", "'except' [test ['as' NAME]]") def convert_except_clause(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 1: (except_token,) = children whitespace_after_except = SimpleWhitespace("") test = None name = None elif len(children) == 2: (except_token, test_node) = children whitespace_after_except = parse_simple_whitespace( config, except_token.whitespace_after ) test = test_node.value name = None else: (except_token, test_node, as_token, name_token) = children whitespace_after_except = parse_simple_whitespace( config, except_token.whitespace_after ) test = test_node.value name = AsName( whitespace_before_as=parse_simple_whitespace( config, as_token.whitespace_before ), whitespace_after_as=parse_simple_whitespace( config, as_token.whitespace_after ), name=Name(name_token.string), ) return ExceptClausePartial( leading_lines=parse_empty_lines(config, except_token.whitespace_before), whitespace_after_except=whitespace_after_except, type=test, name=name, ) @with_production( "with_stmt", "'with' with_item (',' with_item)* ':' suite", version=">=3.1" ) @with_production("with_stmt", "'with' with_item ':' suite", version="<3.1") def convert_with_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: (with_token, *items, colon_token, suite) = children item_nodes: List[WithItem] = [] for with_item, maybe_comma in grouper(items, 2): if maybe_comma is not None: item_nodes.append( with_item.with_changes( comma=Comma( whitespace_before=parse_parenthesizable_whitespace( config, maybe_comma.whitespace_before ), whitespace_after=parse_parenthesizable_whitespace( config, maybe_comma.whitespace_after ), ) ) ) else: item_nodes.append(with_item) return WithLeadingWhitespace( With( whitespace_after_with=parse_simple_whitespace( 
config, with_token.whitespace_after ), items=tuple(item_nodes), whitespace_before_colon=parse_simple_whitespace( config, colon_token.whitespace_before ), body=suite, ), with_token.whitespace_before, ) @with_production("with_item", "test ['as' expr]") def convert_with_item(config: ParserConfig, children: Sequence[Any]) -> Any: if len(children) == 3: (test, as_token, expr_node) = children test_node = test.value asname = AsName( whitespace_before_as=parse_simple_whitespace( config, as_token.whitespace_before ), whitespace_after_as=parse_simple_whitespace( config, as_token.whitespace_after ), name=expr_node.value, ) else: (test,) = children test_node = test.value asname = None return WithItem(item=test_node, asname=asname) def _extract_async( config: ParserConfig, children: Sequence[Any] ) -> Tuple[List[EmptyLine], Optional[Asynchronous], Any]: if len(children) == 1: (stmt,) = children whitespace_before = stmt.whitespace_before asyncnode = None else: asynctoken, stmt = children whitespace_before = asynctoken.whitespace_before asyncnode = Asynchronous( whitespace_after=parse_simple_whitespace( config, asynctoken.whitespace_after ) ) return (parse_empty_lines(config, whitespace_before), asyncnode, stmt.value) @with_production("asyncable_funcdef", "[ASYNC] funcdef", version=">=3.5") @with_production("asyncable_funcdef", "funcdef", version="<3.5") def convert_asyncable_funcdef(config: ParserConfig, children: Sequence[Any]) -> Any: leading_lines, asyncnode, funcdef = _extract_async(config, children) return funcdef.with_changes( asynchronous=asyncnode, leading_lines=leading_lines, lines_after_decorators=() ) @with_production("funcdef", "'def' NAME parameters [funcdef_annotation] ':' suite") def convert_funcdef(config: ParserConfig, children: Sequence[Any]) -> Any: defnode, namenode, param_partial, *annotation, colon, suite = children # If the trailing parameter doesn't have a comma, then it owns the trailing # whitespace before the rpar. Otherwise, the comma owns it (and will have # already parsed it). We don't check/update ParamStar because if it exists # then we are guaranteed to have at least one kwonly_param.
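    # Illustrative sketch (comment only; the inputs are hypothetical): for
    # ``def f(a, b )`` the last Param ``b`` has no comma, so the whitespace
    # before the ``)`` is attached below as its ``whitespace_after_param``;
    # for ``def f(a, b, )`` the trailing comma has already parsed it, and the
    # params are left untouched.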
parameters = param_partial.params if parameters.star_kwarg is not None: if parameters.star_kwarg.comma == MaybeSentinel.DEFAULT: parameters = parameters.with_changes( star_kwarg=parameters.star_kwarg.with_changes( whitespace_after_param=param_partial.rpar.whitespace_before ) ) elif parameters.kwonly_params: if parameters.kwonly_params[-1].comma == MaybeSentinel.DEFAULT: parameters = parameters.with_changes( kwonly_params=( *parameters.kwonly_params[:-1], parameters.kwonly_params[-1].with_changes( whitespace_after_param=param_partial.rpar.whitespace_before ), ) ) elif isinstance(parameters.star_arg, Param): if parameters.star_arg.comma == MaybeSentinel.DEFAULT: parameters = parameters.with_changes( star_arg=parameters.star_arg.with_changes( whitespace_after_param=param_partial.rpar.whitespace_before ) ) elif parameters.params: if parameters.params[-1].comma == MaybeSentinel.DEFAULT: parameters = parameters.with_changes( params=( *parameters.params[:-1], parameters.params[-1].with_changes( whitespace_after_param=param_partial.rpar.whitespace_before ), ) ) return WithLeadingWhitespace( FunctionDef( whitespace_after_def=parse_simple_whitespace( config, defnode.whitespace_after ), name=Name(namenode.string), whitespace_after_name=parse_simple_whitespace( config, namenode.whitespace_after ), whitespace_before_params=param_partial.lpar.whitespace_after, params=parameters, returns=None if not annotation else annotation[0], whitespace_before_colon=parse_simple_whitespace( config, colon.whitespace_before ), body=suite, ), defnode.whitespace_before, ) @with_production("parameters", "'(' [typedargslist] ')'") def convert_parameters(config: ParserConfig, children: Sequence[Any]) -> Any: lpar, *paramlist, rpar = children return FuncdefPartial( lpar=LeftParen( whitespace_after=parse_parenthesizable_whitespace( config, lpar.whitespace_after ) ), params=Parameters() if not paramlist else paramlist[0], rpar=RightParen( whitespace_before=parse_parenthesizable_whitespace( config, rpar.whitespace_before ) ), ) @with_production("funcdef_annotation", "'->' test") def convert_funcdef_annotation(config: ParserConfig, children: Sequence[Any]) -> Any: arrow, typehint = children return Annotation( whitespace_before_indicator=parse_parenthesizable_whitespace( config, arrow.whitespace_before ), whitespace_after_indicator=parse_parenthesizable_whitespace( config, arrow.whitespace_after ), annotation=typehint.value, ) @with_production("classdef", "'class' NAME ['(' [arglist] ')'] ':' suite") def convert_classdef(config: ParserConfig, children: Sequence[Any]) -> Any: classdef, name, *arglist, colon, suite = children # First, parse out the comments and empty lines before the statement. 
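    # (Sketch of the shapes handled below, with a hypothetical input: for
    # ``class Foo(Base, metaclass=Meta): pass``, ``arglist`` holds the
    # parenthesized partial, and the loop further down routes ``Base`` into
    # ``bases`` and ``metaclass=Meta`` into ``keywords``.)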
leading_lines = parse_empty_lines(config, classdef.whitespace_before) # Compute common whitespace and nodes whitespace_after_class = parse_simple_whitespace(config, classdef.whitespace_after) namenode = Name(name.string) whitespace_after_name = parse_simple_whitespace(config, name.whitespace_after) # Now, construct the classdef node itself if not arglist: # No arglist, so no arguments to this class return ClassDef( leading_lines=leading_lines, lines_after_decorators=(), whitespace_after_class=whitespace_after_class, name=namenode, whitespace_after_name=whitespace_after_name, body=suite, ) else: # Unwrap arglist partial, because it's valid to not have any lpar, *args, rpar = arglist args = args[0].args if args else [] bases: List[Arg] = [] keywords: List[Arg] = [] current_arg = bases for arg in args: if arg.star == "**" or arg.keyword is not None: current_arg = keywords # Some quick validation if current_arg is keywords and ( arg.star == "*" or (arg.star == "" and arg.keyword is None) ): raise PartialParserSyntaxError( "Positional argument follows keyword argument." ) current_arg.append(arg) return ClassDef( leading_lines=leading_lines, lines_after_decorators=(), whitespace_after_class=whitespace_after_class, name=namenode, whitespace_after_name=whitespace_after_name, lpar=LeftParen( whitespace_after=parse_parenthesizable_whitespace( config, lpar.whitespace_after ) ), bases=bases, keywords=keywords, rpar=RightParen( whitespace_before=parse_parenthesizable_whitespace( config, rpar.whitespace_before ) ), whitespace_before_colon=parse_simple_whitespace( config, colon.whitespace_before ), body=suite, ) @with_production("decorator", "'@' dotted_name [ '(' [arglist] ')' ] NEWLINE") def convert_decorator(config: ParserConfig, children: Sequence[Any]) -> Any: atsign, name, *arglist, newline = children if not arglist: # This is either a name or an attribute node, so just extract it. decoratornode = name else: # This needs to be converted into a call node, and we have the # arglist partial. lpar, *args, rpar = arglist args = args[0].args if args else [] # If the trailing argument doesn't have a comma, then it owns the # trailing whitespace before the rpar. Otherwise, the comma owns # it. if len(args) > 0 and args[-1].comma == MaybeSentinel.DEFAULT: args[-1] = args[-1].with_changes( whitespace_after_arg=parse_parenthesizable_whitespace( config, rpar.whitespace_before ) ) decoratornode = Call( func=name, whitespace_after_func=parse_simple_whitespace( config, lpar.whitespace_before ), whitespace_before_args=parse_parenthesizable_whitespace( config, lpar.whitespace_after ), args=tuple(args), ) return Decorator( leading_lines=parse_empty_lines(config, atsign.whitespace_before), whitespace_after_at=parse_simple_whitespace(config, atsign.whitespace_after), decorator=decoratornode, trailing_whitespace=newline, ) @with_production("decorators", "decorator+") def convert_decorators(config: ParserConfig, children: Sequence[Any]) -> Any: return DecoratorPartial(decorators=children) @with_production("decorated", "decorators (classdef | asyncable_funcdef)") def convert_decorated(config: ParserConfig, children: Sequence[Any]) -> Any: partial, class_or_func = children # First, split up the spacing on the first decorator leading_lines = partial.decorators[0].leading_lines # Now, redistribute ownership of the whitespace decorators = ( partial.decorators[0].with_changes(leading_lines=()), *partial.decorators[1:], ) # Now, modify the original function or class to add the decorators.
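    # (For example: given a ``# comment`` line, then ``@deco``, then
    # ``def f(): ...``, the comment was parsed onto the first Decorator; it is
    # moved to the whole statement's ``leading_lines`` below, while any lines
    # that were attached to the FunctionDef itself are preserved in
    # ``lines_after_decorators``.)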
return class_or_func.with_changes( leading_lines=leading_lines, # pyre-fixme[60]: Concatenation not yet support for multiple variadic # tuples: `*class_or_func.leading_lines, # *class_or_func.lines_after_decorators`. # pyre-fixme[60]: Expected to unpack an iterable, but got `unknown`. lines_after_decorators=( *class_or_func.leading_lines, *class_or_func.lines_after_decorators, ), decorators=decorators, ) @with_production( "asyncable_stmt", "[ASYNC] (funcdef | with_stmt | for_stmt)", version=">=3.5" ) @with_production("asyncable_stmt", "funcdef | with_stmt | for_stmt", version="<3.5") def convert_asyncable_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: leading_lines, asyncnode, stmtnode = _extract_async(config, children) if isinstance(stmtnode, FunctionDef): return stmtnode.with_changes( asynchronous=asyncnode, leading_lines=leading_lines, lines_after_decorators=(), ) elif isinstance(stmtnode, With): return stmtnode.with_changes( asynchronous=asyncnode, leading_lines=leading_lines ) elif isinstance(stmtnode, For): return stmtnode.with_changes( asynchronous=asyncnode, leading_lines=leading_lines ) else: raise Exception("Logic error!") @with_production("suite", "simple_stmt_suite | indented_suite") def convert_suite(config: ParserConfig, children: Sequence[Any]) -> Any: (suite,) = children return suite @with_production("indented_suite", "NEWLINE INDENT stmt+ DEDENT") def convert_indented_suite(config: ParserConfig, children: Sequence[Any]) -> Any: newline, indent, *stmts, dedent = children return IndentedBlock( header=newline, indent=( None if indent.relative_indent == config.default_indent else indent.relative_indent ), body=stmts, # We want to be able to only keep comments in the footer that are actually for # this IndentedBlock. We do so by assuming that lines which are indented to the # same level as the block itself are comments that go at the footer of the # block. Comments that are indented to less than this indent are assumed to # belong to the next line of code. We override the indent here because the # dedent node's absolute indent is the resulting indentation after the dedent # is performed. It's this way because the whitespace state for both the dedent's # whitespace_after and the next BaseCompoundStatement's whitespace_before is # shared. This allows us to partially parse here and parse the rest of the # whitespace and comments on the next line, effectively making sure that # comments are attached to the correct node. footer=parse_empty_lines( config, dedent.whitespace_after, override_absolute_indent=indent.whitespace_before.absolute_indent, ), ) LibCST-1.2.0/libcst/_parser/conversions/terminals.py000066400000000000000000000052421456464173300224450ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
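# Terminal conversions turn a single token from the tokenizer into whatever the
# enclosing nonterminal conversion needs: most tokens pass through unchanged,
# while STRING and NEWLINE are converted eagerly. A rough sketch with
# hypothetical token values:
#
#   convert_STRING(config, <STRING token '"hi"'>)
#   #   -> WithLeadingWhitespace(SimpleString('"hi"'), <whitespace state>)
#   convert_NEWLINE(config, <NEWLINE token>)  # -> TrailingWhitespace(...)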
# pyre-unsafe from typing import Any from libcst._nodes.expression import SimpleString from libcst._parser.types.config import ParserConfig from libcst._parser.types.partials import WithLeadingWhitespace from libcst._parser.types.token import Token from libcst._parser.whitespace_parser import ( parse_empty_lines, parse_trailing_whitespace, ) def convert_NAME(config: ParserConfig, token: Token) -> Any: return token def convert_NUMBER(config: ParserConfig, token: Token) -> Any: return token def convert_STRING(config: ParserConfig, token: Token) -> Any: return WithLeadingWhitespace(SimpleString(token.string), token.whitespace_before) def convert_OP(config: ParserConfig, token: Token) -> Any: return token def convert_NEWLINE(config: ParserConfig, token: Token) -> Any: # A NEWLINE token is only emitted for semantic newlines, which means that this # corresponds to a TrailingWhitespace, since that's the only semantic # newline-containing node. # N.B. Because this token is whitespace, and because the whitespace parser doesn't # try to prevent overflows, `token.whitespace_before` will end up overflowing into # the value of this newline token, so `parse_trailing_whitespace` will include # token.string's value. This is expected and desired behavior. return parse_trailing_whitespace(config, token.whitespace_before) def convert_INDENT(config: ParserConfig, token: Token) -> Any: return token def convert_DEDENT(config: ParserConfig, token: Token) -> Any: return token def convert_ENDMARKER(config: ParserConfig, token: Token) -> Any: # Parse any and all empty lines with an indent similar to the header. That is, # indent of nothing and including all indents. In some cases, like when the # footer parser follows an indented suite, the state's indent can be wrong # due to the fact that it is shared with the _DEDENT node. We know that if # we're parsing the end of a file, we will have no indent. return parse_empty_lines( config, token.whitespace_before, override_absolute_indent="" ) def convert_FSTRING_START(config: ParserConfig, token: Token) -> Any: return token def convert_FSTRING_END(config: ParserConfig, token: Token) -> Any: return token def convert_FSTRING_STRING(config: ParserConfig, token: Token) -> Any: return token def convert_ASYNC(config: ParserConfig, token: Token) -> Any: return token def convert_AWAIT(config: ParserConfig, token: Token) -> Any: return token LibCST-1.2.0/libcst/_parser/custom_itertools.py000066400000000000000000000011561456464173300215150ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from itertools import zip_longest from typing import Iterable, Iterator, Optional, Tuple, TypeVar _T = TypeVar("_T") # https://docs.python.org/3/library/itertools.html#itertools-recipes def grouper( iterable: Iterable[_T], n: int, fillvalue: Optional[_T] = None ) -> Iterator[Tuple[Optional[_T], ...]]: "Collect data into fixed-length chunks or blocks" # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx args = [iter(iterable)] * n return zip_longest(*args, fillvalue=fillvalue) LibCST-1.2.0/libcst/_parser/detect_config.py000066400000000000000000000161461456464173300207010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
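# detect_config (below) inspects the source before parsing to fill in any
# values the caller left as AutoConfig: the encoding (BOM / PEP 263 cookie),
# the default newline (first newline found), the default indent (first INDENT
# token), trailing-newline presence, and __future__ imports. A sketch of the
# observable behavior, assuming the pure-Python parser:
#
#   result = detect_config(
#       b"if x:\r\n\tpass\r\n",
#       partial=PartialParserConfig(),
#       detect_trailing_newline=True,
#       detect_default_newline=True,
#   )
#   # result.config.default_newline == "\r\n"
#   # result.config.default_indent == "\t"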
import itertools import re from dataclasses import dataclass from io import BytesIO from tokenize import detect_encoding as py_tokenize_detect_encoding from typing import FrozenSet, Iterable, Iterator, Pattern, Set, Tuple, Union from libcst._nodes.whitespace import NEWLINE_RE from libcst._parser.parso.python.token import PythonTokenTypes, TokenType from libcst._parser.parso.utils import split_lines from libcst._parser.types.config import AutoConfig, ParserConfig, PartialParserConfig from libcst._parser.types.token import Token from libcst._parser.wrapped_tokenize import tokenize_lines _INDENT: TokenType = PythonTokenTypes.INDENT _NAME: TokenType = PythonTokenTypes.NAME _NEWLINE: TokenType = PythonTokenTypes.NEWLINE _STRING: TokenType = PythonTokenTypes.STRING _FALLBACK_DEFAULT_NEWLINE = "\n" _FALLBACK_DEFAULT_INDENT = " " _CONTINUATION_RE: Pattern[str] = re.compile(r"\\(\r\n?|\n)", re.UNICODE) @dataclass(frozen=True) class ConfigDetectionResult: # The config is a set of constant values used by the parser. config: ParserConfig # The tokens iterator is mutated by the parser. tokens: Iterator[Token] def _detect_encoding(source: Union[str, bytes]) -> str: """ Detects the encoding from the presence of a UTF-8 BOM or an encoding cookie as specified in PEP 263. If given a string (instead of bytes) the encoding is assumed to be utf-8. """ if isinstance(source, str): return "utf-8" return py_tokenize_detect_encoding(BytesIO(source).readline)[0] def _detect_default_newline(source_str: str) -> str: """ Finds the first newline, and uses that value as the default newline. """ # Don't use `NEWLINE_RE` for this, because it might match multiple newlines as a # single newline. match = NEWLINE_RE.search(source_str) return match.group(0) if match is not None else _FALLBACK_DEFAULT_NEWLINE def _detect_indent(tokens: Iterable[Token]) -> str: """ Finds the first INDENT token, and uses that as the value of the default indent. """ try: first_indent = next(t for t in tokens if t.type is _INDENT) except StopIteration: return _FALLBACK_DEFAULT_INDENT first_indent_str = first_indent.relative_indent assert first_indent_str is not None, "INDENT tokens must contain a relative_indent" return first_indent_str def _detect_trailing_newline(source_str: str) -> bool: if len(source_str) == 0 or not NEWLINE_RE.fullmatch(source_str[-1]): return False # Make sure that the last newline wasn't following a continuation return not ( _CONTINUATION_RE.fullmatch(source_str[-2:]) or _CONTINUATION_RE.fullmatch(source_str[-3:]) ) def _detect_future_imports(tokens: Iterable[Token]) -> FrozenSet[str]: """ Finds __future__ imports in their proper locations. See `https://www.python.org/dev/peps/pep-0236/`_ """ future_imports: Set[str] = set() state = 0 for tok in tokens: if state == 0 and tok.type in (_STRING, _NEWLINE): continue elif state == 0 and tok.string == "from": state = 1 elif state == 1 and tok.string == "__future__": state = 2 elif state == 2 and tok.string == "import": state = 3 elif state == 3 and tok.string == "as": state = 4 elif state == 3 and tok.type == _NAME: future_imports.add(tok.string) elif state == 4 and tok.type == _NAME: state = 3 elif state == 3 and tok.string in "(),": continue elif state == 3 and tok.type == _NEWLINE: state = 0 else: break return frozenset(future_imports) def convert_to_utf8( source: Union[str, bytes], *, partial: PartialParserConfig ) -> Tuple[str, str]: """ Returns an (original encoding, converted source) tuple. 
""" partial_encoding = partial.encoding encoding = ( _detect_encoding(source) if isinstance(partial_encoding, AutoConfig) else partial_encoding ) source_str = source if isinstance(source, str) else source.decode(encoding) return (encoding, source_str) def detect_config( source: Union[str, bytes], *, partial: PartialParserConfig, detect_trailing_newline: bool, detect_default_newline: bool, ) -> ConfigDetectionResult: """ Computes a ParserConfig given the current source code to be parsed and a partial config. """ python_version = partial.parsed_python_version encoding, source_str = convert_to_utf8(source, partial=partial) partial_default_newline = partial.default_newline default_newline = ( ( _detect_default_newline(source_str) if detect_default_newline else _FALLBACK_DEFAULT_NEWLINE ) if isinstance(partial_default_newline, AutoConfig) else partial_default_newline ) # HACK: The grammar requires a trailing newline, but python doesn't actually require # a trailing newline. Add one onto the end to make the parser happy. We'll strip it # out again during cst.Module's codegen. # # I think parso relies on error recovery support to handle this, which we don't # have. lib2to3 doesn't handle this case at all AFAICT. has_trailing_newline = detect_trailing_newline and _detect_trailing_newline( source_str ) if detect_trailing_newline and not has_trailing_newline: source_str += default_newline lines = split_lines(source_str, keepends=True) tokens = tokenize_lines(source_str, lines, python_version) partial_default_indent = partial.default_indent if isinstance(partial_default_indent, AutoConfig): # We need to clone `tokens` before passing it to `_detect_indent`, because # `_detect_indent` consumes some tokens, mutating `tokens`. # # Implementation detail: CPython's `itertools.tee` uses weakrefs to reduce the # size of its FIFO, so this doesn't retain items (leak memory) for `tokens_dup` # once `token_dup` is freed at the end of this method (subject to # GC/refcounting). tokens, tokens_dup = itertools.tee(tokens) default_indent = _detect_indent(tokens_dup) else: default_indent = partial_default_indent partial_future_imports = partial.future_imports if isinstance(partial_future_imports, AutoConfig): # Same note as above re itertools.tee, we will consume tokens. tokens, tokens_dup = itertools.tee(tokens) future_imports = _detect_future_imports(tokens_dup) else: future_imports = partial_future_imports return ConfigDetectionResult( config=ParserConfig( lines=lines, encoding=encoding, default_indent=default_indent, default_newline=default_newline, has_trailing_newline=has_trailing_newline, version=python_version, future_imports=future_imports, ), tokens=tokens, ) LibCST-1.2.0/libcst/_parser/entrypoints.py000066400000000000000000000135021456464173300204730ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Parser entrypoints define the way users of our API are allowed to interact with the parser. 
A parser entrypoint should take the source code and some configuration information """ import os from functools import partial from typing import Union from libcst._nodes.base import CSTNode from libcst._nodes.expression import BaseExpression from libcst._nodes.module import Module from libcst._nodes.statement import BaseCompoundStatement, SimpleStatementLine from libcst._parser.detect_config import convert_to_utf8, detect_config from libcst._parser.grammar import get_grammar, validate_grammar from libcst._parser.python_parser import PythonCSTParser from libcst._parser.types.config import PartialParserConfig _DEFAULT_PARTIAL_PARSER_CONFIG: PartialParserConfig = PartialParserConfig() def is_native() -> bool: typ = os.environ.get("LIBCST_PARSER_TYPE") return typ != "pure" def _parse( entrypoint: str, source: Union[str, bytes], config: PartialParserConfig, *, detect_trailing_newline: bool, detect_default_newline: bool, ) -> CSTNode: if is_native(): from libcst.native import parse_expression, parse_module, parse_statement encoding, source_str = convert_to_utf8(source, partial=config) if entrypoint == "file_input": parse = partial(parse_module, encoding=encoding) elif entrypoint == "stmt_input": parse = parse_statement elif entrypoint == "expression_input": parse = parse_expression else: raise ValueError(f"Unknown parser entry point: {entrypoint}") return parse(source_str) return _pure_python_parse( entrypoint, source, config, detect_trailing_newline=detect_trailing_newline, detect_default_newline=detect_default_newline, ) def _pure_python_parse( entrypoint: str, source: Union[str, bytes], config: PartialParserConfig, *, detect_trailing_newline: bool, detect_default_newline: bool, ) -> CSTNode: detection_result = detect_config( source, partial=config, detect_trailing_newline=detect_trailing_newline, detect_default_newline=detect_default_newline, ) validate_grammar() grammar = get_grammar(config.parsed_python_version, config.future_imports) parser = PythonCSTParser( tokens=detection_result.tokens, config=detection_result.config, pgen_grammar=grammar, start_nonterminal=entrypoint, ) # The parser has an Any return type, we can at least refine it to CSTNode here. result = parser.parse() assert isinstance(result, CSTNode) return result def parse_module( source: Union[str, bytes], # the only entrypoint that accepts bytes config: PartialParserConfig = _DEFAULT_PARTIAL_PARSER_CONFIG, ) -> Module: """ Accepts an entire python module, including all leading and trailing whitespace. If source is ``bytes``, the encoding will be inferred and preserved. If the source is a ``string``, we will default to assuming UTF-8 encoding if the module is rendered back out to source as bytes. It is recommended that when calling :func:`~libcst.parse_module` with a string you access the serialized code using :class:`~libcst.Module`'s code attribute, and when calling it with bytes you access the serialized code using :class:`~libcst.Module`'s bytes attribute. """ result = _parse( "file_input", source, config, detect_trailing_newline=True, detect_default_newline=True, ) assert isinstance(result, Module) return result def parse_statement( source: str, config: PartialParserConfig = _DEFAULT_PARTIAL_PARSER_CONFIG ) -> Union[SimpleStatementLine, BaseCompoundStatement]: """ Accepts a statement followed by a trailing newline. If a trailing newline is not provided, one will be added. :func:`parse_statement` is provided mainly as a convenience function to generate semi-complex trees from code snippets.
If you need to represent a statement exactly, including all leading/trailing comments, you should instead use :func:`parse_module`. Leading comments and trailing comments (on the same line) are accepted, but whitespace (or anything else) after the statement's trailing newline is not valid (there's nowhere to store it on the statement node). Note that since there is nowhere to store leading and trailing comments/empty lines, code rendered out from a parsed statement using ``cst.Module([]).code_for_node(statement)`` will not include leading/trailing comments. """ # use detect_trailing_newline to insert a newline result = _parse( "stmt_input", source, config, detect_trailing_newline=True, detect_default_newline=False, ) assert isinstance(result, (SimpleStatementLine, BaseCompoundStatement)) return result def parse_expression( source: str, config: PartialParserConfig = _DEFAULT_PARTIAL_PARSER_CONFIG ) -> BaseExpression: """ Accepts an expression on a single line. Leading and trailing whitespace is not valid (there's nowhere to store it on the expression node). :func:`parse_expression` is provided mainly as a convenience function to generate semi-complex trees from code snippets. If you need to represent an expression exactly, including all leading/trailing comments, you should instead use :func:`parse_module`. """ result = _parse( "expression_input", source, config, detect_trailing_newline=False, detect_default_newline=False, ) assert isinstance(result, BaseExpression) return result LibCST-1.2.0/libcst/_parser/grammar.py000066400000000000000000000301701456464173300175230ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
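# This module stitches the grammar together from the conversion functions:
# each ``convert_*`` function carries one or more productions (attached by the
# ``@with_production`` decorator, optionally gated on a Python version range
# and/or a __future__ import), and the productions that survive filtering are
# rendered to BNF-like text and handed to the pgen2 generator.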
import re from functools import lru_cache from typing import FrozenSet, Iterator, Mapping, Optional, Tuple, Union from libcst._parser.conversions.expression import ( convert_arg_assign_comp_for, convert_arglist, convert_argument, convert_atom, convert_atom_basic, convert_atom_curlybraces, convert_atom_ellipses, convert_atom_expr, convert_atom_expr_await, convert_atom_expr_trailer, convert_atom_parens, convert_atom_squarebrackets, convert_atom_string, convert_binop, convert_boolop, convert_comp_for, convert_comp_if, convert_comp_op, convert_comparison, convert_dictorsetmaker, convert_expression_input, convert_factor, convert_fstring, convert_fstring_content, convert_fstring_conversion, convert_fstring_equality, convert_fstring_expr, convert_fstring_format_spec, convert_lambda, convert_namedexpr_test, convert_not_test, convert_power, convert_sliceop, convert_star_arg, convert_star_expr, convert_subscript, convert_subscriptlist, convert_sync_comp_for, convert_test, convert_test_nocond, convert_test_or_expr_list, convert_testlist_comp_list, convert_testlist_comp_tuple, convert_trailer, convert_trailer_arglist, convert_trailer_attribute, convert_trailer_subscriptlist, convert_yield_arg, convert_yield_expr, ) from libcst._parser.conversions.module import convert_file_input from libcst._parser.conversions.params import ( convert_argslist, convert_fpdef, convert_fpdef_assign, convert_fpdef_slash, convert_fpdef_star, convert_fpdef_starstar, ) from libcst._parser.conversions.statement import ( convert_annassign, convert_assert_stmt, convert_assign, convert_asyncable_funcdef, convert_asyncable_stmt, convert_augassign, convert_break_stmt, convert_classdef, convert_compound_stmt, convert_continue_stmt, convert_decorated, convert_decorator, convert_decorators, convert_del_stmt, convert_dotted_as_name, convert_dotted_as_names, convert_dotted_name, convert_except_clause, convert_expr_stmt, convert_for_stmt, convert_funcdef, convert_funcdef_annotation, convert_global_stmt, convert_if_stmt, convert_if_stmt_elif, convert_if_stmt_else, convert_import_as_name, convert_import_as_names, convert_import_from, convert_import_name, convert_import_relative, convert_import_stmt, convert_indented_suite, convert_nonlocal_stmt, convert_parameters, convert_pass_stmt, convert_raise_stmt, convert_return_stmt, convert_simple_stmt_line, convert_simple_stmt_partial, convert_simple_stmt_suite, convert_small_stmt, convert_stmt, convert_stmt_input, convert_suite, convert_try_stmt, convert_while_stmt, convert_with_item, convert_with_stmt, ) from libcst._parser.conversions.terminals import ( convert_ASYNC, convert_AWAIT, convert_DEDENT, convert_ENDMARKER, convert_FSTRING_END, convert_FSTRING_START, convert_FSTRING_STRING, convert_INDENT, convert_NAME, convert_NEWLINE, convert_NUMBER, convert_OP, convert_STRING, ) from libcst._parser.parso.pgen2.generator import generate_grammar, Grammar from libcst._parser.parso.python.token import PythonTokenTypes, TokenType from libcst._parser.parso.utils import parse_version_string, PythonVersionInfo from libcst._parser.production_decorator import get_productions from libcst._parser.types.config import AutoConfig from libcst._parser.types.conversions import NonterminalConversion, TerminalConversion from libcst._parser.types.production import Production # Keep this sorted alphabetically _TERMINAL_CONVERSIONS_SEQUENCE: Tuple[TerminalConversion, ...] 
= ( convert_DEDENT, convert_ENDMARKER, convert_INDENT, convert_NAME, convert_NEWLINE, convert_NUMBER, convert_OP, convert_STRING, convert_FSTRING_START, convert_FSTRING_END, convert_FSTRING_STRING, convert_ASYNC, convert_AWAIT, ) # Try to match the order of https://docs.python.org/3/reference/grammar.html _NONTERMINAL_CONVERSIONS_SEQUENCE: Tuple[NonterminalConversion, ...] = ( convert_file_input, convert_stmt_input, # roughly equivalent to single_input convert_expression_input, # roughly equivalent to eval_input convert_stmt, convert_simple_stmt_partial, convert_simple_stmt_line, convert_simple_stmt_suite, convert_small_stmt, convert_expr_stmt, convert_annassign, convert_augassign, convert_assign, convert_pass_stmt, convert_continue_stmt, convert_break_stmt, convert_del_stmt, convert_import_stmt, convert_import_name, convert_import_relative, convert_import_from, convert_import_as_name, convert_dotted_as_name, convert_import_as_names, convert_dotted_as_names, convert_dotted_name, convert_return_stmt, convert_raise_stmt, convert_global_stmt, convert_nonlocal_stmt, convert_assert_stmt, convert_compound_stmt, convert_if_stmt, convert_if_stmt_elif, convert_if_stmt_else, convert_while_stmt, convert_for_stmt, convert_try_stmt, convert_except_clause, convert_with_stmt, convert_with_item, convert_asyncable_funcdef, convert_funcdef, convert_classdef, convert_decorator, convert_decorators, convert_decorated, convert_asyncable_stmt, convert_parameters, convert_argslist, convert_fpdef_slash, convert_fpdef_star, convert_fpdef_starstar, convert_fpdef_assign, convert_fpdef, convert_funcdef_annotation, convert_suite, convert_indented_suite, convert_namedexpr_test, convert_test, convert_test_nocond, convert_lambda, convert_boolop, convert_not_test, convert_comparison, convert_comp_op, convert_star_expr, convert_binop, convert_factor, convert_power, convert_atom_expr, convert_atom_expr_await, convert_atom_expr_trailer, convert_trailer, convert_trailer_attribute, convert_trailer_subscriptlist, convert_subscriptlist, convert_subscript, convert_sliceop, convert_trailer_arglist, convert_atom, convert_atom_basic, convert_atom_parens, convert_atom_squarebrackets, convert_atom_curlybraces, convert_atom_string, convert_fstring, convert_fstring_content, convert_fstring_conversion, convert_fstring_equality, convert_fstring_expr, convert_fstring_format_spec, convert_atom_ellipses, convert_testlist_comp_tuple, convert_testlist_comp_list, convert_test_or_expr_list, convert_dictorsetmaker, convert_arglist, convert_argument, convert_arg_assign_comp_for, convert_star_arg, convert_sync_comp_for, convert_comp_for, convert_comp_if, convert_yield_expr, convert_yield_arg, ) def get_grammar_str(version: PythonVersionInfo, future_imports: FrozenSet[str]) -> str: """ Returns a BNF-like grammar text that `parso.pgen2.generator.generate_grammar` can handle. While you should generally use `get_grammar` instead, this can be useful for debugging the grammar. """ lines = [] for p in get_nonterminal_productions(version, future_imports): lines.append(str(p)) return "\n".join(lines) + "\n" # TODO: We should probably provide an on-disk cache like parso and lib2to3 do. Because # of how we're defining our grammar, efficient cache invalidation is harder, though not # impossible.
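# A minimal usage sketch (for illustration only; these helpers are internal,
# not public API):
#
#   from libcst._parser.parso.utils import parse_version_string
#   version = parse_version_string("3.8")
#   print(get_grammar_str(version, frozenset()))  # dump the productions as BNF
#   grammar = get_grammar(version, frozenset())   # cached pgen2 Grammar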
@lru_cache() def get_grammar( version: PythonVersionInfo, future_imports: Union[FrozenSet[str], AutoConfig], ) -> "Grammar[TokenType]": if isinstance(future_imports, AutoConfig): # For easier testing, if not provided assume no __future__ imports future_imports = frozenset(()) return generate_grammar(get_grammar_str(version, future_imports), PythonTokenTypes) @lru_cache() def get_terminal_conversions() -> Mapping[str, TerminalConversion]: """ Returns a mapping from terminal type name to the conversion function that should be called by the parser. """ return { # pyre-fixme[16]: Optional type has no attribute `group`. re.match("convert_(.*)", fn.__name__).group(1): fn for fn in _TERMINAL_CONVERSIONS_SEQUENCE } @lru_cache() def validate_grammar() -> None: for fn in _NONTERMINAL_CONVERSIONS_SEQUENCE: fn_productions = get_productions(fn) if all(p.name == fn_productions[0].name for p in fn_productions): # all the production names are the same, ensure that the `convert_` function # is named correctly production_name = fn_productions[0].name expected_name = f"convert_{production_name}" if fn.__name__ != expected_name: raise Exception( f"The conversion function for '{production_name}' " + f"must be called '{expected_name}', not '{fn.__name__}'." ) def _get_version_comparison(version: str) -> Tuple[str, PythonVersionInfo]: if version[:2] in (">=", "<=", "==", "!="): return (version[:2], parse_version_string(version[2:].strip())) if version[:1] in (">", "<"): return (version[:1], parse_version_string(version[1:].strip())) raise Exception(f"Invalid version comparison specifier '{version}'") def _compare_versions( requested_version: PythonVersionInfo, actual_version: PythonVersionInfo, comparison: str, ) -> bool: if comparison == ">=": return actual_version >= requested_version if comparison == "<=": return actual_version <= requested_version if comparison == "==": return actual_version == requested_version if comparison == "!=": return actual_version != requested_version if comparison == ">": return actual_version > requested_version if comparison == "<": return actual_version < requested_version raise Exception(f"Invalid version comparison specifier '{comparison}'") def _should_include( requested_version: Optional[str], actual_version: PythonVersionInfo ) -> bool: if requested_version is None: return True for version in requested_version.split(","): comparison, parsed_version = _get_version_comparison(version.strip()) if not _compare_versions(parsed_version, actual_version, comparison): return False return True def _should_include_future( future: Optional[str], future_imports: FrozenSet[str], ) -> bool: if future is None: return True if future[:1] == "!": return future[1:] not in future_imports return future in future_imports def get_nonterminal_productions( version: PythonVersionInfo, future_imports: FrozenSet[str] ) -> Iterator[Production]: for conversion in _NONTERMINAL_CONVERSIONS_SEQUENCE: for production in get_productions(conversion): if not _should_include(production.version, version): continue if not _should_include_future(production.future, future_imports): continue yield production @lru_cache() def get_nonterminal_conversions( version: PythonVersionInfo, future_imports: FrozenSet[str], ) -> Mapping[str, NonterminalConversion]: """ Returns a mapping from nonterminal production name to the conversion function that should be called by the parser. 
""" conversions = {} for fn in _NONTERMINAL_CONVERSIONS_SEQUENCE: for fn_production in get_productions(fn): if not _should_include(fn_production.version, version): continue if not _should_include_future(fn_production.future, future_imports): continue if fn_production.name in conversions: raise Exception( f"Found duplicate '{fn_production.name}' production in grammar" ) conversions[fn_production.name] = fn return conversions LibCST-1.2.0/libcst/_parser/parso/000077500000000000000000000000001456464173300166465ustar00rootroot00000000000000LibCST-1.2.0/libcst/_parser/parso/__init__.py000066400000000000000000000002631456464173300207600ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/_parser/parso/pgen2/000077500000000000000000000000001456464173300176615ustar00rootroot00000000000000LibCST-1.2.0/libcst/_parser/parso/pgen2/__init__.py000066400000000000000000000002631456464173300217730ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/_parser/parso/pgen2/generator.py000066400000000000000000000326371456464173300222340ustar00rootroot00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # # Modifications: # Copyright David Halter and Contributors # Modifications are dual-licensed: MIT and PSF. # 99% of the code is different from pgen2, now. # # A fork of `parso.pgen2.generator`. # https://github.com/davidhalter/parso/blob/master/parso/pgen2/generator.py # # The following changes were made: # - Type stubs were directly applied. # pyre-unsafe """ This module defines the data structures used to represent a grammar. Specifying grammars in pgen is possible with this grammar:: grammar: (NEWLINE | rule)* ENDMARKER rule: NAME ':' rhs NEWLINE rhs: items ('|' items)* items: item+ item: '[' rhs ']' | atom ['+' | '*'] atom: '(' rhs ')' | NAME | STRING This grammar is self-referencing. This parser generator (pgen2) was created by Guido Rossum and used for lib2to3. Most of the code has been refactored to make it more Pythonic. Since this was a "copy" of the CPython Parser parser "pgen", there was some work needed to make it more readable. It should also be slightly faster than the original pgen2, because we made some optimizations. """ from ast import literal_eval from typing import Any, Generic, Mapping, Sequence, Set, TypeVar, Union from libcst._parser.parso.pgen2.grammar_parser import GrammarParser, NFAState _TokenTypeT = TypeVar("_TokenTypeT") class DFAPlan: """ Plans are used for the parser to create stack nodes and do the proper DFA state transitions. """ def __init__( self, next_dfa: "DFAState", dfa_pushes: Sequence["DFAState"] = [] ) -> None: self.next_dfa = next_dfa self.dfa_pushes = dfa_pushes def __repr__(self) -> str: return "%s(%s, %s)" % (self.__class__.__name__, self.next_dfa, self.dfa_pushes) class DFAState(Generic[_TokenTypeT]): """ The DFAState object is the core class for pretty much anything. DFAState are the vertices of an ordered graph while arcs and transitions are the edges. Arcs are the initial edges, where most DFAStates are not connected and transitions are then calculated to connect the DFA state machines that have different nonterminals. 
""" def __init__(self, from_rule: str, nfa_set: Set[NFAState], final: NFAState) -> None: self.from_rule = from_rule self.nfa_set = nfa_set self.arcs: Mapping[ str, DFAState ] = {} # map from terminals/nonterminals to DFAState # In an intermediary step we set these nonterminal arcs (which has the # same structure as arcs). These don't contain terminals anymore. self.nonterminal_arcs: Mapping[str, DFAState] = {} # Transitions are basically the only thing that the parser is using # with is_final. Everyting else is purely here to create a parser. self.transitions: Mapping[Union[_TokenTypeT, ReservedString], DFAPlan] = {} self.is_final = final in nfa_set def add_arc(self, next_, label): assert isinstance(label, str) assert label not in self.arcs assert isinstance(next_, DFAState) self.arcs[label] = next_ def unifystate(self, old, new): for label, next_ in self.arcs.items(): if next_ is old: self.arcs[label] = new def __eq__(self, other): # Equality test -- ignore the nfa_set instance variable assert isinstance(other, DFAState) if self.is_final != other.is_final: return False # Can't just return self.arcs == other.arcs, because that # would invoke this method recursively, with cycles... if len(self.arcs) != len(other.arcs): return False for label, next_ in self.arcs.items(): if next_ is not other.arcs.get(label): return False return True def __repr__(self) -> str: return "<%s: %s is_final=%s>" % ( self.__class__.__name__, self.from_rule, self.is_final, ) class ReservedString: """ Most grammars will have certain keywords and operators that are mentioned in the grammar as strings (e.g. "if") and not token types (e.g. NUMBER). This class basically is the former. """ def __init__(self, value: str) -> None: self.value = value def __repr__(self) -> str: return "%s(%s)" % (self.__class__.__name__, self.value) class Grammar(Generic[_TokenTypeT]): """ Once initialized, this class supplies the grammar tables for the parsing engine implemented by parse.py. The parsing engine accesses the instance variables directly. The only important part in this parsers are dfas and transitions between dfas. """ def __init__( self, start_nonterminal: str, rule_to_dfas: Mapping[str, Sequence[DFAState[_TokenTypeT]]], reserved_syntax_strings: Mapping[str, ReservedString], ) -> None: self.nonterminal_to_dfas = rule_to_dfas self.reserved_syntax_strings = reserved_syntax_strings self.start_nonterminal = start_nonterminal def _simplify_dfas(dfas): """ This is not theoretically optimal, but works well enough. Algorithm: repeatedly look for two states that have the same set of arcs (same labels pointing to the same nodes) and unify them, until things stop changing. dfas is a list of DFAState instances """ changes = True while changes: changes = False for i, state_i in enumerate(dfas): for j in range(i + 1, len(dfas)): state_j = dfas[j] if state_i == state_j: # print " unify", i, j del dfas[j] for state in dfas: state.unifystate(state_j, state_i) changes = True break def _make_dfas(start, finish): """ Uses the powerset construction algorithm to create DFA states from sets of NFA states. Also does state reduction if some states are not needed. """ # To turn an NFA into a DFA, we define the states of the DFA # to correspond to *sets* of states of the NFA. Then do some # state reduction. 
assert isinstance(start, NFAState) assert isinstance(finish, NFAState) def addclosure(nfa_state, base_nfa_set): assert isinstance(nfa_state, NFAState) if nfa_state in base_nfa_set: return base_nfa_set.add(nfa_state) for nfa_arc in nfa_state.arcs: if nfa_arc.nonterminal_or_string is None: addclosure(nfa_arc.next, base_nfa_set) base_nfa_set = set() addclosure(start, base_nfa_set) states = [DFAState(start.from_rule, base_nfa_set, finish)] for state in states: # NB states grows while we're iterating arcs = {} # Find state transitions and store them in arcs. for nfa_state in state.nfa_set: for nfa_arc in nfa_state.arcs: if nfa_arc.nonterminal_or_string is not None: nfa_set = arcs.setdefault(nfa_arc.nonterminal_or_string, set()) addclosure(nfa_arc.next, nfa_set) # Now create the dfa's with no None's in arcs anymore. All Nones have # been eliminated and state transitions (arcs) are properly defined, we # just need to create the dfa's. for nonterminal_or_string, nfa_set in arcs.items(): for nested_state in states: if nested_state.nfa_set == nfa_set: # The DFA state already exists for this rule. break else: nested_state = DFAState(start.from_rule, nfa_set, finish) states.append(nested_state) state.add_arc(nested_state, nonterminal_or_string) return states # List of DFAState instances; first one is start def generate_grammar(bnf_grammar: str, token_namespace: Any) -> Grammar[Any]: """ ``bnf_text`` is a grammar in extended BNF (using * for repetition, + for at-least-once repetition, [] for optional parts, | for alternatives and () for grouping). It's not EBNF according to ISO/IEC 14977. It's a dialect Python uses in its own parser. """ rule_to_dfas = {} start_nonterminal = None for nfa_a, nfa_z in GrammarParser(bnf_grammar).parse(): dfas = _make_dfas(nfa_a, nfa_z) _simplify_dfas(dfas) rule_to_dfas[nfa_a.from_rule] = dfas if start_nonterminal is None: start_nonterminal = nfa_a.from_rule reserved_strings = {} for nonterminal, dfas in rule_to_dfas.items(): for dfa_state in dfas: for terminal_or_nonterminal, next_dfa in dfa_state.arcs.items(): if terminal_or_nonterminal in rule_to_dfas: dfa_state.nonterminal_arcs[terminal_or_nonterminal] = next_dfa else: transition = _make_transition( token_namespace, reserved_strings, terminal_or_nonterminal ) dfa_state.transitions[transition] = DFAPlan(next_dfa) _calculate_tree_traversal(rule_to_dfas) if start_nonterminal is None: raise Exception("could not find starting nonterminal!") return Grammar(start_nonterminal, rule_to_dfas, reserved_strings) def _make_transition(token_namespace, reserved_syntax_strings, label): """ Creates a reserved string ("if", "for", "*", ...) or returns the token type (NUMBER, STRING, ...) for a given grammar terminal. """ if label[0].isalpha(): # A named token (e.g. NAME, NUMBER, STRING) return getattr(token_namespace, label) else: # Either a keyword or an operator assert label[0] in ('"', "'"), label assert not label.startswith('"""') and not label.startswith("'''") value = literal_eval(label) try: return reserved_syntax_strings[value] except KeyError: r = reserved_syntax_strings[value] = ReservedString(value) return r def _calculate_tree_traversal(nonterminal_to_dfas): """ By this point we know how dfas can move around within a stack node, but we don't know how we can add a new stack node (nonterminal transitions). """ # Map from grammar rule (nonterminal) name to a set of tokens. 
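    # (Sketch: for ``stmt: if_stmt | expr_stmt`` the first plan for ``stmt``
    # maps the 'if' keyword to a push of the if_stmt DFA and a NAME token to a
    # push of the expr_stmt DFA; if both alternatives could start with the
    # same token, the ambiguity error below is raised instead.)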
first_plans = {} nonterminals = list(nonterminal_to_dfas.keys()) nonterminals.sort() for nonterminal in nonterminals: if nonterminal not in first_plans: _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal) # Now that we have calculated the first terminals, we are sure that # there is no left recursion. for dfas in nonterminal_to_dfas.values(): for dfa_state in dfas: transitions = dfa_state.transitions for nonterminal, next_dfa in dfa_state.nonterminal_arcs.items(): for transition, pushes in first_plans[nonterminal].items(): if transition in transitions: prev_plan = transitions[transition] # Make sure these are sorted so that error messages are # at least deterministic choices = sorted( [ ( prev_plan.dfa_pushes[0].from_rule if prev_plan.dfa_pushes else prev_plan.next_dfa.from_rule ), (pushes[0].from_rule if pushes else next_dfa.from_rule), ] ) raise ValueError( ( "Rule %s is ambiguous; given a %s token, we " + "can't determine if we should evaluate %s or %s." ) % ((dfa_state.from_rule, transition) + tuple(choices)) ) transitions[transition] = DFAPlan(next_dfa, pushes) def _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal): """ Calculates the first plan in the first_plans dictionary for every given nonterminal. This is going to be used to know when to create stack nodes. """ dfas = nonterminal_to_dfas[nonterminal] new_first_plans = {} first_plans[nonterminal] = None # dummy to detect left recursion # We only need to check the first dfa. All the following ones are not # interesting to find first terminals. state = dfas[0] for transition, next_ in state.transitions.items(): # It's a string. We have finally found a possible first token. new_first_plans[transition] = [next_.next_dfa] for nonterminal2, next_ in state.nonterminal_arcs.items(): # It's a nonterminal and we have either a left recursion issue # in the grammar or we have to recurse. try: first_plans2 = first_plans[nonterminal2] except KeyError: first_plans2 = _calculate_first_plans( nonterminal_to_dfas, first_plans, nonterminal2 ) else: if first_plans2 is None: raise ValueError("left recursion for rule %r" % nonterminal) for t, pushes in first_plans2.items(): new_first_plans[t] = [next_] + pushes first_plans[nonterminal] = new_first_plans return new_first_plans LibCST-1.2.0/libcst/_parser/parso/pgen2/grammar_parser.py000066400000000000000000000133771456464173300232500ustar00rootroot00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # # Modifications: # Copyright David Halter and Contributors # Modifications are dual-licensed: MIT and PSF. # 99% of the code is different from pgen2, now. # # A fork of `parso.pgen2.grammar_parser`. # https://github.com/davidhalter/parso/blob/master/parso/pgen2/grammar_parser.py # # The following changes were made: # - Type stubs were directly applied. 
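# This is a hand-written recursive-descent parser for the meta-grammar quoted
# in generator.py. Each rule is parsed into a (start, end) pair of NFA states;
# for example, the optionality in ``item: '[' rhs ']'`` is encoded by adding
# an epsilon arc (an NFAArc whose nonterminal_or_string is None) directly from
# the start state to the end state.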
# pyre-unsafe from typing import Generator, List, Optional, Tuple from libcst._parser.parso.python.token import PythonTokenTypes from libcst._parser.parso.python.tokenize import tokenize from libcst._parser.parso.utils import parse_version_string class NFAArc: def __init__(self, next_: "NFAState", nonterminal_or_string: Optional[str]) -> None: self.next: NFAState = next_ self.nonterminal_or_string: Optional[str] = nonterminal_or_string def __repr__(self) -> str: return "<%s: %s>" % (self.__class__.__name__, self.nonterminal_or_string) class NFAState: def __init__(self, from_rule: str) -> None: self.from_rule = from_rule self.arcs: List[NFAArc] = [] def add_arc( self, next_: "NFAState", nonterminal_or_string: Optional[str] = None ) -> None: self.arcs.append(NFAArc(next_, nonterminal_or_string)) def __repr__(self) -> str: return "<%s: from %s>" % (self.__class__.__name__, self.from_rule) class GrammarParser: """ The parser for Python grammar files. """ def __init__(self, bnf_grammar: str) -> None: self._bnf_grammar: str = bnf_grammar self.generator = tokenize(bnf_grammar, version_info=parse_version_string("3.6")) self._gettoken() # Initialize lookahead def parse(self) -> Generator[Tuple[NFAState, NFAState], None, None]: # grammar: (NEWLINE | rule)* ENDMARKER while self.type != PythonTokenTypes.ENDMARKER: while self.type == PythonTokenTypes.NEWLINE: self._gettoken() # rule: NAME ':' rhs NEWLINE # pyre-ignore Pyre is unhappy with the fact that we haven't put # _current_rule_name in the constructor. self._current_rule_name = self._expect(PythonTokenTypes.NAME) self._expect(PythonTokenTypes.OP, ":") a, z = self._parse_rhs() self._expect(PythonTokenTypes.NEWLINE) yield a, z def _parse_rhs(self): # rhs: items ('|' items)* a, z = self._parse_items() if self.value != "|": return a, z else: aa = NFAState(self._current_rule_name) zz = NFAState(self._current_rule_name) while True: # Add the possibility to go into the state of a and come back # to finish. aa.add_arc(a) z.add_arc(zz) if self.value != "|": break self._gettoken() a, z = self._parse_items() return aa, zz def _parse_items(self): # items: item+ a, b = self._parse_item() while self.type in ( PythonTokenTypes.NAME, PythonTokenTypes.STRING, ) or self.value in ("(", "["): c, d = self._parse_item() # Need to end on the next item. b.add_arc(c) b = d return a, b def _parse_item(self): # item: '[' rhs ']' | atom ['+' | '*'] if self.value == "[": self._gettoken() a, z = self._parse_rhs() self._expect(PythonTokenTypes.OP, "]") # Make it also possible that there is no token and change the # state. a.add_arc(z) return a, z else: a, z = self._parse_atom() value = self.value if value not in ("+", "*"): return a, z self._gettoken() # Make it clear that we can go back to the old state and repeat. z.add_arc(a) if value == "+": return a, z else: # The end state is the same as the beginning, nothing must # change. return a, a def _parse_atom(self): # atom: '(' rhs ')' | NAME | STRING if self.value == "(": self._gettoken() a, z = self._parse_rhs() self._expect(PythonTokenTypes.OP, ")") return a, z elif self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING): a = NFAState(self._current_rule_name) z = NFAState(self._current_rule_name) # Make it clear that the state transition requires that value. a.add_arc(z, self.value) self._gettoken() return a, z else: self._raise_error( "expected (...) 
or NAME or STRING, got %s/%s", self.type, self.value ) def _expect(self, type_, value=None): if self.type != type_: self._raise_error("expected %s, got %s [%s]", type_, self.type, self.value) if value is not None and self.value != value: self._raise_error("expected %s, got %s", value, self.value) value = self.value self._gettoken() return value def _gettoken(self) -> None: tup = next(self.generator) self.type, self.value, self.begin, prefix = tup def _raise_error(self, msg: str, *args: object) -> None: if args: try: msg = msg % args except Exception: msg = " ".join([msg] + list(map(str, args))) line = self._bnf_grammar.splitlines()[self.begin[0] - 1] raise SyntaxError(msg, ("", self.begin[0], self.begin[1], line)) LibCST-1.2.0/libcst/_parser/parso/python/000077500000000000000000000000001456464173300201675ustar00rootroot00000000000000LibCST-1.2.0/libcst/_parser/parso/python/__init__.py000066400000000000000000000002631456464173300223010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/_parser/parso/python/py_token.py000066400000000000000000000031511456464173300223710ustar00rootroot00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # # Modifications: # Copyright David Halter and Contributors # Modifications are dual-licensed: MIT and PSF. # 99% of the code is different from pgen2, now. # # A fork of `parso.python.token`. # https://github.com/davidhalter/parso/blob/master/parso/python/token.py # # The following changes were made: # - Explicit TokenType references instead of dynamic creation. # - Use dataclasses instead of raw classes. # pyre-unsafe from dataclasses import dataclass @dataclass(frozen=True) class TokenType: name: str contains_syntax: bool = False def __repr__(self) -> str: return "%s(%s)" % (self.__class__.__name__, self.name) class PythonTokenTypes: """ Basically an enum, but Python 2 doesn't have enums in the standard library. """ STRING: TokenType = TokenType("STRING") NUMBER: TokenType = TokenType("NUMBER") NAME: TokenType = TokenType("NAME", contains_syntax=True) ERRORTOKEN: TokenType = TokenType("ERRORTOKEN") NEWLINE: TokenType = TokenType("NEWLINE") INDENT: TokenType = TokenType("INDENT") DEDENT: TokenType = TokenType("DEDENT") ERROR_DEDENT: TokenType = TokenType("ERROR_DEDENT") ASYNC: TokenType = TokenType("ASYNC") AWAIT: TokenType = TokenType("AWAIT") FSTRING_STRING: TokenType = TokenType("FSTRING_STRING") FSTRING_START: TokenType = TokenType("FSTRING_START") FSTRING_END: TokenType = TokenType("FSTRING_END") OP: TokenType = TokenType("OP", contains_syntax=True) ENDMARKER: TokenType = TokenType("ENDMARKER") LibCST-1.2.0/libcst/_parser/parso/python/token.py000066400000000000000000000025501456464173300216630ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
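# This module prefers the token types from the native (Rust) extension when
# libcst_native is importable and falls back to the pure-Python definitions in
# py_token.py otherwise; both paths expose the same ``PythonTokenTypes`` and
# ``TokenType`` names, so callers never need to know which implementation is
# active.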
try: from libcst_native import token_type as native_token_type TokenType = native_token_type.TokenType class PythonTokenTypes: STRING: TokenType = native_token_type.STRING NUMBER: TokenType = native_token_type.NUMBER NAME: TokenType = native_token_type.NAME NEWLINE: TokenType = native_token_type.NEWLINE INDENT: TokenType = native_token_type.INDENT DEDENT: TokenType = native_token_type.DEDENT ASYNC: TokenType = native_token_type.ASYNC AWAIT: TokenType = native_token_type.AWAIT FSTRING_STRING: TokenType = native_token_type.FSTRING_STRING FSTRING_START: TokenType = native_token_type.FSTRING_START FSTRING_END: TokenType = native_token_type.FSTRING_END OP: TokenType = native_token_type.OP ENDMARKER: TokenType = native_token_type.ENDMARKER # unused dummy tokens for backwards compat with the parso tokenizer ERRORTOKEN: TokenType = native_token_type.ERRORTOKEN ERROR_DEDENT: TokenType = native_token_type.ERROR_DEDENT except ImportError: from libcst._parser.parso.python.py_token import ( # noqa F401 PythonTokenTypes, TokenType, ) LibCST-1.2.0/libcst/_parser/parso/python/tokenize.py000066400000000000000000001261611456464173300224000ustar00rootroot00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # # Modifications: # Copyright David Halter and Contributors # Modifications are dual-licensed: MIT and PSF. # 99% of the code is different from pgen2, now. # # A fork of `parso.python.tokenize`. # https://github.com/davidhalter/parso/blob/master/parso/python/tokenize.py # # The following changes were made: # - Changes to be compatible with PythonTokenTypes # - Removed main section # - Applied type stubs directly # - Removed Python 2 shims # - Added support for Python 3.6 ASYNC/AWAIT hacks # # -*- coding: utf-8 -*- # This tokenizer has been copied from the ``tokenize.py`` standard library # tokenizer. The reason was simple: The standard library tokenizer fails # if the indentation is not right. To make it possible to do error recovery the # tokenizer needed to be rewritten. # # Basically this is a stripped down version of the standard library module, so # you can read the documentation there. Additionally we included some speed and # memory optimizations here. 
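# Rough sketch of the tokenizer's output (illustrative values only):
#
#   from libcst._parser.parso.utils import parse_version_string
#   for tok in tokenize("f(x)\n", version_info=parse_version_string("3.8")):
#       print(tok)
#   # NAME 'f', OP '(', NAME 'x', OP ')', NEWLINE, ENDMARKER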
# pyre-unsafe from __future__ import absolute_import import itertools as _itertools import re import sys from codecs import BOM_UTF8 from collections import namedtuple from dataclasses import dataclass from typing import Dict, Generator, Iterable, Optional, Pattern, Set, Tuple from libcst._parser.parso.python.token import PythonTokenTypes from libcst._parser.parso.utils import PythonVersionInfo, split_lines # Maximum code point of Unicode 6.0: 0x10ffff (1,114,111) MAX_UNICODE = "\U0010ffff" BOM_UTF8_STRING = BOM_UTF8.decode("utf-8") STRING = PythonTokenTypes.STRING NAME = PythonTokenTypes.NAME NUMBER = PythonTokenTypes.NUMBER OP = PythonTokenTypes.OP NEWLINE = PythonTokenTypes.NEWLINE INDENT = PythonTokenTypes.INDENT DEDENT = PythonTokenTypes.DEDENT ASYNC = PythonTokenTypes.ASYNC AWAIT = PythonTokenTypes.AWAIT ENDMARKER = PythonTokenTypes.ENDMARKER ERRORTOKEN = PythonTokenTypes.ERRORTOKEN ERROR_DEDENT = PythonTokenTypes.ERROR_DEDENT FSTRING_START = PythonTokenTypes.FSTRING_START FSTRING_STRING = PythonTokenTypes.FSTRING_STRING FSTRING_END = PythonTokenTypes.FSTRING_END @dataclass(frozen=True) class TokenCollection: pseudo_token: Pattern single_quoted: Set[str] triple_quoted: Set[str] endpats: Dict[str, Pattern] whitespace: Pattern fstring_pattern_map: Dict[str, str] always_break_tokens: Set[str] _token_collection_cache: Dict[PythonVersionInfo, TokenCollection] = {} def group(*choices: str, **kwargs: object) -> str: capture = kwargs.pop("capture", False) # Python 2, arrghhhhh :( assert not kwargs start = "(" if not capture: start += "?:" return start + "|".join(choices) + ")" def maybe(*choices: str) -> str: return group(*choices) + "?" # Return the empty string, plus all of the valid string prefixes. def _all_string_prefixes( version_info: PythonVersionInfo, include_fstring: bool = False, only_fstring: bool = False, ) -> Set[str]: def different_case_versions(prefix): for s in _itertools.product(*[(c, c.upper()) for c in prefix]): yield "".join(s) # The valid string prefixes. Only contain the lower case versions, # and don't contain any permuations (include 'fr', but not # 'rf'). The various permutations will be generated. valid_string_prefixes = ["b", "r"] if version_info >= (3, 0): valid_string_prefixes.append("br") if version_info < (3, 0) or version_info >= (3, 3): valid_string_prefixes.append("u") result = {""} if version_info >= (3, 6) and include_fstring: f = ["f", "fr"] if only_fstring: valid_string_prefixes = f result = set() else: valid_string_prefixes += f elif only_fstring: return set() # if we add binary f-strings, add: ['fb', 'fbr'] for prefix in valid_string_prefixes: for t in _itertools.permutations(prefix): # create a list with upper and lower versions of each # character result.update(different_case_versions(t)) if version_info <= (2, 7): # In Python 2 the order cannot just be random. 
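        # Specifically, 'ur' was a legal prefix while 'ru' was not, so the
        # ordered pairs are added explicitly rather than relying on the
        # itertools.permutations pass above.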
result.update(different_case_versions("ur")) result.update(different_case_versions("br")) return result def _compile(expr: str) -> Pattern: return re.compile(expr, re.UNICODE) def _get_token_collection(version_info: PythonVersionInfo) -> TokenCollection: try: return _token_collection_cache[version_info] except KeyError: _token_collection_cache[version_info] = result = _create_token_collection( version_info ) return result fstring_raw_string = _compile(r"(?:[^{}]+|\{\{|\}\})+") unicode_character_name = r"[A-Za-z0-9\-]+(?: [A-Za-z0-9\-]+)*" fstring_string_single_line = _compile( r"(?:\{\{|\}\}|\\N\{" + unicode_character_name + r"\}|\\(?:\r\n?|\n)|\\[^\r\nN]|[^{}\r\n\\])+" ) fstring_string_multi_line = _compile( r"(?:\{\{|\}\}|\\N\{" + unicode_character_name + r"\}|\\[^N]|[^{}\\])+" ) fstring_format_spec_single_line = _compile(r"(?:\\(?:\r\n?|\n)|[^{}\r\n])+") fstring_format_spec_multi_line = _compile(r"[^{}]+") def _create_token_collection( # noqa: C901 version_info: PythonVersionInfo, ) -> TokenCollection: # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. Whitespace = r"[ \f\t]*" Comment = r"#[^\r\n]*" # Python 2 is pretty much not working properly anymore, we just ignore # parsing unicode properly, which is fine, I guess. if version_info.major == 2: Name = r"([A-Za-z_0-9]+)" elif sys.version_info[0] == 2: # Unfortunately the regex engine cannot deal with the regex below, so # just use this one. Name = r"(\w+)" else: Name = "([A-Za-z_0-9\u0080-" + MAX_UNICODE + "]+)" if version_info >= (3, 6): Hexnumber = r"0[xX](?:_?[0-9a-fA-F])+" Binnumber = r"0[bB](?:_?[01])+" Octnumber = r"0[oO](?:_?[0-7])+" Decnumber = r"(?:0(?:_?0)*|[1-9](?:_?[0-9])*)" Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) Exponent = r"[eE][-+]?[0-9](?:_?[0-9])*" Pointfloat = group( r"[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?", r"\.[0-9](?:_?[0-9])*" ) + maybe(Exponent) Expfloat = r"[0-9](?:_?[0-9])*" + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r"[0-9](?:_?[0-9])*[jJ]", Floatnumber + r"[jJ]") else: Hexnumber = r"0[xX][0-9a-fA-F]+" Binnumber = r"0[bB][01]+" if version_info >= (3, 0): Octnumber = r"0[oO][0-7]+" else: Octnumber = "0[oO]?[0-7]+" Decnumber = r"(?:0+|[1-9][0-9]*)" Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) if version_info.major < 3: Intnumber += "[lL]?" Exponent = r"[eE][-+]?[0-9]+" Pointfloat = group(r"[0-9]+\.[0-9]*", r"\.[0-9]+") + maybe(Exponent) Expfloat = r"[0-9]+" + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r"[0-9]+[jJ]", Floatnumber + r"[jJ]") Number = group(Imagnumber, Floatnumber, Intnumber) # Note that since _all_string_prefixes includes the empty string, # StringPrefix can be the empty string (making it optional). possible_prefixes = _all_string_prefixes(version_info) StringPrefix = group(*possible_prefixes) StringPrefixWithF = group(*_all_string_prefixes(version_info, include_fstring=True)) fstring_prefixes = _all_string_prefixes( version_info, include_fstring=True, only_fstring=True ) FStringStart = group(*fstring_prefixes) # Tail end of ' string. Single = r"(?:\\.|[^'\\])*'" # Tail end of " string. Double = r'(?:\\.|[^"\\])*"' # Tail end of ''' string. Single3 = r"(?:\\.|'(?!'')|[^'\\])*'''" # Tail end of """ string. 
Double3 = r'(?:\\.|"(?!"")|[^"\\])*"""' Triple = group(StringPrefixWithF + "'''", StringPrefixWithF + '"""') # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get # recognized as two instances of =). Operator = group( r"\*\*=?", r">>=?", r"<<=?", r"//=?", r"->", r"[+\-*/%&@`|^!=<>]=?", r"~" ) Bracket = "[][(){}]" special_args = [r"\r\n?", r"\n", r"[;.,@]"] if version_info >= (3, 0): special_args.insert(0, r"\.\.\.") if version_info >= (3, 8): special_args.insert(0, ":=?") else: special_args.insert(0, ":") Special = group(*special_args) Funny = group(Operator, Bracket, Special) # First (or only) line of ' or " string. ContStr = group( StringPrefix + r"'[^\r\n'\\]*(?:\\.[^\r\n'\\]*)*" + group("'", r"\\(?:\r\n?|\n)"), StringPrefix + r'"[^\r\n"\\]*(?:\\.[^\r\n"\\]*)*' + group('"', r"\\(?:\r\n?|\n)"), ) pseudo_extra_pool = [Comment, Triple] all_quotes = '"', "'", '"""', "'''" if fstring_prefixes: pseudo_extra_pool.append(FStringStart + group(*all_quotes)) PseudoExtras = group(r"\\(?:\r\n?|\n)|\Z", *pseudo_extra_pool) PseudoToken = group(Whitespace, capture=True) + group( PseudoExtras, Number, Funny, ContStr, Name, capture=True ) # For a given string prefix plus quotes, endpats maps it to a regex # to match the remainder of that string. _prefix can be empty, for # a normal single or triple quoted string (with no prefix). endpats = {} for _prefix in possible_prefixes: endpats[_prefix + "'"] = _compile(Single) endpats[_prefix + '"'] = _compile(Double) endpats[_prefix + "'''"] = _compile(Single3) endpats[_prefix + '"""'] = _compile(Double3) # A set of all of the single and triple quoted string prefixes, # including the opening quotes. single_quoted = set() triple_quoted = set() fstring_pattern_map = {} for t in possible_prefixes: for quote in '"', "'": single_quoted.add(t + quote) for quote in '"""', "'''": triple_quoted.add(t + quote) for t in fstring_prefixes: for quote in all_quotes: fstring_pattern_map[t + quote] = quote pseudo_token_compiled = _compile(PseudoToken) return TokenCollection( pseudo_token_compiled, single_quoted, triple_quoted, endpats, _compile(Whitespace), fstring_pattern_map, { ";", "import", "class", "def", "try", "except", "finally", "while", "with", "return", }, ) class Token(namedtuple("Token", ["type", "string", "start_pos", "prefix"])): @property def end_pos(self): lines = split_lines(self.string) if len(lines) > 1: return self.start_pos[0] + len(lines) - 1, 0 else: return self.start_pos[0], self.start_pos[1] + len(self.string) class PythonToken(Token): def __repr__(self): return "TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)" % self._replace( type=self.type.name ) class FStringNode: def __init__(self, quote, raw): self.quote = quote self.raw = raw self.parentheses_count = 0 self.previous_lines = "" self.last_string_start_pos = None # In the syntax there can be multiple format_spec's nested: # {x:{y:3}} self.format_spec_count = 0 def open_parentheses(self, character): self.parentheses_count += 1 def close_parentheses(self, character): self.parentheses_count -= 1 if self.parentheses_count == 0: # No parentheses means that the format spec is also finished. 
self.format_spec_count = 0 def allow_multiline(self): return len(self.quote) == 3 def is_in_expr(self): return self.parentheses_count > self.format_spec_count def is_in_format_spec(self): return not self.is_in_expr() and self.format_spec_count def _close_fstring_if_necessary(fstring_stack, string, start_pos, additional_prefix): for fstring_stack_index, node in enumerate(fstring_stack): if string.startswith(node.quote): token = PythonToken( FSTRING_END, node.quote, start_pos, prefix=additional_prefix ) additional_prefix = "" assert not node.previous_lines del fstring_stack[fstring_stack_index:] return token, "", len(node.quote) return None, additional_prefix, 0 def _find_fstring_string(endpats, fstring_stack, line, lnum, pos): tos = fstring_stack[-1] allow_multiline = tos.allow_multiline() if tos.is_in_format_spec(): if allow_multiline: regex = fstring_format_spec_multi_line else: regex = fstring_format_spec_single_line else: if tos.raw: regex = fstring_raw_string elif allow_multiline: regex = fstring_string_multi_line else: regex = fstring_string_single_line match = regex.match(line, pos) if match is None: return tos.previous_lines, pos if not tos.previous_lines: tos.last_string_start_pos = (lnum, pos) string = match.group(0) for fstring_stack_node in fstring_stack: end_match = endpats[fstring_stack_node.quote].match(string) if end_match is not None: string = end_match.group(0)[: -len(fstring_stack_node.quote)] new_pos = pos new_pos += len(string) # even if allow_multiline is False, we still need to check for trailing # newlines, because a single-line f-string can contain line continuations if string.endswith("\n") or string.endswith("\r"): tos.previous_lines += string string = "" else: string = tos.previous_lines + string return string, new_pos def tokenize( code: str, version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0) ) -> Generator[PythonToken, None, None]: """Generate tokens from a the source code (string).""" lines = split_lines(code, keepends=True) return tokenize_lines(lines, version_info, start_pos=start_pos) def tokenize_lines( # noqa: C901 lines: Iterable[str], version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0), ) -> Generator[PythonToken, None, None]: token_collection = _get_token_collection(version_info) if version_info >= PythonVersionInfo(3, 7): return _tokenize_lines_py37_or_above( lines, version_info, token_collection, start_pos=start_pos ) else: return _tokenize_lines_py36_or_below( lines, version_info, token_collection, start_pos=start_pos ) def _tokenize_lines_py36_or_below( # noqa: C901 lines: Iterable[str], version_info: PythonVersionInfo, token_collection: TokenCollection, start_pos: Tuple[int, int] = (1, 0), ) -> Generator[PythonToken, None, None]: """ A heavily modified Python standard library tokenizer. Additionally to the default information, yields also the prefix of each token. This idea comes from lib2to3. The prefix contains all information that is irrelevant for the parser like newlines in parentheses or comments. """ paren_level = 0 # count parentheses indents = [0] max = 0 numchars = "0123456789" contstr = "" contline = None # We start with a newline. This makes indent at the first position # possible. It's not valid Python, but still better than an INDENT in the # second line (and not in the first). This makes quite a few things in # Jedi's fast parser possible. 
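    # Concretely: because we begin in the "new line" state, an indent on the
    # very first input line is emitted as INDENT/ERROR_DEDENT tokens rather
    # than being rejected outright.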
new_line = True prefix = "" # Should never be required, but here for safety endprog = None # Should not be required, but here for lint contstr_start: Optional[Tuple[int, int]] = None additional_prefix = "" first = True lnum = start_pos[0] - 1 fstring_stack = [] # stash and async_* are used for async/await parsing stashed: Optional[PythonToken] = None async_def: bool = False async_def_indent: int = 0 async_def_newline: bool = False def dedent_if_necessary(start): nonlocal stashed nonlocal async_def nonlocal async_def_indent nonlocal async_def_newline while start < indents[-1]: if start > indents[-2]: yield PythonToken(ERROR_DEDENT, "", (lnum, 0), "") break if stashed is not None: yield stashed stashed = None if async_def and async_def_newline and async_def_indent >= indents[-1]: # We exited an 'async def' block, so stop tracking for indents async_def = False async_def_newline = False async_def_indent = 0 yield PythonToken(DEDENT, "", spos, "") indents.pop() for line in lines: # loop over lines in stream lnum += 1 pos = 0 max = len(line) if first: if line.startswith(BOM_UTF8_STRING): additional_prefix = BOM_UTF8_STRING line = line[1:] max = len(line) # Fake that the part before was already parsed. line = "^" * start_pos[1] + line pos = start_pos[1] max += start_pos[1] first = False if contstr: # continued string if endprog is None: raise Exception("Logic error!") endmatch = endprog.match(line) if endmatch: pos = endmatch.end(0) if contstr_start is None: raise Exception("Logic error!") if stashed is not None: raise Exception("Logic error!") yield PythonToken(STRING, contstr + line[:pos], contstr_start, prefix) contstr = "" contline = None else: contstr = contstr + line contline = contline + line continue while pos < max: if fstring_stack: tos = fstring_stack[-1] if not tos.is_in_expr(): string, pos = _find_fstring_string( token_collection.endpats, fstring_stack, line, lnum, pos ) if string: if stashed is not None: raise Exception("Logic error!") yield PythonToken( FSTRING_STRING, string, tos.last_string_start_pos, # Never has a prefix because it can start anywhere and # include whitespace. prefix="", ) tos.previous_lines = "" continue if pos == max: break rest = line[pos:] ( fstring_end_token, additional_prefix, quote_length, ) = _close_fstring_if_necessary( fstring_stack, rest, (lnum, pos), additional_prefix ) pos += quote_length if fstring_end_token is not None: if stashed is not None: raise Exception("Logic error!") yield fstring_end_token continue pseudomatch = token_collection.pseudo_token.match(line, pos) if not pseudomatch: # scan for tokens match = token_collection.whitespace.match(line, pos) if pos == 0: # pyre-fixme[16]: `Optional` has no attribute `end`. yield from dedent_if_necessary(match.end()) pos = match.end() new_line = False yield PythonToken( ERRORTOKEN, line[pos], (lnum, pos), # pyre-fixme[16]: `Optional` has no attribute `group`. additional_prefix + match.group(0), ) additional_prefix = "" pos += 1 continue prefix = additional_prefix + pseudomatch.group(1) additional_prefix = "" start, pos = pseudomatch.span(2) spos = (lnum, start) token = pseudomatch.group(2) if token == "": assert prefix additional_prefix = prefix # This means that we have a line with whitespace/comments at # the end, which just results in an endmarker. break initial = token[0] if new_line and initial not in "\r\n\\#": new_line = False if paren_level == 0 and not fstring_stack: i = 0 indent_start = start while line[i] == "\f": i += 1 # TODO don't we need to change spos as well? 
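                        # Each form feed consumes a source column but must
                        # not count toward the indentation level.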
indent_start -= 1 if indent_start > indents[-1]: if stashed is not None: yield stashed stashed = None yield PythonToken(INDENT, "", spos, "") indents.append(indent_start) yield from dedent_if_necessary(indent_start) if initial in numchars or ( # ordinary number initial == "." and token != "." and token != "..." ): if stashed is not None: yield stashed stashed = None yield PythonToken(NUMBER, token, spos, prefix) elif pseudomatch.group(3) is not None: # ordinary name if token in token_collection.always_break_tokens: fstring_stack[:] = [] paren_level = 0 # We only want to dedent if the token is on a new line. if re.match(r"[ \f\t]*$", line[:start]): while True: indent = indents.pop() if indent > start: if ( async_def and async_def_newline and async_def_indent >= indent ): # We dedented outside of an 'async def' block. async_def = False async_def_newline = False async_def_indent = 0 if stashed is not None: yield stashed stashed = None yield PythonToken(DEDENT, "", spos, "") else: indents.append(indent) break if str.isidentifier(token): should_yield_identifier = True if token in ("async", "await") and async_def: # We're inside an 'async def' block, all async/await are # tokens. if token == "async": yield PythonToken(ASYNC, token, spos, prefix) else: yield PythonToken(AWAIT, token, spos, prefix) should_yield_identifier = False # We are possibly starting an 'async def' section elif token == "async" and not stashed: stashed = PythonToken(NAME, token, spos, prefix) should_yield_identifier = False # We actually are starting an 'async def' section elif ( token == "def" and stashed is not None and stashed[0] is NAME and stashed[1] == "async" ): async_def = True async_def_indent = indents[-1] yield PythonToken(ASYNC, stashed[1], stashed[2], stashed[3]) stashed = None # We are either not stashed, or we output an ASYNC token above. elif stashed: yield stashed stashed = None # If we didn't bail early due to possibly recognizing an 'async def', # then we should yield this token as normal. if should_yield_identifier: yield PythonToken(NAME, token, spos, prefix) else: yield from _split_illegal_unicode_name(token, spos, prefix) elif initial in "\r\n": if any(not f.allow_multiline() for f in fstring_stack): # Would use fstring_stack.clear, but that's not available # in Python 2. fstring_stack[:] = [] if not new_line and paren_level == 0 and not fstring_stack: if async_def: async_def_newline = True if stashed: yield stashed stashed = None yield PythonToken(NEWLINE, token, spos, prefix) else: additional_prefix = prefix + token new_line = True elif initial == "#": # Comments assert not token.endswith("\n") additional_prefix = prefix + token elif token in token_collection.triple_quoted: endprog = token_collection.endpats[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] if stashed is not None: yield stashed stashed = None yield PythonToken(STRING, token, spos, prefix) else: contstr_start = (lnum, start) # multiple lines contstr = line[start:] contline = line break # Check up to the first 3 chars of the token to see if # they're in the single_quoted set. If so, they start # a string. # We're using the first 3, because we're looking for # "rb'" (for example) at the start of the token. If # we switch to longer prefixes, this needs to be # adjusted. # Note that initial == token[:1]. # Also note that single quote checking must come after # triple quote checking (above). 
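            # For example, for the token rb'x': initial == "r",
            # token[:2] == "rb", and token[:3] == "rb'" -- only the last of
            # these appears in the single_quoted set.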
elif ( initial in token_collection.single_quoted or token[:2] in token_collection.single_quoted or token[:3] in token_collection.single_quoted ): if token[-1] in "\r\n": # continued string # This means that a single quoted string ends with a # backslash and is continued. contstr_start = lnum, start endprog = ( token_collection.endpats.get(initial) or token_collection.endpats.get(token[1]) or token_collection.endpats.get(token[2]) ) contstr = line[start:] contline = line break else: # ordinary string if stashed is not None: yield stashed stashed = None yield PythonToken(STRING, token, spos, prefix) elif ( token in token_collection.fstring_pattern_map ): # The start of an fstring. fstring_stack.append( FStringNode( token_collection.fstring_pattern_map[token], "r" in token or "R" in token, ) ) if stashed is not None: yield stashed stashed = None yield PythonToken(FSTRING_START, token, spos, prefix) elif initial == "\\" and line[start:] in ( "\\\n", "\\\r\n", "\\\r", ): # continued stmt additional_prefix += prefix + line[start:] break else: if token in "([{": if fstring_stack: fstring_stack[-1].open_parentheses(token) else: paren_level += 1 elif token in ")]}": if fstring_stack: fstring_stack[-1].close_parentheses(token) else: if paren_level: paren_level -= 1 elif ( token == ":" and fstring_stack and fstring_stack[-1].parentheses_count - fstring_stack[-1].format_spec_count == 1 ): fstring_stack[-1].format_spec_count += 1 if stashed is not None: yield stashed stashed = None yield PythonToken(OP, token, spos, prefix) if contstr: yield PythonToken(ERRORTOKEN, contstr, contstr_start, prefix) if contstr.endswith("\n") or contstr.endswith("\r"): new_line = True if stashed is not None: yield stashed stashed = None end_pos = lnum, max # As the last position we just take the maximally possible position. We # remove -1 for the last new line. for indent in indents[1:]: yield PythonToken(DEDENT, "", end_pos, "") yield PythonToken(ENDMARKER, "", end_pos, additional_prefix) def _tokenize_lines_py37_or_above( # noqa: C901 lines: Iterable[str], version_info: PythonVersionInfo, token_collection: TokenCollection, start_pos: Tuple[int, int] = (1, 0), ) -> Generator[PythonToken, None, None]: """ A heavily modified Python standard library tokenizer. Additionally to the default information, yields also the prefix of each token. This idea comes from lib2to3. The prefix contains all information that is irrelevant for the parser like newlines in parentheses or comments. """ def dedent_if_necessary(start): while start < indents[-1]: if start > indents[-2]: yield PythonToken(ERROR_DEDENT, "", (lnum, 0), "") break yield PythonToken(DEDENT, "", spos, "") indents.pop() paren_level = 0 # count parentheses indents = [0] max = 0 numchars = "0123456789" contstr = "" contline = None # We start with a newline. This makes indent at the first position # possible. It's not valid Python, but still better than an INDENT in the # second line (and not in the first). This makes quite a few things in # Jedi's fast parser possible. new_line = True prefix = "" # Should never be required, but here for safety endprog = None # Should not be required, but here for lint contstr_start: Optional[Tuple[int, int]] = None additional_prefix = "" first = True lnum = start_pos[0] - 1 fstring_stack = [] for line in lines: # loop over lines in stream lnum += 1 pos = 0 max = len(line) if first: if line.startswith(BOM_UTF8_STRING): additional_prefix = BOM_UTF8_STRING line = line[1:] max = len(line) # Fake that the part before was already parsed. 
line = "^" * start_pos[1] + line pos = start_pos[1] max += start_pos[1] first = False if contstr: # continued string if endprog is None: raise Exception("Logic error!") endmatch = endprog.match(line) if endmatch: pos = endmatch.end(0) if contstr_start is None: raise Exception("Logic error!") yield PythonToken(STRING, contstr + line[:pos], contstr_start, prefix) contstr = "" contline = None else: contstr = contstr + line contline = contline + line continue while pos < max: if fstring_stack: tos = fstring_stack[-1] if not tos.is_in_expr(): string, pos = _find_fstring_string( token_collection.endpats, fstring_stack, line, lnum, pos ) if string: yield PythonToken( FSTRING_STRING, string, tos.last_string_start_pos, # Never has a prefix because it can start anywhere and # include whitespace. prefix="", ) tos.previous_lines = "" continue if pos == max: break rest = line[pos:] ( fstring_end_token, additional_prefix, quote_length, ) = _close_fstring_if_necessary( fstring_stack, rest, (lnum, pos), additional_prefix ) pos += quote_length if fstring_end_token is not None: yield fstring_end_token continue pseudomatch = token_collection.pseudo_token.match(line, pos) if not pseudomatch: # scan for tokens match = token_collection.whitespace.match(line, pos) if pos == 0: # pyre-fixme[16]: `Optional` has no attribute `end`. for t in dedent_if_necessary(match.end()): yield t pos = match.end() new_line = False yield PythonToken( ERRORTOKEN, line[pos], (lnum, pos), # pyre-fixme[16]: `Optional` has no attribute `group`. additional_prefix + match.group(0), ) additional_prefix = "" pos += 1 continue prefix = additional_prefix + pseudomatch.group(1) additional_prefix = "" start, pos = pseudomatch.span(2) spos = (lnum, start) token = pseudomatch.group(2) if token == "": assert prefix additional_prefix = prefix # This means that we have a line with whitespace/comments at # the end, which just results in an endmarker. break initial = token[0] if new_line and initial not in "\r\n\\#": new_line = False if paren_level == 0 and not fstring_stack: i = 0 indent_start = start while line[i] == "\f": i += 1 # TODO don't we need to change spos as well? indent_start -= 1 if indent_start > indents[-1]: yield PythonToken(INDENT, "", spos, "") indents.append(indent_start) for t in dedent_if_necessary(indent_start): yield t if initial in numchars or ( # ordinary number initial == "." and token != "." and token != "..." ): yield PythonToken(NUMBER, token, spos, prefix) elif pseudomatch.group(3) is not None: # ordinary name if token in token_collection.always_break_tokens: fstring_stack[:] = [] paren_level = 0 # We only want to dedent if the token is on a new line. if re.match(r"[ \f\t]*$", line[:start]): while True: indent = indents.pop() if indent > start: yield PythonToken(DEDENT, "", spos, "") else: indents.append(indent) break if str.isidentifier(token): # py37 doesn't need special tokens for async/await, and we could # emit NAME, but then we'd need different grammar for py36 and py37. if token == "async": yield PythonToken(ASYNC, token, spos, prefix) elif token == "await": yield PythonToken(AWAIT, token, spos, prefix) else: yield PythonToken(NAME, token, spos, prefix) else: for t in _split_illegal_unicode_name(token, spos, prefix): yield t # yield from Python 2 elif initial in "\r\n": if any(not f.allow_multiline() for f in fstring_stack): # Would use fstring_stack.clear, but that's not available # in Python 2. 
fstring_stack[:] = [] if not new_line and paren_level == 0 and not fstring_stack: yield PythonToken(NEWLINE, token, spos, prefix) else: additional_prefix = prefix + token new_line = True elif initial == "#": # Comments assert not token.endswith("\n") additional_prefix = prefix + token elif token in token_collection.triple_quoted: endprog = token_collection.endpats[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield PythonToken(STRING, token, spos, prefix) else: contstr_start = (lnum, start) # multiple lines contstr = line[start:] contline = line break # Check up to the first 3 chars of the token to see if # they're in the single_quoted set. If so, they start # a string. # We're using the first 3, because we're looking for # "rb'" (for example) at the start of the token. If # we switch to longer prefixes, this needs to be # adjusted. # Note that initial == token[:1]. # Also note that single quote checking must come after # triple quote checking (above). elif ( initial in token_collection.single_quoted or token[:2] in token_collection.single_quoted or token[:3] in token_collection.single_quoted ): if token[-1] in "\r\n": # continued string # This means that a single quoted string ends with a # backslash and is continued. contstr_start = lnum, start endprog = ( token_collection.endpats.get(initial) or token_collection.endpats.get(token[1]) or token_collection.endpats.get(token[2]) ) contstr = line[start:] contline = line break else: # ordinary string yield PythonToken(STRING, token, spos, prefix) elif ( token in token_collection.fstring_pattern_map ): # The start of an fstring. fstring_stack.append( FStringNode( token_collection.fstring_pattern_map[token], "r" in token or "R" in token, ) ) yield PythonToken(FSTRING_START, token, spos, prefix) elif initial == "\\" and line[start:] in ( "\\\n", "\\\r\n", "\\\r", ): # continued stmt additional_prefix += prefix + line[start:] break else: if token in "([{": if fstring_stack: fstring_stack[-1].open_parentheses(token) else: paren_level += 1 elif token in ")]}": if fstring_stack: fstring_stack[-1].close_parentheses(token) else: if paren_level: paren_level -= 1 elif ( token == ":" and fstring_stack and fstring_stack[-1].parentheses_count - fstring_stack[-1].format_spec_count == 1 ): fstring_stack[-1].format_spec_count += 1 yield PythonToken(OP, token, spos, prefix) if contstr: yield PythonToken(ERRORTOKEN, contstr, contstr_start, prefix) if contstr.endswith("\n") or contstr.endswith("\r"): new_line = True end_pos = lnum, max # As the last position we just take the maximally possible position. We # remove -1 for the last new line. 
for indent in indents[1:]: yield PythonToken(DEDENT, "", end_pos, "") yield PythonToken(ENDMARKER, "", end_pos, additional_prefix) def _split_illegal_unicode_name( token: str, start_pos: Tuple[int, int], prefix: str ) -> Generator[PythonToken, None, None]: def create_token(): return PythonToken(ERRORTOKEN if is_illegal else NAME, found, pos, prefix) found = "" is_illegal = False pos = start_pos for i, char in enumerate(token): if is_illegal: if str.isidentifier(char): yield create_token() found = char is_illegal = False prefix = "" pos = start_pos[0], start_pos[1] + i else: found += char else: new_found = found + char if str.isidentifier(new_found): found = new_found else: if found: yield create_token() prefix = "" pos = start_pos[0], start_pos[1] + i found = char is_illegal = True if found: yield create_token() LibCST-1.2.0/libcst/_parser/parso/tests/000077500000000000000000000000001456464173300200105ustar00rootroot00000000000000LibCST-1.2.0/libcst/_parser/parso/tests/__init__.py000066400000000000000000000002631456464173300221220ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/_parser/parso/tests/test_fstring.py000066400000000000000000000031061456464173300230750ustar00rootroot00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # # Modifications: # Copyright David Halter and Contributors # Modifications are dual-licensed: MIT and PSF. # 99% of the code is different from pgen2, now. # # A fork of Parso's tokenize test # https://github.com/davidhalter/parso/blob/master/test/test_tokenize.py # # The following changes were made: # - Convert base test to Unittet # - Remove grammar-specific tests # pyre-unsafe from libcst._parser.parso.python.tokenize import tokenize from libcst._parser.parso.utils import parse_version_string from libcst.testing.utils import data_provider, UnitTest class ParsoTokenizeTest(UnitTest): @data_provider( ( # 2 times 2, 5 because python expr and endmarker. ('f"}{"', [(1, 0), (1, 2), (1, 3), (1, 4), (1, 5)]), ( 'f" :{ 1 : } "', [ (1, 0), (1, 2), (1, 4), (1, 6), (1, 8), (1, 9), (1, 10), (1, 11), (1, 12), (1, 13), ], ), ( 'f"""\n {\nfoo\n }"""', [(1, 0), (1, 4), (2, 1), (3, 0), (4, 1), (4, 2), (4, 5)], ), ) ) def test_tokenize_start_pos(self, code, positions): tokens = list(tokenize(code, version_info=parse_version_string("3.6"))) assert positions == [p.start_pos for p in tokens] LibCST-1.2.0/libcst/_parser/parso/tests/test_tokenize.py000066400000000000000000000332611456464173300232560ustar00rootroot00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # # Modifications: # Copyright David Halter and Contributors # Modifications are dual-licensed: MIT and PSF. # 99% of the code is different from pgen2, now. # # A fork of Parso's tokenize test # https://github.com/davidhalter/parso/blob/master/test/test_tokenize.py # # The following changes were made: # - Convert base test to Unittet # - Remove grammar-specific tests # pyre-unsafe # -*- coding: utf-8 # This file contains Unicode characters. 
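# These tests are plain unittest cases and can be run with any
# unittest-compatible runner, e.g. (illustrative invocation):
#
#     python -m unittest libcst._parser.parso.tests.test_tokenize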
from textwrap import dedent from libcst._parser.parso.python.token import PythonTokenTypes from libcst._parser.parso.python.tokenize import PythonToken, tokenize from libcst._parser.parso.utils import parse_version_string, split_lines from libcst.testing.utils import data_provider, UnitTest # To make it easier to access some of the token types, just put them here. NAME = PythonTokenTypes.NAME NEWLINE = PythonTokenTypes.NEWLINE STRING = PythonTokenTypes.STRING NUMBER = PythonTokenTypes.NUMBER INDENT = PythonTokenTypes.INDENT DEDENT = PythonTokenTypes.DEDENT ERRORTOKEN = PythonTokenTypes.ERRORTOKEN OP = PythonTokenTypes.OP ENDMARKER = PythonTokenTypes.ENDMARKER ERROR_DEDENT = PythonTokenTypes.ERROR_DEDENT FSTRING_START = PythonTokenTypes.FSTRING_START FSTRING_STRING = PythonTokenTypes.FSTRING_STRING FSTRING_END = PythonTokenTypes.FSTRING_END def _get_token_list(string, version=None): # Load the current version. version_info = parse_version_string(version) return list(tokenize(string, version_info)) class ParsoTokenizerTest(UnitTest): def test_simple_no_whitespace(self): # Test a simple one line string, no preceding whitespace simple_docstring = '"""simple one line docstring"""' token_list = _get_token_list(simple_docstring) _, value, _, prefix = token_list[0] assert prefix == "" assert value == '"""simple one line docstring"""' def test_simple_with_whitespace(self): # Test a simple one line string with preceding whitespace and newline simple_docstring = ' """simple one line docstring""" \r\n' token_list = _get_token_list(simple_docstring) assert token_list[0][0] == INDENT typ, value, start_pos, prefix = token_list[1] assert prefix == " " assert value == '"""simple one line docstring"""' assert typ == STRING typ, value, start_pos, prefix = token_list[2] assert prefix == " " assert typ == NEWLINE def test_function_whitespace(self): # Test function definition whitespace identification fundef = dedent( """ def test_whitespace(*args, **kwargs): x = 1 if x > 0: print(True) """ ) token_list = _get_token_list(fundef) for _, value, _, prefix in token_list: if value == "test_whitespace": assert prefix == " " if value == "(": assert prefix == "" if value == "*": assert prefix == "" if value == "**": assert prefix == " " if value == "print": assert prefix == " " if value == "if": assert prefix == " " def test_tokenize_multiline_I(self): # Make sure multiline string having newlines have the end marker on the # next line fundef = '''""""\n''' token_list = _get_token_list(fundef) assert token_list == [ PythonToken(ERRORTOKEN, '""""\n', (1, 0), ""), PythonToken(ENDMARKER, "", (2, 0), ""), ] def test_tokenize_multiline_II(self): # Make sure multiline string having no newlines have the end marker on # same line fundef = '''""""''' token_list = _get_token_list(fundef) assert token_list == [ PythonToken(ERRORTOKEN, '""""', (1, 0), ""), PythonToken(ENDMARKER, "", (1, 4), ""), ] def test_tokenize_multiline_III(self): # Make sure multiline string having newlines have the end marker on the # next line even if several newline fundef = '''""""\n\n''' token_list = _get_token_list(fundef) assert token_list == [ PythonToken(ERRORTOKEN, '""""\n\n', (1, 0), ""), PythonToken(ENDMARKER, "", (3, 0), ""), ] def test_identifier_contains_unicode(self): fundef = dedent( """ def 我あφ(): pass """ ) token_list = _get_token_list(fundef) unicode_token = token_list[1] assert unicode_token[0] == NAME def test_ur_literals(self): """ Decided to parse `u''` literals regardless of Python version. 
This makes probably sense: - Python 3+ doesn't support it, but it doesn't hurt not be. While this is incorrect, it's just incorrect for one "old" and in the future not very important version. - All the other Python versions work very well with it. """ def check(literal, is_literal=True): token_list = _get_token_list(literal) typ, result_literal, _, _ = token_list[0] if is_literal: if typ != FSTRING_START: assert typ == STRING assert result_literal == literal else: assert typ == NAME check('u""') check('ur""', is_literal=False) check('Ur""', is_literal=False) check('UR""', is_literal=False) check('bR""') # Starting with Python 3.3 this ordering is also possible. check('Rb""') # Starting with Python 3.6 format strings where introduced. check('fr""', is_literal=True) check('rF""', is_literal=True) check('f""', is_literal=True) check('F""', is_literal=True) def test_error_literal(self): error_token, newline, endmarker = _get_token_list('"\n') assert error_token.type == ERRORTOKEN assert error_token.string == '"' assert newline.type == NEWLINE assert endmarker.type == ENDMARKER assert endmarker.prefix == "" bracket, error_token, endmarker = _get_token_list('( """') assert error_token.type == ERRORTOKEN assert error_token.prefix == " " assert error_token.string == '"""' assert endmarker.type == ENDMARKER assert endmarker.prefix == "" def test_endmarker_end_pos(self): def check(code): tokens = _get_token_list(code) lines = split_lines(code) assert tokens[-1].end_pos == (len(lines), len(lines[-1])) check("#c") check("#c\n") check("a\n") check("a") check(r"a\\n") check("a\\") @data_provider( ( # Indentation (" foo", [INDENT, NAME, DEDENT]), (" foo\n bar", [INDENT, NAME, NEWLINE, ERROR_DEDENT, NAME, DEDENT]), ( " foo\n bar \n baz", [ INDENT, NAME, NEWLINE, ERROR_DEDENT, NAME, NEWLINE, ERROR_DEDENT, NAME, DEDENT, ], ), (" foo\nbar", [INDENT, NAME, NEWLINE, DEDENT, NAME]), # Name stuff ("1foo1", [NUMBER, NAME]), ("மெல்லினம்", [NAME]), ("²", [ERRORTOKEN]), ("ä²ö", [NAME, ERRORTOKEN, NAME]), ("ää²¹öö", [NAME, ERRORTOKEN, NAME]), ) ) def test_token_types(self, code, types): actual_types = [t.type for t in _get_token_list(code)] assert actual_types == types + [ENDMARKER] def test_error_string(self): t1, newline, endmarker = _get_token_list(' "\n') assert t1.type == ERRORTOKEN assert t1.prefix == " " assert t1.string == '"' assert newline.type == NEWLINE assert endmarker.prefix == "" assert endmarker.string == "" def test_indent_error_recovery(self): code = dedent( """\ str( from x import a def """ ) lst = _get_token_list(code) expected = [ # `str(` INDENT, NAME, OP, # `from parso` NAME, NAME, # `import a` on same line as the previous from parso NAME, NAME, NEWLINE, # Dedent happens, because there's an import now and the import # statement "breaks" out of the opening paren on the first line. DEDENT, # `b` NAME, NEWLINE, ENDMARKER, ] assert [t.type for t in lst] == expected def test_error_token_after_dedent(self): code = dedent( """\ class C: pass $foo """ ) lst = _get_token_list(code) expected = [ NAME, NAME, OP, NEWLINE, INDENT, NAME, NEWLINE, DEDENT, # $foo\n ERRORTOKEN, NAME, NEWLINE, ENDMARKER, ] assert [t.type for t in lst] == expected def test_brackets_no_indentation(self): """ There used to be an issue that the parentheses counting would go below zero. This should not happen. 
""" code = dedent( """\ } { } """ ) lst = _get_token_list(code) assert [t.type for t in lst] == [OP, NEWLINE, OP, OP, NEWLINE, ENDMARKER] def test_form_feed(self): error_token, endmarker = _get_token_list( dedent( '''\ \f"""''' ) ) assert error_token.prefix == "\f" assert error_token.string == '"""' assert endmarker.prefix == "" def test_carriage_return(self): lst = _get_token_list(" =\\\rclass") assert [t.type for t in lst] == [INDENT, OP, DEDENT, NAME, ENDMARKER] def test_backslash(self): code = "\\\n# 1 \n" (endmarker,) = _get_token_list(code) assert endmarker.prefix == code @data_provider( ( ('f"', [FSTRING_START], "3.7"), ('f""', [FSTRING_START, FSTRING_END], "3.7"), ('f" {}"', [FSTRING_START, FSTRING_STRING, OP, OP, FSTRING_END], "3.7"), ('f" "{}', [FSTRING_START, FSTRING_STRING, FSTRING_END, OP, OP], "3.7"), (r'f"\""', [FSTRING_START, FSTRING_STRING, FSTRING_END], "3.7"), (r'f"\""', [FSTRING_START, FSTRING_STRING, FSTRING_END], "3.7"), # format spec ( r'f"Some {x:.2f}{y}"', [ FSTRING_START, FSTRING_STRING, OP, NAME, OP, FSTRING_STRING, OP, OP, NAME, OP, FSTRING_END, ], "3.7", ), # multiline f-string ('f"""abc\ndef"""', [FSTRING_START, FSTRING_STRING, FSTRING_END], "3.7"), ( 'f"""abc{\n123}def"""', [ FSTRING_START, FSTRING_STRING, OP, NUMBER, OP, FSTRING_STRING, FSTRING_END, ], "3.7", ), # a line continuation inside of an fstring_string ('f"abc\\\ndef"', [FSTRING_START, FSTRING_STRING, FSTRING_END], "3.7"), ( 'f"\\\n{123}\\\n"', [ FSTRING_START, FSTRING_STRING, OP, NUMBER, OP, FSTRING_STRING, FSTRING_END, ], "3.7", ), # a line continuation inside of an fstring_expr ('f"{\\\n123}"', [FSTRING_START, OP, NUMBER, OP, FSTRING_END], "3.7"), # a line continuation inside of an format spec ( 'f"{123:.2\\\nf}"', [FSTRING_START, OP, NUMBER, OP, FSTRING_STRING, OP, FSTRING_END], "3.7", ), # a newline without a line continuation inside a single-line string is # wrong, and will generate an ERRORTOKEN ( 'f"abc\ndef"', [FSTRING_START, FSTRING_STRING, NEWLINE, NAME, ERRORTOKEN], "3.7", ), # a more complex example ( r'print(f"Some {x:.2f}a{y}")', [ NAME, OP, FSTRING_START, FSTRING_STRING, OP, NAME, OP, FSTRING_STRING, OP, FSTRING_STRING, OP, NAME, OP, FSTRING_END, OP, ], "3.7", ), ) ) def test_fstring(self, code, types, py_version): actual_types = [t.type for t in _get_token_list(code, py_version)] assert types + [ENDMARKER] == actual_types LibCST-1.2.0/libcst/_parser/parso/tests/test_utils.py000066400000000000000000000040131456464173300225570ustar00rootroot00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # # Modifications: # Copyright David Halter and Contributors # Modifications are dual-licensed: MIT and PSF. # 99% of the code is different from pgen2, now. 
# # A fork of Parso's tokenize test # https://github.com/davidhalter/parso/blob/master/test/test_tokenize.py # # The following changes were made: # - Convert base test to Unittet # - Remove grammar-specific tests # pyre-unsafe from libcst._parser.parso.utils import python_bytes_to_unicode, split_lines from libcst.testing.utils import data_provider, UnitTest class ParsoUtilsTest(UnitTest): @data_provider( ( ("asd\r\n", ["asd", ""], False), ("asd\r\n", ["asd\r\n", ""], True), ("asd\r", ["asd", ""], False), ("asd\r", ["asd\r", ""], True), ("asd\n", ["asd", ""], False), ("asd\n", ["asd\n", ""], True), ("asd\r\n\f", ["asd", "\f"], False), ("asd\r\n\f", ["asd\r\n", "\f"], True), ("\fasd\r\n", ["\fasd", ""], False), ("\fasd\r\n", ["\fasd\r\n", ""], True), ("", [""], False), ("", [""], True), ("\n", ["", ""], False), ("\n", ["\n", ""], True), ("\r", ["", ""], False), ("\r", ["\r", ""], True), # Invalid line breaks ("a\vb", ["a\vb"], False), ("a\vb", ["a\vb"], True), ("\x1C", ["\x1C"], False), ("\x1C", ["\x1C"], True), ) ) def test_split_lines(self, string, expected_result, keepends): assert split_lines(string, keepends=keepends) == expected_result def test_python_bytes_to_unicode_unicode_text(self): source = ( b"# vim: fileencoding=utf-8\n" + b"# \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\n" ) actual = python_bytes_to_unicode(source) expected = source.decode("utf-8") assert actual == expected LibCST-1.2.0/libcst/_parser/parso/utils.py000066400000000000000000000166151456464173300203710ustar00rootroot00000000000000# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # # Modifications: # Copyright David Halter and Contributors # Modifications are dual-licensed: MIT and PSF. # 99% of the code is different from pgen2, now. # # A fork of `parso.utils`. # https://github.com/davidhalter/parso/blob/master/parso/utils.py # # The following changes were made: # - Drop Python 2 compatibility layer # - Use dataclasses instead of namedtuple # - Apply type hints directly to files # - Make PythonVersionInfo directly usable in hashmaps # - Unroll total ordering because Pyre doesn't understand it import re import sys from ast import literal_eval from dataclasses import dataclass from typing import Optional, Sequence, Tuple, Union # The following is a list in Python that are line breaks in str.splitlines, but # not in Python. In Python only \r (Carriage Return, 0xD) and \n (Line Feed, # 0xA) are allowed to split lines. _NON_LINE_BREAKS = ( "\v", # Vertical Tabulation 0xB "\f", # Form Feed 0xC "\x1C", # File Separator "\x1D", # Group Separator "\x1E", # Record Separator "\x85", # Next Line (NEL - Equivalent to CR+LF. # Used to mark end-of-line on some IBM mainframes.) "\u2028", # Line Separator "\u2029", # Paragraph Separator ) @dataclass(frozen=True) class Version: major: int minor: int micro: int def split_lines(string: str, keepends: bool = False) -> Sequence[str]: r""" Intended for Python code. In contrast to Python's :py:meth:`str.splitlines`, looks at form feeds and other special characters as normal text. Just splits ``\n`` and ``\r\n``. Also different: Returns ``[""]`` for an empty string input. In Python 2.7 form feeds are used as normal characters when using str.splitlines. However in Python 3 somewhere there was a decision to split also on form feeds. """ if keepends: lst = string.splitlines(True) # We have to merge lines that were broken by form feed characters. 
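        # Collect the indices of lines whose final character is one of the
        # pseudo line breaks above, then glue each such line back onto its
        # successor below.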
merge = [] for i, line in enumerate(lst): try: last_chr = line[-1] except IndexError: pass else: if last_chr in _NON_LINE_BREAKS: merge.append(i) for index in reversed(merge): try: lst[index] = lst[index] + lst[index + 1] del lst[index + 1] except IndexError: # index + 1 can be empty and therefore there's no need to # merge. pass # The stdlib's implementation of the end is inconsistent when calling # it with/without keepends. One time there's an empty string in the # end, one time there's none. if string.endswith("\n") or string.endswith("\r") or string == "": lst.append("") return lst else: return re.split(r"\n|\r\n|\r", string) def python_bytes_to_unicode( source: Union[str, bytes], encoding: str = "utf-8", errors: str = "strict" ) -> str: """ Checks for unicode BOMs and PEP 263 encoding declarations. Then returns a unicode object like in :py:meth:`bytes.decode`. :param encoding: See :py:meth:`bytes.decode` documentation. :param errors: See :py:meth:`bytes.decode` documentation. ``errors`` can be ``'strict'``, ``'replace'`` or ``'ignore'``. """ def detect_encoding() -> Union[str, bytes]: """ For the implementation of encoding definitions in Python, look at: - http://www.python.org/dev/peps/pep-0263/ - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations """ byte_mark = literal_eval(r"b'\xef\xbb\xbf'") if source.startswith(byte_mark): # UTF-8 byte-order mark return b"utf-8" # pyre-ignore Pyre can't see that Union[str, bytes] conforms to AnyStr. first_two_match = re.match(rb"(?:[^\n]*\n){0,2}", source) if first_two_match is None: return encoding first_two_lines = first_two_match.group(0) possible_encoding = re.search(rb"coding[=:]\s*([-\w.]+)", first_two_lines) if possible_encoding: return possible_encoding.group(1) else: # the default if nothing else has been set -> PEP 263 return encoding if isinstance(source, str): # only cast bytes return source actual_encoding = detect_encoding() if not isinstance(actual_encoding, str): actual_encoding = actual_encoding.decode("utf-8", "replace") # Cast to str return source.decode(actual_encoding, errors) @dataclass(frozen=True) class PythonVersionInfo: major: int minor: int def __gt__(self, other: Union["PythonVersionInfo", Tuple[int, int]]) -> bool: if isinstance(other, tuple): if len(other) != 2: raise ValueError("Can only compare to tuples of length 2.") return (self.major, self.minor) > other return (self.major, self.minor) > (other.major, other.minor) def __ge__(self, other: Union["PythonVersionInfo", Tuple[int, int]]) -> bool: return self.__gt__(other) or self.__eq__(other) def __lt__(self, other: Union["PythonVersionInfo", Tuple[int, int]]) -> bool: if isinstance(other, tuple): if len(other) != 2: raise ValueError("Can only compare to tuples of length 2.") return (self.major, self.minor) < other return (self.major, self.minor) < (other.major, other.minor) def __le__(self, other: Union["PythonVersionInfo", Tuple[int, int]]) -> bool: return self.__lt__(other) or self.__eq__(other) def __eq__(self, other: Union["PythonVersionInfo", Tuple[int, int]]) -> bool: if isinstance(other, tuple): if len(other) != 2: raise ValueError("Can only compare to tuples of length 2.") return (self.major, self.minor) == other return (self.major, self.minor) == (other.major, other.minor) def __ne__(self, other: Union["PythonVersionInfo", Tuple[int, int]]) -> bool: return not self.__eq__(other) def __hash__(self) -> int: return hash((self.major, self.minor)) def _parse_version(version: str) -> PythonVersionInfo: match = 
re.match(r"(\d+)(?:\.(\d+)(?:\.\d+)?)?$", version) if match is None: raise ValueError( ( "The given version is not in the right format. " + 'Use something like "3.2" or "3".' ) ) major = int(match.group(1)) minor = match.group(2) if minor is None: # Use the latest Python in case it's not exactly defined, because the # grammars are typically backwards compatible? if major == 2: minor = "7" elif major == 3: minor = "6" else: raise NotImplementedError( "Sorry, no support yet for those fancy new/old versions." ) minor = int(minor) return PythonVersionInfo(major, minor) def parse_version_string(version: Optional[str] = None) -> PythonVersionInfo: """ Checks for a valid version number (e.g. `3.2` or `2.7.1` or `3`) and returns a corresponding version info that is always two characters long in decimal. """ if version is None: version = "%s.%s" % sys.version_info[:2] return _parse_version(version) LibCST-1.2.0/libcst/_parser/production_decorator.py000066400000000000000000000040431456464173300223250ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, Optional, Sequence, TypeVar from libcst._parser.types.conversions import NonterminalConversion from libcst._parser.types.production import Production _NonterminalConversionT = TypeVar( "_NonterminalConversionT", bound=NonterminalConversion ) # We could version our grammar at a later point by adding a version metadata kwarg to # this decorator. def with_production( production_name: str, children: str, *, version: Optional[str] = None, future: Optional[str] = None, # pyre-fixme[34]: `Variable[_NonterminalConversionT (bound to # typing.Callable[[libcst_native.parser_config.ParserConfig, # typing.Sequence[typing.Any]], typing.Any])]` isn't present in the function's # parameters. ) -> Callable[[_NonterminalConversionT], _NonterminalConversionT]: """ Attaches a bit of grammar to a conversion function. The parser extracts all of these production strings, and uses it to form the language's full grammar. If you need to attach multiple productions to the same conversion function """ def inner(fn: _NonterminalConversionT) -> _NonterminalConversionT: if not hasattr(fn, "productions"): fn.productions = [] # pyre-ignore: Pyre doesn't think that fn has a __name__ attribute fn_name = fn.__name__ if not fn_name.startswith("convert_"): raise Exception( "A function with a production must be named 'convert_X', not " + f"'{fn_name}'." ) # pyre-ignore: Pyre doesn't know about this magic field we added fn.productions.append(Production(production_name, children, version, future)) return fn return inner def get_productions(fn: NonterminalConversion) -> Sequence[Production]: # pyre-ignore Pyre doesn't know about this magic field we added return fn.productions LibCST-1.2.0/libcst/_parser/py_whitespace_parser.py000066400000000000000000000222321456464173300223150ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import List, Optional, Sequence, Tuple, Union from libcst._nodes.whitespace import ( Comment, COMMENT_RE, EmptyLine, Newline, NEWLINE_RE, ParenthesizedWhitespace, SIMPLE_WHITESPACE_RE, SimpleWhitespace, TrailingWhitespace, ) from libcst._parser.types.config import BaseWhitespaceParserConfig from libcst._parser.types.whitespace_state import WhitespaceState as State # BEGIN PARSER ENTRYPOINTS def parse_simple_whitespace( config: BaseWhitespaceParserConfig, state: State ) -> SimpleWhitespace: # The match never fails because the pattern can match an empty string lines = config.lines # pyre-fixme[16]: Optional type has no attribute `group`. ws_line = SIMPLE_WHITESPACE_RE.match(lines[state.line - 1], state.column).group(0) ws_line_list = [ws_line] while "\\" in ws_line: # continuation character state.line += 1 state.column = 0 ws_line = SIMPLE_WHITESPACE_RE.match(lines[state.line - 1], state.column).group( 0 ) ws_line_list.append(ws_line) # TODO: we could special-case the common case where there's no continuation # character to avoid list construction and joining. # once we've finished collecting continuation characters state.column += len(ws_line) return SimpleWhitespace("".join(ws_line_list)) def parse_empty_lines( config: BaseWhitespaceParserConfig, state: State, *, override_absolute_indent: Optional[str] = None, ) -> Sequence[EmptyLine]: # If override_absolute_indent is true, then we need to parse all lines up # to and including the last line that is indented at our level. These all # belong to the footer and not to the next line's leading_lines. All lines # that have indent=False and come after the last line where indent=True # do not belong to this node. state_for_line = State( state.line, state.column, state.absolute_indent, state.is_parenthesized ) lines: List[Tuple[State, EmptyLine]] = [] while True: el = _parse_empty_line( config, state_for_line, override_absolute_indent=override_absolute_indent ) if el is None: break # Store the updated state with the element we parsed. Then make a new state # clone for the next element. lines.append((state_for_line, el)) state_for_line = State( state_for_line.line, state_for_line.column, state.absolute_indent, state.is_parenthesized, ) if override_absolute_indent is not None: # We need to find the last element that is indented, and then split the list # at that point. for i in range(len(lines) - 1, -1, -1): if lines[i][1].indent: lines = lines[: (i + 1)] break else: # We didn't find any lines, throw them all away lines = [] if lines: # Update the state line and column to match the last line actually parsed. final_state: State = lines[-1][0] state.line = final_state.line state.column = final_state.column return [r[1] for r in lines] def parse_trailing_whitespace( config: BaseWhitespaceParserConfig, state: State ) -> TrailingWhitespace: trailing_whitespace = _parse_trailing_whitespace(config, state) if trailing_whitespace is None: raise Exception( "Internal Error: Failed to parse TrailingWhitespace. This should never " + "happen because a TrailingWhitespace is never optional in the grammar, " + "so this error should've been caught by parso first." ) return trailing_whitespace def parse_parenthesizable_whitespace( config: BaseWhitespaceParserConfig, state: State ) -> Union[SimpleWhitespace, ParenthesizedWhitespace]: if state.is_parenthesized: # First, try parenthesized (don't need speculation because it either # parses or doesn't modify state). 
parenthesized_whitespace = _parse_parenthesized_whitespace(config, state) if parenthesized_whitespace is not None: return parenthesized_whitespace # Now, just parse and return a simple whitespace return parse_simple_whitespace(config, state) # END PARSER ENTRYPOINTS # BEGIN PARSER INTERNAL PRODUCTIONS def _parse_empty_line( config: BaseWhitespaceParserConfig, state: State, *, override_absolute_indent: Optional[str] = None, ) -> Optional[EmptyLine]: # begin speculative parsing speculative_state = State( state.line, state.column, state.absolute_indent, state.is_parenthesized ) try: indent = _parse_indent( config, speculative_state, override_absolute_indent=override_absolute_indent ) except Exception: # We aren't on a new line, speculative parsing failed return None whitespace = parse_simple_whitespace(config, speculative_state) comment = _parse_comment(config, speculative_state) newline = _parse_newline(config, speculative_state) if newline is None: # speculative parsing failed return None # speculative parsing succeeded state.line = speculative_state.line state.column = speculative_state.column # don't need to copy absolute_indent/is_parenthesized because they don't change. return EmptyLine(indent, whitespace, comment, newline) def _parse_indent( config: BaseWhitespaceParserConfig, state: State, *, override_absolute_indent: Optional[str] = None, ) -> bool: """ Returns True if indentation was found, otherwise False. """ absolute_indent = ( override_absolute_indent if override_absolute_indent is not None else state.absolute_indent ) line_str = config.lines[state.line - 1] if state.column != 0: if state.column == len(line_str) and state.line == len(config.lines): # We're at EOF, treat this as a failed speculative parse return False raise Exception("Internal Error: Column should be 0 when parsing an indent.") if line_str.startswith(absolute_indent, state.column): state.column += len(absolute_indent) return True return False def _parse_comment( config: BaseWhitespaceParserConfig, state: State ) -> Optional[Comment]: comment_match = COMMENT_RE.match(config.lines[state.line - 1], state.column) if comment_match is None: return None comment = comment_match.group(0) state.column += len(comment) return Comment(comment) def _parse_newline( config: BaseWhitespaceParserConfig, state: State ) -> Optional[Newline]: # begin speculative parsing line_str = config.lines[state.line - 1] newline_match = NEWLINE_RE.match(line_str, state.column) if newline_match is not None: # speculative parsing succeeded newline_str = newline_match.group(0) state.column += len(newline_str) if state.column != len(line_str): raise Exception("Internal Error: Found a newline, but it wasn't the EOL.") if state.line < len(config.lines): # this newline was the end of a line, and there's another line, # therefore we should move to the next line state.line += 1 state.column = 0 if newline_str == config.default_newline: # Just inherit it from the Module instead of explicitly setting it. 
return Newline() else: return Newline(newline_str) else: # no newline was found, speculative parsing failed return None def _parse_trailing_whitespace( config: BaseWhitespaceParserConfig, state: State ) -> Optional[TrailingWhitespace]: # Begin speculative parsing speculative_state = State( state.line, state.column, state.absolute_indent, state.is_parenthesized ) whitespace = parse_simple_whitespace(config, speculative_state) comment = _parse_comment(config, speculative_state) newline = _parse_newline(config, speculative_state) if newline is None: # Speculative parsing failed return None # Speculative parsing succeeded state.line = speculative_state.line state.column = speculative_state.column # don't need to copy absolute_indent/is_parenthesized because they don't change. return TrailingWhitespace(whitespace, comment, newline) def _parse_parenthesized_whitespace( config: BaseWhitespaceParserConfig, state: State ) -> Optional[ParenthesizedWhitespace]: first_line = _parse_trailing_whitespace(config, state) if first_line is None: # Speculative parsing failed return None empty_lines = () while True: empty_line = _parse_empty_line(config, state) if empty_line is None: # This isn't an empty line, so parse it below break empty_lines = empty_lines + (empty_line,) indent = _parse_indent(config, state) last_line = parse_simple_whitespace(config, state) return ParenthesizedWhitespace(first_line, empty_lines, indent, last_line) LibCST-1.2.0/libcst/_parser/python_parser.py000066400000000000000000000034171456464173300207760ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe from typing import Any, Iterable, Mapping, Sequence from libcst._parser.base_parser import BaseParser from libcst._parser.grammar import get_nonterminal_conversions, get_terminal_conversions from libcst._parser.parso.pgen2.generator import Grammar from libcst._parser.parso.python.token import TokenType from libcst._parser.types.config import ParserConfig from libcst._parser.types.conversions import NonterminalConversion, TerminalConversion from libcst._parser.types.token import Token class PythonCSTParser(BaseParser[Token, TokenType, Any]): config: ParserConfig terminal_conversions: Mapping[str, TerminalConversion] nonterminal_conversions: Mapping[str, NonterminalConversion] def __init__( self, *, tokens: Iterable[Token], config: ParserConfig, pgen_grammar: "Grammar[TokenType]", start_nonterminal: str = "file_input", ) -> None: super().__init__( tokens=tokens, lines=config.lines, pgen_grammar=pgen_grammar, start_nonterminal=start_nonterminal, ) self.config = config self.terminal_conversions = get_terminal_conversions() self.nonterminal_conversions = get_nonterminal_conversions( config.version, config.future_imports ) def convert_nonterminal(self, nonterminal: str, children: Sequence[Any]) -> Any: return self.nonterminal_conversions[nonterminal](self.config, children) def convert_terminal(self, token: Token) -> Any: return self.terminal_conversions[token.type.name](self.config, token) LibCST-1.2.0/libcst/_parser/tests/000077500000000000000000000000001456464173300166645ustar00rootroot00000000000000LibCST-1.2.0/libcst/_parser/tests/__init__.py000066400000000000000000000002631456464173300207760ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/_parser/tests/test_config.py000066400000000000000000000024051456464173300215430ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-strict from libcst._parser.parso.utils import PythonVersionInfo from libcst._parser.types.config import _pick_compatible_python_version from libcst.testing.utils import UnitTest class ConfigTest(UnitTest): def test_pick_compatible(self) -> None: self.assertEqual( PythonVersionInfo(3, 1), _pick_compatible_python_version("3.2") ) self.assertEqual( PythonVersionInfo(3, 1), _pick_compatible_python_version("3.1") ) self.assertEqual( PythonVersionInfo(3, 8), _pick_compatible_python_version("3.9") ) self.assertEqual( PythonVersionInfo(3, 8), _pick_compatible_python_version("3.10") ) self.assertEqual( PythonVersionInfo(3, 8), _pick_compatible_python_version("4.0") ) with self.assertRaisesRegex( ValueError, ( r"No version found older than 1\.0 \(PythonVersionInfo\(" + r"major=1, minor=0\)\) while running on" ), ): _pick_compatible_python_version("1.0") LibCST-1.2.0/libcst/_parser/tests/test_detect_config.py000066400000000000000000000334061456464173300231000ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Union from libcst._parser.detect_config import detect_config from libcst._parser.parso.utils import PythonVersionInfo from libcst._parser.types.config import ( parser_config_asdict, ParserConfig, PartialParserConfig, ) from libcst.testing.utils import data_provider, UnitTest class TestDetectConfig(UnitTest): @data_provider( { "empty_input": { "source": b"", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=["\n", ""], encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=False, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "detect_trailing_newline_disabled": { "source": b"", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": False, "detect_default_newline": True, "expected_config": ParserConfig( lines=[""], # the trailing newline isn't inserted encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=False, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "detect_default_newline_disabled": { "source": b"pass\r", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": False, "detect_default_newline": False, "expected_config": ParserConfig( lines=["pass\r", ""], # the trailing newline isn't inserted encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=False, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "newline_inferred": { "source": b"first_line\r\n\nsomething\n", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=["first_line\r\n", "\n", "something\n", ""], encoding="utf-8", default_indent=" ", default_newline="\r\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), 
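                    # (Editorial) default_newline="\r\n" above comes from the
                    # ending of the first source line; inference keys off the
                    # first newline encountered, not the most common one.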
future_imports=frozenset(), ), }, "newline_partial_given": { "source": b"first_line\r\nsecond_line\r\n", "partial": PartialParserConfig( default_newline="\n", python_version="3.7" ), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=["first_line\r\n", "second_line\r\n", ""], encoding="utf-8", default_indent=" ", default_newline="\n", # The given partial disables inference has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "indent_inferred": { "source": b"if test:\n\t something\n", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=["if test:\n", "\t something\n", ""], encoding="utf-8", default_indent="\t ", default_newline="\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "indent_partial_given": { "source": b"if test:\n\t something\n", "partial": PartialParserConfig( default_indent=" ", python_version="3.7" ), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=["if test:\n", "\t something\n", ""], encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "encoding_inferred": { "source": b"#!/usr/bin/python3\n# -*- coding: latin-1 -*-\npass\n", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=[ "#!/usr/bin/python3\n", "# -*- coding: latin-1 -*-\n", "pass\n", "", ], encoding="iso-8859-1", # this is an alias for latin-1 default_indent=" ", default_newline="\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "encoding_partial_given": { "source": b"#!/usr/bin/python3\n# -*- coding: latin-1 -*-\npass\n", "partial": PartialParserConfig( encoding="us-ascii", python_version="3.7" ), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=[ "#!/usr/bin/python3\n", "# -*- coding: latin-1 -*-\n", "pass\n", "", ], encoding="us-ascii", default_indent=" ", default_newline="\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "encoding_str_not_bytes_disables_inference": { "source": "#!/usr/bin/python3\n# -*- coding: latin-1 -*-\npass\n", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=[ "#!/usr/bin/python3\n", "# -*- coding: latin-1 -*-\n", "pass\n", "", ], encoding="utf-8", # because source is a str, don't infer latin-1 default_indent=" ", default_newline="\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "encoding_non_ascii_compatible_utf_16_with_bom": { "source": b"\xff\xfet\x00e\x00s\x00t\x00", "partial": PartialParserConfig(encoding="utf-16", python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=["test\n", ""], encoding="utf-16", default_indent=" ", default_newline="\n", has_trailing_newline=False, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "detect_trailing_newline_missing_newline": { "source": b"test", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, 
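                # (Editorial) With detection enabled, the missing trailing
                # "\n" is inserted into `lines` below, while
                # has_trailing_newline stays False so codegen can reproduce
                # the original source byte-for-byte.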
"detect_default_newline": True, "expected_config": ParserConfig( lines=["test\n", ""], encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=False, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "detect_trailing_newline_has_newline": { "source": b"test\n", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=["test\n", ""], encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "detect_trailing_newline_missing_newline_after_line_continuation": { "source": b"test\\\n", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=["test\\\n", "\n", ""], encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=False, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "detect_trailing_newline_has_newline_after_line_continuation": { "source": b"test\\\n\n", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=["test\\\n", "\n", ""], encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset(), ), }, "future_imports_in_correct_position": { "source": b"# C\n''' D '''\nfrom __future__ import a as b\n", "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=[ "# C\n", "''' D '''\n", "from __future__ import a as b\n", "", ], encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset({"a"}), ), }, "future_imports_in_mixed_position": { "source": ( b"from __future__ import a, b\nimport os\n" + b"from __future__ import c\n" ), "partial": PartialParserConfig(python_version="3.7"), "detect_trailing_newline": True, "detect_default_newline": True, "expected_config": ParserConfig( lines=[ "from __future__ import a, b\n", "import os\n", "from __future__ import c\n", "", ], encoding="utf-8", default_indent=" ", default_newline="\n", has_trailing_newline=True, version=PythonVersionInfo(3, 7), future_imports=frozenset({"a", "b"}), ), }, } ) def test_detect_module_config( self, *, source: Union[str, bytes], partial: PartialParserConfig, detect_trailing_newline: bool, detect_default_newline: bool, expected_config: ParserConfig, ) -> None: self.assertEqual( parser_config_asdict( detect_config( source, partial=partial, detect_trailing_newline=detect_trailing_newline, detect_default_newline=detect_default_newline, ).config ), parser_config_asdict(expected_config), ) LibCST-1.2.0/libcst/_parser/tests/test_footer_behavior.py000066400000000000000000000246461456464173300234660ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from textwrap import dedent import libcst as cst from libcst import parse_module from libcst._nodes.deep_equals import deep_equals from libcst.testing.utils import data_provider, UnitTest class FooterBehaviorTest(UnitTest): @data_provider( { # Literally the most basic example "simple_module": { "code": "", "expected_module": cst.Module(body=(), has_trailing_newline=False), }, # A module with a header comment "header_only_module": { "code": "# This is a header comment\n", "expected_module": cst.Module( header=[ cst.EmptyLine( comment=cst.Comment(value="# This is a header comment") ) ], body=[], ), }, # A module with a header and footer "simple_header_footer_module": { "code": "# This is a header comment\npass\n# This is a footer comment\n", "expected_module": cst.Module( header=[ cst.EmptyLine( comment=cst.Comment(value="# This is a header comment") ) ], body=[cst.SimpleStatementLine([cst.Pass()])], footer=[ cst.EmptyLine( comment=cst.Comment(value="# This is a footer comment") ) ], ), }, # A module which should have a footer comment taken from the # if statement's indented block. "simple_reparented_footer_module": { "code": "# This is a header comment\nif True:\n pass\n# This is a footer comment\n", "expected_module": cst.Module( header=[ cst.EmptyLine( comment=cst.Comment(value="# This is a header comment") ) ], body=[ cst.If( test=cst.Name(value="True"), body=cst.IndentedBlock( header=cst.TrailingWhitespace(), body=[ cst.SimpleStatementLine( body=[cst.Pass()], trailing_whitespace=cst.TrailingWhitespace(), ) ], ), ) ], footer=[ cst.EmptyLine( comment=cst.Comment(value="# This is a footer comment") ) ], ), }, # Verifying that we properly parse and spread out footer comments to the # relative indents they go with. "complex_reparented_footer_module": { "code": ( "# This is a header comment\nif True:\n if True:\n pass" + "\n # This is an inner indented block comment\n # This " + "is an outer indented block comment\n# This is a footer comment\n" ), "expected_module": cst.Module( body=[ cst.If( test=cst.Name(value="True"), body=cst.IndentedBlock( body=[ cst.If( test=cst.Name(value="True"), body=cst.IndentedBlock( body=[ cst.SimpleStatementLine( body=[cst.Pass()] ) ], footer=[ cst.EmptyLine( comment=cst.Comment( value="# This is an inner indented block comment" ) ) ], ), ) ], footer=[ cst.EmptyLine( comment=cst.Comment( value="# This is an outer indented block comment" ) ) ], ), ) ], header=[ cst.EmptyLine( comment=cst.Comment(value="# This is a header comment") ) ], footer=[ cst.EmptyLine( comment=cst.Comment(value="# This is a footer comment") ) ], ), }, # Verify that comments belonging to statements are still owned even # after an indented block. "statement_comment_reparent": { "code": "if foo:\n return\n# comment\nx = 7\n", "expected_module": cst.Module( body=[ cst.If( test=cst.Name(value="foo"), body=cst.IndentedBlock( body=[ cst.SimpleStatementLine( body=[ cst.Return( whitespace_after_return=cst.SimpleWhitespace( value="" ) ) ] ) ] ), ), cst.SimpleStatementLine( body=[ cst.Assign( targets=[ cst.AssignTarget(target=cst.Name(value="x")) ], value=cst.Integer(value="7"), ) ], leading_lines=[ cst.EmptyLine(comment=cst.Comment(value="# comment")) ], ), ] ), }, # Verify that even if there are completely empty lines, we give all lines # up to and including the last line that's indented correctly. That way # comments that line up with indented block's indentation level aren't # parented to the next line just because there's a blank line or two # between them. 
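            # An illustrative sketch (editorial, not one of the cases below):
            #
            #     if True:
            #         pass
            #
            #         # blank line above, but still the IndentedBlock's footer
            #     # column-0, so this belongs to whatever follows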
"statement_comment_with_empty_lines": { "code": ( "def foo():\n if True:\n pass\n\n # Empty " + "line before me\n\n else:\n pass\n" ), "expected_module": cst.Module( body=[ cst.FunctionDef( name=cst.Name(value="foo"), params=cst.Parameters(), body=cst.IndentedBlock( body=[ cst.If( test=cst.Name(value="True"), body=cst.IndentedBlock( body=[ cst.SimpleStatementLine( body=[cst.Pass()] ) ], footer=[ cst.EmptyLine(indent=False), cst.EmptyLine( comment=cst.Comment( value="# Empty line before me" ) ), ], ), orelse=cst.Else( body=cst.IndentedBlock( body=[ cst.SimpleStatementLine( body=[cst.Pass()] ) ] ), leading_lines=[cst.EmptyLine(indent=False)], ), ) ] ), ) ] ), }, } ) def test_parsers(self, code: str, expected_module: cst.CSTNode) -> None: parsed_module = parse_module(dedent(code)) self.assertTrue( deep_equals(parsed_module, expected_module), msg=f"\n{parsed_module!r}\nis not deeply equal to \n{expected_module!r}", ) LibCST-1.2.0/libcst/_parser/tests/test_node_identity.py000066400000000000000000000025251456464173300231370ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import Counter from textwrap import dedent import libcst as cst from libcst.testing.utils import data_provider, UnitTest class DuplicateLeafNodeTest(UnitTest): @data_provider( ( # Simple program ( """ foo = 'toplevel' fn1(foo) fn2(foo) def fn_def(): foo = 'shadow' fn3(foo) """, ), ) ) def test_tokenize(self, code: str) -> None: test_case = self class CountVisitor(cst.CSTVisitor): def __init__(self) -> None: self.count = Counter() self.nodes = {} def on_visit(self, node: cst.CSTNode) -> bool: self.count[id(node)] += 1 test_case.assertTrue( self.count[id(node)] == 1, f"Node duplication detected between {node} and {self.nodes.get(id(node))}", ) self.nodes[id(node)] = node return True module = cst.parse_module(dedent(code)) module.visit(CountVisitor()) LibCST-1.2.0/libcst/_parser/tests/test_parse_errors.py000066400000000000000000000150041456464173300230030ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from textwrap import dedent from typing import Callable from unittest.mock import patch import libcst as cst from libcst._nodes.base import CSTValidationError from libcst._parser.entrypoints import is_native from libcst.testing.utils import data_provider, UnitTest class ParseErrorsTest(UnitTest): @data_provider( { # _wrapped_tokenize raises these exceptions "wrapped_tokenize__invalid_token": ( lambda: cst.parse_module("'"), dedent( """ Syntax Error @ 1:1. "'" is not a valid token. ' ^ """ ).strip(), ), "wrapped_tokenize__expected_dedent": ( lambda: cst.parse_module("if False:\n pass\n pass"), dedent( """ Syntax Error @ 3:1. Inconsistent indentation. Expected a dedent. pass ^ """ ).strip(), ), "wrapped_tokenize__mismatched_braces": ( lambda: cst.parse_module("abcd)"), dedent( """ Syntax Error @ 1:5. Encountered a closing brace without a matching opening brace. abcd) ^ """ ).strip(), ), # _base_parser raises these exceptions "base_parser__unexpected_indent": ( lambda: cst.parse_module(" abcd"), dedent( """ Syntax Error @ 1:5. Incomplete input. Unexpectedly encountered an indent. 
abcd ^ """ ).strip(), ), "base_parser__unexpected_dedent": ( lambda: cst.parse_module("if False:\n (el for el\n"), dedent( """ Syntax Error @ 3:1. Incomplete input. Encountered a dedent, but expected 'in'. (el for el ^ """ ).strip(), ), "base_parser__multiple_possibilities": ( lambda: cst.parse_module("try: pass"), dedent( """ Syntax Error @ 2:1. Incomplete input. Encountered end of file (EOF), but expected 'except', or 'finally'. try: pass ^ """ ).strip(), ), # conversion functions raise these exceptions. # `_base_parser` is responsible for attaching location information. "convert_nonterminal__dict_unpacking": ( lambda: cst.parse_expression("{**el for el in []}"), dedent( """ Syntax Error @ 1:19. dict unpacking cannot be used in dict comprehension {**el for el in []} ^ """ ).strip(), ), "convert_nonterminal__arglist_non_default_after_default": ( lambda: cst.parse_statement("def fn(first=None, second): ..."), dedent( """ Syntax Error @ 1:26. Cannot have a non-default argument following a default argument. def fn(first=None, second): ... ^ """ ).strip(), ), "convert_nonterminal__arglist_trailing_param_star_without_comma": ( lambda: cst.parse_statement("def fn(abc, *): ..."), dedent( """ Syntax Error @ 1:14. Named (keyword) arguments must follow a bare *. def fn(abc, *): ... ^ """ ).strip(), ), "convert_nonterminal__arglist_trailing_param_star_with_comma": ( lambda: cst.parse_statement("def fn(abc, *,): ..."), dedent( """ Syntax Error @ 1:15. Named (keyword) arguments must follow a bare *. def fn(abc, *,): ... ^ """ ).strip(), ), "convert_nonterminal__class_arg_positional_after_keyword": ( lambda: cst.parse_statement("class Cls(first=None, second): ..."), dedent( """ Syntax Error @ 2:1. Positional argument follows keyword argument. class Cls(first=None, second): ... ^ """ ).strip(), ), "convert_nonterminal__class_arg_positional_expansion_after_keyword": ( lambda: cst.parse_statement("class Cls(first=None, *second): ..."), dedent( """ Syntax Error @ 2:1. Positional argument follows keyword argument. class Cls(first=None, *second): ... ^ """ ).strip(), ), } ) def test_parser_syntax_error_str( self, parse_fn: Callable[[], object], expected: str ) -> None: with self.assertRaises(cst.ParserSyntaxError) as cm: parse_fn() # make sure str() doesn't blow up self.assertIn("Syntax Error", str(cm.exception)) if not is_native(): self.assertEqual(str(cm.exception), expected) def test_native_fallible_into_py(self) -> None: with patch("libcst._nodes.expression.Name._validate") as await_validate: await_validate.side_effect = CSTValidationError("validate is broken") with self.assertRaises((SyntaxError, cst.ParserSyntaxError)): cst.parse_module("foo") LibCST-1.2.0/libcst/_parser/tests/test_version_compare.py000066400000000000000000000033211456464173300234670ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
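# (Editorial) Grammar productions can carry version guards such as "==3.6",
# ">=3.6", or the conjunction ">3.6,<3.8"; _should_include evaluates every
# comma-separated clause against the running PythonVersionInfo and only
# includes the production when all clauses hold.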
from libcst._parser.grammar import _should_include from libcst._parser.parso.utils import PythonVersionInfo from libcst.testing.utils import data_provider, UnitTest class VersionCompareTest(UnitTest): @data_provider( ( # Simple equality ("==3.6", PythonVersionInfo(3, 6), True), ("!=3.6", PythonVersionInfo(3, 6), False), # Equal or GT/LT (">=3.6", PythonVersionInfo(3, 5), False), (">=3.6", PythonVersionInfo(3, 6), True), (">=3.6", PythonVersionInfo(3, 7), True), ("<=3.6", PythonVersionInfo(3, 5), True), ("<=3.6", PythonVersionInfo(3, 6), True), ("<=3.6", PythonVersionInfo(3, 7), False), # GT/LT (">3.6", PythonVersionInfo(3, 5), False), (">3.6", PythonVersionInfo(3, 6), False), (">3.6", PythonVersionInfo(3, 7), True), ("<3.6", PythonVersionInfo(3, 5), True), ("<3.6", PythonVersionInfo(3, 6), False), ("<3.6", PythonVersionInfo(3, 7), False), # Multiple checks (">3.6,<3.8", PythonVersionInfo(3, 6), False), (">3.6,<3.8", PythonVersionInfo(3, 7), True), (">3.6,<3.8", PythonVersionInfo(3, 8), False), ) ) def test_tokenize( self, requested_version: str, actual_version: PythonVersionInfo, expected_result: bool, ) -> None: self.assertEqual( _should_include(requested_version, actual_version), expected_result ) LibCST-1.2.0/libcst/_parser/tests/test_whitespace_parser.py000066400000000000000000000223471456464173300240150ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, TypeVar import libcst as cst from libcst._nodes.deep_equals import deep_equals from libcst._parser.types.config import MockWhitespaceParserConfig as Config from libcst._parser.types.whitespace_state import WhitespaceState as State from libcst._parser.whitespace_parser import ( parse_empty_lines, parse_simple_whitespace, parse_trailing_whitespace, ) from libcst.testing.utils import data_provider, UnitTest _T = TypeVar("_T") class WhitespaceParserTest(UnitTest): @data_provider( { "simple_whitespace_empty": { "parser": parse_simple_whitespace, "config": Config( lines=["not whitespace\n", " another line\n"], default_newline="\n" ), "start_state": State( line=1, column=0, absolute_indent="", is_parenthesized=False ), "end_state": State( line=1, column=0, absolute_indent="", is_parenthesized=False ), "expected_node": cst.SimpleWhitespace(""), }, "simple_whitespace_start_of_line": { "parser": parse_simple_whitespace, "config": Config( lines=["\t <-- There's some whitespace there\n"], default_newline="\n", ), "start_state": State( line=1, column=0, absolute_indent="", is_parenthesized=False ), "end_state": State( line=1, column=3, absolute_indent="", is_parenthesized=False ), "expected_node": cst.SimpleWhitespace("\t "), }, "simple_whitespace_end_of_line": { "parser": parse_simple_whitespace, "config": Config(lines=["prefix "], default_newline="\n"), "start_state": State( line=1, column=6, absolute_indent="", is_parenthesized=False ), "end_state": State( line=1, column=9, absolute_indent="", is_parenthesized=False ), "expected_node": cst.SimpleWhitespace(" "), }, "simple_whitespace_line_continuation": { "parser": parse_simple_whitespace, "config": Config( lines=["prefix \\\n", " \\\n", " # suffix\n"], default_newline="\n", ), "start_state": State( line=1, column=6, absolute_indent="", is_parenthesized=False ), "end_state": State( line=3, column=4, absolute_indent="", is_parenthesized=False ), "expected_node": cst.SimpleWhitespace(" \\\n \\\n "), }, 
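            # (Editorial) The backslash continuations above are kept verbatim
            # inside SimpleWhitespace.value, which is why the end state
            # advances from line 1 to line 3 while still yielding a single
            # whitespace node.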
"empty_lines_empty_list": { "parser": parse_empty_lines, "config": Config( lines=["this is not an empty line"], default_newline="\n" ), "start_state": State( line=1, column=0, absolute_indent="", is_parenthesized=False ), "end_state": State( line=1, column=0, absolute_indent="", is_parenthesized=False ), "expected_node": [], }, "empty_lines_single_line": { "parser": parse_empty_lines, "config": Config( lines=[" # comment\n", "this is not an empty line\n"], default_newline="\n", ), "start_state": State( line=1, column=0, absolute_indent=" ", is_parenthesized=False ), "end_state": State( line=2, column=0, absolute_indent=" ", is_parenthesized=False ), "expected_node": [ cst.EmptyLine( indent=True, whitespace=cst.SimpleWhitespace(""), comment=cst.Comment("# comment"), newline=cst.Newline(), ) ], }, "empty_lines_multiple": { "parser": parse_empty_lines, "config": Config( lines=[ "\n", " \n", " # comment with indent and whitespace\n", "# comment without indent\n", " # comment with no indent but some whitespace\n", ], default_newline="\n", ), "start_state": State( line=1, column=0, absolute_indent=" ", is_parenthesized=False ), "end_state": State( line=5, column=47, absolute_indent=" ", is_parenthesized=False ), "expected_node": [ cst.EmptyLine( indent=False, whitespace=cst.SimpleWhitespace(""), comment=None, newline=cst.Newline(), ), cst.EmptyLine( indent=True, whitespace=cst.SimpleWhitespace(""), comment=None, newline=cst.Newline(), ), cst.EmptyLine( indent=True, whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# comment with indent and whitespace"), newline=cst.Newline(), ), cst.EmptyLine( indent=False, whitespace=cst.SimpleWhitespace(""), comment=cst.Comment("# comment without indent"), newline=cst.Newline(), ), cst.EmptyLine( indent=False, whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment( "# comment with no indent but some whitespace" ), newline=cst.Newline(), ), ], }, "empty_lines_non_default_newline": { "parser": parse_empty_lines, "config": Config(lines=["\n", "\r\n", "\r"], default_newline="\n"), "start_state": State( line=1, column=0, absolute_indent="", is_parenthesized=False ), "end_state": State( line=3, column=1, absolute_indent="", is_parenthesized=False ), "expected_node": [ cst.EmptyLine( indent=True, whitespace=cst.SimpleWhitespace(""), comment=None, newline=cst.Newline(None), # default newline ), cst.EmptyLine( indent=True, whitespace=cst.SimpleWhitespace(""), comment=None, newline=cst.Newline("\r\n"), # non-default ), cst.EmptyLine( indent=True, whitespace=cst.SimpleWhitespace(""), comment=None, newline=cst.Newline("\r"), # non-default ), ], }, "trailing_whitespace": { "parser": parse_trailing_whitespace, "config": Config( lines=["some code # comment\n"], default_newline="\n" ), "start_state": State( line=1, column=9, absolute_indent="", is_parenthesized=False ), "end_state": State( line=1, column=21, absolute_indent="", is_parenthesized=False ), "expected_node": cst.TrailingWhitespace( whitespace=cst.SimpleWhitespace(" "), comment=cst.Comment("# comment"), newline=cst.Newline(), ), }, } ) def test_parsers( self, parser: Callable[[Config, State], _T], config: Config, start_state: State, end_state: State, expected_node: _T, ) -> None: # Uses internal `deep_equals` function instead of `CSTNode.deep_equals`, because # we need to compare sequences of nodes, and this is the easiest way. 
:/ parsed_node = parser(config, start_state) self.assertTrue( deep_equals(parsed_node, expected_node), msg=f"\n{parsed_node!r}\nis not deeply equal to \n{expected_node!r}", ) self.assertEqual(start_state, end_state) LibCST-1.2.0/libcst/_parser/tests/test_wrapped_tokenize.py000066400000000000000000002430061456464173300236540ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe from typing import Sequence from libcst._exceptions import ParserSyntaxError from libcst._parser.parso.python.token import PythonTokenTypes from libcst._parser.parso.utils import parse_version_string, PythonVersionInfo from libcst._parser.types.whitespace_state import WhitespaceState from libcst._parser.wrapped_tokenize import Token, tokenize from libcst.testing.utils import data_provider, UnitTest _PY38 = parse_version_string("3.8.0") _PY37 = parse_version_string("3.7.0") _PY36 = parse_version_string("3.6.0") _PY35 = parse_version_string("3.5.0") class WrappedTokenizeTest(UnitTest): maxDiff = 10000 @data_provider( { "simple_py35": ( "pass;\n", _PY35, ( Token( type=PythonTokenTypes.NAME, string="pass", start_pos=(1, 0), end_pos=(1, 4), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=4, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=";", start_pos=(1, 4), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=4, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 5), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(2, 0), end_pos=(2, 0), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "with_indent_py35": ( "if foo:\n bar\n", _PY35, ( Token( type=PythonTokenTypes.NAME, string="if", start_pos=(1, 0), end_pos=(1, 2), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=2, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="foo", start_pos=(1, 3), end_pos=(1, 6), whitespace_before=WhitespaceState( line=1, column=2, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=":", start_pos=(1, 6), end_pos=(1, 7), whitespace_before=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=7, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 7), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=7, absolute_indent="", 
is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.INDENT, string="", start_pos=(2, 4), end_pos=(2, 4), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=" ", ), Token( type=PythonTokenTypes.NAME, string="bar", start_pos=(2, 4), end_pos=(2, 7), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=7, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(2, 7), end_pos=(3, 0), whitespace_before=WhitespaceState( line=2, column=7, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.DEDENT, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "async_py35": ( "async def foo():\n return await bar\n", _PY35, ( Token( type=PythonTokenTypes.ASYNC, string="async", start_pos=(1, 0), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="def", start_pos=(1, 6), end_pos=(1, 9), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=9, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="foo", start_pos=(1, 10), end_pos=(1, 13), whitespace_before=WhitespaceState( line=1, column=9, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=13, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string="(", start_pos=(1, 13), end_pos=(1, 14), whitespace_before=WhitespaceState( line=1, column=13, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=1, column=14, absolute_indent="", is_parenthesized=True ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=")", start_pos=(1, 14), end_pos=(1, 15), whitespace_before=WhitespaceState( line=1, column=14, absolute_indent="", is_parenthesized=True ), whitespace_after=WhitespaceState( line=1, column=15, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=":", start_pos=(1, 15), end_pos=(1, 16), whitespace_before=WhitespaceState( line=1, column=15, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=1, column=16, absolute_indent="", 
is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 16), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=16, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.INDENT, string="", start_pos=(2, 4), end_pos=(2, 4), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=" ", ), Token( type=PythonTokenTypes.NAME, string="return", start_pos=(2, 4), end_pos=(2, 10), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=10, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.AWAIT, string="await", start_pos=(2, 11), end_pos=(2, 16), whitespace_before=WhitespaceState( line=2, column=10, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=16, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="bar", start_pos=(2, 17), end_pos=(2, 20), whitespace_before=WhitespaceState( line=2, column=16, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=20, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(2, 20), end_pos=(3, 0), whitespace_before=WhitespaceState( line=2, column=20, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.DEDENT, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "async_no_token_35": ( "async;\n", _PY35, ( Token( type=PythonTokenTypes.NAME, string="async", start_pos=(1, 0), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=";", start_pos=(1, 5), end_pos=(1, 6), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 6), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", 
start_pos=(2, 0), end_pos=(2, 0), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "simple_py36": ( "pass;\n", _PY36, ( Token( type=PythonTokenTypes.NAME, string="pass", start_pos=(1, 0), end_pos=(1, 4), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=4, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=";", start_pos=(1, 4), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=4, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 5), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(2, 0), end_pos=(2, 0), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "with_indent_py36": ( "if foo:\n bar\n", _PY36, ( Token( type=PythonTokenTypes.NAME, string="if", start_pos=(1, 0), end_pos=(1, 2), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=2, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="foo", start_pos=(1, 3), end_pos=(1, 6), whitespace_before=WhitespaceState( line=1, column=2, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=":", start_pos=(1, 6), end_pos=(1, 7), whitespace_before=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=7, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 7), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=7, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.INDENT, string="", start_pos=(2, 4), end_pos=(2, 4), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=" ", ), Token( type=PythonTokenTypes.NAME, string="bar", start_pos=(2, 4), end_pos=(2, 7), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=7, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(2, 7), end_pos=(3, 0), whitespace_before=WhitespaceState( line=2, column=7, 
absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.DEDENT, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "async_py36": ( "async def foo():\n return await bar\n", _PY36, ( Token( type=PythonTokenTypes.ASYNC, string="async", start_pos=(1, 0), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="def", start_pos=(1, 6), end_pos=(1, 9), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=9, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="foo", start_pos=(1, 10), end_pos=(1, 13), whitespace_before=WhitespaceState( line=1, column=9, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=13, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string="(", start_pos=(1, 13), end_pos=(1, 14), whitespace_before=WhitespaceState( line=1, column=13, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=1, column=14, absolute_indent="", is_parenthesized=True ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=")", start_pos=(1, 14), end_pos=(1, 15), whitespace_before=WhitespaceState( line=1, column=14, absolute_indent="", is_parenthesized=True ), whitespace_after=WhitespaceState( line=1, column=15, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=":", start_pos=(1, 15), end_pos=(1, 16), whitespace_before=WhitespaceState( line=1, column=15, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=1, column=16, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 16), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=16, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.INDENT, string="", start_pos=(2, 4), end_pos=(2, 4), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=" ", ), Token( type=PythonTokenTypes.NAME, string="return", start_pos=(2, 4), end_pos=(2, 10), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=10, 
absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.AWAIT, string="await", start_pos=(2, 11), end_pos=(2, 16), whitespace_before=WhitespaceState( line=2, column=10, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=16, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="bar", start_pos=(2, 17), end_pos=(2, 20), whitespace_before=WhitespaceState( line=2, column=16, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=20, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(2, 20), end_pos=(3, 0), whitespace_before=WhitespaceState( line=2, column=20, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.DEDENT, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "async_no_token_36": ( "async;\n", _PY36, ( Token( type=PythonTokenTypes.NAME, string="async", start_pos=(1, 0), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=";", start_pos=(1, 5), end_pos=(1, 6), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 6), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(2, 0), end_pos=(2, 0), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "simple_py37": ( "pass;\n", _PY37, ( Token( type=PythonTokenTypes.NAME, string="pass", start_pos=(1, 0), end_pos=(1, 4), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=4, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=";", start_pos=(1, 4), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=4, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( 
type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 5), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(2, 0), end_pos=(2, 0), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "with_indent_py37": ( "if foo:\n bar\n", _PY37, ( Token( type=PythonTokenTypes.NAME, string="if", start_pos=(1, 0), end_pos=(1, 2), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=2, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="foo", start_pos=(1, 3), end_pos=(1, 6), whitespace_before=WhitespaceState( line=1, column=2, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=":", start_pos=(1, 6), end_pos=(1, 7), whitespace_before=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=7, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 7), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=7, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.INDENT, string="", start_pos=(2, 4), end_pos=(2, 4), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=" ", ), Token( type=PythonTokenTypes.NAME, string="bar", start_pos=(2, 4), end_pos=(2, 7), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=7, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(2, 7), end_pos=(3, 0), whitespace_before=WhitespaceState( line=2, column=7, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.DEDENT, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "async_py37": ( "async def foo():\n return await bar\n", _PY37, ( Token( type=PythonTokenTypes.ASYNC, string="async", 
start_pos=(1, 0), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="def", start_pos=(1, 6), end_pos=(1, 9), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=9, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="foo", start_pos=(1, 10), end_pos=(1, 13), whitespace_before=WhitespaceState( line=1, column=9, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=13, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string="(", start_pos=(1, 13), end_pos=(1, 14), whitespace_before=WhitespaceState( line=1, column=13, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=1, column=14, absolute_indent="", is_parenthesized=True ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=")", start_pos=(1, 14), end_pos=(1, 15), whitespace_before=WhitespaceState( line=1, column=14, absolute_indent="", is_parenthesized=True ), whitespace_after=WhitespaceState( line=1, column=15, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=":", start_pos=(1, 15), end_pos=(1, 16), whitespace_before=WhitespaceState( line=1, column=15, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=1, column=16, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 16), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=16, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.INDENT, string="", start_pos=(2, 4), end_pos=(2, 4), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=" ", ), Token( type=PythonTokenTypes.NAME, string="return", start_pos=(2, 4), end_pos=(2, 10), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=10, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.AWAIT, string="await", start_pos=(2, 11), end_pos=(2, 16), whitespace_before=WhitespaceState( line=2, column=10, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=16, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="bar", start_pos=(2, 17), end_pos=(2, 20), whitespace_before=WhitespaceState( line=2, column=16, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=20, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(2, 20), end_pos=(3, 0), whitespace_before=WhitespaceState( line=2, column=20, absolute_indent=" ", is_parenthesized=False, ), 
whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.DEDENT, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "simple_py38": ( "pass;\n", _PY38, ( Token( type=PythonTokenTypes.NAME, string="pass", start_pos=(1, 0), end_pos=(1, 4), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=4, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=";", start_pos=(1, 4), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=4, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 5), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(2, 0), end_pos=(2, 0), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "with_indent_py38": ( "if foo:\n bar\n", _PY38, ( Token( type=PythonTokenTypes.NAME, string="if", start_pos=(1, 0), end_pos=(1, 2), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=2, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="foo", start_pos=(1, 3), end_pos=(1, 6), whitespace_before=WhitespaceState( line=1, column=2, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=":", start_pos=(1, 6), end_pos=(1, 7), whitespace_before=WhitespaceState( line=1, column=6, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=7, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 7), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=7, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.INDENT, string="", start_pos=(2, 4), end_pos=(2, 4), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, 
), relative_indent=" ", ), Token( type=PythonTokenTypes.NAME, string="bar", start_pos=(2, 4), end_pos=(2, 7), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=7, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(2, 7), end_pos=(3, 0), whitespace_before=WhitespaceState( line=2, column=7, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.DEDENT, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), "async_py38": ( "async def foo():\n return await bar\n", _PY38, ( Token( type=PythonTokenTypes.ASYNC, string="async", start_pos=(1, 0), end_pos=(1, 5), whitespace_before=WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="def", start_pos=(1, 6), end_pos=(1, 9), whitespace_before=WhitespaceState( line=1, column=5, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=9, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="foo", start_pos=(1, 10), end_pos=(1, 13), whitespace_before=WhitespaceState( line=1, column=9, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=1, column=13, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string="(", start_pos=(1, 13), end_pos=(1, 14), whitespace_before=WhitespaceState( line=1, column=13, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=1, column=14, absolute_indent="", is_parenthesized=True ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=")", start_pos=(1, 14), end_pos=(1, 15), whitespace_before=WhitespaceState( line=1, column=14, absolute_indent="", is_parenthesized=True ), whitespace_after=WhitespaceState( line=1, column=15, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.OP, string=":", start_pos=(1, 15), end_pos=(1, 16), whitespace_before=WhitespaceState( line=1, column=15, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=1, column=16, absolute_indent="", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(1, 16), end_pos=(2, 0), whitespace_before=WhitespaceState( line=1, column=16, absolute_indent="", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.INDENT, string="", start_pos=(2, 4), 
end_pos=(2, 4), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), relative_indent=" ", ), Token( type=PythonTokenTypes.NAME, string="return", start_pos=(2, 4), end_pos=(2, 10), whitespace_before=WhitespaceState( line=2, column=0, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=10, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.AWAIT, string="await", start_pos=(2, 11), end_pos=(2, 16), whitespace_before=WhitespaceState( line=2, column=10, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=16, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NAME, string="bar", start_pos=(2, 17), end_pos=(2, 20), whitespace_before=WhitespaceState( line=2, column=16, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=2, column=20, absolute_indent=" ", is_parenthesized=False, ), relative_indent=None, ), Token( type=PythonTokenTypes.NEWLINE, string="\n", start_pos=(2, 20), end_pos=(3, 0), whitespace_before=WhitespaceState( line=2, column=20, absolute_indent=" ", is_parenthesized=False, ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.DEDENT, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), Token( type=PythonTokenTypes.ENDMARKER, string="", start_pos=(3, 0), end_pos=(3, 0), whitespace_before=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), whitespace_after=WhitespaceState( line=3, column=0, absolute_indent="", is_parenthesized=False ), relative_indent=None, ), ), ), } ) def test_tokenize( self, code: str, ver: PythonVersionInfo, expected: Sequence[Token] ) -> None: tokens = tuple(tokenize(code, ver)) self.assertSequenceEqual(tokens, expected) for a, b in zip(tokens, tokens[1:]): # These must be the same object, so if whitespace gets consumed (mutated) at # the end of token a, it shows up at the beginning of token b. self.assertIs(a.whitespace_after, b.whitespace_before) def test_errortoken(self) -> None: for version in [_PY36, _PY37, _PY38]: with self.assertRaisesRegex(ParserSyntaxError, "not a valid token"): # use tuple() to read everything # The copyright symbol isn't a valid token tuple(tokenize("\u00a9", version)) def test_error_dedent(self) -> None: for version in [_PY36, _PY37, _PY38]: with self.assertRaisesRegex(ParserSyntaxError, "Inconsistent indentation"): # create some inconsistent indents to generate an ERROR_DEDENT token tuple(tokenize(" a\n b", version)) LibCST-1.2.0/libcst/_parser/types/000077500000000000000000000000001456464173300166665ustar00rootroot00000000000000LibCST-1.2.0/libcst/_parser/types/__init__.py000066400000000000000000000002631456464173300210000ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
LibCST-1.2.0/libcst/_parser/types/config.py000066400000000000000000000155541456464173300205170ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import codecs import re import sys from dataclasses import dataclass, field, fields from enum import Enum from typing import Any, Callable, FrozenSet, List, Mapping, Optional, Pattern, Union from libcst._add_slots import add_slots from libcst._nodes.whitespace import NEWLINE_RE from libcst._parser.parso.utils import parse_version_string, PythonVersionInfo _INDENT_RE: Pattern[str] = re.compile(r"[ \t]+") try: from libcst_native import parser_config as config_mod MockWhitespaceParserConfig = config_mod.BaseWhitespaceParserConfig except ImportError: from libcst._parser.types import py_config as config_mod MockWhitespaceParserConfig = config_mod.MockWhitespaceParserConfig BaseWhitespaceParserConfig = config_mod.BaseWhitespaceParserConfig ParserConfig = config_mod.ParserConfig parser_config_asdict: Callable[ [ParserConfig], Mapping[str, Any] ] = config_mod.parser_config_asdict class AutoConfig(Enum): """ A sentinel value used in PartialParserConfig """ token: int = 0 def __repr__(self) -> str: return str(self) # This list should be kept in sorted order. KNOWN_PYTHON_VERSION_STRINGS = ["3.0", "3.1", "3.3", "3.5", "3.6", "3.7", "3.8"] @add_slots @dataclass(frozen=True) class PartialParserConfig: r""" An optional object that can be supplied to the parser entrypoints (e.g. :func:`parse_module`) to configure the parser. Unspecified fields will be inferred from the input source code or from the execution environment. >>> import libcst as cst >>> tree = cst.parse_module("abc") >>> tree.bytes b'abc' >>> # override the default utf-8 encoding ... tree = cst.parse_module("abc", cst.PartialParserConfig(encoding="utf-32")) >>> tree.bytes b'\xff\xfe\x00\x00a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' """ #: The version of Python that the input source code is expected to be syntactically #: compatible with. This may be different from the Python interpreter being used to #: run LibCST. For example, you can parse code as 3.7 with a CPython 3.6 #: interpreter. #: #: If unspecified, it will default to the syntax of the running interpreter #: (rounding down from among the following list). #: #: Currently, only Python 3.0, 3.1, 3.3, 3.5, 3.6, 3.7 and 3.8 syntax is supported. #: The gaps did not have any syntax changes from the version prior. python_version: Union[str, AutoConfig] = AutoConfig.token #: A named tuple with the ``major`` and ``minor`` Python version numbers. This is #: derived from :attr:`python_version` and should not be supplied to the #: :class:`PartialParserConfig` constructor. parsed_python_version: PythonVersionInfo = field(init=False) #: The file's encoding format. When parsing a ``bytes`` object, this value may be #: inferred from the contents of the parsed source code. When parsing a ``str``, #: this value defaults to ``"utf-8"``. encoding: Union[str, AutoConfig] = AutoConfig.token #: Detected ``__future__`` import names future_imports: Union[FrozenSet[str], AutoConfig] = AutoConfig.token #: The indentation of the file, expressed as a series of tabs and/or spaces. This #: value is inferred from the contents of the parsed source code by default. default_indent: Union[str, AutoConfig] = AutoConfig.token #: The newline of the file, expressed as ``\n``, ``\r\n``, or ``\r``. 
This value is #: inferred from the contents of the parsed source code by default. default_newline: Union[str, AutoConfig] = AutoConfig.token def __post_init__(self) -> None: raw_python_version = self.python_version if isinstance(raw_python_version, AutoConfig): # If unspecified, we'll try to pick the same as the running # interpreter. There will always be at least one entry. parsed_python_version = _pick_compatible_python_version() else: # If the caller specified a version, we require that to be a known # version (because we don't want to encourage doing duplicate work # when there weren't syntax changes). # `parse_version_string` will raise a ValueError if the version is # invalid. parsed_python_version = parse_version_string(raw_python_version) if not any( parsed_python_version == parse_version_string(v) for v in KNOWN_PYTHON_VERSION_STRINGS ): comma_versions = ", ".join(KNOWN_PYTHON_VERSION_STRINGS) raise ValueError( "LibCST can only parse code using one of the following versions of " + f"Python's grammar: {comma_versions}. More versions may be " + "supported by future releases." ) # We use object.__setattr__ because the dataclass is frozen. See: # https://docs.python.org/3/library/dataclasses.html#frozen-instances # This should be safe behavior inside of `__post_init__`. object.__setattr__(self, "parsed_python_version", parsed_python_version) encoding = self.encoding if not isinstance(encoding, AutoConfig): try: codecs.lookup(encoding) except LookupError: raise ValueError(f"{repr(encoding)} is not a supported encoding") newline = self.default_newline if ( not isinstance(newline, AutoConfig) and NEWLINE_RE.fullmatch(newline) is None ): raise ValueError( f"Got an invalid value for default_newline: {repr(newline)}" ) indent = self.default_indent if not isinstance(indent, AutoConfig) and _INDENT_RE.fullmatch(indent) is None: raise ValueError(f"Got an invalid value for default_indent: {repr(indent)}") def __repr__(self) -> str: init_keys: List[str] = [] for f in fields(self): # We don't display the parsed_python_version attribute because it contains # the same value as python_version, only parsed. if f.name == "parsed_python_version": continue value = getattr(self, f.name) if not isinstance(value, AutoConfig): init_keys.append(f"{f.name}={value!r}") return f"{self.__class__.__name__}({', '.join(init_keys)})" def _pick_compatible_python_version(version: Optional[str] = None) -> PythonVersionInfo: max_version = parse_version_string(version) for v in KNOWN_PYTHON_VERSION_STRINGS[::-1]: tmp = parse_version_string(v) if tmp <= max_version: return tmp raise ValueError( f"No version found older than {version} ({max_version}) while " + f"running on {sys.version_info}" ) LibCST-1.2.0/libcst/_parser/types/conversions.py000066400000000000000000000010701456464173300216060ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Callable, Sequence from libcst._parser.types.config import ParserConfig from libcst._parser.types.token import Token # pyre-fixme[33]: Aliased annotation cannot contain `Any`. NonterminalConversion = Callable[[ParserConfig, Sequence[Any]], Any] # pyre-fixme[33]: Aliased annotation cannot contain `Any`. 
TerminalConversion = Callable[[ParserConfig, Token], Any] LibCST-1.2.0/libcst/_parser/types/partials.py000066400000000000000000000063201456464173300210600ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass from typing import Generic, Optional, Sequence, TypeVar, Union from libcst._add_slots import add_slots from libcst._nodes.expression import ( Annotation, Arg, Attribute, BaseExpression, BaseFormattedStringContent, Index, LeftParen, LeftSquareBracket, Name, Parameters, RightParen, RightSquareBracket, Slice, SubscriptElement, ) from libcst._nodes.op import AssignEqual, BaseAugOp, Colon, Dot from libcst._nodes.statement import AsName, BaseSmallStatement, Decorator, ImportAlias from libcst._nodes.whitespace import EmptyLine, SimpleWhitespace, TrailingWhitespace from libcst._parser.types.whitespace_state import WhitespaceState _T = TypeVar("_T") @add_slots @dataclass(frozen=True) class WithLeadingWhitespace(Generic[_T]): value: _T whitespace_before: WhitespaceState @add_slots @dataclass(frozen=True) class SimpleStatementPartial: body: Sequence[BaseSmallStatement] whitespace_before: WhitespaceState trailing_whitespace: TrailingWhitespace @add_slots @dataclass(frozen=True) class SlicePartial: second_colon: Colon step: Optional[BaseExpression] @add_slots @dataclass(frozen=True) class AttributePartial: dot: Dot attr: Name @add_slots @dataclass(frozen=True) class ArglistPartial: args: Sequence[Arg] @add_slots @dataclass(frozen=True) class CallPartial: lpar: WithLeadingWhitespace[LeftParen] args: Sequence[Arg] rpar: RightParen @add_slots @dataclass(frozen=True) class SubscriptPartial: slice: Union[Index, Slice, Sequence[SubscriptElement]] lbracket: LeftSquareBracket rbracket: RightSquareBracket whitespace_before: WhitespaceState @add_slots @dataclass(frozen=True) class AnnAssignPartial: annotation: Annotation equal: Optional[AssignEqual] value: Optional[BaseExpression] @add_slots @dataclass(frozen=True) class AugAssignPartial: operator: BaseAugOp value: BaseExpression @add_slots @dataclass(frozen=True) class AssignPartial: equal: AssignEqual value: BaseExpression class ParamStarPartial: pass @add_slots @dataclass(frozen=True) class FuncdefPartial: lpar: LeftParen params: Parameters rpar: RightParen @add_slots @dataclass(frozen=True) class DecoratorPartial: decorators: Sequence[Decorator] @add_slots @dataclass(frozen=True) class ImportPartial: names: Sequence[ImportAlias] @add_slots @dataclass(frozen=True) class ImportRelativePartial: relative: Sequence[Dot] module: Optional[Union[Attribute, Name]] @add_slots @dataclass(frozen=True) class FormattedStringConversionPartial: value: str whitespace_before: WhitespaceState @add_slots @dataclass(frozen=True) class FormattedStringFormatSpecPartial: values: Sequence[BaseFormattedStringContent] whitespace_before: WhitespaceState @add_slots @dataclass(frozen=True) class ExceptClausePartial: leading_lines: Sequence[EmptyLine] whitespace_after_except: SimpleWhitespace type: Optional[BaseExpression] = None name: Optional[AsName] = None LibCST-1.2.0/libcst/_parser/types/production.py000066400000000000000000000007021456464173300214250ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
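# A `Production` describes a single grammar rule: `name` is the rule's
# nonterminal and `children` its right-hand side; `__str__` below renders it
# as "name: children". The optional `version` and `future` fields presumably
# gate the rule on a Python version or a `__future__` import; that reading is
# an inference from the field names rather than something stated in the
# source.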
from dataclasses import dataclass from typing import Optional @dataclass(frozen=True) class Production: name: str children: str version: Optional[str] future: Optional[str] def __str__(self) -> str: return f"{self.name}: {self.children}" LibCST-1.2.0/libcst/_parser/types/py_config.py000066400000000000000000000027031456464173300212170ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import abc from dataclasses import asdict, dataclass from typing import Any, FrozenSet, Mapping, Sequence from libcst._parser.parso.utils import PythonVersionInfo class BaseWhitespaceParserConfig(abc.ABC): """ Represents the subset of ParserConfig that the whitespace parser requires. This makes calling the whitespace parser in tests with a mocked configuration easier. """ lines: Sequence[str] default_newline: str @dataclass(frozen=True) class MockWhitespaceParserConfig(BaseWhitespaceParserConfig): """ An internal type used by unit tests. """ lines: Sequence[str] default_newline: str @dataclass(frozen=True) class ParserConfig(BaseWhitespaceParserConfig): """ An internal configuration object that the python parser passes around. These values are global to the parsed code and should not change during the lifetime of the parser object. """ lines: Sequence[str] encoding: str default_indent: str default_newline: str has_trailing_newline: bool version: PythonVersionInfo future_imports: FrozenSet[str] def parser_config_asdict(config: ParserConfig) -> Mapping[str, Any]: """ An internal helper function used by unit tests to compare configs. """ return asdict(config) LibCST-1.2.0/libcst/_parser/types/py_token.py000066400000000000000000000015651456464173300210770ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass from typing import Optional, Tuple from libcst._add_slots import add_slots from libcst._parser.parso.python.token import TokenType from libcst._parser.types.whitespace_state import WhitespaceState @add_slots @dataclass(frozen=True) class Token: type: TokenType string: str # The start of where `string` is in the source, not including leading whitespace. start_pos: Tuple[int, int] # The end of where `string` is in the source, not including trailing whitespace. end_pos: Tuple[int, int] whitespace_before: WhitespaceState whitespace_after: WhitespaceState # The relative indent this token adds. relative_indent: Optional[str] LibCST-1.2.0/libcst/_parser/types/py_whitespace_state.py000066400000000000000000000025641456464173300233130ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass from libcst._add_slots import add_slots @add_slots @dataclass(frozen=False) class WhitespaceState: """ A frequently mutated store of the whitespace parser's current state. This object must be cloned prior to speculative parsing. This is in contrast to the `config` object each whitespace parser function takes, which is frozen and never mutated. Whitespace parsing works by mutating this state object. 
By saving and re-using state objects inside the top-level Python parser, the whitespace parser is able to be reentrant. One 'convert' function can consume part of the whitespace, and another 'convert' function can consume the rest, depending on who owns what whitespace. This is similar to the approach you might take to parse nested languages (e.g. JavaScript inside of HTML). We're treating whitespace as a separate language and grammar from the rest of Python's grammar. """ line: int # one-indexed (to match parso's behavior) column: int # zero-indexed (to match parso's behavior) # What to look for when executing `_parse_indent`. absolute_indent: str is_parenthesized: bool LibCST-1.2.0/libcst/_parser/types/tests/000077500000000000000000000000001456464173300200305ustar00rootroot00000000000000LibCST-1.2.0/libcst/_parser/types/tests/__init__.py000066400000000000000000000002631456464173300221420ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/_parser/types/tests/test_config.py000066400000000000000000000042441456464173300227120ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable from libcst._parser.types.config import PartialParserConfig from libcst.testing.utils import data_provider, UnitTest class TestConfig(UnitTest): @data_provider( { "empty": (PartialParserConfig,), "python_version_a": (lambda: PartialParserConfig(python_version="3.7"),), "python_version_b": (lambda: PartialParserConfig(python_version="3.7.1"),), "encoding": (lambda: PartialParserConfig(encoding="latin-1"),), "default_indent": (lambda: PartialParserConfig(default_indent="\t "),), "default_newline": (lambda: PartialParserConfig(default_newline="\r\n"),), } ) def test_valid_partial_parser_config( self, factory: Callable[[], PartialParserConfig] ) -> None: self.assertIsInstance(factory(), PartialParserConfig) @data_provider( { "python_version": ( lambda: PartialParserConfig(python_version="3.7.1.0"), "The given version is not in the right format", ), "python_version_unsupported": ( lambda: PartialParserConfig(python_version="3.4"), "LibCST can only parse code using one of the following versions of Python's grammar", ), "encoding": ( lambda: PartialParserConfig(encoding="utf-42"), "not a supported encoding", ), "default_indent": ( lambda: PartialParserConfig(default_indent="badinput"), "invalid value for default_indent", ), "default_newline": ( lambda: PartialParserConfig(default_newline="\n\r"), "invalid value for default_newline", ), } ) def test_invalid_partial_parser_config( self, factory: Callable[[], PartialParserConfig], expected_re: str ) -> None: with self.assertRaisesRegex(ValueError, expected_re): factory() LibCST-1.2.0/libcst/_parser/types/token.py000066400000000000000000000005221456464173300203570ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
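# As with the sibling whitespace_state module that follows, this module
# prefers the native (Rust) implementation from the `libcst_native`
# extension when it is importable, and falls back to the pure-Python `Token`
# dataclass in `py_token.py` otherwise.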
try: from libcst_native import tokenize Token = tokenize.Token except ImportError: from libcst._parser.types.py_token import Token # noqa F401 LibCST-1.2.0/libcst/_parser/types/whitespace_state.py000066400000000000000000000006531456464173300226000ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Defines the state object used by the whitespace parser. """ try: from libcst_native import whitespace_state as mod except ImportError: from libcst._parser.types import py_whitespace_state as mod WhitespaceState = mod.WhitespaceState LibCST-1.2.0/libcst/_parser/whitespace_parser.py000066400000000000000000000022201456464173300216010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Parso doesn't attempt to parse (or even emit tokens for) whitespace or comments that aren't syntactically important. Instead, we're just given the whitespace as a "prefix" of the token. However, in our CST, whitespace is gathered into far more detailed objects than a simple str. Fortunately this isn't hard for us to parse ourselves, so we just use our own hand-rolled recursive descent parser. """ try: # It'd be better to do `from libcst_native.whitespace_parser import *`, but we're # blocked on https://github.com/PyO3/pyo3/issues/759 # (which ultimately seems to be a limitation of how importlib works) from libcst_native import whitespace_parser as mod except ImportError: from libcst._parser import py_whitespace_parser as mod parse_simple_whitespace = mod.parse_simple_whitespace parse_empty_lines = mod.parse_empty_lines parse_trailing_whitespace = mod.parse_trailing_whitespace parse_parenthesizable_whitespace = mod.parse_parenthesizable_whitespace LibCST-1.2.0/libcst/_parser/wrapped_tokenize.py000066400000000000000000000201771456464173300214530ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Parso's tokenize doesn't give us tokens in the format that we'd ideally like, so this performs a small number of transformations to the token stream: - `end_pos` is precomputed as a property, instead of lazily as a method, for more efficient access. - `whitespace_before` and `whitespace_after` have been added. These include the correct indentation information. - `prefix` is removed, since we don't use it anywhere. - `ERRORTOKEN` and `ERROR_DEDENT` have been removed, because we don't intend to support error recovery. If we encounter token errors, we'll raise a ParserSyntaxError instead. If performance becomes a concern, we can rewrite this later as a fork of the original tokenize module, instead of as a wrapper.
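As an illustrative sketch of the resulting stream (abridged; the full Token
fields, including the whitespace state chaining, are pinned down in the
tokenize tests), a trivial module yields NAME, NEWLINE, and ENDMARKER tokens,
and each token shares its `whitespace_after` object with the next token's
`whitespace_before`::

    from libcst._parser.parso.utils import parse_version_string

    tokens = list(tokenize("pass\n", parse_version_string("3.8")))
    [t.type.name for t in tokens]  # ['NAME', 'NEWLINE', 'ENDMARKER']
    tokens[0].whitespace_after is tokens[1].whitespace_before  # True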
""" from dataclasses import dataclass, field from enum import Enum from typing import Generator, Iterator, List, Optional, Sequence from libcst._add_slots import add_slots from libcst._exceptions import ParserSyntaxError from libcst._parser.parso.python.token import PythonTokenTypes, TokenType from libcst._parser.parso.python.tokenize import ( Token as OrigToken, tokenize_lines as orig_tokenize_lines, ) from libcst._parser.parso.utils import PythonVersionInfo, split_lines from libcst._parser.types.token import Token from libcst._parser.types.whitespace_state import WhitespaceState _ERRORTOKEN: TokenType = PythonTokenTypes.ERRORTOKEN _ERROR_DEDENT: TokenType = PythonTokenTypes.ERROR_DEDENT _INDENT: TokenType = PythonTokenTypes.INDENT _DEDENT: TokenType = PythonTokenTypes.DEDENT _ENDMARKER: TokenType = PythonTokenTypes.ENDMARKER _FSTRING_START: TokenType = PythonTokenTypes.FSTRING_START _FSTRING_END: TokenType = PythonTokenTypes.FSTRING_END _OP: TokenType = PythonTokenTypes.OP class _ParenthesisOrFStringStackEntry(Enum): PARENTHESIS = 0 FSTRING = 0 _PARENTHESIS_STACK_ENTRY: _ParenthesisOrFStringStackEntry = ( _ParenthesisOrFStringStackEntry.PARENTHESIS ) _FSTRING_STACK_ENTRY: _ParenthesisOrFStringStackEntry = ( _ParenthesisOrFStringStackEntry.FSTRING ) @add_slots @dataclass(frozen=False) class _TokenizeState: lines: Sequence[str] previous_whitespace_state: WhitespaceState = field( default_factory=lambda: WhitespaceState( line=1, column=0, absolute_indent="", is_parenthesized=False ) ) indents: List[str] = field(default_factory=lambda: [""]) parenthesis_or_fstring_stack: List[_ParenthesisOrFStringStackEntry] = field( default_factory=list ) def tokenize(code: str, version_info: PythonVersionInfo) -> Iterator[Token]: try: from libcst_native import tokenize as native_tokenize return native_tokenize.tokenize(code) except ImportError: lines = split_lines(code, keepends=True) return tokenize_lines(code, lines, version_info) def tokenize_lines( code: str, lines: Sequence[str], version_info: PythonVersionInfo ) -> Iterator[Token]: try: from libcst_native import tokenize as native_tokenize # TODO: pass through version_info return native_tokenize.tokenize(code) except ImportError: return tokenize_lines_py(code, lines, version_info) def tokenize_lines_py( code: str, lines: Sequence[str], version_info: PythonVersionInfo ) -> Generator[Token, None, None]: state = _TokenizeState(lines) orig_tokens_iter = iter(orig_tokenize_lines(lines, version_info)) # Iterate over the tokens and pass them to _convert_token, providing a one-token # lookahead, to enable proper indent handling. try: curr_token = next(orig_tokens_iter) except StopIteration: pass # empty file else: for next_token in orig_tokens_iter: yield _convert_token(state, curr_token, next_token) curr_token = next_token yield _convert_token(state, curr_token, None) def _convert_token( # noqa: C901: too complex state: _TokenizeState, curr_token: OrigToken, next_token: Optional[OrigToken] ) -> Token: ct_type = curr_token.type ct_string = curr_token.string ct_start_pos = curr_token.start_pos if ct_type is _ERRORTOKEN: raise ParserSyntaxError( f"{ct_string!r} is not a valid token.", lines=state.lines, raw_line=ct_start_pos[0], raw_column=ct_start_pos[1], ) if ct_type is _ERROR_DEDENT: raise ParserSyntaxError( "Inconsistent indentation. 
Expected a dedent.", lines=state.lines, raw_line=ct_start_pos[0], raw_column=ct_start_pos[1], ) # Compute relative indent changes for indent/dedent nodes relative_indent: Optional[str] = None if ct_type is _INDENT: old_indent = "" if len(state.indents) < 2 else state.indents[-2] new_indent = state.indents[-1] relative_indent = new_indent[len(old_indent) :] if next_token is not None: nt_type = next_token.type if nt_type is _INDENT: nt_line, nt_column = next_token.start_pos state.indents.append(state.lines[nt_line - 1][:nt_column]) elif nt_type is _DEDENT: state.indents.pop() whitespace_before = state.previous_whitespace_state if ct_type is _INDENT or ct_type is _DEDENT or ct_type is _ENDMARKER: # Don't update whitespace state for these dummy tokens. This makes it possible # to partially parse whitespace for IndentedBlock footers, and then parse the # rest of the whitespace in the following statement's leading_lines. # Unfortunately, that means that the indentation is either wrong for the footer # comments, or for the next line. We've chosen to allow it to be wrong for the # IndentedBlock footer and manually override the state when parsing whitespace # in that particular node. whitespace_after = whitespace_before ct_end_pos = ct_start_pos else: # Not a dummy token, so update the whitespace state. # Compute our own end_pos, since parso's end_pos is wrong for triple-strings. lines = split_lines(ct_string) if len(lines) > 1: ct_end_pos = ct_start_pos[0] + len(lines) - 1, len(lines[-1]) else: ct_end_pos = (ct_start_pos[0], ct_start_pos[1] + len(ct_string)) # Figure out what mode the whitespace parser should use. If we're inside # parentheses, certain whitespace (e.g. newlines) are allowed where they would # otherwise not be. f-strings override and disable this behavior, however. # # Parso's tokenizer tracks this internally, but doesn't expose it, so we have to # duplicate that logic here. pof_stack = state.parenthesis_or_fstring_stack try: if ct_type is _FSTRING_START: pof_stack.append(_FSTRING_STACK_ENTRY) elif ct_type is _FSTRING_END: pof_stack.pop() elif ct_type is _OP: if ct_string in "([{": pof_stack.append(_PARENTHESIS_STACK_ENTRY) elif ct_string in ")]}": pof_stack.pop() except IndexError: # pof_stack may be empty by the time we need to read from it due to # mismatched braces. raise ParserSyntaxError( "Encountered a closing brace without a matching opening brace.", lines=state.lines, raw_line=ct_start_pos[0], raw_column=ct_start_pos[1], ) is_parenthesized = ( len(pof_stack) > 0 and pof_stack[-1] == _PARENTHESIS_STACK_ENTRY ) whitespace_after = WhitespaceState( ct_end_pos[0], ct_end_pos[1], state.indents[-1], is_parenthesized ) # Hold onto whitespace_after, so we can use it as whitespace_before in the next # node. state.previous_whitespace_state = whitespace_after return Token( ct_type, ct_string, ct_start_pos, ct_end_pos, whitespace_before, whitespace_after, relative_indent, ) LibCST-1.2.0/libcst/_position.py000066400000000000000000000033621456464173300164500ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Data structures used for storing position information. These are publicly exported by metadata, but their implementation lives outside of metadata, because they're used internally by the codegen logic, which computes position locations. 
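As a small usage sketch: ``CodeRange`` may be constructed either from two
``CodePosition`` objects or, as a convenience, from two ``(line, column)``
tuples; the two spellings below are equivalent::

    CodeRange(CodePosition(1, 0), CodePosition(1, 4))
    CodeRange((1, 0), (1, 4))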
""" from dataclasses import dataclass from typing import cast, overload, Tuple, Union from libcst._add_slots import add_slots _CodePositionT = Union[Tuple[int, int], "CodePosition"] @add_slots @dataclass(frozen=True) class CodePosition: #: Line numbers are 1-indexed. line: int #: Column numbers are 0-indexed. column: int @add_slots @dataclass(frozen=True) # pyre-fixme[13]: Attribute `end` is never initialized. # pyre-fixme[13]: Attribute `start` is never initialized. class CodeRange: #: Starting position of a node (inclusive). start: CodePosition #: Ending position of a node (exclusive). end: CodePosition @overload def __init__(self, start: CodePosition, end: CodePosition) -> None: ... @overload def __init__(self, start: Tuple[int, int], end: Tuple[int, int]) -> None: ... def __init__(self, start: _CodePositionT, end: _CodePositionT) -> None: if isinstance(start, tuple) and isinstance(end, tuple): object.__setattr__(self, "start", CodePosition(start[0], start[1])) object.__setattr__(self, "end", CodePosition(end[0], end[1])) else: start = cast(CodePosition, start) end = cast(CodePosition, end) object.__setattr__(self, "start", start) object.__setattr__(self, "end", end) LibCST-1.2.0/libcst/_removal_sentinel.py000066400000000000000000000036121456464173300201500ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Used by visitors. This is hoisted into a separate module to avoid some circular dependencies in the definition of CSTNode. """ from enum import auto, Enum class RemovalSentinel(Enum): """ A :attr:`RemovalSentinel.REMOVE` value should be returned by a :meth:`CSTTransformer.on_leave` method when we want to remove that child from its parent. As a convenience, this can be constructed by calling :func:`libcst.RemoveFromParent`. The parent node should make a best-effort to remove the child, but may raise an exception when removing the child doesn't make sense, or could change the semantics in an unexpected way. For example, a function definition with no name doesn't make sense, but removing one of the arguments is valid. In we can't automatically remove the child, the developer should instead remove the child by constructing a new parent in the parent's :meth:`~CSTTransformer.on_leave` call. We use this instead of ``None`` to force developers to be explicit about deletions. Because ``None`` is the default return value for a function with no return statement, it would be too easy to accidentally delete nodes from the tree by forgetting to return a value. """ REMOVE = auto() def RemoveFromParent() -> RemovalSentinel: """ A convenience method for requesting that this node be removed by its parent. Use this in place of returning :class:`RemovalSentinel` directly. For example, to remove all arguments unconditionally:: def leave_Arg( self, original_node: cst.Arg, updated_node: cst.Arg ) -> Union[cst.Arg, cst.RemovalSentinel]: return RemoveFromParent() """ return RemovalSentinel.REMOVE LibCST-1.2.0/libcst/_tabs.py000066400000000000000000000014321456464173300155310ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
def expand_tabs(line: str) -> str: """ Tabs are treated as 1-8 spaces according to https://docs.python.org/3/reference/lexical_analysis.html#indentation Given a string with tabs, this removes all tab characters and replaces them with the appropriate number of spaces. """ result_list = [] total = 0 for ch in line: if ch == "\t": prev_total = total total = ((total + 8) // 8) * 8 result_list.append(" " * (total - prev_total)) else: total += 1 result_list.append(ch) return "".join(result_list) LibCST-1.2.0/libcst/_type_enforce.py000066400000000000000000000135271456464173300172700ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import ( Any, ForwardRef, Iterable, Mapping, MutableMapping, MutableSequence, Tuple, ) from typing_extensions import Literal from typing_inspect import get_args, get_origin, is_classvar, is_typevar, is_union_type def is_value_of_type( # noqa: C901 "too complex" # pyre-fixme[2]: Parameter annotation cannot be `Any`. value: Any, # pyre-fixme[2]: Parameter annotation cannot be `Any`. expected_type: Any, invariant_check: bool = False, ) -> bool: """ This method attempts to verify a given value is of a given type. If the type is not supported, it returns True but throws an exception in tests. It is similar to the typeguard / enforce pypi modules, but neither of those has permissive options for types they do not support. Supported types for now: - List/Set/Iterable - Dict/Mapping - base types (str, int, etc.) - Literal - Unions - Tuples - Concrete Classes - ClassVar Not supported: - Callables, which will likely not be used in XHP anyways - Generics, Type Vars (treated as Any) - Generators - Forward Refs -- use `typing.get_type_hints` to resolve these - Type[...] """ if is_classvar(expected_type): classvar_args = get_args(expected_type) expected_type = (classvar_args[0] or Any) if classvar_args else Any if is_typevar(expected_type): # treat this the same as Any # TODO: evaluate bounds return True expected_origin_type = get_origin(expected_type) or expected_type if expected_origin_type == Any: return True elif is_union_type(expected_type): return any( is_value_of_type(value, subtype) for subtype in expected_type.__args__ ) elif isinstance(expected_origin_type, type(Literal)): literal_values = get_args(expected_type, evaluate=True) return any(value == literal for literal in literal_values) elif isinstance(expected_origin_type, ForwardRef): # not much we can do here for now, let's just return :( return True # Handle `Tuple[A, B, C]`. # We don't want to include Tuple subclasses, like NamedTuple, because they're # unlikely to behave similarly. elif expected_origin_type in [Tuple, tuple]: # py36 uses Tuple, py37+ uses tuple if not isinstance(value, tuple): return False type_args = get_args(expected_type, evaluate=True) if len(type_args) == 0: # `Tuple` (no subscript) is implicitly `Tuple[Any, ...]` return True if type_args is None: return True if len(value) != len(type_args): return False # TODO: Handle `Tuple[T, ...]` like `Iterable[T]` for subvalue, subtype in zip(value, type_args): if not is_value_of_type(subvalue, subtype): return False return True elif issubclass(expected_origin_type, Mapping): # We're expecting *some* kind of Mapping, but we also want to make sure it's # the correct Mapping subtype.
That means we want {a: b, c: d} to match Mapping, # MutableMapping, and Dict, but we don't want MappingProxyType({a: b, c: d}) to # match MutableMapping or Dict. if not issubclass(type(value), expected_origin_type): return False type_args = get_args(expected_type, evaluate=True) if len(type_args) == 0: # `Mapping` (no subscript) is implicitly `Mapping[Any, Any]`. return True invariant_check = issubclass(expected_origin_type, MutableMapping) for subkey, subvalue in value.items(): if not is_value_of_type( subkey, type_args[0], # key type is always invariant invariant_check=True, ): return False if not is_value_of_type( subvalue, type_args[1], invariant_check=invariant_check ): return False return True # While this does technically work fine for str and bytes (they are iterables), it's # better to use the default isinstance behavior for them. # # Similarly, tuple subclasses tend to have pretty different behavior, and we should # fall back to the default check. elif issubclass(expected_origin_type, Iterable) and not issubclass( expected_origin_type, (str, bytes, tuple), ): # We know this thing is *some* kind of Iterable, but we want to # allow subclasses. That means we want [1,2,3] to match both # List[int] and Iterable[int], but we do NOT want that # to match Set[int]. if not issubclass(type(value), expected_origin_type): return False type_args = get_args(expected_type, evaluate=True) if len(type_args) == 0: # `Iterable` (no subscript) is implicitly `Iterable[Any]`. return True # We do an invariant check if it's a mutable sequence invariant_check = issubclass(expected_origin_type, MutableSequence) return all( is_value_of_type(subvalue, type_args[0], invariant_check=invariant_check) for subvalue in value ) try: if not invariant_check: if expected_type is float: return isinstance(value, (int, float)) else: return isinstance(value, expected_type) return type(value) is expected_type except Exception as e: raise NotImplementedError( f"the value {value!r} was compared to type {expected_type!r} " + f"but support for that has not been implemented yet! Exception: {e!r}" ) LibCST-1.2.0/libcst/_typed_visitor.py000066400000000000000000005741461456464173300175210ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
# This file was generated by libcst.codegen.gen_matcher_classes from typing import Optional, TYPE_CHECKING, Union from libcst._flatten_sentinel import FlattenSentinel from libcst._maybe_sentinel import MaybeSentinel from libcst._removal_sentinel import RemovalSentinel from libcst._typed_visitor_base import mark_no_op if TYPE_CHECKING: from libcst._nodes.expression import ( # noqa: F401 Annotation, Arg, Asynchronous, Attribute, Await, BaseDictElement, BaseElement, BaseExpression, BaseFormattedStringContent, BaseSlice, BinaryOperation, BooleanOperation, Call, Comparison, ComparisonTarget, CompFor, CompIf, ConcatenatedString, Dict, DictComp, DictElement, Element, Ellipsis, Float, FormattedString, FormattedStringExpression, FormattedStringText, From, GeneratorExp, IfExp, Imaginary, Index, Integer, Lambda, LeftCurlyBrace, LeftParen, LeftSquareBracket, List, ListComp, Name, NamedExpr, Param, Parameters, ParamSlash, ParamStar, RightCurlyBrace, RightParen, RightSquareBracket, Set, SetComp, SimpleString, Slice, StarredDictElement, StarredElement, Subscript, SubscriptElement, Tuple, UnaryOperation, Yield, ) from libcst._nodes.module import Module # noqa: F401 from libcst._nodes.op import ( # noqa: F401 Add, AddAssign, And, AssignEqual, BaseAugOp, BaseBinaryOp, BaseBooleanOp, BaseCompOp, BaseUnaryOp, BitAnd, BitAndAssign, BitInvert, BitOr, BitOrAssign, BitXor, BitXorAssign, Colon, Comma, Divide, DivideAssign, Dot, Equal, FloorDivide, FloorDivideAssign, GreaterThan, GreaterThanEqual, ImportStar, In, Is, IsNot, LeftShift, LeftShiftAssign, LessThan, LessThanEqual, MatrixMultiply, MatrixMultiplyAssign, Minus, Modulo, ModuloAssign, Multiply, MultiplyAssign, Not, NotEqual, NotIn, Or, Plus, Power, PowerAssign, RightShift, RightShiftAssign, Semicolon, Subtract, SubtractAssign, ) from libcst._nodes.statement import ( # noqa: F401 AnnAssign, AsName, Assert, Assign, AssignTarget, AugAssign, BaseSmallStatement, BaseStatement, BaseSuite, Break, ClassDef, Continue, Decorator, Del, Else, ExceptHandler, ExceptStarHandler, Expr, Finally, For, FunctionDef, Global, If, Import, ImportAlias, ImportFrom, IndentedBlock, Match, MatchAs, MatchCase, MatchClass, MatchKeywordElement, MatchList, MatchMapping, MatchMappingElement, MatchOr, MatchOrElement, MatchPattern, MatchSequence, MatchSequenceElement, MatchSingleton, MatchStar, MatchTuple, MatchValue, NameItem, Nonlocal, ParamSpec, Pass, Raise, Return, SimpleStatementLine, SimpleStatementSuite, Try, TryStar, TypeAlias, TypeParam, TypeParameters, TypeVar, TypeVarTuple, While, With, WithItem, ) from libcst._nodes.whitespace import ( # noqa: F401 BaseParenthesizableWhitespace, Comment, EmptyLine, Newline, ParenthesizedWhitespace, SimpleWhitespace, TrailingWhitespace, ) class CSTTypedBaseFunctions: @mark_no_op def visit_Add(self, node: "Add") -> Optional[bool]: pass @mark_no_op def visit_Add_whitespace_before(self, node: "Add") -> None: pass @mark_no_op def leave_Add_whitespace_before(self, node: "Add") -> None: pass @mark_no_op def visit_Add_whitespace_after(self, node: "Add") -> None: pass @mark_no_op def leave_Add_whitespace_after(self, node: "Add") -> None: pass @mark_no_op def visit_AddAssign(self, node: "AddAssign") -> Optional[bool]: pass @mark_no_op def visit_AddAssign_whitespace_before(self, node: "AddAssign") -> None: pass @mark_no_op def leave_AddAssign_whitespace_before(self, node: "AddAssign") -> None: pass @mark_no_op def visit_AddAssign_whitespace_after(self, node: "AddAssign") -> None: pass @mark_no_op def leave_AddAssign_whitespace_after(self, node: "AddAssign") 
-> None: pass @mark_no_op def visit_And(self, node: "And") -> Optional[bool]: pass @mark_no_op def visit_And_whitespace_before(self, node: "And") -> None: pass @mark_no_op def leave_And_whitespace_before(self, node: "And") -> None: pass @mark_no_op def visit_And_whitespace_after(self, node: "And") -> None: pass @mark_no_op def leave_And_whitespace_after(self, node: "And") -> None: pass @mark_no_op def visit_AnnAssign(self, node: "AnnAssign") -> Optional[bool]: pass @mark_no_op def visit_AnnAssign_target(self, node: "AnnAssign") -> None: pass @mark_no_op def leave_AnnAssign_target(self, node: "AnnAssign") -> None: pass @mark_no_op def visit_AnnAssign_annotation(self, node: "AnnAssign") -> None: pass @mark_no_op def leave_AnnAssign_annotation(self, node: "AnnAssign") -> None: pass @mark_no_op def visit_AnnAssign_value(self, node: "AnnAssign") -> None: pass @mark_no_op def leave_AnnAssign_value(self, node: "AnnAssign") -> None: pass @mark_no_op def visit_AnnAssign_equal(self, node: "AnnAssign") -> None: pass @mark_no_op def leave_AnnAssign_equal(self, node: "AnnAssign") -> None: pass @mark_no_op def visit_AnnAssign_semicolon(self, node: "AnnAssign") -> None: pass @mark_no_op def leave_AnnAssign_semicolon(self, node: "AnnAssign") -> None: pass @mark_no_op def visit_Annotation(self, node: "Annotation") -> Optional[bool]: pass @mark_no_op def visit_Annotation_annotation(self, node: "Annotation") -> None: pass @mark_no_op def leave_Annotation_annotation(self, node: "Annotation") -> None: pass @mark_no_op def visit_Annotation_whitespace_before_indicator(self, node: "Annotation") -> None: pass @mark_no_op def leave_Annotation_whitespace_before_indicator(self, node: "Annotation") -> None: pass @mark_no_op def visit_Annotation_whitespace_after_indicator(self, node: "Annotation") -> None: pass @mark_no_op def leave_Annotation_whitespace_after_indicator(self, node: "Annotation") -> None: pass @mark_no_op def visit_Arg(self, node: "Arg") -> Optional[bool]: pass @mark_no_op def visit_Arg_value(self, node: "Arg") -> None: pass @mark_no_op def leave_Arg_value(self, node: "Arg") -> None: pass @mark_no_op def visit_Arg_keyword(self, node: "Arg") -> None: pass @mark_no_op def leave_Arg_keyword(self, node: "Arg") -> None: pass @mark_no_op def visit_Arg_equal(self, node: "Arg") -> None: pass @mark_no_op def leave_Arg_equal(self, node: "Arg") -> None: pass @mark_no_op def visit_Arg_comma(self, node: "Arg") -> None: pass @mark_no_op def leave_Arg_comma(self, node: "Arg") -> None: pass @mark_no_op def visit_Arg_star(self, node: "Arg") -> None: pass @mark_no_op def leave_Arg_star(self, node: "Arg") -> None: pass @mark_no_op def visit_Arg_whitespace_after_star(self, node: "Arg") -> None: pass @mark_no_op def leave_Arg_whitespace_after_star(self, node: "Arg") -> None: pass @mark_no_op def visit_Arg_whitespace_after_arg(self, node: "Arg") -> None: pass @mark_no_op def leave_Arg_whitespace_after_arg(self, node: "Arg") -> None: pass @mark_no_op def visit_AsName(self, node: "AsName") -> Optional[bool]: pass @mark_no_op def visit_AsName_name(self, node: "AsName") -> None: pass @mark_no_op def leave_AsName_name(self, node: "AsName") -> None: pass @mark_no_op def visit_AsName_whitespace_before_as(self, node: "AsName") -> None: pass @mark_no_op def leave_AsName_whitespace_before_as(self, node: "AsName") -> None: pass @mark_no_op def visit_AsName_whitespace_after_as(self, node: "AsName") -> None: pass @mark_no_op def leave_AsName_whitespace_after_as(self, node: "AsName") -> None: pass @mark_no_op def visit_Assert(self, node: 
"Assert") -> Optional[bool]: pass @mark_no_op def visit_Assert_test(self, node: "Assert") -> None: pass @mark_no_op def leave_Assert_test(self, node: "Assert") -> None: pass @mark_no_op def visit_Assert_msg(self, node: "Assert") -> None: pass @mark_no_op def leave_Assert_msg(self, node: "Assert") -> None: pass @mark_no_op def visit_Assert_comma(self, node: "Assert") -> None: pass @mark_no_op def leave_Assert_comma(self, node: "Assert") -> None: pass @mark_no_op def visit_Assert_whitespace_after_assert(self, node: "Assert") -> None: pass @mark_no_op def leave_Assert_whitespace_after_assert(self, node: "Assert") -> None: pass @mark_no_op def visit_Assert_semicolon(self, node: "Assert") -> None: pass @mark_no_op def leave_Assert_semicolon(self, node: "Assert") -> None: pass @mark_no_op def visit_Assign(self, node: "Assign") -> Optional[bool]: pass @mark_no_op def visit_Assign_targets(self, node: "Assign") -> None: pass @mark_no_op def leave_Assign_targets(self, node: "Assign") -> None: pass @mark_no_op def visit_Assign_value(self, node: "Assign") -> None: pass @mark_no_op def leave_Assign_value(self, node: "Assign") -> None: pass @mark_no_op def visit_Assign_semicolon(self, node: "Assign") -> None: pass @mark_no_op def leave_Assign_semicolon(self, node: "Assign") -> None: pass @mark_no_op def visit_AssignEqual(self, node: "AssignEqual") -> Optional[bool]: pass @mark_no_op def visit_AssignEqual_whitespace_before(self, node: "AssignEqual") -> None: pass @mark_no_op def leave_AssignEqual_whitespace_before(self, node: "AssignEqual") -> None: pass @mark_no_op def visit_AssignEqual_whitespace_after(self, node: "AssignEqual") -> None: pass @mark_no_op def leave_AssignEqual_whitespace_after(self, node: "AssignEqual") -> None: pass @mark_no_op def visit_AssignTarget(self, node: "AssignTarget") -> Optional[bool]: pass @mark_no_op def visit_AssignTarget_target(self, node: "AssignTarget") -> None: pass @mark_no_op def leave_AssignTarget_target(self, node: "AssignTarget") -> None: pass @mark_no_op def visit_AssignTarget_whitespace_before_equal(self, node: "AssignTarget") -> None: pass @mark_no_op def leave_AssignTarget_whitespace_before_equal(self, node: "AssignTarget") -> None: pass @mark_no_op def visit_AssignTarget_whitespace_after_equal(self, node: "AssignTarget") -> None: pass @mark_no_op def leave_AssignTarget_whitespace_after_equal(self, node: "AssignTarget") -> None: pass @mark_no_op def visit_Asynchronous(self, node: "Asynchronous") -> Optional[bool]: pass @mark_no_op def visit_Asynchronous_whitespace_after(self, node: "Asynchronous") -> None: pass @mark_no_op def leave_Asynchronous_whitespace_after(self, node: "Asynchronous") -> None: pass @mark_no_op def visit_Attribute(self, node: "Attribute") -> Optional[bool]: pass @mark_no_op def visit_Attribute_value(self, node: "Attribute") -> None: pass @mark_no_op def leave_Attribute_value(self, node: "Attribute") -> None: pass @mark_no_op def visit_Attribute_attr(self, node: "Attribute") -> None: pass @mark_no_op def leave_Attribute_attr(self, node: "Attribute") -> None: pass @mark_no_op def visit_Attribute_dot(self, node: "Attribute") -> None: pass @mark_no_op def leave_Attribute_dot(self, node: "Attribute") -> None: pass @mark_no_op def visit_Attribute_lpar(self, node: "Attribute") -> None: pass @mark_no_op def leave_Attribute_lpar(self, node: "Attribute") -> None: pass @mark_no_op def visit_Attribute_rpar(self, node: "Attribute") -> None: pass @mark_no_op def leave_Attribute_rpar(self, node: "Attribute") -> None: pass @mark_no_op def 
visit_AugAssign(self, node: "AugAssign") -> Optional[bool]: pass @mark_no_op def visit_AugAssign_target(self, node: "AugAssign") -> None: pass @mark_no_op def leave_AugAssign_target(self, node: "AugAssign") -> None: pass @mark_no_op def visit_AugAssign_operator(self, node: "AugAssign") -> None: pass @mark_no_op def leave_AugAssign_operator(self, node: "AugAssign") -> None: pass @mark_no_op def visit_AugAssign_value(self, node: "AugAssign") -> None: pass @mark_no_op def leave_AugAssign_value(self, node: "AugAssign") -> None: pass @mark_no_op def visit_AugAssign_semicolon(self, node: "AugAssign") -> None: pass @mark_no_op def leave_AugAssign_semicolon(self, node: "AugAssign") -> None: pass @mark_no_op def visit_Await(self, node: "Await") -> Optional[bool]: pass @mark_no_op def visit_Await_expression(self, node: "Await") -> None: pass @mark_no_op def leave_Await_expression(self, node: "Await") -> None: pass @mark_no_op def visit_Await_lpar(self, node: "Await") -> None: pass @mark_no_op def leave_Await_lpar(self, node: "Await") -> None: pass @mark_no_op def visit_Await_rpar(self, node: "Await") -> None: pass @mark_no_op def leave_Await_rpar(self, node: "Await") -> None: pass @mark_no_op def visit_Await_whitespace_after_await(self, node: "Await") -> None: pass @mark_no_op def leave_Await_whitespace_after_await(self, node: "Await") -> None: pass @mark_no_op def visit_BinaryOperation(self, node: "BinaryOperation") -> Optional[bool]: pass @mark_no_op def visit_BinaryOperation_left(self, node: "BinaryOperation") -> None: pass @mark_no_op def leave_BinaryOperation_left(self, node: "BinaryOperation") -> None: pass @mark_no_op def visit_BinaryOperation_operator(self, node: "BinaryOperation") -> None: pass @mark_no_op def leave_BinaryOperation_operator(self, node: "BinaryOperation") -> None: pass @mark_no_op def visit_BinaryOperation_right(self, node: "BinaryOperation") -> None: pass @mark_no_op def leave_BinaryOperation_right(self, node: "BinaryOperation") -> None: pass @mark_no_op def visit_BinaryOperation_lpar(self, node: "BinaryOperation") -> None: pass @mark_no_op def leave_BinaryOperation_lpar(self, node: "BinaryOperation") -> None: pass @mark_no_op def visit_BinaryOperation_rpar(self, node: "BinaryOperation") -> None: pass @mark_no_op def leave_BinaryOperation_rpar(self, node: "BinaryOperation") -> None: pass @mark_no_op def visit_BitAnd(self, node: "BitAnd") -> Optional[bool]: pass @mark_no_op def visit_BitAnd_whitespace_before(self, node: "BitAnd") -> None: pass @mark_no_op def leave_BitAnd_whitespace_before(self, node: "BitAnd") -> None: pass @mark_no_op def visit_BitAnd_whitespace_after(self, node: "BitAnd") -> None: pass @mark_no_op def leave_BitAnd_whitespace_after(self, node: "BitAnd") -> None: pass @mark_no_op def visit_BitAndAssign(self, node: "BitAndAssign") -> Optional[bool]: pass @mark_no_op def visit_BitAndAssign_whitespace_before(self, node: "BitAndAssign") -> None: pass @mark_no_op def leave_BitAndAssign_whitespace_before(self, node: "BitAndAssign") -> None: pass @mark_no_op def visit_BitAndAssign_whitespace_after(self, node: "BitAndAssign") -> None: pass @mark_no_op def leave_BitAndAssign_whitespace_after(self, node: "BitAndAssign") -> None: pass @mark_no_op def visit_BitInvert(self, node: "BitInvert") -> Optional[bool]: pass @mark_no_op def visit_BitInvert_whitespace_after(self, node: "BitInvert") -> None: pass @mark_no_op def leave_BitInvert_whitespace_after(self, node: "BitInvert") -> None: pass @mark_no_op def visit_BitOr(self, node: "BitOr") -> Optional[bool]: pass 
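# NOTE: Each `visit_<Node>` default above is flagged with @mark_no_op so the
# traversal machinery can cheaply skip hooks that a subclass never overrides;
# the decorator itself lives in the typed-visitor base module. Overriding a
# hook activates it, and its Optional[bool] return value controls traversal:
# returning False skips that node's children. A hedged sketch (illustrative
# class name, public libcst API only), kept in a comment so this generated
# class body stays valid Python:
#
#     from typing import Optional
#     import libcst as cst
#
#     class CallCounter(cst.CSTVisitor):
#         def __init__(self) -> None:
#             self.count = 0
#
#         def visit_Call(self, node: cst.Call) -> Optional[bool]:
#             self.count += 1
#             return False  # skip children, so nested calls are not counted
#
#     counter = CallCounter()
#     cst.parse_module("f(g(x))\n").visit(counter)
#     assert counter.count == 1  # g(x) was skipped by returning False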
@mark_no_op def visit_BitOr_whitespace_before(self, node: "BitOr") -> None: pass @mark_no_op def leave_BitOr_whitespace_before(self, node: "BitOr") -> None: pass @mark_no_op def visit_BitOr_whitespace_after(self, node: "BitOr") -> None: pass @mark_no_op def leave_BitOr_whitespace_after(self, node: "BitOr") -> None: pass @mark_no_op def visit_BitOrAssign(self, node: "BitOrAssign") -> Optional[bool]: pass @mark_no_op def visit_BitOrAssign_whitespace_before(self, node: "BitOrAssign") -> None: pass @mark_no_op def leave_BitOrAssign_whitespace_before(self, node: "BitOrAssign") -> None: pass @mark_no_op def visit_BitOrAssign_whitespace_after(self, node: "BitOrAssign") -> None: pass @mark_no_op def leave_BitOrAssign_whitespace_after(self, node: "BitOrAssign") -> None: pass @mark_no_op def visit_BitXor(self, node: "BitXor") -> Optional[bool]: pass @mark_no_op def visit_BitXor_whitespace_before(self, node: "BitXor") -> None: pass @mark_no_op def leave_BitXor_whitespace_before(self, node: "BitXor") -> None: pass @mark_no_op def visit_BitXor_whitespace_after(self, node: "BitXor") -> None: pass @mark_no_op def leave_BitXor_whitespace_after(self, node: "BitXor") -> None: pass @mark_no_op def visit_BitXorAssign(self, node: "BitXorAssign") -> Optional[bool]: pass @mark_no_op def visit_BitXorAssign_whitespace_before(self, node: "BitXorAssign") -> None: pass @mark_no_op def leave_BitXorAssign_whitespace_before(self, node: "BitXorAssign") -> None: pass @mark_no_op def visit_BitXorAssign_whitespace_after(self, node: "BitXorAssign") -> None: pass @mark_no_op def leave_BitXorAssign_whitespace_after(self, node: "BitXorAssign") -> None: pass @mark_no_op def visit_BooleanOperation(self, node: "BooleanOperation") -> Optional[bool]: pass @mark_no_op def visit_BooleanOperation_left(self, node: "BooleanOperation") -> None: pass @mark_no_op def leave_BooleanOperation_left(self, node: "BooleanOperation") -> None: pass @mark_no_op def visit_BooleanOperation_operator(self, node: "BooleanOperation") -> None: pass @mark_no_op def leave_BooleanOperation_operator(self, node: "BooleanOperation") -> None: pass @mark_no_op def visit_BooleanOperation_right(self, node: "BooleanOperation") -> None: pass @mark_no_op def leave_BooleanOperation_right(self, node: "BooleanOperation") -> None: pass @mark_no_op def visit_BooleanOperation_lpar(self, node: "BooleanOperation") -> None: pass @mark_no_op def leave_BooleanOperation_lpar(self, node: "BooleanOperation") -> None: pass @mark_no_op def visit_BooleanOperation_rpar(self, node: "BooleanOperation") -> None: pass @mark_no_op def leave_BooleanOperation_rpar(self, node: "BooleanOperation") -> None: pass @mark_no_op def visit_Break(self, node: "Break") -> Optional[bool]: pass @mark_no_op def visit_Break_semicolon(self, node: "Break") -> None: pass @mark_no_op def leave_Break_semicolon(self, node: "Break") -> None: pass @mark_no_op def visit_Call(self, node: "Call") -> Optional[bool]: pass @mark_no_op def visit_Call_func(self, node: "Call") -> None: pass @mark_no_op def leave_Call_func(self, node: "Call") -> None: pass @mark_no_op def visit_Call_args(self, node: "Call") -> None: pass @mark_no_op def leave_Call_args(self, node: "Call") -> None: pass @mark_no_op def visit_Call_lpar(self, node: "Call") -> None: pass @mark_no_op def leave_Call_lpar(self, node: "Call") -> None: pass @mark_no_op def visit_Call_rpar(self, node: "Call") -> None: pass @mark_no_op def leave_Call_rpar(self, node: "Call") -> None: pass @mark_no_op def visit_Call_whitespace_after_func(self, node: "Call") -> None: pass 
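# NOTE: Alongside the per-node hooks, every syntactic attribute gets a paired
# visit_/leave_ hook: visit_Call_args fires immediately before a Call's args
# are traversed and leave_Call_args immediately after, letting a visitor
# scope its work to a single child field without re-walking the whole node.
# A hedged sketch (illustrative class name), again kept as a comment:
#
#     import libcst as cst
#
#     class ArgReporter(cst.CSTVisitor):
#         def visit_Call_args(self, node: cst.Call) -> None:
#             # Called once per Call node, just before its argument list.
#             print(f"call with {len(node.args)} arg(s)")
#
#     cst.parse_module("f(1, 2)\nprint()\n").visit(ArgReporter())
#     # call with 2 arg(s)
#     # call with 0 arg(s)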
@mark_no_op def leave_Call_whitespace_after_func(self, node: "Call") -> None: pass @mark_no_op def visit_Call_whitespace_before_args(self, node: "Call") -> None: pass @mark_no_op def leave_Call_whitespace_before_args(self, node: "Call") -> None: pass @mark_no_op def visit_ClassDef(self, node: "ClassDef") -> Optional[bool]: pass @mark_no_op def visit_ClassDef_name(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_name(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_body(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_body(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_bases(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_bases(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_keywords(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_keywords(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_decorators(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_decorators(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_lpar(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_lpar(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_rpar(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_rpar(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_leading_lines(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_leading_lines(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_lines_after_decorators(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_lines_after_decorators(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_whitespace_after_class(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_whitespace_after_class(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_whitespace_after_name(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_whitespace_after_name(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_whitespace_before_colon(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_whitespace_before_colon(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_type_parameters(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_type_parameters(self, node: "ClassDef") -> None: pass @mark_no_op def visit_ClassDef_whitespace_after_type_parameters(self, node: "ClassDef") -> None: pass @mark_no_op def leave_ClassDef_whitespace_after_type_parameters(self, node: "ClassDef") -> None: pass @mark_no_op def visit_Colon(self, node: "Colon") -> Optional[bool]: pass @mark_no_op def visit_Colon_whitespace_before(self, node: "Colon") -> None: pass @mark_no_op def leave_Colon_whitespace_before(self, node: "Colon") -> None: pass @mark_no_op def visit_Colon_whitespace_after(self, node: "Colon") -> None: pass @mark_no_op def leave_Colon_whitespace_after(self, node: "Colon") -> None: pass @mark_no_op def visit_Comma(self, node: "Comma") -> Optional[bool]: pass @mark_no_op def visit_Comma_whitespace_before(self, node: "Comma") -> None: pass @mark_no_op def leave_Comma_whitespace_before(self, node: "Comma") -> None: pass @mark_no_op def visit_Comma_whitespace_after(self, node: "Comma") -> None: pass @mark_no_op def leave_Comma_whitespace_after(self, node: "Comma") -> None: pass @mark_no_op def visit_Comment(self, node: "Comment") -> Optional[bool]: pass @mark_no_op def 
visit_Comment_value(self, node: "Comment") -> None: pass @mark_no_op def leave_Comment_value(self, node: "Comment") -> None: pass @mark_no_op def visit_CompFor(self, node: "CompFor") -> Optional[bool]: pass @mark_no_op def visit_CompFor_target(self, node: "CompFor") -> None: pass @mark_no_op def leave_CompFor_target(self, node: "CompFor") -> None: pass @mark_no_op def visit_CompFor_iter(self, node: "CompFor") -> None: pass @mark_no_op def leave_CompFor_iter(self, node: "CompFor") -> None: pass @mark_no_op def visit_CompFor_ifs(self, node: "CompFor") -> None: pass @mark_no_op def leave_CompFor_ifs(self, node: "CompFor") -> None: pass @mark_no_op def visit_CompFor_inner_for_in(self, node: "CompFor") -> None: pass @mark_no_op def leave_CompFor_inner_for_in(self, node: "CompFor") -> None: pass @mark_no_op def visit_CompFor_asynchronous(self, node: "CompFor") -> None: pass @mark_no_op def leave_CompFor_asynchronous(self, node: "CompFor") -> None: pass @mark_no_op def visit_CompFor_whitespace_before(self, node: "CompFor") -> None: pass @mark_no_op def leave_CompFor_whitespace_before(self, node: "CompFor") -> None: pass @mark_no_op def visit_CompFor_whitespace_after_for(self, node: "CompFor") -> None: pass @mark_no_op def leave_CompFor_whitespace_after_for(self, node: "CompFor") -> None: pass @mark_no_op def visit_CompFor_whitespace_before_in(self, node: "CompFor") -> None: pass @mark_no_op def leave_CompFor_whitespace_before_in(self, node: "CompFor") -> None: pass @mark_no_op def visit_CompFor_whitespace_after_in(self, node: "CompFor") -> None: pass @mark_no_op def leave_CompFor_whitespace_after_in(self, node: "CompFor") -> None: pass @mark_no_op def visit_CompIf(self, node: "CompIf") -> Optional[bool]: pass @mark_no_op def visit_CompIf_test(self, node: "CompIf") -> None: pass @mark_no_op def leave_CompIf_test(self, node: "CompIf") -> None: pass @mark_no_op def visit_CompIf_whitespace_before(self, node: "CompIf") -> None: pass @mark_no_op def leave_CompIf_whitespace_before(self, node: "CompIf") -> None: pass @mark_no_op def visit_CompIf_whitespace_before_test(self, node: "CompIf") -> None: pass @mark_no_op def leave_CompIf_whitespace_before_test(self, node: "CompIf") -> None: pass @mark_no_op def visit_Comparison(self, node: "Comparison") -> Optional[bool]: pass @mark_no_op def visit_Comparison_left(self, node: "Comparison") -> None: pass @mark_no_op def leave_Comparison_left(self, node: "Comparison") -> None: pass @mark_no_op def visit_Comparison_comparisons(self, node: "Comparison") -> None: pass @mark_no_op def leave_Comparison_comparisons(self, node: "Comparison") -> None: pass @mark_no_op def visit_Comparison_lpar(self, node: "Comparison") -> None: pass @mark_no_op def leave_Comparison_lpar(self, node: "Comparison") -> None: pass @mark_no_op def visit_Comparison_rpar(self, node: "Comparison") -> None: pass @mark_no_op def leave_Comparison_rpar(self, node: "Comparison") -> None: pass @mark_no_op def visit_ComparisonTarget(self, node: "ComparisonTarget") -> Optional[bool]: pass @mark_no_op def visit_ComparisonTarget_operator(self, node: "ComparisonTarget") -> None: pass @mark_no_op def leave_ComparisonTarget_operator(self, node: "ComparisonTarget") -> None: pass @mark_no_op def visit_ComparisonTarget_comparator(self, node: "ComparisonTarget") -> None: pass @mark_no_op def leave_ComparisonTarget_comparator(self, node: "ComparisonTarget") -> None: pass @mark_no_op def visit_ConcatenatedString(self, node: "ConcatenatedString") -> Optional[bool]: pass @mark_no_op def 
visit_ConcatenatedString_left(self, node: "ConcatenatedString") -> None: pass @mark_no_op def leave_ConcatenatedString_left(self, node: "ConcatenatedString") -> None: pass @mark_no_op def visit_ConcatenatedString_right(self, node: "ConcatenatedString") -> None: pass @mark_no_op def leave_ConcatenatedString_right(self, node: "ConcatenatedString") -> None: pass @mark_no_op def visit_ConcatenatedString_lpar(self, node: "ConcatenatedString") -> None: pass @mark_no_op def leave_ConcatenatedString_lpar(self, node: "ConcatenatedString") -> None: pass @mark_no_op def visit_ConcatenatedString_rpar(self, node: "ConcatenatedString") -> None: pass @mark_no_op def leave_ConcatenatedString_rpar(self, node: "ConcatenatedString") -> None: pass @mark_no_op def visit_ConcatenatedString_whitespace_between( self, node: "ConcatenatedString" ) -> None: pass @mark_no_op def leave_ConcatenatedString_whitespace_between( self, node: "ConcatenatedString" ) -> None: pass @mark_no_op def visit_Continue(self, node: "Continue") -> Optional[bool]: pass @mark_no_op def visit_Continue_semicolon(self, node: "Continue") -> None: pass @mark_no_op def leave_Continue_semicolon(self, node: "Continue") -> None: pass @mark_no_op def visit_Decorator(self, node: "Decorator") -> Optional[bool]: pass @mark_no_op def visit_Decorator_decorator(self, node: "Decorator") -> None: pass @mark_no_op def leave_Decorator_decorator(self, node: "Decorator") -> None: pass @mark_no_op def visit_Decorator_leading_lines(self, node: "Decorator") -> None: pass @mark_no_op def leave_Decorator_leading_lines(self, node: "Decorator") -> None: pass @mark_no_op def visit_Decorator_whitespace_after_at(self, node: "Decorator") -> None: pass @mark_no_op def leave_Decorator_whitespace_after_at(self, node: "Decorator") -> None: pass @mark_no_op def visit_Decorator_trailing_whitespace(self, node: "Decorator") -> None: pass @mark_no_op def leave_Decorator_trailing_whitespace(self, node: "Decorator") -> None: pass @mark_no_op def visit_Del(self, node: "Del") -> Optional[bool]: pass @mark_no_op def visit_Del_target(self, node: "Del") -> None: pass @mark_no_op def leave_Del_target(self, node: "Del") -> None: pass @mark_no_op def visit_Del_whitespace_after_del(self, node: "Del") -> None: pass @mark_no_op def leave_Del_whitespace_after_del(self, node: "Del") -> None: pass @mark_no_op def visit_Del_semicolon(self, node: "Del") -> None: pass @mark_no_op def leave_Del_semicolon(self, node: "Del") -> None: pass @mark_no_op def visit_Dict(self, node: "Dict") -> Optional[bool]: pass @mark_no_op def visit_Dict_elements(self, node: "Dict") -> None: pass @mark_no_op def leave_Dict_elements(self, node: "Dict") -> None: pass @mark_no_op def visit_Dict_lbrace(self, node: "Dict") -> None: pass @mark_no_op def leave_Dict_lbrace(self, node: "Dict") -> None: pass @mark_no_op def visit_Dict_rbrace(self, node: "Dict") -> None: pass @mark_no_op def leave_Dict_rbrace(self, node: "Dict") -> None: pass @mark_no_op def visit_Dict_lpar(self, node: "Dict") -> None: pass @mark_no_op def leave_Dict_lpar(self, node: "Dict") -> None: pass @mark_no_op def visit_Dict_rpar(self, node: "Dict") -> None: pass @mark_no_op def leave_Dict_rpar(self, node: "Dict") -> None: pass @mark_no_op def visit_DictComp(self, node: "DictComp") -> Optional[bool]: pass @mark_no_op def visit_DictComp_key(self, node: "DictComp") -> None: pass @mark_no_op def leave_DictComp_key(self, node: "DictComp") -> None: pass @mark_no_op def visit_DictComp_value(self, node: "DictComp") -> None: pass @mark_no_op def 
leave_DictComp_value(self, node: "DictComp") -> None: pass @mark_no_op def visit_DictComp_for_in(self, node: "DictComp") -> None: pass @mark_no_op def leave_DictComp_for_in(self, node: "DictComp") -> None: pass @mark_no_op def visit_DictComp_lbrace(self, node: "DictComp") -> None: pass @mark_no_op def leave_DictComp_lbrace(self, node: "DictComp") -> None: pass @mark_no_op def visit_DictComp_rbrace(self, node: "DictComp") -> None: pass @mark_no_op def leave_DictComp_rbrace(self, node: "DictComp") -> None: pass @mark_no_op def visit_DictComp_lpar(self, node: "DictComp") -> None: pass @mark_no_op def leave_DictComp_lpar(self, node: "DictComp") -> None: pass @mark_no_op def visit_DictComp_rpar(self, node: "DictComp") -> None: pass @mark_no_op def leave_DictComp_rpar(self, node: "DictComp") -> None: pass @mark_no_op def visit_DictComp_whitespace_before_colon(self, node: "DictComp") -> None: pass @mark_no_op def leave_DictComp_whitespace_before_colon(self, node: "DictComp") -> None: pass @mark_no_op def visit_DictComp_whitespace_after_colon(self, node: "DictComp") -> None: pass @mark_no_op def leave_DictComp_whitespace_after_colon(self, node: "DictComp") -> None: pass @mark_no_op def visit_DictElement(self, node: "DictElement") -> Optional[bool]: pass @mark_no_op def visit_DictElement_key(self, node: "DictElement") -> None: pass @mark_no_op def leave_DictElement_key(self, node: "DictElement") -> None: pass @mark_no_op def visit_DictElement_value(self, node: "DictElement") -> None: pass @mark_no_op def leave_DictElement_value(self, node: "DictElement") -> None: pass @mark_no_op def visit_DictElement_comma(self, node: "DictElement") -> None: pass @mark_no_op def leave_DictElement_comma(self, node: "DictElement") -> None: pass @mark_no_op def visit_DictElement_whitespace_before_colon(self, node: "DictElement") -> None: pass @mark_no_op def leave_DictElement_whitespace_before_colon(self, node: "DictElement") -> None: pass @mark_no_op def visit_DictElement_whitespace_after_colon(self, node: "DictElement") -> None: pass @mark_no_op def leave_DictElement_whitespace_after_colon(self, node: "DictElement") -> None: pass @mark_no_op def visit_Divide(self, node: "Divide") -> Optional[bool]: pass @mark_no_op def visit_Divide_whitespace_before(self, node: "Divide") -> None: pass @mark_no_op def leave_Divide_whitespace_before(self, node: "Divide") -> None: pass @mark_no_op def visit_Divide_whitespace_after(self, node: "Divide") -> None: pass @mark_no_op def leave_Divide_whitespace_after(self, node: "Divide") -> None: pass @mark_no_op def visit_DivideAssign(self, node: "DivideAssign") -> Optional[bool]: pass @mark_no_op def visit_DivideAssign_whitespace_before(self, node: "DivideAssign") -> None: pass @mark_no_op def leave_DivideAssign_whitespace_before(self, node: "DivideAssign") -> None: pass @mark_no_op def visit_DivideAssign_whitespace_after(self, node: "DivideAssign") -> None: pass @mark_no_op def leave_DivideAssign_whitespace_after(self, node: "DivideAssign") -> None: pass @mark_no_op def visit_Dot(self, node: "Dot") -> Optional[bool]: pass @mark_no_op def visit_Dot_whitespace_before(self, node: "Dot") -> None: pass @mark_no_op def leave_Dot_whitespace_before(self, node: "Dot") -> None: pass @mark_no_op def visit_Dot_whitespace_after(self, node: "Dot") -> None: pass @mark_no_op def leave_Dot_whitespace_after(self, node: "Dot") -> None: pass @mark_no_op def visit_Element(self, node: "Element") -> Optional[bool]: pass @mark_no_op def visit_Element_value(self, node: "Element") -> None: pass @mark_no_op def 
leave_Element_value(self, node: "Element") -> None: pass @mark_no_op def visit_Element_comma(self, node: "Element") -> None: pass @mark_no_op def leave_Element_comma(self, node: "Element") -> None: pass @mark_no_op def visit_Ellipsis(self, node: "Ellipsis") -> Optional[bool]: pass @mark_no_op def visit_Ellipsis_lpar(self, node: "Ellipsis") -> None: pass @mark_no_op def leave_Ellipsis_lpar(self, node: "Ellipsis") -> None: pass @mark_no_op def visit_Ellipsis_rpar(self, node: "Ellipsis") -> None: pass @mark_no_op def leave_Ellipsis_rpar(self, node: "Ellipsis") -> None: pass @mark_no_op def visit_Else(self, node: "Else") -> Optional[bool]: pass @mark_no_op def visit_Else_body(self, node: "Else") -> None: pass @mark_no_op def leave_Else_body(self, node: "Else") -> None: pass @mark_no_op def visit_Else_leading_lines(self, node: "Else") -> None: pass @mark_no_op def leave_Else_leading_lines(self, node: "Else") -> None: pass @mark_no_op def visit_Else_whitespace_before_colon(self, node: "Else") -> None: pass @mark_no_op def leave_Else_whitespace_before_colon(self, node: "Else") -> None: pass @mark_no_op def visit_EmptyLine(self, node: "EmptyLine") -> Optional[bool]: pass @mark_no_op def visit_EmptyLine_indent(self, node: "EmptyLine") -> None: pass @mark_no_op def leave_EmptyLine_indent(self, node: "EmptyLine") -> None: pass @mark_no_op def visit_EmptyLine_whitespace(self, node: "EmptyLine") -> None: pass @mark_no_op def leave_EmptyLine_whitespace(self, node: "EmptyLine") -> None: pass @mark_no_op def visit_EmptyLine_comment(self, node: "EmptyLine") -> None: pass @mark_no_op def leave_EmptyLine_comment(self, node: "EmptyLine") -> None: pass @mark_no_op def visit_EmptyLine_newline(self, node: "EmptyLine") -> None: pass @mark_no_op def leave_EmptyLine_newline(self, node: "EmptyLine") -> None: pass @mark_no_op def visit_Equal(self, node: "Equal") -> Optional[bool]: pass @mark_no_op def visit_Equal_whitespace_before(self, node: "Equal") -> None: pass @mark_no_op def leave_Equal_whitespace_before(self, node: "Equal") -> None: pass @mark_no_op def visit_Equal_whitespace_after(self, node: "Equal") -> None: pass @mark_no_op def leave_Equal_whitespace_after(self, node: "Equal") -> None: pass @mark_no_op def visit_ExceptHandler(self, node: "ExceptHandler") -> Optional[bool]: pass @mark_no_op def visit_ExceptHandler_body(self, node: "ExceptHandler") -> None: pass @mark_no_op def leave_ExceptHandler_body(self, node: "ExceptHandler") -> None: pass @mark_no_op def visit_ExceptHandler_type(self, node: "ExceptHandler") -> None: pass @mark_no_op def leave_ExceptHandler_type(self, node: "ExceptHandler") -> None: pass @mark_no_op def visit_ExceptHandler_name(self, node: "ExceptHandler") -> None: pass @mark_no_op def leave_ExceptHandler_name(self, node: "ExceptHandler") -> None: pass @mark_no_op def visit_ExceptHandler_leading_lines(self, node: "ExceptHandler") -> None: pass @mark_no_op def leave_ExceptHandler_leading_lines(self, node: "ExceptHandler") -> None: pass @mark_no_op def visit_ExceptHandler_whitespace_after_except( self, node: "ExceptHandler" ) -> None: pass @mark_no_op def leave_ExceptHandler_whitespace_after_except( self, node: "ExceptHandler" ) -> None: pass @mark_no_op def visit_ExceptHandler_whitespace_before_colon( self, node: "ExceptHandler" ) -> None: pass @mark_no_op def leave_ExceptHandler_whitespace_before_colon( self, node: "ExceptHandler" ) -> None: pass @mark_no_op def visit_ExceptStarHandler(self, node: "ExceptStarHandler") -> Optional[bool]: pass @mark_no_op def 
visit_ExceptStarHandler_body(self, node: "ExceptStarHandler") -> None: pass @mark_no_op def leave_ExceptStarHandler_body(self, node: "ExceptStarHandler") -> None: pass @mark_no_op def visit_ExceptStarHandler_type(self, node: "ExceptStarHandler") -> None: pass @mark_no_op def leave_ExceptStarHandler_type(self, node: "ExceptStarHandler") -> None: pass @mark_no_op def visit_ExceptStarHandler_name(self, node: "ExceptStarHandler") -> None: pass @mark_no_op def leave_ExceptStarHandler_name(self, node: "ExceptStarHandler") -> None: pass @mark_no_op def visit_ExceptStarHandler_leading_lines(self, node: "ExceptStarHandler") -> None: pass @mark_no_op def leave_ExceptStarHandler_leading_lines(self, node: "ExceptStarHandler") -> None: pass @mark_no_op def visit_ExceptStarHandler_whitespace_after_except( self, node: "ExceptStarHandler" ) -> None: pass @mark_no_op def leave_ExceptStarHandler_whitespace_after_except( self, node: "ExceptStarHandler" ) -> None: pass @mark_no_op def visit_ExceptStarHandler_whitespace_after_star( self, node: "ExceptStarHandler" ) -> None: pass @mark_no_op def leave_ExceptStarHandler_whitespace_after_star( self, node: "ExceptStarHandler" ) -> None: pass @mark_no_op def visit_ExceptStarHandler_whitespace_before_colon( self, node: "ExceptStarHandler" ) -> None: pass @mark_no_op def leave_ExceptStarHandler_whitespace_before_colon( self, node: "ExceptStarHandler" ) -> None: pass @mark_no_op def visit_Expr(self, node: "Expr") -> Optional[bool]: pass @mark_no_op def visit_Expr_value(self, node: "Expr") -> None: pass @mark_no_op def leave_Expr_value(self, node: "Expr") -> None: pass @mark_no_op def visit_Expr_semicolon(self, node: "Expr") -> None: pass @mark_no_op def leave_Expr_semicolon(self, node: "Expr") -> None: pass @mark_no_op def visit_Finally(self, node: "Finally") -> Optional[bool]: pass @mark_no_op def visit_Finally_body(self, node: "Finally") -> None: pass @mark_no_op def leave_Finally_body(self, node: "Finally") -> None: pass @mark_no_op def visit_Finally_leading_lines(self, node: "Finally") -> None: pass @mark_no_op def leave_Finally_leading_lines(self, node: "Finally") -> None: pass @mark_no_op def visit_Finally_whitespace_before_colon(self, node: "Finally") -> None: pass @mark_no_op def leave_Finally_whitespace_before_colon(self, node: "Finally") -> None: pass @mark_no_op def visit_Float(self, node: "Float") -> Optional[bool]: pass @mark_no_op def visit_Float_value(self, node: "Float") -> None: pass @mark_no_op def leave_Float_value(self, node: "Float") -> None: pass @mark_no_op def visit_Float_lpar(self, node: "Float") -> None: pass @mark_no_op def leave_Float_lpar(self, node: "Float") -> None: pass @mark_no_op def visit_Float_rpar(self, node: "Float") -> None: pass @mark_no_op def leave_Float_rpar(self, node: "Float") -> None: pass @mark_no_op def visit_FloorDivide(self, node: "FloorDivide") -> Optional[bool]: pass @mark_no_op def visit_FloorDivide_whitespace_before(self, node: "FloorDivide") -> None: pass @mark_no_op def leave_FloorDivide_whitespace_before(self, node: "FloorDivide") -> None: pass @mark_no_op def visit_FloorDivide_whitespace_after(self, node: "FloorDivide") -> None: pass @mark_no_op def leave_FloorDivide_whitespace_after(self, node: "FloorDivide") -> None: pass @mark_no_op def visit_FloorDivideAssign(self, node: "FloorDivideAssign") -> Optional[bool]: pass @mark_no_op def visit_FloorDivideAssign_whitespace_before( self, node: "FloorDivideAssign" ) -> None: pass @mark_no_op def leave_FloorDivideAssign_whitespace_before( self, node: 
"FloorDivideAssign" ) -> None: pass @mark_no_op def visit_FloorDivideAssign_whitespace_after( self, node: "FloorDivideAssign" ) -> None: pass @mark_no_op def leave_FloorDivideAssign_whitespace_after( self, node: "FloorDivideAssign" ) -> None: pass @mark_no_op def visit_For(self, node: "For") -> Optional[bool]: pass @mark_no_op def visit_For_target(self, node: "For") -> None: pass @mark_no_op def leave_For_target(self, node: "For") -> None: pass @mark_no_op def visit_For_iter(self, node: "For") -> None: pass @mark_no_op def leave_For_iter(self, node: "For") -> None: pass @mark_no_op def visit_For_body(self, node: "For") -> None: pass @mark_no_op def leave_For_body(self, node: "For") -> None: pass @mark_no_op def visit_For_orelse(self, node: "For") -> None: pass @mark_no_op def leave_For_orelse(self, node: "For") -> None: pass @mark_no_op def visit_For_asynchronous(self, node: "For") -> None: pass @mark_no_op def leave_For_asynchronous(self, node: "For") -> None: pass @mark_no_op def visit_For_leading_lines(self, node: "For") -> None: pass @mark_no_op def leave_For_leading_lines(self, node: "For") -> None: pass @mark_no_op def visit_For_whitespace_after_for(self, node: "For") -> None: pass @mark_no_op def leave_For_whitespace_after_for(self, node: "For") -> None: pass @mark_no_op def visit_For_whitespace_before_in(self, node: "For") -> None: pass @mark_no_op def leave_For_whitespace_before_in(self, node: "For") -> None: pass @mark_no_op def visit_For_whitespace_after_in(self, node: "For") -> None: pass @mark_no_op def leave_For_whitespace_after_in(self, node: "For") -> None: pass @mark_no_op def visit_For_whitespace_before_colon(self, node: "For") -> None: pass @mark_no_op def leave_For_whitespace_before_colon(self, node: "For") -> None: pass @mark_no_op def visit_FormattedString(self, node: "FormattedString") -> Optional[bool]: pass @mark_no_op def visit_FormattedString_parts(self, node: "FormattedString") -> None: pass @mark_no_op def leave_FormattedString_parts(self, node: "FormattedString") -> None: pass @mark_no_op def visit_FormattedString_start(self, node: "FormattedString") -> None: pass @mark_no_op def leave_FormattedString_start(self, node: "FormattedString") -> None: pass @mark_no_op def visit_FormattedString_end(self, node: "FormattedString") -> None: pass @mark_no_op def leave_FormattedString_end(self, node: "FormattedString") -> None: pass @mark_no_op def visit_FormattedString_lpar(self, node: "FormattedString") -> None: pass @mark_no_op def leave_FormattedString_lpar(self, node: "FormattedString") -> None: pass @mark_no_op def visit_FormattedString_rpar(self, node: "FormattedString") -> None: pass @mark_no_op def leave_FormattedString_rpar(self, node: "FormattedString") -> None: pass @mark_no_op def visit_FormattedStringExpression( self, node: "FormattedStringExpression" ) -> Optional[bool]: pass @mark_no_op def visit_FormattedStringExpression_expression( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def leave_FormattedStringExpression_expression( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def visit_FormattedStringExpression_conversion( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def leave_FormattedStringExpression_conversion( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def visit_FormattedStringExpression_format_spec( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def leave_FormattedStringExpression_format_spec( self, node: "FormattedStringExpression" ) -> None: 
pass @mark_no_op def visit_FormattedStringExpression_whitespace_before_expression( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def leave_FormattedStringExpression_whitespace_before_expression( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def visit_FormattedStringExpression_whitespace_after_expression( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def leave_FormattedStringExpression_whitespace_after_expression( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def visit_FormattedStringExpression_equal( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def leave_FormattedStringExpression_equal( self, node: "FormattedStringExpression" ) -> None: pass @mark_no_op def visit_FormattedStringText(self, node: "FormattedStringText") -> Optional[bool]: pass @mark_no_op def visit_FormattedStringText_value(self, node: "FormattedStringText") -> None: pass @mark_no_op def leave_FormattedStringText_value(self, node: "FormattedStringText") -> None: pass @mark_no_op def visit_From(self, node: "From") -> Optional[bool]: pass @mark_no_op def visit_From_item(self, node: "From") -> None: pass @mark_no_op def leave_From_item(self, node: "From") -> None: pass @mark_no_op def visit_From_whitespace_before_from(self, node: "From") -> None: pass @mark_no_op def leave_From_whitespace_before_from(self, node: "From") -> None: pass @mark_no_op def visit_From_whitespace_after_from(self, node: "From") -> None: pass @mark_no_op def leave_From_whitespace_after_from(self, node: "From") -> None: pass @mark_no_op def visit_FunctionDef(self, node: "FunctionDef") -> Optional[bool]: pass @mark_no_op def visit_FunctionDef_name(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_name(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_params(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_params(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_body(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_body(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_decorators(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_decorators(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_returns(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_returns(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_asynchronous(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_asynchronous(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_leading_lines(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_leading_lines(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_lines_after_decorators(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_lines_after_decorators(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_whitespace_after_def(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_whitespace_after_def(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_whitespace_after_name(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_whitespace_after_name(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_whitespace_before_params(self, node: "FunctionDef") -> None: pass @mark_no_op def 
leave_FunctionDef_whitespace_before_params(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_whitespace_before_colon(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_whitespace_before_colon(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_type_parameters(self, node: "FunctionDef") -> None: pass @mark_no_op def leave_FunctionDef_type_parameters(self, node: "FunctionDef") -> None: pass @mark_no_op def visit_FunctionDef_whitespace_after_type_parameters( self, node: "FunctionDef" ) -> None: pass @mark_no_op def leave_FunctionDef_whitespace_after_type_parameters( self, node: "FunctionDef" ) -> None: pass @mark_no_op def visit_GeneratorExp(self, node: "GeneratorExp") -> Optional[bool]: pass @mark_no_op def visit_GeneratorExp_elt(self, node: "GeneratorExp") -> None: pass @mark_no_op def leave_GeneratorExp_elt(self, node: "GeneratorExp") -> None: pass @mark_no_op def visit_GeneratorExp_for_in(self, node: "GeneratorExp") -> None: pass @mark_no_op def leave_GeneratorExp_for_in(self, node: "GeneratorExp") -> None: pass @mark_no_op def visit_GeneratorExp_lpar(self, node: "GeneratorExp") -> None: pass @mark_no_op def leave_GeneratorExp_lpar(self, node: "GeneratorExp") -> None: pass @mark_no_op def visit_GeneratorExp_rpar(self, node: "GeneratorExp") -> None: pass @mark_no_op def leave_GeneratorExp_rpar(self, node: "GeneratorExp") -> None: pass @mark_no_op def visit_Global(self, node: "Global") -> Optional[bool]: pass @mark_no_op def visit_Global_names(self, node: "Global") -> None: pass @mark_no_op def leave_Global_names(self, node: "Global") -> None: pass @mark_no_op def visit_Global_whitespace_after_global(self, node: "Global") -> None: pass @mark_no_op def leave_Global_whitespace_after_global(self, node: "Global") -> None: pass @mark_no_op def visit_Global_semicolon(self, node: "Global") -> None: pass @mark_no_op def leave_Global_semicolon(self, node: "Global") -> None: pass @mark_no_op def visit_GreaterThan(self, node: "GreaterThan") -> Optional[bool]: pass @mark_no_op def visit_GreaterThan_whitespace_before(self, node: "GreaterThan") -> None: pass @mark_no_op def leave_GreaterThan_whitespace_before(self, node: "GreaterThan") -> None: pass @mark_no_op def visit_GreaterThan_whitespace_after(self, node: "GreaterThan") -> None: pass @mark_no_op def leave_GreaterThan_whitespace_after(self, node: "GreaterThan") -> None: pass @mark_no_op def visit_GreaterThanEqual(self, node: "GreaterThanEqual") -> Optional[bool]: pass @mark_no_op def visit_GreaterThanEqual_whitespace_before( self, node: "GreaterThanEqual" ) -> None: pass @mark_no_op def leave_GreaterThanEqual_whitespace_before( self, node: "GreaterThanEqual" ) -> None: pass @mark_no_op def visit_GreaterThanEqual_whitespace_after(self, node: "GreaterThanEqual") -> None: pass @mark_no_op def leave_GreaterThanEqual_whitespace_after(self, node: "GreaterThanEqual") -> None: pass @mark_no_op def visit_If(self, node: "If") -> Optional[bool]: pass @mark_no_op def visit_If_test(self, node: "If") -> None: pass @mark_no_op def leave_If_test(self, node: "If") -> None: pass @mark_no_op def visit_If_body(self, node: "If") -> None: pass @mark_no_op def leave_If_body(self, node: "If") -> None: pass @mark_no_op def visit_If_orelse(self, node: "If") -> None: pass @mark_no_op def leave_If_orelse(self, node: "If") -> None: pass @mark_no_op def visit_If_leading_lines(self, node: "If") -> None: pass @mark_no_op def leave_If_leading_lines(self, node: "If") -> None: pass @mark_no_op def 
visit_If_whitespace_before_test(self, node: "If") -> None: pass @mark_no_op def leave_If_whitespace_before_test(self, node: "If") -> None: pass @mark_no_op def visit_If_whitespace_after_test(self, node: "If") -> None: pass @mark_no_op def leave_If_whitespace_after_test(self, node: "If") -> None: pass @mark_no_op def visit_IfExp(self, node: "IfExp") -> Optional[bool]: pass @mark_no_op def visit_IfExp_test(self, node: "IfExp") -> None: pass @mark_no_op def leave_IfExp_test(self, node: "IfExp") -> None: pass @mark_no_op def visit_IfExp_body(self, node: "IfExp") -> None: pass @mark_no_op def leave_IfExp_body(self, node: "IfExp") -> None: pass @mark_no_op def visit_IfExp_orelse(self, node: "IfExp") -> None: pass @mark_no_op def leave_IfExp_orelse(self, node: "IfExp") -> None: pass @mark_no_op def visit_IfExp_lpar(self, node: "IfExp") -> None: pass @mark_no_op def leave_IfExp_lpar(self, node: "IfExp") -> None: pass @mark_no_op def visit_IfExp_rpar(self, node: "IfExp") -> None: pass @mark_no_op def leave_IfExp_rpar(self, node: "IfExp") -> None: pass @mark_no_op def visit_IfExp_whitespace_before_if(self, node: "IfExp") -> None: pass @mark_no_op def leave_IfExp_whitespace_before_if(self, node: "IfExp") -> None: pass @mark_no_op def visit_IfExp_whitespace_after_if(self, node: "IfExp") -> None: pass @mark_no_op def leave_IfExp_whitespace_after_if(self, node: "IfExp") -> None: pass @mark_no_op def visit_IfExp_whitespace_before_else(self, node: "IfExp") -> None: pass @mark_no_op def leave_IfExp_whitespace_before_else(self, node: "IfExp") -> None: pass @mark_no_op def visit_IfExp_whitespace_after_else(self, node: "IfExp") -> None: pass @mark_no_op def leave_IfExp_whitespace_after_else(self, node: "IfExp") -> None: pass @mark_no_op def visit_Imaginary(self, node: "Imaginary") -> Optional[bool]: pass @mark_no_op def visit_Imaginary_value(self, node: "Imaginary") -> None: pass @mark_no_op def leave_Imaginary_value(self, node: "Imaginary") -> None: pass @mark_no_op def visit_Imaginary_lpar(self, node: "Imaginary") -> None: pass @mark_no_op def leave_Imaginary_lpar(self, node: "Imaginary") -> None: pass @mark_no_op def visit_Imaginary_rpar(self, node: "Imaginary") -> None: pass @mark_no_op def leave_Imaginary_rpar(self, node: "Imaginary") -> None: pass @mark_no_op def visit_Import(self, node: "Import") -> Optional[bool]: pass @mark_no_op def visit_Import_names(self, node: "Import") -> None: pass @mark_no_op def leave_Import_names(self, node: "Import") -> None: pass @mark_no_op def visit_Import_semicolon(self, node: "Import") -> None: pass @mark_no_op def leave_Import_semicolon(self, node: "Import") -> None: pass @mark_no_op def visit_Import_whitespace_after_import(self, node: "Import") -> None: pass @mark_no_op def leave_Import_whitespace_after_import(self, node: "Import") -> None: pass @mark_no_op def visit_ImportAlias(self, node: "ImportAlias") -> Optional[bool]: pass @mark_no_op def visit_ImportAlias_name(self, node: "ImportAlias") -> None: pass @mark_no_op def leave_ImportAlias_name(self, node: "ImportAlias") -> None: pass @mark_no_op def visit_ImportAlias_asname(self, node: "ImportAlias") -> None: pass @mark_no_op def leave_ImportAlias_asname(self, node: "ImportAlias") -> None: pass @mark_no_op def visit_ImportAlias_comma(self, node: "ImportAlias") -> None: pass @mark_no_op def leave_ImportAlias_comma(self, node: "ImportAlias") -> None: pass @mark_no_op def visit_ImportFrom(self, node: "ImportFrom") -> Optional[bool]: pass @mark_no_op def visit_ImportFrom_module(self, node: "ImportFrom") -> None: pass 
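# NOTE: The transformer counterpart consumes these same hook names, but its
# leave_<Node> methods take (original_node, updated_node) and return the
# replacement node; import-rewriting codemods are built on top of exactly
# the Import/ImportFrom defaults generated here. A hedged sketch (the module
# names are illustrative), kept as a comment:
#
#     import libcst as cst
#
#     class RenameImport(cst.CSTTransformer):
#         def leave_ImportFrom(
#             self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom
#         ) -> cst.ImportFrom:
#             mod = updated_node.module
#             if isinstance(mod, cst.Name) and mod.value == "old_pkg":
#                 return updated_node.with_changes(module=cst.Name("new_pkg"))
#             return updated_node
#
#     module = cst.parse_module("from old_pkg import thing\n")
#     assert module.visit(RenameImport()).code == "from new_pkg import thing\n"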
@mark_no_op def leave_ImportFrom_module(self, node: "ImportFrom") -> None: pass @mark_no_op def visit_ImportFrom_names(self, node: "ImportFrom") -> None: pass @mark_no_op def leave_ImportFrom_names(self, node: "ImportFrom") -> None: pass @mark_no_op def visit_ImportFrom_relative(self, node: "ImportFrom") -> None: pass @mark_no_op def leave_ImportFrom_relative(self, node: "ImportFrom") -> None: pass @mark_no_op def visit_ImportFrom_lpar(self, node: "ImportFrom") -> None: pass @mark_no_op def leave_ImportFrom_lpar(self, node: "ImportFrom") -> None: pass @mark_no_op def visit_ImportFrom_rpar(self, node: "ImportFrom") -> None: pass @mark_no_op def leave_ImportFrom_rpar(self, node: "ImportFrom") -> None: pass @mark_no_op def visit_ImportFrom_semicolon(self, node: "ImportFrom") -> None: pass @mark_no_op def leave_ImportFrom_semicolon(self, node: "ImportFrom") -> None: pass @mark_no_op def visit_ImportFrom_whitespace_after_from(self, node: "ImportFrom") -> None: pass @mark_no_op def leave_ImportFrom_whitespace_after_from(self, node: "ImportFrom") -> None: pass @mark_no_op def visit_ImportFrom_whitespace_before_import(self, node: "ImportFrom") -> None: pass @mark_no_op def leave_ImportFrom_whitespace_before_import(self, node: "ImportFrom") -> None: pass @mark_no_op def visit_ImportFrom_whitespace_after_import(self, node: "ImportFrom") -> None: pass @mark_no_op def leave_ImportFrom_whitespace_after_import(self, node: "ImportFrom") -> None: pass @mark_no_op def visit_ImportStar(self, node: "ImportStar") -> Optional[bool]: pass @mark_no_op def visit_In(self, node: "In") -> Optional[bool]: pass @mark_no_op def visit_In_whitespace_before(self, node: "In") -> None: pass @mark_no_op def leave_In_whitespace_before(self, node: "In") -> None: pass @mark_no_op def visit_In_whitespace_after(self, node: "In") -> None: pass @mark_no_op def leave_In_whitespace_after(self, node: "In") -> None: pass @mark_no_op def visit_IndentedBlock(self, node: "IndentedBlock") -> Optional[bool]: pass @mark_no_op def visit_IndentedBlock_body(self, node: "IndentedBlock") -> None: pass @mark_no_op def leave_IndentedBlock_body(self, node: "IndentedBlock") -> None: pass @mark_no_op def visit_IndentedBlock_header(self, node: "IndentedBlock") -> None: pass @mark_no_op def leave_IndentedBlock_header(self, node: "IndentedBlock") -> None: pass @mark_no_op def visit_IndentedBlock_indent(self, node: "IndentedBlock") -> None: pass @mark_no_op def leave_IndentedBlock_indent(self, node: "IndentedBlock") -> None: pass @mark_no_op def visit_IndentedBlock_footer(self, node: "IndentedBlock") -> None: pass @mark_no_op def leave_IndentedBlock_footer(self, node: "IndentedBlock") -> None: pass @mark_no_op def visit_Index(self, node: "Index") -> Optional[bool]: pass @mark_no_op def visit_Index_value(self, node: "Index") -> None: pass @mark_no_op def leave_Index_value(self, node: "Index") -> None: pass @mark_no_op def visit_Index_star(self, node: "Index") -> None: pass @mark_no_op def leave_Index_star(self, node: "Index") -> None: pass @mark_no_op def visit_Index_whitespace_after_star(self, node: "Index") -> None: pass @mark_no_op def leave_Index_whitespace_after_star(self, node: "Index") -> None: pass @mark_no_op def visit_Integer(self, node: "Integer") -> Optional[bool]: pass @mark_no_op def visit_Integer_value(self, node: "Integer") -> None: pass @mark_no_op def leave_Integer_value(self, node: "Integer") -> None: pass @mark_no_op def visit_Integer_lpar(self, node: "Integer") -> None: pass @mark_no_op def leave_Integer_lpar(self, node: "Integer") -> 
None: pass @mark_no_op def visit_Integer_rpar(self, node: "Integer") -> None: pass @mark_no_op def leave_Integer_rpar(self, node: "Integer") -> None: pass @mark_no_op def visit_Is(self, node: "Is") -> Optional[bool]: pass @mark_no_op def visit_Is_whitespace_before(self, node: "Is") -> None: pass @mark_no_op def leave_Is_whitespace_before(self, node: "Is") -> None: pass @mark_no_op def visit_Is_whitespace_after(self, node: "Is") -> None: pass @mark_no_op def leave_Is_whitespace_after(self, node: "Is") -> None: pass @mark_no_op def visit_IsNot(self, node: "IsNot") -> Optional[bool]: pass @mark_no_op def visit_IsNot_whitespace_before(self, node: "IsNot") -> None: pass @mark_no_op def leave_IsNot_whitespace_before(self, node: "IsNot") -> None: pass @mark_no_op def visit_IsNot_whitespace_between(self, node: "IsNot") -> None: pass @mark_no_op def leave_IsNot_whitespace_between(self, node: "IsNot") -> None: pass @mark_no_op def visit_IsNot_whitespace_after(self, node: "IsNot") -> None: pass @mark_no_op def leave_IsNot_whitespace_after(self, node: "IsNot") -> None: pass @mark_no_op def visit_Lambda(self, node: "Lambda") -> Optional[bool]: pass @mark_no_op def visit_Lambda_params(self, node: "Lambda") -> None: pass @mark_no_op def leave_Lambda_params(self, node: "Lambda") -> None: pass @mark_no_op def visit_Lambda_body(self, node: "Lambda") -> None: pass @mark_no_op def leave_Lambda_body(self, node: "Lambda") -> None: pass @mark_no_op def visit_Lambda_colon(self, node: "Lambda") -> None: pass @mark_no_op def leave_Lambda_colon(self, node: "Lambda") -> None: pass @mark_no_op def visit_Lambda_lpar(self, node: "Lambda") -> None: pass @mark_no_op def leave_Lambda_lpar(self, node: "Lambda") -> None: pass @mark_no_op def visit_Lambda_rpar(self, node: "Lambda") -> None: pass @mark_no_op def leave_Lambda_rpar(self, node: "Lambda") -> None: pass @mark_no_op def visit_Lambda_whitespace_after_lambda(self, node: "Lambda") -> None: pass @mark_no_op def leave_Lambda_whitespace_after_lambda(self, node: "Lambda") -> None: pass @mark_no_op def visit_LeftCurlyBrace(self, node: "LeftCurlyBrace") -> Optional[bool]: pass @mark_no_op def visit_LeftCurlyBrace_whitespace_after(self, node: "LeftCurlyBrace") -> None: pass @mark_no_op def leave_LeftCurlyBrace_whitespace_after(self, node: "LeftCurlyBrace") -> None: pass @mark_no_op def visit_LeftParen(self, node: "LeftParen") -> Optional[bool]: pass @mark_no_op def visit_LeftParen_whitespace_after(self, node: "LeftParen") -> None: pass @mark_no_op def leave_LeftParen_whitespace_after(self, node: "LeftParen") -> None: pass @mark_no_op def visit_LeftShift(self, node: "LeftShift") -> Optional[bool]: pass @mark_no_op def visit_LeftShift_whitespace_before(self, node: "LeftShift") -> None: pass @mark_no_op def leave_LeftShift_whitespace_before(self, node: "LeftShift") -> None: pass @mark_no_op def visit_LeftShift_whitespace_after(self, node: "LeftShift") -> None: pass @mark_no_op def leave_LeftShift_whitespace_after(self, node: "LeftShift") -> None: pass @mark_no_op def visit_LeftShiftAssign(self, node: "LeftShiftAssign") -> Optional[bool]: pass @mark_no_op def visit_LeftShiftAssign_whitespace_before(self, node: "LeftShiftAssign") -> None: pass @mark_no_op def leave_LeftShiftAssign_whitespace_before(self, node: "LeftShiftAssign") -> None: pass @mark_no_op def visit_LeftShiftAssign_whitespace_after(self, node: "LeftShiftAssign") -> None: pass @mark_no_op def leave_LeftShiftAssign_whitespace_after(self, node: "LeftShiftAssign") -> None: pass @mark_no_op def 
visit_LeftSquareBracket(self, node: "LeftSquareBracket") -> Optional[bool]: pass @mark_no_op def visit_LeftSquareBracket_whitespace_after( self, node: "LeftSquareBracket" ) -> None: pass @mark_no_op def leave_LeftSquareBracket_whitespace_after( self, node: "LeftSquareBracket" ) -> None: pass @mark_no_op def visit_LessThan(self, node: "LessThan") -> Optional[bool]: pass @mark_no_op def visit_LessThan_whitespace_before(self, node: "LessThan") -> None: pass @mark_no_op def leave_LessThan_whitespace_before(self, node: "LessThan") -> None: pass @mark_no_op def visit_LessThan_whitespace_after(self, node: "LessThan") -> None: pass @mark_no_op def leave_LessThan_whitespace_after(self, node: "LessThan") -> None: pass @mark_no_op def visit_LessThanEqual(self, node: "LessThanEqual") -> Optional[bool]: pass @mark_no_op def visit_LessThanEqual_whitespace_before(self, node: "LessThanEqual") -> None: pass @mark_no_op def leave_LessThanEqual_whitespace_before(self, node: "LessThanEqual") -> None: pass @mark_no_op def visit_LessThanEqual_whitespace_after(self, node: "LessThanEqual") -> None: pass @mark_no_op def leave_LessThanEqual_whitespace_after(self, node: "LessThanEqual") -> None: pass @mark_no_op def visit_List(self, node: "List") -> Optional[bool]: pass @mark_no_op def visit_List_elements(self, node: "List") -> None: pass @mark_no_op def leave_List_elements(self, node: "List") -> None: pass @mark_no_op def visit_List_lbracket(self, node: "List") -> None: pass @mark_no_op def leave_List_lbracket(self, node: "List") -> None: pass @mark_no_op def visit_List_rbracket(self, node: "List") -> None: pass @mark_no_op def leave_List_rbracket(self, node: "List") -> None: pass @mark_no_op def visit_List_lpar(self, node: "List") -> None: pass @mark_no_op def leave_List_lpar(self, node: "List") -> None: pass @mark_no_op def visit_List_rpar(self, node: "List") -> None: pass @mark_no_op def leave_List_rpar(self, node: "List") -> None: pass @mark_no_op def visit_ListComp(self, node: "ListComp") -> Optional[bool]: pass @mark_no_op def visit_ListComp_elt(self, node: "ListComp") -> None: pass @mark_no_op def leave_ListComp_elt(self, node: "ListComp") -> None: pass @mark_no_op def visit_ListComp_for_in(self, node: "ListComp") -> None: pass @mark_no_op def leave_ListComp_for_in(self, node: "ListComp") -> None: pass @mark_no_op def visit_ListComp_lbracket(self, node: "ListComp") -> None: pass @mark_no_op def leave_ListComp_lbracket(self, node: "ListComp") -> None: pass @mark_no_op def visit_ListComp_rbracket(self, node: "ListComp") -> None: pass @mark_no_op def leave_ListComp_rbracket(self, node: "ListComp") -> None: pass @mark_no_op def visit_ListComp_lpar(self, node: "ListComp") -> None: pass @mark_no_op def leave_ListComp_lpar(self, node: "ListComp") -> None: pass @mark_no_op def visit_ListComp_rpar(self, node: "ListComp") -> None: pass @mark_no_op def leave_ListComp_rpar(self, node: "ListComp") -> None: pass @mark_no_op def visit_Match(self, node: "Match") -> Optional[bool]: pass @mark_no_op def visit_Match_subject(self, node: "Match") -> None: pass @mark_no_op def leave_Match_subject(self, node: "Match") -> None: pass @mark_no_op def visit_Match_cases(self, node: "Match") -> None: pass @mark_no_op def leave_Match_cases(self, node: "Match") -> None: pass @mark_no_op def visit_Match_leading_lines(self, node: "Match") -> None: pass @mark_no_op def leave_Match_leading_lines(self, node: "Match") -> None: pass @mark_no_op def visit_Match_whitespace_after_match(self, node: "Match") -> None: pass @mark_no_op def 
leave_Match_whitespace_after_match(self, node: "Match") -> None: pass @mark_no_op def visit_Match_whitespace_before_colon(self, node: "Match") -> None: pass @mark_no_op def leave_Match_whitespace_before_colon(self, node: "Match") -> None: pass @mark_no_op def visit_Match_whitespace_after_colon(self, node: "Match") -> None: pass @mark_no_op def leave_Match_whitespace_after_colon(self, node: "Match") -> None: pass @mark_no_op def visit_Match_indent(self, node: "Match") -> None: pass @mark_no_op def leave_Match_indent(self, node: "Match") -> None: pass @mark_no_op def visit_Match_footer(self, node: "Match") -> None: pass @mark_no_op def leave_Match_footer(self, node: "Match") -> None: pass @mark_no_op def visit_MatchAs(self, node: "MatchAs") -> Optional[bool]: pass @mark_no_op def visit_MatchAs_pattern(self, node: "MatchAs") -> None: pass @mark_no_op def leave_MatchAs_pattern(self, node: "MatchAs") -> None: pass @mark_no_op def visit_MatchAs_name(self, node: "MatchAs") -> None: pass @mark_no_op def leave_MatchAs_name(self, node: "MatchAs") -> None: pass @mark_no_op def visit_MatchAs_whitespace_before_as(self, node: "MatchAs") -> None: pass @mark_no_op def leave_MatchAs_whitespace_before_as(self, node: "MatchAs") -> None: pass @mark_no_op def visit_MatchAs_whitespace_after_as(self, node: "MatchAs") -> None: pass @mark_no_op def leave_MatchAs_whitespace_after_as(self, node: "MatchAs") -> None: pass @mark_no_op def visit_MatchAs_lpar(self, node: "MatchAs") -> None: pass @mark_no_op def leave_MatchAs_lpar(self, node: "MatchAs") -> None: pass @mark_no_op def visit_MatchAs_rpar(self, node: "MatchAs") -> None: pass @mark_no_op def leave_MatchAs_rpar(self, node: "MatchAs") -> None: pass @mark_no_op def visit_MatchCase(self, node: "MatchCase") -> Optional[bool]: pass @mark_no_op def visit_MatchCase_pattern(self, node: "MatchCase") -> None: pass @mark_no_op def leave_MatchCase_pattern(self, node: "MatchCase") -> None: pass @mark_no_op def visit_MatchCase_body(self, node: "MatchCase") -> None: pass @mark_no_op def leave_MatchCase_body(self, node: "MatchCase") -> None: pass @mark_no_op def visit_MatchCase_guard(self, node: "MatchCase") -> None: pass @mark_no_op def leave_MatchCase_guard(self, node: "MatchCase") -> None: pass @mark_no_op def visit_MatchCase_leading_lines(self, node: "MatchCase") -> None: pass @mark_no_op def leave_MatchCase_leading_lines(self, node: "MatchCase") -> None: pass @mark_no_op def visit_MatchCase_whitespace_after_case(self, node: "MatchCase") -> None: pass @mark_no_op def leave_MatchCase_whitespace_after_case(self, node: "MatchCase") -> None: pass @mark_no_op def visit_MatchCase_whitespace_before_if(self, node: "MatchCase") -> None: pass @mark_no_op def leave_MatchCase_whitespace_before_if(self, node: "MatchCase") -> None: pass @mark_no_op def visit_MatchCase_whitespace_after_if(self, node: "MatchCase") -> None: pass @mark_no_op def leave_MatchCase_whitespace_after_if(self, node: "MatchCase") -> None: pass @mark_no_op def visit_MatchCase_whitespace_before_colon(self, node: "MatchCase") -> None: pass @mark_no_op def leave_MatchCase_whitespace_before_colon(self, node: "MatchCase") -> None: pass @mark_no_op def visit_MatchClass(self, node: "MatchClass") -> Optional[bool]: pass @mark_no_op def visit_MatchClass_cls(self, node: "MatchClass") -> None: pass @mark_no_op def leave_MatchClass_cls(self, node: "MatchClass") -> None: pass @mark_no_op def visit_MatchClass_patterns(self, node: "MatchClass") -> None: pass @mark_no_op def leave_MatchClass_patterns(self, node: "MatchClass") -> 
None: pass @mark_no_op def visit_MatchClass_kwds(self, node: "MatchClass") -> None: pass @mark_no_op def leave_MatchClass_kwds(self, node: "MatchClass") -> None: pass @mark_no_op def visit_MatchClass_whitespace_after_cls(self, node: "MatchClass") -> None: pass @mark_no_op def leave_MatchClass_whitespace_after_cls(self, node: "MatchClass") -> None: pass @mark_no_op def visit_MatchClass_whitespace_before_patterns(self, node: "MatchClass") -> None: pass @mark_no_op def leave_MatchClass_whitespace_before_patterns(self, node: "MatchClass") -> None: pass @mark_no_op def visit_MatchClass_whitespace_after_kwds(self, node: "MatchClass") -> None: pass @mark_no_op def leave_MatchClass_whitespace_after_kwds(self, node: "MatchClass") -> None: pass @mark_no_op def visit_MatchClass_lpar(self, node: "MatchClass") -> None: pass @mark_no_op def leave_MatchClass_lpar(self, node: "MatchClass") -> None: pass @mark_no_op def visit_MatchClass_rpar(self, node: "MatchClass") -> None: pass @mark_no_op def leave_MatchClass_rpar(self, node: "MatchClass") -> None: pass @mark_no_op def visit_MatchKeywordElement(self, node: "MatchKeywordElement") -> Optional[bool]: pass @mark_no_op def visit_MatchKeywordElement_key(self, node: "MatchKeywordElement") -> None: pass @mark_no_op def leave_MatchKeywordElement_key(self, node: "MatchKeywordElement") -> None: pass @mark_no_op def visit_MatchKeywordElement_pattern(self, node: "MatchKeywordElement") -> None: pass @mark_no_op def leave_MatchKeywordElement_pattern(self, node: "MatchKeywordElement") -> None: pass @mark_no_op def visit_MatchKeywordElement_comma(self, node: "MatchKeywordElement") -> None: pass @mark_no_op def leave_MatchKeywordElement_comma(self, node: "MatchKeywordElement") -> None: pass @mark_no_op def visit_MatchKeywordElement_whitespace_before_equal( self, node: "MatchKeywordElement" ) -> None: pass @mark_no_op def leave_MatchKeywordElement_whitespace_before_equal( self, node: "MatchKeywordElement" ) -> None: pass @mark_no_op def visit_MatchKeywordElement_whitespace_after_equal( self, node: "MatchKeywordElement" ) -> None: pass @mark_no_op def leave_MatchKeywordElement_whitespace_after_equal( self, node: "MatchKeywordElement" ) -> None: pass @mark_no_op def visit_MatchList(self, node: "MatchList") -> Optional[bool]: pass @mark_no_op def visit_MatchList_patterns(self, node: "MatchList") -> None: pass @mark_no_op def leave_MatchList_patterns(self, node: "MatchList") -> None: pass @mark_no_op def visit_MatchList_lbracket(self, node: "MatchList") -> None: pass @mark_no_op def leave_MatchList_lbracket(self, node: "MatchList") -> None: pass @mark_no_op def visit_MatchList_rbracket(self, node: "MatchList") -> None: pass @mark_no_op def leave_MatchList_rbracket(self, node: "MatchList") -> None: pass @mark_no_op def visit_MatchList_lpar(self, node: "MatchList") -> None: pass @mark_no_op def leave_MatchList_lpar(self, node: "MatchList") -> None: pass @mark_no_op def visit_MatchList_rpar(self, node: "MatchList") -> None: pass @mark_no_op def leave_MatchList_rpar(self, node: "MatchList") -> None: pass @mark_no_op def visit_MatchMapping(self, node: "MatchMapping") -> Optional[bool]: pass @mark_no_op def visit_MatchMapping_elements(self, node: "MatchMapping") -> None: pass @mark_no_op def leave_MatchMapping_elements(self, node: "MatchMapping") -> None: pass @mark_no_op def visit_MatchMapping_lbrace(self, node: "MatchMapping") -> None: pass @mark_no_op def leave_MatchMapping_lbrace(self, node: "MatchMapping") -> None: pass @mark_no_op def visit_MatchMapping_rbrace(self, node: 
"MatchMapping") -> None: pass @mark_no_op def leave_MatchMapping_rbrace(self, node: "MatchMapping") -> None: pass @mark_no_op def visit_MatchMapping_rest(self, node: "MatchMapping") -> None: pass @mark_no_op def leave_MatchMapping_rest(self, node: "MatchMapping") -> None: pass @mark_no_op def visit_MatchMapping_whitespace_before_rest(self, node: "MatchMapping") -> None: pass @mark_no_op def leave_MatchMapping_whitespace_before_rest(self, node: "MatchMapping") -> None: pass @mark_no_op def visit_MatchMapping_trailing_comma(self, node: "MatchMapping") -> None: pass @mark_no_op def leave_MatchMapping_trailing_comma(self, node: "MatchMapping") -> None: pass @mark_no_op def visit_MatchMapping_lpar(self, node: "MatchMapping") -> None: pass @mark_no_op def leave_MatchMapping_lpar(self, node: "MatchMapping") -> None: pass @mark_no_op def visit_MatchMapping_rpar(self, node: "MatchMapping") -> None: pass @mark_no_op def leave_MatchMapping_rpar(self, node: "MatchMapping") -> None: pass @mark_no_op def visit_MatchMappingElement(self, node: "MatchMappingElement") -> Optional[bool]: pass @mark_no_op def visit_MatchMappingElement_key(self, node: "MatchMappingElement") -> None: pass @mark_no_op def leave_MatchMappingElement_key(self, node: "MatchMappingElement") -> None: pass @mark_no_op def visit_MatchMappingElement_pattern(self, node: "MatchMappingElement") -> None: pass @mark_no_op def leave_MatchMappingElement_pattern(self, node: "MatchMappingElement") -> None: pass @mark_no_op def visit_MatchMappingElement_comma(self, node: "MatchMappingElement") -> None: pass @mark_no_op def leave_MatchMappingElement_comma(self, node: "MatchMappingElement") -> None: pass @mark_no_op def visit_MatchMappingElement_whitespace_before_colon( self, node: "MatchMappingElement" ) -> None: pass @mark_no_op def leave_MatchMappingElement_whitespace_before_colon( self, node: "MatchMappingElement" ) -> None: pass @mark_no_op def visit_MatchMappingElement_whitespace_after_colon( self, node: "MatchMappingElement" ) -> None: pass @mark_no_op def leave_MatchMappingElement_whitespace_after_colon( self, node: "MatchMappingElement" ) -> None: pass @mark_no_op def visit_MatchOr(self, node: "MatchOr") -> Optional[bool]: pass @mark_no_op def visit_MatchOr_patterns(self, node: "MatchOr") -> None: pass @mark_no_op def leave_MatchOr_patterns(self, node: "MatchOr") -> None: pass @mark_no_op def visit_MatchOr_lpar(self, node: "MatchOr") -> None: pass @mark_no_op def leave_MatchOr_lpar(self, node: "MatchOr") -> None: pass @mark_no_op def visit_MatchOr_rpar(self, node: "MatchOr") -> None: pass @mark_no_op def leave_MatchOr_rpar(self, node: "MatchOr") -> None: pass @mark_no_op def visit_MatchOrElement(self, node: "MatchOrElement") -> Optional[bool]: pass @mark_no_op def visit_MatchOrElement_pattern(self, node: "MatchOrElement") -> None: pass @mark_no_op def leave_MatchOrElement_pattern(self, node: "MatchOrElement") -> None: pass @mark_no_op def visit_MatchOrElement_separator(self, node: "MatchOrElement") -> None: pass @mark_no_op def leave_MatchOrElement_separator(self, node: "MatchOrElement") -> None: pass @mark_no_op def visit_MatchPattern(self, node: "MatchPattern") -> Optional[bool]: pass @mark_no_op def visit_MatchSequence(self, node: "MatchSequence") -> Optional[bool]: pass @mark_no_op def visit_MatchSequenceElement( self, node: "MatchSequenceElement" ) -> Optional[bool]: pass @mark_no_op def visit_MatchSequenceElement_value(self, node: "MatchSequenceElement") -> None: pass @mark_no_op def leave_MatchSequenceElement_value(self, node: 
"MatchSequenceElement") -> None: pass @mark_no_op def visit_MatchSequenceElement_comma(self, node: "MatchSequenceElement") -> None: pass @mark_no_op def leave_MatchSequenceElement_comma(self, node: "MatchSequenceElement") -> None: pass @mark_no_op def visit_MatchSingleton(self, node: "MatchSingleton") -> Optional[bool]: pass @mark_no_op def visit_MatchSingleton_value(self, node: "MatchSingleton") -> None: pass @mark_no_op def leave_MatchSingleton_value(self, node: "MatchSingleton") -> None: pass @mark_no_op def visit_MatchStar(self, node: "MatchStar") -> Optional[bool]: pass @mark_no_op def visit_MatchStar_name(self, node: "MatchStar") -> None: pass @mark_no_op def leave_MatchStar_name(self, node: "MatchStar") -> None: pass @mark_no_op def visit_MatchStar_comma(self, node: "MatchStar") -> None: pass @mark_no_op def leave_MatchStar_comma(self, node: "MatchStar") -> None: pass @mark_no_op def visit_MatchStar_whitespace_before_name(self, node: "MatchStar") -> None: pass @mark_no_op def leave_MatchStar_whitespace_before_name(self, node: "MatchStar") -> None: pass @mark_no_op def visit_MatchTuple(self, node: "MatchTuple") -> Optional[bool]: pass @mark_no_op def visit_MatchTuple_patterns(self, node: "MatchTuple") -> None: pass @mark_no_op def leave_MatchTuple_patterns(self, node: "MatchTuple") -> None: pass @mark_no_op def visit_MatchTuple_lpar(self, node: "MatchTuple") -> None: pass @mark_no_op def leave_MatchTuple_lpar(self, node: "MatchTuple") -> None: pass @mark_no_op def visit_MatchTuple_rpar(self, node: "MatchTuple") -> None: pass @mark_no_op def leave_MatchTuple_rpar(self, node: "MatchTuple") -> None: pass @mark_no_op def visit_MatchValue(self, node: "MatchValue") -> Optional[bool]: pass @mark_no_op def visit_MatchValue_value(self, node: "MatchValue") -> None: pass @mark_no_op def leave_MatchValue_value(self, node: "MatchValue") -> None: pass @mark_no_op def visit_MatrixMultiply(self, node: "MatrixMultiply") -> Optional[bool]: pass @mark_no_op def visit_MatrixMultiply_whitespace_before(self, node: "MatrixMultiply") -> None: pass @mark_no_op def leave_MatrixMultiply_whitespace_before(self, node: "MatrixMultiply") -> None: pass @mark_no_op def visit_MatrixMultiply_whitespace_after(self, node: "MatrixMultiply") -> None: pass @mark_no_op def leave_MatrixMultiply_whitespace_after(self, node: "MatrixMultiply") -> None: pass @mark_no_op def visit_MatrixMultiplyAssign( self, node: "MatrixMultiplyAssign" ) -> Optional[bool]: pass @mark_no_op def visit_MatrixMultiplyAssign_whitespace_before( self, node: "MatrixMultiplyAssign" ) -> None: pass @mark_no_op def leave_MatrixMultiplyAssign_whitespace_before( self, node: "MatrixMultiplyAssign" ) -> None: pass @mark_no_op def visit_MatrixMultiplyAssign_whitespace_after( self, node: "MatrixMultiplyAssign" ) -> None: pass @mark_no_op def leave_MatrixMultiplyAssign_whitespace_after( self, node: "MatrixMultiplyAssign" ) -> None: pass @mark_no_op def visit_Minus(self, node: "Minus") -> Optional[bool]: pass @mark_no_op def visit_Minus_whitespace_after(self, node: "Minus") -> None: pass @mark_no_op def leave_Minus_whitespace_after(self, node: "Minus") -> None: pass @mark_no_op def visit_Module(self, node: "Module") -> Optional[bool]: pass @mark_no_op def visit_Module_body(self, node: "Module") -> None: pass @mark_no_op def leave_Module_body(self, node: "Module") -> None: pass @mark_no_op def visit_Module_header(self, node: "Module") -> None: pass @mark_no_op def leave_Module_header(self, node: "Module") -> None: pass @mark_no_op def visit_Module_footer(self, 
node: "Module") -> None: pass @mark_no_op def leave_Module_footer(self, node: "Module") -> None: pass @mark_no_op def visit_Module_encoding(self, node: "Module") -> None: pass @mark_no_op def leave_Module_encoding(self, node: "Module") -> None: pass @mark_no_op def visit_Module_default_indent(self, node: "Module") -> None: pass @mark_no_op def leave_Module_default_indent(self, node: "Module") -> None: pass @mark_no_op def visit_Module_default_newline(self, node: "Module") -> None: pass @mark_no_op def leave_Module_default_newline(self, node: "Module") -> None: pass @mark_no_op def visit_Module_has_trailing_newline(self, node: "Module") -> None: pass @mark_no_op def leave_Module_has_trailing_newline(self, node: "Module") -> None: pass @mark_no_op def visit_Modulo(self, node: "Modulo") -> Optional[bool]: pass @mark_no_op def visit_Modulo_whitespace_before(self, node: "Modulo") -> None: pass @mark_no_op def leave_Modulo_whitespace_before(self, node: "Modulo") -> None: pass @mark_no_op def visit_Modulo_whitespace_after(self, node: "Modulo") -> None: pass @mark_no_op def leave_Modulo_whitespace_after(self, node: "Modulo") -> None: pass @mark_no_op def visit_ModuloAssign(self, node: "ModuloAssign") -> Optional[bool]: pass @mark_no_op def visit_ModuloAssign_whitespace_before(self, node: "ModuloAssign") -> None: pass @mark_no_op def leave_ModuloAssign_whitespace_before(self, node: "ModuloAssign") -> None: pass @mark_no_op def visit_ModuloAssign_whitespace_after(self, node: "ModuloAssign") -> None: pass @mark_no_op def leave_ModuloAssign_whitespace_after(self, node: "ModuloAssign") -> None: pass @mark_no_op def visit_Multiply(self, node: "Multiply") -> Optional[bool]: pass @mark_no_op def visit_Multiply_whitespace_before(self, node: "Multiply") -> None: pass @mark_no_op def leave_Multiply_whitespace_before(self, node: "Multiply") -> None: pass @mark_no_op def visit_Multiply_whitespace_after(self, node: "Multiply") -> None: pass @mark_no_op def leave_Multiply_whitespace_after(self, node: "Multiply") -> None: pass @mark_no_op def visit_MultiplyAssign(self, node: "MultiplyAssign") -> Optional[bool]: pass @mark_no_op def visit_MultiplyAssign_whitespace_before(self, node: "MultiplyAssign") -> None: pass @mark_no_op def leave_MultiplyAssign_whitespace_before(self, node: "MultiplyAssign") -> None: pass @mark_no_op def visit_MultiplyAssign_whitespace_after(self, node: "MultiplyAssign") -> None: pass @mark_no_op def leave_MultiplyAssign_whitespace_after(self, node: "MultiplyAssign") -> None: pass @mark_no_op def visit_Name(self, node: "Name") -> Optional[bool]: pass @mark_no_op def visit_Name_value(self, node: "Name") -> None: pass @mark_no_op def leave_Name_value(self, node: "Name") -> None: pass @mark_no_op def visit_Name_lpar(self, node: "Name") -> None: pass @mark_no_op def leave_Name_lpar(self, node: "Name") -> None: pass @mark_no_op def visit_Name_rpar(self, node: "Name") -> None: pass @mark_no_op def leave_Name_rpar(self, node: "Name") -> None: pass @mark_no_op def visit_NameItem(self, node: "NameItem") -> Optional[bool]: pass @mark_no_op def visit_NameItem_name(self, node: "NameItem") -> None: pass @mark_no_op def leave_NameItem_name(self, node: "NameItem") -> None: pass @mark_no_op def visit_NameItem_comma(self, node: "NameItem") -> None: pass @mark_no_op def leave_NameItem_comma(self, node: "NameItem") -> None: pass @mark_no_op def visit_NamedExpr(self, node: "NamedExpr") -> Optional[bool]: pass @mark_no_op def visit_NamedExpr_target(self, node: "NamedExpr") -> None: pass @mark_no_op def 
leave_NamedExpr_target(self, node: "NamedExpr") -> None: pass @mark_no_op def visit_NamedExpr_value(self, node: "NamedExpr") -> None: pass @mark_no_op def leave_NamedExpr_value(self, node: "NamedExpr") -> None: pass @mark_no_op def visit_NamedExpr_lpar(self, node: "NamedExpr") -> None: pass @mark_no_op def leave_NamedExpr_lpar(self, node: "NamedExpr") -> None: pass @mark_no_op def visit_NamedExpr_rpar(self, node: "NamedExpr") -> None: pass @mark_no_op def leave_NamedExpr_rpar(self, node: "NamedExpr") -> None: pass @mark_no_op def visit_NamedExpr_whitespace_before_walrus(self, node: "NamedExpr") -> None: pass @mark_no_op def leave_NamedExpr_whitespace_before_walrus(self, node: "NamedExpr") -> None: pass @mark_no_op def visit_NamedExpr_whitespace_after_walrus(self, node: "NamedExpr") -> None: pass @mark_no_op def leave_NamedExpr_whitespace_after_walrus(self, node: "NamedExpr") -> None: pass @mark_no_op def visit_Newline(self, node: "Newline") -> Optional[bool]: pass @mark_no_op def visit_Newline_value(self, node: "Newline") -> None: pass @mark_no_op def leave_Newline_value(self, node: "Newline") -> None: pass @mark_no_op def visit_Nonlocal(self, node: "Nonlocal") -> Optional[bool]: pass @mark_no_op def visit_Nonlocal_names(self, node: "Nonlocal") -> None: pass @mark_no_op def leave_Nonlocal_names(self, node: "Nonlocal") -> None: pass @mark_no_op def visit_Nonlocal_whitespace_after_nonlocal(self, node: "Nonlocal") -> None: pass @mark_no_op def leave_Nonlocal_whitespace_after_nonlocal(self, node: "Nonlocal") -> None: pass @mark_no_op def visit_Nonlocal_semicolon(self, node: "Nonlocal") -> None: pass @mark_no_op def leave_Nonlocal_semicolon(self, node: "Nonlocal") -> None: pass @mark_no_op def visit_Not(self, node: "Not") -> Optional[bool]: pass @mark_no_op def visit_Not_whitespace_after(self, node: "Not") -> None: pass @mark_no_op def leave_Not_whitespace_after(self, node: "Not") -> None: pass @mark_no_op def visit_NotEqual(self, node: "NotEqual") -> Optional[bool]: pass @mark_no_op def visit_NotEqual_value(self, node: "NotEqual") -> None: pass @mark_no_op def leave_NotEqual_value(self, node: "NotEqual") -> None: pass @mark_no_op def visit_NotEqual_whitespace_before(self, node: "NotEqual") -> None: pass @mark_no_op def leave_NotEqual_whitespace_before(self, node: "NotEqual") -> None: pass @mark_no_op def visit_NotEqual_whitespace_after(self, node: "NotEqual") -> None: pass @mark_no_op def leave_NotEqual_whitespace_after(self, node: "NotEqual") -> None: pass @mark_no_op def visit_NotIn(self, node: "NotIn") -> Optional[bool]: pass @mark_no_op def visit_NotIn_whitespace_before(self, node: "NotIn") -> None: pass @mark_no_op def leave_NotIn_whitespace_before(self, node: "NotIn") -> None: pass @mark_no_op def visit_NotIn_whitespace_between(self, node: "NotIn") -> None: pass @mark_no_op def leave_NotIn_whitespace_between(self, node: "NotIn") -> None: pass @mark_no_op def visit_NotIn_whitespace_after(self, node: "NotIn") -> None: pass @mark_no_op def leave_NotIn_whitespace_after(self, node: "NotIn") -> None: pass @mark_no_op def visit_Or(self, node: "Or") -> Optional[bool]: pass @mark_no_op def visit_Or_whitespace_before(self, node: "Or") -> None: pass @mark_no_op def leave_Or_whitespace_before(self, node: "Or") -> None: pass @mark_no_op def visit_Or_whitespace_after(self, node: "Or") -> None: pass @mark_no_op def leave_Or_whitespace_after(self, node: "Or") -> None: pass @mark_no_op def visit_Param(self, node: "Param") -> Optional[bool]: pass @mark_no_op def visit_Param_name(self, node: "Param") -> 
None: pass @mark_no_op def leave_Param_name(self, node: "Param") -> None: pass @mark_no_op def visit_Param_annotation(self, node: "Param") -> None: pass @mark_no_op def leave_Param_annotation(self, node: "Param") -> None: pass @mark_no_op def visit_Param_equal(self, node: "Param") -> None: pass @mark_no_op def leave_Param_equal(self, node: "Param") -> None: pass @mark_no_op def visit_Param_default(self, node: "Param") -> None: pass @mark_no_op def leave_Param_default(self, node: "Param") -> None: pass @mark_no_op def visit_Param_comma(self, node: "Param") -> None: pass @mark_no_op def leave_Param_comma(self, node: "Param") -> None: pass @mark_no_op def visit_Param_star(self, node: "Param") -> None: pass @mark_no_op def leave_Param_star(self, node: "Param") -> None: pass @mark_no_op def visit_Param_whitespace_after_star(self, node: "Param") -> None: pass @mark_no_op def leave_Param_whitespace_after_star(self, node: "Param") -> None: pass @mark_no_op def visit_Param_whitespace_after_param(self, node: "Param") -> None: pass @mark_no_op def leave_Param_whitespace_after_param(self, node: "Param") -> None: pass @mark_no_op def visit_ParamSlash(self, node: "ParamSlash") -> Optional[bool]: pass @mark_no_op def visit_ParamSlash_comma(self, node: "ParamSlash") -> None: pass @mark_no_op def leave_ParamSlash_comma(self, node: "ParamSlash") -> None: pass @mark_no_op def visit_ParamSlash_whitespace_after(self, node: "ParamSlash") -> None: pass @mark_no_op def leave_ParamSlash_whitespace_after(self, node: "ParamSlash") -> None: pass @mark_no_op def visit_ParamSpec(self, node: "ParamSpec") -> Optional[bool]: pass @mark_no_op def visit_ParamSpec_name(self, node: "ParamSpec") -> None: pass @mark_no_op def leave_ParamSpec_name(self, node: "ParamSpec") -> None: pass @mark_no_op def visit_ParamSpec_whitespace_after_star(self, node: "ParamSpec") -> None: pass @mark_no_op def leave_ParamSpec_whitespace_after_star(self, node: "ParamSpec") -> None: pass @mark_no_op def visit_ParamStar(self, node: "ParamStar") -> Optional[bool]: pass @mark_no_op def visit_ParamStar_comma(self, node: "ParamStar") -> None: pass @mark_no_op def leave_ParamStar_comma(self, node: "ParamStar") -> None: pass @mark_no_op def visit_Parameters(self, node: "Parameters") -> Optional[bool]: pass @mark_no_op def visit_Parameters_params(self, node: "Parameters") -> None: pass @mark_no_op def leave_Parameters_params(self, node: "Parameters") -> None: pass @mark_no_op def visit_Parameters_star_arg(self, node: "Parameters") -> None: pass @mark_no_op def leave_Parameters_star_arg(self, node: "Parameters") -> None: pass @mark_no_op def visit_Parameters_kwonly_params(self, node: "Parameters") -> None: pass @mark_no_op def leave_Parameters_kwonly_params(self, node: "Parameters") -> None: pass @mark_no_op def visit_Parameters_star_kwarg(self, node: "Parameters") -> None: pass @mark_no_op def leave_Parameters_star_kwarg(self, node: "Parameters") -> None: pass @mark_no_op def visit_Parameters_posonly_params(self, node: "Parameters") -> None: pass @mark_no_op def leave_Parameters_posonly_params(self, node: "Parameters") -> None: pass @mark_no_op def visit_Parameters_posonly_ind(self, node: "Parameters") -> None: pass @mark_no_op def leave_Parameters_posonly_ind(self, node: "Parameters") -> None: pass @mark_no_op def visit_ParenthesizedWhitespace( self, node: "ParenthesizedWhitespace" ) -> Optional[bool]: pass @mark_no_op def visit_ParenthesizedWhitespace_first_line( self, node: "ParenthesizedWhitespace" ) -> None: pass @mark_no_op def 
leave_ParenthesizedWhitespace_first_line( self, node: "ParenthesizedWhitespace" ) -> None: pass @mark_no_op def visit_ParenthesizedWhitespace_empty_lines( self, node: "ParenthesizedWhitespace" ) -> None: pass @mark_no_op def leave_ParenthesizedWhitespace_empty_lines( self, node: "ParenthesizedWhitespace" ) -> None: pass @mark_no_op def visit_ParenthesizedWhitespace_indent( self, node: "ParenthesizedWhitespace" ) -> None: pass @mark_no_op def leave_ParenthesizedWhitespace_indent( self, node: "ParenthesizedWhitespace" ) -> None: pass @mark_no_op def visit_ParenthesizedWhitespace_last_line( self, node: "ParenthesizedWhitespace" ) -> None: pass @mark_no_op def leave_ParenthesizedWhitespace_last_line( self, node: "ParenthesizedWhitespace" ) -> None: pass @mark_no_op def visit_Pass(self, node: "Pass") -> Optional[bool]: pass @mark_no_op def visit_Pass_semicolon(self, node: "Pass") -> None: pass @mark_no_op def leave_Pass_semicolon(self, node: "Pass") -> None: pass @mark_no_op def visit_Plus(self, node: "Plus") -> Optional[bool]: pass @mark_no_op def visit_Plus_whitespace_after(self, node: "Plus") -> None: pass @mark_no_op def leave_Plus_whitespace_after(self, node: "Plus") -> None: pass @mark_no_op def visit_Power(self, node: "Power") -> Optional[bool]: pass @mark_no_op def visit_Power_whitespace_before(self, node: "Power") -> None: pass @mark_no_op def leave_Power_whitespace_before(self, node: "Power") -> None: pass @mark_no_op def visit_Power_whitespace_after(self, node: "Power") -> None: pass @mark_no_op def leave_Power_whitespace_after(self, node: "Power") -> None: pass @mark_no_op def visit_PowerAssign(self, node: "PowerAssign") -> Optional[bool]: pass @mark_no_op def visit_PowerAssign_whitespace_before(self, node: "PowerAssign") -> None: pass @mark_no_op def leave_PowerAssign_whitespace_before(self, node: "PowerAssign") -> None: pass @mark_no_op def visit_PowerAssign_whitespace_after(self, node: "PowerAssign") -> None: pass @mark_no_op def leave_PowerAssign_whitespace_after(self, node: "PowerAssign") -> None: pass @mark_no_op def visit_Raise(self, node: "Raise") -> Optional[bool]: pass @mark_no_op def visit_Raise_exc(self, node: "Raise") -> None: pass @mark_no_op def leave_Raise_exc(self, node: "Raise") -> None: pass @mark_no_op def visit_Raise_cause(self, node: "Raise") -> None: pass @mark_no_op def leave_Raise_cause(self, node: "Raise") -> None: pass @mark_no_op def visit_Raise_whitespace_after_raise(self, node: "Raise") -> None: pass @mark_no_op def leave_Raise_whitespace_after_raise(self, node: "Raise") -> None: pass @mark_no_op def visit_Raise_semicolon(self, node: "Raise") -> None: pass @mark_no_op def leave_Raise_semicolon(self, node: "Raise") -> None: pass @mark_no_op def visit_Return(self, node: "Return") -> Optional[bool]: pass @mark_no_op def visit_Return_value(self, node: "Return") -> None: pass @mark_no_op def leave_Return_value(self, node: "Return") -> None: pass @mark_no_op def visit_Return_whitespace_after_return(self, node: "Return") -> None: pass @mark_no_op def leave_Return_whitespace_after_return(self, node: "Return") -> None: pass @mark_no_op def visit_Return_semicolon(self, node: "Return") -> None: pass @mark_no_op def leave_Return_semicolon(self, node: "Return") -> None: pass @mark_no_op def visit_RightCurlyBrace(self, node: "RightCurlyBrace") -> Optional[bool]: pass @mark_no_op def visit_RightCurlyBrace_whitespace_before(self, node: "RightCurlyBrace") -> None: pass @mark_no_op def leave_RightCurlyBrace_whitespace_before(self, node: "RightCurlyBrace") -> None: pass 
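    # --- Editor's illustrative sketch (not part of the generated API) -------
    # The attribute hooks defined throughout this class
    # (visit_<Node>_<attr> / leave_<Node>_<attr>) fire immediately before and
    # after a single child attribute is visited, which makes them useful for
    # tracking context during traversal. A minimal example, assuming the
    # usual `import libcst as cst` and standard visitor dispatch;
    # `DefaultTracker` and its fields are hypothetical names:
    #
    #     import libcst as cst
    #
    #     class DefaultTracker(cst.CSTVisitor):
    #         def __init__(self) -> None:
    #             self.in_default = False
    #             self.defaults_seen = 0
    #
    #         def visit_Param_default(self, node: cst.Param) -> None:
    #             # Entering the `default` attribute of a Param node.
    #             self.in_default = True
    #             if node.default is not None:
    #                 self.defaults_seen += 1
    #
    #         def leave_Param_default(self, node: cst.Param) -> None:
    #             self.in_default = False
    #
    #     cst.parse_module("def f(a, b=1, c=2): pass").visit(DefaultTracker())
    # -------------------------------------------------------------------------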
@mark_no_op def visit_RightParen(self, node: "RightParen") -> Optional[bool]: pass @mark_no_op def visit_RightParen_whitespace_before(self, node: "RightParen") -> None: pass @mark_no_op def leave_RightParen_whitespace_before(self, node: "RightParen") -> None: pass @mark_no_op def visit_RightShift(self, node: "RightShift") -> Optional[bool]: pass @mark_no_op def visit_RightShift_whitespace_before(self, node: "RightShift") -> None: pass @mark_no_op def leave_RightShift_whitespace_before(self, node: "RightShift") -> None: pass @mark_no_op def visit_RightShift_whitespace_after(self, node: "RightShift") -> None: pass @mark_no_op def leave_RightShift_whitespace_after(self, node: "RightShift") -> None: pass @mark_no_op def visit_RightShiftAssign(self, node: "RightShiftAssign") -> Optional[bool]: pass @mark_no_op def visit_RightShiftAssign_whitespace_before( self, node: "RightShiftAssign" ) -> None: pass @mark_no_op def leave_RightShiftAssign_whitespace_before( self, node: "RightShiftAssign" ) -> None: pass @mark_no_op def visit_RightShiftAssign_whitespace_after(self, node: "RightShiftAssign") -> None: pass @mark_no_op def leave_RightShiftAssign_whitespace_after(self, node: "RightShiftAssign") -> None: pass @mark_no_op def visit_RightSquareBracket(self, node: "RightSquareBracket") -> Optional[bool]: pass @mark_no_op def visit_RightSquareBracket_whitespace_before( self, node: "RightSquareBracket" ) -> None: pass @mark_no_op def leave_RightSquareBracket_whitespace_before( self, node: "RightSquareBracket" ) -> None: pass @mark_no_op def visit_Semicolon(self, node: "Semicolon") -> Optional[bool]: pass @mark_no_op def visit_Semicolon_whitespace_before(self, node: "Semicolon") -> None: pass @mark_no_op def leave_Semicolon_whitespace_before(self, node: "Semicolon") -> None: pass @mark_no_op def visit_Semicolon_whitespace_after(self, node: "Semicolon") -> None: pass @mark_no_op def leave_Semicolon_whitespace_after(self, node: "Semicolon") -> None: pass @mark_no_op def visit_Set(self, node: "Set") -> Optional[bool]: pass @mark_no_op def visit_Set_elements(self, node: "Set") -> None: pass @mark_no_op def leave_Set_elements(self, node: "Set") -> None: pass @mark_no_op def visit_Set_lbrace(self, node: "Set") -> None: pass @mark_no_op def leave_Set_lbrace(self, node: "Set") -> None: pass @mark_no_op def visit_Set_rbrace(self, node: "Set") -> None: pass @mark_no_op def leave_Set_rbrace(self, node: "Set") -> None: pass @mark_no_op def visit_Set_lpar(self, node: "Set") -> None: pass @mark_no_op def leave_Set_lpar(self, node: "Set") -> None: pass @mark_no_op def visit_Set_rpar(self, node: "Set") -> None: pass @mark_no_op def leave_Set_rpar(self, node: "Set") -> None: pass @mark_no_op def visit_SetComp(self, node: "SetComp") -> Optional[bool]: pass @mark_no_op def visit_SetComp_elt(self, node: "SetComp") -> None: pass @mark_no_op def leave_SetComp_elt(self, node: "SetComp") -> None: pass @mark_no_op def visit_SetComp_for_in(self, node: "SetComp") -> None: pass @mark_no_op def leave_SetComp_for_in(self, node: "SetComp") -> None: pass @mark_no_op def visit_SetComp_lbrace(self, node: "SetComp") -> None: pass @mark_no_op def leave_SetComp_lbrace(self, node: "SetComp") -> None: pass @mark_no_op def visit_SetComp_rbrace(self, node: "SetComp") -> None: pass @mark_no_op def leave_SetComp_rbrace(self, node: "SetComp") -> None: pass @mark_no_op def visit_SetComp_lpar(self, node: "SetComp") -> None: pass @mark_no_op def leave_SetComp_lpar(self, node: "SetComp") -> None: pass @mark_no_op def visit_SetComp_rpar(self, node: 
"SetComp") -> None: pass @mark_no_op def leave_SetComp_rpar(self, node: "SetComp") -> None: pass @mark_no_op def visit_SimpleStatementLine(self, node: "SimpleStatementLine") -> Optional[bool]: pass @mark_no_op def visit_SimpleStatementLine_body(self, node: "SimpleStatementLine") -> None: pass @mark_no_op def leave_SimpleStatementLine_body(self, node: "SimpleStatementLine") -> None: pass @mark_no_op def visit_SimpleStatementLine_leading_lines( self, node: "SimpleStatementLine" ) -> None: pass @mark_no_op def leave_SimpleStatementLine_leading_lines( self, node: "SimpleStatementLine" ) -> None: pass @mark_no_op def visit_SimpleStatementLine_trailing_whitespace( self, node: "SimpleStatementLine" ) -> None: pass @mark_no_op def leave_SimpleStatementLine_trailing_whitespace( self, node: "SimpleStatementLine" ) -> None: pass @mark_no_op def visit_SimpleStatementSuite( self, node: "SimpleStatementSuite" ) -> Optional[bool]: pass @mark_no_op def visit_SimpleStatementSuite_body(self, node: "SimpleStatementSuite") -> None: pass @mark_no_op def leave_SimpleStatementSuite_body(self, node: "SimpleStatementSuite") -> None: pass @mark_no_op def visit_SimpleStatementSuite_leading_whitespace( self, node: "SimpleStatementSuite" ) -> None: pass @mark_no_op def leave_SimpleStatementSuite_leading_whitespace( self, node: "SimpleStatementSuite" ) -> None: pass @mark_no_op def visit_SimpleStatementSuite_trailing_whitespace( self, node: "SimpleStatementSuite" ) -> None: pass @mark_no_op def leave_SimpleStatementSuite_trailing_whitespace( self, node: "SimpleStatementSuite" ) -> None: pass @mark_no_op def visit_SimpleString(self, node: "SimpleString") -> Optional[bool]: pass @mark_no_op def visit_SimpleString_value(self, node: "SimpleString") -> None: pass @mark_no_op def leave_SimpleString_value(self, node: "SimpleString") -> None: pass @mark_no_op def visit_SimpleString_lpar(self, node: "SimpleString") -> None: pass @mark_no_op def leave_SimpleString_lpar(self, node: "SimpleString") -> None: pass @mark_no_op def visit_SimpleString_rpar(self, node: "SimpleString") -> None: pass @mark_no_op def leave_SimpleString_rpar(self, node: "SimpleString") -> None: pass @mark_no_op def visit_SimpleWhitespace(self, node: "SimpleWhitespace") -> Optional[bool]: pass @mark_no_op def visit_SimpleWhitespace_value(self, node: "SimpleWhitespace") -> None: pass @mark_no_op def leave_SimpleWhitespace_value(self, node: "SimpleWhitespace") -> None: pass @mark_no_op def visit_Slice(self, node: "Slice") -> Optional[bool]: pass @mark_no_op def visit_Slice_lower(self, node: "Slice") -> None: pass @mark_no_op def leave_Slice_lower(self, node: "Slice") -> None: pass @mark_no_op def visit_Slice_upper(self, node: "Slice") -> None: pass @mark_no_op def leave_Slice_upper(self, node: "Slice") -> None: pass @mark_no_op def visit_Slice_step(self, node: "Slice") -> None: pass @mark_no_op def leave_Slice_step(self, node: "Slice") -> None: pass @mark_no_op def visit_Slice_first_colon(self, node: "Slice") -> None: pass @mark_no_op def leave_Slice_first_colon(self, node: "Slice") -> None: pass @mark_no_op def visit_Slice_second_colon(self, node: "Slice") -> None: pass @mark_no_op def leave_Slice_second_colon(self, node: "Slice") -> None: pass @mark_no_op def visit_StarredDictElement(self, node: "StarredDictElement") -> Optional[bool]: pass @mark_no_op def visit_StarredDictElement_value(self, node: "StarredDictElement") -> None: pass @mark_no_op def leave_StarredDictElement_value(self, node: "StarredDictElement") -> None: pass @mark_no_op def 
visit_StarredDictElement_comma(self, node: "StarredDictElement") -> None: pass @mark_no_op def leave_StarredDictElement_comma(self, node: "StarredDictElement") -> None: pass @mark_no_op def visit_StarredDictElement_whitespace_before_value( self, node: "StarredDictElement" ) -> None: pass @mark_no_op def leave_StarredDictElement_whitespace_before_value( self, node: "StarredDictElement" ) -> None: pass @mark_no_op def visit_StarredElement(self, node: "StarredElement") -> Optional[bool]: pass @mark_no_op def visit_StarredElement_value(self, node: "StarredElement") -> None: pass @mark_no_op def leave_StarredElement_value(self, node: "StarredElement") -> None: pass @mark_no_op def visit_StarredElement_comma(self, node: "StarredElement") -> None: pass @mark_no_op def leave_StarredElement_comma(self, node: "StarredElement") -> None: pass @mark_no_op def visit_StarredElement_lpar(self, node: "StarredElement") -> None: pass @mark_no_op def leave_StarredElement_lpar(self, node: "StarredElement") -> None: pass @mark_no_op def visit_StarredElement_rpar(self, node: "StarredElement") -> None: pass @mark_no_op def leave_StarredElement_rpar(self, node: "StarredElement") -> None: pass @mark_no_op def visit_StarredElement_whitespace_before_value( self, node: "StarredElement" ) -> None: pass @mark_no_op def leave_StarredElement_whitespace_before_value( self, node: "StarredElement" ) -> None: pass @mark_no_op def visit_Subscript(self, node: "Subscript") -> Optional[bool]: pass @mark_no_op def visit_Subscript_value(self, node: "Subscript") -> None: pass @mark_no_op def leave_Subscript_value(self, node: "Subscript") -> None: pass @mark_no_op def visit_Subscript_slice(self, node: "Subscript") -> None: pass @mark_no_op def leave_Subscript_slice(self, node: "Subscript") -> None: pass @mark_no_op def visit_Subscript_lbracket(self, node: "Subscript") -> None: pass @mark_no_op def leave_Subscript_lbracket(self, node: "Subscript") -> None: pass @mark_no_op def visit_Subscript_rbracket(self, node: "Subscript") -> None: pass @mark_no_op def leave_Subscript_rbracket(self, node: "Subscript") -> None: pass @mark_no_op def visit_Subscript_lpar(self, node: "Subscript") -> None: pass @mark_no_op def leave_Subscript_lpar(self, node: "Subscript") -> None: pass @mark_no_op def visit_Subscript_rpar(self, node: "Subscript") -> None: pass @mark_no_op def leave_Subscript_rpar(self, node: "Subscript") -> None: pass @mark_no_op def visit_Subscript_whitespace_after_value(self, node: "Subscript") -> None: pass @mark_no_op def leave_Subscript_whitespace_after_value(self, node: "Subscript") -> None: pass @mark_no_op def visit_SubscriptElement(self, node: "SubscriptElement") -> Optional[bool]: pass @mark_no_op def visit_SubscriptElement_slice(self, node: "SubscriptElement") -> None: pass @mark_no_op def leave_SubscriptElement_slice(self, node: "SubscriptElement") -> None: pass @mark_no_op def visit_SubscriptElement_comma(self, node: "SubscriptElement") -> None: pass @mark_no_op def leave_SubscriptElement_comma(self, node: "SubscriptElement") -> None: pass @mark_no_op def visit_Subtract(self, node: "Subtract") -> Optional[bool]: pass @mark_no_op def visit_Subtract_whitespace_before(self, node: "Subtract") -> None: pass @mark_no_op def leave_Subtract_whitespace_before(self, node: "Subtract") -> None: pass @mark_no_op def visit_Subtract_whitespace_after(self, node: "Subtract") -> None: pass @mark_no_op def leave_Subtract_whitespace_after(self, node: "Subtract") -> None: pass @mark_no_op def visit_SubtractAssign(self, node: "SubtractAssign") 
-> Optional[bool]: pass @mark_no_op def visit_SubtractAssign_whitespace_before(self, node: "SubtractAssign") -> None: pass @mark_no_op def leave_SubtractAssign_whitespace_before(self, node: "SubtractAssign") -> None: pass @mark_no_op def visit_SubtractAssign_whitespace_after(self, node: "SubtractAssign") -> None: pass @mark_no_op def leave_SubtractAssign_whitespace_after(self, node: "SubtractAssign") -> None: pass @mark_no_op def visit_TrailingWhitespace(self, node: "TrailingWhitespace") -> Optional[bool]: pass @mark_no_op def visit_TrailingWhitespace_whitespace(self, node: "TrailingWhitespace") -> None: pass @mark_no_op def leave_TrailingWhitespace_whitespace(self, node: "TrailingWhitespace") -> None: pass @mark_no_op def visit_TrailingWhitespace_comment(self, node: "TrailingWhitespace") -> None: pass @mark_no_op def leave_TrailingWhitespace_comment(self, node: "TrailingWhitespace") -> None: pass @mark_no_op def visit_TrailingWhitespace_newline(self, node: "TrailingWhitespace") -> None: pass @mark_no_op def leave_TrailingWhitespace_newline(self, node: "TrailingWhitespace") -> None: pass @mark_no_op def visit_Try(self, node: "Try") -> Optional[bool]: pass @mark_no_op def visit_Try_body(self, node: "Try") -> None: pass @mark_no_op def leave_Try_body(self, node: "Try") -> None: pass @mark_no_op def visit_Try_handlers(self, node: "Try") -> None: pass @mark_no_op def leave_Try_handlers(self, node: "Try") -> None: pass @mark_no_op def visit_Try_orelse(self, node: "Try") -> None: pass @mark_no_op def leave_Try_orelse(self, node: "Try") -> None: pass @mark_no_op def visit_Try_finalbody(self, node: "Try") -> None: pass @mark_no_op def leave_Try_finalbody(self, node: "Try") -> None: pass @mark_no_op def visit_Try_leading_lines(self, node: "Try") -> None: pass @mark_no_op def leave_Try_leading_lines(self, node: "Try") -> None: pass @mark_no_op def visit_Try_whitespace_before_colon(self, node: "Try") -> None: pass @mark_no_op def leave_Try_whitespace_before_colon(self, node: "Try") -> None: pass @mark_no_op def visit_TryStar(self, node: "TryStar") -> Optional[bool]: pass @mark_no_op def visit_TryStar_body(self, node: "TryStar") -> None: pass @mark_no_op def leave_TryStar_body(self, node: "TryStar") -> None: pass @mark_no_op def visit_TryStar_handlers(self, node: "TryStar") -> None: pass @mark_no_op def leave_TryStar_handlers(self, node: "TryStar") -> None: pass @mark_no_op def visit_TryStar_orelse(self, node: "TryStar") -> None: pass @mark_no_op def leave_TryStar_orelse(self, node: "TryStar") -> None: pass @mark_no_op def visit_TryStar_finalbody(self, node: "TryStar") -> None: pass @mark_no_op def leave_TryStar_finalbody(self, node: "TryStar") -> None: pass @mark_no_op def visit_TryStar_leading_lines(self, node: "TryStar") -> None: pass @mark_no_op def leave_TryStar_leading_lines(self, node: "TryStar") -> None: pass @mark_no_op def visit_TryStar_whitespace_before_colon(self, node: "TryStar") -> None: pass @mark_no_op def leave_TryStar_whitespace_before_colon(self, node: "TryStar") -> None: pass @mark_no_op def visit_Tuple(self, node: "Tuple") -> Optional[bool]: pass @mark_no_op def visit_Tuple_elements(self, node: "Tuple") -> None: pass @mark_no_op def leave_Tuple_elements(self, node: "Tuple") -> None: pass @mark_no_op def visit_Tuple_lpar(self, node: "Tuple") -> None: pass @mark_no_op def leave_Tuple_lpar(self, node: "Tuple") -> None: pass @mark_no_op def visit_Tuple_rpar(self, node: "Tuple") -> None: pass @mark_no_op def leave_Tuple_rpar(self, node: "Tuple") -> None: pass @mark_no_op def 
visit_TypeAlias(self, node: "TypeAlias") -> Optional[bool]: pass @mark_no_op def visit_TypeAlias_name(self, node: "TypeAlias") -> None: pass @mark_no_op def leave_TypeAlias_name(self, node: "TypeAlias") -> None: pass @mark_no_op def visit_TypeAlias_value(self, node: "TypeAlias") -> None: pass @mark_no_op def leave_TypeAlias_value(self, node: "TypeAlias") -> None: pass @mark_no_op def visit_TypeAlias_type_parameters(self, node: "TypeAlias") -> None: pass @mark_no_op def leave_TypeAlias_type_parameters(self, node: "TypeAlias") -> None: pass @mark_no_op def visit_TypeAlias_whitespace_after_type(self, node: "TypeAlias") -> None: pass @mark_no_op def leave_TypeAlias_whitespace_after_type(self, node: "TypeAlias") -> None: pass @mark_no_op def visit_TypeAlias_whitespace_after_name(self, node: "TypeAlias") -> None: pass @mark_no_op def leave_TypeAlias_whitespace_after_name(self, node: "TypeAlias") -> None: pass @mark_no_op def visit_TypeAlias_whitespace_after_type_parameters( self, node: "TypeAlias" ) -> None: pass @mark_no_op def leave_TypeAlias_whitespace_after_type_parameters( self, node: "TypeAlias" ) -> None: pass @mark_no_op def visit_TypeAlias_whitespace_after_equals(self, node: "TypeAlias") -> None: pass @mark_no_op def leave_TypeAlias_whitespace_after_equals(self, node: "TypeAlias") -> None: pass @mark_no_op def visit_TypeAlias_semicolon(self, node: "TypeAlias") -> None: pass @mark_no_op def leave_TypeAlias_semicolon(self, node: "TypeAlias") -> None: pass @mark_no_op def visit_TypeParam(self, node: "TypeParam") -> Optional[bool]: pass @mark_no_op def visit_TypeParam_param(self, node: "TypeParam") -> None: pass @mark_no_op def leave_TypeParam_param(self, node: "TypeParam") -> None: pass @mark_no_op def visit_TypeParam_comma(self, node: "TypeParam") -> None: pass @mark_no_op def leave_TypeParam_comma(self, node: "TypeParam") -> None: pass @mark_no_op def visit_TypeParameters(self, node: "TypeParameters") -> Optional[bool]: pass @mark_no_op def visit_TypeParameters_params(self, node: "TypeParameters") -> None: pass @mark_no_op def leave_TypeParameters_params(self, node: "TypeParameters") -> None: pass @mark_no_op def visit_TypeParameters_lbracket(self, node: "TypeParameters") -> None: pass @mark_no_op def leave_TypeParameters_lbracket(self, node: "TypeParameters") -> None: pass @mark_no_op def visit_TypeParameters_rbracket(self, node: "TypeParameters") -> None: pass @mark_no_op def leave_TypeParameters_rbracket(self, node: "TypeParameters") -> None: pass @mark_no_op def visit_TypeVar(self, node: "TypeVar") -> Optional[bool]: pass @mark_no_op def visit_TypeVar_name(self, node: "TypeVar") -> None: pass @mark_no_op def leave_TypeVar_name(self, node: "TypeVar") -> None: pass @mark_no_op def visit_TypeVar_bound(self, node: "TypeVar") -> None: pass @mark_no_op def leave_TypeVar_bound(self, node: "TypeVar") -> None: pass @mark_no_op def visit_TypeVar_colon(self, node: "TypeVar") -> None: pass @mark_no_op def leave_TypeVar_colon(self, node: "TypeVar") -> None: pass @mark_no_op def visit_TypeVarTuple(self, node: "TypeVarTuple") -> Optional[bool]: pass @mark_no_op def visit_TypeVarTuple_name(self, node: "TypeVarTuple") -> None: pass @mark_no_op def leave_TypeVarTuple_name(self, node: "TypeVarTuple") -> None: pass @mark_no_op def visit_TypeVarTuple_whitespace_after_star(self, node: "TypeVarTuple") -> None: pass @mark_no_op def leave_TypeVarTuple_whitespace_after_star(self, node: "TypeVarTuple") -> None: pass @mark_no_op def visit_UnaryOperation(self, node: "UnaryOperation") -> Optional[bool]: pass 
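    # --- Editor's illustrative sketch (not part of the generated API) -------
    # The per-node hooks collected here are specialized below into
    # CSTTypedVisitorFunctions (read-only leave_<Node> hooks returning None)
    # and CSTTypedTransformerFunctions (leave_<Node> hooks that return a
    # replacement node). A minimal transformer sketch, assuming the usual
    # `import libcst as cst`; `DropDoubleNegative` is a hypothetical name:
    #
    #     import libcst as cst
    #
    #     class DropDoubleNegative(cst.CSTTransformer):
    #         def leave_UnaryOperation(
    #             self,
    #             original_node: cst.UnaryOperation,
    #             updated_node: cst.UnaryOperation,
    #         ) -> cst.BaseExpression:
    #             inner = updated_node.expression
    #             if (
    #                 isinstance(updated_node.operator, cst.Minus)
    #                 and isinstance(inner, cst.UnaryOperation)
    #                 and isinstance(inner.operator, cst.Minus)
    #             ):
    #                 # Collapse a double negation: -(-x)  ->  x
    #                 return inner.expression
    #             return updated_node
    #
    #     print(cst.parse_module("y = -(-x)\n").visit(DropDoubleNegative()).code)
    # -------------------------------------------------------------------------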
@mark_no_op def visit_UnaryOperation_operator(self, node: "UnaryOperation") -> None: pass @mark_no_op def leave_UnaryOperation_operator(self, node: "UnaryOperation") -> None: pass @mark_no_op def visit_UnaryOperation_expression(self, node: "UnaryOperation") -> None: pass @mark_no_op def leave_UnaryOperation_expression(self, node: "UnaryOperation") -> None: pass @mark_no_op def visit_UnaryOperation_lpar(self, node: "UnaryOperation") -> None: pass @mark_no_op def leave_UnaryOperation_lpar(self, node: "UnaryOperation") -> None: pass @mark_no_op def visit_UnaryOperation_rpar(self, node: "UnaryOperation") -> None: pass @mark_no_op def leave_UnaryOperation_rpar(self, node: "UnaryOperation") -> None: pass @mark_no_op def visit_While(self, node: "While") -> Optional[bool]: pass @mark_no_op def visit_While_test(self, node: "While") -> None: pass @mark_no_op def leave_While_test(self, node: "While") -> None: pass @mark_no_op def visit_While_body(self, node: "While") -> None: pass @mark_no_op def leave_While_body(self, node: "While") -> None: pass @mark_no_op def visit_While_orelse(self, node: "While") -> None: pass @mark_no_op def leave_While_orelse(self, node: "While") -> None: pass @mark_no_op def visit_While_leading_lines(self, node: "While") -> None: pass @mark_no_op def leave_While_leading_lines(self, node: "While") -> None: pass @mark_no_op def visit_While_whitespace_after_while(self, node: "While") -> None: pass @mark_no_op def leave_While_whitespace_after_while(self, node: "While") -> None: pass @mark_no_op def visit_While_whitespace_before_colon(self, node: "While") -> None: pass @mark_no_op def leave_While_whitespace_before_colon(self, node: "While") -> None: pass @mark_no_op def visit_With(self, node: "With") -> Optional[bool]: pass @mark_no_op def visit_With_items(self, node: "With") -> None: pass @mark_no_op def leave_With_items(self, node: "With") -> None: pass @mark_no_op def visit_With_body(self, node: "With") -> None: pass @mark_no_op def leave_With_body(self, node: "With") -> None: pass @mark_no_op def visit_With_asynchronous(self, node: "With") -> None: pass @mark_no_op def leave_With_asynchronous(self, node: "With") -> None: pass @mark_no_op def visit_With_leading_lines(self, node: "With") -> None: pass @mark_no_op def leave_With_leading_lines(self, node: "With") -> None: pass @mark_no_op def visit_With_lpar(self, node: "With") -> None: pass @mark_no_op def leave_With_lpar(self, node: "With") -> None: pass @mark_no_op def visit_With_rpar(self, node: "With") -> None: pass @mark_no_op def leave_With_rpar(self, node: "With") -> None: pass @mark_no_op def visit_With_whitespace_after_with(self, node: "With") -> None: pass @mark_no_op def leave_With_whitespace_after_with(self, node: "With") -> None: pass @mark_no_op def visit_With_whitespace_before_colon(self, node: "With") -> None: pass @mark_no_op def leave_With_whitespace_before_colon(self, node: "With") -> None: pass @mark_no_op def visit_WithItem(self, node: "WithItem") -> Optional[bool]: pass @mark_no_op def visit_WithItem_item(self, node: "WithItem") -> None: pass @mark_no_op def leave_WithItem_item(self, node: "WithItem") -> None: pass @mark_no_op def visit_WithItem_asname(self, node: "WithItem") -> None: pass @mark_no_op def leave_WithItem_asname(self, node: "WithItem") -> None: pass @mark_no_op def visit_WithItem_comma(self, node: "WithItem") -> None: pass @mark_no_op def leave_WithItem_comma(self, node: "WithItem") -> None: pass @mark_no_op def visit_Yield(self, node: "Yield") -> Optional[bool]: pass @mark_no_op def 
visit_Yield_value(self, node: "Yield") -> None:
        pass

    @mark_no_op
    def leave_Yield_value(self, node: "Yield") -> None:
        pass

    @mark_no_op
    def visit_Yield_lpar(self, node: "Yield") -> None:
        pass

    @mark_no_op
    def leave_Yield_lpar(self, node: "Yield") -> None:
        pass

    @mark_no_op
    def visit_Yield_rpar(self, node: "Yield") -> None:
        pass

    @mark_no_op
    def leave_Yield_rpar(self, node: "Yield") -> None:
        pass

    @mark_no_op
    def visit_Yield_whitespace_after_yield(self, node: "Yield") -> None:
        pass

    @mark_no_op
    def leave_Yield_whitespace_after_yield(self, node: "Yield") -> None:
        pass


class CSTTypedVisitorFunctions(CSTTypedBaseFunctions):
    @mark_no_op
    def leave_Add(self, original_node: "Add") -> None:
        pass

    @mark_no_op
    def leave_AddAssign(self, original_node: "AddAssign") -> None:
        pass

    @mark_no_op
    def leave_And(self, original_node: "And") -> None:
        pass

    @mark_no_op
    def leave_AnnAssign(self, original_node: "AnnAssign") -> None:
        pass

    @mark_no_op
    def leave_Annotation(self, original_node: "Annotation") -> None:
        pass

    @mark_no_op
    def leave_Arg(self, original_node: "Arg") -> None:
        pass

    @mark_no_op
    def leave_AsName(self, original_node: "AsName") -> None:
        pass

    @mark_no_op
    def leave_Assert(self, original_node: "Assert") -> None:
        pass

    @mark_no_op
    def leave_Assign(self, original_node: "Assign") -> None:
        pass

    @mark_no_op
    def leave_AssignEqual(self, original_node: "AssignEqual") -> None:
        pass

    @mark_no_op
    def leave_AssignTarget(self, original_node: "AssignTarget") -> None:
        pass

    @mark_no_op
    def leave_Asynchronous(self, original_node: "Asynchronous") -> None:
        pass

    @mark_no_op
    def leave_Attribute(self, original_node: "Attribute") -> None:
        pass

    @mark_no_op
    def leave_AugAssign(self, original_node: "AugAssign") -> None:
        pass

    @mark_no_op
    def leave_Await(self, original_node: "Await") -> None:
        pass

    @mark_no_op
    def leave_BinaryOperation(self, original_node: "BinaryOperation") -> None:
        pass

    @mark_no_op
    def leave_BitAnd(self, original_node: "BitAnd") -> None:
        pass

    @mark_no_op
    def leave_BitAndAssign(self, original_node: "BitAndAssign") -> None:
        pass

    @mark_no_op
    def leave_BitInvert(self, original_node: "BitInvert") -> None:
        pass

    @mark_no_op
    def leave_BitOr(self, original_node: "BitOr") -> None:
        pass

    @mark_no_op
    def leave_BitOrAssign(self, original_node: "BitOrAssign") -> None:
        pass

    @mark_no_op
    def leave_BitXor(self, original_node: "BitXor") -> None:
        pass

    @mark_no_op
    def leave_BitXorAssign(self, original_node: "BitXorAssign") -> None:
        pass

    @mark_no_op
    def leave_BooleanOperation(self, original_node: "BooleanOperation") -> None:
        pass

    @mark_no_op
    def leave_Break(self, original_node: "Break") -> None:
        pass

    @mark_no_op
    def leave_Call(self, original_node: "Call") -> None:
        pass

    @mark_no_op
    def leave_ClassDef(self, original_node: "ClassDef") -> None:
        pass

    @mark_no_op
    def leave_Colon(self, original_node: "Colon") -> None:
        pass

    @mark_no_op
    def leave_Comma(self, original_node: "Comma") -> None:
        pass

    @mark_no_op
    def leave_Comment(self, original_node: "Comment") -> None:
        pass

    @mark_no_op
    def leave_CompFor(self, original_node: "CompFor") -> None:
        pass

    @mark_no_op
    def leave_CompIf(self, original_node: "CompIf") -> None:
        pass

    @mark_no_op
    def leave_Comparison(self, original_node: "Comparison") -> None:
        pass

    @mark_no_op
    def leave_ComparisonTarget(self, original_node: "ComparisonTarget") -> None:
        pass

    @mark_no_op
    def leave_ConcatenatedString(self, original_node: "ConcatenatedString") -> None:
        pass

    @mark_no_op
    def leave_Continue(self, original_node: "Continue") -> None:
        pass

    @mark_no_op
    def leave_Decorator(self, original_node: "Decorator") -> None:
        pass

    @mark_no_op
def leave_Del(self, original_node: "Del") -> None: pass @mark_no_op def leave_Dict(self, original_node: "Dict") -> None: pass @mark_no_op def leave_DictComp(self, original_node: "DictComp") -> None: pass @mark_no_op def leave_DictElement(self, original_node: "DictElement") -> None: pass @mark_no_op def leave_Divide(self, original_node: "Divide") -> None: pass @mark_no_op def leave_DivideAssign(self, original_node: "DivideAssign") -> None: pass @mark_no_op def leave_Dot(self, original_node: "Dot") -> None: pass @mark_no_op def leave_Element(self, original_node: "Element") -> None: pass @mark_no_op def leave_Ellipsis(self, original_node: "Ellipsis") -> None: pass @mark_no_op def leave_Else(self, original_node: "Else") -> None: pass @mark_no_op def leave_EmptyLine(self, original_node: "EmptyLine") -> None: pass @mark_no_op def leave_Equal(self, original_node: "Equal") -> None: pass @mark_no_op def leave_ExceptHandler(self, original_node: "ExceptHandler") -> None: pass @mark_no_op def leave_ExceptStarHandler(self, original_node: "ExceptStarHandler") -> None: pass @mark_no_op def leave_Expr(self, original_node: "Expr") -> None: pass @mark_no_op def leave_Finally(self, original_node: "Finally") -> None: pass @mark_no_op def leave_Float(self, original_node: "Float") -> None: pass @mark_no_op def leave_FloorDivide(self, original_node: "FloorDivide") -> None: pass @mark_no_op def leave_FloorDivideAssign(self, original_node: "FloorDivideAssign") -> None: pass @mark_no_op def leave_For(self, original_node: "For") -> None: pass @mark_no_op def leave_FormattedString(self, original_node: "FormattedString") -> None: pass @mark_no_op def leave_FormattedStringExpression( self, original_node: "FormattedStringExpression" ) -> None: pass @mark_no_op def leave_FormattedStringText(self, original_node: "FormattedStringText") -> None: pass @mark_no_op def leave_From(self, original_node: "From") -> None: pass @mark_no_op def leave_FunctionDef(self, original_node: "FunctionDef") -> None: pass @mark_no_op def leave_GeneratorExp(self, original_node: "GeneratorExp") -> None: pass @mark_no_op def leave_Global(self, original_node: "Global") -> None: pass @mark_no_op def leave_GreaterThan(self, original_node: "GreaterThan") -> None: pass @mark_no_op def leave_GreaterThanEqual(self, original_node: "GreaterThanEqual") -> None: pass @mark_no_op def leave_If(self, original_node: "If") -> None: pass @mark_no_op def leave_IfExp(self, original_node: "IfExp") -> None: pass @mark_no_op def leave_Imaginary(self, original_node: "Imaginary") -> None: pass @mark_no_op def leave_Import(self, original_node: "Import") -> None: pass @mark_no_op def leave_ImportAlias(self, original_node: "ImportAlias") -> None: pass @mark_no_op def leave_ImportFrom(self, original_node: "ImportFrom") -> None: pass @mark_no_op def leave_ImportStar(self, original_node: "ImportStar") -> None: pass @mark_no_op def leave_In(self, original_node: "In") -> None: pass @mark_no_op def leave_IndentedBlock(self, original_node: "IndentedBlock") -> None: pass @mark_no_op def leave_Index(self, original_node: "Index") -> None: pass @mark_no_op def leave_Integer(self, original_node: "Integer") -> None: pass @mark_no_op def leave_Is(self, original_node: "Is") -> None: pass @mark_no_op def leave_IsNot(self, original_node: "IsNot") -> None: pass @mark_no_op def leave_Lambda(self, original_node: "Lambda") -> None: pass @mark_no_op def leave_LeftCurlyBrace(self, original_node: "LeftCurlyBrace") -> None: pass @mark_no_op def leave_LeftParen(self, original_node: "LeftParen") -> 
None: pass @mark_no_op def leave_LeftShift(self, original_node: "LeftShift") -> None: pass @mark_no_op def leave_LeftShiftAssign(self, original_node: "LeftShiftAssign") -> None: pass @mark_no_op def leave_LeftSquareBracket(self, original_node: "LeftSquareBracket") -> None: pass @mark_no_op def leave_LessThan(self, original_node: "LessThan") -> None: pass @mark_no_op def leave_LessThanEqual(self, original_node: "LessThanEqual") -> None: pass @mark_no_op def leave_List(self, original_node: "List") -> None: pass @mark_no_op def leave_ListComp(self, original_node: "ListComp") -> None: pass @mark_no_op def leave_Match(self, original_node: "Match") -> None: pass @mark_no_op def leave_MatchAs(self, original_node: "MatchAs") -> None: pass @mark_no_op def leave_MatchCase(self, original_node: "MatchCase") -> None: pass @mark_no_op def leave_MatchClass(self, original_node: "MatchClass") -> None: pass @mark_no_op def leave_MatchKeywordElement(self, original_node: "MatchKeywordElement") -> None: pass @mark_no_op def leave_MatchList(self, original_node: "MatchList") -> None: pass @mark_no_op def leave_MatchMapping(self, original_node: "MatchMapping") -> None: pass @mark_no_op def leave_MatchMappingElement(self, original_node: "MatchMappingElement") -> None: pass @mark_no_op def leave_MatchOr(self, original_node: "MatchOr") -> None: pass @mark_no_op def leave_MatchOrElement(self, original_node: "MatchOrElement") -> None: pass @mark_no_op def leave_MatchPattern(self, original_node: "MatchPattern") -> None: pass @mark_no_op def leave_MatchSequence(self, original_node: "MatchSequence") -> None: pass @mark_no_op def leave_MatchSequenceElement(self, original_node: "MatchSequenceElement") -> None: pass @mark_no_op def leave_MatchSingleton(self, original_node: "MatchSingleton") -> None: pass @mark_no_op def leave_MatchStar(self, original_node: "MatchStar") -> None: pass @mark_no_op def leave_MatchTuple(self, original_node: "MatchTuple") -> None: pass @mark_no_op def leave_MatchValue(self, original_node: "MatchValue") -> None: pass @mark_no_op def leave_MatrixMultiply(self, original_node: "MatrixMultiply") -> None: pass @mark_no_op def leave_MatrixMultiplyAssign(self, original_node: "MatrixMultiplyAssign") -> None: pass @mark_no_op def leave_Minus(self, original_node: "Minus") -> None: pass @mark_no_op def leave_Module(self, original_node: "Module") -> None: pass @mark_no_op def leave_Modulo(self, original_node: "Modulo") -> None: pass @mark_no_op def leave_ModuloAssign(self, original_node: "ModuloAssign") -> None: pass @mark_no_op def leave_Multiply(self, original_node: "Multiply") -> None: pass @mark_no_op def leave_MultiplyAssign(self, original_node: "MultiplyAssign") -> None: pass @mark_no_op def leave_Name(self, original_node: "Name") -> None: pass @mark_no_op def leave_NameItem(self, original_node: "NameItem") -> None: pass @mark_no_op def leave_NamedExpr(self, original_node: "NamedExpr") -> None: pass @mark_no_op def leave_Newline(self, original_node: "Newline") -> None: pass @mark_no_op def leave_Nonlocal(self, original_node: "Nonlocal") -> None: pass @mark_no_op def leave_Not(self, original_node: "Not") -> None: pass @mark_no_op def leave_NotEqual(self, original_node: "NotEqual") -> None: pass @mark_no_op def leave_NotIn(self, original_node: "NotIn") -> None: pass @mark_no_op def leave_Or(self, original_node: "Or") -> None: pass @mark_no_op def leave_Param(self, original_node: "Param") -> None: pass @mark_no_op def leave_ParamSlash(self, original_node: "ParamSlash") -> None: pass @mark_no_op def 
leave_ParamSpec(self, original_node: "ParamSpec") -> None: pass @mark_no_op def leave_ParamStar(self, original_node: "ParamStar") -> None: pass @mark_no_op def leave_Parameters(self, original_node: "Parameters") -> None: pass @mark_no_op def leave_ParenthesizedWhitespace( self, original_node: "ParenthesizedWhitespace" ) -> None: pass @mark_no_op def leave_Pass(self, original_node: "Pass") -> None: pass @mark_no_op def leave_Plus(self, original_node: "Plus") -> None: pass @mark_no_op def leave_Power(self, original_node: "Power") -> None: pass @mark_no_op def leave_PowerAssign(self, original_node: "PowerAssign") -> None: pass @mark_no_op def leave_Raise(self, original_node: "Raise") -> None: pass @mark_no_op def leave_Return(self, original_node: "Return") -> None: pass @mark_no_op def leave_RightCurlyBrace(self, original_node: "RightCurlyBrace") -> None: pass @mark_no_op def leave_RightParen(self, original_node: "RightParen") -> None: pass @mark_no_op def leave_RightShift(self, original_node: "RightShift") -> None: pass @mark_no_op def leave_RightShiftAssign(self, original_node: "RightShiftAssign") -> None: pass @mark_no_op def leave_RightSquareBracket(self, original_node: "RightSquareBracket") -> None: pass @mark_no_op def leave_Semicolon(self, original_node: "Semicolon") -> None: pass @mark_no_op def leave_Set(self, original_node: "Set") -> None: pass @mark_no_op def leave_SetComp(self, original_node: "SetComp") -> None: pass @mark_no_op def leave_SimpleStatementLine(self, original_node: "SimpleStatementLine") -> None: pass @mark_no_op def leave_SimpleStatementSuite(self, original_node: "SimpleStatementSuite") -> None: pass @mark_no_op def leave_SimpleString(self, original_node: "SimpleString") -> None: pass @mark_no_op def leave_SimpleWhitespace(self, original_node: "SimpleWhitespace") -> None: pass @mark_no_op def leave_Slice(self, original_node: "Slice") -> None: pass @mark_no_op def leave_StarredDictElement(self, original_node: "StarredDictElement") -> None: pass @mark_no_op def leave_StarredElement(self, original_node: "StarredElement") -> None: pass @mark_no_op def leave_Subscript(self, original_node: "Subscript") -> None: pass @mark_no_op def leave_SubscriptElement(self, original_node: "SubscriptElement") -> None: pass @mark_no_op def leave_Subtract(self, original_node: "Subtract") -> None: pass @mark_no_op def leave_SubtractAssign(self, original_node: "SubtractAssign") -> None: pass @mark_no_op def leave_TrailingWhitespace(self, original_node: "TrailingWhitespace") -> None: pass @mark_no_op def leave_Try(self, original_node: "Try") -> None: pass @mark_no_op def leave_TryStar(self, original_node: "TryStar") -> None: pass @mark_no_op def leave_Tuple(self, original_node: "Tuple") -> None: pass @mark_no_op def leave_TypeAlias(self, original_node: "TypeAlias") -> None: pass @mark_no_op def leave_TypeParam(self, original_node: "TypeParam") -> None: pass @mark_no_op def leave_TypeParameters(self, original_node: "TypeParameters") -> None: pass @mark_no_op def leave_TypeVar(self, original_node: "TypeVar") -> None: pass @mark_no_op def leave_TypeVarTuple(self, original_node: "TypeVarTuple") -> None: pass @mark_no_op def leave_UnaryOperation(self, original_node: "UnaryOperation") -> None: pass @mark_no_op def leave_While(self, original_node: "While") -> None: pass @mark_no_op def leave_With(self, original_node: "With") -> None: pass @mark_no_op def leave_WithItem(self, original_node: "WithItem") -> None: pass @mark_no_op def leave_Yield(self, original_node: "Yield") -> None: pass class 
CSTTypedTransformerFunctions(CSTTypedBaseFunctions):
    @mark_no_op
    def leave_Add(self, original_node: "Add", updated_node: "Add") -> "BaseBinaryOp":
        return updated_node

    @mark_no_op
    def leave_AddAssign(
        self, original_node: "AddAssign", updated_node: "AddAssign"
    ) -> "BaseAugOp":
        return updated_node

    @mark_no_op
    def leave_And(self, original_node: "And", updated_node: "And") -> "BaseBooleanOp":
        return updated_node

    @mark_no_op
    def leave_AnnAssign(
        self, original_node: "AnnAssign", updated_node: "AnnAssign"
    ) -> Union[
        "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
    ]:
        return updated_node

    @mark_no_op
    def leave_Annotation(
        self, original_node: "Annotation", updated_node: "Annotation"
    ) -> "Annotation":
        return updated_node

    @mark_no_op
    def leave_Arg(
        self, original_node: "Arg", updated_node: "Arg"
    ) -> Union["Arg", FlattenSentinel["Arg"], RemovalSentinel]:
        return updated_node

    @mark_no_op
    def leave_AsName(self, original_node: "AsName", updated_node: "AsName") -> "AsName":
        return updated_node

    @mark_no_op
    def leave_Assert(
        self, original_node: "Assert", updated_node: "Assert"
    ) -> Union[
        "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
    ]:
        return updated_node

    @mark_no_op
    def leave_Assign(
        self, original_node: "Assign", updated_node: "Assign"
    ) -> Union[
        "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
    ]:
        return updated_node

    @mark_no_op
    def leave_AssignEqual(
        self, original_node: "AssignEqual", updated_node: "AssignEqual"
    ) -> Union["AssignEqual", MaybeSentinel]:
        return updated_node

    @mark_no_op
    def leave_AssignTarget(
        self, original_node: "AssignTarget", updated_node: "AssignTarget"
    ) -> Union["AssignTarget", FlattenSentinel["AssignTarget"], RemovalSentinel]:
        return updated_node

    @mark_no_op
    def leave_Asynchronous(
        self, original_node: "Asynchronous", updated_node: "Asynchronous"
    ) -> "Asynchronous":
        return updated_node

    @mark_no_op
    def leave_Attribute(
        self, original_node: "Attribute", updated_node: "Attribute"
    ) -> "BaseExpression":
        return updated_node

    @mark_no_op
    def leave_AugAssign(
        self, original_node: "AugAssign", updated_node: "AugAssign"
    ) -> Union[
        "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel
    ]:
        return updated_node

    @mark_no_op
    def leave_Await(
        self, original_node: "Await", updated_node: "Await"
    ) -> "BaseExpression":
        return updated_node

    @mark_no_op
    def leave_BinaryOperation(
        self, original_node: "BinaryOperation", updated_node: "BinaryOperation"
    ) -> "BaseExpression":
        return updated_node

    @mark_no_op
    def leave_BitAnd(
        self, original_node: "BitAnd", updated_node: "BitAnd"
    ) -> "BaseBinaryOp":
        return updated_node

    @mark_no_op
    def leave_BitAndAssign(
        self, original_node: "BitAndAssign", updated_node: "BitAndAssign"
    ) -> "BaseAugOp":
        return updated_node

    @mark_no_op
    def leave_BitInvert(
        self, original_node: "BitInvert", updated_node: "BitInvert"
    ) -> "BaseUnaryOp":
        return updated_node

    @mark_no_op
    def leave_BitOr(
        self, original_node: "BitOr", updated_node: "BitOr"
    ) -> Union["BaseBinaryOp", MaybeSentinel]:
        return updated_node

    @mark_no_op
    def leave_BitOrAssign(
        self, original_node: "BitOrAssign", updated_node: "BitOrAssign"
    ) -> "BaseAugOp":
        return updated_node

    @mark_no_op
    def leave_BitXor(
        self, original_node: "BitXor", updated_node: "BitXor"
    ) -> "BaseBinaryOp":
        return updated_node

    @mark_no_op
    def leave_BitXorAssign(
        self, original_node: "BitXorAssign", updated_node: "BitXorAssign"
    ) -> "BaseAugOp":
        return updated_node

    @mark_no_op
    def leave_BooleanOperation(
        self, original_node:
"BooleanOperation", updated_node: "BooleanOperation" ) -> "BaseExpression": return updated_node @mark_no_op def leave_Break( self, original_node: "Break", updated_node: "Break" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_Call( self, original_node: "Call", updated_node: "Call" ) -> "BaseExpression": return updated_node @mark_no_op def leave_ClassDef( self, original_node: "ClassDef", updated_node: "ClassDef" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_Colon( self, original_node: "Colon", updated_node: "Colon" ) -> Union["Colon", MaybeSentinel]: return updated_node @mark_no_op def leave_Comma( self, original_node: "Comma", updated_node: "Comma" ) -> Union["Comma", MaybeSentinel]: return updated_node @mark_no_op def leave_Comment( self, original_node: "Comment", updated_node: "Comment" ) -> "Comment": return updated_node @mark_no_op def leave_CompFor( self, original_node: "CompFor", updated_node: "CompFor" ) -> "CompFor": return updated_node @mark_no_op def leave_CompIf(self, original_node: "CompIf", updated_node: "CompIf") -> "CompIf": return updated_node @mark_no_op def leave_Comparison( self, original_node: "Comparison", updated_node: "Comparison" ) -> "BaseExpression": return updated_node @mark_no_op def leave_ComparisonTarget( self, original_node: "ComparisonTarget", updated_node: "ComparisonTarget" ) -> Union[ "ComparisonTarget", FlattenSentinel["ComparisonTarget"], RemovalSentinel ]: return updated_node @mark_no_op def leave_ConcatenatedString( self, original_node: "ConcatenatedString", updated_node: "ConcatenatedString" ) -> "BaseExpression": return updated_node @mark_no_op def leave_Continue( self, original_node: "Continue", updated_node: "Continue" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_Decorator( self, original_node: "Decorator", updated_node: "Decorator" ) -> Union["Decorator", FlattenSentinel["Decorator"], RemovalSentinel]: return updated_node @mark_no_op def leave_Del( self, original_node: "Del", updated_node: "Del" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_Dict( self, original_node: "Dict", updated_node: "Dict" ) -> "BaseExpression": return updated_node @mark_no_op def leave_DictComp( self, original_node: "DictComp", updated_node: "DictComp" ) -> "BaseExpression": return updated_node @mark_no_op def leave_DictElement( self, original_node: "DictElement", updated_node: "DictElement" ) -> Union["BaseDictElement", FlattenSentinel["BaseDictElement"], RemovalSentinel]: return updated_node @mark_no_op def leave_Divide( self, original_node: "Divide", updated_node: "Divide" ) -> "BaseBinaryOp": return updated_node @mark_no_op def leave_DivideAssign( self, original_node: "DivideAssign", updated_node: "DivideAssign" ) -> "BaseAugOp": return updated_node @mark_no_op def leave_Dot( self, original_node: "Dot", updated_node: "Dot" ) -> Union["Dot", FlattenSentinel["Dot"], RemovalSentinel]: return updated_node @mark_no_op def leave_Element( self, original_node: "Element", updated_node: "Element" ) -> Union["BaseElement", FlattenSentinel["BaseElement"], RemovalSentinel]: return updated_node @mark_no_op def leave_Ellipsis( self, original_node: "Ellipsis", updated_node: "Ellipsis" ) -> "BaseExpression": return updated_node @mark_no_op def 
leave_Else(self, original_node: "Else", updated_node: "Else") -> "Else": return updated_node @mark_no_op def leave_EmptyLine( self, original_node: "EmptyLine", updated_node: "EmptyLine" ) -> Union["EmptyLine", FlattenSentinel["EmptyLine"], RemovalSentinel]: return updated_node @mark_no_op def leave_Equal( self, original_node: "Equal", updated_node: "Equal" ) -> "BaseCompOp": return updated_node @mark_no_op def leave_ExceptHandler( self, original_node: "ExceptHandler", updated_node: "ExceptHandler" ) -> Union["ExceptHandler", FlattenSentinel["ExceptHandler"], RemovalSentinel]: return updated_node @mark_no_op def leave_ExceptStarHandler( self, original_node: "ExceptStarHandler", updated_node: "ExceptStarHandler" ) -> Union[ "ExceptStarHandler", FlattenSentinel["ExceptStarHandler"], RemovalSentinel ]: return updated_node @mark_no_op def leave_Expr( self, original_node: "Expr", updated_node: "Expr" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_Finally( self, original_node: "Finally", updated_node: "Finally" ) -> "Finally": return updated_node @mark_no_op def leave_Float( self, original_node: "Float", updated_node: "Float" ) -> "BaseExpression": return updated_node @mark_no_op def leave_FloorDivide( self, original_node: "FloorDivide", updated_node: "FloorDivide" ) -> "BaseBinaryOp": return updated_node @mark_no_op def leave_FloorDivideAssign( self, original_node: "FloorDivideAssign", updated_node: "FloorDivideAssign" ) -> "BaseAugOp": return updated_node @mark_no_op def leave_For( self, original_node: "For", updated_node: "For" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_FormattedString( self, original_node: "FormattedString", updated_node: "FormattedString" ) -> "BaseExpression": return updated_node @mark_no_op def leave_FormattedStringExpression( self, original_node: "FormattedStringExpression", updated_node: "FormattedStringExpression", ) -> Union[ "BaseFormattedStringContent", FlattenSentinel["BaseFormattedStringContent"], RemovalSentinel, ]: return updated_node @mark_no_op def leave_FormattedStringText( self, original_node: "FormattedStringText", updated_node: "FormattedStringText" ) -> Union[ "BaseFormattedStringContent", FlattenSentinel["BaseFormattedStringContent"], RemovalSentinel, ]: return updated_node @mark_no_op def leave_From(self, original_node: "From", updated_node: "From") -> "From": return updated_node @mark_no_op def leave_FunctionDef( self, original_node: "FunctionDef", updated_node: "FunctionDef" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_GeneratorExp( self, original_node: "GeneratorExp", updated_node: "GeneratorExp" ) -> "BaseExpression": return updated_node @mark_no_op def leave_Global( self, original_node: "Global", updated_node: "Global" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_GreaterThan( self, original_node: "GreaterThan", updated_node: "GreaterThan" ) -> "BaseCompOp": return updated_node @mark_no_op def leave_GreaterThanEqual( self, original_node: "GreaterThanEqual", updated_node: "GreaterThanEqual" ) -> "BaseCompOp": return updated_node @mark_no_op def leave_If( self, original_node: "If", updated_node: "If" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_IfExp( 
self, original_node: "IfExp", updated_node: "IfExp" ) -> "BaseExpression": return updated_node @mark_no_op def leave_Imaginary( self, original_node: "Imaginary", updated_node: "Imaginary" ) -> "BaseExpression": return updated_node @mark_no_op def leave_Import( self, original_node: "Import", updated_node: "Import" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_ImportAlias( self, original_node: "ImportAlias", updated_node: "ImportAlias" ) -> Union["ImportAlias", FlattenSentinel["ImportAlias"], RemovalSentinel]: return updated_node @mark_no_op def leave_ImportFrom( self, original_node: "ImportFrom", updated_node: "ImportFrom" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_ImportStar( self, original_node: "ImportStar", updated_node: "ImportStar" ) -> "ImportStar": return updated_node @mark_no_op def leave_In(self, original_node: "In", updated_node: "In") -> "BaseCompOp": return updated_node @mark_no_op def leave_IndentedBlock( self, original_node: "IndentedBlock", updated_node: "IndentedBlock" ) -> "BaseSuite": return updated_node @mark_no_op def leave_Index(self, original_node: "Index", updated_node: "Index") -> "BaseSlice": return updated_node @mark_no_op def leave_Integer( self, original_node: "Integer", updated_node: "Integer" ) -> "BaseExpression": return updated_node @mark_no_op def leave_Is(self, original_node: "Is", updated_node: "Is") -> "BaseCompOp": return updated_node @mark_no_op def leave_IsNot( self, original_node: "IsNot", updated_node: "IsNot" ) -> "BaseCompOp": return updated_node @mark_no_op def leave_Lambda( self, original_node: "Lambda", updated_node: "Lambda" ) -> "BaseExpression": return updated_node @mark_no_op def leave_LeftCurlyBrace( self, original_node: "LeftCurlyBrace", updated_node: "LeftCurlyBrace" ) -> "LeftCurlyBrace": return updated_node @mark_no_op def leave_LeftParen( self, original_node: "LeftParen", updated_node: "LeftParen" ) -> Union[ "LeftParen", MaybeSentinel, FlattenSentinel["LeftParen"], RemovalSentinel ]: return updated_node @mark_no_op def leave_LeftShift( self, original_node: "LeftShift", updated_node: "LeftShift" ) -> "BaseBinaryOp": return updated_node @mark_no_op def leave_LeftShiftAssign( self, original_node: "LeftShiftAssign", updated_node: "LeftShiftAssign" ) -> "BaseAugOp": return updated_node @mark_no_op def leave_LeftSquareBracket( self, original_node: "LeftSquareBracket", updated_node: "LeftSquareBracket" ) -> "LeftSquareBracket": return updated_node @mark_no_op def leave_LessThan( self, original_node: "LessThan", updated_node: "LessThan" ) -> "BaseCompOp": return updated_node @mark_no_op def leave_LessThanEqual( self, original_node: "LessThanEqual", updated_node: "LessThanEqual" ) -> "BaseCompOp": return updated_node @mark_no_op def leave_List( self, original_node: "List", updated_node: "List" ) -> "BaseExpression": return updated_node @mark_no_op def leave_ListComp( self, original_node: "ListComp", updated_node: "ListComp" ) -> "BaseExpression": return updated_node @mark_no_op def leave_Match( self, original_node: "Match", updated_node: "Match" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_MatchAs( self, original_node: "MatchAs", updated_node: "MatchAs" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatchCase( self, original_node: "MatchCase", updated_node: "MatchCase" ) -> 
"MatchCase": return updated_node @mark_no_op def leave_MatchClass( self, original_node: "MatchClass", updated_node: "MatchClass" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatchKeywordElement( self, original_node: "MatchKeywordElement", updated_node: "MatchKeywordElement" ) -> Union[ "MatchKeywordElement", FlattenSentinel["MatchKeywordElement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_MatchList( self, original_node: "MatchList", updated_node: "MatchList" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatchMapping( self, original_node: "MatchMapping", updated_node: "MatchMapping" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatchMappingElement( self, original_node: "MatchMappingElement", updated_node: "MatchMappingElement" ) -> Union[ "MatchMappingElement", FlattenSentinel["MatchMappingElement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_MatchOr( self, original_node: "MatchOr", updated_node: "MatchOr" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatchOrElement( self, original_node: "MatchOrElement", updated_node: "MatchOrElement" ) -> Union["MatchOrElement", FlattenSentinel["MatchOrElement"], RemovalSentinel]: return updated_node @mark_no_op def leave_MatchPattern( self, original_node: "MatchPattern", updated_node: "MatchPattern" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatchSequence( self, original_node: "MatchSequence", updated_node: "MatchSequence" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatchSequenceElement( self, original_node: "MatchSequenceElement", updated_node: "MatchSequenceElement", ) -> Union[ "MatchSequenceElement", FlattenSentinel["MatchSequenceElement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_MatchSingleton( self, original_node: "MatchSingleton", updated_node: "MatchSingleton" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatchStar( self, original_node: "MatchStar", updated_node: "MatchStar" ) -> "MatchStar": return updated_node @mark_no_op def leave_MatchTuple( self, original_node: "MatchTuple", updated_node: "MatchTuple" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatchValue( self, original_node: "MatchValue", updated_node: "MatchValue" ) -> "MatchPattern": return updated_node @mark_no_op def leave_MatrixMultiply( self, original_node: "MatrixMultiply", updated_node: "MatrixMultiply" ) -> "BaseBinaryOp": return updated_node @mark_no_op def leave_MatrixMultiplyAssign( self, original_node: "MatrixMultiplyAssign", updated_node: "MatrixMultiplyAssign", ) -> "BaseAugOp": return updated_node @mark_no_op def leave_Minus( self, original_node: "Minus", updated_node: "Minus" ) -> "BaseUnaryOp": return updated_node @mark_no_op def leave_Module(self, original_node: "Module", updated_node: "Module") -> "Module": return updated_node @mark_no_op def leave_Modulo( self, original_node: "Modulo", updated_node: "Modulo" ) -> "BaseBinaryOp": return updated_node @mark_no_op def leave_ModuloAssign( self, original_node: "ModuloAssign", updated_node: "ModuloAssign" ) -> "BaseAugOp": return updated_node @mark_no_op def leave_Multiply( self, original_node: "Multiply", updated_node: "Multiply" ) -> "BaseBinaryOp": return updated_node @mark_no_op def leave_MultiplyAssign( self, original_node: "MultiplyAssign", updated_node: "MultiplyAssign" ) -> "BaseAugOp": return updated_node @mark_no_op def leave_Name( self, original_node: "Name", updated_node: "Name" ) -> "BaseExpression": return 
updated_node @mark_no_op def leave_NameItem( self, original_node: "NameItem", updated_node: "NameItem" ) -> Union["NameItem", FlattenSentinel["NameItem"], RemovalSentinel]: return updated_node @mark_no_op def leave_NamedExpr( self, original_node: "NamedExpr", updated_node: "NamedExpr" ) -> "BaseExpression": return updated_node @mark_no_op def leave_Newline( self, original_node: "Newline", updated_node: "Newline" ) -> "Newline": return updated_node @mark_no_op def leave_Nonlocal( self, original_node: "Nonlocal", updated_node: "Nonlocal" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_Not(self, original_node: "Not", updated_node: "Not") -> "BaseUnaryOp": return updated_node @mark_no_op def leave_NotEqual( self, original_node: "NotEqual", updated_node: "NotEqual" ) -> "BaseCompOp": return updated_node @mark_no_op def leave_NotIn( self, original_node: "NotIn", updated_node: "NotIn" ) -> "BaseCompOp": return updated_node @mark_no_op def leave_Or(self, original_node: "Or", updated_node: "Or") -> "BaseBooleanOp": return updated_node @mark_no_op def leave_Param( self, original_node: "Param", updated_node: "Param" ) -> Union["Param", MaybeSentinel, FlattenSentinel["Param"], RemovalSentinel]: return updated_node @mark_no_op def leave_ParamSlash( self, original_node: "ParamSlash", updated_node: "ParamSlash" ) -> Union["ParamSlash", MaybeSentinel]: return updated_node @mark_no_op def leave_ParamSpec( self, original_node: "ParamSpec", updated_node: "ParamSpec" ) -> "ParamSpec": return updated_node @mark_no_op def leave_ParamStar( self, original_node: "ParamStar", updated_node: "ParamStar" ) -> Union["ParamStar", MaybeSentinel]: return updated_node @mark_no_op def leave_Parameters( self, original_node: "Parameters", updated_node: "Parameters" ) -> "Parameters": return updated_node @mark_no_op def leave_ParenthesizedWhitespace( self, original_node: "ParenthesizedWhitespace", updated_node: "ParenthesizedWhitespace", ) -> Union["BaseParenthesizableWhitespace", MaybeSentinel]: return updated_node @mark_no_op def leave_Pass( self, original_node: "Pass", updated_node: "Pass" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_Plus(self, original_node: "Plus", updated_node: "Plus") -> "BaseUnaryOp": return updated_node @mark_no_op def leave_Power( self, original_node: "Power", updated_node: "Power" ) -> "BaseBinaryOp": return updated_node @mark_no_op def leave_PowerAssign( self, original_node: "PowerAssign", updated_node: "PowerAssign" ) -> "BaseAugOp": return updated_node @mark_no_op def leave_Raise( self, original_node: "Raise", updated_node: "Raise" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_Return( self, original_node: "Return", updated_node: "Return" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_RightCurlyBrace( self, original_node: "RightCurlyBrace", updated_node: "RightCurlyBrace" ) -> "RightCurlyBrace": return updated_node @mark_no_op def leave_RightParen( self, original_node: "RightParen", updated_node: "RightParen" ) -> Union[ "RightParen", MaybeSentinel, FlattenSentinel["RightParen"], RemovalSentinel ]: return updated_node @mark_no_op def leave_RightShift( self, original_node: "RightShift", updated_node: "RightShift" ) -> "BaseBinaryOp": return updated_node 
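    # Illustrative note, not part of the generated API: concrete transformers
    # subclass CSTTransformer (which mixes in these typed stubs) and override
    # only the hooks they need. A minimal sketch, assuming the hypothetical
    # class name ``SwapAddForSub``; the stub signatures in this class are what
    # make returning a different BaseBinaryOp type-correct:
    #
    #     import libcst as cst
    #
    #     class SwapAddForSub(cst.CSTTransformer):
    #         def leave_Add(
    #             self, original_node: cst.Add, updated_node: cst.Add
    #         ) -> cst.BaseBinaryOp:
    #             return cst.Subtract()
    #
    #     # Rewrites "x = 1 + 2" into "x = 1 - 2".
    #     print(cst.parse_module("x = 1 + 2\n").visit(SwapAddForSub()).code)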
@mark_no_op def leave_RightShiftAssign( self, original_node: "RightShiftAssign", updated_node: "RightShiftAssign" ) -> "BaseAugOp": return updated_node @mark_no_op def leave_RightSquareBracket( self, original_node: "RightSquareBracket", updated_node: "RightSquareBracket" ) -> "RightSquareBracket": return updated_node @mark_no_op def leave_Semicolon( self, original_node: "Semicolon", updated_node: "Semicolon" ) -> Union["Semicolon", MaybeSentinel]: return updated_node @mark_no_op def leave_Set(self, original_node: "Set", updated_node: "Set") -> "BaseExpression": return updated_node @mark_no_op def leave_SetComp( self, original_node: "SetComp", updated_node: "SetComp" ) -> "BaseExpression": return updated_node @mark_no_op def leave_SimpleStatementLine( self, original_node: "SimpleStatementLine", updated_node: "SimpleStatementLine" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_SimpleStatementSuite( self, original_node: "SimpleStatementSuite", updated_node: "SimpleStatementSuite", ) -> "BaseSuite": return updated_node @mark_no_op def leave_SimpleString( self, original_node: "SimpleString", updated_node: "SimpleString" ) -> "BaseExpression": return updated_node @mark_no_op def leave_SimpleWhitespace( self, original_node: "SimpleWhitespace", updated_node: "SimpleWhitespace" ) -> Union["BaseParenthesizableWhitespace", MaybeSentinel]: return updated_node @mark_no_op def leave_Slice(self, original_node: "Slice", updated_node: "Slice") -> "BaseSlice": return updated_node @mark_no_op def leave_StarredDictElement( self, original_node: "StarredDictElement", updated_node: "StarredDictElement" ) -> Union["BaseDictElement", FlattenSentinel["BaseDictElement"], RemovalSentinel]: return updated_node @mark_no_op def leave_StarredElement( self, original_node: "StarredElement", updated_node: "StarredElement" ) -> "BaseExpression": return updated_node @mark_no_op def leave_Subscript( self, original_node: "Subscript", updated_node: "Subscript" ) -> "BaseExpression": return updated_node @mark_no_op def leave_SubscriptElement( self, original_node: "SubscriptElement", updated_node: "SubscriptElement" ) -> Union[ "SubscriptElement", FlattenSentinel["SubscriptElement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_Subtract( self, original_node: "Subtract", updated_node: "Subtract" ) -> "BaseBinaryOp": return updated_node @mark_no_op def leave_SubtractAssign( self, original_node: "SubtractAssign", updated_node: "SubtractAssign" ) -> "BaseAugOp": return updated_node @mark_no_op def leave_TrailingWhitespace( self, original_node: "TrailingWhitespace", updated_node: "TrailingWhitespace" ) -> "TrailingWhitespace": return updated_node @mark_no_op def leave_Try( self, original_node: "Try", updated_node: "Try" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_TryStar( self, original_node: "TryStar", updated_node: "TryStar" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_Tuple( self, original_node: "Tuple", updated_node: "Tuple" ) -> "BaseExpression": return updated_node @mark_no_op def leave_TypeAlias( self, original_node: "TypeAlias", updated_node: "TypeAlias" ) -> Union[ "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel ]: return updated_node @mark_no_op def leave_TypeParam( self, original_node: "TypeParam", updated_node: "TypeParam" ) -> Union["TypeParam", 
FlattenSentinel["TypeParam"], RemovalSentinel]: return updated_node @mark_no_op def leave_TypeParameters( self, original_node: "TypeParameters", updated_node: "TypeParameters" ) -> "TypeParameters": return updated_node @mark_no_op def leave_TypeVar( self, original_node: "TypeVar", updated_node: "TypeVar" ) -> "TypeVar": return updated_node @mark_no_op def leave_TypeVarTuple( self, original_node: "TypeVarTuple", updated_node: "TypeVarTuple" ) -> "TypeVarTuple": return updated_node @mark_no_op def leave_UnaryOperation( self, original_node: "UnaryOperation", updated_node: "UnaryOperation" ) -> "BaseExpression": return updated_node @mark_no_op def leave_While( self, original_node: "While", updated_node: "While" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_With( self, original_node: "With", updated_node: "With" ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_WithItem( self, original_node: "WithItem", updated_node: "WithItem" ) -> Union["WithItem", FlattenSentinel["WithItem"], RemovalSentinel]: return updated_node @mark_no_op def leave_Yield( self, original_node: "Yield", updated_node: "Yield" ) -> "BaseExpression": return updated_node LibCST-1.2.0/libcst/_typed_visitor_base.py000066400000000000000000000011471456464173300205010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Callable, cast, TypeVar # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. F = TypeVar("F", bound=Callable) def mark_no_op(f: F) -> F: """ Annotates stubs with a field to indicate they should not be collected by BatchableCSTVisitor.get_visitors() to reduce function call overhead when running a batched visitor pass. """ cast(Any, f)._is_no_op = True return f LibCST-1.2.0/libcst/_types.py000066400000000000000000000007461456464173300157530ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from pathlib import PurePath from typing import TYPE_CHECKING, TypeVar, Union if TYPE_CHECKING: from libcst._nodes.base import CSTNode # noqa: F401 CSTNodeT = TypeVar("CSTNodeT", bound="CSTNode") CSTNodeT_co = TypeVar("CSTNodeT_co", bound="CSTNode", covariant=True) StrPath = Union[str, PurePath] LibCST-1.2.0/libcst/_visitors.py000066400000000000000000000152631456464173300164710ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import TYPE_CHECKING, Union from libcst._flatten_sentinel import FlattenSentinel from libcst._metadata_dependent import MetadataDependent from libcst._removal_sentinel import RemovalSentinel from libcst._typed_visitor import CSTTypedTransformerFunctions, CSTTypedVisitorFunctions from libcst._types import CSTNodeT if TYPE_CHECKING: # Circular dependency for typing reasons only from libcst._nodes.base import CSTNode # noqa: F401 CSTVisitorT = Union["CSTTransformer", "CSTVisitor"] class CSTTransformer(CSTTypedTransformerFunctions, MetadataDependent): """ The low-level base visitor class for traversing a CST and creating an updated copy of the original CST. 
This should be used in conjunction with the :func:`~libcst.CSTNode.visit` method on a :class:`~libcst.CSTNode` to visit each element in a tree starting with that node, and possibly returning a new node in its place. When visiting nodes using a :class:`CSTTransformer`, the return value of :func:`~libcst.CSTNode.visit` will be a new tree with any changes made in :func:`~libcst.CSTTransformer.on_leave` calls reflected in its children. """ def on_visit(self, node: "CSTNode") -> bool: """ Called every time a node is visited, before we've visited its children. Returns ``True`` if children should be visited, and returns ``False`` otherwise. """ visit_func = getattr(self, f"visit_{type(node).__name__}", None) if visit_func is not None: retval = visit_func(node) else: retval = True # Don't visit children IFF the visit function returned False. return False if retval is False else True def on_leave( self, original_node: CSTNodeT, updated_node: CSTNodeT ) -> Union[CSTNodeT, RemovalSentinel, FlattenSentinel[CSTNodeT]]: """ Called every time we leave a node, after we've visited its children. If the :func:`~libcst.CSTTransformer.on_visit` function for this node returns ``False``, this function will still be called on that node. ``original_node`` is guaranteed to be the same node as is passed to :func:`~libcst.CSTTransformer.on_visit`, so it is safe to do state-based checks using the ``is`` operator. Modifications should always be performed on the ``updated_node`` so as to not overwrite changes made by child visits. Returning :attr:`RemovalSentinel.REMOVE` indicates that the node should be removed from its parent. This is not always possible, and may raise an exception if this node is required. As a convenience, you can use :func:`RemoveFromParent` as an alias to :attr:`RemovalSentinel.REMOVE`. """ leave_func = getattr(self, f"leave_{type(original_node).__name__}", None) if leave_func is not None: updated_node = leave_func(original_node, updated_node) return updated_node def on_visit_attribute(self, node: "CSTNode", attribute: str) -> None: """ Called before a node's child attribute is visited and after we have called :func:`~libcst.CSTTransformer.on_visit` on the node. A node's child attributes are visited in the order that they appear in source that this node originates from. """ visit_func = getattr(self, f"visit_{type(node).__name__}_{attribute}", None) if visit_func is not None: visit_func(node) def on_leave_attribute(self, original_node: "CSTNode", attribute: str) -> None: """ Called after a node's child attribute is visited and before we have called :func:`~libcst.CSTTransformer.on_leave` on the node. Unlike :func:`~libcst.CSTTransformer.on_leave`, this function does not allow modifications to the tree and is provided solely for state management. """ leave_func = getattr( self, f"leave_{type(original_node).__name__}_{attribute}", None ) if leave_func is not None: leave_func(original_node) class CSTVisitor(CSTTypedVisitorFunctions, MetadataDependent): """ The low-level base visitor class for traversing a CST. This should be used in conjunction with the :func:`~libcst.CSTNode.visit` method on a :class:`~libcst.CSTNode` to visit each element in a tree starting with that node. Unlike :class:`CSTTransformer`, instances of this class cannot modify the tree. When visiting nodes using a :class:`CSTVisitor`, the return value of :func:`~libcst.CSTNode.visit` will equal the passed in tree. 
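    For example, a read-only visitor that counts function definitions could look
    like this (a minimal sketch; ``FunctionCounter`` is a name invented for
    illustration)::

        import libcst as cst

        class FunctionCounter(cst.CSTVisitor):
            def __init__(self) -> None:
                super().__init__()
                self.count: int = 0

            def visit_FunctionDef(self, node: cst.FunctionDef) -> None:
                self.count += 1

        counter = FunctionCounter()
        cst.parse_module("def f(): pass\ndef g(): pass\n").visit(counter)
        assert counter.count == 2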
""" def on_visit(self, node: "CSTNode") -> bool: """ Called every time a node is visited, before we've visited its children. Returns ``True`` if children should be visited, and returns ``False`` otherwise. """ visit_func = getattr(self, f"visit_{type(node).__name__}", None) if visit_func is not None: retval = visit_func(node) else: retval = True # Don't visit children IFF the visit function returned False. return False if retval is False else True def on_leave(self, original_node: "CSTNode") -> None: """ Called every time we leave a node, after we've visited its children. If the :func:`~libcst.CSTVisitor.on_visit` function for this node returns ``False``, this function will still be called on that node. """ leave_func = getattr(self, f"leave_{type(original_node).__name__}", None) if leave_func is not None: leave_func(original_node) def on_visit_attribute(self, node: "CSTNode", attribute: str) -> None: """ Called before a node's child attribute is visited and after we have called :func:`~libcst.CSTTransformer.on_visit` on the node. A node's child attributes are visited in the order that they appear in source that this node originates from. """ visit_func = getattr(self, f"visit_{type(node).__name__}_{attribute}", None) if visit_func is not None: visit_func(node) def on_leave_attribute(self, original_node: "CSTNode", attribute: str) -> None: """ Called after a node's child attribute is visited and before we have called :func:`~libcst.CSTVisitor.on_leave` on the node. """ leave_func = getattr( self, f"leave_{type(original_node).__name__}_{attribute}", None ) if leave_func is not None: leave_func(original_node) LibCST-1.2.0/libcst/codegen/000077500000000000000000000000001456464173300154735ustar00rootroot00000000000000LibCST-1.2.0/libcst/codegen/__init__.py000066400000000000000000000002631456464173300176050ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/codegen/gather.py000066400000000000000000000111521456464173300173170ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect from collections import defaultdict from collections.abc import Sequence as ABCSequence from dataclasses import dataclass, fields, replace from typing import Dict, Iterator, List, Mapping, Sequence, Set, Type, Union import libcst as cst def _get_bases() -> Iterator[Type[cst.CSTNode]]: """ Get all base classes that are subclasses of CSTNode but not an actual node itself. This allows us to keep our types sane by refering to the base classes themselves. """ for name in dir(cst): if not name.startswith("Base"): continue yield getattr(cst, name) typeclasses: Sequence[Type[cst.CSTNode]] = sorted( _get_bases(), key=lambda base: base.__name__ ) def _get_nodes() -> Iterator[Type[cst.CSTNode]]: """ Grab all CSTNodes that are not a superclass. Basically, anything that a person might use to generate a tree. """ for name in dir(cst): if name.startswith("__") and name.endswith("__"): continue if name == "CSTNode": continue node = getattr(cst, name) try: if issubclass(node, cst.CSTNode): yield node except TypeError: # This isn't a class, so we don't care about it. 
pass all_libcst_nodes: Sequence[Type[cst.CSTNode]] = sorted( _get_nodes(), key=lambda node: node.__name__ ) node_to_bases: Dict[Type[cst.CSTNode], List[Type[cst.CSTNode]]] = {} for node in all_libcst_nodes: # Map the base classes for this node node_to_bases[node] = list( reversed([b for b in inspect.getmro(node) if issubclass(b, cst.CSTNode)]) ) def _get_most_generic_base_for_node(node: Type[cst.CSTNode]) -> Type[cst.CSTNode]: # Ignore non-exported bases, a user couldn't specify these types # in type hints. exportable_bases = [b for b in node_to_bases[node] if b in node_to_bases] return exportable_bases[0] nodebases: Dict[Type[cst.CSTNode], Type[cst.CSTNode]] = {} for node in all_libcst_nodes: # Find the most generic version of this node that isn't CSTNode. nodebases[node] = _get_most_generic_base_for_node(node) @dataclass(frozen=True) class Usage: maybe: bool = False optional: bool = False sequence: bool = False nodeuses: Dict[Type[cst.CSTNode], Usage] = {node: Usage() for node in all_libcst_nodes} def _is_maybe(typeobj: object) -> bool: try: # pyre-ignore We wrap this in a TypeError check so this is safe return issubclass(typeobj, cst.MaybeSentinel) except TypeError: return False def _get_origin(typeobj: object) -> object: try: # pyre-ignore We wrap this in a AttributeError check so this is safe return typeobj.__origin__ except AttributeError: # Don't care, not a union or sequence return None def _get_args(typeobj: object) -> List[object]: try: # pyre-ignore We wrap this in a AttributeError check so this is safe return typeobj.__args__ except AttributeError: # Don't care, not a union or sequence return [] def _is_sequence(typeobj: object) -> bool: origin = _get_origin(typeobj) return origin is Sequence or origin is ABCSequence def _is_union(typeobj: object) -> bool: return _get_origin(typeobj) is Union def _calc_node_usage(typeobj: object) -> None: if _is_union(typeobj): has_maybe = any(_is_maybe(n) for n in _get_args(typeobj)) has_none = any(isinstance(n, type(None)) for n in _get_args(typeobj)) for node in _get_args(typeobj): if node in all_libcst_nodes: nodeuses[node] = replace( nodeuses[node], maybe=nodeuses[node].maybe or has_maybe, optional=nodeuses[node].optional or has_none, ) else: _calc_node_usage(node) if _is_sequence(typeobj): for node in _get_args(typeobj): if node in all_libcst_nodes: nodeuses[node] = replace(nodeuses[node], sequence=True) else: _calc_node_usage(node) for node in all_libcst_nodes: for field in fields(node) or []: if field.name == "_metadata": continue _calc_node_usage(field.type) imports: Mapping[str, Set[str]] = defaultdict(set) for node, base in nodebases.items(): if node.__name__.startswith("Base"): continue for x in (node, base): imports[x.__module__].add(x.__name__) LibCST-1.2.0/libcst/codegen/gen_matcher_classes.py000066400000000000000000000500571456464173300220450ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
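# A rough sketch of the metadata that the ``gather`` module above exposes to
# the code generators; the values shown are illustrative examples taken from
# the generated output, not an exhaustive listing:
#
#     import libcst as cst
#     from libcst.codegen.gather import nodebases, nodeuses
#
#     nodebases[cst.Add]   # cst.BaseBinaryOp, the most generic exported base
#     nodeuses[cst.Comma]  # Usage(maybe=True, ...), since Comma appears in
#                          # Union[Comma, MaybeSentinel] fields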
import re
from dataclasses import dataclass, fields
from typing import Generator, List, Optional, Sequence, Set, Tuple, Type, Union

import libcst as cst
from libcst import ensure_type, parse_expression
from libcst.codegen.gather import all_libcst_nodes, typeclasses

CST_DIR: Set[str] = set(dir(cst))
CLASS_RE = r"<class \'(.*?)\'>"
OPTIONAL_RE = r"typing\.Union\[([^,]*?), NoneType]"


class CleanseFullTypeNames(cst.CSTTransformer):
    def leave_Call(
        self, original_node: cst.Call, updated_node: cst.Call
    ) -> cst.BaseExpression:
        # Convert forward ref repr back to a SimpleString.
        if isinstance(updated_node.func, cst.Name) and (
            updated_node.func.deep_equals(cst.Name("_ForwardRef"))
            or updated_node.func.deep_equals(cst.Name("ForwardRef"))
        ):
            return updated_node.args[0].value
        return updated_node

    def leave_Attribute(
        self, original_node: cst.Attribute, updated_node: cst.Attribute
    ) -> Union[cst.Attribute, cst.Name]:
        # Unwrap all attributes, so things like libcst.x.y.Name become Name
        return updated_node.attr

    def leave_Name(
        self, original_node: cst.Name, updated_node: cst.Name
    ) -> Union[cst.Name, cst.SimpleString]:
        value = updated_node.value
        if value == "NoneType":
            # This is special-cased in typing, un-special case it.
            return updated_node.with_changes(value="None")
        if value in CST_DIR and not value.endswith("Sentinel"):
            # If this isn't a typing define and it isn't a builtin, convert it to
            # a forward ref string.
            return cst.SimpleString(repr(value))
        return updated_node

    def leave_SubscriptElement(
        self, original_node: cst.SubscriptElement, updated_node: cst.SubscriptElement
    ) -> Union[cst.SubscriptElement, cst.RemovalSentinel]:
        slc = updated_node.slice
        if isinstance(slc, cst.Index):
            val = slc.value
            if isinstance(val, cst.Name):
                if "Sentinel" in val.value:
                    # We don't support maybes in matchers.
                    return cst.RemoveFromParent()
        # Simple trick to kill trailing commas
        return updated_node.with_changes(comma=cst.MaybeSentinel.DEFAULT)


class RemoveTypesFromGeneric(cst.CSTTransformer):
    def __init__(self, values: Sequence[str]) -> None:
        self.values: Set[str] = set(values)

    def leave_SubscriptElement(
        self, original_node: cst.SubscriptElement, updated_node: cst.SubscriptElement
    ) -> Union[cst.SubscriptElement, cst.RemovalSentinel]:
        slc = updated_node.slice
        if isinstance(slc, cst.Index):
            val = slc.value
            if isinstance(val, cst.Name):
                if val.value in self.values:
                    # This type matches, so out it goes
                    return cst.RemoveFromParent()
        return updated_node


def _remove_types(
    oldtype: cst.BaseExpression, values: Sequence[str]
) -> cst.BaseExpression:
    """
    Given a BaseExpression from a type, return a new BaseExpression that does not
    refer to any types listed in values.
    """
    return ensure_type(
        oldtype.visit(RemoveTypesFromGeneric(values)), cst.BaseExpression
    )


class MatcherClassToLibCSTClass(cst.CSTTransformer):
    def leave_SimpleString(
        self, original_node: cst.SimpleString, updated_node: cst.SimpleString
    ) -> Union[cst.SimpleString, cst.Attribute]:
        value = updated_node.evaluated_value
        if value in CST_DIR:
            return cst.Attribute(cst.Name("cst"), cst.Name(value))
        return updated_node


def _convert_match_nodes_to_cst_nodes(
    matchtype: cst.BaseExpression,
) -> cst.BaseExpression:
    """
    Given a BaseExpression in a type, convert this to a new BaseExpression that refers
    to LibCST nodes instead of forward references to matcher nodes.
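    For example, the quoted forward reference ``"Name"`` becomes the expression
    ``cst.Name``; this works because matcher classes and CST node classes share
    a 1:1 name mapping.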
""" return ensure_type(matchtype.visit(MatcherClassToLibCSTClass()), cst.BaseExpression) def _get_match_if_true(oldtype: cst.BaseExpression) -> cst.SubscriptElement: """ Construct a MatchIfTrue type node appropriate for going into a Union. """ return cst.SubscriptElement( cst.Index( cst.Subscript( cst.Name("MatchIfTrue"), slice=( cst.SubscriptElement( cst.Index( # MatchIfTrue takes in the original node type, # and returns a boolean. So, lets convert our # quoted classes (forward refs to other # matchers) back to the CSTNode they refer to. # We can do this because there's always a 1:1 # name mapping. _convert_match_nodes_to_cst_nodes(oldtype) ), ), ), ) ) ) def _add_generic(name: str, oldtype: cst.BaseExpression) -> cst.BaseExpression: return cst.Subscript(cst.Name(name), (cst.SubscriptElement(cst.Index(oldtype)),)) class AddLogicMatchersToUnions(cst.CSTTransformer): def leave_Subscript( self, original_node: cst.Subscript, updated_node: cst.Subscript ) -> cst.Subscript: if updated_node.value.deep_equals(cst.Name("Union")): # Take the original node, remove do not care so we have concrete types. # Explicitly taking the original node because we want to discard nested # changes. concrete_only_expr = _remove_types(updated_node, ["DoNotCareSentinel"]) return updated_node.with_changes( slice=[ *updated_node.slice, cst.SubscriptElement( cst.Index(_add_generic("OneOf", concrete_only_expr)) ), cst.SubscriptElement( cst.Index(_add_generic("AllOf", concrete_only_expr)) ), ] ) return updated_node class AddWildcardsToSequenceUnions(cst.CSTTransformer): def __init__(self) -> None: super().__init__() self.in_match_if_true: Set[cst.CSTNode] = set() self.fixup_nodes: Set[cst.Subscript] = set() def visit_Subscript(self, node: cst.Subscript) -> None: # If the current node is a MatchIfTrue, we don't want to modify it. if node.value.deep_equals(cst.Name("MatchIfTrue")): self.in_match_if_true.add(node) # If the direct descendant is a union, lets add it to be fixed up. elif node.value.deep_equals(cst.Name("Sequence")): if self.in_match_if_true: # We don't want to add AtLeastN/AtMostN inside MatchIfTrue # type blocks, even for sequence types. return if len(node.slice) != 1: raise Exception( "Unexpected number of sequence elements inside Sequence type " + "annotation!" ) nodeslice = node.slice[0].slice if isinstance(nodeslice, cst.Index): possibleunion = nodeslice.value if isinstance(possibleunion, cst.Subscript): if possibleunion.value.deep_equals(cst.Name("Union")): self.fixup_nodes.add(possibleunion) def leave_Subscript( self, original_node: cst.Subscript, updated_node: cst.Subscript ) -> cst.Subscript: if original_node in self.in_match_if_true: self.in_match_if_true.remove(original_node) if original_node in self.fixup_nodes: self.fixup_nodes.remove(original_node) return updated_node.with_changes( slice=[ *updated_node.slice, cst.SubscriptElement( cst.Index(_add_generic("AtLeastN", original_node)) ), cst.SubscriptElement( cst.Index(_add_generic("AtMostN", original_node)) ), ] ) return updated_node def _get_do_not_care() -> cst.SubscriptElement: """ Construct a DoNotCareSentinel entry appropriate for going into a Union. """ return cst.SubscriptElement(cst.Index(cst.Name("DoNotCareSentinel"))) def _get_match_metadata() -> cst.SubscriptElement: """ Construct a MetadataMatchType entry appropriate for going into a Union. 
""" return cst.SubscriptElement(cst.Index(cst.Name("MetadataMatchType"))) def _get_wrapped_union_type( node: cst.BaseExpression, addition: cst.SubscriptElement, *additions: cst.SubscriptElement, ) -> cst.Subscript: """ Take two or more nodes, wrap them in a union type. Function signature is explicitly defined as taking at least one addition for type safety. """ return cst.Subscript( cst.Name("Union"), [cst.SubscriptElement(cst.Index(node)), addition, *additions] ) # List of global aliases we've already generated, so we don't redefine types _global_aliases: Set[str] = set() @dataclass(frozen=True) class Alias: name: str type: str @dataclass(frozen=True) class Field: name: str type: str aliases: List[Alias] def _get_raw_name(node: cst.CSTNode) -> Optional[str]: if isinstance(node, cst.Name): return node.value elif isinstance(node, cst.SimpleString): evaluated_value = node.evaluated_value if isinstance(evaluated_value, str): return evaluated_value elif isinstance(node, cst.SubscriptElement): return _get_raw_name(node.slice) elif isinstance(node, cst.Index): return _get_raw_name(node.value) else: return None def _get_alias_name(node: cst.CSTNode) -> Optional[str]: if isinstance(node, (cst.Name, cst.SimpleString)): return f"{_get_raw_name(node)}MatchType" elif isinstance(node, cst.Subscript): if node.value.deep_equals(cst.Name("Union")): names = [_get_raw_name(s) for s in node.slice] if any(n is None for n in names): return None return "Or".join(n for n in names if n is not None) + "MatchType" return None def _wrap_clean_type( aliases: List[Alias], name: Optional[str], value: cst.Subscript ) -> cst.BaseExpression: if name is not None: # We created an alias, lets use that, wrapping the alias in a do not care. aliases.append(Alias(name=name, type=cst.Module(body=()).code_for_node(value))) return _get_wrapped_union_type(cst.Name(name), _get_do_not_care()) else: # Couldn't name the alias, fall back to regular node creation, add do not # care to the resulting type we widened. return value.with_changes(slice=[*value.slice, _get_do_not_care()]) def _get_clean_type_from_expression( aliases: List[Alias], typecst: cst.BaseExpression ) -> cst.BaseExpression: name = _get_alias_name(typecst) value = _get_wrapped_union_type( typecst, _get_match_metadata(), _get_match_if_true(typecst) ) return _wrap_clean_type(aliases, name, value) def _maybe_fix_sequence_in_union( aliases: List[Alias], typecst: cst.SubscriptElement ) -> cst.SubscriptElement: slc = typecst.slice if isinstance(slc, cst.Index): val = slc.value if isinstance(val, cst.Subscript): return cst.ensure_type( typecst.deep_replace(val, _get_clean_type_from_subscript(aliases, val)), cst.SubscriptElement, ) return typecst def _get_clean_type_from_union( aliases: List[Alias], typecst: cst.Subscript ) -> cst.BaseExpression: name = _get_alias_name(typecst) value = typecst.with_changes( slice=[ *[_maybe_fix_sequence_in_union(aliases, slc) for slc in typecst.slice], _get_match_metadata(), _get_match_if_true(typecst), ] ) return _wrap_clean_type(aliases, name, value) def _get_clean_type_from_subscript( aliases: List[Alias], typecst: cst.Subscript ) -> cst.BaseExpression: if typecst.value.deep_equals(cst.Name("Sequence")): # Lets attempt to widen the sequence type and alias it. 
if len(typecst.slice) != 1: raise Exception("Logic error, Sequence shouldn't have more than one param!") inner_type = typecst.slice[0].slice if not isinstance(inner_type, cst.Index): raise Exception("Logic error, expecting Index for only Sequence element!") inner_type = inner_type.value if isinstance(inner_type, cst.Subscript): clean_inner_type = _get_clean_type_from_subscript(aliases, inner_type) elif isinstance(inner_type, (cst.Name, cst.SimpleString)): clean_inner_type = _get_clean_type_from_expression(aliases, inner_type) else: raise Exception("Logic error, unexpected type in Sequence!") return _get_wrapped_union_type( typecst.deep_replace(inner_type, clean_inner_type), _get_do_not_care(), _get_match_if_true(typecst), ) # We can modify this as-is to add our extra values elif typecst.value.deep_equals(cst.Name("Union")): return _get_clean_type_from_union(aliases, typecst) else: # Don't handle other types like "Literal", just widen them. return _get_clean_type_from_expression(aliases, typecst) def _get_clean_type_and_aliases( typeobj: object, ) -> Tuple[str, List[Alias]]: # noqa: C901 """ Given a type object as returned by dataclasses, sanitize it and convert it to a type string that is appropriate for our codegen below. """ # First, get the type as a parseable expression. typestr = repr(typeobj) typestr = re.sub(CLASS_RE, r"\1", typestr) typestr = re.sub(OPTIONAL_RE, r"typing.Optional[\1]", typestr) # Now, parse the expression with LibCST. cleanser = CleanseFullTypeNames() typecst = parse_expression(typestr) typecst = typecst.visit(cleanser) aliases: List[Alias] = [] # Now, convert the type to allow for MetadataMatchType and MatchIfTrue values. if isinstance(typecst, cst.Subscript): clean_type = _get_clean_type_from_subscript(aliases, typecst) elif isinstance(typecst, (cst.Name, cst.SimpleString)): clean_type = _get_clean_type_from_expression(aliases, typecst) else: raise Exception("Logic error, unexpected top level type!") # Now, insert OneOf/AllOf and MatchIfTrue into unions so we can typecheck their usage. # This allows us to put OneOf[SomeType] or MatchIfTrue[cst.SomeType] into any # spot that we would have originally allowed a SomeType. clean_type = ensure_type(clean_type.visit(AddLogicMatchersToUnions()), cst.CSTNode) # Now, insert AtMostN and AtLeastN into sequence unions, so we can typecheck # them. This relies on the previous OneOf/AllOf insertion to ensure that all # sequences we care about are Sequence[Union[]]. clean_type = ensure_type( clean_type.visit(AddWildcardsToSequenceUnions()), cst.CSTNode ) # Finally, generate the code given a default Module so we can spit it out. return cst.Module(body=()).code_for_node(clean_type), aliases def _get_fields(node: Type[cst.CSTNode]) -> Generator[Field, None, None]: """ Given a CSTNode, generate a field name and type string for each. """ for field in fields(node) or []: if field.name == "_metadata": continue fieldtype, aliases = _get_clean_type_and_aliases(field.type) yield Field( name=field.name, type=fieldtype, aliases=[a for a in aliases if a.name not in _global_aliases], ) _global_aliases.update(a.name for a in aliases) all_exports: Set[str] = set() generated_code: List[str] = [] generated_code.append("# Copyright (c) Meta Platforms, Inc. 
and affiliates.") generated_code.append("#") generated_code.append( "# This source code is licensed under the MIT license found in the" ) generated_code.append("# LICENSE file in the root directory of this source tree.") generated_code.append("") generated_code.append("") generated_code.append("# This file was generated by libcst.codegen.gen_matcher_classes") generated_code.append("from dataclasses import dataclass") generated_code.append("from typing import Optional, Sequence, Union") generated_code.append("from typing_extensions import Literal") generated_code.append("import libcst as cst") generated_code.append("") generated_code.append( "from libcst.matchers._matcher_base import AbstractBaseMatcherNodeMeta, BaseMatcherNode, DoNotCareSentinel, DoNotCare, TypeOf, OneOf, AllOf, DoesNotMatch, MatchIfTrue, MatchRegex, MatchMetadata, MatchMetadataIfTrue, ZeroOrMore, AtLeastN, ZeroOrOne, AtMostN, SaveMatchedNode, extract, extractall, findall, matches, replace" ) all_exports.update( [ "BaseMatcherNode", "DoNotCareSentinel", "DoNotCare", "OneOf", "AllOf", "DoesNotMatch", "MatchIfTrue", "MatchRegex", "MatchMetadata", "MatchMetadataIfTrue", "TypeOf", "ZeroOrMore", "AtLeastN", "ZeroOrOne", "AtMostN", "SaveMatchedNode", "extract", "extractall", "findall", "matches", "replace", ] ) generated_code.append( "from libcst.matchers._decorators import call_if_inside, call_if_not_inside, visit, leave" ) all_exports.update(["call_if_inside", "call_if_not_inside", "visit", "leave"]) generated_code.append( "from libcst.matchers._visitors import MatchDecoratorMismatch, MatcherDecoratableTransformer, MatcherDecoratableVisitor" ) all_exports.update( [ "MatchDecoratorMismatch", "MatcherDecoratableTransformer", "MatcherDecoratableVisitor", ] ) generated_code.append("") generated_code.append("") generated_code.append("class _NodeABC(metaclass=AbstractBaseMatcherNodeMeta):") generated_code.append(" __slots__ = ()") for base in typeclasses: generated_code.append("") generated_code.append("") generated_code.append(f"class {base.__name__}(_NodeABC):") generated_code.append(" pass") all_exports.add(base.__name__) # Add a generic MetadataMatchType to be referred to by everywhere else. 
generated_code.append("") generated_code.append("") generated_code.append("MetadataMatchType = Union[MatchMetadata, MatchMetadataIfTrue]") for node in all_libcst_nodes: if node.__name__.startswith("Base"): continue classes: List[str] = [] for tc in typeclasses: if issubclass(node, tc): classes.append(tc.__name__) classes.append("BaseMatcherNode") has_aliases = False node_fields = list(_get_fields(node)) for field in node_fields: for alias in field.aliases: # Output a separator if we're going to output any aliases if not has_aliases: generated_code.append("") generated_code.append("") has_aliases = True # Must generate code for aliases before the class they are referenced in generated_code.append(f"{alias.name} = {alias.type}") generated_code.append("") generated_code.append("") generated_code.append("@dataclass(frozen=True, eq=False, unsafe_hash=False)") generated_code.append(f'class {node.__name__}({", ".join(classes)}):') all_exports.add(node.__name__) fields_printed = False for field in node_fields: fields_printed = True generated_code.append(f" {field.name}: {field.type} = DoNotCare()") # Add special metadata field generated_code.append( " metadata: Union[MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType]] = DoNotCare()" ) # Make sure to add an __all__ for flake8 and compatibility with "from libcst.matchers import *" generated_code.append(f"__all__ = {repr(sorted(all_exports))}") if __name__ == "__main__": # Output the code print("\n".join(generated_code)) LibCST-1.2.0/libcst/codegen/gen_type_mapping.py000066400000000000000000000044341456464173300213770ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List from libcst.codegen.gather import imports, nodebases, nodeuses generated_code: List[str] = [] generated_code.append("# Copyright (c) Meta Platforms, Inc. and affiliates.") generated_code.append("#") generated_code.append( "# This source code is licensed under the MIT license found in the" ) generated_code.append("# LICENSE file in the root directory of this source tree.") generated_code.append("") generated_code.append("") generated_code.append("# This file was generated by libcst.codegen.gen_type_mapping") generated_code.append("from typing import Dict as TypingDict, Type, Union") generated_code.append("") generated_code.append("from libcst._maybe_sentinel import MaybeSentinel") generated_code.append("from libcst._removal_sentinel import RemovalSentinel") generated_code.append("from libcst._nodes.base import CSTNode") # Import the types we use. These have to be type guarded since it would # cause an import cycle otherwise. 
generated_code.append("") generated_code.append("") for module, objects in imports.items(): generated_code.append(f"from {module} import (") generated_code.append(f" {', '.join(sorted(objects))}") generated_code.append(")") # Generate the base visit_ methods generated_code.append("") generated_code.append("") generated_code.append( "TYPED_FUNCTION_RETURN_MAPPING: TypingDict[Type[CSTNode], object] = {" ) for node in sorted(nodebases.keys(), key=lambda node: node.__name__): name = node.__name__ if name.startswith("Base"): continue valid_return_types: List[str] = [nodebases[node].__name__] node_uses = nodeuses[node] base_uses = nodeuses[nodebases[node]] if node_uses.maybe or base_uses.maybe: valid_return_types.append("MaybeSentinel") if ( node_uses.optional or node_uses.sequence or base_uses.optional or base_uses.sequence ): valid_return_types.append("RemovalSentinel") generated_code.append(f' {name}: Union[{", ".join(valid_return_types)}],') generated_code.append("}") if __name__ == "__main__": # Output the code print("\n".join(generated_code)) LibCST-1.2.0/libcst/codegen/gen_visitor_functions.py000066400000000000000000000105531456464173300224710ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import fields from typing import List from libcst.codegen.gather import imports, nodebases, nodeuses generated_code: List[str] = [] generated_code.append("# Copyright (c) Meta Platforms, Inc. and affiliates.") generated_code.append("#") generated_code.append( "# This source code is licensed under the MIT license found in the" ) generated_code.append("# LICENSE file in the root directory of this source tree.") generated_code.append("") generated_code.append("") generated_code.append("# This file was generated by libcst.codegen.gen_matcher_classes") generated_code.append("from typing import Optional, Union, TYPE_CHECKING") generated_code.append("") generated_code.append("from libcst._flatten_sentinel import FlattenSentinel") generated_code.append("from libcst._maybe_sentinel import MaybeSentinel") generated_code.append("from libcst._removal_sentinel import RemovalSentinel") generated_code.append("from libcst._typed_visitor_base import mark_no_op") # Import the types we use. These have to be type guarded since it would # cause an import cycle otherwise. 
generated_code.append("") generated_code.append("") generated_code.append("if TYPE_CHECKING:") for module, objects in imports.items(): generated_code.append(f" from {module} import ( # noqa: F401") generated_code.append(f" {', '.join(sorted(objects))}") generated_code.append(" )") # Generate the base visit_ methods generated_code.append("") generated_code.append("") generated_code.append("class CSTTypedBaseFunctions:") for node in sorted(nodebases.keys(), key=lambda node: node.__name__): name = node.__name__ if name.startswith("Base"): continue generated_code.append("") generated_code.append(" @mark_no_op") generated_code.append( f' def visit_{name}(self, node: "{name}") -> Optional[bool]:' ) generated_code.append(" pass") for field in fields(node) or []: if field.name == "_metadata": continue generated_code.append("") generated_code.append(" @mark_no_op") generated_code.append( f' def visit_{name}_{field.name}(self, node: "{name}") -> None:' ) generated_code.append(" pass") generated_code.append("") generated_code.append(" @mark_no_op") generated_code.append( f' def leave_{name}_{field.name}(self, node: "{name}") -> None:' ) generated_code.append(" pass") # Generate the visitor leave_ methods generated_code.append("") generated_code.append("") generated_code.append("class CSTTypedVisitorFunctions(CSTTypedBaseFunctions):") for node in sorted(nodebases.keys(), key=lambda node: node.__name__): name = node.__name__ if name.startswith("Base"): continue generated_code.append("") generated_code.append(" @mark_no_op") generated_code.append( f' def leave_{name}(self, original_node: "{name}") -> None:' ) generated_code.append(" pass") # Generate the transformer leave_ methods generated_code.append("") generated_code.append("") generated_code.append("class CSTTypedTransformerFunctions(CSTTypedBaseFunctions):") for node in sorted(nodebases.keys(), key=lambda node: node.__name__): name = node.__name__ if name.startswith("Base"): continue generated_code.append("") generated_code.append(" @mark_no_op") valid_return_types: List[str] = [f'"{nodebases[node].__name__}"'] node_uses = nodeuses[node] base_uses = nodeuses[nodebases[node]] if node_uses.maybe or base_uses.maybe: valid_return_types.append("MaybeSentinel") if node_uses.sequence or base_uses.sequence: valid_return_types.append(f'FlattenSentinel["{nodebases[node].__name__}"]') valid_return_types.append("RemovalSentinel") elif node_uses.optional or base_uses.optional: valid_return_types.append("RemovalSentinel") generated_code.append( f' def leave_{name}(self, original_node: "{name}", updated_node: "{name}") -> Union[{", ".join(valid_return_types)}]:' ) generated_code.append(" return updated_node") if __name__ == "__main__": # Output the code print("\n".join(generated_code)) LibCST-1.2.0/libcst/codegen/generate.py000066400000000000000000000132051456464173300176400ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# Usage: # # python -m libcst.codegen.generate --help # python -m libcst.codegen.generate visitors import argparse import os import os.path import shutil import subprocess import sys from typing import List import libcst as cst from libcst import ensure_type, parse_module from libcst.codegen.transforms import ( DoubleQuoteForwardRefsTransformer, SimplifyUnionsTransformer, ) def format_file(fname: str) -> None: subprocess.check_call( ["ufmt", "format", fname], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ) def clean_generated_code(code: str) -> str: """ Generalized sanity clean-up for all codegen so we can fix issues such as Union[SingleType]. The transforms found here are strictly for form and do not affect functionality. """ module = parse_module(code) module = ensure_type(module.visit(SimplifyUnionsTransformer()), cst.Module) module = ensure_type(module.visit(DoubleQuoteForwardRefsTransformer()), cst.Module) return module.code def codegen_visitors() -> None: # First, back up the original file, since we have a nasty bootstrap problem. # We're in a situation where we want to import libcst in order to get the # valid nodes for visitors, but doing so means that we depend on ourselves. # So, this attempts to keep the repo in a working state for as many operations # as possible. base = os.path.abspath( os.path.join(os.path.dirname(os.path.abspath(__file__)), "../") ) visitors_file = os.path.join(base, "_typed_visitor.py") shutil.copyfile(visitors_file, f"{visitors_file}.bak") try: # Now that we backed up the file, let's codegen a new version. # We import now, because this script does work on import. import libcst.codegen.gen_visitor_functions as visitor_codegen new_code = clean_generated_code("\n".join(visitor_codegen.generated_code)) with open(visitors_file, "w") as fp: fp.write(new_code) fp.close() # Now, see if the file we generated causes any import errors # by attempting to run codegen again in a new process. subprocess.check_call( [sys.executable, "-m", "libcst.codegen.gen_visitor_functions"], cwd=base, stdout=subprocess.DEVNULL, ) # If it worked, let's format the file format_file(visitors_file) # Since we were successful with importing, we can remove the backup. os.remove(f"{visitors_file}.bak") # Inform the user print(f"Successfully generated a new {visitors_file} file.") except Exception: # On failure, we put the original file back, and keep the failed version # for developers to look at. print( f"Failed to generate a new {visitors_file} file, failure " + f"is saved in {visitors_file}.failed_generate.", file=sys.stderr, ) os.rename(visitors_file, f"{visitors_file}.failed_generate") os.rename(f"{visitors_file}.bak", visitors_file) # Reraise so we can debug raise def codegen_matchers() -> None: # Given that matchers isn't in the default import chain, we don't have to # worry about generating invalid code that then prevents us from generating # again.
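# (Contrast with codegen_visitors above: _typed_visitor.py *is* on the default
# import path, hence the backup dance. Editor's sketch of that pattern in
# miniature, assuming a hypothetical `regenerate()` helper:
#
#     shutil.copyfile(target, target + ".bak")            # keep a known-good copy
#     write_out(target, regenerate())                     # write the new version
#     subprocess.check_call([sys.executable, "-m", mod])  # does it still import?
#     os.remove(target + ".bak")                          # success: drop backup
#     # on failure: rename target to target.failed_generate, restore the .bak
# )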
import libcst.codegen.gen_matcher_classes as matcher_codegen base = os.path.abspath( os.path.join(os.path.dirname(os.path.abspath(__file__)), "../") ) matchers_file = os.path.join(base, "matchers/__init__.py") new_code = clean_generated_code("\n".join(matcher_codegen.generated_code)) with open(matchers_file, "w") as fp: fp.write(new_code) fp.close() # If it worked, let's format the file format_file(matchers_file) # Inform the user print(f"Successfully generated a new {matchers_file} file.") def codegen_return_types() -> None: # Given that matchers isn't in the default import chain, we don't have to # worry about generating invalid code that then prevents us from generating # again. import libcst.codegen.gen_type_mapping as type_codegen base = os.path.abspath( os.path.join(os.path.dirname(os.path.abspath(__file__)), "../") ) type_mapping_file = os.path.join(base, "matchers/_return_types.py") new_code = clean_generated_code("\n".join(type_codegen.generated_code)) with open(type_mapping_file, "w") as fp: fp.write(new_code) fp.close() # If it worked, let's format the file format_file(type_mapping_file) # Inform the user print(f"Successfully generated a new {type_mapping_file} file.") def main(cli_args: List[str]) -> int: # Parse out arguments, run codegen parser = argparse.ArgumentParser(description="Generate code for libcst.") parser.add_argument( "system", choices=["all", "visitors", "matchers", "return_types"], help="System to generate code for.", type=str, ) args = parser.parse_args(cli_args) if args.system == "all": codegen_visitors() codegen_matchers() codegen_return_types() return 0 if args.system == "visitors": codegen_visitors() return 0 elif args.system == "matchers": codegen_matchers() return 0 elif args.system == "return_types": codegen_return_types() return 0 else: print(f'Invalid system "{args.system}".') return 1 if __name__ == "__main__": sys.exit(main(sys.argv[1:])) LibCST-1.2.0/libcst/codegen/tests/000077500000000000000000000000001456464173300166355ustar00rootroot00000000000000LibCST-1.2.0/libcst/codegen/tests/__init__.py000066400000000000000000000002631456464173300207470ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/codegen/tests/test_codegen_clean.py000066400000000000000000000113041456464173300230130ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import os.path import libcst.codegen.gen_matcher_classes as matcher_codegen import libcst.codegen.gen_type_mapping as type_codegen import libcst.codegen.gen_visitor_functions as visitor_codegen from libcst.codegen.generate import clean_generated_code, format_file from libcst.testing.utils import UnitTest class TestCodegenClean(UnitTest): def assert_code_matches( self, old_code: str, new_code: str, module_name: str, ) -> None: self.assertTrue( old_code == new_code, f"{module_name} needs new codegen, see " + "`python -m libcst.codegen.generate --help` " + "for instructions, or run `python -m libcst.codegen.generate all`", ) def test_codegen_clean_visitor_functions(self) -> None: """ Verifies that codegen of visitor functions would not result in a changed file. If this test fails, please run 'python -m libcst.codegen.generate all' to generate new files.
""" new_code = clean_generated_code("\n".join(visitor_codegen.generated_code)) new_file = os.path.join( os.path.dirname(os.path.abspath(__file__)), "visitor_codegen.deleteme.py" ) with open(new_file, "w") as fp: fp.write(new_code) try: format_file(new_file) except Exception: # We failed to format, but this is probably due to invalid code that # black doesn't like. This test will still fail and report to run codegen. pass with open(new_file, "r") as fp: new_code = fp.read() os.remove(new_file) with open( os.path.join( os.path.dirname(os.path.abspath(__file__)), "../../_typed_visitor.py" ), "r", ) as fp: old_code = fp.read() # Now that we've done simple codegen, verify that it matches. self.assert_code_matches(old_code, new_code, "libcst._typed_visitor") def test_codegen_clean_matcher_classes(self) -> None: """ Verifies that codegen of matcher classes would not result in a changed file. If this test fails, please run 'python -m libcst.codegen.generate all' to generate new files. """ new_code = clean_generated_code("\n".join(matcher_codegen.generated_code)) new_file = os.path.join( os.path.dirname(os.path.abspath(__file__)), "matcher_codegen.deleteme.py" ) with open(new_file, "w") as fp: fp.write(new_code) try: format_file(new_file) except Exception: # We failed to format, but this is probably due to invalid code that # black doesn't like. This test will still fail and report to run codegen. pass with open(new_file, "r") as fp: new_code = fp.read() os.remove(new_file) with open( os.path.join( os.path.dirname(os.path.abspath(__file__)), "../../matchers/__init__.py" ), "r", ) as fp: old_code = fp.read() # Now that we've done simple codegen, verify that it matches. self.assert_code_matches(old_code, new_code, "libcst.matchers.__init__") def test_codegen_clean_return_types(self) -> None: """ Verifies that codegen of return types would not result in a changed file. If this test fails, please run 'python -m libcst.codegen.generate all' to generate new files. """ new_code = clean_generated_code("\n".join(type_codegen.generated_code)) new_file = os.path.join( os.path.dirname(os.path.abspath(__file__)), "type_codegen.deleteme.py" ) with open(new_file, "w") as fp: fp.write(new_code) try: format_file(new_file) except Exception: # We failed to format, but this is probably due to invalid code that # black doesn't like. This test will still fail and report to run codegen. pass with open(new_file, "r") as fp: new_code = fp.read() os.remove(new_file) with open( os.path.join( os.path.dirname(os.path.abspath(__file__)), "../../matchers/_return_types.py", ), "r", ) as fp: old_code = fp.read() # Now that we've done simple codegen, verify that it matches. self.assert_code_matches(old_code, new_code, "libcst.matchers._return_types") LibCST-1.2.0/libcst/codegen/transforms.py000066400000000000000000000035371456464173300202530ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # This holds a series of transforms that help prettify generated code. # The design is such that any of them could be left out and the code # in question will still be correct, but possibly uglier to look at. # Great care should be taken to include only transforms that do not # affect the behavior of generated code, only the style for readability. 
# As a result, since these can be skipped without harm, it is okay to # use features such as matchers which rely on previously generated # code to function. import ast import libcst as cst import libcst.matchers as m class SimplifyUnionsTransformer(m.MatcherDecoratableTransformer): @m.leave(m.Subscript(m.Name("Union"))) def _leave_union( self, original_node: cst.Subscript, updated_node: cst.Subscript ) -> cst.BaseExpression: if len(updated_node.slice) == 1: # This is a Union[SimpleType,] which is equivalent to just SimpleType return cst.ensure_type(updated_node.slice[0].slice, cst.Index).value return updated_node class DoubleQuoteForwardRefsTransformer(m.MatcherDecoratableTransformer): @m.call_if_inside(m.Annotation()) def leave_SimpleString( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: # For prettiness, convert all single-quoted forward refs to double-quoted. if "'" in updated_node.quote: new_value = f'"{updated_node.value[1:-1]}"' try: if updated_node.evaluated_value == ast.literal_eval(new_value): return updated_node.with_changes(value=new_value) except SyntaxError: pass return updated_node LibCST-1.2.0/libcst/codemod/000077500000000000000000000000001456464173300155015ustar00rootroot00000000000000LibCST-1.2.0/libcst/codemod/__init__.py000066400000000000000000000026431456464173300176170ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod._cli import ( diff_code, exec_transform_with_prettyprint, gather_files, parallel_exec_transform_with_prettyprint, ParallelTransformResult, ) from libcst.codemod._codemod import Codemod from libcst.codemod._command import ( CodemodCommand, MagicArgsCodemodCommand, VisitorBasedCodemodCommand, ) from libcst.codemod._context import CodemodContext from libcst.codemod._runner import ( SkipFile, SkipReason, transform_module, TransformExit, TransformFailure, TransformResult, TransformSkip, TransformSuccess, ) from libcst.codemod._testing import CodemodTest from libcst.codemod._visitor import ContextAwareTransformer, ContextAwareVisitor __all__ = [ "Codemod", "CodemodContext", "CodemodCommand", "VisitorBasedCodemodCommand", "MagicArgsCodemodCommand", "ContextAwareTransformer", "ContextAwareVisitor", "ParallelTransformResult", "TransformSuccess", "TransformFailure", "TransformExit", "SkipReason", "TransformSkip", "SkipFile", "TransformResult", "CodemodTest", "transform_module", "gather_files", "exec_transform_with_prettyprint", "parallel_exec_transform_with_prettyprint", "diff_code", ] LibCST-1.2.0/libcst/codemod/_cli.py000066400000000000000000000574061456464173300167750ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # """ Provides helpers for CLI interaction. 
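Example (editor's sketch; ``MyCodemod`` stands in for any concrete
:class:`~libcst.codemod.Codemod` subclass)::

    from libcst.codemod import (
        CodemodContext,
        gather_files,
        parallel_exec_transform_with_prettyprint,
    )

    files = gather_files(["my_project/"])
    result = parallel_exec_transform_with_prettyprint(
        MyCodemod(CodemodContext()), files, unified_diff=3
    )
    print(f"{result.successes} succeeded, {result.failures} failed")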
""" import difflib import os.path import re import subprocess import sys import time import traceback from dataclasses import dataclass, replace from multiprocessing import cpu_count, Pool from pathlib import Path from typing import Any, AnyStr, cast, Dict, List, Optional, Sequence, Union from libcst import parse_module, PartialParserConfig from libcst.codemod._codemod import Codemod from libcst.codemod._dummy_pool import DummyPool from libcst.codemod._runner import ( SkipFile, SkipReason, transform_module, TransformExit, TransformFailure, TransformResult, TransformSkip, TransformSuccess, ) from libcst.helpers import calculate_module_and_package from libcst.metadata import FullRepoManager _DEFAULT_GENERATED_CODE_MARKER: str = f"@gen{''}erated" def invoke_formatter(formatter_args: Sequence[str], code: AnyStr) -> AnyStr: """ Given a code string, run an external formatter on the code and return new formatted code. """ # Make sure there is something to run if len(formatter_args) == 0: raise Exception("No formatter configured but code formatting requested.") # Invoke the formatter, giving it the code as stdin and assuming the formatted # code comes from stdout. work_with_bytes = isinstance(code, bytes) return cast( AnyStr, subprocess.check_output( formatter_args, input=code, universal_newlines=not work_with_bytes, encoding=None if work_with_bytes else "utf-8", ), ) def print_execution_result(result: TransformResult) -> None: for warning in result.warning_messages: print(f"WARNING: {warning}", file=sys.stderr) if isinstance(result, TransformFailure): error = result.error if isinstance(error, subprocess.CalledProcessError): print(error.output.decode("utf-8"), file=sys.stderr) print(result.traceback_str, file=sys.stderr) def gather_files( files_or_dirs: Sequence[str], *, include_stubs: bool = False ) -> List[str]: """ Given a list of files or directories (can be intermingled), return a list of all python files that exist at those locations. If ``include_stubs`` is ``True``, this will include ``.py`` and ``.pyi`` stub files. If it is ``False``, only ``.py`` files will be included in the returned list. """ ret: List[str] = [] for fd in files_or_dirs: if os.path.isfile(fd): ret.append(fd) elif os.path.isdir(fd): ret.extend( str(p) for p in Path(fd).rglob("*.py*") if Path.is_file(p) and ( str(p).endswith("py") or (include_stubs and str(p).endswith("pyi")) ) ) return sorted(ret) def diff_code( oldcode: str, newcode: str, context: int, *, filename: Optional[str] = None ) -> str: """ Given two strings representing a module before and after a codemod, produce a unified diff of the changes with ``context`` lines of context. Optionally, assign the ``filename`` to the change, and if it is not available, assume that the change was performed on stdin/stdout. If no change is detected, return an empty string instead of returning an empty unified diff. This is comparable to revision control software which only shows differences for files that have changed. 
""" if oldcode == newcode: return "" if filename: difflines = difflib.unified_diff( oldcode.split("\n"), newcode.split("\n"), fromfile=filename, tofile=filename, lineterm="", n=context, ) else: difflines = difflib.unified_diff( oldcode.split("\n"), newcode.split("\n"), lineterm="", n=context ) return "\n".join(difflines) def exec_transform_with_prettyprint( transform: Codemod, code: str, *, include_generated: bool = False, generated_code_marker: str = _DEFAULT_GENERATED_CODE_MARKER, format_code: bool = False, formatter_args: Sequence[str] = (), python_version: Optional[str] = None, ) -> Optional[str]: """ Given an instantiated codemod and a string representing a module, transform that code by executing the transform, optionally invoking the formatter and finally printing any generated warnings to stderr. If the code includes the generated marker at any spot and ``include_generated`` is not set to ``True``, the code will not be modified. If ``format_code`` is set to ``False`` or the instantiated codemod does not modify the code, the code will not be formatted. If a ``python_version`` is provided, then we will parse the module using this version. Otherwise, we will use the version of the currently executing python binary. In all cases a module will be returned. Whether it is changed depends on the input parameters as well as the codemod itself. """ if not include_generated and generated_code_marker in code: print( "WARNING: Code is generated and we are set to ignore generated code, " + "skipping!", file=sys.stderr, ) return code result = transform_module(transform, code, python_version=python_version) maybe_code: Optional[str] = ( None if isinstance(result, (TransformFailure, TransformExit, TransformSkip)) else result.code ) if maybe_code is not None and format_code: try: maybe_code = invoke_formatter(formatter_args, maybe_code) except Exception as ex: # Failed to format code, treat as a failure and make sure that # we print the exception for debugging. 
maybe_code = None result = TransformFailure( error=ex, traceback_str=traceback.format_exc(), warning_messages=result.warning_messages, ) # Finally, print the output, regardless of what happened print_execution_result(result) return maybe_code @dataclass(frozen=True) class ExecutionResult: # File we have results for filename: str # Whether we actually changed the code for the file or not changed: bool # The actual result transform_result: TransformResult @dataclass(frozen=True) class ExecutionConfig: blacklist_patterns: Sequence[str] = () format_code: bool = False formatter_args: Sequence[str] = () generated_code_marker: str = _DEFAULT_GENERATED_CODE_MARKER include_generated: bool = False python_version: Optional[str] = None repo_root: Optional[str] = None unified_diff: Optional[int] = None def _execute_transform( # noqa: C901 transformer: Codemod, filename: str, config: ExecutionConfig, ) -> ExecutionResult: for pattern in config.blacklist_patterns: if re.fullmatch(pattern, filename): return ExecutionResult( filename=filename, changed=False, transform_result=TransformSkip( skip_reason=SkipReason.BLACKLISTED, skip_description=f"Blacklisted by pattern {pattern}.", ), ) try: with open(filename, "rb") as fp: oldcode = fp.read() # Skip generated files if ( not config.include_generated and config.generated_code_marker.encode("utf-8") in oldcode ): return ExecutionResult( filename=filename, changed=False, transform_result=TransformSkip( skip_reason=SkipReason.GENERATED, skip_description="Generated file.", ), ) # Somewhat gross hack to provide the filename in the transform's context. # We do this after the fork so that a context that was initialized with # some defaults before calling parallel_exec_transform_with_prettyprint # will be updated per-file. transformer.context = replace( transformer.context, filename=filename, scratch={}, ) # determine the module and package name for this file try: module_name_and_package = calculate_module_and_package( config.repo_root or ".", filename ) transformer.context = replace( transformer.context, full_module_name=module_name_and_package.name, full_package_name=module_name_and_package.package, ) except ValueError as ex: print( f"Failed to determine module name for {filename}: {ex}", file=sys.stderr ) # Run the transform, bail if we failed or if we aren't formatting code try: input_tree = parse_module( oldcode, config=( PartialParserConfig(python_version=str(config.python_version)) if config.python_version is not None else PartialParserConfig() ), ) output_tree = transformer.transform_module(input_tree) newcode = output_tree.bytes encoding = output_tree.encoding except KeyboardInterrupt: return ExecutionResult( filename=filename, changed=False, transform_result=TransformExit() ) except SkipFile as ex: return ExecutionResult( filename=filename, changed=False, transform_result=TransformSkip( skip_reason=SkipReason.OTHER, skip_description=str(ex), warning_messages=transformer.context.warnings, ), ) except Exception as ex: return ExecutionResult( filename=filename, changed=False, transform_result=TransformFailure( error=ex, traceback_str=traceback.format_exc(), warning_messages=transformer.context.warnings, ), ) # Call formatter if needed, but only if we actually changed something in this # file if config.format_code and newcode != oldcode: try: newcode = invoke_formatter(config.formatter_args, newcode) except KeyboardInterrupt: return ExecutionResult( filename=filename, changed=False, transform_result=TransformExit(), ) except Exception as ex: return 
ExecutionResult( filename=filename, changed=False, transform_result=TransformFailure( error=ex, traceback_str=traceback.format_exc(), warning_messages=transformer.context.warnings, ), ) # Format as unified diff if needed, otherwise save it back changed = oldcode != newcode if config.unified_diff: newcode = diff_code( oldcode.decode(encoding), newcode.decode(encoding), config.unified_diff, filename=filename, ) else: # Write back if we changed if changed: with open(filename, "wb") as fp: fp.write(newcode) # Not strictly necessary, but saves space in pickle since we won't use it newcode = "" # Inform success return ExecutionResult( filename=filename, changed=changed, transform_result=TransformSuccess( warning_messages=transformer.context.warnings, code=newcode ), ) except KeyboardInterrupt: return ExecutionResult( filename=filename, changed=False, transform_result=TransformExit() ) except Exception as ex: return ExecutionResult( filename=filename, changed=False, transform_result=TransformFailure( error=ex, traceback_str=traceback.format_exc(), warning_messages=transformer.context.warnings, ), ) class Progress: ERASE_CURRENT_LINE: str = "\r\033[2K" def __init__(self, *, enabled: bool, total: int) -> None: self.enabled = enabled self.total = total # 1/100 = 0, len("0") = 1, precision = 0, more digits for more files self.pretty_precision: int = len(str(self.total // 100)) - 1 # Pretend we start processing immediately. This is not true, but it's # close enough to true. self.started_at: float = time.time() def print(self, finished: int) -> None: if not self.enabled: return left = self.total - finished percent = 100.0 * (float(finished) / float(self.total)) elapsed_time = max(time.time() - self.started_at, 0) print( f"{self.ERASE_CURRENT_LINE}{self._human_seconds(elapsed_time)} {percent:.{self.pretty_precision}f}% complete, {self.estimate_completion(elapsed_time, finished, left)} estimated for {left} files to go...", end="", file=sys.stderr, ) def _human_seconds(self, seconds: Union[int, float]) -> str: """ This returns a string which is a human-ish readable elapsed time such as 30.42s or 10m 31s """ minutes, seconds = divmod(seconds, 60) hours, minutes = divmod(minutes, 60) if hours > 0: return f"{hours:.0f}h {minutes:02.0f}m {seconds:02.0f}s" elif minutes > 0: return f"{minutes:02.0f}m {seconds:02.0f}s" else: return f"{seconds:02.2f}s" def estimate_completion( self, elapsed_seconds: float, files_finished: int, files_left: int ) -> str: """ Computes a really basic estimated completion given a number of operations still to do. """ if files_finished <= 0: # Technically infinite but calculating sounds better. return "[calculating]" fps = files_finished / elapsed_seconds estimated_seconds_left = files_left / fps return self._human_seconds(estimated_seconds_left) def clear(self) -> None: if not self.enabled: return print(self.ERASE_CURRENT_LINE, end="", file=sys.stderr) def _print_parallel_result( exec_result: ExecutionResult, progress: Progress, *, unified_diff: bool, show_successes: bool, hide_generated: bool, hide_blacklisted: bool, ) -> None: filename = exec_result.filename result = exec_result.transform_result if isinstance(result, TransformSkip): # Skipped file, print message and don't write back since not changed. 
if not ( (result.skip_reason is SkipReason.BLACKLISTED and hide_blacklisted) or (result.skip_reason is SkipReason.GENERATED and hide_generated) ): progress.clear() print(f"Codemodding {filename}", file=sys.stderr) print_execution_result(result) print( f"Skipped codemodding {filename}: {result.skip_description}\n", file=sys.stderr, ) elif isinstance(result, TransformFailure): # Print any exception, don't write the file back. progress.clear() print(f"Codemodding {filename}", file=sys.stderr) print_execution_result(result) print(f"Failed to codemod {filename}\n", file=sys.stderr) elif isinstance(result, TransformSuccess): if show_successes or result.warning_messages: # Print any warnings, save the changes if there were any. progress.clear() print(f"Codemodding {filename}", file=sys.stderr) print_execution_result(result) print( f"Successfully codemodded {filename}" + (" with warnings\n" if result.warning_messages else "\n"), file=sys.stderr, ) # In unified diff mode, the code is a diff we must print. if unified_diff and result.code: print(result.code) @dataclass(frozen=True) class ParallelTransformResult: """ The result of running :func:`~libcst.codemod.parallel_exec_transform_with_prettyprint` against a series of files. This is a simple summary, with counts for number of successfully codemodded files, number of files that we failed to codemod, number of warnings generated when running the codemod across the files, and the number of files that we skipped when running the codemod. """ #: Number of files that we successfully transformed. successes: int #: Number of files that we failed to transform. failures: int #: Number of warnings generated when running transform across files. warnings: int #: Number of files skipped because they were blacklisted, generated, #: or the codemod requested to skip. skips: int # Unfortunate wrapper required since there is no `istarmap_unordered`... def _execute_transform_wrap( job: Dict[str, Any], ) -> ExecutionResult: return _execute_transform(**job) def parallel_exec_transform_with_prettyprint( # noqa: C901 transform: Codemod, files: Sequence[str], *, jobs: Optional[int] = None, unified_diff: Optional[int] = None, include_generated: bool = False, generated_code_marker: str = _DEFAULT_GENERATED_CODE_MARKER, format_code: bool = False, formatter_args: Sequence[str] = (), show_successes: bool = False, hide_generated: bool = False, hide_blacklisted: bool = False, hide_progress: bool = False, blacklist_patterns: Sequence[str] = (), python_version: Optional[str] = None, repo_root: Optional[str] = None, ) -> ParallelTransformResult: """ Given a list of files and an instantiated codemod we should apply to them, fork and apply the codemod in parallel to all of the files, including any configured formatter. The ``jobs`` parameter controls the maximum number of in-flight transforms, and needs to be at least 1. If not included, the number of jobs will automatically be set to the number of CPU cores. If ``unified_diff`` is set to a number, changes to files will be printed to stdout with ``unified_diff`` lines of context. If it is set to ``None`` or left out, files themselves will be updated with changes and formatting. If a ``python_version`` is provided, then we will parse each source file using this version. Otherwise, we will use the version of the currently executing python binary. A progress indicator as well as any generated warnings will be printed to stderr. To suppress the interactive progress indicator, set ``hide_progress`` to ``True``.
Files that include the generated code marker will be skipped unless the ``include_generated`` parameter is set to ``True``. Similarly, files that match a supplied blacklist of regex patterns will be skipped. Warnings for skipping both blacklisted and generated files will be printed to stderr along with warnings generated by the codemod unless ``hide_blacklisted`` and ``hide_generated`` are set to ``True``. Files that were successfully codemodded will not be printed to stderr unless ``show_successes`` is set to ``True``. To make this API possible, we take an instantiated transform. This is due to the fact that lambdas are not pickleable and pickling functions is undefined. This means we're implicitly relying on fork behavior on UNIX-like systems, and this function will not work on Windows systems. To create a command-line utility that runs on Windows, please instead see :func:`~libcst.codemod.exec_transform_with_prettyprint`. """ # Ensure that we have no duplicates, otherwise we might get race conditions # on write. files = sorted({os.path.abspath(f) for f in files}) total = len(files) progress = Progress(enabled=not hide_progress, total=total) chunksize = 4 # Grab number of cores if we need to jobs = min( jobs if jobs is not None else cpu_count(), (len(files) + chunksize - 1) // chunksize, ) if jobs < 1: raise Exception("Must have at least one job to process!") if total == 0: return ParallelTransformResult(successes=0, failures=0, skips=0, warnings=0) if repo_root is not None: # Make sure if there is a root that we have the absolute path to it. repo_root = os.path.abspath(repo_root) # Spin up a full repo metadata manager so that we can provide metadata # like type inference to individual forked processes. print("Calculating full-repo metadata...", file=sys.stderr) metadata_manager = FullRepoManager( repo_root, files, transform.get_inherited_dependencies(), ) metadata_manager.resolve_cache() transform.context = replace( transform.context, metadata_manager=metadata_manager, ) print("Executing codemod...", file=sys.stderr) config = ExecutionConfig( repo_root=repo_root, unified_diff=unified_diff, include_generated=include_generated, generated_code_marker=generated_code_marker, format_code=format_code, formatter_args=formatter_args, blacklist_patterns=blacklist_patterns, python_version=python_version, ) if total == 1 or jobs == 1: # Simple case, we should not pay for process overhead. # Let's just use a dummy synchronous pool. jobs = 1 pool_impl = DummyPool else: pool_impl = Pool # Warm the parser, pre-fork. 
parse_module( "", config=( PartialParserConfig(python_version=python_version) if python_version is not None else PartialParserConfig() ), ) successes: int = 0 failures: int = 0 warnings: int = 0 skips: int = 0 with pool_impl(processes=jobs) as p: # type: ignore args = [ { "transformer": transform, "filename": filename, "config": config, } for filename in files ] try: for result in p.imap_unordered( _execute_transform_wrap, args, chunksize=chunksize ): # Print an execution result, keep track of failures _print_parallel_result( result, progress, unified_diff=bool(unified_diff), show_successes=show_successes, hide_generated=hide_generated, hide_blacklisted=hide_blacklisted, ) progress.print(successes + failures + skips) if isinstance(result.transform_result, TransformFailure): failures += 1 elif isinstance(result.transform_result, TransformSuccess): successes += 1 elif isinstance( result.transform_result, (TransformExit, TransformSkip) ): skips += 1 warnings += len(result.transform_result.warning_messages) finally: progress.clear() # Return whether there was one or more failure. return ParallelTransformResult( successes=successes, failures=failures, skips=skips, warnings=warnings ) LibCST-1.2.0/libcst/codemod/_codemod.py000066400000000000000000000114641456464173300176320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from abc import ABC, abstractmethod from contextlib import contextmanager from dataclasses import replace from typing import Generator from libcst import MetadataDependent, MetadataWrapper, Module from libcst.codemod._context import CodemodContext class Codemod(MetadataDependent, ABC): """ Abstract base class that all codemods must subclass from. Classes wishing to perform arbitrary, non-visitor-based mutations on a tree should subclass from this class directly. Classes wishing to perform visitor-based mutation should instead subclass from :class:`~libcst.codemod.ContextAwareTransformer`. Note that a :class:`~libcst.codemod.Codemod` is a subclass of :class:`~libcst.MetadataDependent`, meaning that you can declare metadata dependencies with the :attr:`~libcst.MetadataDependent.METADATA_DEPENDENCIES` class property and while you are executing a transform you can call :meth:`~libcst.MetadataDependent.get_metadata` to retrieve the resolved metadata. """ def __init__(self, context: CodemodContext) -> None: MetadataDependent.__init__(self) self.context: CodemodContext = context def should_allow_multiple_passes(self) -> bool: """ Override this and return ``True`` to allow your transform to be called repeatedly until the tree doesn't change between passes. By default, this is off, and should suffice for most transforms. """ return False def warn(self, warning: str) -> None: """ Emit a warning that is displayed to the user who has invoked this codemod. """ self.context.warnings.append(warning) @property def module(self) -> Module: """ Reference to the currently-traversed module. Note that this is only available during the execution of a codemod. The module reference is particularly handy if you want to use :meth:`libcst.Module.code_for_node` or :attr:`libcst.Module.config_for_parsing` and don't wish to track a reference to the top-level module manually. """ module = self.context.module if module is None: raise Exception( f"Attempted access of {self.__class__.__name__}.module outside of " + "transform_module()." 
) return module @abstractmethod def transform_module_impl(self, tree: Module) -> Module: """ Override this with your transform. You should take in the tree, optionally mutate it and then return the mutated version. The module reference and all calculated metadata are available for the lifetime of this function. """ ... @contextmanager def _handle_metadata_reference( self, module: Module ) -> Generator[Module, None, None]: oldwrapper = self.context.wrapper metadata_manager = self.context.metadata_manager filename = self.context.filename if metadata_manager is not None and filename: # We can look up full-repo metadata for this codemod! cache = metadata_manager.get_cache_for_path(filename) wrapper = MetadataWrapper(module, cache=cache) else: # We are missing either the repo manager or the current path, # which can happen when we are codemodding from stdin or when # an upstream dependency manually instantiates us. wrapper = MetadataWrapper(module) with self.resolve(wrapper): self.context = replace(self.context, wrapper=wrapper) try: yield wrapper.module finally: self.context = replace(self.context, wrapper=oldwrapper) def transform_module(self, tree: Module) -> Module: """ Transform entrypoint which handles multi-pass logic and metadata calculation for you. This is the method that you should call if you wish to invoke a codemod directly. This is the method that is called by :func:`~libcst.codemod.transform_module`. """ if not self.should_allow_multiple_passes(): with self._handle_metadata_reference(tree) as tree_with_metadata: return self.transform_module_impl(tree_with_metadata) # We allow multiple passes, so we execute 1+ passes until there are # no more changes. previous: Module = tree while True: with self._handle_metadata_reference(tree) as tree_with_metadata: tree = self.transform_module_impl(tree_with_metadata) if tree.deep_equals(previous): break previous = tree return tree LibCST-1.2.0/libcst/codemod/_command.py000066400000000000000000000205211456464173300176300ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import argparse import inspect from abc import ABC, abstractmethod from typing import Dict, Generator, List, Type, TypeVar from libcst import Module from libcst.codemod._codemod import Codemod from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareTransformer from libcst.codemod.visitors._add_imports import AddImportsVisitor from libcst.codemod.visitors._remove_imports import RemoveImportsVisitor _Codemod = TypeVar("_Codemod", bound=Codemod) class CodemodCommand(Codemod, ABC): """ A :class:`~libcst.codemod.Codemod` which can be invoked on the command-line using the ``libcst.tool codemod`` utility. It behaves like any other codemod in that it can be instantiated and run identically to a :class:`~libcst.codemod.Codemod`. However, it provides support for providing help text and command-line arguments to ``libcst.tool codemod`` as well as facilities for automatically running certain common transforms after executing your :meth:`~libcst.codemod.Codemod.transform_module_impl`. The following list of transforms are automatically run at this time: - :class:`~libcst.codemod.visitors.AddImportsVisitor` (adds needed imports to a module). - :class:`~libcst.codemod.visitors.RemoveImportsVisitor` (removes unreferenced imports from a module). 
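For example (editor's sketch), a command may schedule an import from within a
visitor method and rely on the automatic
:class:`~libcst.codemod.visitors.AddImportsVisitor` pass to materialize it::

    class MyCommand(VisitorBasedCodemodCommand):
        def leave_Pass(self, original_node, updated_node):
            AddImportsVisitor.add_needed_import(self.context, "typing", "Optional")
            return updated_node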
""" #: An overrideable description attribute so that codemods can provide #: a short summary of what they do. This description will show up in #: command-line help as well as when listing available codemods. DESCRIPTION: str = "No description." @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: """ Override this to add arguments to the CLI argument parser. These args will show up when the user invokes ``libcst.tool codemod`` with ``--help``. They will also be presented to your class's ``__init__`` method. So, if you define a command with an argument 'foo', you should also have a corresponding 'foo' positional or keyword argument in your class's ``__init__`` method. """ pass def _instantiate_and_run(self, transform: Type[_Codemod], tree: Module) -> Module: inst = transform(self.context) return inst.transform_module(tree) @abstractmethod def transform_module_impl(self, tree: Module) -> Module: """ Override this with your transform. You should take in the tree, optionally mutate it and then return the mutated version. The module reference and all calculated metadata are available for the lifetime of this function. """ ... def transform_module(self, tree: Module) -> Module: # Overrides (but then calls) Codemod's transform_module to provide # a spot where additional supported transforms can be attached and run. tree = super().transform_module(tree) # List of transforms we should run, with their context key they use # for storing in context.scratch. Typically, the transform will also # have a static method that other transforms can use which takes # a context and other optional args and modifies its own context key # accordingly. We import them here so that we don't have circular imports. supported_transforms: Dict[str, Type[Codemod]] = { AddImportsVisitor.CONTEXT_KEY: AddImportsVisitor, RemoveImportsVisitor.CONTEXT_KEY: RemoveImportsVisitor, } # For any visitors that we support auto-running, run them here if needed. for key, transform in supported_transforms.items(): if key in self.context.scratch: # We have work to do, so lets run this. tree = self._instantiate_and_run(transform, tree) # We're finally done! return tree class VisitorBasedCodemodCommand(ContextAwareTransformer, CodemodCommand, ABC): """ A command that acts identically to a visitor-based transform, but also has the support of :meth:`~libcst.codemod.CodemodCommand.add_args` and running supported helper transforms after execution. See :class:`~libcst.codemod.CodemodCommand` and :class:`~libcst.codemod.ContextAwareTransformer` for additional documentation. """ pass class MagicArgsCodemodCommand(CodemodCommand, ABC): """ A "magic" args command, which auto-magically looks up the transforms that are yielded from :meth:`~libcst.codemod.MagicArgsCodemodCommand.get_transforms` and instantiates them using values out of the context. Visitors yielded in :meth:`~libcst.codemod.MagicArgsCodemodCommand.get_transforms` must have constructor arguments that match a key in the context :attr:`~libcst.codemod.CodemodContext.scratch`. The easiest way to guarantee that is to use :meth:`~libcst.codemod.CodemodCommand.add_args` to add a command arg that will be parsed for each of the args. However, if you wish to chain transforms, adding to the scratch in one transform will make the value available to the constructor in subsequent transforms as well as the scratch for subsequent transforms. 
""" def __init__(self, context: CodemodContext, **kwargs: Dict[str, object]) -> None: super().__init__(context) self.context.scratch.update(kwargs) @abstractmethod def get_transforms(self) -> Generator[Type[Codemod], None, None]: """ A generator which yields one or more subclasses of :class:`~libcst.codemod.Codemod`. In the general case, you will usually yield a series of classes, but it is possible to programmatically decide which classes to yield depending on the contents of the context :attr:`~libcst.codemod.CodemodContext.scratch`. Note that you should yield classes, not instances of classes, as the point of :class:`~libcst.codemod.MagicArgsCodemodCommand` is to instantiate them for you with the contents of :attr:`~libcst.codemod.CodemodContext.scratch`. """ ... def _instantiate(self, transform: Type[_Codemod]) -> _Codemod: # Grab the expected arguments argspec = inspect.getfullargspec(transform.__init__) args: List[object] = [] kwargs: Dict[str, object] = {} last_default_arg = len(argspec.args) - len(argspec.defaults or ()) for i, arg in enumerate(argspec.args): if arg in ["self", "context"]: # Self is bound, and context we explicitly include below. continue if arg not in self.context.scratch: if i >= last_default_arg: # This arg has a default, so the fact that its missing is fine. continue raise KeyError( f"Visitor {transform.__name__} requires positional arg {arg} but " + "it is not in our context nor does it have a default! It should " + "be provided by an argument returned from the 'add_args' method " + "or populated into context.scratch by a previous transform!" ) # No default, but we found something in scratch. So, forward it. args.append(self.context.scratch[arg]) kwonlydefaults = argspec.kwonlydefaults or {} for kwarg in argspec.kwonlyargs: if kwarg not in self.context.scratch and kwarg not in kwonlydefaults: raise KeyError( f"Visitor {transform.__name__} requires keyword arg {kwarg} but " + "it is not in our context nor does it have a default! It should " + "be provided by an argument returned from the 'add_args' method " + "or populated into context.scratch by a previous transform!" ) kwargs[kwarg] = self.context.scratch.get(kwarg, kwonlydefaults[kwarg]) # Return an instance of the transform with those arguments return transform(self.context, *args, **kwargs) def transform_module_impl(self, tree: Module) -> Module: for transform in self.get_transforms(): inst = self._instantiate(transform) tree = inst.transform_module(tree) return tree LibCST-1.2.0/libcst/codemod/_context.py000066400000000000000000000061371456464173300177050ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from dataclasses import dataclass, field from typing import Any, Dict, List, Optional import libcst as cst import libcst.metadata as meta @dataclass(frozen=True) class CodemodContext: """ A context holding all information that is shared amongst all transforms and visitors in a single codemod invocation. When chaining multiple transforms together, the context holds the state that needs to be passed between transforms. The context is responsible for keeping track of metadata wrappers and the filename of the file that is being modified (if available). """ #: List of warnings gathered while running a codemod. 
Add to this list #: by calling the :meth:`~libcst.codemod.Codemod.warn` method from a class #: that subclasses from :class:`~libcst.codemod.Codemod`, #: :class:`~libcst.codemod.ContextAwareTransformer` or #: :class:`~libcst.codemod.ContextAwareVisitor`. warnings: List[str] = field(default_factory=list) #: Scratch dictionary available for codemods which are spread across multiple #: transforms. Codemods are free to add to this at will. scratch: Dict[str, Any] = field(default_factory=dict) #: The current filename if a codemod is being executed against a file that #: lives on disk. Populated by #: :func:`libcst.codemod.parallel_exec_transform_with_prettyprint` when #: running codemods from the command line. filename: Optional[str] = None #: The current module if a codemod is being executed against a file that #: lives on disk, and the repository root is correctly configured. This #: will take the form of a dotted name such as ``foo.bar.baz`` for a file #: in the repo named ``foo/bar/baz.py``. full_module_name: Optional[str] = None #: The current package if a codemod is being executed against a file that #: lives on disk, and the repository root is correctly configured. This #: will take the form of a dotted name such as ``foo.bar`` for a file #: in the repo named ``foo/bar/baz.py``. full_package_name: Optional[str] = None #: The current top level metadata wrapper for the module being modified. #: To access computed metadata when inside an actively running codemod, use #: the :meth:`~libcst.MetadataDependent.get_metadata` method on #: :class:`~libcst.codemod.Codemod`. wrapper: Optional[cst.MetadataWrapper] = None #: The current repo-level metadata manager for the active codemod. metadata_manager: Optional[meta.FullRepoManager] = None @property def module(self) -> Optional[cst.Module]: """ The current top level module being modified. As a convenience, you can use the :attr:`~libcst.codemod.Codemod.module` property on :class:`~libcst.codemod.Codemod` to refer to this when inside an actively running codemod. """ wrapper = self.wrapper if wrapper is None: return None return wrapper.module LibCST-1.2.0/libcst/codemod/_dummy_pool.py000066400000000000000000000017351456464173300204040ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from types import TracebackType from typing import Callable, Generator, Iterable, Optional, Type, TypeVar RetT = TypeVar("RetT") ArgT = TypeVar("ArgT") class DummyPool: """ Synchronous dummy `multiprocessing.Pool` analogue. """ def __init__(self, processes: Optional[int] = None) -> None: pass def imap_unordered( self, func: Callable[[ArgT], RetT], iterable: Iterable[ArgT], chunksize: Optional[int] = None, ) -> Generator[RetT, None, None]: for args in iterable: yield func(args) def __enter__(self) -> "DummyPool": return self def __exit__( self, exc_type: Optional[Type[Exception]], exc: Optional[Exception], tb: Optional[TracebackType], ) -> None: pass LibCST-1.2.0/libcst/codemod/_runner.py000066400000000000000000000125331456464173300175270ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # """ Provides everything needed to run a CodemodCommand.
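Example (editor's sketch; ``MyCodemod`` stands in for any concrete codemod)::

    result = transform_module(MyCodemod(CodemodContext()), "x = 1\n")
    if isinstance(result, TransformSuccess):
        print(result.code)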
""" import traceback from dataclasses import dataclass from enum import Enum from typing import Optional, Sequence, Union from libcst import parse_module, PartialParserConfig from libcst.codemod._codemod import Codemod # All datastructures defined in this class are pickleable so that they can be used # as a return value with the multiprocessing module. @dataclass(frozen=True) class TransformSuccess: """ A :class:`~libcst.codemod.TransformResult` used when the codemod was successful. Stores all the information we might need to display to the user upon success, as well as the transformed file contents. """ #: All warning messages that were generated during the codemod. warning_messages: Sequence[str] #: The updated code, post-codemod. code: str @dataclass(frozen=True) class TransformFailure: """ A :class:`~libcst.codemod.TransformResult` used when the codemod failed. Stores all the information we might need to display to the user upon a failure. """ #: All warning messages that were generated before the codemod crashed. warning_messages: Sequence[str] #: The exception that was raised during the codemod. error: Exception #: The traceback string that was recorded at the time of exception. traceback_str: str @dataclass(frozen=True) class TransformExit: """ A :class:`~libcst.codemod.TransformResult` used when the codemod was interrupted by the user (e.g. KeyboardInterrupt). """ #: An empty list of warnings, included so that all #: :class:`~libcst.codemod.TransformResult` have a ``warning_messages`` attribute. warning_messages: Sequence[str] = () class SkipReason(Enum): """ An enumeration of all valid reasons for a codemod to skip. """ #: The module was skipped because we detected that it was generated code, and #: we were configured to skip generated files. GENERATED = "generated" #: The module was skipped because we detected that it was blacklisted, and we #: were configured to skip blacklisted files. BLACKLISTED = "blacklisted" #: The module was skipped because the codemod requested us to skip using the #: :class:`~libcst.codemod.SkipFile` exception. OTHER = "other" @dataclass(frozen=True) class TransformSkip: """ A :class:`~libcst.codemod.TransformResult` used when the codemod requested to be skipped. This could be because it's a generated file, or due to filename blacklist, or because the transform raised :class:`~libcst.codemod.SkipFile`. """ #: The reason that we skipped codemodding this module. skip_reason: SkipReason #: The description populated from the :class:`~libcst.codemod.SkipFile` exception. skip_description: str #: All warning messages that were generated before the codemod decided to skip. warning_messages: Sequence[str] = () class SkipFile(Exception): """ Raise this exception to skip codemodding the current file. The exception message should be the reason for skipping. """ TransformResult = Union[ TransformSuccess, TransformFailure, TransformExit, TransformSkip ] def transform_module( transformer: Codemod, code: str, *, python_version: Optional[str] = None ) -> TransformResult: """ Given a module as represented by a string and a :class:`~libcst.codemod.Codemod` that we wish to run, execute the codemod on the code and return a :class:`~libcst.codemod.TransformResult`. This should never raise an exception. On success, this returns a :class:`~libcst.codemod.TransformSuccess` containing any generated warnings as well as the transformed code. If the codemod is interrupted with a Ctrl+C, this returns a :class:`~libcst.codemod.TransformExit`. 
If the codemod elected to skip by throwing a :class:`~libcst.codemod.SkipFile` exception, this will return a :class:`~libcst.codemod.TransformSkip` containing the reason for skipping as well as any warnings that were generated before the codemod decided to skip. If the codemod throws an unexpected exception, this will return a :class:`~libcst.codemod.TransformFailure` containing the exception that occurred as well as any warnings that were generated before the codemod crashed. """ try: input_tree = parse_module( code, config=( PartialParserConfig(python_version=python_version) if python_version is not None else PartialParserConfig() ), ) output_tree = transformer.transform_module(input_tree) return TransformSuccess( code=output_tree.code, warning_messages=transformer.context.warnings ) except KeyboardInterrupt: return TransformExit() except SkipFile as ex: return TransformSkip( skip_description=str(ex), skip_reason=SkipReason.OTHER, warning_messages=transformer.context.warnings, ) except Exception as ex: return TransformFailure( error=ex, traceback_str=traceback.format_exc(), warning_messages=transformer.context.warnings, ) LibCST-1.2.0/libcst/codemod/_testing.py000066400000000000000000000133321456464173300176710ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from textwrap import dedent from typing import Optional, Sequence, Type from libcst import parse_module, PartialParserConfig from libcst.codemod._codemod import Codemod from libcst.codemod._context import CodemodContext from libcst.codemod._runner import SkipFile from libcst.testing.utils import UnitTest # pyre-fixme[13]: This should be an ABC but there are metaclass conflicts due to # the way we implement the data_provider decorator, so pyre complains about the # uninitialized TRANSFORM below. class _CodemodTest: """ Mixin that can be added to a unit test framework in order to provide convenience features. This is provided as an internal-only feature so that CodemodTest can be used with other frameworks. This is necessary since we set a metaclass on our UnitTest implementation. """ TRANSFORM: Type[Codemod] = ... @staticmethod def make_fixture_data(data: str) -> str: """ Given a code string originating from a multi-line triple-quoted string, normalize the code using ``dedent`` and ensure a trailing newline is present. """ lines = dedent(data).split("\n") def filter_line(line: str) -> str: if len(line.strip()) == 0: return "" return line # Get rid of lines that are space only lines = [filter_line(line) for line in lines] # Get rid of leading and trailing newlines (because of """ style strings) while lines and lines[0] == "": lines = lines[1:] while lines and lines[-1] == "": lines = lines[:-1] code = "\n".join(lines) if not code.endswith("\n"): return code + "\n" else: return code def assertCodeEqual(self, expected: str, actual: str) -> None: """ Given an expected and actual code string, makes sure they are equal. This ensures that both the expected and actual are sanitized, so it's safe to use this on strings that may have come from a triple-quoted multi-line string. """ # pyre-ignore This mixin needs to be used with a UnitTest subclass.
self.assertEqual( CodemodTest.make_fixture_data(expected), CodemodTest.make_fixture_data(actual), ) def assertCodemod( self, before: str, after: str, *args: object, context_override: Optional[CodemodContext] = None, python_version: Optional[str] = None, expected_warnings: Optional[Sequence[str]] = None, expected_skip: bool = False, **kwargs: object, ) -> None: """ Given a before and after code string, and any args/kwargs that should be passed to the codemod constructor specified in :attr:`~CodemodTest.TRANSFORM`, validate that the codemod executes as expected. Verify that the codemod completes successfully, unless the ``expected_skip`` option is set to ``True``, in which case verify that the codemod skips. Optionally, a :class:`CodemodContext` can be provided. If none is specified, a default, empty context is created for you. Additionally, the python version for the code parser can be overridden to a valid python version string such as `"3.6"`. If none is specified, the version of the interpreter running your tests will be used. Also, a list of warning strings can be specified and :meth:`~CodemodTest.assertCodemod` will verify that the codemod generates those warnings in the order specified. If it is left out, warnings are not checked. """ context = context_override if context_override is not None else CodemodContext() # pyre-fixme[45]: Cannot instantiate abstract class `Codemod`. transform_instance = self.TRANSFORM(context, *args, **kwargs) input_tree = parse_module( CodemodTest.make_fixture_data(before), config=( PartialParserConfig(python_version=python_version) if python_version is not None else PartialParserConfig() ), ) try: output_tree = transform_instance.transform_module(input_tree) except SkipFile: if not expected_skip: raise output_tree = input_tree else: if expected_skip: # pyre-ignore This mixin needs to be used with a UnitTest subclass. self.fail("Expected SkipFile but was not raised") # pyre-ignore This mixin needs to be used with a UnitTest subclass. self.assertEqual( CodemodTest.make_fixture_data(after), CodemodTest.make_fixture_data(output_tree.code), ) if expected_warnings is not None: # pyre-ignore This mixin needs to be used with a UnitTest subclass. self.assertSequenceEqual(expected_warnings, context.warnings) class CodemodTest(_CodemodTest, UnitTest): """ Base test class for a :class:`Codemod` test. Provides facilities for auto-instantiating and executing a codemod, given the args/kwargs that should be passed to it. Set the :attr:`~CodemodTest.TRANSFORM` class attribute to the :class:`Codemod` class you wish to test and call :meth:`~CodemodTest.assertCodemod` inside your test method to verify it transforms various source code chunks correctly. Note that this is a subclass of ``UnitTest`` so any :class:`CodemodTest` can be executed using your favorite test runner such as the ``unittest`` module. """ LibCST-1.2.0/libcst/codemod/_visitor.py000066400000000000000000000115361456464173300177170ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
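# Editor's orientation sketch (illustrative only; assumes it runs inside an
# active codemod so that context and metadata are populated):
#
#     class NameCollector(ContextAwareVisitor):
#         def visit_Name(self, node: cst.Name) -> None:
#             self.warn(f"saw {node.value}")
#
#     # ...inside some Codemod.transform_module_impl(self, tree):
#     tree.visit(NameCollector(self.context))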
# from typing import Mapping import libcst as cst from libcst import MetadataDependent from libcst.codemod._codemod import Codemod from libcst.codemod._context import CodemodContext from libcst.matchers import MatcherDecoratableTransformer, MatcherDecoratableVisitor from libcst.metadata import ProviderT class ContextAwareTransformer(Codemod, MatcherDecoratableTransformer): """ A transformer which visits using LibCST. Allows visitor-based mutation of a tree. Classes wishing to do arbitrary non-visitor-based mutation on a tree should instead subclass from :class:`Codemod` and implement :meth:`~Codemod.transform_module_impl`. This is a subclass of :class:`~libcst.matchers.MatcherDecoratableTransformer` so all features of matchers as well as :class:`~libcst.CSTTransformer` are available to subclasses of this class. """ def __init__(self, context: CodemodContext) -> None: Codemod.__init__(self, context) MatcherDecoratableTransformer.__init__(self) def transform_module_impl(self, tree: cst.Module) -> cst.Module: return tree.visit(self) class ContextAwareVisitor(MatcherDecoratableVisitor, MetadataDependent): """ A visitor which visits using LibCST. Allows visitor-based collecting of info on a tree. All codemods which wish to implement an information collector should subclass from this instead of directly from :class:`~libcst.matchers.MatcherDecoratableVisitor` or :class:`~libcst.CSTVisitor` since this provides access to the current codemod context. As a result, this class allows access to metadata which was calculated in a parent :class:`~libcst.codemod.Codemod` through the :meth:`~libcst.MetadataDependent.get_metadata` method. Note that you cannot directly run a :class:`~libcst.codemod.ContextAwareVisitor` using :func:`~libcst.codemod.transform_module` because visitors by definition do not transform trees. However, you can instantiate a :class:`~libcst.codemod.ContextAwareVisitor` inside a codemod and pass it to the :class:`~libcst.CSTNode.visit` method on any node in order to run information gathering with metadata and context support. Remember that a :class:`~libcst.codemod.ContextAwareVisitor` is a subclass of :class:`~libcst.MetadataDependent`, meaning that you still need to declare your metadata dependencies with :attr:`~libcst.MetadataDependent.METADATA_DEPENDENCIES` before you can retrieve metadata using :meth:`~libcst.MetadataDependent.get_metadata`, even if the parent codemod has listed its own metadata dependencies. Note also that the dependencies listed on this class must be a strict subset of the dependencies listed in the parent codemod. """ def __init__(self, context: CodemodContext) -> None: MetadataDependent.__init__(self) MatcherDecoratableVisitor.__init__(self) self.context = context dependencies = self.get_inherited_dependencies() if dependencies: wrapper = self.context.wrapper if wrapper is None: raise Exception( f"Attempting to instantiate {self.__class__.__name__} outside of " + "an active transform. This means that metadata hasn't been " + "calculated and we cannot successfully create this visitor." ) for dep in dependencies: if dep not in wrapper._metadata: raise Exception( f"Attempting to access metadata {dep.__name__} that was not a " + "declared dependency of parent transform! This means it is " + "not possible to compute this value. Please ensure that all " + f"parent transforms of {self.__class__.__name__} declare " + f"{dep.__name__} as a metadata dependency." 
) self.metadata: Mapping[ProviderT, Mapping[cst.CSTNode, object]] = { dep: wrapper._metadata[dep] for dep in dependencies } def warn(self, warning: str) -> None: """ Emit a warning that is displayed to the user who has invoked this codemod. """ self.context.warnings.append(warning) @property def module(self) -> cst.Module: """ Reference to the currently-traversed module. Note that this is only available during a transform itself. """ module = self.context.module if module is None: raise Exception( f"Attempted access of {self.__class__.__name__}.module outside of " + "transform_module()." ) return module LibCST-1.2.0/libcst/codemod/commands/000077500000000000000000000000001456464173300173025ustar00rootroot00000000000000LibCST-1.2.0/libcst/codemod/commands/__init__.py000066400000000000000000000002651456464173300214160ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # LibCST-1.2.0/libcst/codemod/commands/add_pyre_directive.py000066400000000000000000000032211456464173300234770ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import re from abc import ABC from typing import Pattern import libcst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand from libcst.helpers import insert_header_comments class AddPyreDirectiveCommand(VisitorBasedCodemodCommand, ABC): PYRE_TAG: str def __init__(self, context: CodemodContext) -> None: super().__init__(context) self._regex_pattern: Pattern[str] = re.compile( rf"^#\s+pyre-{self.PYRE_TAG}\s*$" ) self.needs_add = True def visit_Comment(self, node: libcst.Comment) -> None: if self._regex_pattern.search(node.value): self.needs_add = False def leave_Module( self, original_node: libcst.Module, updated_node: libcst.Module ) -> libcst.Module: # If the tag already exists, don't modify the file. if not self.needs_add: return updated_node return insert_header_comments(updated_node, [f"# pyre-{self.PYRE_TAG}"]) class AddPyreStrictCommand(AddPyreDirectiveCommand): """ Given a source file, we'll add the strict tag if the file doesn't already contain it. """ PYRE_TAG: str = "strict" DESCRIPTION: str = "Add the 'pyre-strict' tag to a module." class AddPyreUnsafeCommand(AddPyreDirectiveCommand): """ Given a source file, we'll add the unsafe tag if the file doesn't already contain it. """ PYRE_TAG: str = "unsafe" DESCRIPTION: str = "Add the 'pyre-unsafe' tag to a module." LibCST-1.2.0/libcst/codemod/commands/add_trailing_commas.py000066400000000000000000000076711456464173300236470ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import textwrap from typing import Dict, Optional import libcst as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand presets_per_formatter: Dict[str, Dict[str, int]] = { "black": { "parameter_count": 1, "argument_count": 2, }, "yapf": { "parameter_count": 2, "argument_count": 2, }, } class AddTrailingCommas(VisitorBasedCodemodCommand): DESCRIPTION: str = textwrap.dedent( """ Codemod that adds trailing commas to arguments in function headers and function calls. 
The idea is that both the black and yapf autoformatters will tend to split headers and function calls so that there is one parameter / argument per line if there is a trailing comma: - Black will always separate them by line - Yapf appears to do so whenever there are at least two arguments Applying this codemod (and then an autoformatter) may make it easier to read function definitions and calls """ ) def __init__( self, context: CodemodContext, formatter: str = "black", parameter_count: Optional[int] = None, argument_count: Optional[int] = None, ) -> None: super().__init__(context) presets = presets_per_formatter.get(formatter) if presets is None: raise ValueError( f"Unknown formatter {formatter!r}. Presets exist for " + ", ".join(presets_per_formatter.keys()) ) self.parameter_count: int = parameter_count or presets["parameter_count"] self.argument_count: int = argument_count or presets["argument_count"] @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( "--formatter", dest="formatter", metavar="FORMATTER", help="Formatter to target (e.g. yapf or black)", type=str, default="black", ) arg_parser.add_argument( "--parameter-count", dest="parameter_count", metavar="PARAMETER_COUNT", help="Minimal number of parameters for us to add trailing comma", type=int, default=None, ) arg_parser.add_argument( "--argument-count", dest="argument_count", metavar="ARGUMENT_COUNT", help="Minimal number of arguments for us to add trailing comma", type=int, default=None, ) def leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters, ) -> cst.Parameters: skip = ( # self.parameter_count is None or len(updated_node.params) < self.parameter_count or ( len(updated_node.params) == 1 and updated_node.params[0].name.value in {"self", "cls"} ) ) if skip: return updated_node else: last_param = updated_node.params[-1] return updated_node.with_changes( params=( *updated_node.params[:-1], last_param.with_changes(comma=cst.Comma()), ), ) def leave_Call( self, original_node: cst.Call, updated_node: cst.Call, ) -> cst.Call: if len(updated_node.args) < self.argument_count: return updated_node else: last_arg = updated_node.args[-1] return updated_node.with_changes( args=( *updated_node.args[:-1], last_arg.with_changes(comma=cst.Comma()), ), ) LibCST-1.2.0/libcst/codemod/commands/convert_format_to_fstring.py000066400000000000000000000372331456464173300251500ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import argparse import ast from typing import Generator, List, Optional, Sequence, Set, Tuple import libcst as cst import libcst.matchers as m from libcst.codemod import ( CodemodContext, ContextAwareTransformer, ContextAwareVisitor, VisitorBasedCodemodCommand, ) def _get_lhs(field: cst.BaseExpression) -> cst.BaseExpression: if isinstance(field, (cst.Name, cst.Integer)): return field elif isinstance(field, (cst.Attribute, cst.Subscript)): return _get_lhs(field.value) else: raise Exception("Unsupported node type!") def _find_expr_from_field_name( fieldname: str, args: Sequence[cst.Arg] ) -> Optional[cst.BaseExpression]: # Things like "0.name" are invalid expressions in python since # we can't tell if name is supposed to be the fraction or a name. # So we do a trick to parse here where we wrap the LHS in parens # and assume LibCST will handle it. if "."
in fieldname: ind, exp = fieldname.split(".", 1) fieldname = f"({ind}).{exp}" field_expr = cst.parse_expression(fieldname) lhs = _get_lhs(field_expr) # Verify we don't have any *args or **kwargs attributes. if any(arg.star != "" for arg in args): return None # Get the index into the arg index: Optional[int] = None if isinstance(lhs, cst.Integer): index = int(lhs.value) if index < 0 or index >= len(args): raise Exception(f"Logic error, arg sequence {index} out of bounds!") elif isinstance(lhs, cst.Name): for i, arg in enumerate(args): kw = arg.keyword if kw is None: continue if kw.value == lhs.value: index = i break if index is None: raise Exception(f"Logic error, arg name {lhs.value} out of bounds!") if index is None: raise Exception(f"Logic error, unsupported fieldname expression {fieldname}!") # Format it! return field_expr.deep_replace(lhs, args[index].value) def _get_field(formatstr: str) -> Tuple[str, Optional[str], Optional[str]]: in_index: int = 0 format_spec: Optional[str] = None conversion: Optional[str] = None # Grab any format spec as long as it's not an array slice for pos, char in enumerate(formatstr): if char == "[": in_index += 1 elif char == "]": in_index -= 1 elif char == ":": if in_index == 0: formatstr, format_spec = (formatstr[:pos], formatstr[pos + 1 :]) break # Grab any conversion if "!" in formatstr: formatstr, conversion = formatstr.split("!", 1) # Return it return formatstr, format_spec, conversion def _get_tokens( # noqa: C901 string: str, ) -> Generator[Tuple[str, Optional[str], Optional[str], Optional[str]], None, None]: length = len(string) prefix: str = "" format_accum: str = "" in_brackets: int = 0 seen_escape: bool = False for pos, char in enumerate(string): if seen_escape: # The last character was an escape character, so consume # this one as well, and then pop out of the escape. if in_brackets == 0: prefix += char else: format_accum += char seen_escape = False continue # We can't escape inside an f-string/format specifier. if in_brackets == 0: # Grab the next character to see if we are an escape sequence. next_char: Optional[str] = None if pos < length - 1: next_char = string[pos + 1] # If this current character is an escape, we want to # not react to it, append it to the current accumulator and # then do the same for the next character. if char == "{" and next_char == "{": seen_escape = True if char == "}" and next_char == "}": seen_escape = True # Only if we are not an escape sequence do we consider these # brackets. if not seen_escape: if char == "{": in_brackets += 1 # We want to add brackets to the format accumulator as # long as they aren't the outermost, because format # specs allow {} expansion.
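# (Illustrative example: in "{0:{width}}" the format spec itself
# contains a nested replacement field, so only the outermost pair of
# brackets delimits the field being accumulated here.)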
if in_brackets == 1: continue if char == "}": in_brackets -= 1 if in_brackets < 0: raise Exception("Stray } in format string!") if in_brackets == 0: field_name, format_spec, conversion = _get_field(format_accum) yield (prefix, field_name, format_spec, conversion) prefix = "" format_accum = "" continue # Place in the correct accumulator if in_brackets == 0: prefix += char else: format_accum += char if in_brackets > 0: raise Exception("Stray { in format string!") if format_accum: raise Exception("Logic error!") # Yield the last bit of information yield (prefix, None, None, None) class StringQuoteGatherer(ContextAwareVisitor): def __init__(self, context: CodemodContext) -> None: super().__init__(context) self.stringends: Set[str] = set() def visit_SimpleString(self, node: cst.SimpleString) -> None: self.stringends.add(node.value[-1]) class StripNewlinesTransformer(ContextAwareTransformer): def leave_ParenthesizedWhitespace( self, original_node: cst.ParenthesizedWhitespace, updated_node: cst.ParenthesizedWhitespace, ) -> cst.SimpleWhitespace: return cst.SimpleWhitespace(" ") class SwitchStringQuotesTransformer(ContextAwareTransformer): def __init__(self, context: CodemodContext, avoid_quote: str) -> None: super().__init__(context) if avoid_quote not in {'"', "'"}: raise Exception("Must specify either ' or \" single quote to avoid.") self.avoid_quote: str = avoid_quote self.replace_quote: str = '"' if avoid_quote == "'" else "'" def leave_SimpleString( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: if self.avoid_quote in updated_node.quote: # Attempt to swap the value out, verify that the string is still identical # before and after transformation. new_quote = updated_node.quote.replace(self.avoid_quote, self.replace_quote) new_value = ( f"{updated_node.prefix}{new_quote}{updated_node.raw_value}{new_quote}" ) try: new_str = ast.literal_eval(new_value) if updated_node.evaluated_value != new_str: # This isn't the same! return updated_node return updated_node.with_changes(value=new_value) except Exception: # Failed to parse string, changing the quoting screwed us up. pass # Either failed to parse the new string, or don't need to make changes. return updated_node class ConvertFormatStringCommand(VisitorBasedCodemodCommand): DESCRIPTION: str = "Converts instances of str.format() to f-string." @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( "--allow-strip-comments", dest="allow_strip_comments", help=( "Allow stripping comments inside .format() calls when converting " + "to f-strings." ), action="store_true", ) arg_parser.add_argument( "--allow-await", dest="allow_await", help=( "Allow converting expressions inside .format() calls that contain " + "an await expression (only compatible with Python 3.7+)." 
), action="store_true", ) def __init__( self, context: CodemodContext, allow_strip_comments: bool = False, allow_await: bool = False, ) -> None: super().__init__(context) self.allow_strip_comments = allow_strip_comments self.allow_await = allow_await def leave_Call( # noqa: C901 self, original_node: cst.Call, updated_node: cst.Call ) -> cst.BaseExpression: # Lets figure out if this is a "".format() call extraction = self.extract( updated_node, m.Call( func=m.Attribute( value=m.SaveMatchedNode(m.SimpleString(), "string"), attr=m.Name("format"), ) ), ) if extraction is not None: fstring: List[cst.BaseFormattedStringContent] = [] inserted_sequence: int = 0 stringnode = cst.ensure_type(extraction["string"], cst.SimpleString) tokens = _get_tokens(stringnode.raw_value) for literal_text, field_name, format_spec, conversion in tokens: if literal_text: fstring.append(cst.FormattedStringText(literal_text)) if field_name is None: # This is not a format-specification continue # Auto-insert field sequence if it is empty if field_name == "": field_name = str(inserted_sequence) inserted_sequence += 1 # Now, if there is a valid format spec, parse it as a f-string # as well, since it allows for insertion of parameters just # like regular f-strings. format_spec_parts: List[cst.BaseFormattedStringContent] = [] if format_spec is not None and len(format_spec) > 0: # Parse the format spec out as a series of tokens as well. format_spec_tokens = _get_tokens(format_spec) for ( spec_literal_text, spec_field_name, spec_format_spec, spec_conversion, ) in format_spec_tokens: if spec_format_spec is not None: # This shouldn't be possible, we don't allow it in the spec! raise Exception("Logic error!") if spec_literal_text: format_spec_parts.append( cst.FormattedStringText(spec_literal_text) ) if spec_field_name is None: # This is not a format-specification continue # Auto-insert field sequence if it is empty if spec_field_name == "": spec_field_name = str(inserted_sequence) inserted_sequence += 1 # Now, convert the spec expression itself. fstring_expression = self._convert_token_to_fstring_expression( spec_field_name, spec_conversion, updated_node.args, stringnode, ) if fstring_expression is None: return updated_node format_spec_parts.append(fstring_expression) # Finally, output the converted value. fstring_expression = self._convert_token_to_fstring_expression( field_name, conversion, updated_node.args, stringnode ) if fstring_expression is None: return updated_node # Technically its valid to add the parts even if it is empty, but # it results in an empty format spec being added which is ugly. if format_spec_parts: fstring_expression = fstring_expression.with_changes( format_spec=format_spec_parts ) fstring.append(fstring_expression) # We converted each part, so lets bang together the f-string itself. return cst.FormattedString( parts=fstring, start=f"f{stringnode.prefix}{stringnode.quote}", end=stringnode.quote, ) return updated_node def _convert_token_to_fstring_expression( self, field_name: str, conversion: Optional[str], arguments: Sequence[cst.Arg], containing_string: cst.SimpleString, ) -> Optional[cst.FormattedStringExpression]: expr = _find_expr_from_field_name(field_name, arguments) if expr is None: # Most likely they used * expansion in a format. self.warn(f"Unsupported field_name {field_name} in format() call") return None # Verify that we don't have any comments or newlines. Comments aren't # allowed in f-strings, and newlines need parenthesization. 
We can # have formattedstrings inside other formattedstrings, but I chose not # to deal with that for now. if self.findall(expr, m.Comment()) and not self.allow_strip_comments: # We could strip comments, but this is a formatting change so # we choose not to for now. self.warn("Unsupported comment in format() call") return None if self.findall(expr, m.FormattedString()): self.warn("Unsupported f-string in format() call") return None if self.findall(expr, m.Await()) and not self.allow_await: # This is fixed in 3.7 but we don't currently have a flag # to enable/disable it. self.warn("Unsupported await in format() call") return None # Stripping newlines is effectively a format-only change. expr = cst.ensure_type( expr.visit(StripNewlinesTransformer(self.context)), cst.BaseExpression, ) # Try our best to swap quotes on any strings that won't fit expr = cst.ensure_type( expr.visit( SwitchStringQuotesTransformer(self.context, containing_string.quote[0]) ), cst.BaseExpression, ) # Verify that the resulting expression doesn't have a backslash # in it. raw_expr_string = self.module.code_for_node(expr) if "\\" in raw_expr_string: self.warn("Unsupported backslash in format expression") return None # For safety's sake, if this is a dict/set or dict/set comprehension, # wrap it in parens so that it doesn't accidentally create an # escape. if (raw_expr_string.startswith("{") or raw_expr_string.endswith("}")) and ( not expr.lpar or not expr.rpar ): expr = expr.with_changes(lpar=[cst.LeftParen()], rpar=[cst.RightParen()]) # Verify that any strings we insert don't have the same quote quote_gatherer = StringQuoteGatherer(self.context) expr.visit(quote_gatherer) for stringend in quote_gatherer.stringends: if stringend in containing_string.quote: self.warn("Cannot embed string with same quote from format() call") return None return cst.FormattedStringExpression(expression=expr, conversion=conversion) LibCST-1.2.0/libcst/codemod/commands/convert_namedtuple_to_dataclass.py000066400000000000000000000053501456464173300262760ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from typing import List, Optional, Sequence import libcst as cst from libcst.codemod import VisitorBasedCodemodCommand from libcst.codemod.visitors import AddImportsVisitor, RemoveImportsVisitor from libcst.metadata import ( ProviderT, QualifiedName, QualifiedNameProvider, QualifiedNameSource, ) class ConvertNamedTupleToDataclassCommand(VisitorBasedCodemodCommand): """ Convert NamedTuple class declarations to Python 3.7 dataclasses. This only performs a conversion at the class declaration level. It does not perform type annotation conversions, nor does it convert NamedTuple-specific attributes and methods. """ DESCRIPTION: str = "Convert NamedTuple class declarations to Python 3.7 dataclasses using the @dataclass decorator."
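# Editorial sketch of the intended transformation (not part of the
# original source; `Point` is a hypothetical class name):
#
#     class Point(NamedTuple):        # before
#         x: int
#         y: int
#
#     @dataclass(frozen=True)         # after
#     class Point:
#         x: int
#         y: int
#
# along with adding `from dataclasses import dataclass` and removing the
# NamedTuple import if it is no longer used.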
METADATA_DEPENDENCIES: Sequence[ProviderT] = (QualifiedNameProvider,) # The 'NamedTuple' we are interested in qualified_namedtuple: QualifiedName = QualifiedName( name="typing.NamedTuple", source=QualifiedNameSource.IMPORT ) def leave_ClassDef( self, original_node: cst.ClassDef, updated_node: cst.ClassDef ) -> cst.ClassDef: new_bases: List[cst.Arg] = [] namedtuple_base: Optional[cst.Arg] = None # Need to examine the original node's bases since they are directly tied to import metadata for base_class in original_node.bases: # Compare the base class's qualified name against the expected typing.NamedTuple if not QualifiedNameProvider.has_name( self, base_class.value, self.qualified_namedtuple ): # Keep all bases that are not of type typing.NamedTuple new_bases.append(base_class) else: namedtuple_base = base_class # We still want to return the updated node in case some of its children have been modified if namedtuple_base is None: return updated_node AddImportsVisitor.add_needed_import(self.context, "dataclasses", "dataclass") RemoveImportsVisitor.remove_unused_import_by_node( self.context, namedtuple_base.value ) call = cst.ensure_type( cst.parse_expression( "dataclass(frozen=True)", config=self.module.config_for_parsing ), cst.Call, ) return updated_node.with_changes( lpar=cst.MaybeSentinel.DEFAULT, rpar=cst.MaybeSentinel.DEFAULT, bases=new_bases, decorators=[*original_node.decorators, cst.Decorator(decorator=call)], ) LibCST-1.2.0/libcst/codemod/commands/convert_percent_format_to_fstring.py000066400000000000000000000122511456464173300266630ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import itertools import re from typing import Callable, cast, List, Sequence import libcst as cst import libcst.matchers as m from libcst.codemod import VisitorBasedCodemodCommand USE_FSTRING_SIMPLE_EXPRESSION_MAX_LENGTH = 30 def _match_simple_string(node: cst.CSTNode) -> bool: if isinstance(node, cst.SimpleString) and not node.prefix.lower().startswith("b"): # SimpleString can be bytes, and f-strings don't support bytes return re.fullmatch("[^%]*(%s[^%]*)+", node.raw_value) is not None return False def _gen_match_simple_expression(module: cst.Module) -> Callable[[cst.CSTNode], bool]: def _match_simple_expression(node: cst.CSTNode) -> bool: # either each element in the Tuple is a simple expression or the entire expression is simple.
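# (Illustrative: for `"%s %s" % (first, last)` each element renders to
# fewer than USE_FSTRING_SIMPLE_EXPRESSION_MAX_LENGTH characters, so the
# right-hand side counts as simple and the conversion proceeds.)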
if ( isinstance(node, cst.Tuple) and all( len(module.code_for_node(elm.value)) < USE_FSTRING_SIMPLE_EXPRESSION_MAX_LENGTH for elm in node.elements ) ) or len(module.code_for_node(node)) < USE_FSTRING_SIMPLE_EXPRESSION_MAX_LENGTH: return True return False return _match_simple_expression class EscapeStringQuote(cst.CSTTransformer): def __init__(self, quote: str) -> None: self.quote = quote super().__init__() def leave_SimpleString( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: if self.quote == original_node.quote: for quo in ["'", '"', "'''", '"""']: if quo != original_node.quote and quo not in original_node.raw_value: escaped_string = cst.SimpleString( original_node.prefix + quo + original_node.raw_value + quo ) if escaped_string.evaluated_value != original_node.evaluated_value: raise Exception( f"Failed to escape string:\n original:{original_node.value}\n escaped:{escaped_string.value}" ) else: return escaped_string raise Exception( f"Cannot find a good quote for escaping the SimpleString: {original_node.value}" ) return original_node class ConvertPercentFormatStringCommand(VisitorBasedCodemodCommand): DESCRIPTION: str = "Converts simple % style string format to f-string." def leave_BinaryOperation( self, original_node: cst.BinaryOperation, updated_node: cst.BinaryOperation ) -> cst.BaseExpression: expr_key = "expr" extracts = m.extract( original_node, m.BinaryOperation( # pyre-fixme[6]: Expected `Union[m._matcher_base.AllOf[typing.Union[m... left=m.MatchIfTrue(_match_simple_string), operator=m.Modulo(), # pyre-fixme[6]: Expected `Union[m._matcher_base.AllOf[typing.Union[m... right=m.SaveMatchedNode( m.MatchIfTrue(_gen_match_simple_expression(self.module)), expr_key, ), ), ) if extracts: exprs = extracts[expr_key] exprs = (exprs,) if not isinstance(exprs, Sequence) else exprs parts = [] simple_string = cst.ensure_type(original_node.left, cst.SimpleString) innards = simple_string.raw_value.replace("{", "{{").replace("}", "}}") tokens = innards.split("%s") token = tokens[0] if len(token) > 0: parts.append(cst.FormattedStringText(value=token)) expressions: List[cst.CSTNode] = list( *itertools.chain( [elm.value for elm in expr.elements] if isinstance(expr, cst.Tuple) else [expr] for expr in exprs ) ) escape_transformer = EscapeStringQuote(simple_string.quote) i = 1 while i < len(tokens): if i - 1 >= len(expressions): # the %-string doesn't come with the same number of elements as the tuple return original_node try: parts.append( cst.FormattedStringExpression( expression=cast( cst.BaseExpression, expressions[i - 1].visit(escape_transformer), ) ) ) except Exception: return original_node token = tokens[i] if len(token) > 0: parts.append(cst.FormattedStringText(value=token)) i += 1 start = f"f{simple_string.prefix}{simple_string.quote}" return cst.FormattedString( parts=parts, start=start, end=simple_string.quote ) return original_node LibCST-1.2.0/libcst/codemod/commands/convert_type_comments.py000066400000000000000000000774601456464173300243160ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
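# Editorial sketch of the transform implemented below (not part of the
# original source): a type comment such as
#
#     x = []  # type: List[int]
#
# becomes the PEP 526 annotated assignment
#
#     x: "List[int]" = []
#
# where the annotation stays quoted by default unless it names a builtin
# type (see _convert_annotation below).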
import argparse import ast import builtins import dataclasses import functools import sys from typing import cast, Dict, List, Optional, Sequence, Set, Tuple, Union from typing_extensions import TypeAlias import libcst as cst import libcst.matchers as m from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand @functools.lru_cache() def _empty_module() -> cst.Module: return cst.parse_module("") def _code_for_node(node: cst.CSTNode) -> str: return _empty_module().code_for_node(node) def _ast_for_statement(node: cst.CSTNode) -> ast.stmt: """ Get the type-comment-enriched python AST for a node. If there are illegal type comments, parsing can raise a SyntaxError. In that case, fall back to parsing with no type comments (which will cause this codemod to ignore the node). """ code = _code_for_node(node) try: return ast.parse(code, type_comments=True).body[-1] except SyntaxError: return ast.parse(code, type_comments=False).body[-1] def _parse_type_comment( type_comment: Optional[str], ) -> Optional[ast.expr]: """ Attempt to parse a type comment. If it is None or if it fails to parse, return None. """ if type_comment is None: return None try: return ast.parse(type_comment, "", "eval").body except SyntaxError: return None def _annotation_for_statement( node: cst.CSTNode, ) -> Optional[ast.expr]: return _parse_type_comment(_ast_for_statement(node).type_comment) def _parse_func_type_comment( func_type_comment: Optional[str], ) -> Optional["ast.FunctionType"]: if func_type_comment is None: return None return ast.parse(func_type_comment, "", "func_type") @functools.lru_cache() def _builtins() -> Set[str]: return set(dir(builtins)) def _is_builtin(annotation: str) -> bool: return annotation in _builtins() def _convert_annotation( raw: str, quote_annotations: bool, ) -> cst.Annotation: """ Convert a raw annotation - which is a string coming from a type comment - into a suitable libcst Annotation node. If `quote_annotations`, we'll always quote annotations unless they are builtin types. The reason for this is to make the codemod safer to apply on legacy code where type comments may well include invalid types that would crash at runtime. """ if _is_builtin(raw): return cst.Annotation(annotation=cst.Name(value=raw)) if not quote_annotations: try: return cst.Annotation(annotation=cst.parse_expression(raw)) except cst.ParserSyntaxError: pass return cst.Annotation(annotation=cst.SimpleString(f'"{raw}"')) def _is_type_comment(comment: Optional[cst.Comment]) -> bool: """ Determine whether a comment is a type comment. Unfortunately, stripping type comments in a location-invariant way requires finding them from pure libcst data. We only use this in function defs, where the precise cst location of the type comment can be hard to predict. """ if comment is None: return False value = comment.value[1:].strip() if not value.startswith("type:"): return False suffix = value.removeprefix("type:").strip().split() if len(suffix) > 0 and suffix[0] == "ignore": return False return True def _strip_type_comment(comment: Optional[cst.Comment]) -> Optional[cst.Comment]: """ Remove the type comment while keeping any following comments.
""" if not _is_type_comment(comment): return comment assert comment is not None idx = comment.value.find("#", 1) if idx < 0: return None return comment.with_changes(value=comment.value[idx:]) class _FailedToApplyAnnotation: pass class _ArityError(Exception): pass UnpackedBindings: TypeAlias = Union[cst.BaseExpression, List["UnpackedBindings"]] UnpackedAnnotations: TypeAlias = Union[str, List["UnpackedAnnotations"]] TargetAnnotationPair: TypeAlias = Tuple[cst.BaseExpression, str] class AnnotationSpreader: """ Utilities to help with lining up tuples of types from type comments with the tuples of values with which they should be associated. """ @staticmethod def unpack_annotation( expression: ast.expr, ) -> UnpackedAnnotations: if isinstance(expression, ast.Tuple): return [ AnnotationSpreader.unpack_annotation(elt) for elt in expression.elts ] else: return ast.unparse(expression) @staticmethod def unpack_target( target: cst.BaseExpression, ) -> UnpackedBindings: """ Take a (non-function-type) type comment and split it into components. A type comment body should always be either a single type or a tuple of types. We work with strings for annotations because without detailed scope analysis that is the safest option for codemods. """ if isinstance(target, cst.Tuple): return [ AnnotationSpreader.unpack_target(element.value) for element in target.elements ] else: return target @staticmethod def annotated_bindings( bindings: UnpackedBindings, annotations: UnpackedAnnotations, ) -> List[Tuple[cst.BaseAssignTargetExpression, str]]: if isinstance(annotations, list): if isinstance(bindings, list) and len(bindings) == len(annotations): # The arities match, so we return the flattened result of # mapping annotated_bindings over each pair. out: List[Tuple[cst.BaseAssignTargetExpression, str]] = [] for binding, annotation in zip(bindings, annotations): out.extend( AnnotationSpreader.annotated_bindings(binding, annotation) ) return out else: # Either mismatched lengths, or multi-type and one-target raise _ArityError() elif isinstance(bindings, list): # multi-target and one-type raise _ArityError() else: assert isinstance(bindings, cst.BaseAssignTargetExpression) return [(bindings, annotations)] @staticmethod def type_declaration( binding: cst.BaseAssignTargetExpression, raw_annotation: str, quote_annotations: bool, ) -> cst.AnnAssign: return cst.AnnAssign( target=binding, annotation=_convert_annotation( raw=raw_annotation, quote_annotations=quote_annotations, ), value=None, ) @staticmethod def type_declaration_statements( bindings: UnpackedBindings, annotations: UnpackedAnnotations, leading_lines: Sequence[cst.EmptyLine], quote_annotations: bool, ) -> List[cst.SimpleStatementLine]: return [ cst.SimpleStatementLine( body=[ AnnotationSpreader.type_declaration( binding=binding, raw_annotation=raw_annotation, quote_annotations=quote_annotations, ) ], leading_lines=leading_lines if i == 0 else [], ) for i, (binding, raw_annotation) in enumerate( AnnotationSpreader.annotated_bindings( bindings=bindings, annotations=annotations, ) ) ] def convert_Assign( node: cst.Assign, annotation: ast.expr, quote_annotations: bool, ) -> Union[ _FailedToApplyAnnotation, cst.AnnAssign, List[Union[cst.AnnAssign, cst.Assign]], ]: # zip the type and target information tother. If there are mismatched # arities, this is a PEP 484 violation (technically we could use # logic beyond the PEP to recover some cases as typing.Tuple, but this # should be rare) so we give up. 
try: annotations = AnnotationSpreader.unpack_annotation(annotation) annotated_targets = [ AnnotationSpreader.annotated_bindings( bindings=AnnotationSpreader.unpack_target(target.target), annotations=annotations, ) for target in node.targets ] except _ArityError: return _FailedToApplyAnnotation() if len(annotated_targets) == 1 and len(annotated_targets[0]) == 1: # We can convert simple one-target assignments into a single AnnAssign binding, raw_annotation = annotated_targets[0][0] return cst.AnnAssign( target=binding, annotation=_convert_annotation( raw=raw_annotation, quote_annotations=quote_annotations, ), value=node.value, semicolon=node.semicolon, ) else: # For multi-target assigns (regardless of whether they are using tuples # on the LHS or multiple `=` tokens or both), we need to add a type # declaration per individual LHS target. type_declarations = [ AnnotationSpreader.type_declaration( binding, raw_annotation, quote_annotations=quote_annotations, ) for annotated_bindings in annotated_targets for binding, raw_annotation in annotated_bindings ] return [ *type_declarations, node, ] @dataclasses.dataclass(frozen=True) class FunctionTypeInfo: arguments: Dict[str, Optional[str]] returns: Optional[str] def is_empty(self) -> bool: return self.returns is None and self.arguments == {} @classmethod def from_cst( cls, node_cst: cst.FunctionDef, is_method: bool, ) -> "FunctionTypeInfo": """ Using the `ast` type comment extraction logic, get type information for a function definition. To understand edge case behavior see the `leave_FunctionDef` docstring. """ node_ast = cast(ast.FunctionDef, _ast_for_statement(node_cst)) # Note: this is guaranteed to have the correct arity. args = [ *node_ast.args.posonlyargs, *node_ast.args.args, *( [] if node_ast.args.vararg is None else [ node_ast.args.vararg, ] ), *node_ast.args.kwonlyargs, *( [] if node_ast.args.kwarg is None else [ node_ast.args.kwarg, ] ), ] try: func_type_annotation = _parse_func_type_comment(node_ast.type_comment) except SyntaxError: # On unparsable function type annotations, ignore type information return cls({}, None) if func_type_annotation is None: return cls( arguments={ arg.arg: arg.type_comment for arg in args if arg.type_comment is not None }, returns=None, ) else: argtypes = func_type_annotation.argtypes returns = ast.unparse(func_type_annotation.returns) if ( len(argtypes) == 1 and isinstance(argtypes[0], ast.Constant) # pyre-ignore [16] Pyre cannot refine constant indexes (yet!) and argtypes[0].value is Ellipsis ): # Only use the return type if the comment was like `(...) -> R` return cls( arguments={arg.arg: arg.type_comment for arg in args}, returns=returns, ) elif len(argtypes) == len(args): # Merge the type comments, preferring inline comments where available return cls( arguments={ arg.arg: arg.type_comment or ast.unparse(from_func_type) for arg, from_func_type in zip(args, argtypes) }, returns=returns, ) elif is_method and len(argtypes) == len(args) - 1: # Merge as above, but skip merging the initial `self` or `cls` arg. return cls( arguments={ args[0].arg: args[0].type_comment, **{ arg.arg: arg.type_comment or ast.unparse(from_func_type) for arg, from_func_type in zip(args[1:], argtypes) }, }, returns=returns, ) else: # On arity mismatches, ignore the type information return cls({}, None) class ConvertTypeComments(VisitorBasedCodemodCommand): DESCRIPTION = """ Codemod that converts type comments into Python 3.6+ style annotations. 
Notes: - This transform requires using the `ast` module, which is not compatible with multiprocessing. So you should run using a recent version of python, and set `--jobs=1` if using `python -m libcst.tool codemod ...` from the commandline. - This transform requires capabilities from `ast` that are not available prior to Python 3.9, so libcst must run on Python 3.9+. The code you are transforming can be Python 3.6+; this limitation applies only to libcst itself. We can handle type comments in the following statement types: - Assign - This is converted into a single AnnAssign when possible - In more complicated cases it will produce multiple AnnAssign nodes with no value (i.e. "type declaration" statements) followed by an Assign - For and With - We prepend both of these with type declaration statements. - FunctionDef - We apply all the types we can find. If we find several: - We prefer any existing annotations to type comments - For parameters, we prefer inline type comments to function-level type comments if we find both. We always apply the type comments as quoted annotations, unless we know that it refers to a builtin. We do not guarantee that the resulting string annotations would parse, but they should never cause failures at module import time. We attempt to: - Always strip type comments for statements where we successfully applied types. - Never strip type comments for statements where we failed to apply types. There are many edge cases possible where the arity of a type hint (which is either a tuple or a func_type) might not match the code. In these cases we generally give up: - For Assign, For, and With, we require that every target of bindings (e.g. a tuple of names being bound) must have exactly the same arity as the comment. - So, for example, we would skip an assignment statement such as ``x = y, z = 1, 2 # type: int, int`` because the arity of ``x`` does not match the arity of the hint. - For FunctionDef, we do *not* check arity of inline parameter type comments but we do skip the transform if the arity of the function does not match the function-level comment. """ # Finding the location of a type comment in a FunctionDef is difficult. # # As a result, if when visiting a FunctionDef header we are able to # successfully extract type information then we aggressively strip type # comments until we reach the first statement in the body. # # Once we get there we have to stop, so that we don't unintentionally remove # unprocessed type comments. # # This state handles tracking everything we need for this. function_type_info_stack: List[FunctionTypeInfo] function_body_stack: List[cst.BaseSuite] aggressively_strip_type_comments: bool @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( "--no-quote-annotations", action="store_true", help=( "Add unquoted annotations. This leads to prettier code " + "but possibly more errors if type comments are invalid." ), ) def __init__( self, context: CodemodContext, no_quote_annotations: bool = False, ) -> None: if (sys.version_info.major, sys.version_info.minor) < (3, 9): # The ast module did not get `unparse` until Python 3.9, # or `type_comments` until Python 3.8 # # For earlier versions of python, raise early instead of failing # later. It might be possible to use libcst parsing and the # typed_ast library to support earlier python versions, but this is # not a high priority.
raise NotImplementedError( "You are trying to run ConvertTypeComments, but libcst " + "needs to be running with Python 3.9+ in order to " + "do this. Try using Python 3.9+ to run your codemod. " + "Note that the target code can be using Python 3.6+, " + "it is only libcst that needs a new Python version." ) super().__init__(context) # flags used to control overall behavior self.quote_annotations: bool = not no_quote_annotations # state used to manage how we traverse nodes in various contexts self.function_type_info_stack = [] self.function_body_stack = [] self.aggressively_strip_type_comments = False def _strip_TrailingWhitespace( self, node: cst.TrailingWhitespace, ) -> cst.TrailingWhitespace: trailing_comment = _strip_type_comment(node.comment) if trailing_comment is not None: return node.with_changes(comment=trailing_comment) return node.with_changes( whitespace=cst.SimpleWhitespace( "" ), # any whitespace came before the comment, so strip it. comment=None, ) def leave_SimpleStatementLine( self, original_node: cst.SimpleStatementLine, updated_node: cst.SimpleStatementLine, ) -> Union[cst.SimpleStatementLine, cst.FlattenSentinel]: """ Convert any SimpleStatementLine containing an Assign with a type comment into one that uses a PEP 526 AnnAssign. """ # determine whether to apply an annotation assign = updated_node.body[-1] if not isinstance(assign, cst.Assign): # only Assign matters return updated_node annotation = _annotation_for_statement(original_node) if annotation is None: return updated_node # At this point we have a single-line Assign with a type comment. # Convert it to an AnnAssign and strip the comment. converted = convert_Assign( node=assign, annotation=annotation, quote_annotations=self.quote_annotations, ) if isinstance(converted, _FailedToApplyAnnotation): # We were unable to consume the type comment, so return the # original code unchanged. # TODO: allow stripping the invalid type comments via a flag return updated_node elif isinstance(converted, cst.AnnAssign): # We were able to convert the Assign into an AnnAssign, so # we can update the node. return updated_node.with_changes( body=[*updated_node.body[:-1], converted], trailing_whitespace=self._strip_TrailingWhitespace( updated_node.trailing_whitespace, ), ) elif isinstance(converted, list): # We need to inject two or more type declarations. # # In this case, we need to split across multiple lines, and # this also means we'll spread any multi-statement lines out # (multi-statement lines violate PEP 8 anyway). # # We still preserve leading lines from before our transform. new_statements = [ *( statement.with_changes( semicolon=cst.MaybeSentinel.DEFAULT, ) for statement in updated_node.body[:-1] ), *converted, ] if len(new_statements) < 2: raise RuntimeError("Unreachable code.") return cst.FlattenSentinel( [ updated_node.with_changes( body=[new_statements[0]], trailing_whitespace=self._strip_TrailingWhitespace( updated_node.trailing_whitespace, ), ), *( cst.SimpleStatementLine(body=[statement]) for statement in new_statements[1:] ), ] ) else: raise RuntimeError(f"Unhandled value {converted}") def leave_For( self, original_node: cst.For, updated_node: cst.For, ) -> Union[cst.For, cst.FlattenSentinel]: """ Convert a For with a type hint on the bound variable(s) to use type declarations. """ # Type comments are only possible when the body is an indented # block, and we need this refinement to work with the header, # so we check and only then extract the type comment.
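# (Illustrative: a loop written as
#     for x, y in pairs:  # type: int, str
# gains two preceding declaration lines, `x: int` and `y: str` (unquoted
# because these are builtins), and the trailing type comment is stripped
# from the loop header.)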
body = updated_node.body if not isinstance(body, cst.IndentedBlock): return updated_node annotation = _annotation_for_statement(original_node) if annotation is None: return updated_node # Zip up the type hint and the bindings. If we hit an arity # error, abort. try: type_declarations = AnnotationSpreader.type_declaration_statements( bindings=AnnotationSpreader.unpack_target(updated_node.target), annotations=AnnotationSpreader.unpack_annotation(annotation), leading_lines=updated_node.leading_lines, quote_annotations=self.quote_annotations, ) except _ArityError: return updated_node # There is no arity error, so we can add the type declaration(s) return cst.FlattenSentinel( [ *type_declarations, updated_node.with_changes( body=body.with_changes( header=self._strip_TrailingWhitespace(body.header) ), leading_lines=[], ), ] ) def leave_With( self, original_node: cst.With, updated_node: cst.With, ) -> Union[cst.With, cst.FlattenSentinel]: """ Convert a With with a type hint on the bound variable(s) to use type declarations. """ # Type comments are only possible when the body is an indented # block, and we need this refinement to work with the header, # so we check and only then extract the type comment. body = updated_node.body if not isinstance(body, cst.IndentedBlock): return updated_node annotation = _annotation_for_statement(original_node) if annotation is None: return updated_node # PEP 484 does not attempt to specify type comment semantics for # multiple with bindings (there's more than one sensible way to # do it), so we make no attempt to handle this targets = [ item.asname.name for item in updated_node.items if item.asname is not None ] if len(targets) != 1: return updated_node target = targets[0] # Zip up the type hint and the bindings. If we hit an arity # error, abort. try: type_declarations = AnnotationSpreader.type_declaration_statements( bindings=AnnotationSpreader.unpack_target(target), annotations=AnnotationSpreader.unpack_annotation(annotation), leading_lines=updated_node.leading_lines, quote_annotations=self.quote_annotations, ) except _ArityError: return updated_node # There is no arity error, so we can add the type declaration(s) return cst.FlattenSentinel( [ *type_declarations, updated_node.with_changes( body=body.with_changes( header=self._strip_TrailingWhitespace(body.header) ), leading_lines=[], ), ] ) # Handle function definitions ------------------------- # **Implementation Notes** # # It is much harder to predict where exactly type comments will live # in function definitions than in Assign / For / With. # # As a result, we use two different patterns: # (A) we aggressively strip out type comments from whitespace between the # start of a function definition and the start of the body, whenever we were # able to extract type information. This is done via mutable state and the # usual visitor pattern. # (B) we also manually reach down to the first statement inside of the # function body and aggressively strip type comments from leading # whitespaces # # PEP 484 underspecifies how to apply type comments to (non-static) # methods - it would be possible to provide a type for `self`, or to omit # it. So we accept either approach when interpreting type comments on # non-static methods: the first argument can have a type provided or not. def _visit_FunctionDef( self, node: cst.FunctionDef, is_method: bool, ) -> None: """ Set up the data we need to handle function definitions: - Parse the type comments.
- Store the resulting function type info on the stack, where it will remain until we use it in `leave_FunctionDef` - Set that we are aggressively stripping type comments, which will remain true until we visit the body. """ function_type_info = FunctionTypeInfo.from_cst(node, is_method=is_method) self.aggressively_strip_type_comments = not function_type_info.is_empty() self.function_type_info_stack.append(function_type_info) self.function_body_stack.append(node.body) @m.call_if_not_inside(m.ClassDef()) @m.visit(m.FunctionDef()) def visit_function( self, node: cst.FunctionDef, ) -> None: return self._visit_FunctionDef( node=node, is_method=False, ) @m.call_if_inside(m.ClassDef()) @m.visit(m.FunctionDef()) def visit_method( self, node: cst.FunctionDef, ) -> None: return self._visit_FunctionDef( node=node, is_method=not any( m.matches(d.decorator, m.Name("staticmethod")) for d in node.decorators ), ) def leave_TrailingWhitespace( self, original_node: cst.TrailingWhitespace, updated_node: cst.TrailingWhitespace, ) -> Union[cst.TrailingWhitespace]: "Aggressively remove type comments when in header if we extracted types." if self.aggressively_strip_type_comments and _is_type_comment( updated_node.comment ): return cst.TrailingWhitespace() else: return updated_node def leave_EmptyLine( self, original_node: cst.EmptyLine, updated_node: cst.EmptyLine, ) -> Union[cst.EmptyLine, cst.RemovalSentinel]: "Aggressively remove type comments when in header if we extracted types." if self.aggressively_strip_type_comments and _is_type_comment( updated_node.comment ): return cst.RemovalSentinel.REMOVE else: return updated_node def visit_FunctionDef_body( self, node: cst.FunctionDef, ) -> None: "Turn off aggressive type comment removal when we've left the header." self.aggressively_strip_type_comments = False def leave_IndentedBlock( self, original_node: cst.IndentedBlock, updated_node: cst.IndentedBlock, ) -> cst.IndentedBlock: "When appropriate, strip function type comment from the function body." # abort unless this is the body of a function we are transforming if len(self.function_body_stack) == 0: return updated_node if original_node is not self.function_body_stack[-1]: return updated_node if self.function_type_info_stack[-1].is_empty(): return updated_node # The comment will be in the body header if it was on the same line # as the colon. if _is_type_comment(updated_node.header.comment): updated_node = updated_node.with_changes( header=cst.TrailingWhitespace(), ) # The comment will be in a leading line of the first body statement # if it was on the first line after the colon. first_statement = updated_node.body[0] if not hasattr(first_statement, "leading_lines"): return updated_node return updated_node.with_changes( body=[ first_statement.with_changes( leading_lines=[ line # pyre-ignore[16]: we refined via `hasattr` for line in first_statement.leading_lines if not _is_type_comment(line.comment) ] ), *updated_node.body[1:], ] ) # Methods for adding type annotations ---- # # By the time we get here, all type comments should already be stripped.
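# Editorial walkthrough (not part of the original source; `f` is a
# hypothetical function): given
#
#     def f(x, y):  # type: (int, str) -> bool
#
# FunctionTypeInfo.from_cst produces arguments={"x": "int", "y": "str"}
# and returns="bool"; leave_Param below annotates each parameter and
# leave_FunctionDef adds the return annotation, yielding
#
#     def f(x: int, y: str) -> bool:
#
# (unquoted because these are builtins; non-builtin names would be
# quoted by default).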
def leave_Param( self, original_node: cst.Param, updated_node: cst.Param, ) -> cst.Param: # ignore type comments if there's already an annotation if updated_node.annotation is not None: return updated_node # find out if there's a type comment and apply it if so function_type_info = self.function_type_info_stack[-1] raw_annotation = function_type_info.arguments.get(updated_node.name.value) if raw_annotation is not None: return updated_node.with_changes( annotation=_convert_annotation( raw=raw_annotation, quote_annotations=self.quote_annotations, ) ) else: return updated_node def leave_FunctionDef( self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef, ) -> cst.FunctionDef: self.function_body_stack.pop() function_type_info = self.function_type_info_stack.pop() if updated_node.returns is None and function_type_info.returns is not None: return updated_node.with_changes( returns=_convert_annotation( raw=function_type_info.returns, quote_annotations=self.quote_annotations, ) ) else: return updated_node def visit_Lambda( self, node: cst.Lambda, ) -> bool: """ Disable traversing under lambdas. They don't have any statements nested inside them so there's no need, and they do have Params which we don't want to transform. """ return False LibCST-1.2.0/libcst/codemod/commands/ensure_import_present.py000066400000000000000000000035161456464173300243140ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import argparse from typing import Generator, Type from libcst.codemod import Codemod, MagicArgsCodemodCommand from libcst.codemod.visitors import AddImportsVisitor class EnsureImportPresentCommand(MagicArgsCodemodCommand): DESCRIPTION: str = ( "Given a module and possibly an entity in that module, add an import " + "as long as one does not already exist." ) @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( "--module", dest="module", metavar="MODULE", help="Module that should be imported.", type=str, required=True, ) arg_parser.add_argument( "--entity", dest="entity", metavar="ENTITY", help=( "Entity that should be imported from module. If left empty, entire " + "module will be imported." ), type=str, default=None, ) arg_parser.add_argument( "--alias", dest="alias", metavar="ALIAS", help=( "Alias that will be used for the imported module or entity. If left " + "empty, no alias will be applied." ), type=str, default=None, ) def get_transforms(self) -> Generator[Type[Codemod], None, None]: AddImportsVisitor.add_needed_import( self.context, self.context.scratch["module"], self.context.scratch["entity"], self.context.scratch["alias"], ) yield AddImportsVisitor LibCST-1.2.0/libcst/codemod/commands/fix_pyre_directives.py000066400000000000000000000100451456464173300237220ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from typing import Dict, Sequence, Union import libcst import libcst.matchers as m from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand from libcst.helpers import insert_header_comments class FixPyreDirectivesCommand(VisitorBasedCodemodCommand): """ Given a source file, we'll move any strict or unsafe tag to the top of the file if it contains one. Also tries to fix typo'd directives.
""" DESCRIPTION: str = "Fixes common misspelling and location errors with pyre tags." PYRE_TAGS: Sequence[str] = ["strict", "unsafe"] def __init__(self, context: CodemodContext) -> None: super().__init__(context) self.move_strict: Dict[str, bool] = {tag: False for tag in self.PYRE_TAGS} self.module_header_tags: Dict[str, int] = {tag: 0 for tag in self.PYRE_TAGS} self.in_module_header: bool = False def visit_Module_header(self, node: libcst.Module) -> None: if self.in_module_header: raise Exception("Logic error!") self.in_module_header = True def leave_Module_header(self, node: libcst.Module) -> None: if not self.in_module_header: raise Exception("Logic error!") self.in_module_header = False def leave_EmptyLine( self, original_node: libcst.EmptyLine, updated_node: libcst.EmptyLine ) -> Union[libcst.EmptyLine, libcst.RemovalSentinel]: # First, find misplaced lines. for tag in self.PYRE_TAGS: if m.matches(updated_node, m.EmptyLine(comment=m.Comment(f"# pyre-{tag}"))): if self.in_module_header: # We only want to remove this if we've already found another # pyre-strict in the header (that means its duplicated). We # also don't want to move the pyre-strict since its already in # the header, so don't mark that we need to move. self.module_header_tags[tag] += 1 if self.module_header_tags[tag] > 1: return libcst.RemoveFromParent() else: return updated_node else: # This showed up outside the module header, so move it inside if self.module_header_tags[tag] < 1: self.move_strict[tag] = True return libcst.RemoveFromParent() # Now, find misnamed lines if m.matches(updated_node, m.EmptyLine(comment=m.Comment(f"# pyre {tag}"))): if self.in_module_header: # We only want to remove this if we've already found another # pyre-strict in the header (that means its duplicated). We # also don't want to move the pyre-strict since its already in # the header, so don't mark that we need to move. self.module_header_tags[tag] += 1 if self.module_header_tags[tag] > 1: return libcst.RemoveFromParent() else: return updated_node.with_changes( comment=libcst.Comment(f"# pyre-{tag}") ) else: # We found an intended pyre-strict, but its spelled wrong. So, remove it # and re-add a new one in leave_Module. if self.module_header_tags[tag] < 1: self.move_strict[tag] = True return libcst.RemoveFromParent() # We found a regular comment, don't care about this. return updated_node def leave_Module( self, original_node: libcst.Module, updated_node: libcst.Module ) -> libcst.Module: comments = [f"# pyre-{tag}" for tag in self.PYRE_TAGS if self.move_strict[tag]] return insert_header_comments(updated_node, comments) LibCST-1.2.0/libcst/codemod/commands/noop.py000066400000000000000000000007431456464173300206330ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst import Module from libcst.codemod import CodemodCommand class NOOPCommand(CodemodCommand): DESCRIPTION: str = "Does absolutely nothing." def transform_module_impl(self, tree: Module) -> Module: # Return the tree as-is, with absolutely no modification return tree LibCST-1.2.0/libcst/codemod/commands/remove_pyre_directive.py000066400000000000000000000032521456464173300242500ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
#
import re
from abc import ABC
from typing import Pattern, Union

import libcst
from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand


class RemovePyreDirectiveCommand(VisitorBasedCodemodCommand, ABC):
    PYRE_TAG: str

    def __init__(self, context: CodemodContext) -> None:
        super().__init__(context)
        self._regex_pattern: Pattern[str] = re.compile(
            rf"^#\s+pyre-{self.PYRE_TAG}\s*$"
        )

    def leave_EmptyLine(
        self, original_node: libcst.EmptyLine, updated_node: libcst.EmptyLine
    ) -> Union[libcst.EmptyLine, libcst.RemovalSentinel]:
        if updated_node.comment is None or not bool(
            self._regex_pattern.search(
                libcst.ensure_type(updated_node.comment, libcst.Comment).value
            )
        ):
            # This is a normal comment
            return updated_node
        # This is a directive comment matching our tag, so remove it.
        return libcst.RemoveFromParent()


class RemovePyreStrictCommand(RemovePyreDirectiveCommand):
    """
    Given a source file, we'll remove any strict tag the file already
    contains.
    """

    DESCRIPTION: str = "Removes the 'pyre-strict' tag from a module."
    PYRE_TAG: str = "strict"


class RemovePyreUnsafeCommand(RemovePyreDirectiveCommand):
    """
    Given a source file, we'll remove any unsafe tag the file already
    contains.
    """

    DESCRIPTION: str = "Removes the 'pyre-unsafe' tag from a module."
    PYRE_TAG: str = "unsafe"
LibCST-1.2.0/libcst/codemod/commands/remove_unused_imports.py000066400000000000000000000065141456464173300243170ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from typing import Set, Tuple, Union

from libcst import Import, ImportFrom, ImportStar, Module
from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand
from libcst.codemod.visitors import GatherCommentsVisitor, RemoveImportsVisitor
from libcst.helpers import get_absolute_module_from_package_for_import
from libcst.metadata import PositionProvider, ProviderT

DEFAULT_SUPPRESS_COMMENT_REGEX = (
    r".*\W(noqa|lint-ignore: ?unused-import|lint-ignore: ?F401)(\W.*)?$"
)


class RemoveUnusedImportsCommand(VisitorBasedCodemodCommand):
    """
    Remove all unused imports from a file based on scope analysis.

    This command analyses individual files in isolation and does not attempt
    to track cross-references between them. If a symbol is imported in a file
    but otherwise unused in it, that import will be removed even if it is
    being referenced from another file.
    """

    DESCRIPTION: str = (
        "Remove all imports that are not used in a file. "
        + "Note: only considers the file in isolation. 
" ) METADATA_DEPENDENCIES: Tuple[ProviderT] = (PositionProvider,) def __init__(self, context: CodemodContext) -> None: super().__init__(context) self._ignored_lines: Set[int] = set() def visit_Module(self, node: Module) -> bool: comment_visitor = GatherCommentsVisitor( self.context, DEFAULT_SUPPRESS_COMMENT_REGEX ) node.visit(comment_visitor) self._ignored_lines = set(comment_visitor.comments.keys()) return True def visit_Import(self, node: Import) -> bool: self._handle_import(node) return False def visit_ImportFrom(self, node: ImportFrom) -> bool: self._handle_import(node) return False def _handle_import(self, node: Union[Import, ImportFrom]) -> None: node_start = self.get_metadata(PositionProvider, node).start.line if node_start in self._ignored_lines: return names = node.names if isinstance(names, ImportStar): return for alias in names: position = self.get_metadata(PositionProvider, alias) lines = set(range(position.start.line, position.end.line + 1)) if lines.isdisjoint(self._ignored_lines): if isinstance(node, Import): RemoveImportsVisitor.remove_unused_import( self.context, module=alias.evaluated_name, asname=alias.evaluated_alias, ) else: module_name = get_absolute_module_from_package_for_import( self.context.full_package_name, node ) if module_name is None: raise ValueError( f"Couldn't get absolute module name for {alias.evaluated_name}" ) RemoveImportsVisitor.remove_unused_import( self.context, module=module_name, obj=alias.evaluated_name, asname=alias.evaluated_alias, ) LibCST-1.2.0/libcst/codemod/commands/rename.py000066400000000000000000000404761456464173300211360ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # pyre-strict import argparse from typing import Callable, Optional, Sequence, Set, Tuple, Union import libcst as cst from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand from libcst.codemod.visitors import AddImportsVisitor, RemoveImportsVisitor from libcst.helpers import get_full_name_for_node from libcst.metadata import QualifiedNameProvider def leave_import_decorator( method: Callable[..., Union[cst.Import, cst.ImportFrom]] ) -> Callable[..., Union[cst.Import, cst.ImportFrom]]: # We want to record any 'as name' that is relevant but only after we leave the corresponding Import/ImportFrom node since # we don't want the 'as name' to interfere with children 'Name' and 'Attribute' nodes. def wrapper( self: "RenameCommand", original_node: Union[cst.Import, cst.ImportFrom], updated_node: Union[cst.Import, cst.ImportFrom], ) -> Union[cst.Import, cst.ImportFrom]: updated_node = method(self, original_node, updated_node) if original_node != updated_node: self.record_asname(original_node) return updated_node return wrapper class RenameCommand(VisitorBasedCodemodCommand): """ Rename all instances of a local or imported object. """ DESCRIPTION: str = "Rename all instances of a local or imported object." METADATA_DEPENDENCIES = (QualifiedNameProvider,) @staticmethod def add_args(arg_parser: argparse.ArgumentParser) -> None: arg_parser.add_argument( "--old_name", dest="old_name", required=True, help="Full dotted name of object to rename. Eg: `foo.bar.baz`", ) arg_parser.add_argument( "--new_name", dest="new_name", required=True, help=( "Full dotted name of replacement object. You may provide a single-colon-delimited name to specify how you want the new import to be structured." 
+ "\nEg: `foo:bar.baz` will be translated to `from foo import bar`." + "\nIf no ':' character is provided, the import statement will default to `from foo.bar import baz` for a `new_name` value of `foo.bar.baz`" + " or simply replace the old import on the spot if the old import is an exact match." ), ) def __init__(self, context: CodemodContext, old_name: str, new_name: str) -> None: super().__init__(context) new_module, has_colon, new_mod_or_obj = new_name.rpartition(":") # Exit early if improperly formatted args. if ":" in new_module: raise ValueError("Error: `new_name` should contain at most one colon.") if ":" in old_name: raise ValueError("Error: `old_name` should not contain any colons.") if not has_colon or not new_module: new_module, _, new_mod_or_obj = new_name.rpartition(".") self.new_name: str = new_name.replace(":", ".").strip(".") self.new_module: str = new_module.replace(":", ".").strip(".") self.new_mod_or_obj: str = new_mod_or_obj # If `new_name` contains a single colon at the end, then we assume the user wants the import # to be structured as 'import new_name'. So both self.new_mod_or_obj and self.old_mod_or_obj # will be empty in this case. if not self.new_mod_or_obj: old_module = old_name old_mod_or_obj = "" else: old_module, _, old_mod_or_obj = old_name.rpartition(".") self.old_name: str = old_name self.old_module: str = old_module self.old_mod_or_obj: str = old_mod_or_obj self.as_name: Optional[Tuple[str, str]] = None # A set of nodes that have been renamed to help with the cleanup of now potentially unused # imports, during import cleanup in `leave_Module`. self.scheduled_removals: Set[cst.CSTNode] = set() # If an import has been renamed while inside an `Import` or `ImportFrom` node, we want to flag # this so that we do not end up with two of the same import. self.bypass_import = False def visit_Import(self, node: cst.Import) -> None: for import_alias in node.names: alias_name = get_full_name_for_node(import_alias.name) if alias_name is not None: if alias_name == self.old_name or alias_name.startswith( self.old_name + "." ): # If the import statement is exactly equivalent to the old name, or we are renaming a top-level module of the import, # it will be taken care of in `leave_Name` or `leave_Attribute` when visiting the Name and Attribute children of this Import. self.bypass_import = True @leave_import_decorator def leave_Import( self, original_node: cst.Import, updated_node: cst.Import ) -> cst.Import: new_names = [] for import_alias in updated_node.names: import_alias_name = import_alias.name import_alias_full_name = get_full_name_for_node(import_alias_name) if import_alias_full_name is None: raise Exception("Could not parse full name for ImportAlias.name node.") if isinstance(import_alias_name, cst.Name) and self.old_name.startswith( import_alias_full_name + "." ): # Might, be in use elsewhere in the code, so schedule a potential removal, and add another alias. new_names.append(import_alias) replacement_module = self.gen_replacement_module(import_alias_full_name) self.bypass_import = True if replacement_module != import_alias_name.value: self.scheduled_removals.add(original_node) new_names.append( cst.ImportAlias(name=cst.Name(value=replacement_module)) ) elif isinstance( import_alias_name, cst.Attribute ) and self.old_name.startswith(import_alias_full_name + "."): # Same idea as above. 
new_names.append(import_alias) replacement_module = self.gen_replacement_module(import_alias_full_name) self.bypass_import = True if replacement_module != import_alias_full_name: self.scheduled_removals.add(original_node) new_name_node: Union[ cst.Attribute, cst.Name ] = self.gen_name_or_attr_node(replacement_module) new_names.append(cst.ImportAlias(name=new_name_node)) else: new_names.append(import_alias) return updated_node.with_changes(names=new_names) def visit_ImportFrom(self, node: cst.ImportFrom) -> None: module = node.module if module is None: return imported_module_name = get_full_name_for_node(module) if imported_module_name is None: return if imported_module_name == self.old_name or imported_module_name.startswith( self.old_name + "." ): # If the imported module is exactly equivalent to the old name or we are renaming a parent module of the current module, # it will be taken care of in `leave_Name` or `leave_Attribute` when visiting the children of this ImportFrom. self.bypass_import = True @leave_import_decorator def leave_ImportFrom( self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom ) -> cst.ImportFrom: module = updated_node.module if module is None: return updated_node imported_module_name = get_full_name_for_node(module) names = original_node.names if imported_module_name is None or not isinstance(names, Sequence): return updated_node else: new_names = [] for import_alias in names: alias_name = get_full_name_for_node(import_alias.name) if alias_name is not None: qual_name = f"{imported_module_name}.{alias_name}" if self.old_name == qual_name: replacement_module = self.gen_replacement_module( imported_module_name ) replacement_obj = self.gen_replacement(alias_name) if not replacement_obj: # The user has requested an `import` statement rather than an `from ... import`. # This will be taken care of in `leave_Module`, in the meantime, schedule for potential removal. new_names.append(import_alias) self.scheduled_removals.add(original_node) continue new_import_alias_name: Union[ cst.Attribute, cst.Name ] = self.gen_name_or_attr_node(replacement_obj) # Rename on the spot only if this is the only imported name under the module. if len(names) == 1: updated_node = updated_node.with_changes( module=cst.parse_expression(replacement_module), ) self.scheduled_removals.add(updated_node) new_names.append(import_alias) # Or if the module name is to stay the same. elif replacement_module == imported_module_name: self.bypass_import = True new_names.append( cst.ImportAlias(name=new_import_alias_name) ) else: if self.old_name.startswith(qual_name + "."): # This import might be in use elsewhere in the code, so schedule a potential removal. self.scheduled_removals.add(original_node) new_names.append(import_alias) return updated_node.with_changes(names=new_names) return updated_node def leave_Name( self, original_node: cst.Name, updated_node: cst.Name ) -> Union[cst.Attribute, cst.Name]: full_name_for_node: str = original_node.value full_replacement_name = self.gen_replacement(full_name_for_node) # If a node has no associated QualifiedName, we are still inside an import statement. 
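        # (The empty set passed below is the default returned when the provider
        # has no entry for this node; an empty result is treated here as a
        # signal that the name only occurs inside an import.)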
inside_import_statement: bool = not self.get_metadata( QualifiedNameProvider, original_node, set() ) if QualifiedNameProvider.has_name(self, original_node, self.old_name) or ( inside_import_statement and full_replacement_name == self.new_name ): if not full_replacement_name: full_replacement_name = self.new_name if not inside_import_statement: self.scheduled_removals.add(original_node) return self.gen_name_or_attr_node(full_replacement_name) return updated_node def leave_Attribute( self, original_node: cst.Attribute, updated_node: cst.Attribute ) -> Union[cst.Name, cst.Attribute]: full_name_for_node = get_full_name_for_node(original_node) if full_name_for_node is None: raise Exception("Could not parse full name for Attribute node.") full_replacement_name = self.gen_replacement(full_name_for_node) # If a node has no associated QualifiedName, we are still inside an import statement. inside_import_statement: bool = not self.get_metadata( QualifiedNameProvider, original_node, set() ) if QualifiedNameProvider.has_name( self, original_node, self.old_name, ) or (inside_import_statement and full_replacement_name == self.new_name): new_value, new_attr = self.new_module, self.new_mod_or_obj if not inside_import_statement: self.scheduled_removals.add(original_node.value) if full_replacement_name == self.new_name: return updated_node.with_changes( value=cst.parse_expression(new_value), attr=cst.Name(value=new_attr.rstrip(".")), ) return self.gen_name_or_attr_node(new_attr) return updated_node def leave_Module( self, original_node: cst.Module, updated_node: cst.Module ) -> cst.Module: for removal_node in self.scheduled_removals: RemoveImportsVisitor.remove_unused_import_by_node( self.context, removal_node ) # If bypass_import is False, we know that no import statements were directly renamed, and the fact # that we have any `self.scheduled_removals` tells us we encountered a matching `old_name` in the code. if not self.bypass_import and self.scheduled_removals: if self.new_module: new_obj: Optional[str] = ( self.new_mod_or_obj.split(".")[0] if self.new_mod_or_obj else None ) AddImportsVisitor.add_needed_import( self.context, module=self.new_module, obj=new_obj ) return updated_node def gen_replacement(self, original_name: str) -> str: module_as_name = self.as_name if module_as_name is not None: if original_name == module_as_name[0]: original_name = module_as_name[1] elif original_name.startswith(module_as_name[0] + "."): original_name = original_name.replace( module_as_name[0] + ".", module_as_name[1] + ".", 1 ) if original_name == self.old_mod_or_obj: return self.new_mod_or_obj elif original_name == ".".join([self.old_module, self.old_mod_or_obj]): return self.new_name elif original_name.endswith("." + self.old_mod_or_obj): return self.new_mod_or_obj else: return self.gen_replacement_module(original_name) def gen_replacement_module(self, original_module: str) -> str: return self.new_module if original_module == self.old_module else "" def gen_name_or_attr_node( self, dotted_expression: str ) -> Union[cst.Attribute, cst.Name]: name_or_attr_node: cst.BaseExpression = cst.parse_expression(dotted_expression) if not isinstance(name_or_attr_node, (cst.Name, cst.Attribute)): raise Exception( "`parse_expression()` on dotted path returned non-Attribute-or-Name." ) return name_or_attr_node def record_asname(self, original_node: Union[cst.Import, cst.ImportFrom]) -> None: # Record the import's `as` name if it has one, and set the attribute mapping. 
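        # (For example, `import foo.bar as fb` is recorded as ("fb", "foo.bar"),
        # so that `gen_replacement` can expand a reference like `fb.baz` back
        # to `foo.bar.baz` before matching it against `old_name`.)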
names = original_node.names if not isinstance(names, Sequence): return for import_alias in names: alias_name = get_full_name_for_node(import_alias.name) if isinstance(original_node, cst.ImportFrom): module = original_node.module if module is None: return module_name = get_full_name_for_node(module) if module_name is None: return qual_name = f"{module_name}.{alias_name}" else: qual_name = alias_name if qual_name is not None and alias_name is not None: if qual_name == self.old_name or self.old_name.startswith( qual_name + "." ): as_name_optional = import_alias.asname as_name_node = ( as_name_optional.name if as_name_optional is not None else None ) if as_name_node is not None and isinstance( as_name_node, (cst.Name, cst.Attribute) ): full_as_name = get_full_name_for_node(as_name_node) if full_as_name is not None: self.as_name = (full_as_name, alias_name) LibCST-1.2.0/libcst/codemod/commands/strip_strings_from_types.py000066400000000000000000000040761456464173300250440ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from typing import Union import libcst import libcst.matchers as m from libcst import parse_expression from libcst.codemod import VisitorBasedCodemodCommand from libcst.codemod.visitors import AddImportsVisitor from libcst.metadata import QualifiedNameProvider class StripStringsCommand(VisitorBasedCodemodCommand): DESCRIPTION: str = ( "Converts string type annotations to 3.7-compatible forward references." ) METADATA_DEPENDENCIES = (QualifiedNameProvider,) # We want to gate the SimpleString visitor below to only SimpleStrings inside # an Annotation. @m.call_if_inside(m.Annotation()) # We also want to gate the SimpleString visitor below to ensure that we don't # erroneously strip strings from a Literal. @m.call_if_not_inside( m.Subscript( # We could match on value=m.Name("Literal") here, but then we might miss # instances where people are importing typing_extensions directly, or # importing Literal as an alias. value=m.MatchMetadataIfTrue( QualifiedNameProvider, lambda qualnames: any( qualname.name == "typing_extensions.Literal" for qualname in qualnames ), ) ) ) def leave_SimpleString( self, original_node: libcst.SimpleString, updated_node: libcst.SimpleString ) -> Union[libcst.SimpleString, libcst.BaseExpression]: AddImportsVisitor.add_needed_import(self.context, "__future__", "annotations") evaluated_value = updated_node.evaluated_value # Just use LibCST to evaluate the expression itself, and insert that as the # annotation. if isinstance(evaluated_value, str): return parse_expression( evaluated_value, config=self.module.config_for_parsing ) else: return updated_node LibCST-1.2.0/libcst/codemod/commands/tests/000077500000000000000000000000001456464173300204445ustar00rootroot00000000000000LibCST-1.2.0/libcst/codemod/commands/tests/__init__.py000066400000000000000000000002651456464173300225600ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # LibCST-1.2.0/libcst/codemod/commands/tests/test_add_pyre_directive.py000066400000000000000000000062551456464173300257120ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# from libcst.codemod import CodemodTest from libcst.codemod.commands.add_pyre_directive import AddPyreUnsafeCommand class TestAddPyreUnsafeCommand(CodemodTest): TRANSFORM = AddPyreUnsafeCommand def test_add_to_file(self) -> None: before = """ def baz() -> List[Foo]: pass """ after = """ # pyre-unsafe def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_add_to_file_with_pyre_unsafe(self) -> None: """ We shouldn't be adding pyre-unsafe to a file that already has it. """ before = """ # pyre-unsafe def baz() -> List[Foo]: pass """ after = """ # pyre-unsafe def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_add_to_file_with_pyre_unsafe_after(self) -> None: """ We shouldn't be adding pyre-unsafe to a file that already has it. """ before = """ # THIS IS A COMMENT! # pyre-unsafe def baz() -> List[Foo]: pass """ after = """ # THIS IS A COMMENT! # pyre-unsafe def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_add_to_file_with_pyre_unsafe_before(self) -> None: """ We shouldn't be adding pyre-unsafe to a file that already has it. """ before = """ # pyre-unsafe # THIS IS A COMMENT! def baz() -> List[Foo]: pass """ after = """ # pyre-unsafe # THIS IS A COMMENT! def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_empty_file(self) -> None: """ If a file is empty, we should still add it. """ before = "" after = "# pyre-unsafe" self.assertCodemod(before, after) def test_add_to_file_with_comment(self) -> None: """ We should add pyre-unsafe after the last comment at the top of a file. """ before = """ # YO I'M A COMMENT def baz() -> List[Foo]: pass """ after = """ # YO I'M A COMMENT # pyre-unsafe def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_add_to_file_with_import(self) -> None: """ Tests that adding to a file with an import works properly. """ before = """ from typing import List def baz() -> List[Foo]: pass """ after = """ # pyre-unsafe from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) LibCST-1.2.0/libcst/codemod/commands/tests/test_add_trailing_commas.py000066400000000000000000000040661456464173300260430ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod import CodemodTest from libcst.codemod.commands.add_trailing_commas import AddTrailingCommas class AddTrailingCommasTest(CodemodTest): TRANSFORM = AddTrailingCommas def test_transform_defines(self) -> None: before = """ def f(x, y): pass """ after = """ def f(x, y,): pass """ self.assertCodemod(before, after) def test_skip_transforming_defines(self) -> None: before = """ # skip defines with no params. def f0(): pass # skip defines with a single param named `self`. 
class Foo: def __init__(self): pass """ after = before self.assertCodemod(before, after) def test_transform_calls(self) -> None: before = """ f(a, b, c) g(x=a, y=b, z=c) """ after = """ f(a, b, c,) g(x=a, y=b, z=c,) """ self.assertCodemod(before, after) def test_skip_transforming_calls(self) -> None: before = """ # skip empty calls f() # skip calls with one argument g(a) g(x=a) """ after = before self.assertCodemod(before, after) def test_using_yapf_presets(self) -> None: before = """ def f(x): # skip single parameters for yapf pass def g(x, y): pass """ after = """ def f(x): # skip single parameters for yapf pass def g(x, y,): pass """ self.assertCodemod(before, after, formatter="yapf") def test_using_custom_presets(self) -> None: before = """ def f(x, y, z): pass f(5, 6, 7) """ after = before self.assertCodemod(before, after, parameter_count=4, argument_count=4) LibCST-1.2.0/libcst/codemod/commands/tests/test_convert_format_to_fstring.py000066400000000000000000000256441456464173300273560ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod import CodemodTest from libcst.codemod.commands.convert_format_to_fstring import ConvertFormatStringCommand class ConvertFormatStringCommandTest(CodemodTest): TRANSFORM = ConvertFormatStringCommand def test_noop(self) -> None: """ Should do nothing, since there's nothing to do. """ before = """ def foo() -> str: return "foo" def bar(baz: str) -> str: return baz.format(bla, baz) """ after = """ def foo() -> str: return "foo" def bar(baz: str) -> str: return baz.format(bla, baz) """ self.assertCodemod(before, after) def test_unsupported_expansion(self) -> None: """ Should do nothing, since we can't safely expand at compile-time. """ before = """ def baz() -> str: return "{}: {}".format(*baz) def foobar() -> str: return "{x}: {y}".format(**baz) """ after = """ def baz() -> str: return "{}: {}".format(*baz) def foobar() -> str: return "{x}: {y}".format(**baz) """ self.assertCodemod( before, after, expected_warnings=[ "Unsupported field_name 0 in format() call", "Unsupported field_name x in format() call", ], ) def test_unsupported_expression(self) -> None: """ Should do nothing, since we can't safely expand these expressions. 
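
        For instance, `"{}".format(f'bla')` would require nesting an f-string,
        and several of the cases below would need backslashes or same-quote
        strings inside the replacement field; both were illegal in f-strings
        before PEP 701, so the codemod warns and leaves them alone.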
""" before = """ def foo() -> str: return "{}".format(f'bla') def bar() -> str: return "{}".format("'bla'") def baz() -> str: return "{}".format(1 +\\ 2) def foobar() -> str: return "{}".format(( 1 + # Woah, comment 2 )) def foobarbaz() -> str: return "{}".format('\\n') async def awaitable() -> str: return "{}".format(await bla()) """ after = """ def foo() -> str: return "{}".format(f'bla') def bar() -> str: return "{}".format("'bla'") def baz() -> str: return "{}".format(1 +\\ 2) def foobar() -> str: return "{}".format(( 1 + # Woah, comment 2 )) def foobarbaz() -> str: return "{}".format('\\n') async def awaitable() -> str: return "{}".format(await bla()) """ self.assertCodemod( before, after, expected_warnings=[ "Unsupported f-string in format() call", "Cannot embed string with same quote from format() call", "Unsupported backslash in format expression", "Unsupported comment in format() call", "Unsupported backslash in format expression", "Unsupported await in format() call", ], # await isn't supported inside functions in 3.6 python_version="3.7", ) def test_enable_unsupported_comments(self) -> None: """ Should codemod code with a comment in it, by removing the comment. """ before = """ def foobar() -> str: return "{}".format(( 1 + # Woah, comment 2 )) """ after = """ def foobar() -> str: return f"{( 1 + 2 )}" """ self.assertCodemod( before, after, allow_strip_comments=True, python_version="3.7", ) def test_enable_unsupported_await(self) -> None: """ Should codemod code with an await in it, by enabling 3.7+ behavior. """ before = """ async def awaitable() -> str: return "{}".format(await bla()) """ after = """ async def awaitable() -> str: return f"{await bla()}" """ self.assertCodemod( before, after, allow_await=True, python_version="3.7", ) def test_formatspec_conversion(self) -> None: """ Should convert a format specifier which includes format-spec mini language of its own as well as several basic varieties. """ before = """ def foo() -> str: return "{0:#0{1}x}".format(1, 4) def bar() -> str: return "{:#0{}x} {}".format(1, 4, 5) def baz() -> str: return "{x:#0{y}x}".format(x=1, y=4) def foobar() -> str: return "{:0>3d}".format(x) """ after = """ def foo() -> str: return f"{1:#0{4}x}" def bar() -> str: return f"{1:#0{4}x} {5}" def baz() -> str: return f"{1:#0{4}x}" def foobar() -> str: return f"{x:0>3d}" """ self.assertCodemod( before, after, ) def test_position_replacement(self) -> None: """ Should convert a format with positional-only parameters. """ before = """ def foo() -> str: return "{}".format(baz) def bar() -> str: return "{} {} {}".format(foo(), baz, foobar) def baz() -> str: return "foo: {}, baz: {}, foobar: {}!".format(foo(), baz, foobar) def foobar() -> str: return "foo: {2}, baz: {1}, foobar: {0}!".format(foobar, baz, foo()) """ after = """ def foo() -> str: return f"{baz}" def bar() -> str: return f"{foo()} {baz} {foobar}" def baz() -> str: return f"foo: {foo()}, baz: {baz}, foobar: {foobar}!" def foobar() -> str: return f"foo: {foo()}, baz: {baz}, foobar: {foobar}!" """ self.assertCodemod(before, after) def test_name_replacement(self) -> None: """ Should convert a format with name-only parameters. 
""" before = """ def foo() -> str: return "{baz}".format(baz=baz) def bar() -> str: return "{a} {b} {c}".format(a=foo(), b=baz, c=foobar) """ after = """ def foo() -> str: return f"{baz}" def bar() -> str: return f"{foo()} {baz} {foobar}" """ self.assertCodemod(before, after) def test_replacement_with_escapes(self) -> None: """ Should convert a format while not dropping escape sequences """ before = r""" def foo() -> str: return '"bla": {}\n'.format(baz) def foobar() -> str: return "{{bla}}: {}".format(baz) def bar() -> str: return r"'bla': {}\n".format(baz) def barbaz() -> str: return r"{{bla}}: {}\n".format(baz) def foobarbaz() -> str: return "{{min={}, max={}}}".format(minval, maxval) """ after = r""" def foo() -> str: return f'"bla": {baz}\n' def foobar() -> str: return f"{{bla}}: {baz}" def bar() -> str: return fr"'bla': {baz}\n" def barbaz() -> str: return fr"{{bla}}: {baz}\n" def foobarbaz() -> str: return f"{{min={minval}, max={maxval}}}" """ self.assertCodemod(before, after) def test_replacement_with_expression(self) -> None: """ Should convert a format with attr/subscript expression. """ before = """ def foo() -> str: return "{baz.name}".format(baz=baz) def bar() -> str: return "{baz[0]}".format(baz=baz) def foobar() -> str: return "{0.name}".format(baz) def baz() -> str: return "{0[0]}".format(baz) """ after = """ def foo() -> str: return f"{baz.name}" def bar() -> str: return f"{baz[0]}" def foobar() -> str: return f"{baz.name}" def baz() -> str: return f"{baz[0]}" """ self.assertCodemod(before, after) def test_replacement_with_conversion(self) -> None: """ Should convert a format which uses a conversion """ before = r""" def foo() -> str: return "bla: {0!r}\n".format(baz) """ after = r""" def foo() -> str: return f"bla: {baz!r}\n" """ self.assertCodemod(before, after) def test_replacement_with_newline(self) -> None: """ Should convert a format which uses a conversion """ before = r""" def foo() -> str: return "bla: {}\n".format( baz + bar ) """ after = r""" def foo() -> str: return f"bla: {baz + bar}\n" """ self.assertCodemod(before, after) def test_replacement_with_string(self) -> None: """ Should convert a format which uses string """ before = r""" def foo() -> str: return "bla: {}".format('baz') def bar() -> str: return 'bla: {}'.format("baz") def baz() -> str: return "bla: {}".format("baz") """ after = r""" def foo() -> str: return f"bla: {'baz'}" def bar() -> str: return f'bla: {"baz"}' def baz() -> str: return f"bla: {'baz'}" """ self.assertCodemod(before, after) def test_replacement_with_dict(self) -> None: """ Should convert a format which uses dict """ before = r""" def foo() -> str: return "bla: {}".format({'foo': 'bar'}) """ after = r""" def foo() -> str: return f"bla: {({'foo': 'bar'})}" """ self.assertCodemod(before, after) LibCST-1.2.0/libcst/codemod/commands/tests/test_convert_namedtuple_to_dataclass.py000066400000000000000000000106271456464173300305020ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod import CodemodTest from libcst.codemod.commands.convert_namedtuple_to_dataclass import ( ConvertNamedTupleToDataclassCommand, ) class ConvertNamedTupleToDataclassCommandTest(CodemodTest): TRANSFORM = ConvertNamedTupleToDataclassCommand def test_no_change(self) -> None: """ Should result in no change as there are no children of NamedTuple. 
""" before = """ @dataclass(frozen=True) class Foo: pass """ after = """ @dataclass(frozen=True) class Foo: pass """ self.assertCodemod(before, after) def test_change(self) -> None: """ Should remove the NamedTuple import along with its use as a base class for Foo. Should import dataclasses.dataclass and annotate Foo. """ before = """ from typing import NamedTuple class Foo(NamedTuple): pass """ after = """ from dataclasses import dataclass @dataclass(frozen=True) class Foo: pass """ self.assertCodemod(before, after) def test_with_decorator_already(self) -> None: """ Should retain existing decorator. """ before = """ from typing import NamedTuple @other_decorator class Foo(NamedTuple): pass """ after = """ from dataclasses import dataclass @other_decorator @dataclass(frozen=True) class Foo: pass """ self.assertCodemod(before, after) def test_multiple_bases(self) -> None: """ Should retain all existing bases other than NamedTuple. """ before = """ from typing import NamedTuple class Foo(NamedTuple, OtherBase, YetAnotherBase): pass """ after = """ from dataclasses import dataclass @dataclass(frozen=True) class Foo(OtherBase, YetAnotherBase): pass """ self.assertCodemod(before, after) def test_nested_classes(self) -> None: """ Should perform expected changes on inner classes. """ before = """ from typing import NamedTuple class OuterClass: class InnerClass(NamedTuple): pass """ after = """ from dataclasses import dataclass class OuterClass: @dataclass(frozen=True) class InnerClass: pass """ self.assertCodemod(before, after) def test_aliased_object_import(self) -> None: """ Should detect aliased NamedTuple object import and base. """ before = """ from typing import NamedTuple as nt class Foo(nt): pass """ after = """ from dataclasses import dataclass @dataclass(frozen=True) class Foo: pass """ self.assertCodemod(before, after) def test_aliased_module_import(self) -> None: """ Should detect aliased `typing` module import and base. """ before = """ import typing as typ class Foo(typ.NamedTuple): pass """ after = """ from dataclasses import dataclass @dataclass(frozen=True) class Foo: pass """ self.assertCodemod(before, after) def test_other_unused_imports_not_removed(self) -> None: """ Should not remove any imports other than NamedTuple, even if they are also unused. """ before = """ from typing import NamedTuple import SomeUnusedImport class Foo(NamedTuple): pass """ after = """ import SomeUnusedImport from dataclasses import dataclass @dataclass(frozen=True) class Foo: pass """ self.assertCodemod(before, after) LibCST-1.2.0/libcst/codemod/commands/tests/test_convert_percent_format_to_fstring.py000066400000000000000000000041661456464173300310720ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod import CodemodTest from libcst.codemod.commands.convert_percent_format_to_fstring import ( ConvertPercentFormatStringCommand, ) class ConvertPercentFormatStringCommandTest(CodemodTest): TRANSFORM = ConvertPercentFormatStringCommand def test_simple_cases(self) -> None: self.assertCodemod('"a name: %s" % name', 'f"a name: {name}"') self.assertCodemod( '"an attribute %s ." 
% obj.attr', 'f"an attribute {obj.attr} ."' ) self.assertCodemod('r"raw string value=%s" % val', 'fr"raw string value={val}"') self.assertCodemod( '"The type of var: %s" % type(var)', 'f"The type of var: {type(var)}"' ) self.assertCodemod( '"type of var: %s, value of var: %s" % (type(var), var)', 'f"type of var: {type(var)}, value of var: {var}"', ) self.assertCodemod( '"var1: %s, var2: %s, var3: %s, var4: %s" % (class_object.attribute, dict_lookup["some_key"], some_module.some_function(), var4)', '''f"var1: {class_object.attribute}, var2: {dict_lookup['some_key']}, var3: {some_module.some_function()}, var4: {var4}"''', ) def test_escaping(self) -> None: self.assertCodemod('"%s" % "hi"', '''f"{'hi'}"''') # escape quote self.assertCodemod('"{%s}" % val', 'f"{{{val}}}"') # escape curly bracket self.assertCodemod('"{%s" % val', 'f"{{{val}"') # escape curly bracket self.assertCodemod( "'%s\" double quote is used' % var", "f'{var}\" double quote is used'" ) # escape quote self.assertCodemod( '"a list: %s" % " ".join(var)', '''f"a list: {' '.join(var)}"''' ) # escape quote def test_not_supported_case(self) -> None: code = '"%s" % obj.this_is_a_very_long_expression(parameter)["a_very_long_key"]' self.assertCodemod(code, code) code = 'b"a type %s" % var' self.assertCodemod(code, code) LibCST-1.2.0/libcst/codemod/commands/tests/test_convert_type_comments.py000066400000000000000000000331651456464173300265130ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys from typing import Any from libcst.codemod import CodemodTest from libcst.codemod.commands.convert_type_comments import ConvertTypeComments class TestConvertTypeCommentsBase(CodemodTest): maxDiff = 1500 TRANSFORM = ConvertTypeComments def assertCodemod39Plus(self, before: str, after: str, **kwargs: Any) -> None: """ Assert that the codemod works on Python 3.9+, and that we raise a NotImplementedError on other Python versions. 
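
        (The version gate presumably reflects the transform's reliance on
        parsing type comments out of the source, which this codemod only
        supports on Python 3.9 and newer.)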
""" if (sys.version_info.major, sys.version_info.minor) < (3, 9): with self.assertRaises(NotImplementedError): super().assertCodemod(before, after, **kwargs) else: super().assertCodemod(before, after, **kwargs) class TestConvertTypeComments_AssignForWith(TestConvertTypeCommentsBase): def test_preserves_trailing_comment(self) -> None: before = """ y = 5 # type: int # foo """ after = """ y: int = 5 # foo """ self.assertCodemod39Plus(before, after) def test_convert_assignments(self) -> None: before = """ y = 5 # type: int z = ('this', 7) # type: typing.Tuple[str, int] """ after = """ y: int = 5 z: "typing.Tuple[str, int]" = ('this', 7) """ self.assertCodemod39Plus(before, after) def test_convert_assignments_in_context(self) -> None: """ Also verify that our matching works regardless of spacing """ before = """ def foo(): z = ('this', 7) # type: typing.Tuple[str, int] class C: attr0 = 10# type: int def __init__(self): self.attr1 = True # type: bool """ after = """ def foo(): z: "typing.Tuple[str, int]" = ('this', 7) class C: attr0: int = 10 def __init__(self): self.attr1: bool = True """ self.assertCodemod39Plus(before, after) def test_multiple_elements_in_assign_lhs(self) -> None: before = """ x, y = [], [] # type: List[int], List[str] z, w = [], [] # type: (List[int], List[str]) a, b, *c = range(5) # type: float, float, List[float] d, (e1, e2) = foo() # type: float, (int, str) """ after = """ x: "List[int]" y: "List[str]" x, y = [], [] z: "List[int]" w: "List[str]" z, w = [], [] a: float b: float c: "List[float]" a, b, *c = range(5) d: float e1: int e2: str d, (e1, e2) = foo() """ self.assertCodemod39Plus(before, after) def test_multiple_assignments(self) -> None: before = """ x = y = z = 15 # type: int a, b = c, d = 'this', 'that' # type: (str, str) """ after = """ x: int y: int z: int x = y = z = 15 a: str b: str c: str d: str a, b = c, d = 'this', 'that' """ self.assertCodemod39Plus(before, after) def test_semicolons_with_assignment(self) -> None: """ When we convert an Assign to an AnnAssign, preserve semicolons. But if we have to add separate type declarations, expand them. """ before = """ foo(); x = 12 # type: int bar(); y, z = baz() # type: int, str """ after = """ foo(); x: int = 12 bar() y: int z: str y, z = baz() """ self.assertCodemod39Plus(before, after) def test_converting_for_statements(self) -> None: before = """ # simple binding for x in foo(): # type: int pass # nested binding for (a, (b, c)) in bar(): # type: int, (str, float) pass """ after = """ # simple binding x: int for x in foo(): pass # nested binding a: int b: str c: float for (a, (b, c)) in bar(): pass """ self.assertCodemod39Plus(before, after) def test_converting_with_statements(self) -> None: before = """ # simple binding with open('file') as f: # type: File pass # simple binding, with extra items with foo(), open('file') as f, bar(): # type: File pass # nested binding with bar() as (a, (b, c)): # type: int, (str, float) pass """ after = """ # simple binding f: "File" with open('file') as f: pass # simple binding, with extra items f: "File" with foo(), open('file') as f, bar(): pass # nested binding a: int b: str c: float with bar() as (a, (b, c)): pass """ self.assertCodemod39Plus(before, after) def test_no_change_when_type_comment_unused(self) -> None: before = """ # type-ignores are not type comments x = 10 # type: ignore # a commented type comment (per PEP 484) is not a type comment z = 15 # # type: int # ignore unparseable type comments var = "var" # type: this is not a python type! 
# a type comment in an illegal location won't be used print("hello") # type: None # These examples are not PEP 484 compliant, and result in arity errors a, b = 1, 2 # type: Tuple[int, int] w = foo() # type: float, str # Multiple assigns with mismatched LHS arities always result in arity # errors, and we only codemod if each target is error-free v = v0, v1 = (3, 5) # type: int, int # Ignore for statements with arity mismatches for x in []: # type: int, int pass # Ignore with statements with arity mismatches with open('file') as (f0, f1): # type: File pass # Ignore with statements that have multiple item bindings with open('file') as f0, open('file') as f1: # type: File pass # In cases where the entire statement cannot successfully be parsed # with `type_comments=True` because of an invalid type comment, we # skip it. Here, annotating the inner `pass` is illegal. for x in []: # type: int pass # type: None """ after = before self.assertCodemod39Plus(before, after) class TestConvertTypeComments_FunctionDef(TestConvertTypeCommentsBase): """ Some notes on our testing strategy: In order to avoid a combinatorial explosion in test cases, we leverage some knowledge about the implementation. Here are the key ideas that allow us to write fewer cases: - The logic for generating annotations is the same for all annotations, and is well-covered by TestConvertTypeComments_AssignForWith, so we can stick to just simple builtin types. - The application of types is independent of where they came from. - Type comment removal is indepenent of type application, other than in the case where we give up entirely. - The rules for which type gets used (existing annotation, inline comment, or func type comment) is independent of the location of a parameter. """ def test_simple_function_type_comments(self) -> None: before = """ def f0(x): # type: (...) 
-> None pass def f1(x): # type: (int) -> None pass def f2(x, /, y = 'y', *, z = 1.5): # type: (int, str, float) -> None pass def f3(x, *args, y, **kwargs): # type: (str, int, str, float) -> None pass def f4(x, *args, **kwargs): # type: (str, *int, **float) -> None pass """ after = """ def f0(x) -> None: pass def f1(x: int) -> None: pass def f2(x: int, /, y: str = 'y', *, z: float = 1.5) -> None: pass def f3(x: str, *args: int, y: str, **kwargs: float) -> None: pass def f4(x: str, *args: int, **kwargs: float) -> None: pass """ self.assertCodemod39Plus(before, after) def test_prioritization_order_for_type_application(self) -> None: before = """ def f( x: int, # type: str y, # type: str z ): # type: (float, float, float) -> None pass """ after = """ def f( x: int, y: str, z: float ) -> None: pass """ self.assertCodemod39Plus(before, after) def test_inlined_function_type_comments(self) -> None: before = """ def f( x, # not-a-type-comment # also-not-a-type-comment y = 42, # type: int *args, # type: technically-another-line-is-legal :o z, **kwargs, # type: str ): # not-a-type-comment # also-not-a-type-comment pass """ after = """ def f( x, # not-a-type-comment # also-not-a-type-comment y: int = 42, *args: "technically-another-line-is-legal :o", z, **kwargs: str, ): # not-a-type-comment # also-not-a-type-comment pass """ self.assertCodemod39Plus(before, after) def test_method_transforms(self) -> None: before = """ class A: def __init__(self, thing): # type: (str) -> None self.thing = thing @classmethod def make(cls): # type: () -> A return cls("thing") @staticmethod def f(x, y): # type: (object, object) -> None pass def method0( self, other_thing, ): # type: (str) -> bool return self.thing == other_thing def method1( self, # type: A other_thing, # type: str ): # type: (int) -> bool return self.thing == other_thing def method2( self, other_thing, ): # type: (A, str) -> bool return self.thing == other_thing """ after = """ class A: def __init__(self, thing: str) -> None: self.thing = thing @classmethod def make(cls) -> "A": return cls("thing") @staticmethod def f(x: object, y: object) -> None: pass def method0( self, other_thing: str, ) -> bool: return self.thing == other_thing def method1( self: "A", other_thing: str, ) -> bool: return self.thing == other_thing def method2( self: "A", other_thing: str, ) -> bool: return self.thing == other_thing """ self.assertCodemod39Plus(before, after) def test_no_change_if_function_type_comments_unused(self) -> None: before = """ # arity error in arguments def f(x, y): # type: (int) -> float pass # unparseable function type def f(x, y): # type: this is not a type! pass # In cases where the entire statement cannot successfully be parsed # with `type_comments=True` because of an invalid type comment, we # skip it. Here, annotating the inner `pass` is illegal. def f(x, y): # type: (int, int) -> None pass # type: None """ after = before self.assertCodemod39Plus(before, after) def test_do_not_traverse_lambda_Param(self) -> None: """ The Param node can happen not just in FunctionDef but also in Lambda. Make sure this doesn't cause problems. """ before = """ @dataclass class WrapsAFunction: func: Callable msg_gen: Callable = lambda self: f"calling {self.func.__name__}..." 
""" after = before self.assertCodemod39Plus(before, after) def test_no_quoting(self) -> None: before = """ def f(x): # type: (Foo) -> Foo pass w = x # type: Foo y, z = x, x # type: (Foo, Foo) return w with get_context() as context: # type: Context pass for loop_var in the_iterable: # type: LoopType pass """ after = """ def f(x: Foo) -> Foo: pass w: Foo = x y: Foo z: Foo y, z = x, x return w context: Context with get_context() as context: pass loop_var: LoopType for loop_var in the_iterable: pass """ self.assertCodemod39Plus(before, after, no_quote_annotations=True) LibCST-1.2.0/libcst/codemod/commands/tests/test_ensure_import_present.py000066400000000000000000000024311456464173300265100ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod import CodemodTest from libcst.codemod.commands.ensure_import_present import EnsureImportPresentCommand class EnsureImportPresentCommandTest(CodemodTest): TRANSFORM = EnsureImportPresentCommand def test_import_module(self) -> None: before = "" after = "import a" self.assertCodemod(before, after, module="a", entity=None, alias=None) def test_import_entity(self) -> None: before = "" after = "from a import b" self.assertCodemod(before, after, module="a", entity="b", alias=None) def test_import_wildcard(self) -> None: before = "from a import *" after = "from a import *" self.assertCodemod(before, after, module="a", entity="b", alias=None) def test_import_module_aliased(self) -> None: before = "" after = "import a as c" self.assertCodemod(before, after, module="a", entity=None, alias="c") def test_import_entity_aliased(self) -> None: before = "" after = "from a import b as c" self.assertCodemod(before, after, module="a", entity="b", alias="c") LibCST-1.2.0/libcst/codemod/commands/tests/test_fix_pyre_directives.py000066400000000000000000000123161456464173300261260ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod import CodemodTest from libcst.codemod.commands.fix_pyre_directives import FixPyreDirectivesCommand class TestFixPyreDirectivesCommand(CodemodTest): TRANSFORM = FixPyreDirectivesCommand def test_no_need_to_fix_simple(self) -> None: """ Tests that a pyre-strict inside the module header doesn't get touched. """ after = ( before ) = """ # pyre-strict from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_no_need_to_fix_complex_bottom(self) -> None: """ Tests that a pyre-strict inside the module header doesn't get touched. """ after = ( before ) = """ # This is some header comment. # # pyre-strict from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_no_need_to_fix_complex_top(self) -> None: """ Tests that a pyre-strict inside the module header doesn't get touched. """ after = ( before ) = """ # pyre-strict # # This is some header comment. from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_fix_misspelled_header(self) -> None: """ Tests that we correctly address poor spelling of a comment. 
""" before = """ # pyre strict from typing import List def baz() -> List[Foo]: pass """ after = """ # pyre-strict from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_fix_misspelled_body(self) -> None: """ Tests that we correctly address poor spelling of a comment. """ before = """ from typing import List # pyre strict def baz() -> List[Foo]: pass """ after = """ # pyre-strict from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_fix_header_duplicate(self) -> None: """ Tests that we correctly remove a duplicate, even with a mistake. """ before = """ # pyre-strict # pyre-strict from typing import List def baz() -> List[Foo]: pass """ after = """ # pyre-strict from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_fix_body_duplicate(self) -> None: """ Tests that we correctly remove a duplicate, even with a mistake. """ before = """ # This is a comment. # # pyre-strict from typing import List # pyre-strict def baz() -> List[Foo]: pass """ after = """ # This is a comment. # # pyre-strict from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_fix_misspelled_header_duplicate(self) -> None: """ Tests that we correctly remove a duplicate, even with a mistake. """ before = """ # pyre-strict # pyre strict from typing import List def baz() -> List[Foo]: pass """ after = """ # pyre-strict from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_fix_misspelled_header_duplicate_body(self) -> None: """ Tests that we correctly remove a duplicate, even with a mistake. """ before = """ # pyre-strict from typing import List # pyre strict def baz() -> List[Foo]: pass """ after = """ # pyre-strict from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_fix_wrong_location(self) -> None: """ Tests that we correctly move a badly-located pyre-strict. """ before = """ from typing import List # pyre-strict def baz() -> List[Foo]: pass """ after = """ # pyre-strict from typing import List def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) LibCST-1.2.0/libcst/codemod/commands/tests/test_noop.py000066400000000000000000000020551456464173300230320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod import CodemodTest from libcst.codemod.commands.noop import NOOPCommand class TestNOOPCodemod(CodemodTest): TRANSFORM = NOOPCommand def test_noop(self) -> None: before = """ foo: str = "" class Class: pass def foo(a: Class, **kwargs: str) -> Class: t: Class = Class() # This is a comment bar = "" return t bar = Class() foo(bar, baz="bla") """ after = """ foo: str = "" class Class: pass def foo(a: Class, **kwargs: str) -> Class: t: Class = Class() # This is a comment bar = "" return t bar = Class() foo(bar, baz="bla") """ self.assertCodemod(before, after) LibCST-1.2.0/libcst/codemod/commands/tests/test_remove_pyre_directive.py000066400000000000000000000110501456464173300264440ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# from libcst.codemod import CodemodTest from libcst.codemod.commands.remove_pyre_directive import ( RemovePyreStrictCommand, RemovePyreUnsafeCommand, ) class TestRemovePyreStrictCommand(CodemodTest): TRANSFORM = RemovePyreStrictCommand def test_remove_from_file(self) -> None: before = """ # pyre-strict def baz() -> List[Foo]: pass """ after = """ def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_remove_from_file_without_pyre_strict(self) -> None: """ We shouldn't be removing pyre-strict to a file that doesn't have it. """ before = """ def baz() -> List[Foo]: pass """ after = """ def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_remove_from_file_with_pyre_strict_after(self) -> None: """ Test removal if pyre-strict is after comments. """ before = """ # THIS IS A COMMENT! # pyre-strict def baz() -> List[Foo]: pass """ after = """ # THIS IS A COMMENT! def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_remove_from_file_with_pyre_strict_before(self) -> None: """ Test removal if pyre-strict is before comments. """ before = """ # pyre-strict # THIS IS A COMMENT! def baz() -> List[Foo]: pass """ after = """ # THIS IS A COMMENT! def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_remove_from_file_with_comment(self) -> None: """ We should preserve comments and spacing when removing. """ before = """ # YO I'M A COMMENT # pyre-strict def baz() -> List[Foo]: pass """ after = """ # YO I'M A COMMENT def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) class TestRemovePyreUnsafeCommand(CodemodTest): TRANSFORM = RemovePyreUnsafeCommand def test_remove_from_file(self) -> None: before = """ # pyre-unsafe def baz() -> List[Foo]: pass """ after = """ def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_remove_from_file_without_pyre_unsafe(self) -> None: """ We shouldn't be removing pyre-unsafe to a file that doesn't have it. """ before = """ def baz() -> List[Foo]: pass """ after = """ def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_remove_from_file_with_pyre_unsafe_after(self) -> None: """ Test removal if pyre-unsafe is after comments. """ before = """ # THIS IS A COMMENT! # pyre-unsafe def baz() -> List[Foo]: pass """ after = """ # THIS IS A COMMENT! def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_remove_from_file_with_pyre_unsafe_before(self) -> None: """ Test removal if pyre-unsafe is before comments. """ before = """ # pyre-unsafe # THIS IS A COMMENT! def baz() -> List[Foo]: pass """ after = """ # THIS IS A COMMENT! def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) def test_remove_from_file_with_comment(self) -> None: """ We should preserve comments and spacing when removing. """ before = """ # YO I'M A COMMENT # pyre-unsafe def baz() -> List[Foo]: pass """ after = """ # YO I'M A COMMENT def baz() -> List[Foo]: pass """ self.assertCodemod(before, after) LibCST-1.2.0/libcst/codemod/commands/tests/test_remove_unused_imports.py000066400000000000000000000063071456464173300265200ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# from libcst.codemod import CodemodTest from libcst.codemod.commands.remove_unused_imports import RemoveUnusedImportsCommand class RemoveUnusedImportsCommandTest(CodemodTest): TRANSFORM = RemoveUnusedImportsCommand def test_simple_case(self) -> None: before = "import a, b\na()" after = "import a\na()" self.assertCodemod(before, after) def test_double_import(self) -> None: before = "import a\nimport a\na()" self.assertCodemod(before, before) def test_conditional_import(self) -> None: before = """ if True: import a else: import b as a a() """ self.assertCodemod(before, before) def test_unused_in_conditional(self) -> None: before = """ if False: import a """ after = """ if False: pass """ self.assertCodemod(before, after) def test_type_annotations(self) -> None: before = """ import a x: a = 1 """ self.assertCodemod(before, before) def test_dotted_imports(self) -> None: before = """ import a.b, a.b.c import e.f import g.h import x.y, x.y.z def foo() -> None: a.b e.g g.h.i x.y.z """ after = """ import a.b, a.b.c import e.f import g.h import x.y.z def foo() -> None: a.b e.g g.h.i x.y.z """ self.assertCodemod(before, after) def test_enclosed_attributes(self) -> None: before = """ from a.b import c import x def foo() -> None: x.y(c.d()).z() """ self.assertCodemod(before, before) def test_access_in_assignment(self) -> None: before = """ from a import b b(0)[x] = False """ self.assertCodemod(before, before) def test_no_formatting_if_no_unused_imports(self) -> None: before = """ from m import (a, b,) a(b, 'look at these ugly quotes') """ self.assertCodemod(before, before) def test_suppression_on_first_line_of_multiline_import_refers_to_whole_block( self, ) -> None: before = """ from a import ( # lint-ignore: unused-import b, c, ) """ self.assertCodemod(before, before) def test_suppression(self) -> None: before = """ # noqa import a, b import c from x import ( y, z, # noqa ) """ after = """ # noqa import a, b from x import ( z, # noqa ) """ self.assertCodemod(before, after) LibCST-1.2.0/libcst/codemod/commands/tests/test_rename.py000066400000000000000000000370741456464173300233370ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# # pyre-strict from libcst.codemod import CodemodTest from libcst.codemod.commands.rename import RenameCommand class TestRenameCommand(CodemodTest): TRANSFORM = RenameCommand def test_rename_name(self) -> None: before = """ from foo import bar def test() -> None: bar(5) """ after = """ from baz import qux def test() -> None: qux(5) """ self.assertCodemod(before, after, old_name="foo.bar", new_name="baz.qux") def test_rename_name_asname(self) -> None: before = """ from foo import bar as bla def test() -> None: bla(5) """ after = """ from baz import qux def test() -> None: qux(5) """ self.assertCodemod( before, after, old_name="foo.bar", new_name="baz.qux", ) def test_rename_repeated_name_with_asname(self) -> None: before = """ from foo import foo as bla def test() -> None: bla.bla(5) """ after = """ from baz import qux def test() -> None: qux.bla(5) """ self.assertCodemod( before, after, old_name="foo.foo", new_name="baz.qux", ) def test_rename_attr(self) -> None: before = """ import a.b def test() -> None: a.b.c(5) """ after = """ import d.e def test() -> None: d.e.f(5) """ self.assertCodemod( before, after, old_name="a.b.c", new_name="d.e.f", ) def test_rename_attr_asname(self) -> None: before = """ import foo as bar def test() -> None: bar.qux(5) """ after = """ import baz def test() -> None: baz.quux(5) """ self.assertCodemod( before, after, old_name="foo.qux", new_name="baz.quux", ) def test_rename_module_import(self) -> None: before = """ import a.b class Foo(a.b.C): pass """ after = """ import c.b class Foo(c.b.C): pass """ self.assertCodemod( before, after, old_name="a.b", new_name="c.b", ) def test_rename_module_import_2(self) -> None: before = """ import a.b class Foo(a.b.C): pass """ after = """ import c.b class Foo(c.b.C): pass """ self.assertCodemod( before, after, old_name="a", new_name="c", ) def test_rename_module_import_no_change(self) -> None: # Full qualified names don't match, so don't codemod before = """ import a.b class Foo(a.b.C): pass """ self.assertCodemod( before, before, old_name="b", new_name="c.b", ) def test_rename_module_import_from(self) -> None: before = """ from a import b class Foo(b.C): pass """ after = """ from c import b class Foo(b.C): pass """ self.assertCodemod( before, after, old_name="a.b", new_name="c.b", ) def test_rename_module_import_from_2(self) -> None: before = """ from a import b class Foo(b.C): pass """ after = """ from c import b class Foo(b.C): pass """ self.assertCodemod( before, after, old_name="a", new_name="c", ) def test_rename_class(self) -> None: before = """ from a.b import some_class class Foo(some_class): pass """ after = """ from c.b import some_class class Foo(some_class): pass """ self.assertCodemod( before, after, old_name="a.b.some_class", new_name="c.b.some_class", ) def test_rename_importfrom_same_module(self) -> None: before = """ from a.b import Class_1, Class_2 class Foo(Class_1): pass """ after = """ from a.b import Class_3, Class_2 class Foo(Class_3): pass """ self.assertCodemod( before, after, old_name="a.b.Class_1", new_name="a.b.Class_3", ) def test_rename_importfrom_same_module_2(self) -> None: before = """ from a.b import module_1, module_2 class Foo(module_1.Class_1): pass class Fooo(module_2.Class_2): pass """ after = """ from a.b import module_2 from a.b.module_3 import Class_3 class Foo(Class_3): pass class Fooo(module_2.Class_2): pass """ self.assertCodemod( before, after, old_name="a.b.module_1.Class_1", new_name="a.b.module_3.Class_3", ) def test_import_same_module(self) -> None: before = """ import 
logging logging.warn(1) """ after = """ import logging logging.warning(1) """ self.assertCodemod( before, after, old_name="logging.warn", new_name="logging.warning", ) def test_import_same_dotted_module(self) -> None: before = """ import a.b a.b.warn(1) """ after = """ import a.b a.b.warning(1) """ self.assertCodemod( before, after, old_name="a.b.warn", new_name="a.b.warning", ) def test_rename_local_variable(self) -> None: before = """ x = 5 y = 5 + x """ after = """ z = 5 y = 5 + z """ self.assertCodemod( before, after, old_name="x", new_name="z", ) def test_module_does_not_change(self) -> None: before = """ from a import b class Foo(b): pass """ after = """ from a import c class Foo(c): pass """ self.assertCodemod(before, after, old_name="a.b", new_name="a.c") def test_other_imports_untouched(self) -> None: before = """ import a, b, c class Foo(a.z): bar: b.bar baz: c.baz """ after = """ import d, b, c class Foo(d.z): bar: b.bar baz: c.baz """ self.assertCodemod( before, after, old_name="a.z", new_name="d.z", ) def test_other_import_froms_untouched(self) -> None: before = """ from a import b, c, d class Foo(b): bar: c.bar baz: d.baz """ after = """ from a import c, d from f import b class Foo(b): bar: c.bar baz: d.baz """ self.assertCodemod( before, after, old_name="a.b", new_name="f.b", ) def test_no_removal_of_import_in_use(self) -> None: before = """ import a class Foo(a.b): pass class Foo2(a.c): pass """ after = """ import a, z class Foo(z.b): pass class Foo2(a.c): pass """ self.assertCodemod( before, after, old_name="a.b", new_name="z.b", ) def test_no_removal_of_dotted_import_in_use(self) -> None: before = """ import a.b class Foo(a.b.c): pass class Foo2(a.b.d): pass """ after = """ import a.b, z.b class Foo(z.b.c): pass class Foo2(a.b.d): pass """ self.assertCodemod( before, after, old_name="a.b.c", new_name="z.b.c", ) def test_no_removal_of_import_from_in_use(self) -> None: before = """ from a import b class Foo(b.some_class): bar: b.some_other_class """ after = """ from a import b from blah import some_class class Foo(some_class): bar: b.some_other_class """ self.assertCodemod( before, after, old_name="a.b.some_class", new_name="blah.some_class", ) def test_other_unused_imports_untouched(self) -> None: before = """ import a import b class Foo(a.obj): pass """ after = """ import c import b class Foo(c.obj): pass """ self.assertCodemod( before, after, old_name="a.obj", new_name="c.obj", ) def test_complex_module_rename(self) -> None: before = """ from a.b.c import d class Foo(d.e.f): pass """ after = """ from g.h.i import j class Foo(j): pass """ self.assertCodemod(before, after, old_name="a.b.c.d.e.f", new_name="g.h.i.j") def test_complex_module_rename_with_asname(self) -> None: before = """ from a.b.c import d as ddd class Foo(ddd.e.f): pass """ after = """ from g.h.i import j class Foo(j): pass """ self.assertCodemod(before, after, old_name="a.b.c.d.e.f", new_name="g.h.i.j") def test_names_with_repeated_substrings(self) -> None: before = """ from aa import aaaa class Foo(aaaa.Bar): pass """ after = """ from b import c class Foo(c.Bar): pass """ self.assertCodemod( before, after, old_name="aa.aaaa", new_name="b.c", ) def test_repeated_name(self) -> None: before = """ from foo import foo def bar(): foo(5) """ after = """ from qux import qux def bar(): qux(5) """ self.assertCodemod( before, after, old_name="foo.foo", new_name="qux.qux", ) def test_no_codemod(self) -> None: before = """ from foo import bar def baz(): bar(5) """ self.assertCodemod( before, before, old_name="bar", 
new_name="qux", ) def test_rename_import_prefix(self) -> None: before = """ import a.b.c.d """ after = """ import x.y.c.d """ self.assertCodemod( before, after, old_name="a.b", new_name="x.y", ) def test_rename_import_from_prefix(self) -> None: before = """ from a.b.c.d import foo """ after = """ from x.y.c.d import foo """ self.assertCodemod( before, after, old_name="a.b", new_name="x.y", ) def test_rename_multiple_occurrences(self) -> None: before = """ from a import b class Foo(b.some_class): pass class Foobar(b.some_class): pass """ after = """ from c.d import some_class class Foo(some_class): pass class Foobar(some_class): pass """ self.assertCodemod( before, after, old_name="a.b.some_class", new_name="c.d.some_class" ) def test_rename_multiple_imports(self) -> None: before = """ import a from a import b from a.c import d class Foo(d): pass class Fooo(b.some_class): pass class Foooo(a.some_class): pass """ after = """ import z from z import b from z.c import d class Foo(d): pass class Fooo(b.some_class): pass class Foooo(z.some_class): pass """ self.assertCodemod(before, after, old_name="a", new_name="z") def test_input_with_colon_sep(self) -> None: before = """ from a.b.c import d class Foo(d.e.f): pass """ after = """ from g.h import i class Foo(i.j): pass """ self.assertCodemod(before, after, old_name="a.b.c.d.e.f", new_name="g.h:i.j") def test_input_with_colon_sep_at_the_end(self) -> None: before = """ from a.b.c import d class Foo(d.e): pass """ after = """ import g.h.i.j class Foo(g.h.i.j.e): pass """ self.assertCodemod(before, after, old_name="a.b.c.d", new_name="g.h.i.j:") def test_input_with_colon_sep_at_the_front(self) -> None: # This case should treat it as if no colon separator. before = """ from a.b.c import d class Foo(d.e): pass """ after = """ from g.h.i import j class Foo(j.e): pass """ self.assertCodemod(before, after, old_name="a.b.c.d", new_name=":g.h.i.j") def test_no_change_because_no_match_was_found(self) -> None: before = """ from foo import bar bar(42) """ self.assertCodemod(before, before, old_name="baz.bar", new_name="qux.bar") def test_rename_single_with_colon(self) -> None: before = """ from a.b import qux print(qux) """ after = """ from a import b print(b.qux) """ self.assertCodemod( before, after, old_name="a.b.qux", new_name="a:b.qux", ) LibCST-1.2.0/libcst/codemod/commands/tests/test_strip_strings_from_types.py000066400000000000000000000132641456464173300272440ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# from libcst.codemod import CodemodTest from libcst.codemod.commands.strip_strings_from_types import StripStringsCommand class TestStripStringsCodemod(CodemodTest): TRANSFORM = StripStringsCommand def test_noop(self) -> None: before = """ foo: str = "" class Class: pass def foo(a: Class, **kwargs: str) -> Class: t: Class = Class() # This is a comment bar = "" return t """ after = """ foo: str = "" class Class: pass def foo(a: Class, **kwargs: str) -> Class: t: Class = Class() # This is a comment bar = "" return t """ self.assertCodemod(before, after) def test_non_async(self) -> None: before = """ class Class: pass def foo(a: "Class", **kwargs: "str") -> "Class": t: "Class" = Class() # This is a comment return t """ after = """ from __future__ import annotations class Class: pass def foo(a: Class, **kwargs: str) -> Class: t: Class = Class() # This is a comment return t """ self.assertCodemod(before, after) def test_async(self) -> None: before = """ class Class: pass async def foo(a: "Class", **kwargs: "str") -> "Class": t: "Class" = Class() # This is a comment return t """ after = """ from __future__ import annotations class Class: pass async def foo(a: Class, **kwargs: str) -> Class: t: Class = Class() # This is a comment return t """ self.assertCodemod(before, after) def test_recursive(self) -> None: before = """ class Class: pass def foo(a: List["Class"]): pass def bar(a: List[Optional["Class"]]): pass def baz(a: "List[Class]"): pass """ after = """ from __future__ import annotations class Class: pass def foo(a: List[Class]): pass def bar(a: List[Optional[Class]]): pass def baz(a: List[Class]): pass """ self.assertCodemod(before, after) def test_literal(self) -> None: before = """ from typing_extensions import Literal class Class: pass def foo(a: Literal["one", "two", "three"]): pass def bar(a: Union["Class", Literal["one", "two", "three"]]): pass """ after = """ from __future__ import annotations from typing_extensions import Literal class Class: pass def foo(a: Literal["one", "two", "three"]): pass def bar(a: Union[Class, Literal["one", "two", "three"]]): pass """ self.assertCodemod(before, after) def test_literal_alias(self) -> None: before = """ from typing_extensions import Literal as Lit class Class: pass def foo(a: Lit["one", "two", "three"]): pass def bar(a: Union["Class", Lit["one", "two", "three"]]): pass """ after = """ from __future__ import annotations from typing_extensions import Literal as Lit class Class: pass def foo(a: Lit["one", "two", "three"]): pass def bar(a: Union[Class, Lit["one", "two", "three"]]): pass """ self.assertCodemod(before, after) def test_literal_object(self) -> None: before = """ import typing_extensions class Class: pass def foo(a: typing_extensions.Literal["one", "two", "three"]): pass def bar(a: Union["Class", typing_extensions.Literal["one", "two", "three"]]): pass """ after = """ from __future__ import annotations import typing_extensions class Class: pass def foo(a: typing_extensions.Literal["one", "two", "three"]): pass def bar(a: Union[Class, typing_extensions.Literal["one", "two", "three"]]): pass """ self.assertCodemod(before, after) def test_literal_object_alias(self) -> None: before = """ import typing_extensions as typext class Class: pass def foo(a: typext.Literal["one", "two", "three"]): pass def bar(a: Union["Class", typext.Literal["one", "two", "three"]]): pass """ after = """ from __future__ import annotations import typing_extensions as typext class Class: pass def foo(a: typext.Literal["one", "two", "three"]): pass def bar(a: 
Union[Class, typext.Literal["one", "two", "three"]]): pass """ self.assertCodemod(before, after) LibCST-1.2.0/libcst/codemod/commands/tests/test_unnecessary_format_string.py # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from typing import Type from libcst.codemod import Codemod, CodemodTest from libcst.codemod.commands.unnecessary_format_string import UnnecessaryFormatString class TestUnnecessaryFormatString(CodemodTest): TRANSFORM: Type[Codemod] = UnnecessaryFormatString def test_replace(self) -> None: before = r""" good: str = "good" good: str = f"with_arg{arg}" good = "good{arg1}".format(1234) good = "good".format() good = "good" % {} good = "good" % () good = rf"good\d+{bar}" good = f"wow i don't have args but don't mess my braces {{ up }}" bad: str = f"bad" + "bad" bad: str = f'bad' bad: str = rf'bad\d+' """ after = r""" good: str = "good" good: str = f"with_arg{arg}" good = "good{arg1}".format(1234) good = "good".format() good = "good" % {} good = "good" % () good = rf"good\d+{bar}" good = f"wow i don't have args but don't mess my braces {{ up }}" bad: str = "bad" + "bad" bad: str = 'bad' bad: str = r'bad\d+' """ self.assertCodemod(before, after) LibCST-1.2.0/libcst/codemod/commands/unnecessary_format_string.py # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import libcst import libcst.matchers as m from libcst.codemod import VisitorBasedCodemodCommand class UnnecessaryFormatString(VisitorBasedCodemodCommand): DESCRIPTION: str = ( "Converts f-strings which perform no formatting to regular strings." ) @m.leave(m.FormattedString(parts=(m.FormattedStringText(),))) def _check_formatted_string( self, _original_node: libcst.FormattedString, updated_node: libcst.FormattedString, ) -> libcst.BaseExpression: old_string_inner = libcst.ensure_type( updated_node.parts[0], libcst.FormattedStringText ).value if "{{" in old_string_inner or "}}" in old_string_inner: # these are the only two escape sequences we need to worry about. return updated_node old_string_literal = updated_node.start + old_string_inner + updated_node.end new_string_literal = ( updated_node.start.replace("f", "").replace("F", "") + old_string_inner + updated_node.end ) old_string_evaled = eval(old_string_literal) # noqa new_string_evaled = eval(new_string_literal) # noqa if old_string_evaled != new_string_evaled: warn_message = ( f"Attempted to codemod |{old_string_literal}| to " + f"|{new_string_literal}| but they don't evaluate to the same value! First is |{old_string_evaled}| and " + f"second is |{new_string_evaled}|" ) self.warn(warn_message) return updated_node return libcst.SimpleString(new_string_literal) LibCST-1.2.0/libcst/codemod/tests/ LibCST-1.2.0/libcst/codemod/tests/__init__.py # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
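# A small, self-contained illustration of why _check_formatted_string above
# bails out on "{{" and "}}": stripping the "f" prefix changes the value of
# any string that escapes braces, so such strings must be left untouched.
assert f"a {{b}} c" == "a {b} c"  # the f-string collapses the escape
assert "a {{b}} c" != "a {b} c"  # the plain string keeps both braces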
# LibCST-1.2.0/libcst/codemod/tests/codemod_formatter_error_input.py.txt000066400000000000000000000006111456464173300261560ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # pyre-strict import subprocess from contextlib import AsyncExitStack def fun() -> None: # this is an explicit syntax error to cause formatter error async with AsyncExitStack() as stack: stack LibCST-1.2.0/libcst/codemod/tests/test_codemod.py000066400000000000000000000047671456464173300217040ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from unittest import expectedFailure import libcst as cst import libcst.matchers as m from libcst.codemod import Codemod, CodemodContext, CodemodTest, SkipFile class SimpleCodemod(Codemod): def __init__(self, context: CodemodContext, *, skip: bool) -> None: super().__init__(context) self.skip = skip def transform_module_impl(self, tree: cst.Module) -> cst.Module: if self.skip: raise SkipFile() else: return tree class TestSkipDetection(CodemodTest): TRANSFORM = SimpleCodemod def test_detect_skip(self) -> None: code = """ def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(code, code, skip=False, expected_skip=False) self.assertCodemod(code, code, skip=True, expected_skip=True) @expectedFailure def test_did_not_skip_but_should(self) -> None: code = """ def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(code, code, skip=False, expected_skip=True) @expectedFailure def test_skipped_but_should_not(self) -> None: code = """ def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(code, code, skip=True, expected_skip=False) class IncrementCodemod(Codemod): def __init__(self, context: CodemodContext, *, iterations: int) -> None: super().__init__(context) self.iterations = iterations def should_allow_multiple_passes(self) -> bool: return True def transform_module_impl(self, tree: cst.Module) -> cst.Module: if self.iterations == 0: return tree self.iterations -= 1 return cst.ensure_type( m.replace( tree, m.Integer(), lambda node, _: node.with_changes(value=str(int(node.value) + 1)), ), cst.Module, ) class TestMultipass(CodemodTest): TRANSFORM = IncrementCodemod def test_multi_iterations(self) -> None: before = """ x = 5 """ after = """ x = 10 """ self.assertCodemod(before, after, iterations=5) LibCST-1.2.0/libcst/codemod/tests/test_codemod_cli.py000066400000000000000000000042271456464173300225220ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
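# A hedged, self-contained check of the multipass behavior exercised by
# TestMultipass above: because should_allow_multiple_passes() returns True,
# one transform_module call keeps re-running IncrementCodemod (defined in
# test_codemod.py above) until the iteration budget is spent, bumping every
# integer literal once per pass.
import libcst as cst
from libcst.codemod import CodemodContext

transform = IncrementCodemod(CodemodContext(), iterations=3)
print(transform.transform_module(cst.parse_module("x = 1\n")).code)  # "x = 4\n"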
# import platform import subprocess import sys from pathlib import Path from unittest import skipIf from libcst._parser.entrypoints import is_native from libcst.testing.utils import UnitTest class TestCodemodCLI(UnitTest): # pyre-ignore - no idea why pyre is complaining about this @skipIf(platform.system() == "Windows", "Windows") def test_codemod_formatter_error_input(self) -> None: rlt = subprocess.run( [ sys.executable, "-m", "libcst.tool", "codemod", "remove_unused_imports.RemoveUnusedImportsCommand", # `ArgumentParser.parse_known_args()`'s behavior dictates that options # need to go after instead of before the codemod command identifier. "--python-version", "3.6", str(Path(__file__).parent / "codemod_formatter_error_input.py.txt"), ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) if not is_native(): self.assertIn( "ParserSyntaxError: Syntax Error @ 14:11.", rlt.stderr.decode("utf-8"), ) else: self.assertIn( "error: cannot format -: Cannot parse: 13:10: async with AsyncExitStack() as stack:", rlt.stderr.decode("utf-8"), ) def test_codemod_external(self) -> None: # Test running the NOOP command as an "external command" # against this very file. output = subprocess.check_output( [ sys.executable, "-m", "libcst.tool", "codemod", "-x", # external module "libcst.codemod.commands.noop.NOOPCommand", str(Path(__file__)), ], encoding="utf-8", stderr=subprocess.STDOUT, ) assert "Finished codemodding 1 files!" in output LibCST-1.2.0/libcst/codemod/tests/test_metadata.py000066400000000000000000000031331456464173300220340ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from textwrap import dedent import libcst as cst from libcst import parse_module from libcst.codemod import CodemodContext, ContextAwareTransformer, ContextAwareVisitor from libcst.metadata import PositionProvider from libcst.testing.utils import UnitTest class TestingCollector(ContextAwareVisitor): METADATA_DEPENDENCIES = (PositionProvider,) def visit_Pass(self, node: cst.Pass) -> None: position = self.get_metadata(PositionProvider, node) self.context.scratch["pass"] = (position.start.line, position.start.column) class TestingTransform(ContextAwareTransformer): METADATA_DEPENDENCIES = (PositionProvider,) def visit_FunctionDef(self, node: cst.FunctionDef) -> None: position = self.get_metadata(PositionProvider, node) self.context.scratch[node.name.value] = ( position.start.line, position.start.column, ) node.visit(TestingCollector(self.context)) class TestMetadata(UnitTest): def test_metadata_works(self) -> None: code = """ def foo() -> None: pass def bar() -> int: return 5 """ module = parse_module(dedent(code)) context = CodemodContext() transform = TestingTransform(context) transform.transform_module(module) self.assertEqual( context.scratch, {"foo": (2, 0), "pass": (3, 4), "bar": (5, 0)} ) LibCST-1.2.0/libcst/codemod/tests/test_runner.py000066400000000000000000000064771456464173300216030ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
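# A rough, hedged sketch of what the METADATA_DEPENDENCIES machinery in
# test_metadata.py above resolves to: a MetadataWrapper computes positions
# up front, and each node is then looked up in the resulting mapping.
import libcst as cst
from libcst.metadata import MetadataWrapper, PositionProvider

wrapper = MetadataWrapper(cst.parse_module("pass\n"))
positions = wrapper.resolve(PositionProvider)
for node, position in positions.items():
    print(type(node).__name__, position.start.line, position.start.column)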
# from textwrap import dedent from typing import Dict import libcst as cst from libcst.codemod import ( Codemod, CodemodContext, CodemodTest, SkipFile, transform_module, TransformExit, TransformFailure, TransformSkip, TransformSuccess, ) class TestRunner(CodemodTest): def test_runner_default(self) -> None: before = """ def foo() -> None: pass def bar() -> int: return 5 """ after = """ # A comment def foo() -> None: pass def bar() -> int: return 5 """ class SimpleCodemod(Codemod): def transform_module_impl(self, tree: cst.Module) -> cst.Module: self.warn("Testing") return tree.with_changes( header=[cst.EmptyLine(comment=cst.Comment("# A comment"))] ) transform = SimpleCodemod(CodemodContext()) response = transform_module(transform, dedent(before)) self.assertIsInstance(response, TransformSuccess) assert isinstance(response, TransformSuccess) self.assertCodeEqual(response.code, after) self.assertEqual(response.warning_messages, ["Testing"]) def test_runner_interrupted(self) -> None: code = """ def foo() -> None: pass def bar() -> int: return 5 """ class SimpleCodemod(Codemod): def transform_module_impl(self, tree: cst.Module) -> cst.Module: raise KeyboardInterrupt("Testing") transform = SimpleCodemod(CodemodContext()) response = transform_module(transform, dedent(code)) self.assertIsInstance(response, TransformExit) def test_runner_skip(self) -> None: code = """ def foo() -> None: pass def bar() -> int: return 5 """ class SimpleCodemod(Codemod): def transform_module_impl(self, tree: cst.Module) -> cst.Module: self.warn("Testing") raise SkipFile() transform = SimpleCodemod(CodemodContext()) response = transform_module(transform, dedent(code)) self.assertIsInstance(response, TransformSkip) self.assertEqual(response.warning_messages, ["Testing"]) def test_runner_failure(self) -> None: code = """ def foo() -> None: pass def bar() -> int: return 5 """ class SimpleCodemod(Codemod): def transform_module_impl(self, tree: cst.Module) -> cst.Module: self.warn("Testing") somedict: Dict[str, str] = {} somedict["invalid_key"] return tree transform = SimpleCodemod(CodemodContext()) response = transform_module(transform, dedent(code)) self.assertIsInstance(response, TransformFailure) assert isinstance(response, TransformFailure) self.assertEqual(response.warning_messages, ["Testing"]) self.assertIsInstance(response.error, KeyError) LibCST-1.2.0/libcst/codemod/visitors/000077500000000000000000000000001456464173300173635ustar00rootroot00000000000000LibCST-1.2.0/libcst/codemod/visitors/__init__.py000066400000000000000000000024121456464173300214730ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
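# A hedged sketch of consuming the result types exercised by TestRunner
# above: callers of transform_module typically branch on the concrete
# TransformResult subclass, roughly as this helper does.
from libcst.codemod import TransformExit, TransformFailure, TransformSkip, TransformSuccess

def describe(result) -> str:
    if isinstance(result, TransformSuccess):
        return "ok"
    if isinstance(result, TransformSkip):
        return "skipped"
    if isinstance(result, TransformFailure):
        return f"failed: {result.error!r}"
    assert isinstance(result, TransformExit)
    return "interrupted"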
# from libcst.codemod.visitors._add_imports import AddImportsVisitor from libcst.codemod.visitors._apply_type_annotations import ApplyTypeAnnotationsVisitor from libcst.codemod.visitors._gather_comments import GatherCommentsVisitor from libcst.codemod.visitors._gather_exports import GatherExportsVisitor from libcst.codemod.visitors._gather_global_names import GatherGlobalNamesVisitor from libcst.codemod.visitors._gather_imports import GatherImportsVisitor from libcst.codemod.visitors._gather_string_annotation_names import ( GatherNamesFromStringAnnotationsVisitor, ) from libcst.codemod.visitors._gather_unused_imports import GatherUnusedImportsVisitor from libcst.codemod.visitors._imports import ImportItem from libcst.codemod.visitors._remove_imports import RemoveImportsVisitor __all__ = [ "AddImportsVisitor", "ApplyTypeAnnotationsVisitor", "GatherCommentsVisitor", "GatherExportsVisitor", "GatherGlobalNamesVisitor", "GatherImportsVisitor", "GatherNamesFromStringAnnotationsVisitor", "GatherUnusedImportsVisitor", "ImportItem", "RemoveImportsVisitor", ] LibCST-1.2.0/libcst/codemod/visitors/_add_imports.py000066400000000000000000000460461456464173300224130ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import defaultdict from typing import Dict, List, Optional, Sequence, Set, Tuple, Union import libcst from libcst import matchers as m, parse_statement from libcst._nodes.statement import Import, ImportFrom, SimpleStatementLine from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareTransformer from libcst.codemod.visitors._gather_imports import _GatherImportsMixin from libcst.codemod.visitors._imports import ImportItem from libcst.helpers import get_absolute_module_from_package_for_import from libcst.helpers.common import ensure_type class _GatherTopImportsBeforeStatements(_GatherImportsMixin): """ Works similarly to GatherImportsVisitor, but only considers imports declared before any other statements of the module with the exception of docstrings and __strict__ flag. """ def __init__(self, context: CodemodContext) -> None: super().__init__(context) # Track all of the imports found in this transform self.all_imports: List[Union[libcst.Import, libcst.ImportFrom]] = [] def leave_Module(self, original_node: libcst.Module) -> None: start = 1 if _skip_first(original_node) else 0 for stmt in original_node.body[start:]: if m.matches( stmt, m.SimpleStatementLine(body=[m.ImportFrom() | m.Import()]), ): stmt = ensure_type(stmt, SimpleStatementLine) # Workaround for python 3.8 and 3.9, won't accept Union for isinstance if m.matches(stmt.body[0], m.ImportFrom()): imp = ensure_type(stmt.body[0], ImportFrom) self.all_imports.append(imp) if m.matches(stmt.body[0], m.Import()): imp = ensure_type(stmt.body[0], Import) self.all_imports.append(imp) else: break for imp in self.all_imports: if m.matches(imp, m.Import()): imp = ensure_type(imp, Import) self._handle_Import(imp) else: imp = ensure_type(imp, ImportFrom) self._handle_ImportFrom(imp) class AddImportsVisitor(ContextAwareTransformer): """ Ensures that given imports exist in a module. Given a :class:`~libcst.codemod.CodemodContext` and a sequence of tuples specifying a module to import from as a string. Optionally an object to import from that module and any alias to assign that import, ensures that import exists. 
It will modify existing imports as necessary if the module in question is already being imported from. This is one of the transforms that is available automatically to you when running a codemod. To use it in this manner, import :class:`~libcst.codemod.visitors.AddImportsVisitor` and then call the static :meth:`~libcst.codemod.visitors.AddImportsVisitor.add_needed_import` method, giving it the current context (found as ``self.context`` for all subclasses of :class:`~libcst.codemod.Codemod`), the module you wish to import from and optionally an object you wish to import from that module and any alias you would like to assign that import to. For example:: AddImportsVisitor.add_needed_import(self.context, "typing", "Optional") This will produce the following code in a module, assuming there was no typing import already:: from typing import Optional As another example:: AddImportsVisitor.add_needed_import(self.context, "typing") This will produce the following code in a module, assuming there was no import already:: import typing Note that this is a subclass of :class:`~libcst.CSTTransformer` so it is possible to instantiate it and pass it to a :class:`~libcst.Module` :meth:`~libcst.CSTNode.visit` method. However, it is far easier to use the automatic transform feature of :class:`~libcst.codemod.CodemodCommand` and schedule an import to be added by calling :meth:`~libcst.codemod.visitors.AddImportsVisitor.add_needed_import` """ CONTEXT_KEY = "AddImportsVisitor" @staticmethod def _get_imports_from_context( context: CodemodContext, ) -> List[ImportItem]: imports = context.scratch.get(AddImportsVisitor.CONTEXT_KEY, []) if not isinstance(imports, list): raise Exception("Logic error!") return imports @staticmethod def add_needed_import( context: CodemodContext, module: str, obj: Optional[str] = None, asname: Optional[str] = None, relative: int = 0, ) -> None: """ Schedule an import to be added in a future invocation of this class by updating the ``context`` to include the ``module`` and optionally ``obj`` to be imported as well as optionally ``alias`` to alias the imported ``module`` or ``obj`` to. When subclassing from :class:`~libcst.codemod.CodemodCommand`, this will be performed for you after your transform finishes executing. If you are subclassing from a :class:`~libcst.codemod.Codemod` instead, you will need to call the :meth:`~libcst.codemod.Codemod.transform_module` method on the module under modification with an instance of this class after performing your transform. Note that if the particular ``module`` or ``obj`` you are requesting to import already exists as an import on the current module at the time of executing :meth:`~libcst.codemod.Codemod.transform_module` on an instance of :class:`~libcst.codemod.visitors.AddImportsVisitor`, this will perform no action in order to avoid adding duplicate imports. """ if module == "__future__" and obj is None: raise Exception("Cannot import __future__ directly!") imports = AddImportsVisitor._get_imports_from_context(context) imports.append(ImportItem(module, obj, asname, relative)) context.scratch[AddImportsVisitor.CONTEXT_KEY] = imports def __init__( self, context: CodemodContext, imports: Sequence[ImportItem] = (), ) -> None: # Allow for instantiation from either a context (used when multiple transforms # get chained) or from a direct instantiation. 
super().__init__(context) imps: List[ImportItem] = [ *AddImportsVisitor._get_imports_from_context(context), *imports, ] # Verify that the imports are valid for imp in imps: if imp.module == "__future__" and imp.obj_name is None: raise Exception("Cannot import __future__ directly!") if imp.module == "__future__" and imp.alias is not None: raise Exception("Cannot import __future__ objects with aliases!") # Resolve relative imports if we have a module name imps = [imp.resolve_relative(self.context.full_package_name) for imp in imps] # List of modules we need to ensure are imported self.module_imports: Set[str] = { imp.module for imp in imps if imp.obj_name is None and imp.alias is None } # List of modules we need to check for object imports on from_imports: Set[str] = { imp.module for imp in imps if imp.obj_name is not None and imp.alias is None } # Mapping of modules we're adding to the object they should import self.module_mapping: Dict[str, Set[str]] = { module: { imp.obj_name for imp in imps if imp.module == module and imp.obj_name is not None and imp.alias is None } for module in sorted(from_imports) } # List of aliased modules we need to ensure are imported self.module_aliases: Dict[str, str] = { imp.module: imp.alias for imp in imps if imp.obj_name is None and imp.alias is not None } # List of modules we need to check for object imports on from_imports_aliases: Set[str] = { imp.module for imp in imps if imp.obj_name is not None and imp.alias is not None } # Mapping of modules we're adding to the object with alias they should import self.alias_mapping: Dict[str, List[Tuple[str, str]]] = { module: [ (imp.obj_name, imp.alias) for imp in imps if imp.module == module and imp.obj_name is not None and imp.alias is not None ] for module in sorted(from_imports_aliases) } # Track the list of imports found at the top of the file self.all_imports: List[Union[libcst.Import, libcst.ImportFrom]] = [] def visit_Module(self, node: libcst.Module) -> None: # Do a preliminary pass to gather the imports we already have at the top gatherer = _GatherTopImportsBeforeStatements(self.context) node.visit(gatherer) self.all_imports = gatherer.all_imports self.module_imports = self.module_imports - gatherer.module_imports for module, alias in gatherer.module_aliases.items(): if module in self.module_aliases and self.module_aliases[module] == alias: del self.module_aliases[module] for module, aliases in gatherer.alias_mapping.items(): for obj, alias in aliases: if ( module in self.alias_mapping and (obj, alias) in self.alias_mapping[module] ): self.alias_mapping[module].remove((obj, alias)) if len(self.alias_mapping[module]) == 0: del self.alias_mapping[module] for module, imports in gatherer.object_mapping.items(): if module not in self.module_mapping: # We don't care about this import at all continue elif "*" in imports: # We already implicitly are importing everything del self.module_mapping[module] else: # Lets figure out what's left to import self.module_mapping[module] = self.module_mapping[module] - imports if not self.module_mapping[module]: # There's nothing left, so lets delete this work item del self.module_mapping[module] def leave_ImportFrom( self, original_node: libcst.ImportFrom, updated_node: libcst.ImportFrom ) -> libcst.ImportFrom: if isinstance(updated_node.names, libcst.ImportStar): # There's nothing to do here! 
return updated_node # Ensure this is one of the imports at the top if original_node not in self.all_imports: return updated_node # Get the module we're importing as a string, see if we have work to do. module = get_absolute_module_from_package_for_import( self.context.full_package_name, updated_node ) if ( module is None or module not in self.module_mapping and module not in self.alias_mapping ): return updated_node # We have work to do, mark that we won't modify this again. imports_to_add = self.module_mapping.get(module, []) if module in self.module_mapping: del self.module_mapping[module] aliases_to_add = self.alias_mapping.get(module, []) if module in self.alias_mapping: del self.alias_mapping[module] # Now, do the actual update. return updated_node.with_changes( names=[ *( libcst.ImportAlias(name=libcst.Name(imp)) for imp in sorted(imports_to_add) ), *( libcst.ImportAlias( name=libcst.Name(imp), asname=libcst.AsName(name=libcst.Name(alias)), ) for (imp, alias) in sorted(aliases_to_add) ), *updated_node.names, ] ) def _split_module( self, orig_module: libcst.Module, updated_module: libcst.Module ) -> Tuple[ List[Union[libcst.SimpleStatementLine, libcst.BaseCompoundStatement]], List[Union[libcst.SimpleStatementLine, libcst.BaseCompoundStatement]], List[Union[libcst.SimpleStatementLine, libcst.BaseCompoundStatement]], ]: statement_before_import_location = 0 import_add_location = 0 # This works under the principle that while we might modify node contents, # we have yet to modify the number of statements. So we can match on the # original tree but break up the statements of the modified tree. If we # change this assumption in this visitor, we will have to change this code. # Finds the location to add imports. It is the end of the first import block that occurs before any other statement (save for docstrings) # Never insert an import before initial __strict__ flag or docstring if _skip_first(orig_module): statement_before_import_location = import_add_location = 1 for i, statement in enumerate( orig_module.body[statement_before_import_location:] ): if m.matches( statement, m.SimpleStatementLine(body=[m.ImportFrom() | m.Import()]) ): import_add_location = i + statement_before_import_location + 1 else: break return ( list(updated_module.body[:statement_before_import_location]), list( updated_module.body[ statement_before_import_location:import_add_location ] ), list(updated_module.body[import_add_location:]), ) def _insert_empty_line( self, statements: List[ Union[libcst.SimpleStatementLine, libcst.BaseCompoundStatement] ], ) -> List[Union[libcst.SimpleStatementLine, libcst.BaseCompoundStatement]]: if len(statements) < 1: # No statements, nothing to add to return statements if len(statements[0].leading_lines) == 0: # Statement has no leading lines, add one! 
return [ statements[0].with_changes(leading_lines=(libcst.EmptyLine(),)), *statements[1:], ] if statements[0].leading_lines[0].comment is None: # First line is empty, so its safe to leave as-is return statements # Statement has a comment first line, so lets add one more empty line return [ statements[0].with_changes( leading_lines=(libcst.EmptyLine(), *statements[0].leading_lines) ), *statements[1:], ] def leave_Module( self, original_node: libcst.Module, updated_node: libcst.Module ) -> libcst.Module: # Don't try to modify if we have nothing to do if ( not self.module_imports and not self.module_mapping and not self.module_aliases and not self.alias_mapping ): return updated_node # First, find the insertion point for imports ( statements_before_imports, statements_until_add_imports, statements_after_imports, ) = self._split_module(original_node, updated_node) # Make sure there's at least one empty line before the first non-import statements_after_imports = self._insert_empty_line(statements_after_imports) # Mapping of modules we're adding to the object with and without alias they should import module_and_alias_mapping = defaultdict(list) for module, aliases in self.alias_mapping.items(): module_and_alias_mapping[module].extend(aliases) for module, imports in self.module_mapping.items(): module_and_alias_mapping[module].extend( [(object, None) for object in imports] ) module_and_alias_mapping = { module: sorted(aliases) for module, aliases in module_and_alias_mapping.items() } # Now, add all of the imports we need! return updated_node.with_changes( # pyre-fixme[60]: Concatenation not yet support for multiple variadic tup... body=( *statements_before_imports, *[ parse_statement( f"from {module} import " + ", ".join( [ obj if alias is None else f"{obj} as {alias}" for (obj, alias) in aliases ] ), config=updated_node.config_for_parsing, ) for module, aliases in module_and_alias_mapping.items() if module == "__future__" ], *statements_until_add_imports, *[ parse_statement( f"import {module}", config=updated_node.config_for_parsing ) for module in sorted(self.module_imports) ], *[ parse_statement( f"import {module} as {asname}", config=updated_node.config_for_parsing, ) for (module, asname) in self.module_aliases.items() ], *[ parse_statement( f"from {module} import " + ", ".join( [ obj if alias is None else f"{obj} as {alias}" for (obj, alias) in aliases ] ), config=updated_node.config_for_parsing, ) for module, aliases in module_and_alias_mapping.items() if module != "__future__" ], *statements_after_imports, ) ) def _skip_first(orig_module: libcst.Module) -> bool: # Is there a __strict__ flag or docstring at the top? if m.matches( orig_module, m.Module( body=[ m.SimpleStatementLine( body=[ m.Assign(targets=[m.AssignTarget(target=m.Name("__strict__"))]) ] ), m.ZeroOrMore(), ] ) | m.Module( body=[ m.SimpleStatementLine(body=[m.Expr(value=m.SimpleString())]), m.ZeroOrMore(), ] ), ): return True return False LibCST-1.2.0/libcst/codemod/visitors/_apply_type_annotations.py000066400000000000000000001433551456464173300247120ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
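# A minimal, self-contained sketch of the second usage mode described in the
# AddImportsVisitor docstring above: schedule an import on a context, then
# call transform_module on an instance directly (the "x = None" module is an
# illustrative placeholder).
import libcst
from libcst.codemod import CodemodContext
from libcst.codemod.visitors import AddImportsVisitor

context = CodemodContext()
AddImportsVisitor.add_needed_import(context, "typing", "Optional")
updated = AddImportsVisitor(context).transform_module(libcst.parse_module("x = None\n"))
# updated.code now begins with "from typing import Optional".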
from collections import defaultdict from dataclasses import dataclass from typing import Dict, List, Optional, Sequence, Set, Tuple, Union import libcst as cst import libcst.matchers as m from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareTransformer from libcst.codemod.visitors._add_imports import AddImportsVisitor from libcst.codemod.visitors._gather_global_names import GatherGlobalNamesVisitor from libcst.codemod.visitors._gather_imports import GatherImportsVisitor from libcst.codemod.visitors._imports import ImportItem from libcst.helpers import get_full_name_for_node from libcst.metadata import PositionProvider, QualifiedNameProvider NameOrAttribute = Union[cst.Name, cst.Attribute] NAME_OR_ATTRIBUTE = (cst.Name, cst.Attribute) # Union type for *args and **args StarParamType = Union[ None, cst._maybe_sentinel.MaybeSentinel, cst._nodes.expression.Param, cst._nodes.expression.ParamStar, ] def _module_and_target(qualified_name: str) -> Tuple[str, str]: relative_prefix = "" while qualified_name.startswith("."): relative_prefix += "." qualified_name = qualified_name[1:] split = qualified_name.rsplit(".", 1) if len(split) == 1: qualifier, target = "", split[0] else: qualifier, target = split return (relative_prefix + qualifier, target) def _get_unique_qualified_name( visitor: m.MatcherDecoratableVisitor, node: cst.CSTNode ) -> str: name = None names = [q.name for q in visitor.get_metadata(QualifiedNameProvider, node)] if len(names) == 0: # we hit this branch if the stub is directly using a fully # qualified name, which is not technically valid python but is # convenient to allow. name = get_full_name_for_node(node) elif len(names) == 1 and isinstance(names[0], str): name = names[0] if name is None: start = visitor.get_metadata(PositionProvider, node).start raise ValueError( "Could not resolve a unique qualified name for type " + f"{get_full_name_for_node(node)} at {start.line}:{start.column}. " + f"Candidate names were: {names!r}" ) return name def _get_import_alias_names( import_aliases: Sequence[cst.ImportAlias], ) -> Set[str]: import_names = set() for imported_name in import_aliases: asname = imported_name.asname if asname is not None: import_names.add(get_full_name_for_node(asname.name)) else: import_names.add(get_full_name_for_node(imported_name.name)) return import_names def _get_imported_names( imports: Sequence[Union[cst.Import, cst.ImportFrom]], ) -> Set[str]: """ Given a series of import statements (both Import and ImportFrom), determine all of the names that have been imported into the current scope. 
For example: - ``import foo.bar as bar, foo.baz`` produces ``{'bar', 'foo.baz'}`` - ``from foo import (Bar, Baz as B)`` produces ``{'Bar', 'B'}`` - ``from foo import *`` produces ``set()`` because we cannot resolve names """ import_names = set() for _import in imports: if isinstance(_import, cst.Import): import_names.update(_get_import_alias_names(_import.names)) else: names = _import.names if not isinstance(names, cst.ImportStar): import_names.update(_get_import_alias_names(names)) return import_names def _is_non_sentinel( x: Union[None, cst.CSTNode, cst.MaybeSentinel], ) -> bool: return x is not None and x != cst.MaybeSentinel.DEFAULT def _get_string_value( node: cst.SimpleString, ) -> str: s = node.value c = s[-1] return s[s.index(c) : -1] def _find_generic_base( node: cst.ClassDef, ) -> Optional[cst.Arg]: for b in node.bases: if m.matches(b.value, m.Subscript(value=m.Name("Generic"))): return b @dataclass(frozen=True) class FunctionKey: """ Class representing a function name and signature. This exists to ensure we do not attempt to apply stubs to functions whose definition is incompatible. """ name: str pos: int kwonly: str posonly: int star_arg: bool star_kwarg: bool @classmethod def make( cls, name: str, params: cst.Parameters, ) -> "FunctionKey": pos = len(params.params) kwonly = ",".join(sorted(x.name.value for x in params.kwonly_params)) posonly = len(params.posonly_params) star_arg = _is_non_sentinel(params.star_arg) star_kwarg = _is_non_sentinel(params.star_kwarg) return cls( name, pos, kwonly, posonly, star_arg, star_kwarg, ) @dataclass(frozen=True) class FunctionAnnotation: parameters: cst.Parameters returns: Optional[cst.Annotation] @dataclass class Annotations: """ Represents all of the annotation information we might add to a class: - All data is keyed on the qualified name relative to the module root - The ``functions`` field also keys on the signature so that we do not apply stub types where the signature is incompatible. The idea is that - ``functions`` contains all function and method type information from the stub, and the qualifier for a method includes the containing class names (e.g. "Cat.meow") - ``attributes`` similarly contains all globals and class-level attribute type information. - The ``class_definitions`` field contains all of the classes defined in the stub. Most of these classes will be ignored in downstream logic (it is *not* used to annotate attributes or methods), but there are some cases like TypedDict where a typing-only class needs to be injected. - The field ``typevars`` contains the assign statement for all type variables in the stub, and ``names`` tracks all of the names used in annotations; together these fields tell us which typevars should be included in the codemod (all typevars that appear in annotations.) """ # TODO: consider simplifying this in a few ways: # - We could probably just inject all typevars, used or not. # It doesn't seem to me that our codemod needs to act like # a linter checking for unused names. # - We could probably decide which classes are typing-only # in the visitor rather than the codemod, which would make # it easier to reason locally about (and document) how the # class_definitions field works.
functions: Dict[FunctionKey, FunctionAnnotation] attributes: Dict[str, cst.Annotation] class_definitions: Dict[str, cst.ClassDef] typevars: Dict[str, cst.Assign] names: Set[str] @classmethod def empty(cls) -> "Annotations": return Annotations({}, {}, {}, {}, set()) def update(self, other: "Annotations") -> None: self.functions.update(other.functions) self.attributes.update(other.attributes) self.class_definitions.update(other.class_definitions) self.typevars.update(other.typevars) self.names.update(other.names) def finish(self) -> None: self.typevars = {k: v for k, v in self.typevars.items() if k in self.names} @dataclass(frozen=True) class ImportedSymbol: """Import of foo.Bar, where both foo and Bar are potentially aliases.""" module_name: str module_alias: Optional[str] = None target_name: Optional[str] = None target_alias: Optional[str] = None @property def symbol(self) -> Optional[str]: return self.target_alias or self.target_name @property def module_symbol(self) -> str: return self.module_alias or self.module_name class ImportedSymbolCollector(m.MatcherDecoratableVisitor): """ Collect imported symbols from a stub module. """ METADATA_DEPENDENCIES = ( PositionProvider, QualifiedNameProvider, ) def __init__(self, existing_imports: Set[str], context: CodemodContext) -> None: super().__init__() self.existing_imports: Set[str] = existing_imports self.imported_symbols: Dict[str, Set[ImportedSymbol]] = defaultdict(set) self.in_annotation: bool = False def visit_Annotation(self, node: cst.Annotation) -> None: self.in_annotation = True def leave_Annotation(self, original_node: cst.Annotation) -> None: self.in_annotation = False def visit_ClassDef(self, node: cst.ClassDef) -> None: for base in node.bases: value = base.value if isinstance(value, NAME_OR_ATTRIBUTE): self._handle_NameOrAttribute(value) def visit_Name(self, node: cst.Name) -> None: if self.in_annotation: self._handle_NameOrAttribute(node) def visit_Attribute(self, node: cst.Attribute) -> None: if self.in_annotation: self._handle_NameOrAttribute(node) def visit_Subscript(self, node: cst.Subscript) -> bool: if isinstance(node.value, NAME_OR_ATTRIBUTE): return True return _get_unique_qualified_name(self, node) not in ("Type", "typing.Type") def _handle_NameOrAttribute( self, node: NameOrAttribute, ) -> None: # Adds the qualified name to the list of imported symbols obj = sym = None # keep pyre happy if isinstance(node, cst.Name): obj = None sym = node.value elif isinstance(node, cst.Attribute): obj = node.value.value # pyre-ignore[16] sym = node.attr.value qualified_name = _get_unique_qualified_name(self, node) module, target = _module_and_target(qualified_name) if module in ("", "builtins"): return elif qualified_name not in self.existing_imports: mod = ImportedSymbol( module_name=module, module_alias=obj if obj != module else None, target_name=target, target_alias=sym if sym != target else None, ) self.imported_symbols[sym].add(mod) class TypeCollector(m.MatcherDecoratableVisitor): """ Collect type annotations from a stub module. """ METADATA_DEPENDENCIES = ( PositionProvider, QualifiedNameProvider, ) annotations: Annotations def __init__( self, existing_imports: Set[str], module_imports: Dict[str, ImportItem], context: CodemodContext, ) -> None: super().__init__() self.context = context # Existing imports, determined by looking at the target module. # Used to help us determine when a type in a stub will require new imports. 
# # The contents of this are fully-qualified names of types in scope # as well as module names, although downstream we effectively ignore # the module names as of the current implementation. self.existing_imports: Set[str] = existing_imports # Module imports, gathered by prescanning the stub file to determine # which modules need to be imported directly to qualify their symbols. self.module_imports: Dict[str, ImportItem] = module_imports # Fields that help us track temporary state as we recurse self.qualifier: List[str] = [] self.current_assign: Optional[cst.Assign] = None # used to collect typevars # Store the annotations. self.annotations = Annotations.empty() def visit_ClassDef( self, node: cst.ClassDef, ) -> None: self.qualifier.append(node.name.value) new_bases = [] for base in node.bases: value = base.value if isinstance(value, NAME_OR_ATTRIBUTE): new_value = value.visit(_TypeCollectorDequalifier(self)) elif isinstance(value, cst.Subscript): new_value = value.visit(_TypeCollectorDequalifier(self)) else: start = self.get_metadata(PositionProvider, node).start raise ValueError( "Invalid type used as base class in stub file at " + f"{start.line}:{start.column}. Only subscripts, names, and " + "attributes are valid base classes for static typing." ) new_bases.append(base.with_changes(value=new_value)) self.annotations.class_definitions[node.name.value] = node.with_changes( bases=new_bases ) def leave_ClassDef( self, original_node: cst.ClassDef, ) -> None: self.qualifier.pop() def visit_FunctionDef( self, node: cst.FunctionDef, ) -> bool: self.qualifier.append(node.name.value) returns = node.returns return_annotation = ( returns.visit(_TypeCollectorDequalifier(self)) if returns is not None else None ) assert return_annotation is None or isinstance( return_annotation, cst.Annotation ) parameter_annotations = self._handle_Parameters(node.params) name = ".".join(self.qualifier) key = FunctionKey.make(name, node.params) self.annotations.functions[key] = FunctionAnnotation( parameters=parameter_annotations, returns=return_annotation ) # pyi files don't support inner functions, return False to stop the traversal. 
return False def leave_FunctionDef( self, original_node: cst.FunctionDef, ) -> None: self.qualifier.pop() def visit_AnnAssign( self, node: cst.AnnAssign, ) -> bool: name = get_full_name_for_node(node.target) if name is not None: self.qualifier.append(name) annotation_value = node.annotation.visit(_TypeCollectorDequalifier(self)) assert isinstance(annotation_value, cst.Annotation) self.annotations.attributes[".".join(self.qualifier)] = annotation_value return True def leave_AnnAssign( self, original_node: cst.AnnAssign, ) -> None: self.qualifier.pop() def visit_Assign( self, node: cst.Assign, ) -> None: self.current_assign = node def leave_Assign( self, original_node: cst.Assign, ) -> None: self.current_assign = None @m.call_if_inside(m.Assign()) @m.visit(m.Call(func=m.Name("TypeVar"))) def record_typevar( self, node: cst.Call, ) -> None: # pyre-ignore current_assign is never None here name = get_full_name_for_node(self.current_assign.targets[0].target) if name is not None: # pyre-ignore current_assign is never None here self.annotations.typevars[name] = self.current_assign self._handle_qualification_and_should_qualify("typing.TypeVar") self.current_assign = None def leave_Module( self, original_node: cst.Module, ) -> None: self.annotations.finish() def _module_and_target( self, qualified_name: str, ) -> Tuple[str, str]: relative_prefix = "" while qualified_name.startswith("."): relative_prefix += "." qualified_name = qualified_name[1:] split = qualified_name.rsplit(".", 1) if len(split) == 1: qualifier, target = "", split[0] else: qualifier, target = split return (relative_prefix + qualifier, target) def _handle_qualification_and_should_qualify( self, qualified_name: str, node: Optional[cst.CSTNode] = None ) -> bool: """ Based on a qualified name and the existing module imports, record that we need to add an import if necessary and return whether or not we should use the qualified name due to a preexisting import. 
""" module, target = self._module_and_target(qualified_name) if module in ("", "builtins"): return False elif qualified_name not in self.existing_imports: if module in self.existing_imports: return True elif module in self.module_imports: m = self.module_imports[module] if m.obj_name is None: asname = m.alias else: asname = None AddImportsVisitor.add_needed_import( self.context, m.module_name, asname=asname ) return True else: if node and isinstance(node, cst.Name) and node.value != target: asname = node.value else: asname = None AddImportsVisitor.add_needed_import( self.context, module, target, asname=asname, ) return False return False # Handler functions def _handle_Parameters( self, parameters: cst.Parameters, ) -> cst.Parameters: def update_annotations( parameters: Sequence[cst.Param], ) -> List[cst.Param]: updated_parameters = [] for parameter in list(parameters): annotation = parameter.annotation if annotation is not None: parameter = parameter.with_changes( annotation=annotation.visit(_TypeCollectorDequalifier(self)) ) updated_parameters.append(parameter) return updated_parameters return parameters.with_changes(params=update_annotations(parameters.params)) class _TypeCollectorDequalifier(cst.CSTTransformer): def __init__(self, type_collector: "TypeCollector") -> None: self.type_collector = type_collector def leave_Name(self, original_node: cst.Name, updated_node: cst.Name) -> cst.Name: qualified_name = _get_unique_qualified_name(self.type_collector, original_node) should_qualify = self.type_collector._handle_qualification_and_should_qualify( qualified_name, original_node ) self.type_collector.annotations.names.add(qualified_name) if should_qualify: qualified_node = cst.parse_module(qualified_name) return qualified_node # pyre-ignore[7] else: return original_node def visit_Attribute(self, node: cst.Attribute) -> bool: return False def leave_Attribute( self, original_node: cst.Attribute, updated_node: cst.Attribute ) -> cst.BaseExpression: qualified_name = _get_unique_qualified_name(self.type_collector, original_node) should_qualify = self.type_collector._handle_qualification_and_should_qualify( qualified_name, original_node ) self.type_collector.annotations.names.add(qualified_name) if should_qualify: return original_node else: return original_node.attr def leave_Index( self, original_node: cst.Index, updated_node: cst.Index ) -> cst.Index: if isinstance(original_node.value, cst.SimpleString): self.type_collector.annotations.names.add( _get_string_value(original_node.value) ) return updated_node def visit_Subscript(self, node: cst.Subscript) -> bool: return _get_unique_qualified_name(self.type_collector, node) not in ( "Type", "typing.Type", ) def leave_Subscript( self, original_node: cst.Subscript, updated_node: cst.Subscript ) -> cst.Subscript: if _get_unique_qualified_name(self.type_collector, original_node) in ( "Type", "typing.Type", ): # Note: we are intentionally not handling qualification of # anything inside `Type` because it's common to have nested # classes, which we cannot currently distinguish from classes # coming from other modules, appear here. 
            return original_node.with_changes(value=original_node.value.visit(self))
        return updated_node


@dataclass
class AnnotationCounts:
    global_annotations: int = 0
    attribute_annotations: int = 0
    parameter_annotations: int = 0
    return_annotations: int = 0
    classes_added: int = 0
    typevars_and_generics_added: int = 0

    def any_changes_applied(self) -> bool:
        return (
            self.global_annotations
            + self.attribute_annotations
            + self.parameter_annotations
            + self.return_annotations
            + self.classes_added
            + self.typevars_and_generics_added
        ) > 0


class ApplyTypeAnnotationsVisitor(ContextAwareTransformer):
    """
    Apply type annotations to a source module using the given stub modules.
    You can also pass in explicit annotations for functions and attributes and
    pass in new class definitions that need to be added to the source module.

    This is one of the transforms that is available automatically to you when
    running a codemod. To use it in this manner, import
    :class:`~libcst.codemod.visitors.ApplyTypeAnnotationsVisitor` and then call
    the static
    :meth:`~libcst.codemod.visitors.ApplyTypeAnnotationsVisitor.store_stub_in_context`
    method, giving it the current context (found as ``self.context`` for all
    subclasses of :class:`~libcst.codemod.Codemod`) and the stub module from
    which you wish to add annotations.

    For example, you can store the type annotation ``int`` for ``x`` using::

        stub_module = parse_module("x: int = ...")
        ApplyTypeAnnotationsVisitor.store_stub_in_context(self.context, stub_module)

    You can apply the type annotation using::

        source_module = parse_module("x = 1")
        ApplyTypeAnnotationsVisitor.transform_module(source_module)

    This will produce the following code::

        x: int = 1

    If the function or attribute already has a type annotation, it will not be
    overwritten.

    To overwrite existing annotations when applying annotations from a stub,
    use the keyword argument ``overwrite_existing_annotations=True`` when
    constructing the codemod or when calling ``store_stub_in_context``.
    """

    CONTEXT_KEY = "ApplyTypeAnnotationsVisitor"

    def __init__(
        self,
        context: CodemodContext,
        annotations: Optional[Annotations] = None,
        overwrite_existing_annotations: bool = False,
        use_future_annotations: bool = False,
        strict_posargs_matching: bool = True,
        strict_annotation_matching: bool = False,
        always_qualify_annotations: bool = False,
    ) -> None:
        super().__init__(context)
        # Qualifier for storing the canonical name of the current function.
        self.qualifier: List[str] = []
        self.annotations: Annotations = (
            Annotations.empty() if annotations is None else annotations
        )
        self.toplevel_annotations: Dict[str, cst.Annotation] = {}
        self.visited_classes: Set[str] = set()
        self.overwrite_existing_annotations = overwrite_existing_annotations
        self.use_future_annotations = use_future_annotations
        self.strict_posargs_matching = strict_posargs_matching
        self.strict_annotation_matching = strict_annotation_matching
        self.always_qualify_annotations = always_qualify_annotations
        # We use this to determine the end of the import block so that we can
        # insert top-level annotations.
        self.import_statements: List[cst.ImportFrom] = []
        # We use this to report annotations added, as well as to determine
        # whether to abandon the codemod in edge cases where we may have
        # only made changes to the imports.
self.annotation_counts: AnnotationCounts = AnnotationCounts() # We use this to collect typevars, to avoid importing existing ones from the pyi file self.current_assign: Optional[cst.Assign] = None self.typevars: Dict[str, cst.Assign] = {} # Global variables and classes defined on the toplevel of the target module. # Used to help determine which names we need to check are in scope, and add # quotations to avoid undefined forward references in type annotations. self.global_names: Set[str] = set() # We use this to avoid annotating multiple assignments to the same # symbol in a given scope self.already_annotated: Set[str] = set() @staticmethod def store_stub_in_context( context: CodemodContext, stub: cst.Module, overwrite_existing_annotations: bool = False, use_future_annotations: bool = False, strict_posargs_matching: bool = True, strict_annotation_matching: bool = False, always_qualify_annotations: bool = False, ) -> None: """ Store a stub module in the :class:`~libcst.codemod.CodemodContext` so that type annotations from the stub can be applied in a later invocation of this class. If the ``overwrite_existing_annotations`` flag is ``True``, the codemod will overwrite any existing annotations. If you call this function multiple times, only the last values of ``stub`` and ``overwrite_existing_annotations`` will take effect. """ context.scratch[ApplyTypeAnnotationsVisitor.CONTEXT_KEY] = ( stub, overwrite_existing_annotations, use_future_annotations, strict_posargs_matching, strict_annotation_matching, always_qualify_annotations, ) def transform_module_impl( self, tree: cst.Module, ) -> cst.Module: """ Collect type annotations from all stubs and apply them to ``tree``. Gather existing imports from ``tree`` so that we don't add duplicate imports. Gather global names from ``tree`` so forward references are quoted. 
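
        A minimal end-to-end sketch (assuming ``stub`` and ``source`` are
        already-parsed :class:`~libcst.Module` objects for the stub and the
        target module)::

            context = CodemodContext()
            ApplyTypeAnnotationsVisitor.store_stub_in_context(context, stub)
            annotated = ApplyTypeAnnotationsVisitor(context).transform_module(source)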
""" import_gatherer = GatherImportsVisitor(CodemodContext()) tree.visit(import_gatherer) existing_import_names = _get_imported_names(import_gatherer.all_imports) global_names_gatherer = GatherGlobalNamesVisitor(CodemodContext()) tree.visit(global_names_gatherer) self.global_names = global_names_gatherer.global_names.union( global_names_gatherer.class_names ) context_contents = self.context.scratch.get( ApplyTypeAnnotationsVisitor.CONTEXT_KEY ) if context_contents is not None: ( stub, overwrite_existing_annotations, use_future_annotations, strict_posargs_matching, strict_annotation_matching, always_qualify_annotations, ) = context_contents self.overwrite_existing_annotations = ( self.overwrite_existing_annotations or overwrite_existing_annotations ) self.use_future_annotations = ( self.use_future_annotations or use_future_annotations ) self.strict_posargs_matching = ( self.strict_posargs_matching and strict_posargs_matching ) self.strict_annotation_matching = ( self.strict_annotation_matching or strict_annotation_matching ) self.always_qualify_annotations = ( self.always_qualify_annotations or always_qualify_annotations ) module_imports = self._get_module_imports(stub, import_gatherer) visitor = TypeCollector(existing_import_names, module_imports, self.context) cst.MetadataWrapper(stub).visit(visitor) self.annotations.update(visitor.annotations) if self.use_future_annotations: AddImportsVisitor.add_needed_import( self.context, "__future__", "annotations" ) tree_with_imports = AddImportsVisitor(self.context).transform_module(tree) tree_with_changes = tree_with_imports.visit(self) # don't modify the imports if we didn't actually add any type information if self.annotation_counts.any_changes_applied(): return tree_with_changes else: return tree # helpers for collecting type information from the stub files def _get_module_imports( # noqa: C901: too complex self, stub: cst.Module, existing_import_gatherer: GatherImportsVisitor ) -> Dict[str, ImportItem]: """Returns a dict of modules that need to be imported to qualify symbols.""" # We correlate all imported symbols, e.g. foo.bar.Baz, with a list of module # and from imports. If the same unqualified symbol is used from different # modules, we give preference to an explicit from-import if any, and qualify # everything else by importing the module. # # e.g. the following stub: # import foo as quux # from bar import Baz as X # def f(x: X) -> quux.X: ... # will return {'foo': ImportItem("foo", "quux")}. When the apply type # annotation visitor hits `quux.X` it will retrieve the canonical name # `foo.X` and then note that `foo` is in the module imports map, so it will # leave the symbol qualified. import_gatherer = GatherImportsVisitor(CodemodContext()) stub.visit(import_gatherer) symbol_map = import_gatherer.symbol_mapping existing_import_names = _get_imported_names( existing_import_gatherer.all_imports ) symbol_collector = ImportedSymbolCollector(existing_import_names, self.context) cst.MetadataWrapper(stub).visit(symbol_collector) module_imports = {} for sym, imported_symbols in symbol_collector.imported_symbols.items(): existing = existing_import_gatherer.symbol_mapping.get(sym) if existing and any( s.module_name != existing.module_name for s in imported_symbols ): # If a symbol is imported in the main file, we have to qualify # it when imported from a different module in the stub file. 
used = True elif len(imported_symbols) == 1 and not self.always_qualify_annotations: # If we have a single use of a new symbol we can from-import it continue else: # There are multiple occurrences in the stub file and none in # the main file. At least one can be from-imported. used = False for imp_sym in imported_symbols: if not imp_sym.symbol: continue imp = symbol_map.get(imp_sym.symbol) if self.always_qualify_annotations and sym not in existing_import_names: # Override 'always qualify' if this is a typing import, or # the main file explicitly from-imports a symbol. if imp and imp.module_name != "typing": module_imports[imp.module_name] = imp else: imp = symbol_map.get(imp_sym.module_symbol) if imp: module_imports[imp.module_name] = imp elif not used and imp and imp.module_name == imp_sym.module_name: # We can only import a symbol directly once. used = True elif sym in existing_import_names: if imp: module_imports[imp.module_name] = imp else: imp = symbol_map.get(imp_sym.module_symbol) if imp: # imp will be None in corner cases like # import foo.bar as Baz # x: Baz # which is technically valid python but nonsensical as a # type annotation. Dropping it on the floor for now. module_imports[imp.module_name] = imp return module_imports # helpers for processing annotation nodes def _quote_future_annotations(self, annotation: cst.Annotation) -> cst.Annotation: # TODO: We probably want to make sure references to classes defined in the current # module come to us fully qualified - so we can do the dequalification here and # know to look for what is in-scope without also catching builtins like "None" in the # quoting. This should probably also be extended to handle what imports are in scope, # as well as subscriptable types. # Note: We are collecting all imports and passing this to the type collector grabbing # annotations from the stub file; should consolidate import handling somewhere too. 
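        # e.g. if `MyClass` is defined at module toplevel but has not been
        # visited yet, an annotation `x: MyClass` taken from the stub is
        # emitted as `x: "MyClass"` to avoid an undefined forward reference.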
node = annotation.annotation if ( isinstance(node, cst.Name) and (node.value in self.global_names) and not (node.value in self.visited_classes) ): return annotation.with_changes( annotation=cst.SimpleString(value=f'"{node.value}"') ) return annotation # smart constructors: all applied annotations happen via one of these def _apply_annotation_to_attribute_or_global( self, name: str, annotation: cst.Annotation, value: Optional[cst.BaseExpression], ) -> cst.AnnAssign: if len(self.qualifier) == 0: self.annotation_counts.global_annotations += 1 else: self.annotation_counts.attribute_annotations += 1 return cst.AnnAssign( cst.Name(name), self._quote_future_annotations(annotation), value, ) def _apply_annotation_to_parameter( self, parameter: cst.Param, annotation: cst.Annotation, ) -> cst.Param: self.annotation_counts.parameter_annotations += 1 return parameter.with_changes( annotation=self._quote_future_annotations(annotation), ) def _apply_annotation_to_return( self, function_def: cst.FunctionDef, annotation: cst.Annotation, ) -> cst.FunctionDef: self.annotation_counts.return_annotations += 1 return function_def.with_changes( returns=self._quote_future_annotations(annotation), ) # private methods used in the visit and leave methods def _qualifier_name(self) -> str: return ".".join(self.qualifier) def _annotate_single_target( self, node: cst.Assign, updated_node: cst.Assign, ) -> Union[cst.Assign, cst.AnnAssign]: only_target = node.targets[0].target if isinstance(only_target, (cst.Tuple, cst.List)): for element in only_target.elements: value = element.value name = get_full_name_for_node(value) if name is not None and name != "_": self._add_to_toplevel_annotations(name) elif isinstance(only_target, (cst.Subscript)): pass else: name = get_full_name_for_node(only_target) if name is not None: self.qualifier.append(name) qualifier_name = self._qualifier_name() if qualifier_name in self.annotations.attributes and not isinstance( only_target, (cst.Attribute, cst.Subscript) ): if qualifier_name not in self.already_annotated: self.already_annotated.add(qualifier_name) annotation = self.annotations.attributes[qualifier_name] self.qualifier.pop() return self._apply_annotation_to_attribute_or_global( name=name, annotation=annotation, value=node.value, ) else: self.qualifier.pop() return updated_node def _split_module( self, module: cst.Module, updated_module: cst.Module, ) -> Tuple[ List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], ]: import_add_location = 0 # This works under the principle that while we might modify node contents, # we have yet to modify the number of statements. So we can match on the # original tree but break up the statements of the modified tree. If we # change this assumption in this visitor, we will have to change this code. 
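        # e.g. if the module body is [from a import x, from b import y, z = 1]
        # and both from-imports were recorded in self.import_statements (only
        # `ImportFrom` statements are tracked there), the loop below leaves
        # import_add_location == 2, splitting the body into
        # ([from a import x, from b import y], [z = 1]).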
        for i, statement in enumerate(module.body):
            if isinstance(statement, cst.SimpleStatementLine):
                for possible_import in statement.body:
                    for last_import in self.import_statements:
                        if possible_import is last_import:
                            import_add_location = i + 1
                            break
        return (
            list(updated_module.body[:import_add_location]),
            list(updated_module.body[import_add_location:]),
        )

    def _add_to_toplevel_annotations(
        self,
        name: str,
    ) -> None:
        self.qualifier.append(name)
        if self._qualifier_name() in self.annotations.attributes:
            annotation = self.annotations.attributes[self._qualifier_name()]
            self.toplevel_annotations[name] = annotation
        self.qualifier.pop()

    def _update_parameters(
        self,
        annotations: FunctionAnnotation,
        updated_node: cst.FunctionDef,
    ) -> cst.Parameters:
        # Update params and default params with annotations
        # Don't override existing annotations or default values unless asked
        # to overwrite existing annotations.
        def update_annotation(
            parameters: Sequence[cst.Param],
            annotations: Sequence[cst.Param],
            positional: bool,
        ) -> List[cst.Param]:
            parameter_annotations = {}
            annotated_parameters = []
            positional = positional and not self.strict_posargs_matching
            for i, parameter in enumerate(annotations):
                key = i if positional else parameter.name.value
                if parameter.annotation:
                    parameter_annotations[key] = parameter.annotation.with_changes(
                        whitespace_before_indicator=cst.SimpleWhitespace(value="")
                    )
            for i, parameter in enumerate(parameters):
                key = i if positional else parameter.name.value
                if key in parameter_annotations and (
                    self.overwrite_existing_annotations or not parameter.annotation
                ):
                    parameter = self._apply_annotation_to_parameter(
                        parameter=parameter,
                        annotation=parameter_annotations[key],
                    )
                annotated_parameters.append(parameter)
            return annotated_parameters

        return updated_node.params.with_changes(
            params=update_annotation(
                updated_node.params.params,
                annotations.parameters.params,
                positional=True,
            ),
            kwonly_params=update_annotation(
                updated_node.params.kwonly_params,
                annotations.parameters.kwonly_params,
                positional=False,
            ),
            posonly_params=update_annotation(
                updated_node.params.posonly_params,
                annotations.parameters.posonly_params,
                positional=True,
            ),
        )

    def _insert_empty_line(
        self,
        statements: List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]],
    ) -> List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]]:
        if len(statements) < 1:
            # No statements, nothing to add to
            return statements
        if len(statements[0].leading_lines) == 0:
            # Statement has no leading lines, add one!
            return [
                statements[0].with_changes(leading_lines=(cst.EmptyLine(),)),
                *statements[1:],
            ]
        if statements[0].leading_lines[0].comment is None:
            # First line is empty, so it's safe to leave as-is
            return statements
        # Statement has a comment first line, so let's add one more empty line
        return [
            statements[0].with_changes(
                leading_lines=(cst.EmptyLine(), *statements[0].leading_lines)
            ),
            *statements[1:],
        ]

    def _match_signatures(  # noqa: C901: Too complex
        self,
        function: cst.FunctionDef,
        annotations: FunctionAnnotation,
    ) -> bool:
        """Check that function annotations on both signatures are compatible."""

        def compatible(
            p: Optional[cst.Annotation],
            q: Optional[cst.Annotation],
        ) -> bool:
            if (
                self.overwrite_existing_annotations
                or not _is_non_sentinel(p)
                or not _is_non_sentinel(q)
            ):
                return True
            if not self.strict_annotation_matching:
                # We will not overwrite clashing annotations, but the signature as a
                # whole will be marked compatible so that holes can be filled in.
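                # (e.g. stub `def f(x: int, y: str)` vs. source `def f(x: float, y)`:
                # `x` keeps `float`, while `y` can still pick up `str` from the stub.)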
return True return p.annotation.deep_equals(q.annotation) # pyre-ignore[16] def match_posargs( ps: Sequence[cst.Param], qs: Sequence[cst.Param], ) -> bool: if len(ps) != len(qs): return False for p, q in zip(ps, qs): if self.strict_posargs_matching and not p.name.value == q.name.value: return False if not compatible(p.annotation, q.annotation): return False return True def match_kwargs( ps: Sequence[cst.Param], qs: Sequence[cst.Param], ) -> bool: ps_dict = {x.name.value: x for x in ps} qs_dict = {x.name.value: x for x in qs} if set(ps_dict.keys()) != set(qs_dict.keys()): return False for k in ps_dict.keys(): if not compatible(ps_dict[k].annotation, qs_dict[k].annotation): return False return True def match_star( p: StarParamType, q: StarParamType, ) -> bool: return _is_non_sentinel(p) == _is_non_sentinel(q) def match_params( f: cst.FunctionDef, g: FunctionAnnotation, ) -> bool: p, q = f.params, g.parameters return ( match_posargs(p.params, q.params) and match_posargs(p.posonly_params, q.posonly_params) and match_kwargs(p.kwonly_params, q.kwonly_params) and match_star(p.star_arg, q.star_arg) and match_star(p.star_kwarg, q.star_kwarg) ) def match_return( f: cst.FunctionDef, g: FunctionAnnotation, ) -> bool: return compatible(f.returns, g.returns) return match_params(function, annotations) and match_return( function, annotations ) # transform API methods def visit_ClassDef( self, node: cst.ClassDef, ) -> None: self.qualifier.append(node.name.value) def leave_ClassDef( self, original_node: cst.ClassDef, updated_node: cst.ClassDef, ) -> cst.ClassDef: self.visited_classes.add(original_node.name.value) cls_name = ".".join(self.qualifier) self.qualifier.pop() definition = self.annotations.class_definitions.get(cls_name) if definition: b1 = _find_generic_base(definition) b2 = _find_generic_base(updated_node) if b1 and not b2: new_bases = list(updated_node.bases) + [b1] self.annotation_counts.typevars_and_generics_added += 1 return updated_node.with_changes(bases=new_bases) return updated_node def visit_FunctionDef( self, node: cst.FunctionDef, ) -> bool: self.qualifier.append(node.name.value) # pyi files don't support inner functions, return False to stop the traversal. 
return False def leave_FunctionDef( self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef, ) -> cst.FunctionDef: key = FunctionKey.make(self._qualifier_name(), updated_node.params) self.qualifier.pop() if key in self.annotations.functions: function_annotation = self.annotations.functions[key] # Only add new annotation if: # * we have matching function signatures and # * we are explicitly told to overwrite existing annotations or # * there is no existing annotation if not self._match_signatures(updated_node, function_annotation): return updated_node set_return_annotation = ( self.overwrite_existing_annotations or updated_node.returns is None ) if set_return_annotation and function_annotation.returns is not None: updated_node = self._apply_annotation_to_return( function_def=updated_node, annotation=function_annotation.returns, ) # Don't override default values when annotating functions new_parameters = self._update_parameters(function_annotation, updated_node) return updated_node.with_changes(params=new_parameters) return updated_node def visit_Assign( self, node: cst.Assign, ) -> None: self.current_assign = node @m.call_if_inside(m.Assign()) @m.visit(m.Call(func=m.Name("TypeVar"))) def record_typevar( self, node: cst.Call, ) -> None: # pyre-ignore current_assign is never None here name = get_full_name_for_node(self.current_assign.targets[0].target) if name is not None: # Preserve the whole node, even though we currently just use the # name, so that we can match bounds and variance at some point and # determine if two typevars with the same name are indeed the same. # pyre-ignore current_assign is never None here self.typevars[name] = self.current_assign self.current_assign = None def leave_Assign( self, original_node: cst.Assign, updated_node: cst.Assign, ) -> Union[cst.Assign, cst.AnnAssign]: self.current_assign = None if len(original_node.targets) > 1: for assign in original_node.targets: target = assign.target if isinstance(target, (cst.Name, cst.Attribute)): name = get_full_name_for_node(target) if name is not None and name != "_": # Add separate top-level annotations for `a = b = 1` # as `a: int` and `b: int`. self._add_to_toplevel_annotations(name) return updated_node else: return self._annotate_single_target(original_node, updated_node) def leave_ImportFrom( self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom, ) -> cst.ImportFrom: self.import_statements.append(original_node) return updated_node def leave_Module( self, original_node: cst.Module, updated_node: cst.Module, ) -> cst.Module: fresh_class_definitions = [ definition for name, definition in self.annotations.class_definitions.items() if name not in self.visited_classes ] # NOTE: The entire change will also be abandoned if # self.annotation_counts is all 0s, so if adding any new category make # sure to record it there. 
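        # (e.g. a run that only adds fresh class definitions still counts as a
        # change, via `classes_added` below.)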
if not ( self.toplevel_annotations or fresh_class_definitions or self.annotations.typevars ): return updated_node toplevel_statements = [] # First, find the insertion point for imports statements_before_imports, statements_after_imports = self._split_module( original_node, updated_node ) # Make sure there's at least one empty line before the first non-import statements_after_imports = self._insert_empty_line(statements_after_imports) for name, annotation in self.toplevel_annotations.items(): annotated_assign = self._apply_annotation_to_attribute_or_global( name=name, annotation=annotation, value=None, ) toplevel_statements.append(cst.SimpleStatementLine([annotated_assign])) # TypeVar definitions could be scattered through the file, so do not # attempt to put new ones with existing ones, just add them at the top. typevars = { k: v for k, v in self.annotations.typevars.items() if k not in self.typevars } if typevars: for var, stmt in typevars.items(): toplevel_statements.append(cst.Newline()) toplevel_statements.append(stmt) self.annotation_counts.typevars_and_generics_added += 1 toplevel_statements.append(cst.Newline()) self.annotation_counts.classes_added = len(fresh_class_definitions) toplevel_statements.extend(fresh_class_definitions) return updated_node.with_changes( body=[ *statements_before_imports, *toplevel_statements, *statements_after_imports, ] ) LibCST-1.2.0/libcst/codemod/visitors/_gather_comments.py000066400000000000000000000041361456464173300232570ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re from typing import Dict, Pattern, Union import libcst as cst from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareVisitor from libcst.metadata import PositionProvider class GatherCommentsVisitor(ContextAwareVisitor): """ Collects all comments matching a certain regex and their line numbers. This visitor is useful for capturing special-purpose comments, for example ``noqa`` style lint suppression annotations. Standalone comments are assumed to affect the line following them, and inline ones are recorded with the line they are on. After visiting a CST, matching comments are collected in the ``comments`` attribute. """ METADATA_DEPENDENCIES = (PositionProvider,) def __init__(self, context: CodemodContext, comment_regex: str) -> None: super().__init__(context) #: Dictionary of comments found in the CST. Keys are line numbers, #: values are comment nodes. 
        self.comments: Dict[int, cst.Comment] = {}

        self._comment_matcher: Pattern[str] = re.compile(comment_regex)

    def visit_EmptyLine(self, node: cst.EmptyLine) -> bool:
        if node.comment is not None:
            self.handle_comment(node)
        return False

    def visit_TrailingWhitespace(self, node: cst.TrailingWhitespace) -> bool:
        if node.comment is not None:
            self.handle_comment(node)
        return False

    def handle_comment(
        self, node: Union[cst.EmptyLine, cst.TrailingWhitespace]
    ) -> None:
        comment = node.comment
        assert comment is not None  # ensured by callsites above
        if not self._comment_matcher.match(comment.value):
            return
        line = self.get_metadata(PositionProvider, comment).start.line
        if isinstance(node, cst.EmptyLine):
            # Standalone comments refer to the next line
            line += 1
        self.comments[line] = comment
LibCST-1.2.0/libcst/codemod/visitors/_gather_exports.py000066400000000000000000000133121456464173300231320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

from typing import Set, Union

import libcst as cst
import libcst.matchers as m
from libcst.codemod._context import CodemodContext
from libcst.codemod._visitor import ContextAwareVisitor
from libcst.helpers import get_full_name_for_node


class GatherExportsVisitor(ContextAwareVisitor):
    """
    Gathers all explicit exports in a module and stores them as attributes
    on the instance. Intended to be instantiated and passed to a
    :class:`~libcst.Module` :meth:`~libcst.CSTNode.visit` method in order
    to gather up information about exports specified in an ``__all__``
    variable inside a module.

    After visiting a module the following attributes will be populated:

     explicit_exported_objects
      A sequence of strings representing objects that the module exports
      directly. Note that when ``__all__`` is absent, this attribute does not
      store default exported objects by name.

    For more information on ``__all__``, please see Python's
    `Modules Documentation <https://docs.python.org/3/tutorial/modules.html>`_.
    """

    def __init__(self, context: CodemodContext) -> None:
        super().__init__(context)
        # Track any re-exported objects in an __all__ reference and whether
        # they're defined or not
        self.explicit_exported_objects: Set[str] = set()

        # Presumably at some point in the future it would be useful to grab
        # a list of all implicitly exported objects. That would go here as
        # well and would follow Python's rule for importing objects that
        # do not start with an underscore. Because of that, I named the above
        # `explicit_exported_objects` instead of just `exported_objects` so
        # that we have a reasonable place to put implicit objects in the future.
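        # e.g. after visiting a module containing `__all__ = ["foo", "bar"]`,
        # `explicit_exported_objects` will be {"foo", "bar"}.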
# Internal bookkeeping self._is_assigned_export: Set[Union[cst.Tuple, cst.List, cst.Set]] = set() self._in_assigned_export: Set[Union[cst.Tuple, cst.List, cst.Set]] = set() def visit_AnnAssign(self, node: cst.AnnAssign) -> bool: value = node.value if value: if self._handle_assign_target(node.target, value): return True return False def visit_AugAssign(self, node: cst.AugAssign) -> bool: if m.matches( node, m.AugAssign( target=m.Name("__all__"), operator=m.AddAssign(), value=m.List() | m.Tuple(), ), ): value = node.value if isinstance(value, (cst.List, cst.Tuple)): self._is_assigned_export.add(value) return True return False def visit_Assign(self, node: cst.Assign) -> bool: for target_node in node.targets: if self._handle_assign_target(target_node.target, node.value): return True return False def _handle_assign_target( self, target: cst.BaseExpression, value: cst.BaseExpression ) -> bool: target_name = get_full_name_for_node(target) if target_name == "__all__": # Assignments such as `__all__ = ["os"]` # or `__all__ = exports = ["os"]` if isinstance(value, (cst.List, cst.Tuple, cst.Set)): self._is_assigned_export.add(value) return True elif isinstance(target, cst.Tuple) and isinstance(value, cst.Tuple): # Assignments such as `__all__, x = ["os"], []` for element_idx, element_node in enumerate(target.elements): element_name = get_full_name_for_node(element_node.value) if element_name == "__all__": element_value = value.elements[element_idx].value if isinstance(element_value, (cst.List, cst.Tuple, cst.Set)): self._is_assigned_export.add(value) self._is_assigned_export.add(element_value) return True return False def visit_List(self, node: cst.List) -> bool: if node in self._is_assigned_export: self._in_assigned_export.add(node) return True return False def leave_List(self, original_node: cst.List) -> None: self._is_assigned_export.discard(original_node) self._in_assigned_export.discard(original_node) def visit_Tuple(self, node: cst.Tuple) -> bool: if node in self._is_assigned_export: self._in_assigned_export.add(node) return True return False def leave_Tuple(self, original_node: cst.Tuple) -> None: self._is_assigned_export.discard(original_node) self._in_assigned_export.discard(original_node) def visit_Set(self, node: cst.Set) -> bool: if node in self._is_assigned_export: self._in_assigned_export.add(node) return True return False def leave_Set(self, original_node: cst.Set) -> None: self._is_assigned_export.discard(original_node) self._in_assigned_export.discard(original_node) def visit_SimpleString(self, node: cst.SimpleString) -> bool: self._handle_string_export(node) return False def visit_ConcatenatedString(self, node: cst.ConcatenatedString) -> bool: self._handle_string_export(node) return False def _handle_string_export( self, node: Union[cst.SimpleString, cst.ConcatenatedString] ) -> None: if self._in_assigned_export: name = node.evaluated_value if not isinstance(name, str): return self.explicit_exported_objects.add(name) LibCST-1.2.0/libcst/codemod/visitors/_gather_global_names.py000066400000000000000000000054241456464173300240560ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import Set import libcst from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareVisitor class GatherGlobalNamesVisitor(ContextAwareVisitor): """ Gathers all globally accessible names defined in a module and stores them as attributes on the instance. Intended to be instantiated and passed to a :class:`~libcst.Module` :meth:`~libcst.CSTNode.visit` method in order to gather up information about names defined on a module. Note that this is not a substitute for scope analysis or qualified name support. Please see :ref:`libcst-scope-tutorial` for a more robust way of determining the qualified name and definition for an arbitrary node. Names that are globally accessible through imports are currently not included but can be retrieved with GatherImportsVisitor. After visiting a module the following attributes will be populated: global_names A sequence of strings representing global variables defined in the module toplevel. class_names A sequence of strings representing classes defined in the module toplevel. function_names A sequence of strings representing functions defined in the module toplevel. """ def __init__(self, context: CodemodContext) -> None: super().__init__(context) self.global_names: Set[str] = set() self.class_names: Set[str] = set() self.function_names: Set[str] = set() # Track scope nesting self.scope_depth: int = 0 def visit_ClassDef(self, node: libcst.ClassDef) -> None: if self.scope_depth == 0: self.class_names.add(node.name.value) self.scope_depth += 1 def leave_ClassDef(self, original_node: libcst.ClassDef) -> None: self.scope_depth -= 1 def visit_FunctionDef(self, node: libcst.FunctionDef) -> None: if self.scope_depth == 0: self.function_names.add(node.name.value) self.scope_depth += 1 def leave_FunctionDef(self, original_node: libcst.FunctionDef) -> None: self.scope_depth -= 1 def visit_Assign(self, node: libcst.Assign) -> None: if self.scope_depth != 0: return for assign_target in node.targets: target = assign_target.target if isinstance(target, libcst.Name): self.global_names.add(target.value) def visit_AnnAssign(self, node: libcst.AnnAssign) -> None: if self.scope_depth != 0: return target = node.target if isinstance(target, libcst.Name): self.global_names.add(target.value) LibCST-1.2.0/libcst/codemod/visitors/_gather_imports.py000066400000000000000000000145751456464173300231370ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from typing import Dict, List, Sequence, Set, Tuple, Union import libcst from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareVisitor from libcst.codemod.visitors._imports import ImportItem from libcst.helpers import get_absolute_module_from_package_for_import class _GatherImportsMixin(ContextAwareVisitor): """ A Mixin class for tracking visited imports. 
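
    Subclasses feed :class:`~libcst.Import` and :class:`~libcst.ImportFrom`
    nodes to ``_handle_Import`` / ``_handle_ImportFrom``, which populate the
    mappings documented on :class:`GatherImportsVisitor`.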
""" def __init__(self, context: CodemodContext) -> None: super().__init__(context) # Track the available imports in this transform self.module_imports: Set[str] = set() self.object_mapping: Dict[str, Set[str]] = {} # Track the aliased imports in this transform self.module_aliases: Dict[str, str] = {} self.alias_mapping: Dict[str, List[Tuple[str, str]]] = {} # Track the import for every symbol introduced into the module self.symbol_mapping: Dict[str, ImportItem] = {} def _handle_Import(self, node: libcst.Import) -> None: for name in node.names: alias = name.evaluated_alias imp = ImportItem(name.evaluated_name, alias=alias) if alias is not None: # Track this as an aliased module self.module_aliases[name.evaluated_name] = alias self.symbol_mapping[alias] = imp else: # Get the module we're importing as a string. self.module_imports.add(name.evaluated_name) self.symbol_mapping[name.evaluated_name] = imp def _handle_ImportFrom(self, node: libcst.ImportFrom) -> None: # Get the module we're importing as a string. module = get_absolute_module_from_package_for_import( self.context.full_package_name, node ) if module is None: # Can't get the absolute import from relative, so we can't # support this. return nodenames = node.names if isinstance(nodenames, libcst.ImportStar): # We cover everything, no need to bother tracking other things self.object_mapping[module] = set("*") return elif isinstance(nodenames, Sequence): # Get the list of imports we're aliasing in this import new_aliases = [ (ia.evaluated_name, ia.evaluated_alias) for ia in nodenames if ia.asname is not None ] if new_aliases: if module not in self.alias_mapping: self.alias_mapping[module] = [] # pyre-ignore We know that aliases are not None here. self.alias_mapping[module].extend(new_aliases) # Get the list of imports we're importing in this import new_objects = {ia.evaluated_name for ia in nodenames if ia.asname is None} if new_objects: if module not in self.object_mapping: self.object_mapping[module] = set() # Make sure that we don't add to a '*' module if "*" in self.object_mapping[module]: self.object_mapping[module] = set("*") return self.object_mapping[module].update(new_objects) for ia in nodenames: imp = ImportItem( module, obj_name=ia.evaluated_name, alias=ia.evaluated_alias ) key = ia.evaluated_alias or ia.evaluated_name self.symbol_mapping[key] = imp class GatherImportsVisitor(_GatherImportsMixin): """ Gathers all imports in a module and stores them as attributes on the instance. Intended to be instantiated and passed to a :class:`~libcst.Module` :meth:`~libcst.CSTNode.visit` method in order to gather up information about imports on a module. Note that this is not a substitute for scope analysis or qualified name support. Please see :ref:`libcst-scope-tutorial` for a more robust way of determining the qualified name and definition for an arbitrary node. After visiting a module the following attributes will be populated: module_imports A sequence of strings representing modules that were imported directly, such as in the case of ``import typing``. Each module directly imported but not aliased will be included here. object_mapping A mapping of strings to sequences of strings representing modules where we imported objects from, such as in the case of ``from typing import Optional``. Each from import that was not aliased will be included here, where the keys of the mapping are the module we are importing from, and the value is a sequence of objects we are importing from the module. 
module_aliases A mapping of strings representing modules that were imported and aliased, such as in the case of ``import typing as t``. Each module imported this way will be represented as a key in this mapping, and the value will be the local alias of the module. alias_mapping A mapping of strings to sequences of tuples representing modules where we imported objects from and aliased using ``as`` syntax, such as in the case of ``from typing import Optional as opt``. Each from import that was aliased will be included here, where the keys of the mapping are the module we are importing from, and the value is a tuple representing the original object name and the alias. all_imports A collection of all :class:`~libcst.Import` and :class:`~libcst.ImportFrom` statements that were encountered in the module. """ def __init__(self, context: CodemodContext) -> None: super().__init__(context) # Track all of the imports found in this transform self.all_imports: List[Union[libcst.Import, libcst.ImportFrom]] = [] def visit_Import(self, node: libcst.Import) -> None: # Track this import statement for later analysis. self.all_imports.append(node) self._handle_Import(node) def visit_ImportFrom(self, node: libcst.ImportFrom) -> None: # Track this import statement for later analysis. self.all_imports.append(node) self._handle_ImportFrom(node) LibCST-1.2.0/libcst/codemod/visitors/_gather_string_annotation_names.py000066400000000000000000000067661456464173300263700ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import cast, Collection, List, Set, Union import libcst as cst import libcst.matchers as m from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareVisitor from libcst.metadata import MetadataWrapper, QualifiedNameProvider FUNCS_CONSIDERED_AS_STRING_ANNOTATIONS = {"typing.TypeVar"} class GatherNamesFromStringAnnotationsVisitor(ContextAwareVisitor): """ Collects all names from string literals used for typing purposes. This includes annotations like ``foo: "SomeType"``, and parameters to special functions related to typing (currently only `typing.TypeVar`). After visiting, a set of all found names will be available on the ``names`` attribute of this visitor. """ METADATA_DEPENDENCIES = (QualifiedNameProvider,) def __init__( self, context: CodemodContext, typing_functions: Collection[str] = FUNCS_CONSIDERED_AS_STRING_ANNOTATIONS, ) -> None: super().__init__(context) self._typing_functions: Collection[str] = typing_functions self._annotation_stack: List[cst.CSTNode] = [] #: The set of names collected from string literals. 
self.names: Set[str] = set() def visit_Annotation(self, node: cst.Annotation) -> bool: self._annotation_stack.append(node) return True def leave_Annotation(self, original_node: cst.Annotation) -> None: self._annotation_stack.pop() def visit_Call(self, node: cst.Call) -> bool: qnames = self.get_metadata(QualifiedNameProvider, node) if any(qn.name in self._typing_functions for qn in qnames): self._annotation_stack.append(node) return True return False def leave_Call(self, original_node: cst.Call) -> None: if self._annotation_stack and self._annotation_stack[-1] == original_node: self._annotation_stack.pop() def visit_ConcatenatedString(self, node: cst.ConcatenatedString) -> bool: if self._annotation_stack: self.handle_any_string(node) return False def visit_SimpleString(self, node: cst.SimpleString) -> bool: if self._annotation_stack: self.handle_any_string(node) return False def handle_any_string( self, node: Union[cst.SimpleString, cst.ConcatenatedString] ) -> None: value = node.evaluated_value if value is None: return mod = cst.parse_module(value) extracted_nodes = m.extractall( mod, m.Name( value=m.SaveMatchedNode(m.DoNotCare(), "name"), metadata=m.MatchMetadataIfTrue( cst.metadata.ParentNodeProvider, lambda parent: not isinstance(parent, cst.Attribute), ), ) | m.SaveMatchedNode(m.Attribute(), "attribute"), metadata_resolver=MetadataWrapper(mod, unsafe_skip_copy=True), ) names = { cast(str, values["name"]) for values in extracted_nodes if "name" in values } | { name for values in extracted_nodes if "attribute" in values for name, _ in cst.metadata.scope_provider._gen_dotted_names( cast(cst.Attribute, values["attribute"]) ) } self.names.update(names) LibCST-1.2.0/libcst/codemod/visitors/_gather_unused_imports.py000066400000000000000000000130101456464173300245010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from typing import Collection, Iterable, Set, Tuple, Union import libcst as cst from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareVisitor from libcst.codemod.visitors._gather_exports import GatherExportsVisitor from libcst.codemod.visitors._gather_string_annotation_names import ( FUNCS_CONSIDERED_AS_STRING_ANNOTATIONS, GatherNamesFromStringAnnotationsVisitor, ) from libcst.metadata import ProviderT, ScopeProvider from libcst.metadata.scope_provider import _gen_dotted_names MODULES_IGNORED_BY_DEFAULT = {"__future__"} class GatherUnusedImportsVisitor(ContextAwareVisitor): """ Collects all imports from a module not directly used in the same module. Intended to be instantiated and passed to a :class:`libcst.Module` :meth:`~libcst.CSTNode.visit` method to process the full module. Note that imports that are only used indirectly (from other modules) are still collected. After visiting a module the attribute ``unused_imports`` will contain a set of unused :class:`~libcst.ImportAlias` objects, paired with their parent import node. """ # pyre-fixme[8]: Attribute has type # `Tuple[typing.Type[cst.metadata.base_provider.BaseMetadataProvider[object]]]`; # used as `Tuple[typing.Type[cst.metadata.name_provider.QualifiedNameProvider], # typing.Type[cst.metadata.scope_provider.ScopeProvider]]`. 
METADATA_DEPENDENCIES: Tuple[ProviderT] = ( *GatherNamesFromStringAnnotationsVisitor.METADATA_DEPENDENCIES, ScopeProvider, ) def __init__( self, context: CodemodContext, ignored_modules: Collection[str] = MODULES_IGNORED_BY_DEFAULT, typing_functions: Collection[str] = FUNCS_CONSIDERED_AS_STRING_ANNOTATIONS, ) -> None: super().__init__(context) self._ignored_modules: Collection[str] = ignored_modules self._typing_functions = typing_functions self._string_annotation_names: Set[str] = set() self._exported_names: Set[str] = set() #: Contains a set of (alias, parent_import) pairs that are not used #: in the module after visiting. self.unused_imports: Set[ Tuple[cst.ImportAlias, Union[cst.Import, cst.ImportFrom]] ] = set() def visit_Module(self, node: cst.Module) -> bool: export_collector = GatherExportsVisitor(self.context) node.visit(export_collector) self._exported_names = export_collector.explicit_exported_objects annotation_visitor = GatherNamesFromStringAnnotationsVisitor( self.context, typing_functions=self._typing_functions ) node.visit(annotation_visitor) self._string_annotation_names = annotation_visitor.names return True def visit_Import(self, node: cst.Import) -> bool: self.handle_import(node) return False def visit_ImportFrom(self, node: cst.ImportFrom) -> bool: module = node.module if ( not isinstance(node.names, cst.ImportStar) and module is not None and module.value not in self._ignored_modules ): self.handle_import(node) return False def handle_import(self, node: Union[cst.Import, cst.ImportFrom]) -> None: names = node.names assert not isinstance(names, cst.ImportStar) # hello, type checker for alias in names: self.unused_imports.add((alias, node)) def leave_Module(self, original_node: cst.Module) -> None: self.unused_imports = self.filter_unused_imports(self.unused_imports) def filter_unused_imports( self, candidates: Iterable[Tuple[cst.ImportAlias, Union[cst.Import, cst.ImportFrom]]], ) -> Set[Tuple[cst.ImportAlias, Union[cst.Import, cst.ImportFrom]]]: """ Return the imports in ``candidates`` which are not used. This function implements the main logic of this visitor, and is called after traversal. It calls :meth:`~is_in_use` on each import. Override this in a subclass for additional filtering. """ unused_imports = set() for alias, parent in candidates: scope = self.get_metadata(ScopeProvider, parent) if scope is None: continue if not self.is_in_use(scope, alias): unused_imports.add((alias, parent)) return unused_imports def is_in_use(self, scope: cst.metadata.Scope, alias: cst.ImportAlias) -> bool: """ Check if ``alias`` is in use in the given ``scope``. An alias is in use if it's directly referenced, exported, or appears in a string type annotation. Override this in a subclass for additional filtering. """ asname = alias.asname names = _gen_dotted_names( cst.ensure_type(asname.name, cst.Name) if asname is not None else alias.name ) for name_or_alias, _ in names: if ( name_or_alias in self._exported_names or name_or_alias in self._string_annotation_names ): return True for assignment in scope[name_or_alias]: if ( isinstance(assignment, cst.metadata.ImportAssignment) and len(assignment.references) > 0 ): return True return False LibCST-1.2.0/libcst/codemod/visitors/_imports.py000066400000000000000000000031361456464173300215740ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from dataclasses import dataclass, replace from typing import Optional from libcst.helpers import get_absolute_module_from_package @dataclass(frozen=True) class ImportItem: """Representation of individual import items for codemods.""" module_name: str obj_name: Optional[str] = None alias: Optional[str] = None relative: int = 0 def __post_init__(self) -> None: if self.module_name is None: object.__setattr__(self, "module_name", "") elif self.module_name.startswith("."): mod = self.module_name.lstrip(".") rel = self.relative + len(self.module_name) - len(mod) object.__setattr__(self, "module_name", mod) object.__setattr__(self, "relative", rel) @property def module(self) -> str: return "." * self.relative + self.module_name def resolve_relative(self, package_name: Optional[str]) -> "ImportItem": """Return an ImportItem with an absolute module name if possible.""" mod = self # `import ..a` -> `from .. import a` if mod.relative and mod.obj_name is None: mod = replace(mod, module_name="", obj_name=mod.module_name) if package_name is None: return mod m = get_absolute_module_from_package( package_name, mod.module_name or None, self.relative ) return mod if m is None else replace(mod, module_name=m, relative=0) LibCST-1.2.0/libcst/codemod/visitors/_remove_imports.py000066400000000000000000000500161456464173300231500ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union import libcst as cst from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareTransformer, ContextAwareVisitor from libcst.codemod.visitors._gather_unused_imports import GatherUnusedImportsVisitor from libcst.helpers import ( get_absolute_module_from_package_for_import, get_full_name_for_node, ) from libcst.metadata import Assignment, ProviderT, ScopeProvider class RemovedNodeVisitor(ContextAwareVisitor): def _remove_imports_from_import_stmt( self, local_name: str, import_node: cst.Import ) -> None: for import_alias in import_node.names: if import_alias.evaluated_alias is None: prefix = import_alias.evaluated_name else: prefix = import_alias.evaluated_alias if local_name == prefix or local_name.startswith(f"{prefix}."): RemoveImportsVisitor.remove_unused_import( self.context, import_alias.evaluated_name, asname=import_alias.evaluated_alias, ) def _remove_imports_from_importfrom_stmt( self, local_name: str, import_node: cst.ImportFrom ) -> None: names = import_node.names if isinstance(names, cst.ImportStar): # We don't handle removing this, so ignore it. return module_name = get_absolute_module_from_package_for_import( self.context.full_package_name, import_node ) if module_name is None: raise Exception("Cannot look up absolute module from relative import!") # We know any local names will refer to this as an alias if # there is one, and as the original name if there is not one for import_alias in names: if import_alias.evaluated_alias is None: prefix = import_alias.evaluated_name else: prefix = import_alias.evaluated_alias if local_name == prefix or local_name.startswith(f"{prefix}."): RemoveImportsVisitor.remove_unused_import( self.context, module_name, obj=import_alias.evaluated_name, asname=import_alias.evaluated_alias, ) def _visit_name_attr_alike(self, node: Union[cst.Name, cst.Attribute]) -> None: # Look up the local name of this node. 
        local_name = get_full_name_for_node(node)
        if local_name is None:
            return

        # Look up the scope for this node, remove the import that caused it to exist.
        metadata_wrapper = self.context.wrapper
        if metadata_wrapper is None:
            raise Exception("Cannot look up import, metadata is not computed for node!")
        scope_provider = metadata_wrapper.resolve(ScopeProvider)
        try:
            scope = scope_provider[node]
            if scope is None:
                # This object has no scope, so we can't remove it.
                return
        except KeyError:
            # This object has no scope, so we can't remove it.
            return

        while True:
            for assignment in scope.assignments[node] or set():
                # We only care about non-builtins.
                if isinstance(assignment, Assignment):
                    import_node = assignment.node
                    if isinstance(import_node, cst.Import):
                        self._remove_imports_from_import_stmt(local_name, import_node)
                    elif isinstance(import_node, cst.ImportFrom):
                        self._remove_imports_from_importfrom_stmt(
                            local_name, import_node
                        )
            if scope is scope.parent:
                break
            scope = scope.parent

    def visit_Name(self, node: cst.Name) -> None:
        self._visit_name_attr_alike(node)

    def visit_Attribute(self, node: cst.Attribute) -> None:
        self._visit_name_attr_alike(node)


class RemoveImportsVisitor(ContextAwareTransformer):
    """
    Attempt to remove given imports from a module, dependent on whether
    there are any uses of the imported objects.

    Given a :class:`~libcst.codemod.CodemodContext` and a sequence of tuples,
    each specifying a module to remove as a string, optionally an object
    being imported from that module, and optionally an alias assigned to
    that imported object, this ensures that the import no longer exists as
    long as there are no remaining references.

    Note that static analysis is able to determine safely whether an import
    is still needed given a particular module, but it is currently unable to
    determine whether an imported object is re-exported and used inside
    another module unless that object appears in an ``__all__`` list.

    This is one of the transforms that is available automatically to you when
    running a codemod. To use it in this manner, import
    :class:`~libcst.codemod.visitors.RemoveImportsVisitor` and then call the
    static
    :meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import`
    method, giving it the current context (found as ``self.context`` for all
    subclasses of :class:`~libcst.codemod.Codemod`), the module you wish to
    remove and optionally an object you wish to stop importing as well as an
    alias that the object is currently assigned to.

    For example::

        RemoveImportsVisitor.remove_unused_import(self.context, "typing", "Optional")

    This will remove any ``from typing import Optional`` that exists in the
    module as long as there are no uses of ``Optional`` in that module.

    As another example::

        RemoveImportsVisitor.remove_unused_import(self.context, "typing")

    This will remove any ``import typing`` that exists in the module, as
    long as there are no references to ``typing`` in that module, including
    references such as ``typing.Optional``.

    Additionally, :class:`~libcst.codemod.visitors.RemoveImportsVisitor`
    includes a convenience function
    :meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import_by_node`
    which will attempt to schedule removal of all imports referenced in that
    node and its children. This is especially useful inside transforms when
    you are going to remove a node using :func:`~libcst.RemoveFromParent`
    to get rid of a node.
    For example::

        def leave_AnnAssign(
            self,
            original_node: cst.AnnAssign,
            updated_node: cst.AnnAssign,
        ) -> cst.RemovalSentinel:
            # Remove all annotated assignment statements, clean up imports.
            RemoveImportsVisitor.remove_unused_import_by_node(self.context, original_node)
            return cst.RemovalFromParent()

    This will remove all annotated assignment statements from a module as well
    as clean up any imports that were only referenced in those assignments.
    Note that we pass the ``original_node`` to the helper function as it
    uses scope analysis under the hood which is only computed on the original
    tree.

    Note that this is a subclass of :class:`~libcst.CSTTransformer` so it is
    possible to instantiate it and pass it to a :class:`~libcst.Module`
    :meth:`~libcst.CSTNode.visit` method. However, it is far easier to use
    the automatic transform feature of :class:`~libcst.codemod.CodemodCommand`
    and schedule an import to be removed by calling
    :meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import`
    """

    CONTEXT_KEY = "RemoveImportsVisitor"
    METADATA_DEPENDENCIES: Tuple[ProviderT] = (
        *GatherUnusedImportsVisitor.METADATA_DEPENDENCIES,
    )

    @staticmethod
    def _get_imports_from_context(
        context: CodemodContext,
    ) -> List[Tuple[str, Optional[str], Optional[str]]]:
        unused_imports = context.scratch.get(RemoveImportsVisitor.CONTEXT_KEY, [])
        if not isinstance(unused_imports, list):
            raise Exception("Logic error!")
        return unused_imports

    @staticmethod
    def remove_unused_import(
        context: CodemodContext,
        module: str,
        obj: Optional[str] = None,
        asname: Optional[str] = None,
    ) -> None:
        """
        Schedule an import to be removed in a future invocation of this class
        by updating the ``context`` to include the ``module`` and optionally
        ``obj`` which is currently imported as well as optionally ``alias``
        that the imported ``module`` or ``obj`` is aliased to. When
        subclassing from :class:`~libcst.codemod.CodemodCommand`, this will be
        performed for you after your transform finishes executing. If you are
        subclassing from a :class:`~libcst.codemod.Codemod` instead, you will
        need to call the :meth:`~libcst.codemod.Codemod.transform_module`
        method on the module under modification with an instance of this class
        after performing your transform.

        Note that if the particular ``module`` or ``obj`` you are requesting
        to remove is still in use somewhere in the current module at the time
        of executing :meth:`~libcst.codemod.Codemod.transform_module` on an
        instance of :class:`~libcst.codemod.visitors.RemoveImportsVisitor`,
        this will perform no action in order to avoid removing an in-use
        import.
        """
        unused_imports = RemoveImportsVisitor._get_imports_from_context(context)
        unused_imports.append((module, obj, asname))
        context.scratch[RemoveImportsVisitor.CONTEXT_KEY] = unused_imports

    @staticmethod
    def remove_unused_import_by_node(
        context: CodemodContext, node: cst.CSTNode
    ) -> None:
        """
        Schedule any imports referenced by ``node`` or one of its children
        to be removed in a future invocation of this class by updating the
        ``context`` to include the ``module``, ``obj`` and ``alias`` for each
        import in question. When subclassing from
        :class:`~libcst.codemod.CodemodCommand`, this will be performed for
        you after your transform finishes executing. If you are subclassing
        from a :class:`~libcst.codemod.Codemod` instead, you will need to
        call the :meth:`~libcst.codemod.Codemod.transform_module` method on
        the module under modification with an instance of this class after
        performing your transform.
Note that all imports that are referenced by this ``node`` or its children will only be removed if they are not in use at the time of executing :meth:`~libcst.codemod.Codemod.transform_module` on an instance of :class:`~libcst.codemod.visitors.RemoveImportsVisitor` in order to avoid removing an in-use import. """ # Special case both Import and ImportFrom so they can be # directly removed here. if isinstance(node, cst.Import): for import_alias in node.names: RemoveImportsVisitor.remove_unused_import( context, import_alias.evaluated_name, asname=import_alias.evaluated_alias, ) elif isinstance(node, cst.ImportFrom): names = node.names if isinstance(names, cst.ImportStar): # We don't handle removing this, so ignore it. return module_name = get_absolute_module_from_package_for_import( context.full_package_name, node ) if module_name is None: raise Exception("Cannot look up absolute module from relative import!") for import_alias in names: RemoveImportsVisitor.remove_unused_import( context, module_name, obj=import_alias.evaluated_name, asname=import_alias.evaluated_alias, ) else: # Look up all children that could have been imported. Any that # we find will be scheduled for removal. node.visit(RemovedNodeVisitor(context)) def __init__( self, context: CodemodContext, unused_imports: Sequence[Tuple[str, Optional[str], Optional[str]]] = (), ) -> None: # Allow for instantiation from either a context (used when multiple transforms # get chained) or from a direct instantiation. super().__init__(context) all_unused_imports: List[Tuple[str, Optional[str], Optional[str]]] = [ *RemoveImportsVisitor._get_imports_from_context(context), *unused_imports, ] self.unused_module_imports: Dict[str, Optional[str]] = { module: alias for module, obj, alias in all_unused_imports if obj is None } self.unused_obj_imports: Dict[str, Set[Tuple[str, Optional[str]]]] = {} for module, obj, alias in all_unused_imports: if obj is None: continue if module not in self.unused_obj_imports: self.unused_obj_imports[module] = set() self.unused_obj_imports[module].add((obj, alias)) self._unused_imports: Dict[ cst.ImportAlias, Union[cst.Import, cst.ImportFrom] ] = {} def visit_Module(self, node: cst.Module) -> None: visitor = GatherUnusedImportsVisitor(self.context) node.visit(visitor) self._unused_imports = {k: v for (k, v) in visitor.unused_imports} def leave_Import( self, original_node: cst.Import, updated_node: cst.Import ) -> Union[cst.Import, cst.RemovalSentinel]: names_to_keep = [] for import_alias in original_node.names: if import_alias.evaluated_name not in self.unused_module_imports: # This is a keeper since we aren't removing it names_to_keep.append(import_alias) continue if ( import_alias.evaluated_alias != self.unused_module_imports[import_alias.evaluated_name] ): # This is a keeper since the alias does not match # what we are looking for. names_to_keep.append(import_alias) continue # Now that we know we want to remove this module, figure out if # there are any live references to it. if import_alias not in self._unused_imports: names_to_keep.append(import_alias) continue # no changes if names_to_keep == original_node.names: return updated_node # Now, either remove this statement or remove the imports we are # deleting from this statement. if len(names_to_keep) == 0: return cst.RemoveFromParent() if names_to_keep[-1] != original_node.names[-1]: # Remove trailing comma in order to not mess up import statements. 
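# (Editor's note, illustrative example) Given `import a, b` where `b` is # scheduled for removal, the last kept alias `a` may still carry an # explicit trailing comma; resetting it to cst.MaybeSentinel.DEFAULT below # lets the code generator decide whether a comma is needed, so we never # emit the invalid `import a,`.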
names_to_keep = [ *names_to_keep[:-1], names_to_keep[-1].with_changes(comma=cst.MaybeSentinel.DEFAULT), ] return updated_node.with_changes(names=names_to_keep) def _process_importfrom_aliases( self, updated_node: cst.ImportFrom, names: Iterable[cst.ImportAlias], module_name: str, ) -> Dict[str, Any]: updates = {} names_to_keep = [] objects_to_remove = self.unused_obj_imports[module_name] for import_alias in names: # Figure out if it is in our list of things to kill for name, alias in objects_to_remove: if ( name == import_alias.evaluated_name and alias == import_alias.evaluated_alias ): break else: # This is a keeper, we don't have it on our list. names_to_keep.append(import_alias) continue # Now that we know we want to remove this object, figure out if # there are any live references to it. if import_alias not in self._unused_imports: names_to_keep.append(import_alias) continue # We are about to remove `import_alias`. Check if there are any # trailing comments and reparent them to the previous import. # We only do this in case there's a trailing comma, otherwise the # entire import statement is going to be removed anyway. comma = import_alias.comma if isinstance(comma, cst.Comma): if len(names_to_keep) != 0: # there is a previous import alias prev = names_to_keep[-1] if isinstance(prev.comma, cst.Comma): prev = prev.with_deep_changes( prev.comma, whitespace_after=_merge_whitespace_after( prev.comma.whitespace_after, comma.whitespace_after, ), ) else: # The previous alias didn't have a trailing comma. This can # occur if the alias was generated, instead of being parsed # from source. prev = prev.with_changes(comma=comma) names_to_keep[-1] = prev else: # No previous import alias, need to attach comment to `ImportFrom`. # We can only do this if there was a leftparen on the import # statement. Otherwise there can't be any standalone comments # anyway, so it's fine to skip this logic. lpar = updated_node.lpar if isinstance(lpar, cst.LeftParen): updates["lpar"] = lpar.with_changes( whitespace_after=_merge_whitespace_after( lpar.whitespace_after, comma.whitespace_after, ) ) updates["names"] = names_to_keep return updates def leave_ImportFrom( self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom ) -> Union[cst.ImportFrom, cst.RemovalSentinel]: names = original_node.names if isinstance(names, cst.ImportStar): # This is a star import, so we won't remove it. return updated_node # Make sure we actually know the absolute module. module_name = get_absolute_module_from_package_for_import( self.context.full_package_name, updated_node ) if module_name is None or module_name not in self.unused_obj_imports: # This node isn't on our list of todos, so let's bail. return updated_node updates = self._process_importfrom_aliases(updated_node, names, module_name) names_to_keep = updates["names"] # no changes if names_to_keep == names: return updated_node # Now, either remove this statement or remove the imports we are # deleting from this statement. if len(names_to_keep) == 0: return cst.RemoveFromParent() if names_to_keep[-1] != names[-1]: # Remove trailing comma in order to not mess up import statements. 
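# (Editor's note, illustrative example) This mirrors the trailing-comma # cleanup in leave_Import: for `from m import a, b` with `b` removed, # resetting the final alias's comma avoids emitting `from m import a,`, # which is a syntax error without surrounding parentheses.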
names_to_keep = [ *names_to_keep[:-1], names_to_keep[-1].with_changes(comma=cst.MaybeSentinel.DEFAULT), ] updates["names"] = names_to_keep return updated_node.with_changes(**updates) def _merge_whitespace_after( left: cst.BaseParenthesizableWhitespace, right: cst.BaseParenthesizableWhitespace ) -> cst.BaseParenthesizableWhitespace: if not isinstance(right, cst.ParenthesizedWhitespace): return left if not isinstance(left, cst.ParenthesizedWhitespace): return right # Preserve left's existing standalone comment lines, then append the # comment lines trailing the removed alias; otherwise comments already # attached to the kept alias would be dropped. return left.with_changes( empty_lines=( *left.empty_lines, *(line for line in right.empty_lines if line.comment is not None), ), ) LibCST-1.2.0/libcst/codemod/visitors/tests/000077500000000000000000000000001456464173300205255ustar00rootroot00000000000000LibCST-1.2.0/libcst/codemod/visitors/tests/__init__.py000066400000000000000000000002651456464173300226410ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # LibCST-1.2.0/libcst/codemod/visitors/tests/test_add_imports.py000066400000000000000000000562451456464173300244550ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod import CodemodContext, CodemodTest from libcst.codemod.visitors import AddImportsVisitor, ImportItem class TestAddImportsCodemod(CodemodTest): TRANSFORM = AddImportsVisitor def test_noop(self) -> None: """ Should do nothing. """ before = """ def foo() -> None: pass def bar() -> int: return 5 """ after = """ def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, []) def test_add_module_simple(self) -> None: """ Should add module as an import. """ before = """ def foo() -> None: pass def bar() -> int: return 5 """ after = """ import a.b.c def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", None, None)]) def test_dont_add_module_simple(self) -> None: """ Should not add module as an import since it exists """ before = """ import a.b.c def foo() -> None: pass def bar() -> int: return 5 """ after = """ import a.b.c def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", None, None)]) def test_add_module_alias_simple(self) -> None: """ Should add module with alias as an import. """ before = """ def foo() -> None: pass def bar() -> int: return 5 """ after = """ import a.b.c as d def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", None, "d")]) def test_dont_add_module_alias_simple(self) -> None: """ Should not add module with alias as an import since it exists """ before = """ import a.b.c as d def foo() -> None: pass def bar() -> int: return 5 """ after = """ import a.b.c as d def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", None, "d")]) def test_add_module_complex(self) -> None: """ Should add some modules as an import. 
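A module that is already imported (``argparse`` here) should not be duplicated, and aliased requests should be rendered in ``import jkl as h`` form.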
""" before = """ import argparse import sys def foo() -> None: pass def bar() -> int: return 5 """ after = """ import argparse import sys import a.b.c import defg.hi import jkl as h import i.j as k def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ ImportItem("a.b.c", None, None), ImportItem("defg.hi", None, None), ImportItem("argparse", None, None), ImportItem("jkl", None, "h"), ImportItem("i.j", None, "k"), ], ) def test_add_object_simple(self) -> None: """ Should add object as an import. """ before = """ def foo() -> None: pass def bar() -> int: return 5 """ after = """ from a.b.c import D def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", "D", None)]) def test_add_object_alias_simple(self) -> None: """ Should add object with alias as an import. """ before = """ def foo() -> None: pass def bar() -> int: return 5 """ after = """ from a.b.c import D as E def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", "D", "E")]) def test_add_future(self) -> None: """ Should add future import before any other imports. """ before = """ import unittest import abc def foo() -> None: pass def bar() -> int: return 5 """ after = """ from __future__ import dummy_feature import unittest import abc def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("__future__", "dummy_feature", None)] ) def test_dont_add_object_simple(self) -> None: """ Should not add object as an import since it exists. """ before = """ from a.b.c import D def foo() -> None: pass def bar() -> int: return 5 """ after = """ from a.b.c import D def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", "D", None)]) def test_dont_add_object_alias_simple(self) -> None: """ Should not add object as an import since it exists. 
""" before = """ from a.b.c import D as E def foo() -> None: pass def bar() -> int: return 5 """ after = """ from a.b.c import D as E def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", "D", "E")]) def test_add_object_modify_simple(self) -> None: """ Should modify existing import to add new object """ before = """ from a.b.c import E, F def foo() -> None: pass def bar() -> int: return 5 """ after = """ from a.b.c import D, E, F def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", "D", None)]) def test_add_object_alias_modify_simple(self) -> None: """ Should modify existing import with alias to add new object """ before = """ from a.b.c import E, F def foo() -> None: pass def bar() -> int: return 5 """ after = """ from a.b.c import D as _, E, F def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", "D", "_")]) def test_add_object_modify_complex(self) -> None: """ Should modify existing import to add new object """ before = """ from a.b.c import E, F, G as H from d.e.f import Foo, Bar def foo() -> None: pass def bar() -> int: return 5 """ after = """ from a.b.c import D, E, F, G as H from d.e.f import Baz as Qux, Foo, Bar from g.h.i import V as W, X, Y, Z def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ ImportItem("a.b.c", "D", None), ImportItem("a.b.c", "F", None), ImportItem("a.b.c", "G", "H"), ImportItem("d.e.f", "Foo", None), ImportItem("g.h.i", "Z", None), ImportItem("g.h.i", "X", None), ImportItem("d.e.f", "Bar", None), ImportItem("d.e.f", "Baz", "Qux"), ImportItem("g.h.i", "Y", None), ImportItem("g.h.i", "V", "W"), ImportItem("a.b.c", "F", None), ], ) def test_add_and_modify_complex(self) -> None: """ Should correctly add both module and object imports """ before = """ import argparse import sys from a.b.c import E, F from d.e.f import Foo, Bar import bar as baz def foo() -> None: pass def bar() -> int: return 5 """ after = """ import argparse import sys from a.b.c import D, E, F from d.e.f import Foo, Bar import bar as baz import foo import qux as quux from g.h.i import X, Y, Z def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ ImportItem("a.b.c", "D", None), ImportItem("a.b.c", "F", None), ImportItem("d.e.f", "Foo", None), ImportItem("sys", None, None), ImportItem("g.h.i", "Z", None), ImportItem("g.h.i", "X", None), ImportItem("d.e.f", "Bar", None), ImportItem("g.h.i", "Y", None), ImportItem("foo", None, None), ImportItem("a.b.c", "F", None), ImportItem("bar", None, "baz"), ImportItem("qux", None, "quux"), ], ) def test_add_import_preserve_doctring_simple(self) -> None: """ Should preserve any doctring if adding to the beginning. """ before = """ # This is some docstring def foo() -> None: pass def bar() -> int: return 5 """ after = """ # This is some docstring from a.b.c import D def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod(before, after, [ImportItem("a.b.c", "D", None)]) def test_add_import_preserve_doctring_multiples(self) -> None: """ Should preserve any doctring if adding to the beginning. 
""" before = """ # This is some docstring def foo() -> None: pass def bar() -> int: return 5 """ after = """ # This is some docstring import argparse from a.b.c import D def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("a.b.c", "D", None), ImportItem("argparse", None, None)], ) def test_strict_module_no_imports(self) -> None: """ First added import in strict module should go after __strict__ flag. """ before = """ __strict__ = True class Foo: pass """ after = """ __strict__ = True import argparse class Foo: pass """ self.assertCodemod(before, after, [ImportItem("argparse", None, None)]) def test_strict_module_with_imports(self) -> None: """ First added import in strict module should go after __strict__ flag. """ before = """ __strict__ = True import unittest class Foo: pass """ after = """ __strict__ = True import unittest import argparse class Foo: pass """ self.assertCodemod(before, after, [ImportItem("argparse", None, None)]) def test_dont_add_relative_object_simple(self) -> None: """ Should not add object as an import since it exists. """ before = """ from .c import D def foo() -> None: pass def bar() -> int: return 5 """ after = """ from .c import D def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("a.b.c", "D", None)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_add_object_relative_modify_simple(self) -> None: """ Should modify existing import to add new object """ before = """ from .c import E, F def foo() -> None: pass def bar() -> int: return 5 """ after = """ from .c import D, E, F def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("a.b.c", "D", None)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_import_order(self) -> None: """ The imports should be in alphabetic order of added imports, added import alias, original imports. """ before = """ from a import b, e, h """ after = """ from a import c, f, d as x, g as y, b, e, h """ self.assertCodemod( before, after, [ ImportItem("a", "f", None), ImportItem("a", "g", "y"), ImportItem("a", "c", None), ImportItem("a", "d", "x"), ], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_add_explicit_relative(self) -> None: """ Should add a relative import from .. . """ before = """ def foo() -> None: pass def bar() -> int: return 5 """ after = """ from .. import a def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("a", None, None, 2)], ) def test_add_explicit_relative_alias(self) -> None: """ Should add a relative import from .. . """ before = """ def foo() -> None: pass def bar() -> int: return 5 """ after = """ from .. import a as foo def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("a", None, "foo", 2)], ) def test_add_explicit_relative_object_simple(self) -> None: """ Should add a relative import. """ before = """ def foo() -> None: pass def bar() -> int: return 5 """ after = """ from ..a import B def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("a", "B", None, 2)], ) def test_dont_add_explicit_relative_object_simple(self) -> None: """ Should not add object as an import since it exists. 
""" before = """ from ..c import D def foo() -> None: pass def bar() -> int: return 5 """ after = """ from ..c import D def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("c", "D", None, 2)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_add_object_explicit_relative_modify_simple(self) -> None: """ Should modify existing import to add new object. """ before = """ from ..c import E, F def foo() -> None: pass def bar() -> int: return 5 """ after = """ from ..c import D, E, F def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("c", "D", None, 2)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_add_object_resolve_explicit_relative_modify_simple(self) -> None: """ Should merge a relative new module with an absolute existing one. """ before = """ from ..c import E, F def foo() -> None: pass def bar() -> int: return 5 """ after = """ from ..c import D, E, F def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("c", "D", None, 2)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_add_object_resolve_dotted_relative_modify_simple(self) -> None: """ Should merge a relative new module with an absolute existing one. """ before = """ from ..c import E, F def foo() -> None: pass def bar() -> int: return 5 """ after = """ from ..c import D, E, F def foo() -> None: pass def bar() -> int: return 5 """ self.assertCodemod( before, after, [ImportItem("..c", "D", None)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_import_in_docstring_module(self) -> None: """ The import should be added after module docstring. """ before = """ '''Docstring.''' import typing """ after = """ '''Docstring.''' from __future__ import annotations import typing """ self.assertCodemod( before, after, [ImportItem("__future__", "annotations", None)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_import_in_module_with_standalone_string_not_a_docstring( self, ) -> None: """ The import should be added after the __future__ imports. """ before = """ from __future__ import annotations from __future__ import division '''docstring.''' def func(): pass """ after = """ from __future__ import annotations from __future__ import division import typing '''docstring.''' def func(): pass """ self.assertCodemod( before, after, [ImportItem("typing", None, None)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_add_at_first_block(self) -> None: """ Should add the import only at the end of the first import block. """ before = """ import a import b e() import c import d """ after = """ import a import b import e e() import c import d """ self.assertCodemod(before, after, [ImportItem("e", None, None)]) def test_add_no_import_block_before_statement(self) -> None: """ Should add the import before the call. 
""" before = """ '''docstring''' e() import a import b """ after = """ '''docstring''' import c e() import a import b """ self.assertCodemod(before, after, [ImportItem("c", None, None)]) def test_do_not_add_existing(self) -> None: """ Should not add the new object import at existing import since it's not at the top """ before = """ '''docstring''' e() import a import b from c import f """ after = """ '''docstring''' from c import e e() import a import b from c import f """ self.assertCodemod(before, after, [ImportItem("c", "e", None)]) def test_add_existing_at_top(self) -> None: """ Should add new import at exisitng from import at top """ before = """ '''docstring''' from c import d e() import a import b from c import f """ after = """ '''docstring''' from c import e, x, d e() import a import b from c import f """ self.assertCodemod( before, after, [ImportItem("c", "x", None), ImportItem("c", "e", None)] ) LibCST-1.2.0/libcst/codemod/visitors/tests/test_apply_type_annotations.py000066400000000000000000001547601456464173300267560ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import sys import textwrap import unittest from typing import Type from libcst import parse_module from libcst.codemod import Codemod, CodemodContext, CodemodTest from libcst.codemod.visitors._apply_type_annotations import ( AnnotationCounts, ApplyTypeAnnotationsVisitor, ) from libcst.testing.utils import data_provider class TestApplyAnnotationsVisitor(CodemodTest): TRANSFORM: Type[Codemod] = ApplyTypeAnnotationsVisitor def run_simple_test_case( self, stub: str, before: str, after: str, ) -> None: context = CodemodContext() ApplyTypeAnnotationsVisitor.store_stub_in_context( context, parse_module(textwrap.dedent(stub.rstrip())) ) self.assertCodemod(before, after, context_override=context) def run_test_case_with_flags( self, stub: str, before: str, after: str, **kwargs: bool, ) -> None: context = CodemodContext() ApplyTypeAnnotationsVisitor.store_stub_in_context( context, parse_module(textwrap.dedent(stub.rstrip())) ) # Test setting the flag on the codemod instance. # pyre-fixme[6]: Expected `Optional[typing.Sequence[str]]` for 4th param but # got `Dict[str, bool]`. # pyre-fixme[6]: Expected `Optional[str]` for 4th param but got `Dict[str, # bool]`. # pyre-fixme[6]: Expected `bool` for 4th param but got `Dict[str, bool]`. self.assertCodemod(before, after, context_override=context, **kwargs) # Test setting the flag when storing the stub in the context. context = CodemodContext() ApplyTypeAnnotationsVisitor.store_stub_in_context( context, parse_module(textwrap.dedent(stub.rstrip())), **kwargs, ) self.assertCodemod(before, after, context_override=context) @data_provider( { "simple": ( """ bar: int = ... """, """ bar = foo() """, """ bar: int = foo() """, ), "simple_with_existing": ( """ bar: int = ... """, """ bar: str = foo() """, """ bar: str = foo() """, ), "with_separate_declaration": ( """ x: int = ... y: int = ... z: int = ... """, """ x = y = z = 1 """, """ x: int y: int z: int x = y = z = 1 """, ), "needs_added_import": ( """ FOO: a.b.Example = ... """, """ FOO = bar() """, """ from a.b import Example FOO: Example = bar() """, ), "with_generic": ( """ FOO: Union[a.b.Example, int] = ... 
""", """ FOO = bar() """, """ from a.b import Example FOO: Union[Example, int] = bar() """, ), "with_relative_imports": ( """ from .relative0 import T0 from ..relative1 import T1 from . import relative2 x0: typing.Optional[T0] x1: typing.Optional[T1] x2: typing.Optional[relative2.T2] """, """ x0 = None x1 = None x2 = None """, """ from ..relative1 import T1 from .relative0 import T0 from .relative2 import T2 from typing import Optional x0: Optional[T0] = None x1: Optional[T1] = None x2: Optional[T2] = None """, ), "splitting_multi_assigns": ( """ a: str = ... x: int = ... y: int = ... _: str = ... z: str = ... """, """ a = 'a' x, y = 1, 2 _, z = 'hello world'.split() """, """ x: int y: int z: str a: str = 'a' x, y = 1, 2 _, z = 'hello world'.split() """, ), } ) def test_annotate_globals(self, stub: str, before: str, after: str) -> None: self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( { "basic_return": ( """ def foo() -> int: ... """, """ def foo(): return 1 """, """ def foo() -> int: return 1 """, ), "return_with_existing_param": ( """ def foo(x: int) -> str: ... """, """ def foo(x: str): pass """, """ def foo(x: str) -> str: pass """, ), "param_with_existng_return": ( """ def foo(x: int) -> int: ... """, """ def foo(x) -> int: return x """, """ def foo(x: int) -> int: return x """, ), "return_and_params_general": ( """ def foo( b: str, c: int = ..., *, d: str = ..., e: int, f: int = ... ) -> int: ... """, """ def foo( b, c=5, *, d="a", e, f=10 ) -> int: return 1 """, """ def foo( b: str, c: int=5, *, d: str="a", e: int, f: int=10 ) -> int: return 1 """, ), "with_import__basic": ( """ def foo() -> bar.Baz: ... """, """ def foo(): return returns_baz() """, """ from bar import Baz def foo() -> Baz: return returns_baz() """, ), "with_import__unneeded_explicit": ( """ import bar def foo() -> bar.Baz: ... """, """ def foo(): return returns_baz() """, """ from bar import Baz def foo() -> Baz: return returns_baz() """, ), # Keep the existing `import A` instead of using `from A import B`. "with_import__preexisting": ( """ def foo() -> bar.Baz: ... """, """ import bar def foo(): return returns_baz() """, """ import bar def foo() -> bar.Baz: return returns_baz() """, ), "with_as_import": ( """ from bar import A as B def foo(x: B): ... """, """ def foo(x): pass """, """ from bar import A as B def foo(x: B): pass """, ), "with_conflicting_imported_symbols": ( """ import a.foo as bar from b.c import Baz as B import d def f(a: d.A, b: B) -> bar.B: ... """, """ def f(a, b): pass """, """ import a.foo as bar from b.c import Baz as B from d import A def f(a: A, b: B) -> bar.B: pass """, ), "with_conflicts_between_imported_and_existing_symbols": ( """ from a import A from b import B def f(x: A, y: B) -> None: ... """, """ from b import A, B def f(x, y): y = A(x) z = B(y) """, """ from b import A, B import a def f(x: a.A, y: B) -> None: y = A(x) z = B(y) """, ), "with_nested_import": ( """ def foo(x: django.http.response.HttpResponse) -> str: ... """, """ def foo(x) -> str: pass """, """ from django.http.response import HttpResponse def foo(x: HttpResponse) -> str: pass """, ), "no_override_existing": ( """ def foo(x: int = 1) -> List[str]: ... """, """ from typing import Iterable, Any def foo(x = 1) -> Iterable[Any]: return [''] """, """ from typing import Iterable, Any def foo(x: int = 1) -> Iterable[Any]: return [''] """, ), "with_typing_import__basic": ( """ from typing import List def foo() -> List[int]: ... 
""", """ def foo(): return [1] """, """ from typing import List def foo() -> List[int]: return [1] """, ), "with_typing_import__add_to_preexisting_line": ( """ from typing import List def foo() -> List[int]: ... """, """ from typing import Union def foo(): return [1] """, """ from typing import List, Union def foo() -> List[int]: return [1] """, ), "add_imports_for_nested_types": ( """ def foo(x: int) -> Optional[a.b.Example]: ... """, """ def foo(x: int): pass """, """ from a.b import Example def foo(x: int) -> Optional[Example]: pass """, ), "add_imports_for_generics": ( """ def foo(x: int) -> typing.Optional[Example]: ... """, """ def foo(x: int): pass """, """ from typing import Optional def foo(x: int) -> Optional[Example]: pass """, ), "add_imports_for_doubly_nested_types": ( """ def foo(x: int) -> List[Union[a.b.Example, str]]: ... """, """ def foo(x: int): return [barfoo(), ""] """, """ from a.b import Example def foo(x: int) -> List[Union[Example, str]]: return [barfoo(), ""] """, ), "deeply_nested_example_with_multiline_annotation": ( """ def foo(x: int) -> Union[ Coroutine[Any, Any, django.http.response.HttpResponse], str ]: ... """, """ def foo(x: int): pass """, """ from django.http.response import HttpResponse def foo(x: int) -> Union[ Coroutine[Any, Any, HttpResponse], str ]: pass """, ), "do_not_add_imports_inside_of_Type": ( """ from typing import Type def foo() -> Type[foo.A]: ... """, """ def foo(): class A: x = 1 return A """, """ from typing import Type def foo() -> Type[foo.A]: class A: x = 1 return A """, ), # The following two tests verify that we can annotate functions # with async and decorator information, regardless of whether this # is part of the stub file. "async_with_decorators__full_stub": ( """ @second_decorator @first_decorator(5) async def async_with_decorators(r: Request, b: bool) -> django.http.response.HttpResponse: ... """, """ @second_decorator @first_decorator(5) async def async_with_decorators(r, b): return respond(r, b) """, """ from django.http.response import HttpResponse @second_decorator @first_decorator(5) async def async_with_decorators(r: Request, b: bool) -> HttpResponse: return respond(r, b) """, ), "async_with_decorators__bare_stub": ( """ def async_with_decorators(r: Request, b: bool) -> django.http.response.HttpResponse: ... """, """ @second_decorator @first_decorator(5) async def async_with_decorators(r, b): return respond(r, b) """, """ from django.http.response import HttpResponse @second_decorator @first_decorator(5) async def async_with_decorators(r: Request, b: bool) -> HttpResponse: return respond(r, b) """, ), "with_variadic_arguments": ( """ def incomplete_stubs_with_stars( x: int, *args, **kwargs, ) -> None: ... """, """ def incomplete_stubs_with_stars( x, *args: P.args, **kwargs: P.kwargs, ): pass """, """ def incomplete_stubs_with_stars( x: int, *args: P.args, **kwargs: P.kwargs, ) -> None: pass """, ), # test cases named with the REQUIRES_PREEXISTING prefix are verifying # that certain special cases work if the stub and the existing code # happen to align well, but none of these cases are guaranteed to work # in general - for example duplicate type names will generally result in # incorrect codemod. "REQURIES_PREEXISTING_new_import_okay_if_existing_aliased": ( """ def foo() -> b.b.A: ... """, """ from c import A as B, bar def foo(): return bar() """, """ from c import A as B, bar from b.b import A def foo() -> A: return bar() """, ), "REQUIRES_PREEXISTING_fully_qualified_with_alias": ( """ def foo() -> db.Connection: ... 
""", """ import my.cool.db as db def foo(): return db.Connection() """, """ import my.cool.db as db def foo() -> db.Connection: return db.Connection() """, ), "REQURIRES_PREEXISTING_fully_qualified_typing": ( """ def foo() -> typing.Sequence[int]: ... """, """ import typing def foo(): return [] """, """ import typing def foo() -> typing.Sequence[int]: return [] """, ), } ) def test_annotate_simple_functions( self, stub: str, before: str, after: str ) -> None: self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( { "respect_default_values_1": ( """ class B: def foo(self, x: int = a.b.A.__add__(1), y=None) -> int: ... """, """ class B: def foo(self, x = A + 1, y = None) -> int: return x """, """ class B: def foo(self, x: int = A + 1, y = None) -> int: return x """, ), "respect_default_values_2": ( """ from typing import Optional class A: def foo(self, atticus, b: Optional[int] = None, c: bool = False): ... """, """ class A: def foo(self, atticus, b = None, c = False): ... """, """ from typing import Optional class A: def foo(self, atticus, b: Optional[int] = None, c: bool = False): ... """, ), } ) def test_annotate_classes(self, stub: str, before: str, after: str) -> None: self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( { "method_and_function_of_same_name": ( """ def foo() -> int: ... class A: def foo() -> str: ... """, """ def foo(): return 1 class A: def foo(): return '' """, """ def foo() -> int: return 1 class A: def foo() -> str: return '' """, ), "global_and_attribute_of_same_name": ( """ bar: int = ... class A: bar: str = ... """, """ bar = foo() class A: bar = foobar() """, """ bar: int = foo() class A: bar: str = foobar() """, ), "add_global_annotation_simple_case": ( """ a: Dict[str, int] = ... """, """ def foo() -> int: return 1 a = {} a['x'] = foo() """, """ def foo() -> int: return 1 a: Dict[str, int] = {} a['x'] = foo() """, ), "add_global_annotation_with_Type__no_added_import": ( """ from typing import Dict example: Dict[str, Type[foo.Example]] = ... """, """ from typing import Type def foo() -> Type[foo.Example]: class Example: pass return Example example = { "test": foo() } """, """ from typing import Dict, Type def foo() -> Type[foo.Example]: class Example: pass return Example example: Dict[str, Type[foo.Example]] = { "test": foo() } """, ), "tuple_assign__add_new_top_level_declarations": ( """ a: int = ... b: str = ... """, """ def foo() -> Tuple[int, str]: return (1, "") a, b = foo() """, """ a: int b: str def foo() -> Tuple[int, str]: return (1, "") a, b = foo() """, ), "list_assign__add_new_top_level_declarations": ( """ a: int = ... b: str = ... """, """ def foo() -> Tuple[int, str]: return (1, "") [a, b] = foo() """, """ a: int b: str def foo() -> Tuple[int, str]: return (1, "") [a, b] = foo() """, ), "tuples_with_subscripts__add_new_toplevel_declaration": ( """ a: int = ... """, """ from typing import Tuple def foo() -> Tuple[str, int]: return "", 1 b['z'], a = foo() """, """ from typing import Tuple a: int def foo() -> Tuple[str, int]: return "", 1 b['z'], a = foo() """, ), "handle_quoted_annotations": ( """ bar: "a.b.Example" def f(x: "typing.Union[int, str]") -> "typing.Union[int, str]": ... class A: def f(self: "A") -> "A": ... 
""", """ bar = Example() def f(x): return x class A: def f(self): return self """, """ bar: "a.b.Example" = Example() def f(x: "typing.Union[int, str]") -> "typing.Union[int, str]": return x class A: def f(self: "A") -> "A": return self """, ), } ) def test_annotate_mixed(self, stub: str, before: str, after: str) -> None: self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( { "insert_new_TypedDict_class_not_in_source_file": ( """ from mypy_extensions import TypedDict class MovieTypedDict(TypedDict): name: str year: int """, """ def foo() -> None: pass """, """ from mypy_extensions import TypedDict class MovieTypedDict(TypedDict): name: str year: int def foo() -> None: pass """, ), "insert_only_TypedDict_class_not_already_in_source": ( """ from mypy_extensions import TypedDict class MovieTypedDict(TypedDict): name: str year: int class ExistingMovieTypedDict(TypedDict): name: str year: int """, """ from mypy_extensions import TypedDict class ExistingMovieTypedDict(TypedDict): name: str year: int def foo() -> None: pass """, """ from mypy_extensions import TypedDict class MovieTypedDict(TypedDict): name: str year: int class ExistingMovieTypedDict(TypedDict): name: str year: int def foo() -> None: pass """, ), } ) def test_adding_typed_dicts(self, stub: str, before: str, after: str) -> None: self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( { "insert_new_TypeVar_not_in_source_file": ( """ from typing import Dict, TypeVar _KT = TypeVar('_KT') _VT = TypeVar('_VT') class UserDict(Dict[_KT, _VT]): def __init__(self, initialdata: Dict[_KT, _VT] = ...): ... """, """ class UserDict: def __init__(self, initialdata = None): pass """, """ from typing import Dict, TypeVar _KT = TypeVar('_KT') _VT = TypeVar('_VT') class UserDict: def __init__(self, initialdata: Dict[_KT, _VT] = None): pass """, ), "insert_only_used_TypeVar_not_already_in_source": ( """ from typing import Dict, TypeVar K = TypeVar('K') V = TypeVar('V') X = TypeVar('X') class UserDict(Dict[K, V]): def __init__(self, initialdata: Dict[K, V] = ...): ... """, """ from typing import TypeVar V = TypeVar('V') class UserDict: def __init__(self, initialdata = None): pass def f(x: V) -> V: pass """, """ from typing import Dict, TypeVar K = TypeVar('K') V = TypeVar('V') class UserDict: def __init__(self, initialdata: Dict[K, V] = None): pass def f(x: V) -> V: pass """, ), "insert_Generic_base_class": ( """ from typing import TypeVar T = TypeVar('T') X = TypeVar('X') class B(A, Generic[T]): def f(self, x: T) -> T: ... """, """ from typing import TypeVar V = TypeVar('V') def f(x: V) -> V: pass class A: pass class B(A): def f(self, x): pass """, """ from typing import TypeVar T = TypeVar('T') V = TypeVar('V') def f(x: V) -> V: pass class A: pass class B(A, Generic[T]): def f(self, x: T) -> T: pass """, ), } ) def test_adding_typevars(self, stub: str, before: str, after: str) -> None: self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( { "required_positional_only_args": ( """ def foo( a: int, /, b: str, c: int = ..., *, d: str = ..., e: int, f: int = ... ) -> int: ... """, """ def foo( a, /, b, c=5, *, d="a", e, f=10 ) -> int: return 1 """, """ def foo( a: int, /, b: str, c: int=5, *, d: str="a", e: int, f: int=10 ) -> int: return 1 """, ), "positional_only_arg_with_default_value": ( """ def foo( a: int, b: int = ..., /, c: int = ..., *, d: str = ..., e: int, f: int = ... ) -> int: ... 
""", """ def foo( a, b = 5, /, c = 10, *, d = "a", e, f = 20 ) -> int: return 1 """, """ def foo( a: int, b: int = 5, /, c: int = 10, *, d: str = "a", e: int, f: int = 20 ) -> int: return 1 """, ), } ) # pyre-fixme[56]: Pyre was not able to infer the type of argument # `sys.version_info < (3, 8)` to decorator factory `unittest.skipIf`. @unittest.skipIf(sys.version_info < (3, 8), "Unsupported Python version") def test_annotate_functions_py38(self, stub: str, before: str, after: str) -> None: self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( { "fully_annotated_with_different_stub": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(a: int, b: str) -> bool: return 'hello' """, """ def f(a: bool, b: bool) -> str: return 'hello' """, ), } ) def test_annotate_functions_with_existing_annotations( self, stub: str, before: str, after: str ) -> None: self.run_test_case_with_flags( stub=stub, before=before, after=after, overwrite_existing_annotations=True, ) @data_provider( { "pep_604": ( """ def f(a: int | str, b: int | list[int | list[int | str]]) -> str: ... """, """ def f(a, b): return 'hello' """, """ def f(a: int | str, b: int | list[int | list[int | str]]) -> str: return 'hello' """, ), "pep_604_import": ( """ from typing import Callable from collections.abc import Sequence def f(a: int | str, b: int | list[int | Callable[[str], Sequence]]) -> str: ... """, """ def f(a, b): return 'hello' """, """ from collections.abc import Sequence from typing import Callable def f(a: int | str, b: int | list[int | Callable[[str], Sequence]]) -> str: return 'hello' """, ), } ) def test_annotate_functions_pep_604( self, stub: str, before: str, after: str ) -> None: self.run_test_case_with_flags( stub=stub, before=before, after=after, overwrite_existing_annotations=True, ) @data_provider( { "import_inside_list": ( """ from typing import Callable from collections.abc import Sequence def f(a: Callable[[Sequence[int]], int], b: int) -> str: ... """, """ def f(a, b): return 'hello' """, """ from collections.abc import Sequence from typing import Callable def f(a: Callable[[Sequence[int]], int], b: int) -> str: return 'hello' """, ), } ) def test_annotate_function_nested_imports( self, stub: str, before: str, after: str ) -> None: self.run_test_case_with_flags( stub=stub, before=before, after=after, overwrite_existing_annotations=True, ) @data_provider( { "return_self": ( """ class Foo: def f(self) -> Foo: ... """, """ class Foo: def f(self): return self """, """ class Foo: def f(self) -> "Foo": return self """, ), "return_forward_reference": ( """ class Foo: def f(self) -> Bar: ... class Bar: ... """, """ class Foo: def f(self): return Bar() class Bar: pass """, """ class Foo: def f(self) -> "Bar": return Bar() class Bar: pass """, ), "return_backward_reference": ( """ class Bar: ... class Foo: def f(self) -> Bar: ... """, """ class Bar: pass class Foo: def f(self): return Bar() """, """ class Bar: pass class Foo: def f(self) -> Bar: return Bar() """, ), "return_undefined_name": ( """ class Foo: def f(self) -> Bar: ... """, """ class Foo: def f(self): return self """, """ class Foo: def f(self) -> Bar: return self """, ), "parameter_forward_reference": ( """ def f(input: Bar) -> None: ... class Bar: ... 
""", """ def f(input): pass class Bar: pass """, """ def f(input: "Bar") -> None: pass class Bar: pass """, ), } ) def test_annotate_with_forward_references( self, stub: str, before: str, after: str ) -> None: self.run_test_case_with_flags( stub=stub, before=before, after=after, overwrite_existing_annotations=True, ) @data_provider( { "fully_annotated_with_untyped_stub": ( """ def f(a, b): ... """, """ def f(a: bool, b: bool) -> str: return "hello" """, """ def f(a: bool, b: bool) -> str: return "hello" """, ), "params_annotated_with_return_from_stub": ( """ def f(a, b) -> str: ... """, """ def f(a: bool, b: bool): return "hello" """, """ def f(a: bool, b: bool) -> str: return "hello" """, ), "partially_annotated_params_with_partial_stub": ( """ def f(a, b: int): ... """, """ def f(a: bool, b) -> str: return "hello" """, """ def f(a: bool, b: int) -> str: return "hello" """, ), } ) def test_annotate_using_incomplete_stubs( self, stub: str, before: str, after: str ) -> None: """ Ensure that when the stubs are missing annotations where the existing code has them, we won't remove the existing annotations even when `overwrite_existing_annotations` is set to `True`. """ self.run_test_case_with_flags( stub=stub, before=before, after=after, overwrite_existing_annotations=True, ) @data_provider( { "basic_example_using_future_annotations": ( """ def f() -> bool: ... """, """ def f(): return True """, """ from __future__ import annotations def f() -> bool: return True """, ), "no_use_future_if_no_changes": ( """ def f() -> bool: ... """, """ def f() -> bool: return True """, """ def f() -> bool: return True """, ), } ) def test_use_future_annotations(self, stub: str, before: str, after: str) -> None: self.run_test_case_with_flags( stub=stub, before=before, after=after, use_future_annotations=True, ) @data_provider( { "mismatched_signature_posargs": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(a): return 'hello' """, """ def f(a): return 'hello' """, ), "mismatched_signature_annotation": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(a, b: int): return 'hello' """, """ def f(a: bool, b: int) -> str: return 'hello' """, ), "mismatched_posarg_names": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(x, y): return 'hello' """, """ def f(x, y): return 'hello' """, ), "mismatched_return_type": ( """ def f(a: bool, b: bool) -> int: ... """, """ def f(a, b) -> str: return 'hello' """, """ def f(a: bool, b: bool) -> str: return 'hello' """, ), "matched_signature": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(a: bool, b = False): return 'hello' """, """ def f(a: bool, b: bool = False) -> str: return 'hello' """, ), "matched_signature_with_permuted_kwargs": ( """ def f(*, a: bool, b: bool) -> str: ... """, """ def f(*, b: bool, a = False): return 'hello' """, """ def f(*, b: bool, a: bool = False) -> str: return 'hello' """, ), } ) def test_signature_matching(self, stub: str, before: str, after: str) -> None: self.run_test_case_with_flags( stub=stub, before=before, after=after, ) @data_provider( { "mismatched_posarg_names": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(x, y): return 'hello' """, """ def f(x: bool, y: bool) -> str: return 'hello' """, ), "mismatched_kwarg_names": ( """ def f(p: int, q: str, *, a: bool, b: bool) -> str: ... 
""", """ def f(p, q, *, x, y): return 'hello' """, """ def f(p, q, *, x, y): return 'hello' """, ), } ) def test_signature_matching_with_nonstrict_posargs( self, stub: str, before: str, after: str ) -> None: self.run_test_case_with_flags( stub=stub, before=before, after=after, strict_posargs_matching=False ) @data_provider( { "mismatched_signature_posargs": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(a): return 'hello' """, """ def f(a): return 'hello' """, ), "mismatched_signature_annotation": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(a, b: int): return 'hello' """, """ def f(a, b: int): return 'hello' """, ), "mismatched_posarg_names": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(x, y): return 'hello' """, """ def f(x, y): return 'hello' """, ), "mismatched_return_type": ( """ def f(a: bool, b: bool) -> int: ... """, """ def f(a, b) -> str: return 'hello' """, """ def f(a, b) -> str: return 'hello' """, ), "matched_signature": ( """ def f(a: bool, b: bool) -> str: ... """, """ def f(a: bool, b = False): return 'hello' """, """ def f(a: bool, b: bool = False) -> str: return 'hello' """, ), "matched_signature_with_permuted_kwargs": ( """ def f(*, a: bool, b: bool) -> str: ... """, """ def f(*, b: bool, a = False): return 'hello' """, """ def f(*, b: bool, a: bool = False) -> str: return 'hello' """, ), } ) def test_signature_matching_with_strict_annotation_matching( self, stub: str, before: str, after: str ) -> None: self.run_test_case_with_flags( stub=stub, before=before, after=after, strict_annotation_matching=True ) @data_provider( { "test_counting_parameters_and_returns": ( """ def f(counted: int, not_counted) -> Counted: ... def g(not_counted: int, counted: str) -> Counted: ... def h(counted: int) -> NotCounted: ... def not_in_module(x: int, y: int) -> str: ... """, """ def f(counted, not_counted): return Counted() def g(not_counted: int, counted): return Counted() def h(counted) -> NotCounted: return Counted() """, """ def f(counted: int, not_counted) -> Counted: return Counted() def g(not_counted: int, counted: str) -> Counted: return Counted() def h(counted: int) -> NotCounted: return Counted() """, AnnotationCounts( parameter_annotations=3, return_annotations=2, ), True, ), "test_counting_globals_classes_and_attributes": ( """ global0: int = ... 
global1: int class InModule: attr_will_be_found: int attr_will_not_be_found: int class NotInModule: attr: int """, """ global0 = 1 global1, global2 = (1, 1) class InModule: attr_will_be_found = 0 def __init__(self): self.attr_will_not_be_found = 1 """, """ global1: int class NotInModule: attr: int global0: int = 1 global1, global2 = (1, 1) class InModule: attr_will_be_found: int = 0 def __init__(self): self.attr_will_not_be_found = 1 """, AnnotationCounts( global_annotations=2, attribute_annotations=1, classes_added=1, ), True, ), "test_counting_no_changes": ( """ class C: attr_will_not_be_found: bar.X """, """ class C: def __init__(self): self.attr_will_not_be_found = None """, """ class C: def __init__(self): self.attr_will_not_be_found = None """, AnnotationCounts(), False, ), } ) def test_count_annotations( self, stub: str, before: str, after: str, annotation_counts: AnnotationCounts, any_changes_applied: bool, ) -> None: stub = self.make_fixture_data(stub) before = self.make_fixture_data(before) after = self.make_fixture_data(after) context = CodemodContext() ApplyTypeAnnotationsVisitor.store_stub_in_context( context=context, stub=parse_module(stub) ) visitor = ApplyTypeAnnotationsVisitor(context=context) output_code = visitor.transform_module(parse_module(before)).code self.assertEqual(after, output_code) self.assertEqual(str(annotation_counts), str(visitor.annotation_counts)) self.assertEqual( any_changes_applied, visitor.annotation_counts.any_changes_applied() ) @data_provider( { "always_qualify": ( """ from a import A import b def f(x: A, y: b.B) -> None: ... """, """ def f(x, y): pass """, """ import a import b def f(x: a.A, y: b.B) -> None: pass """, ), "never_qualify_typing": ( """ from a import A from b import B from typing import List def f(x: List[A], y: B[A]) -> None: ... """, """ def f(x, y): pass """, """ import a import b from typing import List def f(x: List[a.A], y: b.B[a.A]) -> None: pass """, ), "preserve_explicit_from_import": ( """ from a import A import b def f(x: A, y: b.B) -> None: ... 
""", """ from b import B def f(x, y): pass """, """ from b import B import a def f(x: a.A, y: B) -> None: pass """, ), } ) def test_signature_matching_with_always_qualify( self, stub: str, before: str, after: str ) -> None: self.run_test_case_with_flags( stub=stub, before=before, after=after, always_qualify_annotations=True ) @data_provider( { "attribute": ( """ class C: x: int """, """ class C: x = 0 C.x = 1 """, """ class C: x: int = 0 C.x = 1 """, ), "subscript": ( """ d: dict[str, int] """, """ d = {} d["k"] = 0 """, """ d: dict[str, int] = {} d["k"] = 0 """, ), "starred": ( """ a: int b: list[int] """, """ a, *b = [1, 2, 3] """, """ a: int b: list[int] a, *b = [1, 2, 3] """, ), "name": ( """ a: int """, """ a = 0 """, """ a: int = 0 """, ), "list": ( """ a: int """, """ [a] = [0] """, """ a: int [a] = [0] """, ), "tuple": ( """ a: int """, """ (a,) = [0] """, """ a: int (a,) = [0] """, ), } ) def test_valid_assign_expressions(self, stub: str, before: str, after: str) -> None: self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( { "toplevel": ( """ x: int """, """ x = 1 x = 2 """, """ x: int = 1 x = 2 """, ), "class": ( """ class A: x: int """, """ class A: x = 1 x = 2 """, """ class A: x: int = 1 x = 2 """, ), "mixed": ( """ x: int class A: x: int """, """ x = 1 class A: x = 1 x = 2 """, """ x: int = 1 class A: x: int = 1 x = 2 """, ), } ) def test_no_duplicate_annotations(self, stub: str, before: str, after: str) -> None: self.run_simple_test_case(stub=stub, before=before, after=after) LibCST-1.2.0/libcst/codemod/visitors/tests/test_gather_comments.py000066400000000000000000000031311456464173300253130ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst import Comment, MetadataWrapper, parse_module from libcst.codemod import CodemodContext, CodemodTest from libcst.codemod.visitors import GatherCommentsVisitor from libcst.testing.utils import UnitTest class TestGatherCommentsVisitor(UnitTest): def gather_comments(self, code: str) -> GatherCommentsVisitor: mod = MetadataWrapper(parse_module(CodemodTest.make_fixture_data(code))) mod.resolve_many(GatherCommentsVisitor.METADATA_DEPENDENCIES) instance = GatherCommentsVisitor( CodemodContext(wrapper=mod), r".*\Wnoqa(\W.*)?$" ) mod.visit(instance) return instance def test_no_comments(self) -> None: visitor = self.gather_comments( """ def foo() -> None: pass """ ) self.assertEqual(visitor.comments, {}) def test_noqa_comments(self) -> None: visitor = self.gather_comments( """ import a.b.c # noqa import d # somethingelse # noqa def foo() -> None: pass """ ) self.assertEqual(visitor.comments.keys(), {1, 4}) self.assertTrue(isinstance(visitor.comments[1], Comment)) self.assertEqual(visitor.comments[1].value, "# noqa") self.assertTrue(isinstance(visitor.comments[4], Comment)) self.assertEqual(visitor.comments[4].value, "# noqa") LibCST-1.2.0/libcst/codemod/visitors/tests/test_gather_exports.py000066400000000000000000000105011456464173300251710ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# from libcst import parse_module from libcst.codemod import CodemodContext, CodemodTest from libcst.codemod.visitors import GatherExportsVisitor from libcst.testing.utils import UnitTest class TestGatherExportsVisitor(UnitTest): def gather_exports(self, code: str) -> GatherExportsVisitor: transform_instance = GatherExportsVisitor(CodemodContext()) input_tree = parse_module(CodemodTest.make_fixture_data(code)) input_tree.visit(transform_instance) return transform_instance def test_gather_noop(self) -> None: code = """ from foo import bar from typing import List bar(["foo", "bar"]) list_of_str = ["foo", "bar", "baz"] set_of_str = {"foo", "bar", "baz"} tuple_of_str = ("foo", "bar", "baz") another: List[str] = ["foobar", "foobarbaz"] """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, set()) def test_gather_exports_simple(self) -> None: code = """ from foo import bar from biz import baz __all__ = ["bar", "baz"] """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) def test_gather_exports_simple2(self) -> None: code = """ from foo import bar from biz import baz __all__ = ["bar"] __all__ += ["baz"] """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) def test_gather_exports_simple_set(self) -> None: code = """ from foo import bar from biz import baz __all__ = {"bar", "baz"} """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) def test_gather_exports_simple_tuple(self) -> None: code = """ from foo import bar from biz import baz __all__ = ("bar", "baz") """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) def test_gather_exports_simple_annotated(self) -> None: code = """ from foo import bar from biz import baz from typing import List __all__: List[str] = ["bar", "baz"] """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) def test_gather_exports_ignore_invalid_1(self) -> None: code = """ from foo import bar from biz import baz __all__ = [bar, baz] """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, set()) def test_gather_exports_ignore_invalid_2(self) -> None: code = """ from foo import bar from biz import baz __all__ = ["bar", "baz", ["biz"]] """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) def test_gather_exports_ignore_valid_1(self) -> None: code = """ from foo import bar from biz import baz __all__ = ["bar", "b""a""z"] """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) def test_gather_exports_ignore_valid_2(self) -> None: code = """ from foo import bar from biz import baz __all__, _ = ["bar", "baz"], ["biz"] """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) def test_gather_exports_ignore_valid_3(self) -> None: code = """ from foo import bar from biz import baz __all__ = exported = ["bar", "baz"] """ gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) LibCST-1.2.0/libcst/codemod/visitors/tests/test_gather_global_names.py000066400000000000000000000035361456464173300261220ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst import parse_module from libcst.codemod import CodemodContext, CodemodTest from libcst.codemod.visitors import GatherGlobalNamesVisitor from libcst.testing.utils import UnitTest class TestGatherGlobalNamesVisitor(UnitTest): def gather_global_names(self, code: str) -> GatherGlobalNamesVisitor: transform_instance = GatherGlobalNamesVisitor( CodemodContext(full_module_name="a.b.foobar") ) input_tree = parse_module(CodemodTest.make_fixture_data(code)) input_tree.visit(transform_instance) return transform_instance def test_gather_nothing(self) -> None: code = """ from a import b b() """ gatherer = self.gather_global_names(code) self.assertEqual(gatherer.global_names, set()) self.assertEqual(gatherer.class_names, set()) self.assertEqual(gatherer.function_names, set()) def test_globals(self) -> None: code = """ x = 1 y = 2 def foo(): pass class Foo: pass """ gatherer = self.gather_global_names(code) self.assertEqual(gatherer.global_names, {"x", "y"}) self.assertEqual(gatherer.class_names, {"Foo"}) self.assertEqual(gatherer.function_names, {"foo"}) def test_omit_nested(self) -> None: code = """ def foo(): x = 1 class Foo: def method(self): pass """ gatherer = self.gather_global_names(code) self.assertEqual(gatherer.global_names, set()) self.assertEqual(gatherer.class_names, {"Foo"}) self.assertEqual(gatherer.function_names, {"foo"}) LibCST-1.2.0/libcst/codemod/visitors/tests/test_gather_imports.py000066400000000000000000000136171456464173300251750ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# from libcst import parse_module from libcst.codemod import CodemodContext, CodemodTest from libcst.codemod.visitors import GatherImportsVisitor from libcst.testing.utils import UnitTest class TestGatherImportsVisitor(UnitTest): def gather_imports(self, code: str) -> GatherImportsVisitor: transform_instance = GatherImportsVisitor( CodemodContext(full_module_name="a.b.foobar", full_package_name="a.b") ) input_tree = parse_module(CodemodTest.make_fixture_data(code)) input_tree.visit(transform_instance) return transform_instance def test_gather_nothing(self) -> None: code = """ def foo() -> None: pass def bar() -> int: return 5 """ gatherer = self.gather_imports(code) self.assertEqual(gatherer.module_imports, set()) self.assertEqual(gatherer.object_mapping, {}) self.assertEqual(gatherer.module_aliases, {}) self.assertEqual(gatherer.alias_mapping, {}) self.assertEqual(len(gatherer.all_imports), 0) def test_gather_module(self) -> None: code = """ import a.b.c import d def foo() -> None: pass def bar() -> int: return 5 """ gatherer = self.gather_imports(code) self.assertEqual(gatherer.module_imports, {"a.b.c", "d"}) self.assertEqual(gatherer.object_mapping, {}) self.assertEqual(gatherer.module_aliases, {}) self.assertEqual(gatherer.alias_mapping, {}) self.assertEqual(len(gatherer.all_imports), 2) def test_gather_aliased_module(self) -> None: code = """ import a.b.c as e import d as f def foo() -> None: pass def bar() -> int: return 5 """ gatherer = self.gather_imports(code) self.assertEqual(gatherer.module_imports, set()) self.assertEqual(gatherer.object_mapping, {}) self.assertEqual(gatherer.module_aliases, {"a.b.c": "e", "d": "f"}) self.assertEqual(gatherer.alias_mapping, {}) self.assertEqual(len(gatherer.all_imports), 2) def test_gather_object(self) -> None: code = """ from a.b.c import d, e, f def foo() -> None: pass def bar() -> int: return 5 """ gatherer = self.gather_imports(code) self.assertEqual(gatherer.module_imports, set()) self.assertEqual(gatherer.object_mapping, {"a.b.c": {"d", "e", "f"}}) self.assertEqual(gatherer.module_aliases, {}) self.assertEqual(gatherer.alias_mapping, {}) self.assertEqual(len(gatherer.all_imports), 1) def test_gather_object_disjoint(self) -> None: code = """ from a.b.c import d, e from a.b.c import f def foo() -> None: pass def bar() -> int: return 5 """ gatherer = self.gather_imports(code) self.assertEqual(gatherer.module_imports, set()) self.assertEqual(gatherer.object_mapping, {"a.b.c": {"d", "e", "f"}}) self.assertEqual(gatherer.module_aliases, {}) self.assertEqual(gatherer.alias_mapping, {}) self.assertEqual(len(gatherer.all_imports), 2) def test_gather_aliased_object(self) -> None: code = """ from a.b.c import d as e, f as g def foo() -> None: pass def bar() -> int: return 5 """ gatherer = self.gather_imports(code) self.assertEqual(gatherer.module_imports, set()) self.assertEqual(gatherer.object_mapping, {}) self.assertEqual(gatherer.module_aliases, {}) self.assertEqual(gatherer.alias_mapping, {"a.b.c": [("d", "e"), ("f", "g")]}) self.assertEqual(len(gatherer.all_imports), 1) def test_gather_aliased_object_disjoint(self) -> None: code = """ from a.b.c import d as e from a.b.c import f as g def foo() -> None: pass def bar() -> int: return 5 """ gatherer = self.gather_imports(code) self.assertEqual(gatherer.module_imports, set()) self.assertEqual(gatherer.object_mapping, {}) self.assertEqual(gatherer.module_aliases, {}) self.assertEqual(gatherer.alias_mapping, {"a.b.c": [("d", "e"), ("f", "g")]}) self.assertEqual(len(gatherer.all_imports), 2) def 
test_gather_aliased_object_mixed(self) -> None: code = """ from a.b.c import d as e, f, g def foo() -> None: pass def bar() -> int: return 5 """ gatherer = self.gather_imports(code) self.assertEqual(gatherer.module_imports, set()) self.assertEqual(gatherer.object_mapping, {"a.b.c": {"f", "g"}}) self.assertEqual(gatherer.module_aliases, {}) self.assertEqual(gatherer.alias_mapping, {"a.b.c": [("d", "e")]}) self.assertEqual(len(gatherer.all_imports), 1) def test_gather_relative_object(self) -> None: code = """ from .c import d as e, f, g from a.b.c import h, i, j def foo() -> None: pass def bar() -> int: return 5 """ gatherer = self.gather_imports(code) self.assertEqual(gatherer.module_imports, set()) self.assertEqual(gatherer.object_mapping, {"a.b.c": {"f", "g", "h", "i", "j"}}) self.assertEqual(gatherer.module_aliases, {}) self.assertEqual(gatherer.alias_mapping, {"a.b.c": [("d", "e")]}) self.assertEqual(len(gatherer.all_imports), 2) LibCST-1.2.0/libcst/codemod/visitors/tests/test_gather_string_annotation_names.py000066400000000000000000000052241456464173300304160ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst import MetadataWrapper, parse_module from libcst.codemod import CodemodContext, CodemodTest from libcst.codemod.visitors import GatherNamesFromStringAnnotationsVisitor from libcst.testing.utils import UnitTest class TestGatherNamesFromStringAnnotationsVisitor(UnitTest): def gather_names(self, code: str) -> GatherNamesFromStringAnnotationsVisitor: mod = MetadataWrapper(parse_module(CodemodTest.make_fixture_data(code))) mod.resolve_many(GatherNamesFromStringAnnotationsVisitor.METADATA_DEPENDENCIES) instance = GatherNamesFromStringAnnotationsVisitor(CodemodContext(wrapper=mod)) mod.visit(instance) return instance def test_no_annotations(self) -> None: visitor = self.gather_names( """ def foo() -> None: pass """ ) self.assertEqual(visitor.names, set()) def test_simple_string_annotations(self) -> None: visitor = self.gather_names( """ def foo() -> "None": pass """ ) self.assertEqual(visitor.names, {"None"}) def test_concatenated_string_annotations(self) -> None: visitor = self.gather_names( """ def foo() -> "No" "ne": pass """ ) self.assertEqual(visitor.names, {"None"}) def test_typevars(self) -> None: visitor = self.gather_names( """ from typing import TypeVar as SneakyBastard V = SneakyBastard("V", bound="int") """ ) self.assertEqual(visitor.names, {"V", "int"}) def test_complex(self) -> None: visitor = self.gather_names( """ from typing import TypeVar, TYPE_CHECKING if TYPE_CHECKING: from a import Container, Item def foo(a: "A") -> "Item": pass A = TypeVar("A", bound="Container[Item]") class X: var: "ThisIsExpensiveToImport" # noqa """ ) self.assertEqual( visitor.names, {"A", "Item", "Container", "ThisIsExpensiveToImport"} ) def test_dotted_names(self) -> None: visitor = self.gather_names( """ a: "api.http_exceptions.HttpException" """ ) self.assertEqual( visitor.names, {"api", "api.http_exceptions", "api.http_exceptions.HttpException"}, ) LibCST-1.2.0/libcst/codemod/visitors/tests/test_gather_unused_imports.py000066400000000000000000000067771456464173300265710ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# from typing import Set from libcst import MetadataWrapper, parse_module from libcst.codemod import CodemodContext, CodemodTest from libcst.codemod.visitors import GatherUnusedImportsVisitor from libcst.testing.utils import UnitTest class TestGatherUnusedImportsVisitor(UnitTest): def gather_imports(self, code: str) -> Set[str]: mod = MetadataWrapper(parse_module(CodemodTest.make_fixture_data(code))) mod.resolve_many(GatherUnusedImportsVisitor.METADATA_DEPENDENCIES) instance = GatherUnusedImportsVisitor(CodemodContext(wrapper=mod)) mod.visit(instance) return { alias.evaluated_alias or alias.evaluated_name for alias, _ in instance.unused_imports } def test_no_imports(self) -> None: imports = self.gather_imports( """ foo = 1 """ ) self.assertEqual(imports, set()) def test_dotted_imports(self) -> None: imports = self.gather_imports( """ import a.b.c, d import x.y a.b(d) """ ) self.assertEqual(imports, {"x.y"}) def test_alias(self) -> None: imports = self.gather_imports( """ from bar import baz as baz_alias import bar as bar_alias bar_alias() """ ) self.assertEqual(imports, {"baz_alias"}) def test_import_complex(self) -> None: imports = self.gather_imports( """ import bar import baz, qux import a.b import c.d import x.y.z import e.f as g import h.i as j def foo() -> None: c.d(qux) x.u j() """ ) self.assertEqual(imports, {"bar", "baz", "a.b", "g"}) def test_import_from_complex(self) -> None: imports = self.gather_imports( """ from bar import qux, quux from a.b import c from d.e import f from h.i import j as k from l.m import n as o from x import * def foo() -> None: f(qux) k() """ ) self.assertEqual(imports, {"quux", "c", "o"}) def test_exports(self) -> None: imports = self.gather_imports( """ import a __all__ = ["a"] """ ) self.assertEqual(imports, set()) def test_string_annotation(self) -> None: imports = self.gather_imports( """ from a import b from c import d import m, n.blah foo: "b[int]" bar: List["d"] quux: List["m.blah"] alma: List["n.blah"] """ ) self.assertEqual(imports, set()) def test_typevars(self) -> None: imports = self.gather_imports( """ from typing import TypeVar as Sneaky from a import b t = Sneaky("t", bound="b") """ ) self.assertEqual(imports, set()) def test_future(self) -> None: imports = self.gather_imports( """ from __future__ import cool_feature """ ) self.assertEqual(imports, set()) LibCST-1.2.0/libcst/codemod/visitors/tests/test_remove_imports.py000066400000000000000000000564101456464173300252160ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import libcst as cst import libcst.matchers as m from libcst.codemod import CodemodContext, CodemodTest, VisitorBasedCodemodCommand from libcst.codemod.visitors import AddImportsVisitor, RemoveImportsVisitor from libcst.metadata import ( QualifiedName, QualifiedNameProvider, QualifiedNameSource, ScopeProvider, ) from libcst.testing.utils import data_provider class TestRemoveImportsCodemod(CodemodTest): TRANSFORM = RemoveImportsVisitor def test_noop(self) -> None: """ Should do nothing. 
""" before = """ def foo() -> None: pass """ after = """ def foo() -> None: pass """ self.assertCodemod(before, after, []) def test_remove_import_simple(self) -> None: """ Should remove module as import """ before = """ import bar import baz def foo() -> None: pass """ after = """ import bar def foo() -> None: pass """ self.assertCodemod(before, after, [("baz", None, None)]) def test_remove_fromimport_simple(self) -> None: before = "from a import b, c" after = "from a import c" self.assertCodemod(before, after, [("a", "b", None)]) def test_remove_fromimport_keeping_standalone_comment(self) -> None: before = """ from foo import ( bar, # comment baz, ) from loooong import ( bar, # comment short, this_stays ) from third import ( # comment short, this_stays_too ) from fourth import ( a, # comment b, c ) """ after = """ from foo import ( # comment baz, ) from loooong import ( this_stays ) from third import ( this_stays_too ) from fourth import ( a, c ) """ self.assertCodemod( before, after, [ ("foo", "bar", None), ("loooong", "short", None), ("loooong", "bar", None), ("third", "short", None), ("fourth", "b", None), ], ) def test_remove_fromimport_keeping_inline_comment(self) -> None: before = """ from foo import ( # comment bar, # comment2 baz, ) from loooong import ( bar, short, # comment # comment2 this_stays ) from third import ( short, # comment this_stays_too # comment2 ) """ after = """ from foo import ( # comment # comment2 baz, ) from loooong import ( # comment2 this_stays ) from third import ( this_stays_too # comment2 ) """ self.assertCodemod( before, after, [ ("foo", "bar", None), ("loooong", "short", None), ("loooong", "bar", None), ("third", "short", None), ], ) def test_remove_import_alias_simple(self) -> None: """ Should remove aliased module as import """ before = """ import bar import baz as qux def foo() -> None: pass """ after = """ import bar def foo() -> None: pass """ self.assertCodemod(before, after, [("baz", None, "qux")]) def test_dont_remove_import_simple(self) -> None: """ Should not remove module import with reference """ before = """ import bar import baz def foo() -> None: baz.qux() """ after = """ import bar import baz def foo() -> None: baz.qux() """ self.assertCodemod(before, after, [("baz", None, None)]) def test_dont_remove_import_alias_simple(self) -> None: """ Should not remove aliased module import with reference """ before = """ import bar import baz as qux def foo() -> None: qux.quux() """ after = """ import bar import baz as qux def foo() -> None: qux.quux() """ self.assertCodemod(before, after, [("baz", None, "qux")]) def test_dont_remove_import_simple_wrong_alias(self) -> None: """ Should not remove module as import since wrong alias """ before = """ import bar import baz def foo() -> None: pass """ after = """ import bar import baz def foo() -> None: pass """ self.assertCodemod(before, after, [("baz", None, "qux")]) def test_dont_remove_import_wrong_alias_simple(self) -> None: """ Should not remove wrong aliased module as import """ before = """ import bar import baz as qux def foo() -> None: pass """ after = """ import bar import baz as qux def foo() -> None: pass """ self.assertCodemod(before, after, [("baz", None, None)]) def test_remove_importfrom_simple(self) -> None: """ Should remove import from """ before = """ import bar from baz import qux def foo() -> None: pass """ after = """ import bar def foo() -> None: pass """ self.assertCodemod(before, after, [("baz", "qux", None)]) def test_remove_importfrom_alias_simple(self) -> None: """ Should 
remove import from with alias """ before = """ import bar from baz import qux as quux def foo() -> None: pass """ after = """ import bar def foo() -> None: pass """ self.assertCodemod(before, after, [("baz", "qux", "quux")]) def test_dont_remove_importfrom_simple(self) -> None: """ Should not remove import from with reference """ before = """ import bar from baz import qux def foo() -> None: qux() """ after = """ import bar from baz import qux def foo() -> None: qux() """ self.assertCodemod(before, after, [("baz", "qux", None)]) def test_dont_remove_importfrom_alias_simple(self) -> None: """ Should not remove aliased import from with reference """ before = """ import bar from baz import qux as quux def foo() -> None: quux() """ after = """ import bar from baz import qux as quux def foo() -> None: quux() """ self.assertCodemod(before, after, [("baz", "qux", "quux")]) def test_dont_remove_importfrom_simple_wrong_alias(self) -> None: """ Should not remove import from since it is wrong alias """ before = """ import bar from baz import qux as quux def foo() -> None: pass """ after = """ import bar from baz import qux as quux def foo() -> None: pass """ self.assertCodemod(before, after, [("baz", "qux", None)]) def test_dont_remove_importfrom_alias_simple_wrong_alias(self) -> None: """ Should not remove import from with wrong alias """ before = """ import bar from baz import qux def foo() -> None: pass """ after = """ import bar from baz import qux def foo() -> None: pass """ self.assertCodemod(before, after, [("baz", "qux", "quux")]) def test_remove_importfrom_relative(self) -> None: """ Should remove import from which is relative """ before = """ import bar from .c import qux def foo() -> None: pass """ after = """ import bar def foo() -> None: pass """ self.assertCodemod( before, after, [("a.b.c", "qux", None)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_dont_remove_inuse_importfrom_relative(self) -> None: """ Should not remove import from which is relative since it is in use. """ before = """ import bar from .c import qux def foo() -> None: qux() """ after = """ import bar from .c import qux def foo() -> None: qux() """ self.assertCodemod( before, after, [("a.b.c", "qux", None)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_dont_remove_wrong_importfrom_relative(self) -> None: """ Should not remove import from which is relative since it is the wrong module. 
""" before = """ import bar from .c import qux def foo() -> None: pass """ after = """ import bar from .c import qux def foo() -> None: pass """ self.assertCodemod( before, after, [("a.b.d", "qux", None)], context_override=CodemodContext( full_module_name="a.b.foobar", full_package_name="a.b" ), ) def test_remove_import_complex(self) -> None: """ Should remove complex module as import """ before = """ import bar import baz, qux import a.b import c.d import x.y.z import e.f as g import h.i as j def foo() -> None: c.d() x.u j() """ after = """ import bar import qux import c.d import x.y.z import h.i as j def foo() -> None: c.d() x.u j() """ self.assertCodemod( before, after, [ ("baz", None, None), ("a.b", None, None), ("c.d", None, None), ("e.f", None, "g"), ("h.i", None, "j"), ("x.y.z", None, None), ], ) def test_remove_fromimport_complex(self) -> None: """ Should remove complex from import """ before = """ from bar import qux, quux from a.b import c from d.e import f from h.i import j as k from l.m import n as o from x import * def foo() -> None: f() k() """ after = """ from bar import qux from d.e import f from h.i import j as k from x import * def foo() -> None: f() k() """ self.assertCodemod( before, after, [ ("bar", "quux", None), ("a.b", "c", None), ("d.e", "f", None), ("h.i", "j", "k"), ("l.m", "n", "o"), ], ) def test_remove_import_multiple_assignments(self) -> None: """ Should not remove import with multiple assignments """ before = """ from foo import bar from qux import bar def foo() -> None: bar() """ after = """ from foo import bar from qux import bar def foo() -> None: bar() """ self.assertCodemod(before, after, [("foo", "bar", None)]) def test_remove_multiple_imports(self) -> None: """ Multiple imports """ before = """ try: import a except Exception: import a a.hello() """ after = """ try: import a except Exception: import a a.hello() """ self.assertCodemod(before, after, [("a", None, None)]) before = """ try: import a except Exception: import a """ after = """ try: pass except Exception: pass """ self.assertCodemod(before, after, [("a", None, None)]) @data_provider( ( # Simple removal, no other uses. ( """ from foo import bar from qux import baz def fun() -> None: bar() baz() """, """ from qux import baz def fun() -> None: baz() """, ), # Remove a node, other uses, don't remove import. ( """ from foo import bar from qux import baz def fun() -> None: bar() baz() def foobar() -> None: a = bar a() """, """ from foo import bar from qux import baz def fun() -> None: baz() def foobar() -> None: a = bar a() """, ), # Remove an alias. ( """ from foo import bar as other from qux import baz def fun() -> None: other() baz() """, """ from qux import baz def fun() -> None: baz() """, ), # Simple removal, no other uses. ( """ import foo from qux import baz def fun() -> None: foo.bar() baz() """, """ from qux import baz def fun() -> None: baz() """, ), # Remove a node, other uses, don't remove import. ( """ import foo from qux import baz def fun() -> None: foo.bar() baz() def foobar() -> None: a = foo.bar a() """, """ import foo from qux import baz def fun() -> None: baz() def foobar() -> None: a = foo.bar a() """, ), # Remove an alias. ( """ import foo as other from qux import baz def fun() -> None: other.bar() baz() """, """ from qux import baz def fun() -> None: baz() """, ), ) ) def test_remove_import_by_node_simple(self, before: str, after: str) -> None: """ Given a node that's directly referenced in an import, make sure that the import is removed when the node is also removed. 
""" class RemoveBarTransformer(VisitorBasedCodemodCommand): METADATA_DEPENDENCIES = (QualifiedNameProvider, ScopeProvider) @m.leave( m.SimpleStatementLine( body=[ m.Expr( m.Call( metadata=m.MatchMetadata( QualifiedNameProvider, { QualifiedName( source=QualifiedNameSource.IMPORT, name="foo.bar", ) }, ) ) ) ] ) ) def _leave_foo_bar( self, original_node: cst.SimpleStatementLine, updated_node: cst.SimpleStatementLine, ) -> cst.RemovalSentinel: RemoveImportsVisitor.remove_unused_import_by_node( self.context, original_node ) return cst.RemoveFromParent() module = cst.parse_module(self.make_fixture_data(before)) self.assertCodeEqual( after, RemoveBarTransformer(CodemodContext()).transform_module(module).code ) def test_remove_import_from_node(self) -> None: """ Make sure that if an import node itself is requested for removal, we still do the right thing and only remove it if it is unused. """ before = """ from foo import bar from qux import baz from foo import qux as other from qux import foobar as other2 def fun() -> None: baz() other2() """ after = """ from qux import baz from qux import foobar as other2 def fun() -> None: baz() other2() """ class RemoveImportTransformer(VisitorBasedCodemodCommand): METADATA_DEPENDENCIES = (QualifiedNameProvider, ScopeProvider) def visit_ImportFrom(self, node: cst.ImportFrom) -> None: RemoveImportsVisitor.remove_unused_import_by_node(self.context, node) module = cst.parse_module(self.make_fixture_data(before)) self.assertCodeEqual( after, RemoveImportTransformer(CodemodContext()).transform_module(module).code, ) def test_remove_import_node(self) -> None: """ Make sure that if an import node itself is requested for removal, we still do the right thing and only remove it if it is unused. """ before = """ import foo import qux import bar as other import foobar as other2 def fun() -> None: qux.baz() other2.baz() """ after = """ import qux import foobar as other2 def fun() -> None: qux.baz() other2.baz() """ class RemoveImportTransformer(VisitorBasedCodemodCommand): METADATA_DEPENDENCIES = (QualifiedNameProvider, ScopeProvider) def visit_Import(self, node: cst.Import) -> None: RemoveImportsVisitor.remove_unused_import_by_node(self.context, node) module = cst.parse_module(self.make_fixture_data(before)) self.assertCodeEqual( after, RemoveImportTransformer(CodemodContext()).transform_module(module).code, ) def test_remove_import_with_all(self) -> None: """ Make sure that if an import node itself is requested for removal, we don't remove it if it shows up in an __all__ node. 
""" before = """ from foo import bar from qux import baz __all__ = ["baz"] """ after = """ from qux import baz __all__ = ["baz"] """ class RemoveImportTransformer(VisitorBasedCodemodCommand): METADATA_DEPENDENCIES = (QualifiedNameProvider, ScopeProvider) def visit_ImportFrom(self, node: cst.ImportFrom) -> None: RemoveImportsVisitor.remove_unused_import_by_node(self.context, node) module = cst.parse_module(self.make_fixture_data(before)) self.assertCodeEqual( after, RemoveImportTransformer(CodemodContext()).transform_module(module).code, ) def test_remove_import_alias_after_inserting(self) -> None: before = "from foo import bar, baz" after = "from foo import quux, baz" class AddRemoveTransformer(VisitorBasedCodemodCommand): def visit_Module(self, node: cst.Module) -> None: AddImportsVisitor.add_needed_import(self.context, "foo", "quux") RemoveImportsVisitor.remove_unused_import(self.context, "foo", "bar") module = cst.parse_module(self.make_fixture_data(before)) self.assertCodeEqual( AddRemoveTransformer(CodemodContext()).transform_module(module).code, after, ) def test_remove_comma(self) -> None: """ Trailing commas should be removed if and only if the last alias is removed. """ before = """ from m import (a, b,) import x, y """ after = """ from m import (b,) import x """ self.assertCodemod(before, after, [("m", "a", None), ("y", None, None)]) LibCST-1.2.0/libcst/helpers/000077500000000000000000000000001456464173300155315ustar00rootroot00000000000000LibCST-1.2.0/libcst/helpers/__init__.py000066400000000000000000000025631456464173300176500ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.helpers._template import ( parse_template_expression, parse_template_module, parse_template_statement, ) from libcst.helpers.common import ensure_type from libcst.helpers.expression import ( get_full_name_for_node, get_full_name_for_node_or_raise, ) from libcst.helpers.module import ( calculate_module_and_package, get_absolute_module, get_absolute_module_for_import, get_absolute_module_for_import_or_raise, get_absolute_module_from_package, get_absolute_module_from_package_for_import, get_absolute_module_from_package_for_import_or_raise, insert_header_comments, ModuleNameAndPackage, ) __all__ = [ "calculate_module_and_package", "get_absolute_module", "get_absolute_module_for_import", "get_absolute_module_for_import_or_raise", "get_absolute_module_from_package", "get_absolute_module_from_package_for_import", "get_absolute_module_from_package_for_import_or_raise", "get_full_name_for_node", "get_full_name_for_node_or_raise", "ensure_type", "insert_header_comments", "parse_template_module", "parse_template_statement", "parse_template_expression", "ModuleNameAndPackage", ] LibCST-1.2.0/libcst/helpers/_template.py000066400000000000000000000457131456464173300200670ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# from typing import Dict, Mapping, Optional, Set, Union import libcst as cst from libcst.helpers.common import ensure_type TEMPLATE_PREFIX: str = "__LIBCST_MANGLED_NAME_" TEMPLATE_SUFFIX: str = "_EMAN_DELGNAM_TSCBIL__" ValidReplacementType = Union[ cst.BaseExpression, cst.Annotation, cst.AssignTarget, cst.Param, cst.Parameters, cst.Arg, cst.BaseStatement, cst.BaseSmallStatement, cst.BaseSuite, cst.BaseSlice, cst.SubscriptElement, cst.Decorator, ] def mangled_name(var: str) -> str: return f"{TEMPLATE_PREFIX}{var}{TEMPLATE_SUFFIX}" def unmangled_name(var: str) -> Optional[str]: if TEMPLATE_PREFIX in var and TEMPLATE_SUFFIX in var: prefix, name_and_suffix = var.split(TEMPLATE_PREFIX, 1) name, suffix = name_and_suffix.split(TEMPLATE_SUFFIX, 1) if not prefix and not suffix: return name # This is not a valid mangled name return None def mangle_template(template: str, template_vars: Set[str]) -> str: if TEMPLATE_PREFIX in template or TEMPLATE_SUFFIX in template: raise Exception("Cannot parse a template containing reserved strings") for var in template_vars: original = f"{{{var}}}" if original not in template: raise Exception( f'Template string is missing a reference to "{var}" referred to in kwargs' ) template = template.replace(original, mangled_name(var)) return template class TemplateTransformer(cst.CSTTransformer): def __init__( self, template_replacements: Mapping[str, ValidReplacementType] ) -> None: self.simple_replacements: Dict[str, cst.BaseExpression] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.BaseExpression) } self.annotation_replacements: Dict[str, cst.Annotation] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.Annotation) } self.assignment_replacements: Dict[str, cst.AssignTarget] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.AssignTarget) } self.param_replacements: Dict[str, cst.Param] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.Param) } self.parameters_replacements: Dict[str, cst.Parameters] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.Parameters) } self.arg_replacements: Dict[str, cst.Arg] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.Arg) } self.small_statement_replacements: Dict[str, cst.BaseSmallStatement] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.BaseSmallStatement) } self.statement_replacements: Dict[str, cst.BaseStatement] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.BaseStatement) } self.suite_replacements: Dict[str, cst.BaseSuite] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.BaseSuite) } self.subscript_element_replacements: Dict[str, cst.SubscriptElement] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.SubscriptElement) } self.subscript_index_replacements: Dict[str, cst.BaseSlice] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.BaseSlice) } self.decorator_replacements: Dict[str, cst.Decorator] = { name: value for name, value in template_replacements.items() if isinstance(value, cst.Decorator) } # Figure out if there are any variables that we can't support # inserting into templates. 
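# Each replacement bucket above corresponds to one member of
# ValidReplacementType; taking the union of their keys below lets us reject
# any kwarg whose value matched none of the buckets with a descriptive error
# up front, rather than leaving its placeholder mangled in the output.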
supported_vars = { *[name for name in self.simple_replacements], *[name for name in self.annotation_replacements], *[name for name in self.assignment_replacements], *[name for name in self.param_replacements], *[name for name in self.parameters_replacements], *[name for name in self.arg_replacements], *[name for name in self.small_statement_replacements], *[name for name in self.statement_replacements], *[name for name in self.suite_replacements], *[name for name in self.subscript_element_replacements], *[name for name in self.subscript_index_replacements], *[name for name in self.decorator_replacements], } unsupported_vars = { name for name in template_replacements if name not in supported_vars } if unsupported_vars: raise Exception( f'Template replacement for "{next(iter(unsupported_vars))}" is unsupported' ) def leave_Name( self, original_node: cst.Name, updated_node: cst.Name ) -> cst.BaseExpression: var_name = unmangled_name(updated_node.value) if var_name is None or var_name not in self.simple_replacements: # This is not a valid name, don't modify it return updated_node return self.simple_replacements[var_name].deep_clone() def leave_Annotation( self, original_node: cst.Annotation, updated_node: cst.Annotation, ) -> cst.Annotation: # We can't use matchers here due to circular imports annotation = updated_node.annotation if isinstance(annotation, cst.Name): var_name = unmangled_name(annotation.value) if var_name in self.annotation_replacements: return self.annotation_replacements[var_name].deep_clone() return updated_node def leave_AssignTarget( self, original_node: cst.AssignTarget, updated_node: cst.AssignTarget, ) -> cst.AssignTarget: # We can't use matchers here due to circular imports target = updated_node.target if isinstance(target, cst.Name): var_name = unmangled_name(target.value) if var_name in self.assignment_replacements: return self.assignment_replacements[var_name].deep_clone() return updated_node def leave_Param( self, original_node: cst.Param, updated_node: cst.Param, ) -> cst.Param: var_name = unmangled_name(updated_node.name.value) if var_name in self.param_replacements: return self.param_replacements[var_name].deep_clone() return updated_node def leave_Parameters( self, original_node: cst.Parameters, updated_node: cst.Parameters, ) -> cst.Parameters: # A very special case for when we use a template variable for all # function parameters. if ( len(updated_node.params) == 1 and updated_node.star_arg == cst.MaybeSentinel.DEFAULT and len(updated_node.kwonly_params) == 0 and updated_node.star_kwarg is None and len(updated_node.posonly_params) == 0 and updated_node.posonly_ind == cst.MaybeSentinel.DEFAULT ): # This parameters node has only one argument, which is possibly # a replacement. var_name = unmangled_name(updated_node.params[0].name.value) if var_name in self.parameters_replacements: return self.parameters_replacements[var_name].deep_clone() return updated_node def leave_Arg(self, original_node: cst.Arg, updated_node: cst.Arg) -> cst.Arg: # We can't use matchers here due to circular imports arg = updated_node.value if isinstance(arg, cst.Name): var_name = unmangled_name(arg.value) if var_name in self.arg_replacements: return self.arg_replacements[var_name].deep_clone() return updated_node def leave_SimpleStatementLine( self, original_node: cst.SimpleStatementLine, updated_node: cst.SimpleStatementLine, ) -> cst.BaseStatement: # We can't use matchers here due to circular imports. 
We take advantage of # the fact that a name on a single line will be parsed as an Expr node # contained in a SimpleStatementLine, so we check for these and see if they # should be expanded template-wise to a statement of some type. if len(updated_node.body) == 1: body_node = updated_node.body[0] if isinstance(body_node, cst.Expr): name_node = body_node.value if isinstance(name_node, cst.Name): var_name = unmangled_name(name_node.value) if var_name in self.statement_replacements: return self.statement_replacements[var_name].deep_clone() return updated_node def leave_Expr( self, original_node: cst.Expr, updated_node: cst.Expr, ) -> cst.BaseSmallStatement: # We can't use matchers here due to circular imports. We do a similar trick # to the above stanza handling SimpleStatementLine to support templates # which are trying to substitute a BaseSmallStatement. name_node = updated_node.value if isinstance(name_node, cst.Name): var_name = unmangled_name(name_node.value) if var_name in self.small_statement_replacements: return self.small_statement_replacements[var_name].deep_clone() return updated_node def leave_SimpleStatementSuite( self, original_node: cst.SimpleStatementSuite, updated_node: cst.SimpleStatementSuite, ) -> cst.BaseSuite: # We can't use matchers here due to circular imports. We take advantage of # the fact that a name in a simple suite will be parsed as an Expr node # contained in a SimpleStatementSuite, so we check for these and see if they # should be expanded template-wise to a base suite of some type. if len(updated_node.body) == 1: body_node = updated_node.body[0] if isinstance(body_node, cst.Expr): name_node = body_node.value if isinstance(name_node, cst.Name): var_name = unmangled_name(name_node.value) if var_name in self.suite_replacements: return self.suite_replacements[var_name].deep_clone() return updated_node def leave_IndentedBlock( self, original_node: cst.IndentedBlock, updated_node: cst.IndentedBlock, ) -> cst.BaseSuite: # We can't use matchers here due to circular imports. We take advantage of # the fact that a name in an indented block will be parsed as an Expr node # contained in a SimpleStatementLine, so we check for these and see if they # should be expanded template-wise to a base suite of some type. if len(updated_node.body) == 1: statement_node = updated_node.body[0] if ( isinstance(statement_node, cst.SimpleStatementLine) and len(statement_node.body) == 1 ): body_node = statement_node.body[0] if isinstance(body_node, cst.Expr): name_node = body_node.value if isinstance(name_node, cst.Name): var_name = unmangled_name(name_node.value) if var_name in self.suite_replacements: return self.suite_replacements[var_name].deep_clone() return updated_node def leave_Index( self, original_node: cst.Index, updated_node: cst.Index, ) -> cst.BaseSlice: # We can't use matchers here due to circular imports expr = updated_node.value if isinstance(expr, cst.Name): var_name = unmangled_name(expr.value) if var_name in self.subscript_index_replacements: return self.subscript_index_replacements[var_name].deep_clone() return updated_node def leave_SubscriptElement( self, original_node: cst.SubscriptElement, updated_node: cst.SubscriptElement, ) -> cst.SubscriptElement: # We can't use matchers here due to circular imports. We use the trick # similar to above stanzas where a template replacement variable will # always show up as a certain type (in this case an Index inside of a # SubscriptElement) in order to successfully replace subscript elements # in templates. 
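# Concretely: in a template such as "Dict[{key}, int]" the mangled
# placeholder parses as SubscriptElement(slice=Index(value=Name(...))),
# which is exactly the shape unwrapped below before substitution.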
index = updated_node.slice if isinstance(index, cst.Index): expr = index.value if isinstance(expr, cst.Name): var_name = unmangled_name(expr.value) if var_name in self.subscript_element_replacements: return self.subscript_element_replacements[var_name].deep_clone() return updated_node def leave_Decorator( self, original_node: cst.Decorator, updated_node: cst.Decorator ) -> cst.Decorator: # We can't use matchers here due to circular imports decorator = updated_node.decorator if isinstance(decorator, cst.Name): var_name = unmangled_name(decorator.value) if var_name in self.decorator_replacements: return self.decorator_replacements[var_name].deep_clone() return updated_node class TemplateChecker(cst.CSTVisitor): def __init__(self, template_vars: Set[str]) -> None: self.template_vars = template_vars def visit_Name(self, node: cst.Name) -> None: for var in self.template_vars: if node.value == mangled_name(var): raise Exception(f'Template variable "{var}" was not replaced properly') def unmangle_nodes( tree: cst.CSTNode, template_replacements: Mapping[str, ValidReplacementType], ) -> cst.CSTNode: unmangler = TemplateTransformer(template_replacements) return ensure_type(tree.visit(unmangler), cst.CSTNode) _DEFAULT_PARTIAL_PARSER_CONFIG: cst.PartialParserConfig = cst.PartialParserConfig() def parse_template_module( template: str, config: cst.PartialParserConfig = _DEFAULT_PARTIAL_PARSER_CONFIG, **template_replacements: ValidReplacementType, ) -> cst.Module: """ Accepts an entire python module template, including all leading and trailing whitespace. Any :class:`~libcst.CSTNode` provided as a keyword argument to this function will be inserted into the template at the appropriate location similar to an f-string expansion. For example:: module = parse_template_module("from {mod} import Foo\\n", mod=Name("bar")) The above code will parse to a module containing a single :class:`~libcst.FromImport` statement, referencing module ``bar`` and importing object ``Foo`` from it. Remember that if you are parsing a template as part of a substitution inside a transform, its considered :ref:`best practice ` to pass in a ``config`` from the current module under transformation. Note that unlike :func:`~libcst.parse_module`, this function does not support bytes as an input. This is due to the fact that it is processed as a template before parsing as a module. """ source = mangle_template(template, {name for name in template_replacements}) module = cst.parse_module(source, config) new_module = ensure_type(unmangle_nodes(module, template_replacements), cst.Module) new_module.visit(TemplateChecker({name for name in template_replacements})) return new_module def parse_template_statement( template: str, config: cst.PartialParserConfig = _DEFAULT_PARTIAL_PARSER_CONFIG, **template_replacements: ValidReplacementType, ) -> Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]: """ Accepts a statement template followed by a trailing newline. If a trailing newline is not provided, one will be added. Any :class:`~libcst.CSTNode` provided as a keyword argument to this function will be inserted into the template at the appropriate location similar to an f-string expansion. For example:: statement = parse_template_statement("assert x > 0, {msg}", msg=SimpleString('"Uh oh!"')) The above code will parse to an assert statement checking that some variable ``x`` is greater than zero, or providing the assert message ``"Uh oh!"``. 
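Expression-shaped template variables work here as well; as a further illustrative example (the name ``rhs`` is arbitrary)::

    statement = parse_template_statement("x = {rhs}", rhs=Integer("5"))

The above parses to an assignment statement binding ``x`` to the literal ``5``.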
Remember that if you are parsing a template as part of a substitution inside a transform, its considered :ref:`best practice ` to pass in a ``config`` from the current module under transformation. """ source = mangle_template(template, {name for name in template_replacements}) statement = cst.parse_statement(source, config) new_statement = unmangle_nodes(statement, template_replacements) if not isinstance( new_statement, (cst.SimpleStatementLine, cst.BaseCompoundStatement) ): raise Exception( f"Expected a statement but got a {new_statement.__class__.__name__}!" ) new_statement.visit(TemplateChecker({name for name in template_replacements})) return new_statement def parse_template_expression( template: str, config: cst.PartialParserConfig = _DEFAULT_PARTIAL_PARSER_CONFIG, **template_replacements: ValidReplacementType, ) -> cst.BaseExpression: """ Accepts an expression template on a single line. Leading and trailing whitespace is not valid (there’s nowhere to store it on the expression node). Any :class:`~libcst.CSTNode` provided as a keyword argument to this function will be inserted into the template at the appropriate location similar to an f-string expansion. For example:: expression = parse_template_expression("x + {foo}", foo=Name("y"))) The above code will parse to a :class:`~libcst.BinaryOperation` expression adding two names (``x`` and ``y``) together. Remember that if you are parsing a template as part of a substitution inside a transform, its considered :ref:`best practice ` to pass in a ``config`` from the current module under transformation. """ source = mangle_template(template, {name for name in template_replacements}) expression = cst.parse_expression(source, config) new_expression = ensure_type( unmangle_nodes(expression, template_replacements), cst.BaseExpression ) new_expression.visit(TemplateChecker({name for name in template_replacements})) return new_expression LibCST-1.2.0/libcst/helpers/common.py000066400000000000000000000016451456464173300174010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from typing import Type, TypeVar T = TypeVar("T") def ensure_type(node: object, nodetype: Type[T]) -> T: """ Takes any python object, and a LibCST :class:`~libcst.CSTNode` subclass and refines the type of the python object. This is most useful when you already know that a particular object is a certain type but your type checker is not convinced. Note that this does an instance check for you and raises an exception if it is not the right type, so this should be used in situations where you are sure of the type given previous checks. """ if not isinstance(node, nodetype): raise Exception( f"Expected a {nodetype.__name__} but got a {node.__class__.__name__}!" ) return node LibCST-1.2.0/libcst/helpers/expression.py000066400000000000000000000033221456464173300203020ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from typing import Optional, Union import libcst as cst def get_full_name_for_node(node: Union[str, cst.CSTNode]) -> Optional[str]: """Return a dot concatenated full name for str, :class:`~libcst.Name`, :class:`~libcst.Attribute`. 
:class:`~libcst.Call`, :class:`~libcst.Subscript`, :class:`~libcst.FunctionDef`, :class:`~libcst.ClassDef`, :class:`~libcst.Decorator`. Return ``None`` for not supported Node. """ if isinstance(node, cst.Name): return node.value elif isinstance(node, str): return node elif isinstance(node, cst.Attribute): return f"{get_full_name_for_node(node.value)}.{node.attr.value}" elif isinstance(node, cst.Call): return get_full_name_for_node(node.func) elif isinstance(node, cst.Subscript): return get_full_name_for_node(node.value) elif isinstance(node, (cst.FunctionDef, cst.ClassDef)): return get_full_name_for_node(node.name) elif isinstance(node, cst.Decorator): return get_full_name_for_node(node.decorator) return None def get_full_name_for_node_or_raise(node: Union[str, cst.CSTNode]) -> str: """Return a dot concatenated full name for str, :class:`~libcst.Name`, :class:`~libcst.Attribute`. :class:`~libcst.Call`, :class:`~libcst.Subscript`, :class:`~libcst.FunctionDef`, :class:`~libcst.ClassDef`. Raise Exception for not supported Node. """ full_name = get_full_name_for_node(node) if full_name is None: raise Exception(f"Not able to parse full name for: {node}") return full_name LibCST-1.2.0/libcst/helpers/module.py000066400000000000000000000137261456464173300174010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from dataclasses import dataclass from itertools import islice from pathlib import PurePath from typing import List, Optional from libcst import Comment, EmptyLine, ImportFrom, Module from libcst._types import StrPath from libcst.helpers.expression import get_full_name_for_node def insert_header_comments(node: Module, comments: List[str]) -> Module: """ Insert comments after last non-empty line in header. Use this to insert one or more comments after any copyright preamble in a :class:`~libcst.Module`. Each comment in the list of ``comments`` must start with a ``#`` and will be placed on its own line in the appropriate location. """ # Split the lines up into a contiguous comment-containing section and # the empty whitespace section that follows last_comment_index = -1 for i, line in enumerate(node.header): if line.comment is not None: last_comment_index = i comment_lines = islice(node.header, last_comment_index + 1) empty_lines = islice(node.header, last_comment_index + 1, None) inserted_lines = [EmptyLine(comment=Comment(value=comment)) for comment in comments] # pyre-fixme[60]: Concatenation not yet support for multiple variadic tuples: # `*comment_lines, *inserted_lines, *empty_lines`. return node.with_changes(header=(*comment_lines, *inserted_lines, *empty_lines)) def get_absolute_module( current_module: Optional[str], module_name: Optional[str], num_dots: int ) -> Optional[str]: if num_dots == 0: # This is an absolute import, so the module is correct. return module_name if current_module is None: # We don't actually have the current module available, so we can't compute # the absolute module from relative. return None # We have the current module, as well as the relative, let's compute the base. modules = current_module.split(".") if len(modules) < num_dots: # This relative import goes past the base of the repository, so we can't calculate it. return None base_module = ".".join(modules[:-num_dots]) # Finally, if the module name was supplied, append it to the end. 
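# Worked example: current_module="a.b.c" with num_dots=2 strips two trailing
# components, giving base_module="a"; with module_name="d" the result is
# "a.d", and with module_name=None it is just "a".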
if module_name is not None: # If we went all the way to the top, the base module should be empty, so we # should return the relative bit as absolute. Otherwise, combine the base # module and module name using a dot separator. base_module = ( f"{base_module}.{module_name}" if len(base_module) > 0 else module_name ) # If they tried to import all the way to the root, return None. Otherwise, # return the module itself. return base_module if len(base_module) > 0 else None def get_absolute_module_for_import( current_module: Optional[str], import_node: ImportFrom ) -> Optional[str]: # First, let's try to grab the module name, regardless of relative status. module = import_node.module module_name = get_full_name_for_node(module) if module is not None else None # Now, get the relative import location if it exists. num_dots = len(import_node.relative) return get_absolute_module(current_module, module_name, num_dots) def get_absolute_module_for_import_or_raise( current_module: Optional[str], import_node: ImportFrom ) -> str: module = get_absolute_module_for_import(current_module, import_node) if module is None: raise Exception(f"Unable to compute absolute module for {import_node}") return module def get_absolute_module_from_package( current_package: Optional[str], module_name: Optional[str], num_dots: int ) -> Optional[str]: if num_dots == 0: # This is an absolute import, so the module is correct. return module_name if current_package is None or current_package == "": # We don't actually have the current module available, so we can't compute # the absolute module from relative. return None # see importlib._bootstrap._resolve_name # https://github.com/python/cpython/blob/3.10/Lib/importlib/_bootstrap.py#L902 bits = current_package.rsplit(".", num_dots - 1) if len(bits) < num_dots: return None base = bits[0] return "{}.{}".format(base, module_name) if module_name else base def get_absolute_module_from_package_for_import( current_package: Optional[str], import_node: ImportFrom ) -> Optional[str]: # First, let's try to grab the module name, regardless of relative status. module = import_node.module module_name = get_full_name_for_node(module) if module is not None else None # Now, get the relative import location if it exists. num_dots = len(import_node.relative) return get_absolute_module_from_package(current_package, module_name, num_dots) def get_absolute_module_from_package_for_import_or_raise( current_package: Optional[str], import_node: ImportFrom ) -> str: module = get_absolute_module_from_package_for_import(current_package, import_node) if module is None: raise Exception(f"Unable to compute absolute module for {import_node}") return module @dataclass(frozen=True) class ModuleNameAndPackage: name: str package: str def calculate_module_and_package( repo_root: StrPath, filename: StrPath ) -> ModuleNameAndPackage: # Given an absolute repo_root and an absolute filename, calculate the # python module name for the file. relative_filename = PurePath(filename).relative_to(repo_root) relative_filename = relative_filename.with_suffix("") # handle special cases if relative_filename.stem in ["__init__", "__main__"]: relative_filename = relative_filename.parent package = name = ".".join(relative_filename.parts) else: name = ".".join(relative_filename.parts) package = ".".join(relative_filename.parts[:-1]) return ModuleNameAndPackage(name, package) LibCST-1.2.0/libcst/helpers/paths.py000066400000000000000000000011731456464173300172240ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. 
and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
from contextlib import contextmanager
from pathlib import Path
from typing import Generator

from libcst._types import StrPath


@contextmanager
def chdir(path: StrPath) -> Generator[Path, None, None]:
    """
    Temporarily chdir to the given path, and then return to the previous path.
    """
    try:
        path = Path(path).resolve()
        cwd = os.getcwd()
        os.chdir(path)
        yield path
    finally:
        os.chdir(cwd)
LibCST-1.2.0/libcst/helpers/tests/000077500000000000000000000000001456464173300166735ustar00rootroot00000000000000
LibCST-1.2.0/libcst/helpers/tests/__init__.py000066400000000000000000000002631456464173300210050ustar00rootroot00000000000000
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
LibCST-1.2.0/libcst/helpers/tests/test_expression.py000066400000000000000000000066631456464173300225140ustar00rootroot00000000000000
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

from ast import literal_eval
from typing import Optional, Union

import libcst as cst
from libcst.helpers import (
    ensure_type,
    get_full_name_for_node,
    get_full_name_for_node_or_raise,
)
from libcst.testing.utils import data_provider, UnitTest


class ExpressionTest(UnitTest):
    @data_provider(
        (
            ("a string", "a string"),
            (cst.Name("a_name"), "a_name"),
            (cst.parse_expression("a.b.c"), "a.b.c"),
            (cst.parse_expression("a.b()"), "a.b"),
            (cst.parse_expression("a.b.c[i]"), "a.b.c"),
            (cst.parse_statement("def fun(): pass"), "fun"),
            (cst.parse_statement("class cls: pass"), "cls"),
            (
                cst.Decorator(
                    ensure_type(cst.parse_expression("a.b.c.d"), cst.Attribute)
                ),
                "a.b.c.d",
            ),
            (cst.parse_statement("(a.b()).c()"), None),  # not a supported Node type
        )
    )
    def test_get_full_name_for_expression(
        self,
        input: Union[str, cst.CSTNode],
        output: Optional[str],
    ) -> None:
        self.assertEqual(get_full_name_for_node(input), output)
        if output is None:
            with self.assertRaises(Exception):
                get_full_name_for_node_or_raise(input)
        else:
            self.assertEqual(get_full_name_for_node_or_raise(input), output)

    def test_simplestring_evaluated_value(self) -> None:
        raw_string = '"a string."'
        node = ensure_type(cst.parse_expression(raw_string), cst.SimpleString)
        self.assertEqual(node.value, raw_string)
        self.assertEqual(node.evaluated_value, literal_eval(raw_string))

    def test_integer_evaluated_value(self) -> None:
        raw_value = "5"
        node = ensure_type(cst.parse_expression(raw_value), cst.Integer)
        self.assertEqual(node.value, raw_value)
        self.assertEqual(node.evaluated_value, literal_eval(raw_value))

    def test_float_evaluated_value(self) -> None:
        raw_value = "5.5"
        node = ensure_type(cst.parse_expression(raw_value), cst.Float)
        self.assertEqual(node.value, raw_value)
        self.assertEqual(node.evaluated_value, literal_eval(raw_value))

    def test_complex_evaluated_value(self) -> None:
        raw_value = "5j"
        node = ensure_type(cst.parse_expression(raw_value), cst.Imaginary)
        self.assertEqual(node.value, raw_value)
        self.assertEqual(node.evaluated_value, literal_eval(raw_value))

    def test_concatenated_string_evaluated_value(self) -> None:
        code = '"This " "is " "a " "concatenated " "string."'
        node = ensure_type(cst.parse_expression(code), cst.ConcatenatedString)
        self.assertEqual(node.evaluated_value, "This is a concatenated string.")
        code = 'b"A concatenated" b" byte."'
        node = ensure_type(cst.parse_expression(code), cst.ConcatenatedString)
        self.assertEqual(node.evaluated_value, b"A concatenated byte.")
        code = '"var=" f" {var}"'
        node = ensure_type(cst.parse_expression(code), cst.ConcatenatedString)
        self.assertEqual(node.evaluated_value, None)
        code = '"var" "=" f" {var}"'
        node = ensure_type(cst.parse_expression(code), cst.ConcatenatedString)
        self.assertEqual(node.evaluated_value, None)
LibCST-1.2.0/libcst/helpers/tests/test_module.py000066400000000000000000000246551456464173300216050ustar00rootroot00000000000000
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#

from typing import Optional

import libcst as cst
from libcst.helpers.common import ensure_type
from libcst.helpers.module import (
    calculate_module_and_package,
    get_absolute_module_for_import,
    get_absolute_module_for_import_or_raise,
    get_absolute_module_from_package_for_import,
    get_absolute_module_from_package_for_import_or_raise,
    insert_header_comments,
    ModuleNameAndPackage,
)
from libcst.testing.utils import data_provider, UnitTest


class ModuleTest(UnitTest):
    def test_insert_header_comments(self) -> None:
        inserted_comments = ["# INSERT ME", "# AND ME"]
        comment_lines = ["# First comment", "# Another one", "# comment 3"]
        empty_lines = [" ", ""]
        non_header_line = ["SOME_VARIABLE = 0"]
        original_code = "\n".join(comment_lines + empty_lines + non_header_line)
        expected_code = "\n".join(
            comment_lines + inserted_comments + empty_lines + non_header_line
        )
        node = cst.parse_module(original_code)
        self.assertEqual(
            insert_header_comments(node, inserted_comments).code, expected_code
        )

        # No comment case
        original_code = "\n".join(empty_lines + non_header_line)
        expected_code = "\n".join(inserted_comments + empty_lines + non_header_line)
        node = cst.parse_module(original_code)
        self.assertEqual(
            insert_header_comments(node, inserted_comments).code, expected_code
        )

        # No empty lines case
        original_code = "\n".join(comment_lines + non_header_line)
        expected_code = "\n".join(comment_lines + inserted_comments + non_header_line)
        node = cst.parse_module(original_code)
        self.assertEqual(
            insert_header_comments(node, inserted_comments).code, expected_code
        )

        # Empty line between comments
        comment_lines.insert(1, " ")
        original_code = "\n".join(comment_lines + empty_lines + non_header_line)
        expected_code = "\n".join(
            comment_lines + inserted_comments + empty_lines + non_header_line
        )
        node = cst.parse_module(original_code)
        self.assertEqual(
            insert_header_comments(node, inserted_comments).code, expected_code
        )

        # No header case
        original_code = "\n".join(non_header_line)
        expected_code = "\n".join(inserted_comments + non_header_line)
        node = cst.parse_module(original_code)
        self.assertEqual(
            insert_header_comments(node, inserted_comments).code, expected_code
        )

    @data_provider(
        (
            # Simple imports that are already absolute.
            (None, "from a.b import c", "a.b"),
            ("x.y.z", "from a.b import c", "a.b"),
            # Relative import that can't be resolved due to missing module.
            (None, "from ..w import c", None),
            # Relative import that goes past the module level.
            ("x", "from ...y import z", None),
            ("x.y.z", "from .....w import c", None),
            ("x.y.z", "from ... import c", None),
            # Correct resolution of absolute from relative modules.
            ("x.y.z", "from . import c", "x.y"),
            ("x.y.z", "from .. import c", "x"),
            ("x.y.z", "from .w import c", "x.y.w"),
            ("x.y.z", "from ..w import c", "x.w"),
            ("x.y.z", "from ...w import c", "w"),
        )
    )
    def test_get_absolute_module(
        self,
        module: Optional[str],
        importfrom: str,
        output: Optional[str],
    ) -> None:
        node = ensure_type(cst.parse_statement(importfrom), cst.SimpleStatementLine)
        assert len(node.body) == 1, "Unexpected number of statements!"
        import_node = ensure_type(node.body[0], cst.ImportFrom)
        self.assertEqual(get_absolute_module_for_import(module, import_node), output)
        if output is None:
            with self.assertRaises(Exception):
                get_absolute_module_for_import_or_raise(module, import_node)
        else:
            self.assertEqual(
                get_absolute_module_for_import_or_raise(module, import_node), output
            )

    @data_provider(
        (
            # Simple imports that are already absolute.
            (None, "from a.b import c", "a.b"),
            ("x/y/z.py", "from a.b import c", "a.b"),
            ("x/y/z/__init__.py", "from a.b import c", "a.b"),
            # Relative import that can't be resolved due to missing module.
            (None, "from ..w import c", None),
            # Attempted relative import with no known parent package
            ("__init__.py", "from .y import z", None),
            ("x.py", "from .y import z", None),
            # Relative import that goes past the module level.
            ("x.py", "from ...y import z", None),
            ("x/y/z.py", "from ... import c", None),
            ("x/y/z.py", "from ...w import c", None),
            ("x/y/z/__init__.py", "from .... import c", None),
            ("x/y/z/__init__.py", "from ....w import c", None),
            # Correct resolution of absolute from relative modules.
            ("x/y/z.py", "from . import c", "x.y"),
            ("x/y/z.py", "from .. import c", "x"),
            ("x/y/z.py", "from .w import c", "x.y.w"),
            ("x/y/z.py", "from ..w import c", "x.w"),
            ("x/y/z/__init__.py", "from . import c", "x.y.z"),
            ("x/y/z/__init__.py", "from .. import c", "x.y"),
            ("x/y/z/__init__.py", "from ... import c", "x"),
            ("x/y/z/__init__.py", "from .w import c", "x.y.z.w"),
            ("x/y/z/__init__.py", "from ..w import c", "x.y.w"),
            ("x/y/z/__init__.py", "from ...w import c", "x.w"),
        )
    )
    def test_get_absolute_module_from_package(
        self,
        filename: Optional[str],
        importfrom: str,
        output: Optional[str],
    ) -> None:
        package = None
        if filename is not None:
            info = calculate_module_and_package(".", filename)
            package = info.package

        node = ensure_type(cst.parse_statement(importfrom), cst.SimpleStatementLine)
        assert len(node.body) == 1, "Unexpected number of statements!"
        import_node = ensure_type(node.body[0], cst.ImportFrom)
        self.assertEqual(
            get_absolute_module_from_package_for_import(package, import_node), output
        )
        if output is None:
            with self.assertRaises(Exception):
                get_absolute_module_from_package_for_import_or_raise(
                    package, import_node
                )
        else:
            self.assertEqual(
                get_absolute_module_from_package_for_import_or_raise(
                    package, import_node
                ),
                output,
            )

    @data_provider(
        (
            # Nodes without an asname
            (cst.ImportAlias(name=cst.Name("foo")), "foo", None),
            (
                cst.ImportAlias(name=cst.Attribute(cst.Name("foo"), cst.Name("bar"))),
                "foo.bar",
                None,
            ),
            # Nodes with an asname
            (
                cst.ImportAlias(
                    name=cst.Name("foo"), asname=cst.AsName(name=cst.Name("baz"))
                ),
                "foo",
                "baz",
            ),
            (
                cst.ImportAlias(
                    name=cst.Attribute(cst.Name("foo"), cst.Name("bar")),
                    asname=cst.AsName(name=cst.Name("baz")),
                ),
                "foo.bar",
                "baz",
            ),
        )
    )
    def test_importalias_helpers(
        self, alias_node: cst.ImportAlias, full_name: str, alias: Optional[str]
    ) -> None:
        self.assertEqual(alias_node.evaluated_name, full_name)
        self.assertEqual(alias_node.evaluated_alias, alias)

    @data_provider(
        (
            # Various files inside the root should give back valid modules.
            (
                "/home/username/root",
                "/home/username/root/file.py",
                ModuleNameAndPackage("file", ""),
            ),
            (
                "/home/username/root/",
                "/home/username/root/file.py",
                ModuleNameAndPackage("file", ""),
            ),
            (
                "/home/username/root/",
                "/home/username/root/some/dir/file.py",
                ModuleNameAndPackage("some.dir.file", "some.dir"),
            ),
            # Various special files inside the root should give back valid modules.
            (
                "/home/username/root/",
                "/home/username/root/some/dir/__init__.py",
                ModuleNameAndPackage("some.dir", "some.dir"),
            ),
            (
                "/home/username/root/",
                "/home/username/root/some/dir/__main__.py",
                ModuleNameAndPackage("some.dir", "some.dir"),
            ),
            (
                "c:/Program Files/",
                "c:/Program Files/some/dir/file.py",
                ModuleNameAndPackage("some.dir.file", "some.dir"),
            ),
            (
                "c:/Program Files/",
                "c:/Program Files/some/dir/__main__.py",
                ModuleNameAndPackage("some.dir", "some.dir"),
            ),
        ),
    )
    def test_calculate_module_and_package(
        self,
        repo_root: str,
        filename: str,
        module_and_package: Optional[ModuleNameAndPackage],
    ) -> None:
        self.assertEqual(
            calculate_module_and_package(repo_root, filename), module_and_package
        )

    @data_provider(
        (
            # Providing a file outside the root should raise an exception
            ("/home/username/root", "/some/dummy/file.py"),
            ("/home/username/root/", "/some/dummy/file.py"),
            ("/home/username/root", "/home/username/file.py"),
            # some windows tests
            (
                "c:/Program Files/",
                "d:/Program Files/some/dir/file.py",
            ),
            (
                "c:/Program Files/other/",
                "c:/Program Files/some/dir/file.py",
            ),
        )
    )
    def test_invalid_module_and_package(
        self,
        repo_root: str,
        filename: str,
    ) -> None:
        with self.assertRaises(ValueError):
            calculate_module_and_package(repo_root, filename)
LibCST-1.2.0/libcst/helpers/tests/test_paths.py000066400000000000000000000036031456464173300214250ustar00rootroot00000000000000
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from pathlib import Path
from tempfile import TemporaryDirectory

from libcst.helpers.paths import chdir
from libcst.testing.utils import UnitTest


class PathsTest(UnitTest):
    def test_chdir(self) -> None:
        with TemporaryDirectory() as td:
            tdp = Path(td).resolve()
            inner = tdp / "foo" / "bar"
            inner.mkdir(parents=True)

            with self.subTest("string paths"):
                cwd1 = Path.cwd()
                with chdir(tdp.as_posix()) as path2:
                    cwd2 = Path.cwd()
                    self.assertEqual(tdp, cwd2)
                    self.assertEqual(tdp, path2)

                    with chdir(inner.as_posix()) as path3:
                        cwd3 = Path.cwd()
                        self.assertEqual(inner, cwd3)
                        self.assertEqual(inner, path3)

                    cwd4 = Path.cwd()
                    self.assertEqual(tdp, cwd4)
                    self.assertEqual(cwd2, cwd4)

                cwd5 = Path.cwd()
                self.assertEqual(cwd1, cwd5)

            with self.subTest("pathlib objects"):
                cwd1 = Path.cwd()
                with chdir(tdp) as path2:
                    cwd2 = Path.cwd()
                    self.assertEqual(tdp, cwd2)
                    self.assertEqual(tdp, path2)

                    with chdir(inner) as path3:
                        cwd3 = Path.cwd()
                        self.assertEqual(inner, cwd3)
                        self.assertEqual(inner, path3)

                    cwd4 = Path.cwd()
                    self.assertEqual(tdp, cwd4)
                    self.assertEqual(cwd2, cwd4)

                cwd5 = Path.cwd()
                self.assertEqual(cwd1, cwd5)
LibCST-1.2.0/libcst/helpers/tests/test_template.py000066400000000000000000000270001456464173300221160ustar00rootroot00000000000000
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
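# NOTE: The tests below exercise LibCST's template helpers, which substitute
# CST nodes for `{placeholder}` markers in a source-code template. As a
# minimal usage sketch (hypothetical names, not part of this test suite):
#
#     import libcst as cst
#     from libcst.helpers import parse_template_expression
#
#     # Build the expression "x + 1" by filling two placeholders with nodes.
#     expr = parse_template_expression(
#         "{a} + {b}", a=cst.Name("x"), b=cst.Integer("1")
#     )
#     assert cst.Module([]).code_for_node(expr) == "x + 1"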
#

import os
from textwrap import dedent

import libcst as cst
from libcst.helpers import (
    parse_template_expression,
    parse_template_module,
    parse_template_statement,
)
from libcst.testing.utils import UnitTest


class TemplateTest(UnitTest):
    def dedent(self, code: str) -> str:
        lines = dedent(code).split(os.linesep)
        if not lines[0].strip():
            lines = lines[1:]
        if not lines[-1].strip():
            lines = [
                *lines[:-1],
                os.linesep,
            ]
        return os.linesep.join(lines)

    def code(self, node: cst.CSTNode) -> str:
        return cst.Module([]).code_for_node(node)

    def test_simple_module(self) -> None:
        module = parse_template_module(
            self.dedent(
                """
                from {module} import {obj}

                def foo() -> {obj}:
                    return {obj}()
                """
            ),
            module=cst.Name("foo"),
            obj=cst.Name("Bar"),
        )
        self.assertEqual(
            module.code,
            self.dedent(
                """
                from foo import Bar

                def foo() -> Bar:
                    return Bar()
                """
            ),
        )

    def test_simple_statement(self) -> None:
        statement = parse_template_statement(
            "assert {test}, {msg}\n",
            test=cst.Name("True"),
            msg=cst.SimpleString('"Somehow True is no longer True..."'),
        )
        self.assertEqual(
            self.code(statement),
            'assert True, "Somehow True is no longer True..."\n',
        )

    def test_simple_expression(self) -> None:
        expression = parse_template_expression(
            "{a} + {b} + {c}",
            a=cst.Name("one"),
            b=cst.Name("two"),
            c=cst.BinaryOperation(
                lpar=(cst.LeftParen(),),
                left=cst.Name("three"),
                operator=cst.Multiply(),
                right=cst.Name("four"),
                rpar=(cst.RightParen(),),
            ),
        )
        self.assertEqual(
            self.code(expression),
            "one + two + (three * four)",
        )

    def test_annotation(self) -> None:
        # Test that we can insert an annotation expression normally.
        statement = parse_template_statement(
            "x: {type} = {val}",
            type=cst.Name("int"),
            val=cst.Integer("5"),
        )
        self.assertEqual(
            self.code(statement),
            "x: int = 5\n",
        )
        # Test that we can insert an annotation node as a special case.
        statement = parse_template_statement(
            "x: {type} = {val}",
            type=cst.Annotation(cst.Name("int")),
            val=cst.Integer("5"),
        )
        self.assertEqual(
            self.code(statement),
            "x: int = 5\n",
        )

    def test_assign_target(self) -> None:
        # Test that we can insert an assignment target normally.
        statement = parse_template_statement(
            "{a} = {b} = {val}",
            a=cst.Name("first"),
            b=cst.Name("second"),
            val=cst.Integer("5"),
        )
        self.assertEqual(
            self.code(statement),
            "first = second = 5\n",
        )
        # Test that we can insert an assignment target as a special case.
        statement = parse_template_statement(
            "{a} = {b} = {val}",
            a=cst.AssignTarget(cst.Name("first")),
            b=cst.AssignTarget(cst.Name("second")),
            val=cst.Integer("5"),
        )
        self.assertEqual(
            self.code(statement),
            "first = second = 5\n",
        )

    def test_parameters(self) -> None:
        # Test that we can insert a parameter into a function def normally.
        statement = parse_template_statement(
            "def foo({arg}): pass",
            arg=cst.Name("bar"),
        )
        self.assertEqual(
            self.code(statement),
            "def foo(bar): pass\n",
        )
        # Test that we can insert a parameter as a special case.
        statement = parse_template_statement(
            "def foo({arg}): pass",
            arg=cst.Param(cst.Name("bar")),
        )
        self.assertEqual(
            self.code(statement),
            "def foo(bar): pass\n",
        )
        # Test that we can insert a parameters list as a special case.
        statement = parse_template_statement(
            "def foo({args}): pass",
            args=cst.Parameters(
                (cst.Param(cst.Name("bar")),),
            ),
        )
        self.assertEqual(
            self.code(statement),
            "def foo(bar): pass\n",
        )
        # Test filling out multiple parameters
        statement = parse_template_statement(
            "def foo({args}): pass",
            args=cst.Parameters(
                params=(
                    cst.Param(cst.Name("bar")),
                    cst.Param(cst.Name("baz")),
                ),
                star_kwarg=cst.Param(cst.Name("rest")),
            ),
        )
        self.assertEqual(
            self.code(statement),
            "def foo(bar, baz, **rest): pass\n",
        )

    def test_args(self) -> None:
        # Test that we can insert an argument into a function call normally.
        statement = parse_template_expression(
            "foo({arg1}, {arg2})",
            arg1=cst.Name("bar"),
            arg2=cst.Name("baz"),
        )
        self.assertEqual(
            self.code(statement),
            "foo(bar, baz)",
        )
        # Test that we can insert an argument as a special case.
        statement = parse_template_expression(
            "foo({arg1}, {arg2})",
            arg1=cst.Arg(cst.Name("bar")),
            arg2=cst.Arg(cst.Name("baz")),
        )
        self.assertEqual(
            self.code(statement),
            "foo(bar, baz)",
        )

    def test_statement(self) -> None:
        # Test that we can insert various types of statements into a
        # statement list.
        module = parse_template_module(
            "{statement1}\n{statement2}\n{statement3}\n",
            statement1=cst.If(
                test=cst.Name("foo"),
                body=cst.SimpleStatementSuite(
                    (cst.Pass(),),
                ),
            ),
            statement2=cst.SimpleStatementLine(
                (cst.Expr(cst.Call(cst.Name("bar"))),),
            ),
            statement3=cst.Pass(),
        )
        self.assertEqual(
            module.code,
            "if foo: pass\nbar()\npass\n",
        )

    def test_suite(self) -> None:
        # Test that we can insert various types of statement suites into a
        # spot accepting a suite.
        module = parse_template_module(
            "if x is True: {suite}\n",
            suite=cst.SimpleStatementSuite(
                body=(cst.Pass(),),
            ),
        )
        self.assertEqual(
            module.code,
            "if x is True: pass\n",
        )
        module = parse_template_module(
            "if x is True: {suite}\n",
            suite=cst.IndentedBlock(
                body=(
                    cst.SimpleStatementLine(
                        (cst.Pass(),),
                    ),
                ),
            ),
        )
        self.assertEqual(
            module.code,
            "if x is True:\n    pass\n",
        )
        module = parse_template_module(
            "if x is True:\n    {suite}\n",
            suite=cst.SimpleStatementSuite(
                body=(cst.Pass(),),
            ),
        )
        self.assertEqual(
            module.code,
            "if x is True: pass\n",
        )
        module = parse_template_module(
            "if x is True:\n    {suite}\n",
            suite=cst.IndentedBlock(
                body=(
                    cst.SimpleStatementLine(
                        (cst.Pass(),),
                    ),
                ),
            ),
        )
        self.assertEqual(
            module.code,
            "if x is True:\n    pass\n",
        )

    def test_subscript(self) -> None:
        # Test that we can insert various subscript slices into an
        # acceptable spot.
        expression = parse_template_expression(
            "Optional[{type}]",
            type=cst.Name("int"),
        )
        self.assertEqual(
            self.code(expression),
            "Optional[int]",
        )
        expression = parse_template_expression(
            "Tuple[{type1}, {type2}]",
            type1=cst.Name("int"),
            type2=cst.Name("str"),
        )
        self.assertEqual(
            self.code(expression),
            "Tuple[int, str]",
        )
        expression = parse_template_expression(
            "Optional[{type}]",
            type=cst.Index(cst.Name("int")),
        )
        self.assertEqual(
            self.code(expression),
            "Optional[int]",
        )
        expression = parse_template_expression(
            "Optional[{type}]",
            type=cst.SubscriptElement(cst.Index(cst.Name("int"))),
        )
        self.assertEqual(
            self.code(expression),
            "Optional[int]",
        )
        expression = parse_template_expression(
            "foo[{slice}]",
            slice=cst.Slice(cst.Integer("5"), cst.Integer("6")),
        )
        self.assertEqual(
            self.code(expression),
            "foo[5:6]",
        )
        expression = parse_template_expression(
            "foo[{slice}]",
            slice=cst.SubscriptElement(cst.Slice(cst.Integer("5"), cst.Integer("6"))),
        )
        self.assertEqual(
            self.code(expression),
            "foo[5:6]",
        )
        expression = parse_template_expression(
            "foo[{slice}]",
            slice=cst.Slice(cst.Integer("5"), cst.Integer("6")),
        )
        self.assertEqual(
            self.code(expression),
            "foo[5:6]",
        )
        expression = parse_template_expression(
            "foo[{slice}]",
            slice=cst.SubscriptElement(cst.Slice(cst.Integer("5"), cst.Integer("6"))),
        )
        self.assertEqual(
            self.code(expression),
            "foo[5:6]",
        )
        expression = parse_template_expression(
            "foo[{slice1}, {slice2}]",
            slice1=cst.Slice(cst.Integer("5"), cst.Integer("6")),
            slice2=cst.Index(cst.Integer("7")),
        )
        self.assertEqual(
            self.code(expression),
            "foo[5:6, 7]",
        )
        expression = parse_template_expression(
            "foo[{slice1}, {slice2}]",
            slice1=cst.SubscriptElement(cst.Slice(cst.Integer("5"), cst.Integer("6"))),
            slice2=cst.SubscriptElement(cst.Index(cst.Integer("7"))),
        )
        self.assertEqual(
            self.code(expression),
            "foo[5:6, 7]",
        )

    def test_decorators(self) -> None:
        # Test that we can special-case decorators when needed.
        statement = parse_template_statement(
            "@{decorator}\ndef foo(): pass\n",
            decorator=cst.Name("bar"),
        )
        self.assertEqual(
            self.code(statement),
            "@bar\ndef foo(): pass\n",
        )
        statement = parse_template_statement(
            "@{decorator}\ndef foo(): pass\n",
            decorator=cst.Decorator(cst.Name("bar")),
        )
        self.assertEqual(
            self.code(statement),
            "@bar\ndef foo(): pass\n",
        )
LibCST-1.2.0/libcst/matchers/000077500000000000000000000000001456464173300156755ustar00rootroot00000000000000
LibCST-1.2.0/libcst/matchers/__init__.py000066400000000000000000021104521456464173300200130ustar00rootroot00000000000000
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
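# NOTE: The matcher classes generated below mirror LibCST's concrete node
# types; every field defaults to DoNotCare(), so a matcher only constrains
# the fields that are given explicitly. As a minimal usage sketch
# (hypothetical node values, not part of this generated module):
#
#     import libcst as cst
#     import libcst.matchers as m
#
#     node = cst.parse_expression("foo(1)")
#     # Matches any Call whose func is the Name "foo"; args are DoNotCare().
#     assert m.matches(node, m.Call(func=m.Name("foo")))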
# This file was generated by libcst.codegen.gen_matcher_classes from dataclasses import dataclass from typing import Optional, Sequence, Union from typing_extensions import Literal import libcst as cst from libcst.matchers._decorators import call_if_inside, call_if_not_inside, leave, visit from libcst.matchers._matcher_base import ( AbstractBaseMatcherNodeMeta, AllOf, AtLeastN, AtMostN, BaseMatcherNode, DoesNotMatch, DoNotCare, DoNotCareSentinel, extract, extractall, findall, matches, MatchIfTrue, MatchMetadata, MatchMetadataIfTrue, MatchRegex, OneOf, replace, SaveMatchedNode, TypeOf, ZeroOrMore, ZeroOrOne, ) from libcst.matchers._visitors import ( MatchDecoratorMismatch, MatcherDecoratableTransformer, MatcherDecoratableVisitor, ) class _NodeABC(metaclass=AbstractBaseMatcherNodeMeta): __slots__ = () class BaseAssignTargetExpression(_NodeABC): pass class BaseAugOp(_NodeABC): pass class BaseBinaryOp(_NodeABC): pass class BaseBooleanOp(_NodeABC): pass class BaseComp(_NodeABC): pass class BaseCompOp(_NodeABC): pass class BaseCompoundStatement(_NodeABC): pass class BaseDelTargetExpression(_NodeABC): pass class BaseDict(_NodeABC): pass class BaseDictElement(_NodeABC): pass class BaseElement(_NodeABC): pass class BaseExpression(_NodeABC): pass class BaseFormattedStringContent(_NodeABC): pass class BaseList(_NodeABC): pass class BaseMetadataProvider(_NodeABC): pass class BaseNumber(_NodeABC): pass class BaseParenthesizableWhitespace(_NodeABC): pass class BaseSet(_NodeABC): pass class BaseSimpleComp(_NodeABC): pass class BaseSlice(_NodeABC): pass class BaseSmallStatement(_NodeABC): pass class BaseStatement(_NodeABC): pass class BaseString(_NodeABC): pass class BaseSuite(_NodeABC): pass class BaseUnaryOp(_NodeABC): pass MetadataMatchType = Union[MatchMetadata, MatchMetadataIfTrue] BaseParenthesizableWhitespaceMatchType = Union[ "BaseParenthesizableWhitespace", MetadataMatchType, MatchIfTrue[cst.BaseParenthesizableWhitespace], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Add(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class AddAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class And(BaseBooleanOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, 
OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseAssignTargetExpressionMatchType = Union[ "BaseAssignTargetExpression", MetadataMatchType, MatchIfTrue[cst.BaseAssignTargetExpression], ] AnnotationMatchType = Union[ "Annotation", MetadataMatchType, MatchIfTrue[cst.Annotation] ] AssignEqualMatchType = Union[ "AssignEqual", MetadataMatchType, MatchIfTrue[cst.AssignEqual] ] SemicolonMatchType = Union["Semicolon", MetadataMatchType, MatchIfTrue[cst.Semicolon]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class AnnAssign(BaseSmallStatement, BaseMatcherNode): target: Union[ BaseAssignTargetExpressionMatchType, DoNotCareSentinel, OneOf[BaseAssignTargetExpressionMatchType], AllOf[BaseAssignTargetExpressionMatchType], ] = DoNotCare() annotation: Union[ AnnotationMatchType, DoNotCareSentinel, OneOf[AnnotationMatchType], AllOf[AnnotationMatchType], ] = DoNotCare() value: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() equal: Union[ AssignEqualMatchType, DoNotCareSentinel, OneOf[AssignEqualMatchType], AllOf[AssignEqualMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseExpressionMatchType = Union[ "BaseExpression", MetadataMatchType, MatchIfTrue[cst.BaseExpression] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Annotation(BaseMatcherNode): annotation: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() whitespace_before_indicator: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_indicator: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() CommaMatchType = Union["Comma", MetadataMatchType, MatchIfTrue[cst.Comma]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Arg(BaseMatcherNode): value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() keyword: Union[ Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]], DoNotCareSentinel, OneOf[ Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] ], AllOf[ Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] ], ] = DoNotCare() equal: Union[ AssignEqualMatchType, DoNotCareSentinel, OneOf[AssignEqualMatchType], AllOf[AssignEqualMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() star: Union[ Literal["", "*", "**"], MetadataMatchType, 
MatchIfTrue[Literal["", "*", "**"]], DoNotCareSentinel, OneOf[ Union[ Literal["", "*", "**"], MetadataMatchType, MatchIfTrue[Literal["", "*", "**"]], ] ], AllOf[ Union[ Literal["", "*", "**"], MetadataMatchType, MatchIfTrue[Literal["", "*", "**"]], ] ], ] = DoNotCare() whitespace_after_star: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_arg: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() NameOrTupleOrListMatchType = Union[ "Name", "Tuple", "List", MetadataMatchType, MatchIfTrue[Union[cst.Name, cst.Tuple, cst.List]], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class AsName(BaseMatcherNode): name: Union[ NameOrTupleOrListMatchType, DoNotCareSentinel, OneOf[NameOrTupleOrListMatchType], AllOf[NameOrTupleOrListMatchType], ] = DoNotCare() whitespace_before_as: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_as: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() SimpleWhitespaceMatchType = Union[ "SimpleWhitespace", MetadataMatchType, MatchIfTrue[cst.SimpleWhitespace] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Assert(BaseSmallStatement, BaseMatcherNode): test: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() msg: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() whitespace_after_assert: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() AssignTargetMatchType = Union[ "AssignTarget", MetadataMatchType, MatchIfTrue[cst.AssignTarget] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Assign(BaseSmallStatement, BaseMatcherNode): targets: Union[ Sequence[ Union[ AssignTargetMatchType, DoNotCareSentinel, OneOf[AssignTargetMatchType], AllOf[AssignTargetMatchType], AtLeastN[ Union[ AssignTargetMatchType, DoNotCareSentinel, OneOf[AssignTargetMatchType], AllOf[AssignTargetMatchType], ] ], AtMostN[ Union[ AssignTargetMatchType, DoNotCareSentinel, OneOf[AssignTargetMatchType], AllOf[AssignTargetMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.AssignTarget]], OneOf[ 
Union[ Sequence[ Union[ AssignTargetMatchType, OneOf[AssignTargetMatchType], AllOf[AssignTargetMatchType], AtLeastN[ Union[ AssignTargetMatchType, OneOf[AssignTargetMatchType], AllOf[AssignTargetMatchType], ] ], AtMostN[ Union[ AssignTargetMatchType, OneOf[AssignTargetMatchType], AllOf[AssignTargetMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.AssignTarget]], ] ], AllOf[ Union[ Sequence[ Union[ AssignTargetMatchType, OneOf[AssignTargetMatchType], AllOf[AssignTargetMatchType], AtLeastN[ Union[ AssignTargetMatchType, OneOf[AssignTargetMatchType], AllOf[AssignTargetMatchType], ] ], AtMostN[ Union[ AssignTargetMatchType, OneOf[AssignTargetMatchType], AllOf[AssignTargetMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.AssignTarget]], ] ], ] = DoNotCare() value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class AssignEqual(BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class AssignTarget(BaseMatcherNode): target: Union[ BaseAssignTargetExpressionMatchType, DoNotCareSentinel, OneOf[BaseAssignTargetExpressionMatchType], AllOf[BaseAssignTargetExpressionMatchType], ] = DoNotCare() whitespace_before_equal: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_equal: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Asynchronous(BaseMatcherNode): whitespace_after: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() NameMatchType = Union["Name", MetadataMatchType, MatchIfTrue[cst.Name]] DotMatchType = Union["Dot", MetadataMatchType, MatchIfTrue[cst.Dot]] LeftParenMatchType = Union["LeftParen", MetadataMatchType, MatchIfTrue[cst.LeftParen]] RightParenMatchType = Union[ "RightParen", MetadataMatchType, MatchIfTrue[cst.RightParen] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Attribute( BaseAssignTargetExpression, BaseDelTargetExpression, BaseExpression, BaseMatcherNode ): value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() attr: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() dot: 
Union[ DotMatchType, DoNotCareSentinel, OneOf[DotMatchType], AllOf[DotMatchType] ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseAugOpMatchType = Union["BaseAugOp", MetadataMatchType, MatchIfTrue[cst.BaseAugOp]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class AugAssign(BaseSmallStatement, BaseMatcherNode): target: Union[ BaseAssignTargetExpressionMatchType, DoNotCareSentinel, OneOf[BaseAssignTargetExpressionMatchType], AllOf[BaseAssignTargetExpressionMatchType], ] = DoNotCare() operator: Union[ BaseAugOpMatchType, DoNotCareSentinel, OneOf[BaseAugOpMatchType], AllOf[BaseAugOpMatchType], ] = DoNotCare() value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Await(BaseExpression, BaseMatcherNode): expression: Union[ BaseExpressionMatchType, DoNotCareSentinel, 
OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_after_await: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseBinaryOpMatchType = Union[ "BaseBinaryOp", MetadataMatchType, MatchIfTrue[cst.BaseBinaryOp] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class BinaryOperation(BaseExpression, BaseMatcherNode): left: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() operator: Union[ BaseBinaryOpMatchType, DoNotCareSentinel, OneOf[BaseBinaryOpMatchType], AllOf[BaseBinaryOpMatchType], ] = DoNotCare() right: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ 
LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class BitAnd(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class BitAndAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class BitInvert(BaseUnaryOp, BaseMatcherNode): whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, 
OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class BitOr(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class BitOrAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class BitXor(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class BitXorAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseBooleanOpMatchType = Union[ "BaseBooleanOp", MetadataMatchType, MatchIfTrue[cst.BaseBooleanOp] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class BooleanOperation(BaseExpression, BaseMatcherNode): left: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() operator: Union[ BaseBooleanOpMatchType, DoNotCareSentinel, OneOf[BaseBooleanOpMatchType], AllOf[BaseBooleanOpMatchType], ] = DoNotCare() right: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ 
LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Break(BaseSmallStatement, BaseMatcherNode): semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() ArgMatchType = Union["Arg", MetadataMatchType, MatchIfTrue[cst.Arg]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Call(BaseExpression, BaseMatcherNode): func: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() args: Union[ Sequence[ Union[ ArgMatchType, DoNotCareSentinel, OneOf[ArgMatchType], AllOf[ArgMatchType], AtLeastN[ Union[ ArgMatchType, DoNotCareSentinel, OneOf[ArgMatchType], AllOf[ArgMatchType], ] ], AtMostN[ Union[ ArgMatchType, DoNotCareSentinel, OneOf[ArgMatchType], AllOf[ArgMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.Arg]], OneOf[ Union[ Sequence[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType], AtLeastN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], AtMostN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], ] ], MatchIfTrue[Sequence[cst.Arg]], ] ], AllOf[ Union[ Sequence[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType], AtLeastN[ Union[ ArgMatchType, 
OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], AtMostN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], ] ], MatchIfTrue[Sequence[cst.Arg]], ] ], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_after_func: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_before_args: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseSuiteMatchType = Union["BaseSuite", MetadataMatchType, MatchIfTrue[cst.BaseSuite]] DecoratorMatchType = Union["Decorator", MetadataMatchType, MatchIfTrue[cst.Decorator]] EmptyLineMatchType = Union["EmptyLine", MetadataMatchType, MatchIfTrue[cst.EmptyLine]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): name: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], 
AllOf[BaseSuiteMatchType], ] = DoNotCare() bases: Union[ Sequence[ Union[ ArgMatchType, DoNotCareSentinel, OneOf[ArgMatchType], AllOf[ArgMatchType], AtLeastN[ Union[ ArgMatchType, DoNotCareSentinel, OneOf[ArgMatchType], AllOf[ArgMatchType], ] ], AtMostN[ Union[ ArgMatchType, DoNotCareSentinel, OneOf[ArgMatchType], AllOf[ArgMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.Arg]], OneOf[ Union[ Sequence[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType], AtLeastN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], AtMostN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], ] ], MatchIfTrue[Sequence[cst.Arg]], ] ], AllOf[ Union[ Sequence[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType], AtLeastN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], AtMostN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], ] ], MatchIfTrue[Sequence[cst.Arg]], ] ], ] = DoNotCare() keywords: Union[ Sequence[ Union[ ArgMatchType, DoNotCareSentinel, OneOf[ArgMatchType], AllOf[ArgMatchType], AtLeastN[ Union[ ArgMatchType, DoNotCareSentinel, OneOf[ArgMatchType], AllOf[ArgMatchType], ] ], AtMostN[ Union[ ArgMatchType, DoNotCareSentinel, OneOf[ArgMatchType], AllOf[ArgMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.Arg]], OneOf[ Union[ Sequence[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType], AtLeastN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], AtMostN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], ] ], MatchIfTrue[Sequence[cst.Arg]], ] ], AllOf[ Union[ Sequence[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType], AtLeastN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], AtMostN[ Union[ ArgMatchType, OneOf[ArgMatchType], AllOf[ArgMatchType] ] ], ] ], MatchIfTrue[Sequence[cst.Arg]], ] ], ] = DoNotCare() decorators: Union[ Sequence[ Union[ DecoratorMatchType, DoNotCareSentinel, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], AtLeastN[ Union[ DecoratorMatchType, DoNotCareSentinel, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], AtMostN[ Union[ DecoratorMatchType, DoNotCareSentinel, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.Decorator]], OneOf[ Union[ Sequence[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], AtLeastN[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], AtMostN[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Decorator]], ] ], AllOf[ Union[ Sequence[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], AtLeastN[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], AtMostN[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Decorator]], ] ], ] = DoNotCare() lpar: Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] = DoNotCare() rpar: Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ 
EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() lines_after_decorators: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_after_class: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_name: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() type_parameters: Union[ Optional["TypeParameters"], MetadataMatchType, MatchIfTrue[Optional[cst.TypeParameters]], DoNotCareSentinel, OneOf[ Union[ Optional["TypeParameters"], MetadataMatchType, MatchIfTrue[Optional[cst.TypeParameters]], ] ], AllOf[ Union[ Optional["TypeParameters"], MetadataMatchType, MatchIfTrue[Optional[cst.TypeParameters]], ] ], ] = DoNotCare() whitespace_after_type_parameters: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Colon(BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], 
AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Comma(BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() strMatchType = Union[str, MetadataMatchType, MatchIfTrue[str]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Comment(BaseMatcherNode): value: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() CompIfMatchType = Union["CompIf", MetadataMatchType, MatchIfTrue[cst.CompIf]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class CompFor(BaseMatcherNode): target: Union[ BaseAssignTargetExpressionMatchType, DoNotCareSentinel, OneOf[BaseAssignTargetExpressionMatchType], AllOf[BaseAssignTargetExpressionMatchType], ] = DoNotCare() iter: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() ifs: Union[ Sequence[ Union[ CompIfMatchType, DoNotCareSentinel, OneOf[CompIfMatchType], AllOf[CompIfMatchType], AtLeastN[ Union[ CompIfMatchType, DoNotCareSentinel, OneOf[CompIfMatchType], AllOf[CompIfMatchType], ] ], AtMostN[ Union[ CompIfMatchType, DoNotCareSentinel, OneOf[CompIfMatchType], AllOf[CompIfMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.CompIf]], OneOf[ Union[ Sequence[ Union[ CompIfMatchType, OneOf[CompIfMatchType], AllOf[CompIfMatchType], AtLeastN[ Union[ CompIfMatchType, OneOf[CompIfMatchType], AllOf[CompIfMatchType], ] ], AtMostN[ Union[ CompIfMatchType, OneOf[CompIfMatchType], AllOf[CompIfMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.CompIf]], ] ], AllOf[ Union[ Sequence[ Union[ CompIfMatchType, OneOf[CompIfMatchType], AllOf[CompIfMatchType], AtLeastN[ Union[ CompIfMatchType, OneOf[CompIfMatchType], AllOf[CompIfMatchType], ] ], AtMostN[ Union[ CompIfMatchType, OneOf[CompIfMatchType], AllOf[CompIfMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.CompIf]], ] ], ] = DoNotCare() inner_for_in: Union[ Optional["CompFor"], MetadataMatchType, MatchIfTrue[Optional[cst.CompFor]], DoNotCareSentinel, OneOf[ Union[ Optional["CompFor"], MetadataMatchType, MatchIfTrue[Optional[cst.CompFor]], ] ], AllOf[ Union[ Optional["CompFor"], MetadataMatchType, MatchIfTrue[Optional[cst.CompFor]], ] ], ] = DoNotCare() asynchronous: Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], DoNotCareSentinel, OneOf[ Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], ] ], AllOf[ Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], ] ], ] = DoNotCare() whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_for: 
Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_before_in: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_in: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class CompIf(BaseMatcherNode): test: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_before_test: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() ComparisonTargetMatchType = Union[ "ComparisonTarget", MetadataMatchType, MatchIfTrue[cst.ComparisonTarget] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Comparison(BaseExpression, BaseMatcherNode): left: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() comparisons: Union[ Sequence[ Union[ ComparisonTargetMatchType, DoNotCareSentinel, OneOf[ComparisonTargetMatchType], AllOf[ComparisonTargetMatchType], AtLeastN[ Union[ ComparisonTargetMatchType, DoNotCareSentinel, OneOf[ComparisonTargetMatchType], AllOf[ComparisonTargetMatchType], ] ], AtMostN[ Union[ ComparisonTargetMatchType, DoNotCareSentinel, OneOf[ComparisonTargetMatchType], AllOf[ComparisonTargetMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.ComparisonTarget]], OneOf[ Union[ Sequence[ Union[ ComparisonTargetMatchType, OneOf[ComparisonTargetMatchType], AllOf[ComparisonTargetMatchType], AtLeastN[ Union[ ComparisonTargetMatchType, OneOf[ComparisonTargetMatchType], AllOf[ComparisonTargetMatchType], ] ], AtMostN[ Union[ ComparisonTargetMatchType, OneOf[ComparisonTargetMatchType], AllOf[ComparisonTargetMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ComparisonTarget]], ] ], AllOf[ Union[ Sequence[ Union[ ComparisonTargetMatchType, OneOf[ComparisonTargetMatchType], AllOf[ComparisonTargetMatchType], AtLeastN[ Union[ ComparisonTargetMatchType, OneOf[ComparisonTargetMatchType], AllOf[ComparisonTargetMatchType], ] ], AtMostN[ Union[ ComparisonTargetMatchType, OneOf[ComparisonTargetMatchType], AllOf[ComparisonTargetMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ComparisonTarget]], ] ], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ 
Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseCompOpMatchType = Union[ "BaseCompOp", MetadataMatchType, MatchIfTrue[cst.BaseCompOp] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class ComparisonTarget(BaseMatcherNode): operator: Union[ BaseCompOpMatchType, DoNotCareSentinel, OneOf[BaseCompOpMatchType], AllOf[BaseCompOpMatchType], ] = DoNotCare() comparator: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() SimpleStringOrFormattedStringMatchType = Union[ "SimpleString", "FormattedString", MetadataMatchType, MatchIfTrue[Union[cst.SimpleString, cst.FormattedString]], ] SimpleStringOrFormattedStringOrConcatenatedStringMatchType = Union[ "SimpleString", "FormattedString", "ConcatenatedString", MetadataMatchType, MatchIfTrue[Union[cst.SimpleString, cst.FormattedString, cst.ConcatenatedString]], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class ConcatenatedString(BaseExpression, BaseString, BaseMatcherNode): left: Union[ SimpleStringOrFormattedStringMatchType, DoNotCareSentinel, OneOf[SimpleStringOrFormattedStringMatchType], AllOf[SimpleStringOrFormattedStringMatchType], ] = DoNotCare() right: Union[ SimpleStringOrFormattedStringOrConcatenatedStringMatchType, DoNotCareSentinel, OneOf[SimpleStringOrFormattedStringOrConcatenatedStringMatchType], AllOf[SimpleStringOrFormattedStringOrConcatenatedStringMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, 
OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_between: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Continue(BaseSmallStatement, BaseMatcherNode): semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() TrailingWhitespaceMatchType = Union[ "TrailingWhitespace", MetadataMatchType, MatchIfTrue[cst.TrailingWhitespace] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Decorator(BaseMatcherNode): decorator: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], 
AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_after_at: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() trailing_whitespace: Union[ TrailingWhitespaceMatchType, DoNotCareSentinel, OneOf[TrailingWhitespaceMatchType], AllOf[TrailingWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseDelTargetExpressionMatchType = Union[ "BaseDelTargetExpression", MetadataMatchType, MatchIfTrue[cst.BaseDelTargetExpression], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Del(BaseSmallStatement, BaseMatcherNode): target: Union[ BaseDelTargetExpressionMatchType, DoNotCareSentinel, OneOf[BaseDelTargetExpressionMatchType], AllOf[BaseDelTargetExpressionMatchType], ] = DoNotCare() whitespace_after_del: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseDictElementMatchType = Union[ "BaseDictElement", MetadataMatchType, MatchIfTrue[cst.BaseDictElement] ] LeftCurlyBraceMatchType = Union[ "LeftCurlyBrace", MetadataMatchType, MatchIfTrue[cst.LeftCurlyBrace] ] RightCurlyBraceMatchType = Union[ "RightCurlyBrace", MetadataMatchType, MatchIfTrue[cst.RightCurlyBrace] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Dict(BaseDict, BaseExpression, BaseMatcherNode): elements: Union[ Sequence[ Union[ BaseDictElementMatchType, DoNotCareSentinel, OneOf[BaseDictElementMatchType], AllOf[BaseDictElementMatchType], AtLeastN[ Union[ BaseDictElementMatchType, DoNotCareSentinel, OneOf[BaseDictElementMatchType], AllOf[BaseDictElementMatchType], ] ], AtMostN[ Union[ BaseDictElementMatchType, DoNotCareSentinel, OneOf[BaseDictElementMatchType], AllOf[BaseDictElementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.BaseDictElement]], OneOf[ Union[ Sequence[ Union[ BaseDictElementMatchType, OneOf[BaseDictElementMatchType], AllOf[BaseDictElementMatchType], AtLeastN[ Union[ BaseDictElementMatchType, OneOf[BaseDictElementMatchType], AllOf[BaseDictElementMatchType], ] ], AtMostN[ Union[ BaseDictElementMatchType, OneOf[BaseDictElementMatchType], AllOf[BaseDictElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseDictElement]], ] ], AllOf[ Union[ Sequence[ Union[ BaseDictElementMatchType, OneOf[BaseDictElementMatchType], AllOf[BaseDictElementMatchType], AtLeastN[ Union[ BaseDictElementMatchType, 
OneOf[BaseDictElementMatchType], AllOf[BaseDictElementMatchType], ] ], AtMostN[ Union[ BaseDictElementMatchType, OneOf[BaseDictElementMatchType], AllOf[BaseDictElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseDictElement]], ] ], ] = DoNotCare() lbrace: Union[ LeftCurlyBraceMatchType, DoNotCareSentinel, OneOf[LeftCurlyBraceMatchType], AllOf[LeftCurlyBraceMatchType], ] = DoNotCare() rbrace: Union[ RightCurlyBraceMatchType, DoNotCareSentinel, OneOf[RightCurlyBraceMatchType], AllOf[RightCurlyBraceMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() CompForMatchType = Union["CompFor", MetadataMatchType, MatchIfTrue[cst.CompFor]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class DictComp(BaseComp, BaseDict, BaseExpression, BaseMatcherNode): key: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() for_in: Union[ CompForMatchType, DoNotCareSentinel, OneOf[CompForMatchType], AllOf[CompForMatchType], ] = DoNotCare() lbrace: Union[ 
LeftCurlyBraceMatchType, DoNotCareSentinel, OneOf[LeftCurlyBraceMatchType], AllOf[LeftCurlyBraceMatchType], ] = DoNotCare() rbrace: Union[ RightCurlyBraceMatchType, DoNotCareSentinel, OneOf[RightCurlyBraceMatchType], AllOf[RightCurlyBraceMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_before_colon: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_colon: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class DictElement(BaseDictElement, BaseMatcherNode): key: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() 
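    # Editorial note (illustrative sketch, not part of the generated
    # definitions): sequence-valued fields such as ``Dict.elements`` above
    # accept a literal sequence of per-element matchers combined with the
    # sequence helpers, e.g.
    #
    #   import libcst.matchers as m
    #   m.Dict(elements=[m.DictElement(key=m.SimpleString()), m.ZeroOrMore()])
    #
    # where ``m.ZeroOrMore()`` is the exported shorthand for an ``AtLeastN``
    # wildcard with ``n=0``, matching any remaining elements.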
whitespace_before_colon: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_colon: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Divide(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class DivideAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Dot(BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Element(BaseElement, BaseMatcherNode): value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Ellipsis(BaseExpression, BaseMatcherNode): lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ 
LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Else(BaseMatcherNode): body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() boolMatchType = Union[bool, MetadataMatchType, MatchIfTrue[bool]] NewlineMatchType = Union["Newline", MetadataMatchType, MatchIfTrue[cst.Newline]] @dataclass(frozen=True, eq=False, 
unsafe_hash=False) class EmptyLine(BaseMatcherNode): indent: Union[ boolMatchType, DoNotCareSentinel, OneOf[boolMatchType], AllOf[boolMatchType] ] = DoNotCare() whitespace: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() comment: Union[ Optional["Comment"], MetadataMatchType, MatchIfTrue[Optional[cst.Comment]], DoNotCareSentinel, OneOf[ Union[ Optional["Comment"], MetadataMatchType, MatchIfTrue[Optional[cst.Comment]], ] ], AllOf[ Union[ Optional["Comment"], MetadataMatchType, MatchIfTrue[Optional[cst.Comment]], ] ], ] = DoNotCare() newline: Union[ NewlineMatchType, DoNotCareSentinel, OneOf[NewlineMatchType], AllOf[NewlineMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Equal(BaseCompOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class ExceptHandler(BaseMatcherNode): body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() type: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() name: Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]], DoNotCareSentinel, OneOf[ Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] ] ], AllOf[ Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_after_except: Union[ SimpleWhitespaceMatchType, 
DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class ExceptStarHandler(BaseMatcherNode): body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() type: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() name: Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]], DoNotCareSentinel, OneOf[ Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] ] ], AllOf[ Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_after_except: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_star: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Expr(BaseSmallStatement, BaseMatcherNode): value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Finally(BaseMatcherNode): body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, 
DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Float(BaseExpression, BaseNumber, BaseMatcherNode): value: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], 
AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class FloorDivide(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class FloorDivideAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class For(BaseCompoundStatement, BaseStatement, BaseMatcherNode): target: Union[ BaseAssignTargetExpressionMatchType, DoNotCareSentinel, OneOf[BaseAssignTargetExpressionMatchType], AllOf[BaseAssignTargetExpressionMatchType], ] = DoNotCare() iter: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() orelse: Union[ Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]], DoNotCareSentinel, OneOf[ Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] ], AllOf[ Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] ], ] = DoNotCare() asynchronous: Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], DoNotCareSentinel, OneOf[ Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], ] ], AllOf[ Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], 
AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_after_for: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_in: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_in: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseFormattedStringContentMatchType = Union[ "BaseFormattedStringContent", MetadataMatchType, MatchIfTrue[cst.BaseFormattedStringContent], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class FormattedString(BaseExpression, BaseString, BaseMatcherNode): parts: Union[ Sequence[ Union[ BaseFormattedStringContentMatchType, DoNotCareSentinel, OneOf[BaseFormattedStringContentMatchType], AllOf[BaseFormattedStringContentMatchType], AtLeastN[ Union[ BaseFormattedStringContentMatchType, DoNotCareSentinel, OneOf[BaseFormattedStringContentMatchType], AllOf[BaseFormattedStringContentMatchType], ] ], AtMostN[ Union[ BaseFormattedStringContentMatchType, DoNotCareSentinel, OneOf[BaseFormattedStringContentMatchType], AllOf[BaseFormattedStringContentMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.BaseFormattedStringContent]], OneOf[ Union[ Sequence[ Union[ BaseFormattedStringContentMatchType, OneOf[BaseFormattedStringContentMatchType], AllOf[BaseFormattedStringContentMatchType], AtLeastN[ Union[ BaseFormattedStringContentMatchType, OneOf[BaseFormattedStringContentMatchType], AllOf[BaseFormattedStringContentMatchType], ] ], AtMostN[ Union[ BaseFormattedStringContentMatchType, OneOf[BaseFormattedStringContentMatchType], AllOf[BaseFormattedStringContentMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseFormattedStringContent]], ] ], AllOf[ Union[ Sequence[ Union[ BaseFormattedStringContentMatchType, OneOf[BaseFormattedStringContentMatchType], AllOf[BaseFormattedStringContentMatchType], AtLeastN[ Union[ BaseFormattedStringContentMatchType, OneOf[BaseFormattedStringContentMatchType], AllOf[BaseFormattedStringContentMatchType], ] ], AtMostN[ Union[ BaseFormattedStringContentMatchType, OneOf[BaseFormattedStringContentMatchType], AllOf[BaseFormattedStringContentMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseFormattedStringContent]], ] ], ] = DoNotCare() start: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() end: Union[ Literal['"', "'", '"""', "'''"], MetadataMatchType, MatchIfTrue[Literal['"', "'", '"""', "'''"]], DoNotCareSentinel, OneOf[ Union[ Literal['"', "'", '"""', "'''"], MetadataMatchType, MatchIfTrue[Literal['"', "'", '"""', "'''"]], ] ], AllOf[ Union[ Literal['"', "'", '"""', "'''"], MetadataMatchType, MatchIfTrue[Literal['"', "'", '"""', "'''"]], 
] ], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class FormattedStringExpression(BaseFormattedStringContent, BaseMatcherNode): expression: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() conversion: Union[ Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]], DoNotCareSentinel, OneOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], AllOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], ] = DoNotCare() format_spec: Union[ Optional[Sequence["BaseFormattedStringContent"]], MetadataMatchType, MatchIfTrue[Optional[Sequence[cst.BaseFormattedStringContent]]], DoNotCareSentinel, OneOf[ Union[ Optional[Sequence["BaseFormattedStringContent"]], MetadataMatchType, MatchIfTrue[Optional[Sequence[cst.BaseFormattedStringContent]]], ] ], AllOf[ Union[ Optional[Sequence["BaseFormattedStringContent"]], MetadataMatchType, MatchIfTrue[Optional[Sequence[cst.BaseFormattedStringContent]]], ] ], ] = DoNotCare() whitespace_before_expression: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, 
OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_expression: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() equal: Union[ Optional["AssignEqual"], MetadataMatchType, MatchIfTrue[Optional[cst.AssignEqual]], DoNotCareSentinel, OneOf[ Union[ Optional["AssignEqual"], MetadataMatchType, MatchIfTrue[Optional[cst.AssignEqual]], ] ], AllOf[ Union[ Optional["AssignEqual"], MetadataMatchType, MatchIfTrue[Optional[cst.AssignEqual]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class FormattedStringText(BaseFormattedStringContent, BaseMatcherNode): value: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class From(BaseMatcherNode): item: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() whitespace_before_from: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_from: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() ParametersMatchType = Union[ "Parameters", MetadataMatchType, MatchIfTrue[cst.Parameters] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): name: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() params: Union[ ParametersMatchType, DoNotCareSentinel, OneOf[ParametersMatchType], AllOf[ParametersMatchType], ] = DoNotCare() body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() decorators: Union[ Sequence[ Union[ DecoratorMatchType, DoNotCareSentinel, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], AtLeastN[ Union[ DecoratorMatchType, DoNotCareSentinel, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], AtMostN[ Union[ DecoratorMatchType, DoNotCareSentinel, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.Decorator]], OneOf[ Union[ Sequence[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], AtLeastN[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], AtMostN[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Decorator]], ] ], AllOf[ Union[ Sequence[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], AtLeastN[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], AtMostN[ Union[ DecoratorMatchType, OneOf[DecoratorMatchType], AllOf[DecoratorMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Decorator]], ] ], ] 
= DoNotCare() returns: Union[ Optional["Annotation"], MetadataMatchType, MatchIfTrue[Optional[cst.Annotation]], DoNotCareSentinel, OneOf[ Union[ Optional["Annotation"], MetadataMatchType, MatchIfTrue[Optional[cst.Annotation]], ] ], AllOf[ Union[ Optional["Annotation"], MetadataMatchType, MatchIfTrue[Optional[cst.Annotation]], ] ], ] = DoNotCare() asynchronous: Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], DoNotCareSentinel, OneOf[ Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], ] ], AllOf[ Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() lines_after_decorators: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_after_def: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_name: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_params: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, 
DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() type_parameters: Union[ Optional["TypeParameters"], MetadataMatchType, MatchIfTrue[Optional[cst.TypeParameters]], DoNotCareSentinel, OneOf[ Union[ Optional["TypeParameters"], MetadataMatchType, MatchIfTrue[Optional[cst.TypeParameters]], ] ], AllOf[ Union[ Optional["TypeParameters"], MetadataMatchType, MatchIfTrue[Optional[cst.TypeParameters]], ] ], ] = DoNotCare() whitespace_after_type_parameters: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class GeneratorExp(BaseComp, BaseExpression, BaseSimpleComp, BaseMatcherNode): elt: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() for_in: Union[ CompForMatchType, DoNotCareSentinel, OneOf[CompForMatchType], AllOf[CompForMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() 
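# --- Editorial note (not part of the @generated matcher definitions) ------
# Every field on these matcher dataclasses defaults to DoNotCare(), so a
# matcher constrains only the fields you set explicitly; OneOf/AllOf express
# per-field alternatives and conjunctions. A minimal usage sketch for the
# FunctionDef matcher defined above, assuming the public ``libcst`` and
# ``libcst.matchers`` APIs (the parsed source and the names ``foo``/``bar``
# are illustrative only):
#
#   import libcst as cst
#   import libcst.matchers as m
#
#   func = cst.parse_module("def foo(): pass\n").body[0]
#   # True: only ``name`` is constrained; params, body, decorators, and the
#   # whitespace fields all remain DoNotCare() and match anything.
#   m.matches(func, m.FunctionDef(name=m.OneOf(m.Name("foo"), m.Name("bar"))))
# ---------------------------------------------------------------------------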
NameItemMatchType = Union["NameItem", MetadataMatchType, MatchIfTrue[cst.NameItem]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Global(BaseSmallStatement, BaseMatcherNode): names: Union[ Sequence[ Union[ NameItemMatchType, DoNotCareSentinel, OneOf[NameItemMatchType], AllOf[NameItemMatchType], AtLeastN[ Union[ NameItemMatchType, DoNotCareSentinel, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], AtMostN[ Union[ NameItemMatchType, DoNotCareSentinel, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.NameItem]], OneOf[ Union[ Sequence[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], AtLeastN[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], AtMostN[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.NameItem]], ] ], AllOf[ Union[ Sequence[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], AtLeastN[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], AtMostN[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.NameItem]], ] ], ] = DoNotCare() whitespace_after_global: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class GreaterThan(BaseCompOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class GreaterThanEqual(BaseCompOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() IfOrElseOrNoneMatchType = Union[ "If", "Else", None, MetadataMatchType, MatchIfTrue[Union[cst.If, cst.Else, None]] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class If(BaseCompoundStatement, BaseStatement, BaseMatcherNode): test: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() orelse: Union[ IfOrElseOrNoneMatchType, DoNotCareSentinel, OneOf[IfOrElseOrNoneMatchType], AllOf[IfOrElseOrNoneMatchType], ] = DoNotCare() 
leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_before_test: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_test: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class IfExp(BaseExpression, BaseMatcherNode): test: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() body: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() orelse: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], 
AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_before_if: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_if: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_before_else: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_else: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Imaginary(BaseExpression, BaseNumber, BaseMatcherNode): value: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, 
OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() ImportAliasMatchType = Union[ "ImportAlias", MetadataMatchType, MatchIfTrue[cst.ImportAlias] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Import(BaseSmallStatement, BaseMatcherNode): names: Union[ Sequence[ Union[ ImportAliasMatchType, DoNotCareSentinel, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, DoNotCareSentinel, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, DoNotCareSentinel, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.ImportAlias]], OneOf[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], ] ], AllOf[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], ] ], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() whitespace_after_import: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() AttributeOrNameMatchType = Union[ "Attribute", "Name", MetadataMatchType, MatchIfTrue[Union[cst.Attribute, cst.Name]] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class ImportAlias(BaseMatcherNode): name: Union[ AttributeOrNameMatchType, DoNotCareSentinel, OneOf[AttributeOrNameMatchType], AllOf[AttributeOrNameMatchType], ] = DoNotCare() asname: Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]], DoNotCareSentinel, OneOf[ Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] ] ], AllOf[ Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] ] ], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() AttributeOrNameOrNoneMatchType = Union[ "Attribute", "Name", None, 
MetadataMatchType, MatchIfTrue[Union[cst.Attribute, cst.Name, None]], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class ImportFrom(BaseSmallStatement, BaseMatcherNode): module: Union[ AttributeOrNameOrNoneMatchType, DoNotCareSentinel, OneOf[AttributeOrNameOrNoneMatchType], AllOf[AttributeOrNameOrNoneMatchType], ] = DoNotCare() names: Union[ Union[ Sequence[ Union[ ImportAliasMatchType, DoNotCareSentinel, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, DoNotCareSentinel, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, DoNotCareSentinel, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.ImportAlias]], OneOf[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], ] ], AllOf[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], ] ], ], "ImportStar", MetadataMatchType, MatchIfTrue[ Union[ Sequence[cst.ImportAlias], cst.ImportStar, OneOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], AllOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], ] ], DoNotCareSentinel, OneOf[ Union[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], OneOf[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], ] ], AllOf[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], ] ], ], "ImportStar", MetadataMatchType, MatchIfTrue[ Union[ Sequence[cst.ImportAlias], cst.ImportStar, OneOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], AllOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], ] ], ] ], AllOf[ Union[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], OneOf[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ 
Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], ] ], AllOf[ Union[ Sequence[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], AtLeastN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], AtMostN[ Union[ ImportAliasMatchType, OneOf[ImportAliasMatchType], AllOf[ImportAliasMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ImportAlias]], ] ], ], "ImportStar", MetadataMatchType, MatchIfTrue[ Union[ Sequence[cst.ImportAlias], cst.ImportStar, OneOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], AllOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], ] ], ] ], ] = DoNotCare() relative: Union[ Sequence[ Union[ DotMatchType, DoNotCareSentinel, OneOf[DotMatchType], AllOf[DotMatchType], AtLeastN[ Union[ DotMatchType, DoNotCareSentinel, OneOf[DotMatchType], AllOf[DotMatchType], ] ], AtMostN[ Union[ DotMatchType, DoNotCareSentinel, OneOf[DotMatchType], AllOf[DotMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.Dot]], OneOf[ Union[ Sequence[ Union[ DotMatchType, OneOf[DotMatchType], AllOf[DotMatchType], AtLeastN[ Union[ DotMatchType, OneOf[DotMatchType], AllOf[DotMatchType] ] ], AtMostN[ Union[ DotMatchType, OneOf[DotMatchType], AllOf[DotMatchType] ] ], ] ], MatchIfTrue[Sequence[cst.Dot]], ] ], AllOf[ Union[ Sequence[ Union[ DotMatchType, OneOf[DotMatchType], AllOf[DotMatchType], AtLeastN[ Union[ DotMatchType, OneOf[DotMatchType], AllOf[DotMatchType] ] ], AtMostN[ Union[ DotMatchType, OneOf[DotMatchType], AllOf[DotMatchType] ] ], ] ], MatchIfTrue[Sequence[cst.Dot]], ] ], ] = DoNotCare() lpar: Union[ Optional["LeftParen"], MetadataMatchType, MatchIfTrue[Optional[cst.LeftParen]], DoNotCareSentinel, OneOf[ Union[ Optional["LeftParen"], MetadataMatchType, MatchIfTrue[Optional[cst.LeftParen]], ] ], AllOf[ Union[ Optional["LeftParen"], MetadataMatchType, MatchIfTrue[Optional[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Optional["RightParen"], MetadataMatchType, MatchIfTrue[Optional[cst.RightParen]], DoNotCareSentinel, OneOf[ Union[ Optional["RightParen"], MetadataMatchType, MatchIfTrue[Optional[cst.RightParen]], ] ], AllOf[ Union[ Optional["RightParen"], MetadataMatchType, MatchIfTrue[Optional[cst.RightParen]], ] ], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() whitespace_after_from: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_import: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_import: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class ImportStar(BaseMatcherNode): metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class In(BaseCompOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, 
DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseStatementMatchType = Union[ "BaseStatement", MetadataMatchType, MatchIfTrue[cst.BaseStatement] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class IndentedBlock(BaseSuite, BaseMatcherNode): body: Union[ Sequence[ Union[ BaseStatementMatchType, DoNotCareSentinel, OneOf[BaseStatementMatchType], AllOf[BaseStatementMatchType], AtLeastN[ Union[ BaseStatementMatchType, DoNotCareSentinel, OneOf[BaseStatementMatchType], AllOf[BaseStatementMatchType], ] ], AtMostN[ Union[ BaseStatementMatchType, DoNotCareSentinel, OneOf[BaseStatementMatchType], AllOf[BaseStatementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.BaseStatement]], OneOf[ Union[ Sequence[ Union[ BaseStatementMatchType, OneOf[BaseStatementMatchType], AllOf[BaseStatementMatchType], AtLeastN[ Union[ BaseStatementMatchType, OneOf[BaseStatementMatchType], AllOf[BaseStatementMatchType], ] ], AtMostN[ Union[ BaseStatementMatchType, OneOf[BaseStatementMatchType], AllOf[BaseStatementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseStatement]], ] ], AllOf[ Union[ Sequence[ Union[ BaseStatementMatchType, OneOf[BaseStatementMatchType], AllOf[BaseStatementMatchType], AtLeastN[ Union[ BaseStatementMatchType, OneOf[BaseStatementMatchType], AllOf[BaseStatementMatchType], ] ], AtMostN[ Union[ BaseStatementMatchType, OneOf[BaseStatementMatchType], AllOf[BaseStatementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseStatement]], ] ], ] = DoNotCare() header: Union[ TrailingWhitespaceMatchType, DoNotCareSentinel, OneOf[TrailingWhitespaceMatchType], AllOf[TrailingWhitespaceMatchType], ] = DoNotCare() indent: Union[ Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]], DoNotCareSentinel, OneOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], AllOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], ] = DoNotCare() footer: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() 
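# --- Editorial note (not part of the @generated matcher definitions) ------
# Sequence-valued fields such as ``IndentedBlock.body`` also accept the
# AtLeastN/AtMostN wildcards that appear in the unions above. A hedged
# sketch using ``ZeroOrMore`` (the AtLeastN convenience helper from the
# public ``libcst.matchers`` API); the parsed source is illustrative only:
#
#   import libcst as cst
#   import libcst.matchers as m
#
#   block = cst.parse_module("if x:\n    y = 1\n    print(y)\n").body[0].body
#   # True: the indented block may open with any number of statements, but
#   # must end with a simple statement line whose body is a print(...) call.
#   m.matches(
#       block,
#       m.IndentedBlock(
#           body=[
#               m.ZeroOrMore(),
#               m.SimpleStatementLine([m.Expr(m.Call(func=m.Name("print")))]),
#           ]
#       ),
#   )
# ---------------------------------------------------------------------------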
@dataclass(frozen=True, eq=False, unsafe_hash=False) class Index(BaseSlice, BaseMatcherNode): value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() star: Union[ Optional[Literal["*"]], MetadataMatchType, MatchIfTrue[Optional[Literal["*"]]], DoNotCareSentinel, OneOf[ Union[ Optional[Literal["*"]], MetadataMatchType, MatchIfTrue[Optional[Literal["*"]]], ] ], AllOf[ Union[ Optional[Literal["*"]], MetadataMatchType, MatchIfTrue[Optional[Literal["*"]]], ] ], ] = DoNotCare() whitespace_after_star: Union[ Optional["BaseParenthesizableWhitespace"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseParenthesizableWhitespace]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseParenthesizableWhitespace"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseParenthesizableWhitespace]], ] ], AllOf[ Union[ Optional["BaseParenthesizableWhitespace"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseParenthesizableWhitespace]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Integer(BaseExpression, BaseNumber, BaseMatcherNode): value: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], 
AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Is(BaseCompOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class IsNot(BaseCompOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_between: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() ColonMatchType = Union["Colon", MetadataMatchType, MatchIfTrue[cst.Colon]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Lambda(BaseExpression, BaseMatcherNode): params: Union[ ParametersMatchType, DoNotCareSentinel, OneOf[ParametersMatchType], AllOf[ParametersMatchType], ] = DoNotCare() body: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() colon: Union[ ColonMatchType, DoNotCareSentinel, OneOf[ColonMatchType], AllOf[ColonMatchType] ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ 
RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_after_lambda: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class LeftCurlyBrace(BaseMatcherNode): whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class LeftParen(BaseMatcherNode): whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class LeftShift(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class LeftShiftAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class LeftSquareBracket(BaseMatcherNode): whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: 
Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class LessThan(BaseCompOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class LessThanEqual(BaseCompOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseElementMatchType = Union[ "BaseElement", MetadataMatchType, MatchIfTrue[cst.BaseElement] ] LeftSquareBracketMatchType = Union[ "LeftSquareBracket", MetadataMatchType, MatchIfTrue[cst.LeftSquareBracket] ] RightSquareBracketMatchType = Union[ "RightSquareBracket", MetadataMatchType, MatchIfTrue[cst.RightSquareBracket] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class List( BaseAssignTargetExpression, BaseDelTargetExpression, BaseExpression, BaseList, BaseMatcherNode, ): elements: Union[ Sequence[ Union[ BaseElementMatchType, DoNotCareSentinel, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], AtLeastN[ Union[ BaseElementMatchType, DoNotCareSentinel, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], AtMostN[ Union[ BaseElementMatchType, DoNotCareSentinel, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.BaseElement]], OneOf[ Union[ Sequence[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], AtLeastN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], AtMostN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseElement]], ] ], AllOf[ Union[ Sequence[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], AtLeastN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], AtMostN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseElement]], ] ], ] = DoNotCare() lbracket: Union[ LeftSquareBracketMatchType, DoNotCareSentinel, OneOf[LeftSquareBracketMatchType], AllOf[LeftSquareBracketMatchType], ] = DoNotCare() rbracket: Union[ RightSquareBracketMatchType, DoNotCareSentinel, OneOf[RightSquareBracketMatchType], AllOf[RightSquareBracketMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], 
AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class ListComp(BaseComp, BaseExpression, BaseList, BaseSimpleComp, BaseMatcherNode): elt: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() for_in: Union[ CompForMatchType, DoNotCareSentinel, OneOf[CompForMatchType], AllOf[CompForMatchType], ] = DoNotCare() lbracket: Union[ LeftSquareBracketMatchType, DoNotCareSentinel, OneOf[LeftSquareBracketMatchType], AllOf[LeftSquareBracketMatchType], ] = DoNotCare() rbracket: Union[ RightSquareBracketMatchType, DoNotCareSentinel, OneOf[RightSquareBracketMatchType], AllOf[RightSquareBracketMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] 
], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() MatchCaseMatchType = Union["MatchCase", MetadataMatchType, MatchIfTrue[cst.MatchCase]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Match(BaseCompoundStatement, BaseStatement, BaseMatcherNode): subject: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() cases: Union[ Sequence[ Union[ MatchCaseMatchType, DoNotCareSentinel, OneOf[MatchCaseMatchType], AllOf[MatchCaseMatchType], AtLeastN[ Union[ MatchCaseMatchType, DoNotCareSentinel, OneOf[MatchCaseMatchType], AllOf[MatchCaseMatchType], ] ], AtMostN[ Union[ MatchCaseMatchType, DoNotCareSentinel, OneOf[MatchCaseMatchType], AllOf[MatchCaseMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.MatchCase]], OneOf[ Union[ Sequence[ Union[ MatchCaseMatchType, OneOf[MatchCaseMatchType], AllOf[MatchCaseMatchType], AtLeastN[ Union[ MatchCaseMatchType, OneOf[MatchCaseMatchType], AllOf[MatchCaseMatchType], ] ], AtMostN[ Union[ MatchCaseMatchType, OneOf[MatchCaseMatchType], AllOf[MatchCaseMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchCase]], ] ], AllOf[ Union[ Sequence[ Union[ MatchCaseMatchType, OneOf[MatchCaseMatchType], AllOf[MatchCaseMatchType], AtLeastN[ Union[ MatchCaseMatchType, OneOf[MatchCaseMatchType], AllOf[MatchCaseMatchType], ] ], AtMostN[ Union[ MatchCaseMatchType, OneOf[MatchCaseMatchType], AllOf[MatchCaseMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchCase]], ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, 
MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_after_match: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_colon: Union[ TrailingWhitespaceMatchType, DoNotCareSentinel, OneOf[TrailingWhitespaceMatchType], AllOf[TrailingWhitespaceMatchType], ] = DoNotCare() indent: Union[ Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]], DoNotCareSentinel, OneOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], AllOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], ] = DoNotCare() footer: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchAs(BaseMatcherNode): pattern: Union[ Optional["MatchPattern"], MetadataMatchType, MatchIfTrue[Optional[cst.MatchPattern]], DoNotCareSentinel, OneOf[ Union[ Optional["MatchPattern"], MetadataMatchType, MatchIfTrue[Optional[cst.MatchPattern]], ] ], AllOf[ Union[ Optional["MatchPattern"], MetadataMatchType, MatchIfTrue[Optional[cst.MatchPattern]], ] ], ] = DoNotCare() name: Union[ Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]], DoNotCareSentinel, OneOf[ Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] ], AllOf[ Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] ], ] = DoNotCare() whitespace_before_as: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, 
OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_as: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() MatchPatternMatchType = Union[ "MatchPattern", MetadataMatchType, MatchIfTrue[cst.MatchPattern] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchCase(BaseMatcherNode): pattern: Union[ MatchPatternMatchType, DoNotCareSentinel, OneOf[MatchPatternMatchType], AllOf[MatchPatternMatchType], ] = DoNotCare() body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() guard: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, 
DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_after_case: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_if: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_if: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() MatchSequenceElementMatchType = Union[ "MatchSequenceElement", MetadataMatchType, MatchIfTrue[cst.MatchSequenceElement] ] MatchKeywordElementMatchType = Union[ "MatchKeywordElement", MetadataMatchType, MatchIfTrue[cst.MatchKeywordElement] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchClass(BaseMatcherNode): cls: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() patterns: Union[ Sequence[ Union[ MatchSequenceElementMatchType, DoNotCareSentinel, OneOf[MatchSequenceElementMatchType], AllOf[MatchSequenceElementMatchType], AtLeastN[ Union[ MatchSequenceElementMatchType, DoNotCareSentinel, OneOf[MatchSequenceElementMatchType], AllOf[MatchSequenceElementMatchType], ] ], AtMostN[ Union[ MatchSequenceElementMatchType, DoNotCareSentinel, OneOf[MatchSequenceElementMatchType], AllOf[MatchSequenceElementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.MatchSequenceElement]], OneOf[ Union[ Sequence[ Union[ MatchSequenceElementMatchType, OneOf[MatchSequenceElementMatchType], AllOf[MatchSequenceElementMatchType], AtLeastN[ Union[ MatchSequenceElementMatchType, OneOf[MatchSequenceElementMatchType], AllOf[MatchSequenceElementMatchType], ] ], AtMostN[ Union[ MatchSequenceElementMatchType, OneOf[MatchSequenceElementMatchType], AllOf[MatchSequenceElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchSequenceElement]], ] ], AllOf[ Union[ Sequence[ Union[ MatchSequenceElementMatchType, OneOf[MatchSequenceElementMatchType], AllOf[MatchSequenceElementMatchType], AtLeastN[ Union[ MatchSequenceElementMatchType, OneOf[MatchSequenceElementMatchType], AllOf[MatchSequenceElementMatchType], ] ], AtMostN[ 
Union[ MatchSequenceElementMatchType, OneOf[MatchSequenceElementMatchType], AllOf[MatchSequenceElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchSequenceElement]], ] ], ] = DoNotCare() kwds: Union[ Sequence[ Union[ MatchKeywordElementMatchType, DoNotCareSentinel, OneOf[MatchKeywordElementMatchType], AllOf[MatchKeywordElementMatchType], AtLeastN[ Union[ MatchKeywordElementMatchType, DoNotCareSentinel, OneOf[MatchKeywordElementMatchType], AllOf[MatchKeywordElementMatchType], ] ], AtMostN[ Union[ MatchKeywordElementMatchType, DoNotCareSentinel, OneOf[MatchKeywordElementMatchType], AllOf[MatchKeywordElementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.MatchKeywordElement]], OneOf[ Union[ Sequence[ Union[ MatchKeywordElementMatchType, OneOf[MatchKeywordElementMatchType], AllOf[MatchKeywordElementMatchType], AtLeastN[ Union[ MatchKeywordElementMatchType, OneOf[MatchKeywordElementMatchType], AllOf[MatchKeywordElementMatchType], ] ], AtMostN[ Union[ MatchKeywordElementMatchType, OneOf[MatchKeywordElementMatchType], AllOf[MatchKeywordElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchKeywordElement]], ] ], AllOf[ Union[ Sequence[ Union[ MatchKeywordElementMatchType, OneOf[MatchKeywordElementMatchType], AllOf[MatchKeywordElementMatchType], AtLeastN[ Union[ MatchKeywordElementMatchType, OneOf[MatchKeywordElementMatchType], AllOf[MatchKeywordElementMatchType], ] ], AtMostN[ Union[ MatchKeywordElementMatchType, OneOf[MatchKeywordElementMatchType], AllOf[MatchKeywordElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchKeywordElement]], ] ], ] = DoNotCare() whitespace_after_cls: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_before_patterns: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_kwds: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], 
AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchKeywordElement(BaseMatcherNode): key: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() pattern: Union[ MatchPatternMatchType, DoNotCareSentinel, OneOf[MatchPatternMatchType], AllOf[MatchPatternMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() whitespace_before_equal: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_equal: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() MatchSequenceElementOrMatchStarMatchType = Union[ "MatchSequenceElement", "MatchStar", MetadataMatchType, MatchIfTrue[Union[cst.MatchSequenceElement, cst.MatchStar]], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchList(BaseMatcherNode): patterns: Union[ Sequence[ Union[ MatchSequenceElementOrMatchStarMatchType, DoNotCareSentinel, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], AtLeastN[ Union[ MatchSequenceElementOrMatchStarMatchType, DoNotCareSentinel, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], AtMostN[ Union[ MatchSequenceElementOrMatchStarMatchType, DoNotCareSentinel, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[ Sequence[ Union[ cst.MatchSequenceElement, cst.MatchStar, OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], ] ] ], OneOf[ Union[ Sequence[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], AtLeastN[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], AtMostN[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], ] ], 
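# Usage sketch for `MatchList`: sequence-valued fields such as `patterns`
# accept a concrete sequence of element matchers, optionally mixed with
# helpers like `m.ZeroOrMore()`. For example, a `case [first, *rest]:`
# pattern can be targeted with:
#
#   import libcst.matchers as m
#
#   lists_with_star = m.MatchList(
#       patterns=[m.MatchSequenceElement(), m.MatchStar()]
#   )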
MatchIfTrue[ Sequence[ Union[ cst.MatchSequenceElement, cst.MatchStar, OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], ] ] ], ] ], AllOf[ Union[ Sequence[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], AtLeastN[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], AtMostN[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], ] ], MatchIfTrue[ Sequence[ Union[ cst.MatchSequenceElement, cst.MatchStar, OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], ] ] ], ] ], ] = DoNotCare() lbracket: Union[ Optional["LeftSquareBracket"], MetadataMatchType, MatchIfTrue[Optional[cst.LeftSquareBracket]], DoNotCareSentinel, OneOf[ Union[ Optional["LeftSquareBracket"], MetadataMatchType, MatchIfTrue[Optional[cst.LeftSquareBracket]], ] ], AllOf[ Union[ Optional["LeftSquareBracket"], MetadataMatchType, MatchIfTrue[Optional[cst.LeftSquareBracket]], ] ], ] = DoNotCare() rbracket: Union[ Optional["RightSquareBracket"], MetadataMatchType, MatchIfTrue[Optional[cst.RightSquareBracket]], DoNotCareSentinel, OneOf[ Union[ Optional["RightSquareBracket"], MetadataMatchType, MatchIfTrue[Optional[cst.RightSquareBracket]], ] ], AllOf[ Union[ Optional["RightSquareBracket"], MetadataMatchType, MatchIfTrue[Optional[cst.RightSquareBracket]], ] ], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], 
] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() MatchMappingElementMatchType = Union[ "MatchMappingElement", MetadataMatchType, MatchIfTrue[cst.MatchMappingElement] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchMapping(BaseMatcherNode): elements: Union[ Sequence[ Union[ MatchMappingElementMatchType, DoNotCareSentinel, OneOf[MatchMappingElementMatchType], AllOf[MatchMappingElementMatchType], AtLeastN[ Union[ MatchMappingElementMatchType, DoNotCareSentinel, OneOf[MatchMappingElementMatchType], AllOf[MatchMappingElementMatchType], ] ], AtMostN[ Union[ MatchMappingElementMatchType, DoNotCareSentinel, OneOf[MatchMappingElementMatchType], AllOf[MatchMappingElementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.MatchMappingElement]], OneOf[ Union[ Sequence[ Union[ MatchMappingElementMatchType, OneOf[MatchMappingElementMatchType], AllOf[MatchMappingElementMatchType], AtLeastN[ Union[ MatchMappingElementMatchType, OneOf[MatchMappingElementMatchType], AllOf[MatchMappingElementMatchType], ] ], AtMostN[ Union[ MatchMappingElementMatchType, OneOf[MatchMappingElementMatchType], AllOf[MatchMappingElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchMappingElement]], ] ], AllOf[ Union[ Sequence[ Union[ MatchMappingElementMatchType, OneOf[MatchMappingElementMatchType], AllOf[MatchMappingElementMatchType], AtLeastN[ Union[ MatchMappingElementMatchType, OneOf[MatchMappingElementMatchType], AllOf[MatchMappingElementMatchType], ] ], AtMostN[ Union[ MatchMappingElementMatchType, OneOf[MatchMappingElementMatchType], AllOf[MatchMappingElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchMappingElement]], ] ], ] = DoNotCare() lbrace: Union[ LeftCurlyBraceMatchType, DoNotCareSentinel, OneOf[LeftCurlyBraceMatchType], AllOf[LeftCurlyBraceMatchType], ] = DoNotCare() rbrace: Union[ RightCurlyBraceMatchType, DoNotCareSentinel, OneOf[RightCurlyBraceMatchType], AllOf[RightCurlyBraceMatchType], ] = DoNotCare() rest: Union[ Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]], DoNotCareSentinel, OneOf[ Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] ], AllOf[ Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] ], ] = DoNotCare() whitespace_before_rest: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() trailing_comma: Union[ Optional["Comma"], MetadataMatchType, MatchIfTrue[Optional[cst.Comma]], DoNotCareSentinel, OneOf[ Union[ Optional["Comma"], MetadataMatchType, MatchIfTrue[Optional[cst.Comma]] ] ], AllOf[ Union[ Optional["Comma"], MetadataMatchType, MatchIfTrue[Optional[cst.Comma]] ] ], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, 
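# Usage sketch for `MatchMapping`: the optional `rest` field corresponds to a
# `**rest` capture in a mapping pattern, so `rest=None` restricts the match to
# mapping patterns without a double-star capture:
#
#   import libcst.matchers as m
#
#   plain_mappings = m.MatchMapping(rest=None)
#   star_mappings = m.MatchMapping(rest=m.Name())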
MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchMappingElement(BaseMatcherNode): key: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() pattern: Union[ MatchPatternMatchType, DoNotCareSentinel, OneOf[MatchPatternMatchType], AllOf[MatchPatternMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() whitespace_before_colon: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_colon: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() MatchOrElementMatchType = Union[ "MatchOrElement", MetadataMatchType, MatchIfTrue[cst.MatchOrElement] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchOr(BaseMatcherNode): patterns: Union[ Sequence[ Union[ MatchOrElementMatchType, DoNotCareSentinel, OneOf[MatchOrElementMatchType], AllOf[MatchOrElementMatchType], AtLeastN[ Union[ MatchOrElementMatchType, DoNotCareSentinel, OneOf[MatchOrElementMatchType], AllOf[MatchOrElementMatchType], ] ], AtMostN[ Union[ MatchOrElementMatchType, DoNotCareSentinel, 
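# Usage sketch for `MatchOr` (an `|`-pattern such as `case 1 | 2 | 3:`),
# matching any or-pattern with two or more alternatives via `m.ZeroOrMore()`:
#
#   import libcst.matchers as m
#
#   or_patterns = m.MatchOr(
#       patterns=[m.MatchOrElement(), m.MatchOrElement(), m.ZeroOrMore()]
#   )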
OneOf[MatchOrElementMatchType], AllOf[MatchOrElementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.MatchOrElement]], OneOf[ Union[ Sequence[ Union[ MatchOrElementMatchType, OneOf[MatchOrElementMatchType], AllOf[MatchOrElementMatchType], AtLeastN[ Union[ MatchOrElementMatchType, OneOf[MatchOrElementMatchType], AllOf[MatchOrElementMatchType], ] ], AtMostN[ Union[ MatchOrElementMatchType, OneOf[MatchOrElementMatchType], AllOf[MatchOrElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchOrElement]], ] ], AllOf[ Union[ Sequence[ Union[ MatchOrElementMatchType, OneOf[MatchOrElementMatchType], AllOf[MatchOrElementMatchType], AtLeastN[ Union[ MatchOrElementMatchType, OneOf[MatchOrElementMatchType], AllOf[MatchOrElementMatchType], ] ], AtMostN[ Union[ MatchOrElementMatchType, OneOf[MatchOrElementMatchType], AllOf[MatchOrElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.MatchOrElement]], ] ], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BitOrMatchType = Union["BitOr", MetadataMatchType, MatchIfTrue[cst.BitOr]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchOrElement(BaseMatcherNode): pattern: Union[ MatchPatternMatchType, DoNotCareSentinel, 
OneOf[MatchPatternMatchType], AllOf[MatchPatternMatchType], ] = DoNotCare() separator: Union[ BitOrMatchType, DoNotCareSentinel, OneOf[BitOrMatchType], AllOf[BitOrMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchPattern(BaseMatcherNode): metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchSequence(BaseMatcherNode): metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchSequenceElement(BaseMatcherNode): value: Union[ MatchPatternMatchType, DoNotCareSentinel, OneOf[MatchPatternMatchType], AllOf[MatchPatternMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchSingleton(BaseMatcherNode): value: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchStar(BaseMatcherNode): name: Union[ Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]], DoNotCareSentinel, OneOf[ Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] ], AllOf[ Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] ], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() whitespace_before_name: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchTuple(BaseMatcherNode): patterns: Union[ Sequence[ Union[ MatchSequenceElementOrMatchStarMatchType, DoNotCareSentinel, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], AtLeastN[ Union[ MatchSequenceElementOrMatchStarMatchType, DoNotCareSentinel, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], AtMostN[ Union[ MatchSequenceElementOrMatchStarMatchType, DoNotCareSentinel, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[ Sequence[ Union[ cst.MatchSequenceElement, cst.MatchStar, OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], ] ] ], OneOf[ Union[ Sequence[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], AtLeastN[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], AtMostN[ Union[ 
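# Usage sketch distinguishing `MatchTuple` from `MatchList`: both take the
# same `MatchSequenceElement | MatchStar` pattern elements; only the
# surrounding brackets differ (`case (a, b):` vs `case [a, b]:`):
#
#   import libcst.matchers as m
#
#   pairs = m.MatchTuple(
#       patterns=[m.MatchSequenceElement(), m.MatchSequenceElement()]
#   )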
MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], ] ], MatchIfTrue[ Sequence[ Union[ cst.MatchSequenceElement, cst.MatchStar, OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], ] ] ], ] ], AllOf[ Union[ Sequence[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], AtLeastN[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], AtMostN[ Union[ MatchSequenceElementOrMatchStarMatchType, OneOf[MatchSequenceElementOrMatchStarMatchType], AllOf[MatchSequenceElementOrMatchStarMatchType], ] ], ] ], MatchIfTrue[ Sequence[ Union[ cst.MatchSequenceElement, cst.MatchStar, OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], ] ] ], ] ], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatchValue(BaseMatcherNode): value: Union[ 
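# Usage sketch for `MatchValue`, which wraps a literal pattern such as
# `case 0:` (note that cst.Integer stores its value as a string):
#
#   import libcst.matchers as m
#
#   zero_cases = m.MatchCase(pattern=m.MatchValue(value=m.Integer("0")))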
BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatrixMultiply(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatrixMultiplyAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Minus(BaseUnaryOp, BaseMatcherNode): whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() SimpleStatementLineOrBaseCompoundStatementMatchType = Union[ "SimpleStatementLine", "BaseCompoundStatement", MetadataMatchType, MatchIfTrue[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Module(BaseMatcherNode): body: Union[ Sequence[ Union[ SimpleStatementLineOrBaseCompoundStatementMatchType, DoNotCareSentinel, OneOf[SimpleStatementLineOrBaseCompoundStatementMatchType], AllOf[SimpleStatementLineOrBaseCompoundStatementMatchType], AtLeastN[ Union[ SimpleStatementLineOrBaseCompoundStatementMatchType, DoNotCareSentinel, OneOf[SimpleStatementLineOrBaseCompoundStatementMatchType], AllOf[SimpleStatementLineOrBaseCompoundStatementMatchType], ] ], AtMostN[ Union[ SimpleStatementLineOrBaseCompoundStatementMatchType, DoNotCareSentinel, OneOf[SimpleStatementLineOrBaseCompoundStatementMatchType], AllOf[SimpleStatementLineOrBaseCompoundStatementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[ Sequence[ Union[ cst.SimpleStatementLine, cst.BaseCompoundStatement, OneOf[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], AllOf[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], ] ] ], OneOf[ Union[ Sequence[ Union[ SimpleStatementLineOrBaseCompoundStatementMatchType, OneOf[SimpleStatementLineOrBaseCompoundStatementMatchType], AllOf[SimpleStatementLineOrBaseCompoundStatementMatchType], AtLeastN[ Union[ SimpleStatementLineOrBaseCompoundStatementMatchType, OneOf[ SimpleStatementLineOrBaseCompoundStatementMatchType ], AllOf[ SimpleStatementLineOrBaseCompoundStatementMatchType ], ] ], AtMostN[ Union[ SimpleStatementLineOrBaseCompoundStatementMatchType, 
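# Usage sketch for the `Module` matcher: `body` is a sequence of
# SimpleStatementLine / BaseCompoundStatement matchers, so module-level shape
# checks compose naturally with `m.matches`:
#
#   import libcst as cst
#   import libcst.matchers as m
#
#   module = cst.parse_module("x = 1\n")
#   assert m.matches(module, m.Module(body=[m.SimpleStatementLine()]))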
OneOf[ SimpleStatementLineOrBaseCompoundStatementMatchType ], AllOf[ SimpleStatementLineOrBaseCompoundStatementMatchType ], ] ], ] ], MatchIfTrue[ Sequence[ Union[ cst.SimpleStatementLine, cst.BaseCompoundStatement, OneOf[ Union[ cst.SimpleStatementLine, cst.BaseCompoundStatement ] ], AllOf[ Union[ cst.SimpleStatementLine, cst.BaseCompoundStatement ] ], ] ] ], ] ], AllOf[ Union[ Sequence[ Union[ SimpleStatementLineOrBaseCompoundStatementMatchType, OneOf[SimpleStatementLineOrBaseCompoundStatementMatchType], AllOf[SimpleStatementLineOrBaseCompoundStatementMatchType], AtLeastN[ Union[ SimpleStatementLineOrBaseCompoundStatementMatchType, OneOf[ SimpleStatementLineOrBaseCompoundStatementMatchType ], AllOf[ SimpleStatementLineOrBaseCompoundStatementMatchType ], ] ], AtMostN[ Union[ SimpleStatementLineOrBaseCompoundStatementMatchType, OneOf[ SimpleStatementLineOrBaseCompoundStatementMatchType ], AllOf[ SimpleStatementLineOrBaseCompoundStatementMatchType ], ] ], ] ], MatchIfTrue[ Sequence[ Union[ cst.SimpleStatementLine, cst.BaseCompoundStatement, OneOf[ Union[ cst.SimpleStatementLine, cst.BaseCompoundStatement ] ], AllOf[ Union[ cst.SimpleStatementLine, cst.BaseCompoundStatement ] ], ] ] ], ] ], ] = DoNotCare() header: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() footer: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() encoding: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] 
] = DoNotCare() default_indent: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() default_newline: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() has_trailing_newline: Union[ boolMatchType, DoNotCareSentinel, OneOf[boolMatchType], AllOf[boolMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Modulo(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class ModuloAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Multiply(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class MultiplyAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Name( BaseAssignTargetExpression, BaseDelTargetExpression, BaseExpression, BaseMatcherNode ): value: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], 
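# Usage sketch for the `Name` matcher: `value` takes a plain string, or a
# `MatchIfTrue` predicate for computed checks (here: ALL_CAPS identifiers):
#
#   import libcst as cst
#   import libcst.matchers as m
#
#   module = cst.parse_module("MAX_SIZE = 10\nmin_size = 1\n")
#   constants = m.findall(module, m.Name(value=m.MatchIfTrue(str.isupper)))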
DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class NameItem(BaseMatcherNode): name: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class NamedExpr(BaseExpression, BaseMatcherNode): target: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], 
MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_before_walrus: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_walrus: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Newline(BaseMatcherNode): value: Union[ Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]], DoNotCareSentinel, OneOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], AllOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Nonlocal(BaseSmallStatement, BaseMatcherNode): names: Union[ Sequence[ Union[ NameItemMatchType, DoNotCareSentinel, OneOf[NameItemMatchType], AllOf[NameItemMatchType], AtLeastN[ Union[ NameItemMatchType, DoNotCareSentinel, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], AtMostN[ Union[ NameItemMatchType, DoNotCareSentinel, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.NameItem]], OneOf[ Union[ Sequence[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], AtLeastN[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], AtMostN[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.NameItem]], ] ], AllOf[ Union[ Sequence[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], AtLeastN[ Union[ NameItemMatchType, 
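# Usage sketch for `Nonlocal`: `names` is a sequence of `NameItem` matchers,
# so a specific binding can be targeted with `m.ZeroOrMore()` padding:
#
#   import libcst.matchers as m
#
#   nonlocal_counter = m.Nonlocal(
#       names=[m.ZeroOrMore(), m.NameItem(name=m.Name("counter")), m.ZeroOrMore()]
#   )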
OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], AtMostN[ Union[ NameItemMatchType, OneOf[NameItemMatchType], AllOf[NameItemMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.NameItem]], ] ], ] = DoNotCare() whitespace_after_nonlocal: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Not(BaseUnaryOp, BaseMatcherNode): whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class NotEqual(BaseCompOp, BaseMatcherNode): value: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class NotIn(BaseCompOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_between: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Or(BaseBooleanOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Param(BaseMatcherNode): name: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() annotation: Union[ Optional["Annotation"], MetadataMatchType, MatchIfTrue[Optional[cst.Annotation]], DoNotCareSentinel, OneOf[ Union[ Optional["Annotation"], MetadataMatchType, 
MatchIfTrue[Optional[cst.Annotation]], ] ], AllOf[ Union[ Optional["Annotation"], MetadataMatchType, MatchIfTrue[Optional[cst.Annotation]], ] ], ] = DoNotCare() equal: Union[ AssignEqualMatchType, DoNotCareSentinel, OneOf[AssignEqualMatchType], AllOf[AssignEqualMatchType], ] = DoNotCare() default: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() star: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() whitespace_after_star: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after_param: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class ParamSlash(BaseMatcherNode): comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class ParamSpec(BaseMatcherNode): name: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() whitespace_after_star: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class ParamStar(BaseMatcherNode): comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() ParamMatchType = Union["Param", MetadataMatchType, MatchIfTrue[cst.Param]] ParamOrParamStarMatchType = Union[ "Param", "ParamStar", MetadataMatchType, MatchIfTrue[Union[cst.Param, cst.ParamStar]], ] ParamSlashMatchType = Union[ "ParamSlash", MetadataMatchType, MatchIfTrue[cst.ParamSlash] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Parameters(BaseMatcherNode): params: Union[ Sequence[ Union[ ParamMatchType, DoNotCareSentinel, OneOf[ParamMatchType], AllOf[ParamMatchType], AtLeastN[ Union[ ParamMatchType, DoNotCareSentinel, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], AtMostN[ Union[ ParamMatchType, DoNotCareSentinel, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.Param]], OneOf[ Union[ Sequence[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], AtLeastN[ 
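# Usage sketch for `Parameters`: sequence helpers make "first parameter is
# self" checks concise, a common way to match instance methods:
#
#   import libcst.matchers as m
#
#   method_params = m.Parameters(
#       params=[m.Param(name=m.Name("self")), m.ZeroOrMore()]
#   )
#   instance_methods = m.FunctionDef(params=method_params)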
Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], AtMostN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Param]], ] ], AllOf[ Union[ Sequence[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], AtLeastN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], AtMostN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Param]], ] ], ] = DoNotCare() star_arg: Union[ ParamOrParamStarMatchType, DoNotCareSentinel, OneOf[ParamOrParamStarMatchType], AllOf[ParamOrParamStarMatchType], ] = DoNotCare() kwonly_params: Union[ Sequence[ Union[ ParamMatchType, DoNotCareSentinel, OneOf[ParamMatchType], AllOf[ParamMatchType], AtLeastN[ Union[ ParamMatchType, DoNotCareSentinel, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], AtMostN[ Union[ ParamMatchType, DoNotCareSentinel, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.Param]], OneOf[ Union[ Sequence[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], AtLeastN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], AtMostN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Param]], ] ], AllOf[ Union[ Sequence[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], AtLeastN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], AtMostN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Param]], ] ], ] = DoNotCare() star_kwarg: Union[ Optional["Param"], MetadataMatchType, MatchIfTrue[Optional[cst.Param]], DoNotCareSentinel, OneOf[ Union[ Optional["Param"], MetadataMatchType, MatchIfTrue[Optional[cst.Param]] ] ], AllOf[ Union[ Optional["Param"], MetadataMatchType, MatchIfTrue[Optional[cst.Param]] ] ], ] = DoNotCare() posonly_params: Union[ Sequence[ Union[ ParamMatchType, DoNotCareSentinel, OneOf[ParamMatchType], AllOf[ParamMatchType], AtLeastN[ Union[ ParamMatchType, DoNotCareSentinel, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], AtMostN[ Union[ ParamMatchType, DoNotCareSentinel, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.Param]], OneOf[ Union[ Sequence[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], AtLeastN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], AtMostN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Param]], ] ], AllOf[ Union[ Sequence[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], AtLeastN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], AtMostN[ Union[ ParamMatchType, OneOf[ParamMatchType], AllOf[ParamMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.Param]], ] ], ] = DoNotCare() posonly_ind: Union[ ParamSlashMatchType, DoNotCareSentinel, OneOf[ParamSlashMatchType], AllOf[ParamSlashMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class ParenthesizedWhitespace(BaseParenthesizableWhitespace, BaseMatcherNode): first_line: Union[ TrailingWhitespaceMatchType, DoNotCareSentinel, OneOf[TrailingWhitespaceMatchType], AllOf[TrailingWhitespaceMatchType], ] = DoNotCare() empty_lines: Union[ 
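# Usage sketch for `ParenthesizedWhitespace`: whitespace matchers plug into
# the `whitespace_*` fields of other matchers, e.g. finding commas followed
# by a line break inside parentheses:
#
#   import libcst.matchers as m
#
#   wrapped_commas = m.Comma(whitespace_after=m.ParenthesizedWhitespace())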
Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() indent: Union[ boolMatchType, DoNotCareSentinel, OneOf[boolMatchType], AllOf[boolMatchType] ] = DoNotCare() last_line: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Pass(BaseSmallStatement, BaseMatcherNode): semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Plus(BaseUnaryOp, BaseMatcherNode): whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Power(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class PowerAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Raise(BaseSmallStatement, 
BaseMatcherNode): exc: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() cause: Union[ Optional["From"], MetadataMatchType, MatchIfTrue[Optional[cst.From]], DoNotCareSentinel, OneOf[ Union[Optional["From"], MetadataMatchType, MatchIfTrue[Optional[cst.From]]] ], AllOf[ Union[Optional["From"], MetadataMatchType, MatchIfTrue[Optional[cst.From]]] ], ] = DoNotCare() whitespace_after_raise: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Return(BaseSmallStatement, BaseMatcherNode): value: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() whitespace_after_return: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class RightCurlyBrace(BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class RightParen(BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class RightShift(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class RightShiftAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, 
DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class RightSquareBracket(BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Semicolon(BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Set(BaseExpression, BaseSet, BaseMatcherNode): elements: Union[ Sequence[ Union[ BaseElementMatchType, DoNotCareSentinel, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], AtLeastN[ Union[ BaseElementMatchType, DoNotCareSentinel, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], AtMostN[ Union[ BaseElementMatchType, DoNotCareSentinel, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.BaseElement]], OneOf[ Union[ Sequence[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], AtLeastN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], AtMostN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseElement]], ] ], AllOf[ Union[ Sequence[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], AtLeastN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], AtMostN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseElement]], ] ], ] = DoNotCare() lbrace: Union[ LeftCurlyBraceMatchType, DoNotCareSentinel, OneOf[LeftCurlyBraceMatchType], AllOf[LeftCurlyBraceMatchType], ] = DoNotCare() rbrace: Union[ RightCurlyBraceMatchType, DoNotCareSentinel, OneOf[RightCurlyBraceMatchType], AllOf[RightCurlyBraceMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], 
AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class SetComp(BaseComp, BaseExpression, BaseSet, BaseSimpleComp, BaseMatcherNode): elt: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() for_in: Union[ CompForMatchType, DoNotCareSentinel, OneOf[CompForMatchType], AllOf[CompForMatchType], ] = DoNotCare() lbrace: Union[ LeftCurlyBraceMatchType, DoNotCareSentinel, OneOf[LeftCurlyBraceMatchType], AllOf[LeftCurlyBraceMatchType], ] = DoNotCare() rbrace: Union[ RightCurlyBraceMatchType, DoNotCareSentinel, OneOf[RightCurlyBraceMatchType], AllOf[RightCurlyBraceMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, 
OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseSmallStatementMatchType = Union[ "BaseSmallStatement", MetadataMatchType, MatchIfTrue[cst.BaseSmallStatement] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class SimpleStatementLine(BaseStatement, BaseMatcherNode): body: Union[ Sequence[ Union[ BaseSmallStatementMatchType, DoNotCareSentinel, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], AtLeastN[ Union[ BaseSmallStatementMatchType, DoNotCareSentinel, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], AtMostN[ Union[ BaseSmallStatementMatchType, DoNotCareSentinel, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.BaseSmallStatement]], OneOf[ Union[ Sequence[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], AtLeastN[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], AtMostN[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseSmallStatement]], ] ], AllOf[ Union[ Sequence[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], AtLeastN[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], AtMostN[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseSmallStatement]], ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ 
EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() trailing_whitespace: Union[ TrailingWhitespaceMatchType, DoNotCareSentinel, OneOf[TrailingWhitespaceMatchType], AllOf[TrailingWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class SimpleStatementSuite(BaseSuite, BaseMatcherNode): body: Union[ Sequence[ Union[ BaseSmallStatementMatchType, DoNotCareSentinel, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], AtLeastN[ Union[ BaseSmallStatementMatchType, DoNotCareSentinel, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], AtMostN[ Union[ BaseSmallStatementMatchType, DoNotCareSentinel, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.BaseSmallStatement]], OneOf[ Union[ Sequence[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], AtLeastN[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], AtMostN[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseSmallStatement]], ] ], AllOf[ Union[ Sequence[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], AtLeastN[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], AtMostN[ Union[ BaseSmallStatementMatchType, OneOf[BaseSmallStatementMatchType], AllOf[BaseSmallStatementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseSmallStatement]], ] ], ] = DoNotCare() leading_whitespace: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() trailing_whitespace: Union[ TrailingWhitespaceMatchType, DoNotCareSentinel, OneOf[TrailingWhitespaceMatchType], AllOf[TrailingWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class SimpleString(BaseExpression, BaseString, BaseMatcherNode): value: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], 
AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class SimpleWhitespace(BaseParenthesizableWhitespace, BaseMatcherNode): value: Union[ strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Slice(BaseSlice, BaseMatcherNode): lower: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() upper: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() step: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() first_colon: Union[ ColonMatchType, DoNotCareSentinel, OneOf[ColonMatchType], AllOf[ColonMatchType] ] = DoNotCare() second_colon: Union[ ColonMatchType, 
DoNotCareSentinel, OneOf[ColonMatchType], AllOf[ColonMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class StarredDictElement(BaseDictElement, BaseMatcherNode): value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() whitespace_before_value: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class StarredElement(BaseElement, BaseExpression, BaseMatcherNode): value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_before_value: Union[ 
BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() SubscriptElementMatchType = Union[ "SubscriptElement", MetadataMatchType, MatchIfTrue[cst.SubscriptElement] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Subscript( BaseAssignTargetExpression, BaseDelTargetExpression, BaseExpression, BaseMatcherNode ): value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() slice: Union[ Sequence[ Union[ SubscriptElementMatchType, DoNotCareSentinel, OneOf[SubscriptElementMatchType], AllOf[SubscriptElementMatchType], AtLeastN[ Union[ SubscriptElementMatchType, DoNotCareSentinel, OneOf[SubscriptElementMatchType], AllOf[SubscriptElementMatchType], ] ], AtMostN[ Union[ SubscriptElementMatchType, DoNotCareSentinel, OneOf[SubscriptElementMatchType], AllOf[SubscriptElementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.SubscriptElement]], OneOf[ Union[ Sequence[ Union[ SubscriptElementMatchType, OneOf[SubscriptElementMatchType], AllOf[SubscriptElementMatchType], AtLeastN[ Union[ SubscriptElementMatchType, OneOf[SubscriptElementMatchType], AllOf[SubscriptElementMatchType], ] ], AtMostN[ Union[ SubscriptElementMatchType, OneOf[SubscriptElementMatchType], AllOf[SubscriptElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.SubscriptElement]], ] ], AllOf[ Union[ Sequence[ Union[ SubscriptElementMatchType, OneOf[SubscriptElementMatchType], AllOf[SubscriptElementMatchType], AtLeastN[ Union[ SubscriptElementMatchType, OneOf[SubscriptElementMatchType], AllOf[SubscriptElementMatchType], ] ], AtMostN[ Union[ SubscriptElementMatchType, OneOf[SubscriptElementMatchType], AllOf[SubscriptElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.SubscriptElement]], ] ], ] = DoNotCare() lbracket: Union[ LeftSquareBracketMatchType, DoNotCareSentinel, OneOf[LeftSquareBracketMatchType], AllOf[LeftSquareBracketMatchType], ] = DoNotCare() rbracket: Union[ RightSquareBracketMatchType, DoNotCareSentinel, OneOf[RightSquareBracketMatchType], AllOf[RightSquareBracketMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], 
AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_after_value: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseSliceMatchType = Union["BaseSlice", MetadataMatchType, MatchIfTrue[cst.BaseSlice]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class SubscriptElement(BaseMatcherNode): slice: Union[ BaseSliceMatchType, DoNotCareSentinel, OneOf[BaseSliceMatchType], AllOf[BaseSliceMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Subtract(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class SubtractAssign(BaseAugOp, BaseMatcherNode): whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() whitespace_after: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class TrailingWhitespace(BaseMatcherNode): whitespace: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() comment: Union[ Optional["Comment"], MetadataMatchType, MatchIfTrue[Optional[cst.Comment]], DoNotCareSentinel, OneOf[ Union[ Optional["Comment"], MetadataMatchType, 
MatchIfTrue[Optional[cst.Comment]], ] ], AllOf[ Union[ Optional["Comment"], MetadataMatchType, MatchIfTrue[Optional[cst.Comment]], ] ], ] = DoNotCare() newline: Union[ NewlineMatchType, DoNotCareSentinel, OneOf[NewlineMatchType], AllOf[NewlineMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() ExceptHandlerMatchType = Union[ "ExceptHandler", MetadataMatchType, MatchIfTrue[cst.ExceptHandler] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Try(BaseCompoundStatement, BaseStatement, BaseMatcherNode): body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() handlers: Union[ Sequence[ Union[ ExceptHandlerMatchType, DoNotCareSentinel, OneOf[ExceptHandlerMatchType], AllOf[ExceptHandlerMatchType], AtLeastN[ Union[ ExceptHandlerMatchType, DoNotCareSentinel, OneOf[ExceptHandlerMatchType], AllOf[ExceptHandlerMatchType], ] ], AtMostN[ Union[ ExceptHandlerMatchType, DoNotCareSentinel, OneOf[ExceptHandlerMatchType], AllOf[ExceptHandlerMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.ExceptHandler]], OneOf[ Union[ Sequence[ Union[ ExceptHandlerMatchType, OneOf[ExceptHandlerMatchType], AllOf[ExceptHandlerMatchType], AtLeastN[ Union[ ExceptHandlerMatchType, OneOf[ExceptHandlerMatchType], AllOf[ExceptHandlerMatchType], ] ], AtMostN[ Union[ ExceptHandlerMatchType, OneOf[ExceptHandlerMatchType], AllOf[ExceptHandlerMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ExceptHandler]], ] ], AllOf[ Union[ Sequence[ Union[ ExceptHandlerMatchType, OneOf[ExceptHandlerMatchType], AllOf[ExceptHandlerMatchType], AtLeastN[ Union[ ExceptHandlerMatchType, OneOf[ExceptHandlerMatchType], AllOf[ExceptHandlerMatchType], ] ], AtMostN[ Union[ ExceptHandlerMatchType, OneOf[ExceptHandlerMatchType], AllOf[ExceptHandlerMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ExceptHandler]], ] ], ] = DoNotCare() orelse: Union[ Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]], DoNotCareSentinel, OneOf[ Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] ], AllOf[ Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] ], ] = DoNotCare() finalbody: Union[ Optional["Finally"], MetadataMatchType, MatchIfTrue[Optional[cst.Finally]], DoNotCareSentinel, OneOf[ Union[ Optional["Finally"], MetadataMatchType, MatchIfTrue[Optional[cst.Finally]], ] ], AllOf[ Union[ Optional["Finally"], MetadataMatchType, MatchIfTrue[Optional[cst.Finally]], ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] 
], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() ExceptStarHandlerMatchType = Union[ "ExceptStarHandler", MetadataMatchType, MatchIfTrue[cst.ExceptStarHandler] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class TryStar(BaseCompoundStatement, BaseStatement, BaseMatcherNode): body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() handlers: Union[ Sequence[ Union[ ExceptStarHandlerMatchType, DoNotCareSentinel, OneOf[ExceptStarHandlerMatchType], AllOf[ExceptStarHandlerMatchType], AtLeastN[ Union[ ExceptStarHandlerMatchType, DoNotCareSentinel, OneOf[ExceptStarHandlerMatchType], AllOf[ExceptStarHandlerMatchType], ] ], AtMostN[ Union[ ExceptStarHandlerMatchType, DoNotCareSentinel, OneOf[ExceptStarHandlerMatchType], AllOf[ExceptStarHandlerMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.ExceptStarHandler]], OneOf[ Union[ Sequence[ Union[ ExceptStarHandlerMatchType, OneOf[ExceptStarHandlerMatchType], AllOf[ExceptStarHandlerMatchType], AtLeastN[ Union[ ExceptStarHandlerMatchType, OneOf[ExceptStarHandlerMatchType], AllOf[ExceptStarHandlerMatchType], ] ], AtMostN[ Union[ ExceptStarHandlerMatchType, OneOf[ExceptStarHandlerMatchType], AllOf[ExceptStarHandlerMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ExceptStarHandler]], ] ], AllOf[ Union[ Sequence[ Union[ ExceptStarHandlerMatchType, OneOf[ExceptStarHandlerMatchType], AllOf[ExceptStarHandlerMatchType], AtLeastN[ Union[ ExceptStarHandlerMatchType, OneOf[ExceptStarHandlerMatchType], AllOf[ExceptStarHandlerMatchType], ] ], AtMostN[ Union[ ExceptStarHandlerMatchType, OneOf[ExceptStarHandlerMatchType], AllOf[ExceptStarHandlerMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.ExceptStarHandler]], ] ], ] = DoNotCare() orelse: Union[ Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]], DoNotCareSentinel, OneOf[ Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] ], AllOf[ Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] ], ] = DoNotCare() finalbody: Union[ Optional["Finally"], MetadataMatchType, MatchIfTrue[Optional[cst.Finally]], DoNotCareSentinel, OneOf[ Union[ Optional["Finally"], MetadataMatchType, MatchIfTrue[Optional[cst.Finally]], ] ], AllOf[ Union[ Optional["Finally"], MetadataMatchType, MatchIfTrue[Optional[cst.Finally]], ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ 
Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class Tuple( BaseAssignTargetExpression, BaseDelTargetExpression, BaseExpression, BaseMatcherNode ): elements: Union[ Sequence[ Union[ BaseElementMatchType, DoNotCareSentinel, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], AtLeastN[ Union[ BaseElementMatchType, DoNotCareSentinel, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], AtMostN[ Union[ BaseElementMatchType, DoNotCareSentinel, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.BaseElement]], OneOf[ Union[ Sequence[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], AtLeastN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], AtMostN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseElement]], ] ], AllOf[ Union[ Sequence[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], AtLeastN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], AtMostN[ Union[ BaseElementMatchType, OneOf[BaseElementMatchType], AllOf[BaseElementMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.BaseElement]], ] ], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ 
RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class TypeAlias(BaseSmallStatement, BaseMatcherNode): name: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() value: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() type_parameters: Union[ Optional["TypeParameters"], MetadataMatchType, MatchIfTrue[Optional[cst.TypeParameters]], DoNotCareSentinel, OneOf[ Union[ Optional["TypeParameters"], MetadataMatchType, MatchIfTrue[Optional[cst.TypeParameters]], ] ], AllOf[ Union[ Optional["TypeParameters"], MetadataMatchType, MatchIfTrue[Optional[cst.TypeParameters]], ] ], ] = DoNotCare() whitespace_after_type: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_name: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_type_parameters: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_after_equals: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, DoNotCareSentinel, OneOf[SemicolonMatchType], AllOf[SemicolonMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() TypeVarOrTypeVarTupleOrParamSpecMatchType = Union[ "TypeVar", "TypeVarTuple", "ParamSpec", MetadataMatchType, MatchIfTrue[Union[cst.TypeVar, cst.TypeVarTuple, cst.ParamSpec]], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class TypeParam(BaseMatcherNode): param: Union[ TypeVarOrTypeVarTupleOrParamSpecMatchType, DoNotCareSentinel, OneOf[TypeVarOrTypeVarTupleOrParamSpecMatchType], AllOf[TypeVarOrTypeVarTupleOrParamSpecMatchType], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() TypeParamMatchType = Union["TypeParam", MetadataMatchType, MatchIfTrue[cst.TypeParam]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class TypeParameters(BaseMatcherNode): params: Union[ Sequence[ Union[ TypeParamMatchType, DoNotCareSentinel, OneOf[TypeParamMatchType], AllOf[TypeParamMatchType], AtLeastN[ Union[ TypeParamMatchType, DoNotCareSentinel, OneOf[TypeParamMatchType], AllOf[TypeParamMatchType], ] ], AtMostN[ 
Union[ TypeParamMatchType, DoNotCareSentinel, OneOf[TypeParamMatchType], AllOf[TypeParamMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.TypeParam]], OneOf[ Union[ Sequence[ Union[ TypeParamMatchType, OneOf[TypeParamMatchType], AllOf[TypeParamMatchType], AtLeastN[ Union[ TypeParamMatchType, OneOf[TypeParamMatchType], AllOf[TypeParamMatchType], ] ], AtMostN[ Union[ TypeParamMatchType, OneOf[TypeParamMatchType], AllOf[TypeParamMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.TypeParam]], ] ], AllOf[ Union[ Sequence[ Union[ TypeParamMatchType, OneOf[TypeParamMatchType], AllOf[TypeParamMatchType], AtLeastN[ Union[ TypeParamMatchType, OneOf[TypeParamMatchType], AllOf[TypeParamMatchType], ] ], AtMostN[ Union[ TypeParamMatchType, OneOf[TypeParamMatchType], AllOf[TypeParamMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.TypeParam]], ] ], ] = DoNotCare() lbracket: Union[ LeftSquareBracketMatchType, DoNotCareSentinel, OneOf[LeftSquareBracketMatchType], AllOf[LeftSquareBracketMatchType], ] = DoNotCare() rbracket: Union[ RightSquareBracketMatchType, DoNotCareSentinel, OneOf[RightSquareBracketMatchType], AllOf[RightSquareBracketMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class TypeVar(BaseMatcherNode): name: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() bound: Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, OneOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], AllOf[ Union[ Optional["BaseExpression"], MetadataMatchType, MatchIfTrue[Optional[cst.BaseExpression]], ] ], ] = DoNotCare() colon: Union[ ColonMatchType, DoNotCareSentinel, OneOf[ColonMatchType], AllOf[ColonMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class TypeVarTuple(BaseMatcherNode): name: Union[ NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() whitespace_after_star: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseUnaryOpMatchType = Union[ "BaseUnaryOp", MetadataMatchType, MatchIfTrue[cst.BaseUnaryOp] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class UnaryOperation(BaseExpression, BaseMatcherNode): operator: Union[ BaseUnaryOpMatchType, DoNotCareSentinel, OneOf[BaseUnaryOpMatchType], AllOf[BaseUnaryOpMatchType], ] = DoNotCare() expression: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], 
AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class While(BaseCompoundStatement, BaseStatement, BaseMatcherNode): test: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() orelse: Union[ Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]], DoNotCareSentinel, OneOf[ Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] ], AllOf[ Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, 
OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() whitespace_after_while: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() WithItemMatchType = Union["WithItem", MetadataMatchType, MatchIfTrue[cst.WithItem]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class With(BaseCompoundStatement, BaseStatement, BaseMatcherNode): items: Union[ Sequence[ Union[ WithItemMatchType, DoNotCareSentinel, OneOf[WithItemMatchType], AllOf[WithItemMatchType], AtLeastN[ Union[ WithItemMatchType, DoNotCareSentinel, OneOf[WithItemMatchType], AllOf[WithItemMatchType], ] ], AtMostN[ Union[ WithItemMatchType, DoNotCareSentinel, OneOf[WithItemMatchType], AllOf[WithItemMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.WithItem]], OneOf[ Union[ Sequence[ Union[ WithItemMatchType, OneOf[WithItemMatchType], AllOf[WithItemMatchType], AtLeastN[ Union[ WithItemMatchType, OneOf[WithItemMatchType], AllOf[WithItemMatchType], ] ], AtMostN[ Union[ WithItemMatchType, OneOf[WithItemMatchType], AllOf[WithItemMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.WithItem]], ] ], AllOf[ Union[ Sequence[ Union[ WithItemMatchType, OneOf[WithItemMatchType], AllOf[WithItemMatchType], AtLeastN[ Union[ WithItemMatchType, OneOf[WithItemMatchType], AllOf[WithItemMatchType], ] ], AtMostN[ Union[ WithItemMatchType, OneOf[WithItemMatchType], AllOf[WithItemMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.WithItem]], ] ], ] = DoNotCare() body: Union[ BaseSuiteMatchType, DoNotCareSentinel, OneOf[BaseSuiteMatchType], AllOf[BaseSuiteMatchType], ] = DoNotCare() asynchronous: Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], DoNotCareSentinel, OneOf[ Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], ] ], AllOf[ Union[ Optional["Asynchronous"], MetadataMatchType, MatchIfTrue[Optional[cst.Asynchronous]], ] ], ] = DoNotCare() leading_lines: Union[ Sequence[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, DoNotCareSentinel, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ Union[ Sequence[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], AtLeastN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], AtMostN[ Union[ EmptyLineMatchType, OneOf[EmptyLineMatchType], AllOf[EmptyLineMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() lpar: Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], 
AllOf[LeftParenMatchType], ] = DoNotCare() rpar: Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] = DoNotCare() whitespace_after_with: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() whitespace_before_colon: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() @dataclass(frozen=True, eq=False, unsafe_hash=False) class WithItem(BaseMatcherNode): item: Union[ BaseExpressionMatchType, DoNotCareSentinel, OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() asname: Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]], DoNotCareSentinel, OneOf[ Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] ] ], AllOf[ Union[ Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] ] ], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() BaseExpressionOrFromOrNoneMatchType = Union[ "BaseExpression", "From", None, MetadataMatchType, MatchIfTrue[Union[cst.BaseExpression, cst.From, None]], ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Yield(BaseExpression, BaseMatcherNode): value: Union[ BaseExpressionOrFromOrNoneMatchType, DoNotCareSentinel, OneOf[BaseExpressionOrFromOrNoneMatchType], AllOf[BaseExpressionOrFromOrNoneMatchType], ] = DoNotCare() lpar: Union[ Sequence[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, DoNotCareSentinel, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ Union[ Sequence[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], AtLeastN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], AtMostN[ Union[ LeftParenMatchType, OneOf[LeftParenMatchType], AllOf[LeftParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() rpar: Union[ Sequence[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, DoNotCareSentinel, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], DoNotCareSentinel, MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], 
AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ Union[ Sequence[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], AtLeastN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], AtMostN[ Union[ RightParenMatchType, OneOf[RightParenMatchType], AllOf[RightParenMatchType], ] ], ] ], MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() whitespace_after_yield: Union[ BaseParenthesizableWhitespaceMatchType, DoNotCareSentinel, OneOf[BaseParenthesizableWhitespaceMatchType], AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, OneOf[MetadataMatchType], AllOf[MetadataMatchType], ] = DoNotCare() __all__ = [ "Add", "AddAssign", "AllOf", "And", "AnnAssign", "Annotation", "Arg", "AsName", "Assert", "Assign", "AssignEqual", "AssignTarget", "Asynchronous", "AtLeastN", "AtMostN", "Attribute", "AugAssign", "Await", "BaseAssignTargetExpression", "BaseAugOp", "BaseBinaryOp", "BaseBooleanOp", "BaseComp", "BaseCompOp", "BaseCompoundStatement", "BaseDelTargetExpression", "BaseDict", "BaseDictElement", "BaseElement", "BaseExpression", "BaseFormattedStringContent", "BaseList", "BaseMatcherNode", "BaseMetadataProvider", "BaseNumber", "BaseParenthesizableWhitespace", "BaseSet", "BaseSimpleComp", "BaseSlice", "BaseSmallStatement", "BaseStatement", "BaseString", "BaseSuite", "BaseUnaryOp", "BinaryOperation", "BitAnd", "BitAndAssign", "BitInvert", "BitOr", "BitOrAssign", "BitXor", "BitXorAssign", "BooleanOperation", "Break", "Call", "ClassDef", "Colon", "Comma", "Comment", "CompFor", "CompIf", "Comparison", "ComparisonTarget", "ConcatenatedString", "Continue", "Decorator", "Del", "Dict", "DictComp", "DictElement", "Divide", "DivideAssign", "DoNotCare", "DoNotCareSentinel", "DoesNotMatch", "Dot", "Element", "Ellipsis", "Else", "EmptyLine", "Equal", "ExceptHandler", "ExceptStarHandler", "Expr", "Finally", "Float", "FloorDivide", "FloorDivideAssign", "For", "FormattedString", "FormattedStringExpression", "FormattedStringText", "From", "FunctionDef", "GeneratorExp", "Global", "GreaterThan", "GreaterThanEqual", "If", "IfExp", "Imaginary", "Import", "ImportAlias", "ImportFrom", "ImportStar", "In", "IndentedBlock", "Index", "Integer", "Is", "IsNot", "Lambda", "LeftCurlyBrace", "LeftParen", "LeftShift", "LeftShiftAssign", "LeftSquareBracket", "LessThan", "LessThanEqual", "List", "ListComp", "Match", "MatchAs", "MatchCase", "MatchClass", "MatchDecoratorMismatch", "MatchIfTrue", "MatchKeywordElement", "MatchList", "MatchMapping", "MatchMappingElement", "MatchMetadata", "MatchMetadataIfTrue", "MatchOr", "MatchOrElement", "MatchPattern", "MatchRegex", "MatchSequence", "MatchSequenceElement", "MatchSingleton", "MatchStar", "MatchTuple", "MatchValue", "MatcherDecoratableTransformer", "MatcherDecoratableVisitor", "MatrixMultiply", "MatrixMultiplyAssign", "Minus", "Module", "Modulo", "ModuloAssign", "Multiply", "MultiplyAssign", "Name", "NameItem", "NamedExpr", "Newline", "Nonlocal", "Not", "NotEqual", "NotIn", "OneOf", "Or", "Param", "ParamSlash", "ParamSpec", "ParamStar", "Parameters", "ParenthesizedWhitespace", "Pass", "Plus", "Power", "PowerAssign", "Raise", "Return", "RightCurlyBrace", "RightParen", "RightShift", "RightShiftAssign", "RightSquareBracket", "SaveMatchedNode", "Semicolon", "Set", "SetComp", "SimpleStatementLine", "SimpleStatementSuite", "SimpleString", "SimpleWhitespace", 
"Slice", "StarredDictElement", "StarredElement", "Subscript", "SubscriptElement", "Subtract", "SubtractAssign", "TrailingWhitespace", "Try", "TryStar", "Tuple", "TypeAlias", "TypeOf", "TypeParam", "TypeParameters", "TypeVar", "TypeVarTuple", "UnaryOperation", "While", "With", "WithItem", "Yield", "ZeroOrMore", "ZeroOrOne", "call_if_inside", "call_if_not_inside", "extract", "extractall", "findall", "leave", "matches", "replace", "visit", ] LibCST-1.2.0/libcst/matchers/_decorators.py000066400000000000000000000120121456464173300205470ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, TypeVar from libcst.matchers._matcher_base import BaseMatcherNode _CSTVisitFuncT = TypeVar("_CSTVisitFuncT") VISIT_POSITIVE_MATCHER_ATTR: str = "_call_if_inside_matcher" VISIT_NEGATIVE_MATCHER_ATTR: str = "_call_if_not_inside_matcher" CONSTRUCTED_VISIT_MATCHER_ATTR: str = "_visit_matcher" CONSTRUCTED_LEAVE_MATCHER_ATTR: str = "_leave_matcher" def call_if_inside( matcher: BaseMatcherNode, # pyre-fixme[34]: `Variable[_CSTVisitFuncT]` isn't present in the function's parameters. ) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT]: """ A decorator for visit and leave methods inside a :class:`MatcherDecoratableTransformer` or a :class:`MatcherDecoratableVisitor`. A method that is decorated with this decorator will only be called if it or one of its parents matches the supplied matcher. Use this to selectively gate visit and leave methods to be called only when inside of another relevant node. Note that this works for both node and attribute methods, so you can decorate a ``visit_`` or a ``visit__`` method. """ def inner(original: _CSTVisitFuncT) -> _CSTVisitFuncT: setattr( original, VISIT_POSITIVE_MATCHER_ATTR, [*getattr(original, VISIT_POSITIVE_MATCHER_ATTR, []), matcher], ) return original return inner def call_if_not_inside( matcher: BaseMatcherNode, # pyre-fixme[34]: `Variable[_CSTVisitFuncT]` isn't present in the function's parameters. ) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT]: """ A decorator for visit and leave methods inside a :class:`MatcherDecoratableTransformer` or a :class:`MatcherDecoratableVisitor`. A method that is decorated with this decorator will only be called if it or one of its parents does not match the supplied matcher. Use this to selectively gate visit and leave methods to be called only when outside of another relevant node. Note that this works for both node and attribute methods, so you can decorate a ``visit_`` or a ``visit__`` method. """ def inner(original: _CSTVisitFuncT) -> _CSTVisitFuncT: setattr( original, VISIT_NEGATIVE_MATCHER_ATTR, [*getattr(original, VISIT_NEGATIVE_MATCHER_ATTR, []), matcher], ) return original return inner # pyre-fixme[34]: `Variable[_CSTVisitFuncT]` isn't present in the function's parameters. def visit(matcher: BaseMatcherNode) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT]: """ A decorator that allows a method inside a :class:`MatcherDecoratableTransformer` or a :class:`MatcherDecoratableVisitor` visitor to be called when visiting a node that matches the provided matcher. Note that you can use this in combination with :func:`call_if_inside` and :func:`call_if_not_inside` decorators. Unlike explicit ``visit_`` and ``leave_`` methods, functions decorated with this decorator cannot stop child traversal by returning ``False``. 
Decorated visit functions should always have a return annotation of ``None``. There is no restriction on the number of visit decorators allowed on a method. There is also no restriction on the number of methods that may be decorated with the same matcher. When multiple visit decorators are found on the same method, they act as a simple or, and the method will be called when any one of the contained matches is ``True``. """ def inner(original: _CSTVisitFuncT) -> _CSTVisitFuncT: setattr( original, CONSTRUCTED_VISIT_MATCHER_ATTR, [*getattr(original, CONSTRUCTED_VISIT_MATCHER_ATTR, []), matcher], ) return original return inner # pyre-fixme[34]: `Variable[_CSTVisitFuncT]` isn't present in the function's parameters. def leave(matcher: BaseMatcherNode) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT]: """ A decorator that allows a method inside a :class:`MatcherDecoratableTransformer` or a :class:`MatcherDecoratableVisitor` visitor to be called when leaving a node that matches the provided matcher. Note that you can use this in combination with :func:`call_if_inside` and :func:`call_if_not_inside` decorators. There is no restriction on the number of leave decorators allowed on a method. There is also no restriction on the number of methods that may be decorated with the same matcher. When multiple leave decorators are found on the same method, they act as a simple or, and the method will be called when any one of the contained matches is ``True``. """ def inner(original: _CSTVisitFuncT) -> _CSTVisitFuncT: setattr( original, CONSTRUCTED_LEAVE_MATCHER_ATTR, [*getattr(original, CONSTRUCTED_LEAVE_MATCHER_ATTR, []), matcher], ) return original return inner LibCST-1.2.0/libcst/matchers/_matcher_base.py000066400000000000000000002323341456464173300210320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections.abc import inspect import re from abc import ABCMeta from dataclasses import dataclass, fields from enum import auto, Enum from typing import ( Callable, cast, Dict, Generic, Iterator, List, Mapping, NoReturn, Optional, Pattern, Sequence, Tuple, Type, TypeVar, Union, ) import libcst import libcst.metadata as meta from libcst import FlattenSentinel, MaybeSentinel, RemovalSentinel from libcst._metadata_dependent import LazyValue class DoNotCareSentinel(Enum): """ A sentinel that is used in matcher classes to indicate that a caller does not care what this value is. We recommend that you do not use this directly, and instead use the :func:`DoNotCare` helper. You do not need to use this for concrete matcher attributes since :func:`DoNotCare` is already the default. """ DEFAULT = auto() def __repr__(self) -> str: return "DoNotCare()" _MatcherT = TypeVar("_MatcherT", covariant=True) _MatchIfTrueT = TypeVar("_MatchIfTrueT", covariant=True) _BaseMatcherNodeSelfT = TypeVar("_BaseMatcherNodeSelfT", bound="BaseMatcherNode") _OtherNodeT = TypeVar("_OtherNodeT") _MetadataValueT = TypeVar("_MetadataValueT") _MatcherTypeT = TypeVar("_MatcherTypeT", bound=Type["BaseMatcherNode"]) _OtherNodeMatcherTypeT = TypeVar( "_OtherNodeMatcherTypeT", bound=Type["BaseMatcherNode"] ) _METADATA_MISSING_SENTINEL = object() class AbstractBaseMatcherNodeMeta(ABCMeta): """ Metaclass that all matcher nodes use. Allows chaining two node types together with a bitwise-or operator to produce a :class:`TypeOf` matcher.
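For example, the shorthand on the left produces the matcher on the right::

    m.Name | m.SimpleString  # TypeOf(m.Name, m.SimpleString)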
""" # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, node: Type["BaseMatcherNode"]) -> "TypeOf[Type[BaseMatcherNode]]": return TypeOf(self, node) class BaseMatcherNode: """ Base class that all concrete matchers subclass from. :class:`OneOf` and :class:`AllOf` also subclass from this in order to allow them to be used in any place that a concrete matcher is allowed. This means that, for example, you can call :func:`matches` with a concrete matcher, or a :class:`OneOf` with several concrete matchers as options. """ # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__( self: _BaseMatcherNodeSelfT, other: _OtherNodeT ) -> "OneOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]]": return OneOf(self, other) def __and__( self: _BaseMatcherNodeSelfT, other: _OtherNodeT ) -> "AllOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]]": return AllOf(self, other) def __invert__(self: _BaseMatcherNodeSelfT) -> "_BaseMatcherNodeSelfT": return cast(_BaseMatcherNodeSelfT, _InverseOf(self)) def DoNotCare() -> DoNotCareSentinel: """ Used when you want to match exactly one node, but you do not care what node it is. Useful inside sequences such as a :class:`libcst.matchers.Call`'s args attribte. You do not need to use this for concrete matcher attributes since :func:`DoNotCare` is already the default. For example, the following matcher would match against any function calls with three arguments, regardless of the arguments themselves and regardless of the function name that we were calling:: m.Call(args=[m.DoNotCare(), m.DoNotCare(), m.DoNotCare()]) """ return DoNotCareSentinel.DEFAULT class TypeOf(Generic[_MatcherTypeT], BaseMatcherNode): """ Matcher that matches any one of the given types. Useful when you want to work with trees where a common property might belong to more than a single type. For example, if you want either a binary operation or a boolean operation where the left side has a name ``foo``:: m.TypeOf(m.BinaryOperation, m.BooleanOperation)(left = m.Name("foo")) Or you could use the shorthand, like:: (m.BinaryOperation | m.BooleanOperation)(left = m.Name("foo")) Also :class:`TypeOf` matchers can be used with initalizing in the default state of other node matchers (without passing any extra patterns):: m.Name | m.SimpleString The will be equal to:: m.OneOf(m.Name(), m.SimpleString()) """ def __init__(self, *options: Union[_MatcherTypeT, "TypeOf[_MatcherTypeT]"]) -> None: actual_options: List[_MatcherTypeT] = [] for option in options: if isinstance(option, TypeOf): if option.initalized: raise Exception( "Cannot chain an uninitalized TypeOf with an initalized one" ) actual_options.extend(option._raw_options) else: actual_options.append(option) self._initalized = False self._call_items: Tuple[Tuple[object, ...], Dict[str, object]] = ((), {}) self._raw_options: Tuple[_MatcherTypeT, ...] = tuple(actual_options) @property def initalized(self) -> bool: return self._initalized @property def options(self) -> Iterator[BaseMatcherNode]: for option in self._raw_options: args, kwargs = self._call_items matcher_pattern = option(*args, **kwargs) yield matcher_pattern def __call__(self, *args: object, **kwargs: object) -> BaseMatcherNode: self._initalized = True self._call_items = (args, kwargs) return self # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. 
def __or__( self, other: _OtherNodeMatcherTypeT ) -> "TypeOf[Union[_MatcherTypeT, _OtherNodeMatcherTypeT]]": return TypeOf[Union[_MatcherTypeT, _OtherNodeMatcherTypeT]](self, other) # pyre-fixme[14]: `__and__` overrides method defined in `BaseMatcherNode` # inconsistently. def __and__(self, other: _OtherNodeMatcherTypeT) -> NoReturn: left, right = type(self).__name__, other.__name__ raise TypeError( f"TypeError: unsupported operand type(s) for &: {left!r} and {right!r}" ) def __invert__(self) -> "AllOf[BaseMatcherNode]": return AllOf(*map(DoesNotMatch, self.options)) def __repr__(self) -> str: types = ", ".join(repr(option) for option in self._raw_options) return f"TypeOf({types}, initalized = {self.initalized})" class OneOf(Generic[_MatcherT], BaseMatcherNode): """ Matcher that matches any one of its options. Useful when you want to match against one of several options for a single node. You can also construct a :class:`OneOf` matcher by using Python's bitwise or operator with concrete matcher classes. For example, you could match against ``True``/``False`` like:: m.OneOf(m.Name("True"), m.Name("False")) Or you could use the shorthand, like:: m.Name("True") | m.Name("False") """ def __init__(self, *options: Union[_MatcherT, "OneOf[_MatcherT]"]) -> None: actual_options: List[_MatcherT] = [] for option in options: if isinstance(option, AllOf): raise Exception("Cannot use AllOf and OneOf in combination!") elif isinstance(option, (OneOf, TypeOf)): actual_options.extend(option.options) else: actual_options.append(option) self._options: Sequence[_MatcherT] = tuple(actual_options) @property def options(self) -> Sequence[_MatcherT]: """ The normalized list of options that we can choose from to satisfy a :class:`OneOf` matcher. If any of these matchers are true, the :class:`OneOf` matcher will also be considered a match. """ return self._options # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: _OtherNodeT) -> "OneOf[Union[_MatcherT, _OtherNodeT]]": return OneOf(self, other) def __and__(self, other: _OtherNodeT) -> NoReturn: raise Exception("Cannot use AllOf and OneOf in combination!") def __invert__(self) -> "AllOf[_MatcherT]": # Invert using De Morgan's Law so we don't have to complicate types. return AllOf(*[DoesNotMatch(m) for m in self._options]) def __repr__(self) -> str: return f"OneOf({', '.join([repr(o) for o in self._options])})" class AllOf(Generic[_MatcherT], BaseMatcherNode): """ Matcher that matches all of its options. Useful when you want to match against a concrete matcher and a :class:`MatchIfTrue` at the same time. Also useful when you want to match against a concrete matcher and a :func:`DoesNotMatch` at the same time. You can also construct a :class:`AllOf` matcher by using Python's bitwise and operator with concrete matcher classes. For example, you could match against ``True`` in a roundabout way like:: m.AllOf(m.Name(), m.Name("True")) Or you could use the shorthand, like:: m.Name() & m.Name("True") Similar to :class:`OneOf`, this can be used in place of any concrete matcher. Real-world cases where :class:`AllOf` is useful are hard to come by but they are still provided for the limited edge cases in which they make sense. In the example above, we are redundantly matching against any LibCST :class:`~libcst.Name` node as well as LibCST :class:`~libcst.Name` nodes that have the ``value`` of ``True``. We could drop the first option entirely and get the same result. 
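A sketch of a case where :class:`AllOf` genuinely helps is pairing a concrete matcher with a :class:`MatchIfTrue` predicate (the predicate here is illustrative)::

    m.AllOf(m.Name(), m.MatchIfTrue(lambda node: node.value.isupper()))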
Often, if you are using a :class:`AllOf`, you can refactor your code to be simpler. For example, the following matches any function call to ``foo``, and any function call which takes zero arguments:: m.AllOf(m.Call(func=m.Name("foo")), m.Call(args=())) This could be refactored into the following equivalent concrete matcher:: m.Call(func=m.Name("foo"), args=()) """ def __init__(self, *options: Union[_MatcherT, "AllOf[_MatcherT]"]) -> None: actual_options: List[_MatcherT] = [] for option in options: if isinstance(option, OneOf): raise Exception("Cannot use AllOf and OneOf in combination!") elif isinstance(option, TypeOf): raise Exception("Cannot use AllOf and TypeOf in combination!") elif isinstance(option, AllOf): actual_options.extend(option.options) else: actual_options.append(option) self._options: Sequence[_MatcherT] = tuple(actual_options) @property def options(self) -> Sequence[_MatcherT]: """ The normalized list of options that we can choose from to satisfy a :class:`AllOf` matcher. If all of these matchers are true, the :class:`AllOf` matcher will also be considered a match. """ return self._options # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: _OtherNodeT) -> NoReturn: raise Exception("Cannot use AllOf and OneOf in combination!") def __and__(self, other: _OtherNodeT) -> "AllOf[Union[_MatcherT, _OtherNodeT]]": return AllOf(self, other) def __invert__(self) -> "OneOf[_MatcherT]": # Invert using De Morgan's Law so we don't have to complicate types. return OneOf(*[DoesNotMatch(m) for m in self._options]) def __repr__(self) -> str: return f"AllOf({', '.join([repr(o) for o in self._options])})" class _InverseOf(Generic[_MatcherT]): """ Matcher that inverts the match result of its child. You can also construct a :class:`_InverseOf` matcher by using Python's bitwise invert operator with concrete matcher classes or any special matcher. Note that you should refrain from constructing a :class:`_InverseOf` directly, and should instead use the :func:`DoesNotMatch` helper function. For example, the following matches against any identifier that isn't ``True``/``False``:: m.DoesNotMatch(m.OneOf(m.Name("True"), m.Name("False"))) Or you could use the shorthand, like: ~(m.Name("True") | m.Name("False")) """ def __init__(self, matcher: _MatcherT) -> None: self._matcher: _MatcherT = matcher @property def matcher(self) -> _MatcherT: """ The matcher that we will evaluate and invert. If this matcher is true, then :class:`_InverseOf` will be considered not a match, and vice-versa. """ return self._matcher # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: _OtherNodeT) -> "OneOf[Union[_MatcherT, _OtherNodeT]]": # Without a cast, pyre thinks that the below OneOf is type OneOf[object] # even though it has the types passed into it. return cast(OneOf[Union[_MatcherT, _OtherNodeT]], OneOf(self, other)) def __and__(self, other: _OtherNodeT) -> "AllOf[Union[_MatcherT, _OtherNodeT]]": # Without a cast, pyre thinks that the below AllOf is type AllOf[object] # even though it has the types passed into it. return cast(AllOf[Union[_MatcherT, _OtherNodeT]], AllOf(self, other)) def __getattr__(self, key: str) -> object: # We lie about types to make _InverseOf appear transparent. So, its conceivable # that somebody might try to dereference an attribute on the _MatcherT wrapped # node and become surprised that it doesn't work. 
return getattr(self._matcher, key) def __invert__(self) -> _MatcherT: return self._matcher def __repr__(self) -> str: return f"DoesNotMatch({repr(self._matcher)})" class _ExtractMatchingNode(Generic[_MatcherT]): """ Transparent pass-through matcher that captures the node which matches its children, making it available to the caller of :func:`extract` or :func:`extractall`. Note that you should refrain from constructing a :class:`_ExtractMatchingNode` directly, and should instead use the :func:`SaveMatchedNode` helper function. For example, the following will match against any binary operation whose left and right operands are not integers, saving those expressions for later inspection. If used inside :func:`extract` or :func:`extractall`, the resulting dictionary will contain the keys ``left_operand`` and ``right_operand``. m.BinaryOperation( left=m.SaveMatchedNode( m.DoesNotMatch(m.Integer()), "left_operand", ), right=m.SaveMatchedNode( m.DoesNotMatch(m.Integer()), "right_operand", ), ) """ def __init__(self, matcher: _MatcherT, name: str) -> None: self._matcher: _MatcherT = matcher self._name: str = name @property def matcher(self) -> _MatcherT: """ The matcher that we will evaluate and capture matching LibCST nodes for. If this matcher is true, then :class:`_ExtractMatchingNode` will be considered a match and will save the node which matched. """ return self._matcher @property def name(self) -> str: """ The name we will call our captured LibCST node inside the resulting dictionary returned by :func:`extract` or :func:`extractall`. """ return self._name # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: _OtherNodeT) -> "OneOf[Union[_MatcherT, _OtherNodeT]]": # Without a cast, pyre thinks that the below OneOf is type OneOf[object] # even though it has the types passed into it. return cast(OneOf[Union[_MatcherT, _OtherNodeT]], OneOf(self, other)) def __and__(self, other: _OtherNodeT) -> "AllOf[Union[_MatcherT, _OtherNodeT]]": # This doesn't make sense. If we have multiple SaveMatchedNode captures # that are captured with an and, either all of them will be assigned the # same node, or none of them. It makes more sense to move the SaveMatchedNode # up to wrap the AllOf. raise Exception( ( "Cannot use AllOf with SavedMatchedNode children! Instead, you should " + "use SaveMatchedNode(AllOf(options...))." ) ) def __getattr__(self, key: str) -> object: # We lie about types to make _ExtractMatchingNode appear transparent. So, # its conceivable that somebody might try to dereference an attribute on # the _MatcherT wrapped node and become surprised that it doesn't work. return getattr(self._matcher, key) def __invert__(self) -> "_MatcherT": # This doesn't make sense. We don't want to capture a node only if it # doesn't match, since this will never capture anything. raise Exception( ( "Cannot invert a SaveMatchedNode. Instead you should wrap SaveMatchedNode " + "around your inversion itself" ) ) def __repr__(self) -> str: return ( f"SaveMatchedNode(matcher={repr(self._matcher)}, name={repr(self._name)})" ) class MatchIfTrue(Generic[_MatchIfTrueT]): """ Matcher that matches if its child callable returns ``True``. The child callable should take one argument which is the attribute on the LibCST node we are trying to match against. This is useful if you want to do complex logic to determine if an attribute should match or not. 
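As a sketch, to match integer literals whose value exceeds 100 (``value`` on a LibCST :class:`~libcst.Integer` is the raw string token, so the predicate below guards against non-decimal spellings)::

    m.Integer(value=m.MatchIfTrue(lambda value: value.isdigit() and int(value) > 100))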
One example of this is the :func:`MatchRegex` matcher built on top of :class:`MatchIfTrue` which takes a regular expression and matches any string attribute where a regex match is found. For example, to match on any identifier spelled with the letter ``e``:: m.Name(value=m.MatchIfTrue(lambda value: "e" in value)) This can be used in place of any concrete matcher as long as it is not the root matcher. Calling :func:`matches` directly on a :class:`MatchIfTrue` is redundant since you can just call the child callable directly with the node you are passing to :func:`matches`. """ _func: Callable[[_MatchIfTrueT], bool] def __init__(self, func: Callable[[_MatchIfTrueT], bool]) -> None: self._func = func @property def func(self) -> Callable[[_MatchIfTrueT], bool]: """ The function that we will call with a LibCST node in order to determine if we match. If the function returns ``True`` then we consider ourselves to be a match. """ return self._func # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__( self, other: _OtherNodeT ) -> "OneOf[Union[MatchIfTrue[_MatchIfTrueT], _OtherNodeT]]": return OneOf(self, other) def __and__( self, other: _OtherNodeT ) -> "AllOf[Union[MatchIfTrue[_MatchIfTrueT], _OtherNodeT]]": return AllOf(self, other) def __invert__(self) -> "MatchIfTrue[_MatchIfTrueT]": # Construct a wrapped version of MatchIfTrue for typing simplicity. # Without the cast, pyre doesn't seem to think the lambda is valid. return MatchIfTrue(lambda val: not self._func(val)) def __repr__(self) -> str: return f"MatchIfTrue({repr(self._func)})" def MatchRegex(regex: Union[str, Pattern[str]]) -> MatchIfTrue[str]: """ Used as a convenience wrapper to :class:`MatchIfTrue` which allows for matching a string attribute against a regex. ``regex`` can be any regular expression string or a compiled ``Pattern``. This uses Python's re module under the hood and is compatible with syntax documented on `docs.python.org <https://docs.python.org/3/library/re.html>`_. For example, to match against any identifier that is at least one character long and only contains alphabetical characters:: m.Name(value=m.MatchRegex(r'[A-Za-z]+')) This can be used in place of any string literal when constructing a concrete matcher. """ def _match_func(value: object) -> bool: if isinstance(value, str): return bool(re.fullmatch(regex, value)) else: return False return MatchIfTrue(_match_func) class _BaseMetadataMatcher: """ Class that's only around for typing purposes. """ pass class MatchMetadata(_BaseMetadataMatcher): """ Matcher that looks up the metadata on the current node using the provided metadata provider and compares the value on the node against the value provided to :class:`MatchMetadata`. If the metadata provider is unresolved, a :class:`LookupError` exception will be raised, asking you to provide a :class:`~libcst.metadata.MetadataWrapper`. If the metadata value does not exist for a particular node, :class:`MatchMetadata` will be considered not a match. For example, to match against any function call which has one parameter which is used in a load expression context:: m.Call( args=[ m.Arg( m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.LOAD, ) ) ] ) To match against any :class:`~libcst.Name` node for the identifier ``foo`` which is the target of an assignment:: m.Name( value="foo", metadata=m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE, ) ) This can be used in place of any concrete matcher as long as it is not the root matcher.
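Because metadata must be resolved before matching, a typical call site (a sketch; assuming ``import libcst as cst``, with illustrative module text) goes through a :class:`~libcst.metadata.MetadataWrapper`::

    wrapper = cst.MetadataWrapper(cst.parse_module("foo = 1"))
    stores = m.findall(
        wrapper,
        m.Name(
            metadata=m.MatchMetadata(
                meta.ExpressionContextProvider,
                meta.ExpressionContext.STORE,
            )
        ),
    )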
Calling :func:`matches` directly on a :class:`MatchMetadata` is redundant since you can just check the metadata on the root node that you are passing to :func:`matches`. """ def __init__( self, key: Type[meta.BaseMetadataProvider[_MetadataValueT]], value: _MetadataValueT, ) -> None: self._key: Type[meta.BaseMetadataProvider[_MetadataValueT]] = key self._value: _MetadataValueT = value @property def key(self) -> meta.ProviderT: """ The metadata provider that we will use to fetch values when identifying whether a node matches this matcher. We compare the value returned from the metadata provider to the value provided in ``value`` when determining a match. """ return self._key @property def value(self) -> object: """ The value that we will compare against the return from the metadata provider for each node when determining a match. """ return self._value # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: _OtherNodeT) -> "OneOf[Union[MatchMetadata, _OtherNodeT]]": return OneOf(self, other) def __and__(self, other: _OtherNodeT) -> "AllOf[Union[MatchMetadata, _OtherNodeT]]": return AllOf(self, other) def __invert__(self) -> "MatchMetadata": # We intentionally lie here, for the same reason given in the documentation # for DoesNotMatch. return cast(MatchMetadata, _InverseOf(self)) def __repr__(self) -> str: return f"MatchMetadata(key={repr(self._key)}, value={repr(self._value)})" class MatchMetadataIfTrue(_BaseMetadataMatcher): """ Matcher that looks up the metadata on the current node using the provided metadata provider and passes it to a callable which can inspect the metadata further, returning ``True`` if the matcher should be considered a match. If the metadata provider is unresolved, a :class:`LookupError` exception will be raised, asking you to provide a :class:`~libcst.metadata.MetadataWrapper`. If the metadata value does not exist for a particular node, :class:`MatchMetadataIfTrue` will be considered not a match. For example, to match against any arg whose qualified name might be ``typing.Dict``:: m.Call( args=[ m.Arg( m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any(n.name == "typing.Dict" for n in qualnames) ) ) ] ) To match against any :class:`~libcst.Name` node for the identifier ``foo`` as long as that identifier is found at the beginning of an unindented line:: m.Name( value="foo", metadata=m.MatchMetadataIfTrue( meta.PositionProvider, lambda position: position.start.column == 0, ) ) This can be used in place of any concrete matcher as long as it is not the root matcher. Calling :func:`matches` directly on a :class:`MatchMetadataIfTrue` is redundant since you can just check the metadata on the root node that you are passing to :func:`matches`. """ def __init__( self, key: Type[meta.BaseMetadataProvider[_MetadataValueT]], func: Callable[[_MetadataValueT], bool], ) -> None: self._key: Type[meta.BaseMetadataProvider[_MetadataValueT]] = key self._func: Callable[[_MetadataValueT], bool] = func @property def key(self) -> meta.ProviderT: """ The metadata provider that we will use to fetch values when identifying whether a node matches this matcher. We pass the value returned from the metadata provider to the callable given to us in ``func``. """ return self._key @property def func(self) -> Callable[[object], bool]: """ The function that we will call with a value retrieved from the metadata provider provided in ``key``. If the function returns ``True`` then we consider ourselves to be a match.
""" return self._func # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__( self, other: _OtherNodeT ) -> "OneOf[Union[MatchMetadataIfTrue, _OtherNodeT]]": return OneOf(self, other) def __and__( self, other: _OtherNodeT ) -> "AllOf[Union[MatchMetadataIfTrue, _OtherNodeT]]": return AllOf(self, other) def __invert__(self) -> "MatchMetadataIfTrue": # Construct a wrapped version of MatchMetadataIfTrue for typing simplicity. return MatchMetadataIfTrue(self._key, lambda val: not self._func(val)) def __repr__(self) -> str: return f"MatchMetadataIfTrue(key={repr(self._key)}, func={repr(self._func)})" class _BaseWildcardNode: """ A typing-only class for internal helpers in this module to be able to specify that they take a wildcard node type. """ pass class AtLeastN(Generic[_MatcherT], _BaseWildcardNode): """ Matcher that matches ``n`` or more LibCST nodes in a row in a sequence. :class:`AtLeastN` defaults to matching against the :func:`DoNotCare` matcher, so if you do not specify a matcher as a child, :class:`AtLeastN` will match only by count. If you do specify a matcher as a child, :class:`AtLeastN` will instead make sure that each LibCST node matches the matcher supplied. For example, this will match all function calls with at least 3 arguments:: m.Call(args=[m.AtLeastN(n=3)]) This will match all function calls with 3 or more integer arguments:: m.Call(args=[m.AtLeastN(n=3, matcher=m.Arg(m.Integer()))]) You can combine sequence matchers with concrete matchers and special matchers and it will behave as you expect. For example, this will match all function calls that have 2 or more integer arguments in a row, followed by any arbitrary argument:: m.Call(args=[m.AtLeastN(n=2, matcher=m.Arg(m.Integer())), m.DoNotCare()]) And finally, this will match all function calls that have at least 5 arguments, the final one being an integer:: m.Call(args=[m.AtLeastN(n=4), m.Arg(m.Integer())]) """ def __init__( self, matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT, *, n: int, ) -> None: if n < 0: raise Exception(f"{self.__class__.__name__} n attribute must be positive") self._n: int = n self._matcher: Union[_MatcherT, DoNotCareSentinel] = matcher @property def n(self) -> int: """ The number of nodes in a row that must match :attr:`AtLeastN.matcher` for this matcher to be considered a match. If there are less than ``n`` matches, this matcher will not be considered a match. If there are equal to or more than ``n`` matches, this matcher will be considered a match. """ return self._n @property def matcher(self) -> Union[_MatcherT, DoNotCareSentinel]: """ The matcher which each node in a sequence needs to match. """ return self._matcher # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: object) -> NoReturn: raise Exception("AtLeastN cannot be used in a OneOf matcher") def __and__(self, other: object) -> NoReturn: raise Exception("AtLeastN cannot be used in an AllOf matcher") def __invert__(self) -> NoReturn: raise Exception("Cannot invert an AtLeastN matcher!") def __repr__(self) -> str: if self._n == 0: return f"ZeroOrMore({repr(self._matcher)})" else: return f"AtLeastN({repr(self._matcher)}, n={self._n})" def ZeroOrMore( matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT ) -> AtLeastN[Union[_MatcherT, DoNotCareSentinel]]: """ Used as a convenience wrapper to :class:`AtLeastN` when ``n`` is equal to ``0``. Use this when you want to match against any number of nodes in a sequence. 
For example, this will match any function call with zero or more arguments, as long as all of the arguments are integers:: m.Call(args=[m.ZeroOrMore(m.Arg(m.Integer()))]) This will match any function call where the first argument is an integer and it doesn't matter what the rest of the arguments are:: m.Call(args=[m.Arg(m.Integer()), m.ZeroOrMore()]) You will often want to use :class:`ZeroOrMore` on both sides of a concrete matcher in order to match against sequences that contain a particular node in an arbitrary location. For example, the following will match any function call that takes in at least one string argument anywhere:: m.Call(args=[m.ZeroOrMore(), m.Arg(m.SimpleString()), m.ZeroOrMore()]) """ return cast(AtLeastN[Union[_MatcherT, DoNotCareSentinel]], AtLeastN(matcher, n=0)) class AtMostN(Generic[_MatcherT], _BaseWildcardNode): """ Matcher that matches ``n`` or fewer LibCST nodes in a row in a sequence. :class:`AtMostN` defaults to matching against the :func:`DoNotCare` matcher, so if you do not specify a matcher as a child, :class:`AtMostN` will match only by count. If you do specify a matcher as a child, :class:`AtMostN` will instead make sure that each LibCST node matches the matcher supplied. For example, this will match all function calls with 3 or fewer arguments:: m.Call(args=[m.AtMostN(n=3)]) This will match all function calls with 0, 1 or 2 string arguments:: m.Call(args=[m.AtMostN(n=2, matcher=m.Arg(m.SimpleString()))]) You can combine sequence matchers with concrete matchers and special matchers and it will behave as you expect. For example, this will match all function calls that have 0, 1 or 2 string arguments in a row, followed by an arbitrary argument:: m.Call(args=[m.AtMostN(n=2, matcher=m.Arg(m.SimpleString())), m.DoNotCare()]) And finally, this will match all function calls that have at most 3 arguments, the final one being a string:: m.Call(args=[m.AtMostN(n=2), m.Arg(m.SimpleString())]) """ def __init__( self, matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT, *, n: int, ) -> None: if n < 0: raise Exception(f"{self.__class__.__name__} n attribute must be positive") self._n: int = n self._matcher: Union[_MatcherT, DoNotCareSentinel] = matcher @property def n(self) -> int: """ The number of nodes in a row that must match :attr:`AtMostN.matcher` for this matcher to be considered a match. If there are less than or equal to ``n`` matches, then this matcher will be considered a match. Any more than ``n`` matches in a row and this matcher will stop matching and be considered not a match. """ return self._n @property def matcher(self) -> Union[_MatcherT, DoNotCareSentinel]: """ The matcher which each node in a sequence needs to match. """ return self._matcher # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: object) -> NoReturn: raise Exception("AtMostN cannot be used in a OneOf matcher") def __and__(self, other: object) -> NoReturn: raise Exception("AtMostN cannot be used in an AllOf matcher") def __invert__(self) -> NoReturn: raise Exception("Cannot invert an AtMostN matcher!") def __repr__(self) -> str: if self._n == 1: return f"ZeroOrOne({repr(self._matcher)})" else: return f"AtMostN({repr(self._matcher)}, n={self._n})" def ZeroOrOne( matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT ) -> AtMostN[Union[_MatcherT, DoNotCareSentinel]]: """ Used as a convenience wrapper to :class:`AtMostN` when ``n`` is equal to ``1``. This is effectively a maybe clause.
For example, this will match any function call with zero or one integer argument:: m.Call(args=[m.ZeroOrOne(m.Arg(m.Integer()))]) This will match any function call that has two or three arguments, and the first and last arguments are strings:: m.Call(args=[m.Arg(m.SimpleString()), m.ZeroOrOne(), m.Arg(m.SimpleString())]) """ return cast(AtMostN[Union[_MatcherT, DoNotCareSentinel]], AtMostN(matcher, n=1)) def DoesNotMatch(obj: _OtherNodeT) -> _OtherNodeT: """ Matcher helper that inverts the match result of its child. You can also invert a matcher by using Python's bitwise invert operator on concrete matchers or any special matcher. For example, the following matches against any identifier that isn't ``True``/``False``:: m.DoesNotMatch(m.OneOf(m.Name("True"), m.Name("False"))) Or you could use the shorthand, like:: ~(m.Name("True") | m.Name("False")) This can be used in place of any concrete matcher as long as it is not the root matcher. Calling :func:`matches` directly on a :func:`DoesNotMatch` is redundant since you can invert the return of :func:`matches` using a bitwise not. """ # This type is a complete, dirty lie, but there's no way to recursively apply # a parameter to each type inside a Union that may be in a _OtherNodeT. # However, given the way _InverseOf works (it will unwrap itself if # inverted again), and the way we apply De Morgan's law for OneOf and AllOf, # this lie ends up getting us correct typing. Anywhere a node is valid, using # DoesNotMatch(node) is also valid. # # ~MatchIfTrue is still MatchIfTrue # ~MatchMetadataIfTrue is still MatchMetadataIfTrue # ~OneOf[x] is AllOf[~x] # ~AllOf[x] is OneOf[~x] # ~~x is x # # So, under all circumstances, since OneOf/AllOf are both allowed in every # instance, and given that inverting MatchIfTrue is still MatchIfTrue, # and inverting an inverted value returns us the original, its clear that # there are no operations we can possibly do that bring us outside of the # types specified in the concrete matchers as long as we lie that DoesNotMatch # returns the value passed in. if isinstance( obj, ( BaseMatcherNode, MatchIfTrue, _BaseMetadataMatcher, _InverseOf, _ExtractMatchingNode, ), ): # We can use the overridden __invert__ in this case. Pyre doesn't think # we can though, and casting doesn't fix the issue. inverse = ~obj else: # We must wrap in a _InverseOf. inverse = _InverseOf(obj) return cast(_OtherNodeT, inverse) def SaveMatchedNode(matcher: _OtherNodeT, name: str) -> _OtherNodeT: """ Matcher helper that captures the matched node that matched against a matcher class, making it available in the dictionary returned by :func:`extract` or :func:`extractall`. For example, the following will match against any binary operation whose left and right operands are not integers, saving those expressions for later inspection. If used inside :func:`extract` or :func:`extractall`, the resulting dictionary will contain the keys ``left_operand`` and ``right_operand``:: m.BinaryOperation( left=m.SaveMatchedNode( m.DoesNotMatch(m.Integer()), "left_operand", ), right=m.SaveMatchedNode( m.DoesNotMatch(m.Integer()), "right_operand", ), ) This can be used in place of any concrete matcher as long as it is not the root matcher. Calling :func:`extract` directly on a :func:`SaveMatchedNode` is redundant since you already have the reference to the node itself. 
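A quick sketch of retrieval (assuming ``import libcst as cst`` and ``import libcst.matchers as m``)::

    node = cst.parse_expression("foo + 1")
    result = m.extract(
        node,
        m.BinaryOperation(left=m.SaveMatchedNode(m.Name(), "lhs")),
    )
    # result["lhs"] is the Name node for "foo"; result is None if the
    # shape does not match.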
""" return cast(_OtherNodeT, _ExtractMatchingNode(matcher, name)) def _matches_zero_nodes( matcher: Union[ BaseMatcherNode, _BaseWildcardNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, DoNotCareSentinel, ] ) -> bool: if isinstance(matcher, AtLeastN) and matcher.n == 0: return True if isinstance(matcher, AtMostN): return True if isinstance(matcher, _ExtractMatchingNode): return _matches_zero_nodes(matcher.matcher) return False @dataclass(frozen=True) class _SequenceMatchesResult: sequence_capture: Optional[ Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]] ] matched_nodes: Optional[ Union[libcst.CSTNode, MaybeSentinel, Sequence[libcst.CSTNode]] ] def _sequence_matches( # noqa: C901 nodes: Sequence[Union[MaybeSentinel, libcst.CSTNode]], matchers: Sequence[ Union[ BaseMatcherNode, _BaseWildcardNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, DoNotCareSentinel, ] ], metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object], ) -> _SequenceMatchesResult: if not nodes and not matchers: # Base case, empty lists are always matches return _SequenceMatchesResult({}, None) if not nodes and matchers: # Base case, we have one or more matcher that wasn't matched if all(_matches_zero_nodes(m) for m in matchers): return _SequenceMatchesResult( # pyre-ignore[16]: `MatchIfTrue` has no attribute `name`. {m.name: () for m in matchers if isinstance(m, _ExtractMatchingNode)}, (), ) else: return _SequenceMatchesResult(None, None) if nodes and not matchers: # Base case, we have nodes left that don't match any matcher return _SequenceMatchesResult(None, None) # Recursive case, nodes and matchers LHS matches node = nodes[0] matcher = matchers[0] if isinstance(matcher, DoNotCareSentinel): # We don't care about the value for this node. return _SequenceMatchesResult( _sequence_matches( nodes[1:], matchers[1:], metadata_lookup ).sequence_capture, node, ) elif isinstance(matcher, _BaseWildcardNode): if isinstance(matcher, AtMostN): if matcher.n > 0: # First, assume that this does match a node (greedy). # Consume one node since it matched this matcher. attribute_capture = _attribute_matches( nodes[0], matcher.matcher, metadata_lookup ) if attribute_capture is not None: result = _sequence_matches( nodes[1:], [AtMostN(matcher.matcher, n=matcher.n - 1), *matchers[1:]], metadata_lookup, ) if result.sequence_capture is not None: matched = result.matched_nodes assert isinstance(matched, Sequence) return _SequenceMatchesResult( {**attribute_capture, **result.sequence_capture}, # pyre-fixme[6]: Expected `Union[None, Sequence[libcst._n... (node, *matched), ) # Finally, assume that this does not match the current node. # Consume the matcher but not the node. return _SequenceMatchesResult( _sequence_matches( nodes, matchers[1:], metadata_lookup ).sequence_capture, (), ) elif isinstance(matcher, AtLeastN): if matcher.n > 0: # Only match if we can consume one of the matches, since we still # need to match N nodes. attribute_capture = _attribute_matches( nodes[0], matcher.matcher, metadata_lookup ) if attribute_capture is not None: result = _sequence_matches( nodes[1:], [AtLeastN(matcher.matcher, n=matcher.n - 1), *matchers[1:]], metadata_lookup, ) if result.sequence_capture is not None: matched = result.matched_nodes assert isinstance(matched, Sequence) return _SequenceMatchesResult( {**attribute_capture, **result.sequence_capture}, # pyre-fixme[6]: Expected `Union[None, Sequence[libcst._n... 
(node, *matched), ) return _SequenceMatchesResult(None, None) else: # First, assume that this does match a node (greedy). # Consume one node since it matched this matcher. attribute_capture = _attribute_matches( nodes[0], matcher.matcher, metadata_lookup ) if attribute_capture is not None: result = _sequence_matches(nodes[1:], matchers, metadata_lookup) if result.sequence_capture is not None: matched = result.matched_nodes assert isinstance(matched, Sequence) return _SequenceMatchesResult( {**attribute_capture, **result.sequence_capture}, # pyre-fixme[6]: Expected `Union[None, Sequence[libcst._n... (node, *matched), ) # Now, assume that this does not match the current node. # Consume the matcher but not the node. return _SequenceMatchesResult( _sequence_matches( nodes, matchers[1:], metadata_lookup ).sequence_capture, (), ) else: # There are no other types of wildcard consumers, but we're making # pyre happy with that fact. raise Exception(f"Logic error unrecognized wildcard {type(matcher)}!") elif isinstance(matcher, _ExtractMatchingNode): # See if the raw matcher matches. If it does, capture the sequence we matched and store it. result = _sequence_matches( nodes, [matcher.matcher, *matchers[1:]], metadata_lookup ) if result.sequence_capture is not None: return _SequenceMatchesResult( { # Our own match capture comes first, since we wnat to allow the same # name later in the sequence to override us. matcher.name: result.matched_nodes, **result.sequence_capture, }, result.matched_nodes, ) return _SequenceMatchesResult(None, None) match_capture = _matches(node, matcher, metadata_lookup) if match_capture is not None: # These values match directly result = _sequence_matches(nodes[1:], matchers[1:], metadata_lookup) if result.sequence_capture is not None: return _SequenceMatchesResult( {**match_capture, **result.sequence_capture}, node ) # Failed recursive case, no match return _SequenceMatchesResult(None, None) _AttributeValueT = Optional[Union[MaybeSentinel, libcst.CSTNode, str, bool]] _AttributeMatcherT = Optional[Union[BaseMatcherNode, DoNotCareSentinel, str, bool]] def _attribute_matches( # noqa: C901 node: Union[_AttributeValueT, Sequence[_AttributeValueT]], matcher: Union[_AttributeMatcherT, Sequence[_AttributeMatcherT]], metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object], ) -> Optional[Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]]]: if isinstance(matcher, DoNotCareSentinel): # We don't care what this is, so don't penalize a non-match. return {} if isinstance(matcher, _InverseOf): # Return the opposite evaluation return ( {} if _attribute_matches(node, matcher.matcher, metadata_lookup) is None else None ) if isinstance(matcher, _ExtractMatchingNode): attribute_capture = _attribute_matches(node, matcher.matcher, metadata_lookup) if attribute_capture is not None: return { # Our own match capture comes last, since its higher in the tree # so we want to override any child match captures by the same name. **attribute_capture, matcher.name: node, } return None if isinstance(matcher, MatchIfTrue): # We should only return if the matcher function is true. 
return {} if matcher.func(node) else None if matcher is None: # Should exactly be None return {} if node is None else None if isinstance(matcher, str): # Should exactly match matcher text return {} if node == matcher else None if isinstance(matcher, bool): # Should exactly match matcher bool return {} if node is matcher else None if isinstance(node, collections.abc.Sequence): # Given we've generated the types for matchers based on LibCST, we know that # this is true unless the node is badly constructed and types were ignored. node = cast(Sequence[Union[MaybeSentinel, libcst.CSTNode]], node) if isinstance(matcher, OneOf): # We should compare against each of the sequences in the OneOf for m in matcher.options: if isinstance(m, collections.abc.Sequence): # Should match the sequence of requested nodes result = _sequence_matches(node, m, metadata_lookup) if result.sequence_capture is not None: return result.sequence_capture elif isinstance(m, MatchIfTrue): # TODO: return captures return {} if m.func(node) else None elif isinstance(matcher, AllOf): # We should compare against each of the sequences in the AllOf all_captures = {} for m in matcher.options: if isinstance(m, collections.abc.Sequence): # Should match the sequence of requested nodes result = _sequence_matches(node, m, metadata_lookup) if result.sequence_capture is None: return None all_captures = {**all_captures, **result.sequence_capture} else: # The value in the AllOf wasn't a sequence, it can't match. return None # We passed the checks above for each node, so we passed. return all_captures elif isinstance(matcher, collections.abc.Sequence): # We should assume that this matcher is a sequence to compare. Given # the way we generate match classes, this should be true unless the # match is badly constructed and types were ignored. return _sequence_matches( node, cast( Sequence[ Union[ BaseMatcherNode, _BaseWildcardNode, MatchIfTrue[libcst.CSTNode], DoNotCareSentinel, ] ], matcher, ), metadata_lookup, ).sequence_capture # We exhausted our possibilities, there's no match return None # Base case, should match node via matcher. We know the type of node is # correct here because we generate matchers directly off of LibCST nodes, # so the only way it is wrong is if the node was badly constructed and # types were ignored. return _matches( cast(Union[MaybeSentinel, libcst.CSTNode], node), # pyre-fixme[24]: Generic type `MatchIfTrue` expects 1 type parameter. cast(Union[BaseMatcherNode, MatchIfTrue, _BaseMetadataMatcher], matcher), metadata_lookup, ) def _metadata_matches( # noqa: C901 node: libcst.CSTNode, metadata: Union[ _BaseMetadataMatcher, AllOf[_BaseMetadataMatcher], OneOf[_BaseMetadataMatcher], _InverseOf[_BaseMetadataMatcher], _ExtractMatchingNode[_BaseMetadataMatcher], ], metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object], ) -> Optional[Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]]]: if isinstance(metadata, OneOf): for metadata in metadata.options: metadata_capture = _metadata_matches(node, metadata, metadata_lookup) if metadata_capture is not None: return metadata_capture return None elif isinstance(metadata, AllOf): all_captures = {} for metadata in metadata.options: metadata_capture = _metadata_matches(node, metadata, metadata_lookup) if metadata_capture is None: return None all_captures = {**all_captures, **metadata_capture} # We passed the above checks, so we pass the matcher. 
return all_captures elif isinstance(metadata, _InverseOf): return ( {} if _metadata_matches(node, metadata.matcher, metadata_lookup) is None else None ) elif isinstance(metadata, _ExtractMatchingNode): metadata_capture = _metadata_matches(node, metadata.matcher, metadata_lookup) if metadata_capture is not None: return { # Our own match capture comes last, since its higher in the tree # so we want to override any child match captures by the same name. **metadata_capture, metadata.name: node, } return None elif isinstance(metadata, MatchMetadataIfTrue): actual_value = metadata_lookup(metadata.key, node) if actual_value is _METADATA_MISSING_SENTINEL: return None return {} if metadata.func(actual_value) else None elif isinstance(metadata, MatchMetadata): actual_value = metadata_lookup(metadata.key, node) if actual_value is _METADATA_MISSING_SENTINEL: return None return {} if actual_value == metadata.value else None else: raise Exception("Logic error!") def _node_matches( # noqa: C901 node: libcst.CSTNode, matcher: Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, _InverseOf[ Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], _ExtractMatchingNode[ Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], ], metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object], ) -> Optional[Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]]]: # If this is a _InverseOf, then invert the result. if isinstance(matcher, _InverseOf): return ( {} if _node_matches(node, matcher.matcher, metadata_lookup) is None else None ) # If this is an _ExtractMatchingNode, grab the resulting call and pass the check # forward. if isinstance(matcher, _ExtractMatchingNode): node_capture = _node_matches(node, matcher.matcher, metadata_lookup) if node_capture is not None: return { # We come last here since we're further up the tree, so we want to # override any identically named child match nodes. **node_capture, matcher.name: node, } return None # Now, check if this is a lambda matcher. if isinstance(matcher, MatchIfTrue): return {} if matcher.func(node) else None if isinstance(matcher, (MatchMetadata, MatchMetadataIfTrue)): return _metadata_matches(node, matcher, metadata_lookup) # Now, check that the node and matcher classes are the same. if node.__class__.__name__ != matcher.__class__.__name__: return None # Now, check that the children match for each attribute. all_captures = {} for field in fields(matcher): if field.name == "_metadata": # We don't care about this field, its a dataclasses implementation detail. continue elif field.name == "metadata": # Special field we respect for matching metadata on a particular node. desired = getattr(matcher, field.name) if isinstance(desired, DoNotCareSentinel): # We don't care about this continue metadata_capture = _metadata_matches(node, desired, metadata_lookup) if metadata_capture is None: return None all_captures = {**all_captures, **metadata_capture} else: desired = getattr(matcher, field.name) actual = getattr(node, field.name) attribute_capture = _attribute_matches(actual, desired, metadata_lookup) if attribute_capture is None: return None all_captures = {**all_captures, **attribute_capture} # We didn't find a non-match in the above loop, so it matches! 
return all_captures def _matches( node: Union[MaybeSentinel, libcst.CSTNode], matcher: Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, _InverseOf[ Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], _ExtractMatchingNode[ Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], ], metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object], ) -> Optional[Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]]]: if isinstance(node, MaybeSentinel): # We can't possibly match on a maybe sentinel, so it only matches if # the matcher we have is a _InverseOf. return {} if isinstance(matcher, _InverseOf) else None # Now, evaluate the matcher node itself. if isinstance(matcher, (OneOf, TypeOf)): for matcher in matcher.options: node_capture = _node_matches(node, matcher, metadata_lookup) if node_capture is not None: return node_capture return None elif isinstance(matcher, AllOf): all_captures = {} for matcher in matcher.options: node_capture = _node_matches(node, matcher, metadata_lookup) if node_capture is None: return None all_captures = {**all_captures, **node_capture} return all_captures else: return _node_matches(node, matcher, metadata_lookup) def _construct_metadata_fetcher_null() -> ( Callable[[meta.ProviderT, libcst.CSTNode], object] ): def _fetch(provider: meta.ProviderT, node: libcst.CSTNode) -> NoReturn: raise LookupError( f"{provider.__name__} is not resolved; did you forget a MetadataWrapper?" ) return _fetch def _construct_metadata_fetcher_dependent( dependent_class: libcst.MetadataDependent, ) -> Callable[[meta.ProviderT, libcst.CSTNode], object]: def _fetch(provider: meta.ProviderT, node: libcst.CSTNode) -> object: return dependent_class.get_metadata(provider, node, _METADATA_MISSING_SENTINEL) return _fetch def _construct_metadata_fetcher_wrapper( wrapper: libcst.MetadataWrapper, ) -> Callable[[meta.ProviderT, libcst.CSTNode], object]: metadata: Dict[meta.ProviderT, Mapping[libcst.CSTNode, object]] = {} def _fetch(provider: meta.ProviderT, node: libcst.CSTNode) -> object: if provider not in metadata: metadata[provider] = wrapper.resolve(provider) node_metadata = metadata[provider].get(node, _METADATA_MISSING_SENTINEL) if isinstance(node_metadata, LazyValue): node_metadata = node_metadata() return node_metadata return _fetch def extract( node: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode], matcher: BaseMatcherNode, *, metadata_resolver: Optional[ Union[libcst.MetadataDependent, libcst.MetadataWrapper] ] = None, ) -> Optional[Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]]]: """ Given an arbitrary node from a LibCST tree, and an arbitrary matcher, returns a dictionary of extracted children of the tree if the node matches the shape defined by the matcher. Note that the node can also be a :class:`~libcst.RemovalSentinel` or a :class:`~libcst.MaybeSentinel` in order to use extract directly on transform results and node attributes. In these cases, :func:`extract` will always return ``None``. If the node matches the shape defined by the matcher, the return will be a dictionary whose keys are defined by the :func:`SaveMatchedNode` name parameter, and the values will be the node or sequence that was present at that location in the shape defined by the matcher. In the case of multiple :func:`SaveMatchedNode` matches with the same name, parent nodes will take prioirity over child nodes, and nodes later in sequences will take priority over nodes earlier in sequences. 
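As a short sketch of both outcomes (assuming the usual ``cst``/``m`` import aliases)::

    m.extract(
        cst.parse_expression("foo()"),
        m.Call(func=m.SaveMatchedNode(m.Name(), "callee")),
    )
    # -> {"callee": Name(value="foo", ...)}

    m.extract(
        cst.parse_expression("1"),
        m.Call(func=m.SaveMatchedNode(m.Name(), "callee")),
    )
    # -> None, since an Integer is not a Call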
The matcher can be any concrete matcher that subclasses from :class:`BaseMatcherNode`, or a :class:`OneOf`/:class:`AllOf` special matcher. It cannot be a :class:`MatchIfTrue` or a :func:`DoesNotMatch` matcher since these are redundant. It cannot be a :class:`AtLeastN` or :class:`AtMostN` matcher because these types are wildcards which can only be used inside sequences. """ if isinstance(node, RemovalSentinel): # We can't possibly match on a removal sentinel, so it doesn't match. return None if isinstance(matcher, (AtLeastN, AtMostN, MatchIfTrue, _BaseMetadataMatcher)): # We can't match this, since these matchers are forbidden at top level. # These are not subclasses of BaseMatcherNode, but in the case that the # user is not using type checking, this should still behave correctly. return None if metadata_resolver is None: fetcher = _construct_metadata_fetcher_null() elif isinstance(metadata_resolver, libcst.MetadataWrapper): fetcher = _construct_metadata_fetcher_wrapper(metadata_resolver) else: fetcher = _construct_metadata_fetcher_dependent(metadata_resolver) return _matches(node, matcher, fetcher) def matches( node: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode], matcher: BaseMatcherNode, *, metadata_resolver: Optional[ Union[libcst.MetadataDependent, libcst.MetadataWrapper] ] = None, ) -> bool: """ Given an arbitrary node from a LibCST tree, and an arbitrary matcher, returns ``True`` if the node matches the shape defined by the matcher. Note that the node can also be a :class:`~libcst.RemovalSentinel` or a :class:`~libcst.MaybeSentinel` in order to use matches directly on transform results and node attributes. In these cases, :func:`matches` will always return ``False``. The matcher can be any concrete matcher that subclasses from :class:`BaseMatcherNode`, or a :class:`OneOf`/:class:`AllOf` special matcher. It cannot be a :class:`MatchIfTrue` or a :func:`DoesNotMatch` matcher since these are redundant. It cannot be a :class:`AtLeastN` or :class:`AtMostN` matcher because these types are wildcards which can only be used inside sequences. """ return extract(node, matcher, metadata_resolver=metadata_resolver) is not None class _FindAllVisitor(libcst.CSTVisitor): def __init__( self, matcher: Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, _InverseOf[ Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], ], metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object], ) -> None: self.matcher = matcher self.metadata_lookup = metadata_lookup self.found_nodes: List[libcst.CSTNode] = [] self.extracted_nodes: List[ Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]] ] = [] def on_visit(self, node: libcst.CSTNode) -> bool: match = _matches(node, self.matcher, self.metadata_lookup) if match is not None: self.found_nodes.append(node) self.extracted_nodes.append(match) return True def _find_or_extract_all( tree: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode, meta.MetadataWrapper], matcher: Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, # The inverse clause is left off of the public functions `findall` and # `extractall` because we play a dirty trick. We lie to the typechecker # that `DoesNotMatch` returns identity, so the public functions don't # need to be aware of inverses. If we could represent predicate logic # in python types we could get away with this, but that's not the state # of things right now. 
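# (For example, a caller can write findall(tree, DoesNotMatch(some_matcher)) and
# the typechecker sees an ordinary matcher, while at runtime the value is an
# _InverseOf handled by this union; "some_matcher" here is only a placeholder.)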
_InverseOf[ Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], ], *, metadata_resolver: Optional[ Union[libcst.MetadataDependent, libcst.MetadataWrapper] ] = None, ) -> Tuple[ Sequence[libcst.CSTNode], Sequence[Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]]], ]: if isinstance(tree, (RemovalSentinel, MaybeSentinel)): # We can't possibly match on a removal sentinel, so it doesn't match. return [], [] if isinstance(matcher, (AtLeastN, AtMostN)): # We can't match this, since these matchers are forbidden at top level. # These are not subclasses of BaseMatcherNode, but in the case that the # user is not using type checking, this should still behave correctly. return [], [] if isinstance(tree, meta.MetadataWrapper) and metadata_resolver is None: # Provide a convenience for calling findall directly on a MetadataWrapper. metadata_resolver = tree if metadata_resolver is None: fetcher = _construct_metadata_fetcher_null() elif isinstance(metadata_resolver, libcst.MetadataWrapper): fetcher = _construct_metadata_fetcher_wrapper(metadata_resolver) else: fetcher = _construct_metadata_fetcher_dependent(metadata_resolver) finder = _FindAllVisitor(matcher, fetcher) tree.visit(finder) return finder.found_nodes, finder.extracted_nodes def findall( tree: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode, meta.MetadataWrapper], matcher: Union[BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher], *, metadata_resolver: Optional[ Union[libcst.MetadataDependent, libcst.MetadataWrapper] ] = None, ) -> Sequence[libcst.CSTNode]: """ Given an arbitrary node from a LibCST tree and an arbitrary matcher, iterates over that node and all children returning a sequence of all child nodes that match the given matcher. Note that the tree can also be a :class:`~libcst.RemovalSentinel` or a :class:`~libcst.MaybeSentinel` in order to use findall directly on transform results and node attributes. In these cases, :func:`findall` will always return an empty sequence. Note also that instead of a LibCST tree, you can instead pass in a :class:`~libcst.metadata.MetadataWrapper`. This mirrors the fact that you can call ``visit`` on a :class:`~libcst.metadata.MetadataWrapper` in order to iterate over it with a transform. If you provide a wrapper for the tree and do not set the ``metadata_resolver`` parameter specifically, it will automatically be set to the wrapper for you. The matcher can be any concrete matcher that subclasses from :class:`BaseMatcherNode`, or a :class:`OneOf`/:class:`AllOf` special matcher. Unlike :func:`matches`, it can also be a :class:`MatchIfTrue` or :func:`DoesNotMatch` matcher, since we are traversing the tree looking for matches. It cannot be a :class:`AtLeastN` or :class:`AtMostN` matcher because these types are wildcards which can only be used inside sequences. 
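    For example, a minimal illustrative sketch::

        import libcst as cst
        import libcst.matchers as m

        module = cst.parse_module("x = 1\ny = x\n")
        names = m.findall(module, m.Name())
        # names holds every Name node in the tree: the targets x and y, plus
        # the reference to x.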
""" nodes, _ = _find_or_extract_all(tree, matcher, metadata_resolver=metadata_resolver) return nodes def extractall( tree: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode, meta.MetadataWrapper], matcher: Union[BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher], *, metadata_resolver: Optional[ Union[libcst.MetadataDependent, libcst.MetadataWrapper] ] = None, ) -> Sequence[Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]]]: """ Given an arbitrary node from a LibCST tree and an arbitrary matcher, iterates over that node and all children returning a sequence of dictionaries representing the saved and extracted children specified by :func:`SaveMatchedNode` for each match found in the tree. This is analogous to running a :func:`findall` over a tree, then running :func:`extract` with the same matcher over each of the returned nodes. Note that the tree can also be a :class:`~libcst.RemovalSentinel` or a :class:`~libcst.MaybeSentinel` in order to use extractall directly on transform results and node attributes. In these cases, :func:`extractall` will always return an empty sequence. Note also that instead of a LibCST tree, you can instead pass in a :class:`~libcst.metadata.MetadataWrapper`. This mirrors the fact that you can call ``visit`` on a :class:`~libcst.metadata.MetadataWrapper` in order to iterate over it with a transform. If you provide a wrapper for the tree and do not set the ``metadata_resolver`` parameter specifically, it will automatically be set to the wrapper for you. The matcher can be any concrete matcher that subclasses from :class:`BaseMatcherNode`, or a :class:`OneOf`/:class:`AllOf` special matcher. Unlike :func:`matches`, it can also be a :class:`MatchIfTrue` or :func:`DoesNotMatch` matcher, since we are traversing the tree looking for matches. It cannot be a :class:`AtLeastN` or :class:`AtMostN` matcher because these types are wildcards which can only be usedi inside sequences. """ _, extractions = _find_or_extract_all( tree, matcher, metadata_resolver=metadata_resolver ) return extractions class _ReplaceTransformer(libcst.CSTTransformer): def __init__( self, matcher: Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, _InverseOf[ Union[ BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], ], metadata_lookup: Callable[[meta.ProviderT, libcst.CSTNode], object], replacement: Union[ MaybeSentinel, RemovalSentinel, libcst.CSTNode, Callable[ [ libcst.CSTNode, Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]], ], Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode], ], ], ) -> None: self.matcher = matcher self.metadata_lookup = metadata_lookup self.replacement: Callable[ [ libcst.CSTNode, Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]], ], Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode], ] if inspect.isfunction(replacement): self.replacement = replacement elif isinstance(replacement, (MaybeSentinel, RemovalSentinel)): self.replacement = lambda node, matches: replacement else: # pyre-ignore We know this is a CSTNode. self.replacement = lambda node, matches: replacement.deep_clone() # We run into a really weird problem here, where we need to run the match # and extract step on the original node in order for metadata to work. # However, if we do that, then using things like `deep_replace` will fail # since any extracted nodes are the originals, not the updates and LibCST # does replacement by identity for safety reasons. 
If we try to run the # match and extract step on the updated node (or twice, once for the match # and once for the extract), it will fail to extract if any metadata-based # matchers are used. So, we try to compromise with the best of both worlds. # We track all node updates, and when we send the extracted nodes to the # replacement callable, we look up the original nodes and replace them with # updated nodes. In the case that an update made the node no-longer exist, # we act as if there was not a match (because in reality, there would not # have been if we had run the matcher on the update). self.node_lut: Dict[libcst.CSTNode, libcst.CSTNode] = {} def _node_translate( self, node_or_sequence: Union[libcst.CSTNode, Sequence[libcst.CSTNode]] ) -> Union[libcst.CSTNode, Sequence[libcst.CSTNode]]: if isinstance(node_or_sequence, Sequence): return tuple(self.node_lut[node] for node in node_or_sequence) else: return self.node_lut[node_or_sequence] def _extraction_translate( self, extracted: Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]] ) -> Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]]: return {key: self._node_translate(val) for key, val in extracted.items()} def on_leave( self, original_node: libcst.CSTNode, updated_node: libcst.CSTNode ) -> Union[libcst.CSTNode, MaybeSentinel, RemovalSentinel]: # Track original to updated node mapping for this node. self.node_lut[original_node] = updated_node # This gets complicated. We need to do the match on the original node, # but we want to do the extraction on the updated node. This is so # metadata works properly in matchers. So, if we get a match, we fix # up the nodes in the match and return that to the replacement lambda. extracted = _matches(original_node, self.matcher, self.metadata_lookup) if extracted is not None: try: # Attempt to do a translation from original to updated node. extracted = self._extraction_translate(extracted) except KeyError: # One of the nodes we looked up doesn't exist anymore, this # is no longer a match. This can happen if a child node was # modified, making this original match not applicable anymore. extracted = None if extracted is not None: # We're replacing this node entirely, so don't save the original # updated node. We don't want this to be part of a parent match # since we can't guarantee that the update matches anymore. del self.node_lut[original_node] return self.replacement(updated_node, extracted) return updated_node def replace( tree: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode, meta.MetadataWrapper], matcher: Union[BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher], replacement: Union[ MaybeSentinel, RemovalSentinel, libcst.CSTNode, Callable[ [ libcst.CSTNode, Dict[str, Union[libcst.CSTNode, Sequence[libcst.CSTNode]]], ], Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode], ], ], *, metadata_resolver: Optional[ Union[libcst.MetadataDependent, libcst.MetadataWrapper] ] = None, ) -> Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode]: """ Given an arbitrary node from a LibCST tree and an arbitrary matcher, iterates over that node and all children and replaces each node that matches the supplied matcher with a supplied replacement. Note that the replacement can either be a valid node type, or a callable which takes the matched node and a dictionary of any extracted child values and returns a valid node type. If you provide a valid LibCST node type, :func:`replace` will replace every node that matches the supplied matcher with the replacement node. 
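    For example, a minimal illustrative sketch (the module text and names are
    hypothetical)::

        import libcst as cst
        import libcst.matchers as m

        module = cst.parse_module("x = y\n")
        updated = m.replace(module, m.Name("y"), cst.Name("z"))
        # updated is a new Module whose code renders as "x = z\n".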
If you provide a callable, :func:`replace` will run :func:`extract` over all matched nodes and call the callable with both the node that should be replaced and the dictionary returned by :func:`extract`. Under all circumstances a new tree is returned. :func:`replace` should be viewed as a short-cut to writing a transform which also returns a new tree even when no changes are applied. Note that the tree can also be a :class:`~libcst.RemovalSentinel` or a :class:`~libcst.MaybeSentinel` in order to use replace directly on transform results and node attributes. In these cases, :func:`replace` will return the same :class:`~libcst.RemovalSentinel` or :class:`~libcst.MaybeSentinel`. Note also that instead of a LibCST tree, you can instead pass in a :class:`~libcst.metadata.MetadataWrapper`. This mirrors the fact that you can call ``visit`` on a :class:`~libcst.metadata.MetadataWrapper` in order to iterate over it with a transform. If you provide a wrapper for the tree and do not set the ``metadata_resolver`` parameter specifically, it will automatically be set to the wrapper for you. The matcher can be any concrete matcher that subclasses from :class:`BaseMatcherNode`, or a :class:`OneOf`/:class:`AllOf` special matcher. Unlike :func:`matches`, it can also be a :class:`MatchIfTrue` or :func:`DoesNotMatch` matcher, since we are traversing the tree looking for matches. It cannot be a :class:`AtLeastN` or :class:`AtMostN` matcher because these types are wildcards which can only be used inside sequences. """ if isinstance(tree, (RemovalSentinel, MaybeSentinel)): # We can't do any replacements on this, so return the tree exactly. return tree if isinstance(matcher, (AtLeastN, AtMostN)): # We can't match this, since these matchers are forbidden at top level. # These are not subclasses of BaseMatcherNode, but in the case that the # user is not using type checking, this should still behave correctly. if isinstance(tree, libcst.CSTNode): return tree.deep_clone() elif isinstance(tree, meta.MetadataWrapper): return tree.module.deep_clone() else: raise Exception("Logic error!") if isinstance(tree, meta.MetadataWrapper) and metadata_resolver is None: # Provide a convenience for calling replace directly on a MetadataWrapper. metadata_resolver = tree if metadata_resolver is None: fetcher = _construct_metadata_fetcher_null() elif isinstance(metadata_resolver, libcst.MetadataWrapper): fetcher = _construct_metadata_fetcher_wrapper(metadata_resolver) else: fetcher = _construct_metadata_fetcher_dependent(metadata_resolver) replacer = _ReplaceTransformer(matcher, fetcher, replacement) new_tree = tree.visit(replacer) if isinstance(new_tree, FlattenSentinel): # The above transform never returns FlattenSentinel, so this isn't possible raise Exception("Logic error, cannot get a FlattenSentinel here!") return new_tree LibCST-1.2.0/libcst/matchers/_return_types.py000066400000000000000000000242561456464173300211600ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
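# The mapping below records, for each concrete CST node type, the return type a
# typed leave_* method may legally declare; _visitors.py consults it when
# validating decorated method signatures. For example (an illustrative lookup):
# TYPED_FUNCTION_RETURN_MAPPING[If] is Union[BaseStatement, RemovalSentinel], so
# a decorated leave_If may return either of those, and anything else fails the
# return-annotation check.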
# This file was generated by libcst.codegen.gen_type_mapping from typing import Dict as TypingDict, Type, Union from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.base import CSTNode from libcst._nodes.expression import ( Annotation, Arg, Asynchronous, Attribute, Await, BaseDictElement, BaseElement, BaseExpression, BaseFormattedStringContent, BaseSlice, BinaryOperation, BooleanOperation, Call, Comparison, ComparisonTarget, CompFor, CompIf, ConcatenatedString, Dict, DictComp, DictElement, Element, Ellipsis, Float, FormattedString, FormattedStringExpression, FormattedStringText, From, GeneratorExp, IfExp, Imaginary, Index, Integer, Lambda, LeftCurlyBrace, LeftParen, LeftSquareBracket, List, ListComp, Name, NamedExpr, Param, Parameters, ParamSlash, ParamStar, RightCurlyBrace, RightParen, RightSquareBracket, Set, SetComp, SimpleString, Slice, StarredDictElement, StarredElement, Subscript, SubscriptElement, Tuple, UnaryOperation, Yield, ) from libcst._nodes.module import Module from libcst._nodes.op import ( Add, AddAssign, And, AssignEqual, BaseAugOp, BaseBinaryOp, BaseBooleanOp, BaseCompOp, BaseUnaryOp, BitAnd, BitAndAssign, BitInvert, BitOr, BitOrAssign, BitXor, BitXorAssign, Colon, Comma, Divide, DivideAssign, Dot, Equal, FloorDivide, FloorDivideAssign, GreaterThan, GreaterThanEqual, ImportStar, In, Is, IsNot, LeftShift, LeftShiftAssign, LessThan, LessThanEqual, MatrixMultiply, MatrixMultiplyAssign, Minus, Modulo, ModuloAssign, Multiply, MultiplyAssign, Not, NotEqual, NotIn, Or, Plus, Power, PowerAssign, RightShift, RightShiftAssign, Semicolon, Subtract, SubtractAssign, ) from libcst._nodes.statement import ( AnnAssign, AsName, Assert, Assign, AssignTarget, AugAssign, BaseSmallStatement, BaseStatement, BaseSuite, Break, ClassDef, Continue, Decorator, Del, Else, ExceptHandler, ExceptStarHandler, Expr, Finally, For, FunctionDef, Global, If, Import, ImportAlias, ImportFrom, IndentedBlock, Match, MatchAs, MatchCase, MatchClass, MatchKeywordElement, MatchList, MatchMapping, MatchMappingElement, MatchOr, MatchOrElement, MatchPattern, MatchSequence, MatchSequenceElement, MatchSingleton, MatchStar, MatchTuple, MatchValue, NameItem, Nonlocal, ParamSpec, Pass, Raise, Return, SimpleStatementLine, SimpleStatementSuite, Try, TryStar, TypeAlias, TypeParam, TypeParameters, TypeVar, TypeVarTuple, While, With, WithItem, ) from libcst._nodes.whitespace import ( BaseParenthesizableWhitespace, Comment, EmptyLine, Newline, ParenthesizedWhitespace, SimpleWhitespace, TrailingWhitespace, ) from libcst._removal_sentinel import RemovalSentinel TYPED_FUNCTION_RETURN_MAPPING: TypingDict[Type[CSTNode], object] = { Add: BaseBinaryOp, AddAssign: BaseAugOp, And: BaseBooleanOp, AnnAssign: Union[BaseSmallStatement, RemovalSentinel], Annotation: Annotation, Arg: Union[Arg, RemovalSentinel], AsName: AsName, Assert: Union[BaseSmallStatement, RemovalSentinel], Assign: Union[BaseSmallStatement, RemovalSentinel], AssignEqual: Union[AssignEqual, MaybeSentinel], AssignTarget: Union[AssignTarget, RemovalSentinel], Asynchronous: Asynchronous, Attribute: BaseExpression, AugAssign: Union[BaseSmallStatement, RemovalSentinel], Await: BaseExpression, BinaryOperation: BaseExpression, BitAnd: BaseBinaryOp, BitAndAssign: BaseAugOp, BitInvert: BaseUnaryOp, BitOr: Union[BaseBinaryOp, MaybeSentinel], BitOrAssign: BaseAugOp, BitXor: BaseBinaryOp, BitXorAssign: BaseAugOp, BooleanOperation: BaseExpression, Break: Union[BaseSmallStatement, RemovalSentinel], Call: BaseExpression, ClassDef: Union[BaseStatement, RemovalSentinel], 
Colon: Union[Colon, MaybeSentinel], Comma: Union[Comma, MaybeSentinel], Comment: Comment, CompFor: CompFor, CompIf: CompIf, Comparison: BaseExpression, ComparisonTarget: Union[ComparisonTarget, RemovalSentinel], ConcatenatedString: BaseExpression, Continue: Union[BaseSmallStatement, RemovalSentinel], Decorator: Union[Decorator, RemovalSentinel], Del: Union[BaseSmallStatement, RemovalSentinel], Dict: BaseExpression, DictComp: BaseExpression, DictElement: Union[BaseDictElement, RemovalSentinel], Divide: BaseBinaryOp, DivideAssign: BaseAugOp, Dot: Union[Dot, RemovalSentinel], Element: Union[BaseElement, RemovalSentinel], Ellipsis: BaseExpression, Else: Else, EmptyLine: Union[EmptyLine, RemovalSentinel], Equal: BaseCompOp, ExceptHandler: Union[ExceptHandler, RemovalSentinel], ExceptStarHandler: Union[ExceptStarHandler, RemovalSentinel], Expr: Union[BaseSmallStatement, RemovalSentinel], Finally: Finally, Float: BaseExpression, FloorDivide: BaseBinaryOp, FloorDivideAssign: BaseAugOp, For: Union[BaseStatement, RemovalSentinel], FormattedString: BaseExpression, FormattedStringExpression: Union[BaseFormattedStringContent, RemovalSentinel], FormattedStringText: Union[BaseFormattedStringContent, RemovalSentinel], From: From, FunctionDef: Union[BaseStatement, RemovalSentinel], GeneratorExp: BaseExpression, Global: Union[BaseSmallStatement, RemovalSentinel], GreaterThan: BaseCompOp, GreaterThanEqual: BaseCompOp, If: Union[BaseStatement, RemovalSentinel], IfExp: BaseExpression, Imaginary: BaseExpression, Import: Union[BaseSmallStatement, RemovalSentinel], ImportAlias: Union[ImportAlias, RemovalSentinel], ImportFrom: Union[BaseSmallStatement, RemovalSentinel], ImportStar: ImportStar, In: BaseCompOp, IndentedBlock: BaseSuite, Index: BaseSlice, Integer: BaseExpression, Is: BaseCompOp, IsNot: BaseCompOp, Lambda: BaseExpression, LeftCurlyBrace: LeftCurlyBrace, LeftParen: Union[LeftParen, MaybeSentinel, RemovalSentinel], LeftShift: BaseBinaryOp, LeftShiftAssign: BaseAugOp, LeftSquareBracket: LeftSquareBracket, LessThan: BaseCompOp, LessThanEqual: BaseCompOp, List: BaseExpression, ListComp: BaseExpression, Match: Union[BaseStatement, RemovalSentinel], MatchAs: MatchPattern, MatchCase: MatchCase, MatchClass: MatchPattern, MatchKeywordElement: Union[MatchKeywordElement, RemovalSentinel], MatchList: MatchPattern, MatchMapping: MatchPattern, MatchMappingElement: Union[MatchMappingElement, RemovalSentinel], MatchOr: MatchPattern, MatchOrElement: Union[MatchOrElement, RemovalSentinel], MatchPattern: MatchPattern, MatchSequence: MatchPattern, MatchSequenceElement: Union[MatchSequenceElement, RemovalSentinel], MatchSingleton: MatchPattern, MatchStar: MatchStar, MatchTuple: MatchPattern, MatchValue: MatchPattern, MatrixMultiply: BaseBinaryOp, MatrixMultiplyAssign: BaseAugOp, Minus: BaseUnaryOp, Module: Module, Modulo: BaseBinaryOp, ModuloAssign: BaseAugOp, Multiply: BaseBinaryOp, MultiplyAssign: BaseAugOp, Name: BaseExpression, NameItem: Union[NameItem, RemovalSentinel], NamedExpr: BaseExpression, Newline: Newline, Nonlocal: Union[BaseSmallStatement, RemovalSentinel], Not: BaseUnaryOp, NotEqual: BaseCompOp, NotIn: BaseCompOp, Or: BaseBooleanOp, Param: Union[Param, MaybeSentinel, RemovalSentinel], ParamSlash: Union[ParamSlash, MaybeSentinel], ParamSpec: ParamSpec, ParamStar: Union[ParamStar, MaybeSentinel], Parameters: Parameters, ParenthesizedWhitespace: Union[BaseParenthesizableWhitespace, MaybeSentinel], Pass: Union[BaseSmallStatement, RemovalSentinel], Plus: BaseUnaryOp, Power: BaseBinaryOp, PowerAssign: BaseAugOp, 
Raise: Union[BaseSmallStatement, RemovalSentinel], Return: Union[BaseSmallStatement, RemovalSentinel], RightCurlyBrace: RightCurlyBrace, RightParen: Union[RightParen, MaybeSentinel, RemovalSentinel], RightShift: BaseBinaryOp, RightShiftAssign: BaseAugOp, RightSquareBracket: RightSquareBracket, Semicolon: Union[Semicolon, MaybeSentinel], Set: BaseExpression, SetComp: BaseExpression, SimpleStatementLine: Union[BaseStatement, RemovalSentinel], SimpleStatementSuite: BaseSuite, SimpleString: BaseExpression, SimpleWhitespace: Union[BaseParenthesizableWhitespace, MaybeSentinel], Slice: BaseSlice, StarredDictElement: Union[BaseDictElement, RemovalSentinel], StarredElement: BaseExpression, Subscript: BaseExpression, SubscriptElement: Union[SubscriptElement, RemovalSentinel], Subtract: BaseBinaryOp, SubtractAssign: BaseAugOp, TrailingWhitespace: TrailingWhitespace, Try: Union[BaseStatement, RemovalSentinel], TryStar: Union[BaseStatement, RemovalSentinel], Tuple: BaseExpression, TypeAlias: Union[BaseSmallStatement, RemovalSentinel], TypeParam: Union[TypeParam, RemovalSentinel], TypeParameters: TypeParameters, TypeVar: TypeVar, TypeVarTuple: TypeVarTuple, UnaryOperation: BaseExpression, While: Union[BaseStatement, RemovalSentinel], With: Union[BaseStatement, RemovalSentinel], WithItem: Union[WithItem, RemovalSentinel], Yield: BaseExpression, } LibCST-1.2.0/libcst/matchers/_visitors.py000066400000000000000000001032251456464173300202730ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from inspect import ismethod, signature from typing import ( Any, Callable, cast, Dict, get_type_hints, List, Optional, Sequence, Set, Tuple, Type, Union, ) import libcst as cst from libcst import CSTTransformer, CSTVisitor from libcst._types import CSTNodeT from libcst.matchers._decorators import ( CONSTRUCTED_LEAVE_MATCHER_ATTR, CONSTRUCTED_VISIT_MATCHER_ATTR, VISIT_NEGATIVE_MATCHER_ATTR, VISIT_POSITIVE_MATCHER_ATTR, ) from libcst.matchers._matcher_base import ( AllOf, AtLeastN, AtMostN, BaseMatcherNode, extract, extractall, findall, matches, MatchIfTrue, MatchMetadata, MatchMetadataIfTrue, OneOf, replace, ) from libcst.matchers._return_types import TYPED_FUNCTION_RETURN_MAPPING try: # PEP 604 unions, in Python 3.10+ from types import UnionType except ImportError: # We use this for isinstance; no annotation will be an instance of this class UnionType: pass CONCRETE_METHODS: Set[str] = { *{f"visit_{cls.__name__}" for cls in TYPED_FUNCTION_RETURN_MAPPING}, *{f"leave_{cls.__name__}" for cls in TYPED_FUNCTION_RETURN_MAPPING}, } # pyre-ignore We don't care about Any here, its not exposed. 
def _match_decorator_unpickler(kwargs: Any) -> "MatchDecoratorMismatch": return MatchDecoratorMismatch(**kwargs) class MatchDecoratorMismatch(Exception): def __init__(self, func: str, message: str) -> None: super().__init__(f"Invalid function signature for {func}: {message}") self.func = func self.message = message def __reduce__( self, ) -> Tuple[Callable[..., "MatchDecoratorMismatch"], Tuple[object, ...]]: return ( _match_decorator_unpickler, ({"func": self.func, "message": self.message},), ) def _get_possible_match_classes(matcher: BaseMatcherNode) -> List[Type[cst.CSTNode]]: if isinstance(matcher, (OneOf, AllOf)): return [getattr(cst, m.__class__.__name__) for m in matcher.options] else: return [getattr(cst, matcher.__class__.__name__)] def _annotation_is_union(annotation: object) -> bool: return ( isinstance(annotation, UnionType) or getattr(annotation, "__origin__", None) is Union ) def _get_possible_annotated_classes(annotation: object) -> List[Type[object]]: if _annotation_is_union(annotation): return getattr(annotation, "__args__", []) else: return [cast(Type[object], annotation)] def _get_valid_leave_annotations_for_classes( classes: Sequence[Type[cst.CSTNode]], ) -> Set[Type[object]]: retval: Set[Type[object]] = set() for cls in classes: # Look up the leave annotation for each class, combine them so we get a list of # all possible valid return annotations. It's not really possible for us (or # pyre) to fully enforce return types given the presence of OneOf/AllOf matchers, so # we do the best we can by taking a union of all valid return annotations. retval.update( _get_possible_annotated_classes(TYPED_FUNCTION_RETURN_MAPPING[cls]) ) return retval def _verify_return_annotation( possible_match_classes: Sequence[Type[cst.CSTNode]], # pyre-ignore We only care that meth is callable. meth: Callable[..., Any], decorator_name: str, *, expected_none: bool, ) -> None: type_hints = get_type_hints(meth) if expected_none: # Simply look for any annotation at all and if it exists, verify that # it is "None". if type_hints.get("return", type(None)) is not type(None): # noqa: E721 raise MatchDecoratorMismatch( meth.__qualname__, f"@{decorator_name} should only decorate functions that do " + "not return.", ) else: if "return" not in type_hints: # Can't check this, type annotation not supplied. return possible_annotated_classes = _get_possible_annotated_classes( type_hints["return"] ) possible_returns = _get_valid_leave_annotations_for_classes( possible_match_classes ) # Look at the union of specified return annotations, make sure that # they are all subclasses of the original leave_ return # annotations. This catches when somebody tries to return a new node # that we know can't fit where the existing node was in the tree. for ret in possible_annotated_classes: for annotation in possible_returns: if issubclass(ret, annotation): # This annotation is a superclass of the possible match, # so we know that the types are correct. break else: # The current ret was not a subclass of any of the annotated # return types. raise MatchDecoratorMismatch( meth.__qualname__, f"@{decorator_name} decorated function cannot return " + f"the type {ret.__name__}.", ) def _verify_parameter_annotations( possible_match_classes: Sequence[Type[cst.CSTNode]], # pyre-ignore We only care that meth is callable. meth: Callable[..., Any], decorator_name: str, *, expected_param_count: int, ) -> None: # First, verify that the number of parameters is sane.
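# (As wired up in the visitor/transformer __init__s below: @visit-decorated
# methods are checked with expected_param_count=1, a transformer's @leave
# methods with expected_param_count=2 (original_node, updated_node), and a
# visitor's @leave methods with 1.)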
meth_signature = signature(meth) if len(meth_signature.parameters) != expected_param_count: raise MatchDecoratorMismatch( meth.__qualname__, f"@{decorator_name} should decorate functions which take " + f"{expected_param_count} parameter" + ("s" if expected_param_count > 1 else ""), ) # Finally, for each parameter, make sure that the annotation includes # each of the classes that might appear given the match string. This # can be done in the simple case by just specifying the correct cst node # type. For complex matches that use OneOf/AllOf, this could be a base class # that encompasses all possible matches, or a union. params = [v for k, v in get_type_hints(meth).items() if k != "return"] for param in params: # Go through each possible matcher, and make sure that the annotation # for types is a superclass of each matcher. possible_annotated_classes = _get_possible_annotated_classes(param) for match in possible_match_classes: for annotation in possible_annotated_classes: if issubclass(match, annotation): # This annotation is a superclass of the possible match, # so we know that the types are correct. break else: # The current match was not a subclass of any of the annotated # types. raise MatchDecoratorMismatch( meth.__qualname__, f"@{decorator_name} can be called with {match.__name__} " + "but the decorated function parameter annotations do " + "not include this type.", ) def _check_types( # pyre-ignore We don't care about the type of sequence, just that it's callable. decoratormap: Dict[BaseMatcherNode, Sequence[Callable[..., Any]]], decorator_name: str, *, expected_param_count: int, expected_none_return: bool, ) -> None: for matcher, methods in decoratormap.items(): # Given the matcher class we have, get the list of possible cst nodes that # could be passed to the functions we wrap. possible_match_classes = _get_possible_match_classes(matcher) has_invalid_top_level = any( isinstance(m, (AtLeastN, AtMostN, MatchIfTrue)) for m in possible_match_classes ) # Now, loop through each function we wrap and verify that the type signature # is valid. for meth in methods: # First thing first, make sure this isn't wrapping an inner class. if not ismethod(meth): raise MatchDecoratorMismatch( meth.__qualname__, "Matcher decorators should only be used on methods of " + "MatcherDecoratableTransformer or " + "MatcherDecoratableVisitor", ) if has_invalid_top_level: raise MatchDecoratorMismatch( meth.__qualname__, "The root matcher in a matcher decorator cannot be an " + "AtLeastN, AtMostN or MatchIfTrue matcher", ) # Now, check that the return annotation is valid. _verify_return_annotation( possible_match_classes, meth, decorator_name, expected_none=expected_none_return, ) # Finally, check that the parameter annotations are valid. _verify_parameter_annotations( possible_match_classes, meth, decorator_name, expected_param_count=expected_param_count, ) def _gather_matchers(obj: object) -> Set[BaseMatcherNode]: visit_matchers: Set[BaseMatcherNode] = set() for func in dir(obj): try: for matcher in getattr(getattr(obj, func), VISIT_POSITIVE_MATCHER_ATTR, []): visit_matchers.add(cast(BaseMatcherNode, matcher)) for matcher in getattr(getattr(obj, func), VISIT_NEGATIVE_MATCHER_ATTR, []): visit_matchers.add(cast(BaseMatcherNode, matcher)) except Exception: # This could be a calculated property, and calling getattr() evaluates it. # We have no control over the implementation detail, so if it raises, we # should not crash.
pass return visit_matchers def _assert_not_concrete( decorator_name: str, func: Callable[[cst.CSTNode], None] ) -> None: if func.__name__ in CONCRETE_METHODS: raise MatchDecoratorMismatch( func.__qualname__, f"@{decorator_name} should not decorate functions that are concrete " + "visit or leave methods.", ) def _gather_constructed_visit_funcs( obj: object, ) -> Dict[BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]]]: constructed_visitors: Dict[ BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]] ] = {} for funcname in dir(obj): try: possible_func = getattr(obj, funcname) if not ismethod(possible_func): continue func = cast(Callable[[cst.CSTNode], None], possible_func) except Exception: # This could be a calculated property, and calling getattr() evaluates it. # We have no control over the implementation detail, so if it raises, we # should not crash. continue matchers = getattr(func, CONSTRUCTED_VISIT_MATCHER_ATTR, []) if matchers: # Make sure that we aren't accidentally putting a @visit on a visit_Node. _assert_not_concrete("visit", func) for matcher in matchers: casted_matcher = cast(BaseMatcherNode, matcher) constructed_visitors[casted_matcher] = ( *constructed_visitors.get(casted_matcher, ()), func, ) return constructed_visitors # pyre-ignore: There is no reasonable way to type this, so ignore the Any type. This # is because the leave_* methods have a different signature depending on whether they # are in a MatcherDecoratableTransformer or a MatcherDecoratableVisitor. def _gather_constructed_leave_funcs( obj: object, ) -> Dict[BaseMatcherNode, Sequence[Callable[..., Any]]]: constructed_visitors: Dict[ BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]] ] = {} for funcname in dir(obj): try: possible_func = getattr(obj, funcname) if not ismethod(possible_func): continue func = cast(Callable[[cst.CSTNode], None], possible_func) except Exception: # This could be a calculated property, and calling getattr() evaluates it. # We have no control over the implementation detail, so if it raises, we # should not crash. continue matchers = getattr(func, CONSTRUCTED_LEAVE_MATCHER_ATTR, []) if matchers: # Make sure that we aren't accidentally putting a @leave on a leave_Node. _assert_not_concrete("leave", func) for matcher in matchers: casted_matcher = cast(BaseMatcherNode, matcher) constructed_visitors[casted_matcher] = ( *constructed_visitors.get(casted_matcher, ()), func, ) return constructed_visitors def _visit_matchers( matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], node: cst.CSTNode, metadata_resolver: cst.MetadataDependent, ) -> Dict[BaseMatcherNode, Optional[cst.CSTNode]]: new_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = {} for matcher, existing_node in matchers.items(): # We don't care about visiting matchers that are already true. if existing_node is None and matches( node, matcher, metadata_resolver=metadata_resolver ): # This node matches! Remember which node it was so we can # cancel it later. new_matchers[matcher] = node else: new_matchers[matcher] = existing_node return new_matchers def _leave_matchers( matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], node: cst.CSTNode ) -> Dict[BaseMatcherNode, Optional[cst.CSTNode]]: new_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = {} for matcher, existing_node in matchers.items(): if node is existing_node: # This node matches, so we are no longer inside it. new_matchers[matcher] = None else: # We aren't leaving this node.
new_matchers[matcher] = existing_node return new_matchers def _all_positive_matchers_true( all_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], obj: object ) -> bool: requested_matchers = getattr(obj, VISIT_POSITIVE_MATCHER_ATTR, []) for matcher in requested_matchers: if all_matchers[matcher] is None: # The passed in object has been decorated with a matcher that isn't # active. return False return True def _all_negative_matchers_false( all_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], obj: object ) -> bool: requested_matchers = getattr(obj, VISIT_NEGATIVE_MATCHER_ATTR, []) for matcher in requested_matchers: if all_matchers[matcher] is not None: # The passed in object has been decorated with a matcher that is active. return False return True def _should_allow_visit( all_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], obj: object ) -> bool: return _all_positive_matchers_true( all_matchers, obj ) and _all_negative_matchers_false(all_matchers, obj) def _visit_constructed_funcs( visit_funcs: Dict[BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]]], all_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], node: cst.CSTNode, metadata_resolver: cst.MetadataDependent, ) -> None: for matcher, visit_funcs in visit_funcs.items(): if matches(node, matcher, metadata_resolver=metadata_resolver): for visit_func in visit_funcs: if _should_allow_visit(all_matchers, visit_func): visit_func(node) class MatcherDecoratableTransformer(CSTTransformer): """ This class provides all of the features of a :class:`libcst.CSTTransformer`, and additionally supports various decorators to control when methods get called when traversing a tree. Use this instead of a :class:`libcst.CSTTransformer` if you wish to do more powerful decorator-based visiting. """ def __init__(self) -> None: CSTTransformer.__init__(self) # List of gating matchers that we need to track and evaluate. We use these # in conjunction with the call_if_inside and call_if_not_inside decorators # to determine whether or not to call a visit/leave function. self._matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = { m: None for m in _gather_matchers(self) } # Mapping of matchers to functions. If in the course of visiting the tree, # a node matches one of these matchers, the corresponding function will be # called as if it was a visit_* method. self._extra_visit_funcs: Dict[ BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]] ] = _gather_constructed_visit_funcs(self) # Mapping of matchers to functions. If in the course of leaving the tree, # a node matches one of these matchers, the corresponding function will be # called as if it was a leave_* method. self._extra_leave_funcs: Dict[ BaseMatcherNode, Sequence[ Callable[ [cst.CSTNode, cst.CSTNode], Union[cst.CSTNode, cst.RemovalSentinel] ] ], ] = _gather_constructed_leave_funcs(self) # Make sure visit/leave functions constructed with @visit and @leave decorators # have correct type annotations. _check_types( self._extra_visit_funcs, "visit", expected_param_count=1, expected_none_return=True, ) _check_types( self._extra_leave_funcs, "leave", expected_param_count=2, expected_none_return=False, ) def on_visit(self, node: cst.CSTNode) -> bool: # First, evaluate any matchers that we have which we are not inside already. self._matchers = _visit_matchers(self._matchers, node, self) # Now, call any visitors that were hooked using a visit decorator.
_visit_constructed_funcs(self._extra_visit_funcs, self._matchers, node, self) # Now, evaluate whether this current function has any matchers it requires. if not _should_allow_visit( self._matchers, getattr(self, f"visit_{type(node).__name__}", None) ): # We shouldn't visit this directly. However, we should continue # visiting its children. return True # Either the visit_func doesn't exist, we have no matchers, or we passed all # matchers. In either case, just call the superclass behavior. return CSTTransformer.on_visit(self, node) def on_leave( self, original_node: CSTNodeT, updated_node: CSTNodeT ) -> Union[CSTNodeT, cst.RemovalSentinel]: # First, evaluate whether this current function has a decorator on it. if _should_allow_visit( self._matchers, getattr(self, f"leave_{type(original_node).__name__}", None) ): retval = CSTTransformer.on_leave(self, original_node, updated_node) else: retval = updated_node # Now, call any visitors that were hooked using a leave decorator. for matcher, leave_funcs in reversed(list(self._extra_leave_funcs.items())): if not self.matches(original_node, matcher): continue for leave_func in leave_funcs: if _should_allow_visit(self._matchers, leave_func) and isinstance( retval, cst.CSTNode ): retval = leave_func(original_node, retval) # Now, see if we have any matchers we should deactivate. self._matchers = _leave_matchers(self._matchers, original_node) # pyre-ignore The return value of on_leave is subtly wrong in that we can # actually return any value that passes this node's parent's constructor # validation. Fixing this is beyond the scope of this file, and would involve # forcing a lot of ensure_type() checks across the codebase. return retval def on_visit_attribute(self, node: cst.CSTNode, attribute: str) -> None: # Evaluate whether this current function has a decorator on it. if _should_allow_visit( self._matchers, getattr(self, f"visit_{type(node).__name__}_{attribute}", None), ): # Either the visit_func doesn't exist, we have no matchers, or we passed all # matchers. In either case, just call the superclass behavior. return CSTTransformer.on_visit_attribute(self, node, attribute) def on_leave_attribute(self, original_node: cst.CSTNode, attribute: str) -> None: # Evaluate whether this current function has a decorator on it. if _should_allow_visit( self._matchers, getattr(self, f"leave_{type(original_node).__name__}_{attribute}", None), ): # Either the visit_func doesn't exist, we have no matchers, or we passed all # matchers. In either case, just call the superclass behavior. CSTTransformer.on_leave_attribute(self, original_node, attribute) def matches( self, node: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: BaseMatcherNode, ) -> bool: """ A convenience method to call :func:`~libcst.matchers.matches` without requiring an explicit parameter for metadata. Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.matches` as it is identical to this function. """ return matches(node, matcher, metadata_resolver=self) def findall( self, tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], ) -> Sequence[cst.CSTNode]: """ A convenience method to call :func:`~libcst.matchers.findall` without requiring an explicit parameter for metadata. 
Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.findall` as it is identical to this function. """ return findall(tree, matcher, metadata_resolver=self) def extract( self, node: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: BaseMatcherNode, ) -> Optional[Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]]]: """ A convenience method to call :func:`~libcst.matchers.extract` without requiring an explicit parameter for metadata. Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.extract` as it is identical to this function. """ return extract(node, matcher, metadata_resolver=self) def extractall( self, tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], ) -> Sequence[Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]]]: """ A convenience method to call :func:`~libcst.matchers.extractall` without requiring an explicit parameter for metadata. Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.extractall` as it is identical to this function. """ return extractall(tree, matcher, metadata_resolver=self) def replace( self, tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], replacement: Union[ cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode, Callable[ [cst.CSTNode, Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]]], Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], ], ], ) -> Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode]: """ A convenience method to call :func:`~libcst.matchers.replace` without requiring an explicit parameter for metadata. Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.replace` as it is identical to this function. """ return replace(tree, matcher, replacement, metadata_resolver=self) class MatcherDecoratableVisitor(CSTVisitor): """ This class provides all of the features of a :class:`libcst.CSTVisitor`, and additionally supports various decorators to control when methods get called when traversing a tree. Use this instead of a :class:`libcst.CSTVisitor` if you wish to do more powerful decorator-based visiting. """ def __init__(self) -> None: CSTVisitor.__init__(self) # List of gating matchers that we need to track and evaluate. We use these # in conjunction with the call_if_inside and call_if_not_inside decorators # to determine whether or not to call a visit/leave function. self._matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = { m: None for m in _gather_matchers(self) } # Mapping of matchers to functions. If in the course of visiting the tree, # a node matches one of these matchers, the corresponding function will be # called as if it was a visit_* method. self._extra_visit_funcs: Dict[ BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]] ] = _gather_constructed_visit_funcs(self) # Mapping of matchers to functions. If in the course of leaving the tree, # a node matches one of these matchers, the corresponding function will be # called as if it was a leave_* method.
self._extra_leave_funcs: Dict[ BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]] ] = _gather_constructed_leave_funcs(self) # Make sure visit/leave functions constructed with @visit and @leave decorators # have correct type annotations. _check_types( self._extra_visit_funcs, "visit", expected_param_count=1, expected_none_return=True, ) _check_types( self._extra_leave_funcs, "leave", expected_param_count=1, expected_none_return=True, ) def on_visit(self, node: cst.CSTNode) -> bool: # First, evaluate any matchers that we have which we are not inside already. self._matchers = _visit_matchers(self._matchers, node, self) # Now, call any visitors that were hooked using a visit decorator. _visit_constructed_funcs(self._extra_visit_funcs, self._matchers, node, self) # Now, evaluate whether this current function has a decorator on it. if not _should_allow_visit( self._matchers, getattr(self, f"visit_{type(node).__name__}", None) ): # We shouldn't visit this directly. However, we should continue # visiting its children. return True # Either the visit_func doesn't exist, we have no matchers, or we passed all # matchers. In either case, just call the superclass behavior. return CSTVisitor.on_visit(self, node) def on_leave(self, original_node: cst.CSTNode) -> None: # First, evaluate whether this current function has a decorator on it. if _should_allow_visit( self._matchers, getattr(self, f"leave_{type(original_node).__name__}", None) ): CSTVisitor.on_leave(self, original_node) # Now, call any visitors that were hooked using a leave decorator. for matcher, leave_funcs in reversed(list(self._extra_leave_funcs.items())): if not self.matches(original_node, matcher): continue for leave_func in leave_funcs: if _should_allow_visit(self._matchers, leave_func): leave_func(original_node) # Now, see if we have any matchers we should deactivate. self._matchers = _leave_matchers(self._matchers, original_node) def on_visit_attribute(self, node: cst.CSTNode, attribute: str) -> None: # Evaluate whether this current function has a decorator on it. if _should_allow_visit( self._matchers, getattr(self, f"visit_{type(node).__name__}_{attribute}", None), ): # Either the visit_func doesn't exist, we have no matchers, or we passed all # matchers. In either case, just call the superclass behavior. return CSTVisitor.on_visit_attribute(self, node, attribute) def on_leave_attribute(self, original_node: cst.CSTNode, attribute: str) -> None: # Evaluate whether this current function has a decorator on it. if _should_allow_visit( self._matchers, getattr(self, f"leave_{type(original_node).__name__}_{attribute}", None), ): # Either the visit_func doesn't exist, we have no matchers, or we passed all # matchers. In either case, just call the superclass behavior. CSTVisitor.on_leave_attribute(self, original_node, attribute) def matches( self, node: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: BaseMatcherNode, ) -> bool: """ A convenience method to call :func:`~libcst.matchers.matches` without requiring an explicit parameter for metadata. Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.matches` as it is identical to this function. 
""" return matches(node, matcher, metadata_resolver=self) def findall( self, tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], ) -> Sequence[cst.CSTNode]: """ A convenience method to call :func:`~libcst.matchers.findall` without requiring an explicit parameter for metadata. Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.findall` as it is identical to this function. """ return findall(tree, matcher, metadata_resolver=self) def extract( self, node: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: BaseMatcherNode, ) -> Optional[Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]]]: """ A convenience method to call :func:`~libcst.matchers.extract` without requiring an explicit parameter for metadata. Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.extract` as it is identical to this function. """ return extract(node, matcher, metadata_resolver=self) def extractall( self, tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], ) -> Sequence[Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]]]: """ A convenience method to call :func:`~libcst.matchers.extractall` without requiring an explicit parameter for metadata. Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.extractall` as it is identical to this function. """ return extractall(tree, matcher, metadata_resolver=self) def replace( self, tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], replacement: Union[ cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode, Callable[ [cst.CSTNode, Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]]], Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], ], ], ) -> Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode]: """ A convenience method to call :func:`~libcst.matchers.replace` without requiring an explicit parameter for metadata. Since our instance is an instance of :class:`libcst.MetadataDependent`, we work as a metadata resolver. Please see documentation for :func:`~libcst.matchers.replace` as it is identical to this function. """ return replace(tree, matcher, replacement, metadata_resolver=self) LibCST-1.2.0/libcst/matchers/tests/000077500000000000000000000000001456464173300170375ustar00rootroot00000000000000LibCST-1.2.0/libcst/matchers/tests/__init__.py000066400000000000000000000002631456464173300211510ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/matchers/tests/test_decorators.py000066400000000000000000001046131456464173300226220ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
import sys from ast import literal_eval from textwrap import dedent from typing import List, Set from unittest import skipIf import libcst as cst import libcst.matchers as m from libcst.matchers import ( call_if_inside, call_if_not_inside, leave, MatcherDecoratableTransformer, MatcherDecoratableVisitor, visit, ) from libcst.testing.utils import UnitTest def fixture(code: str) -> cst.Module: return cst.parse_module(dedent(code)) class MatchersGatingDecoratorsTest(UnitTest): def test_call_if_inside_transform_simple(self) -> None: # Set up a simple visitor with a call_if_inside decorator. class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @call_if_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.visits.append(node.value) @call_if_inside(m.FunctionDef()) def leave_SimpleString( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: self.leaves.append(updated_node.value) return updated_node # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"baz"']) self.assertEqual(visitor.leaves, ['"baz"', '"foobar"']) def test_call_if_inside_verify_original_transform(self) -> None: # Set up a simple visitor with a call_if_inside decorator. class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.func_visits: List[str] = [] self.str_visits: List[str] = [] @call_if_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.str_visits.append(node.value) def visit_FunctionDef(self, node: cst.FunctionDef) -> None: self.func_visits.append(node.name.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.func_visits, ["foo", "bar"]) self.assertEqual(visitor.str_visits, ['"baz"']) def test_call_if_inside_collect_simple(self) -> None: # Set up a simple visitor with a call_if_inside decorator. class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @call_if_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.visits.append(node.value) @call_if_inside(m.FunctionDef()) def leave_SimpleString(self, original_node: cst.SimpleString) -> None: self.leaves.append(original_node.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"baz"']) self.assertEqual(visitor.leaves, ['"baz"', '"foobar"']) def test_call_if_inside_verify_original_collect(self) -> None: # Set up a simple visitor with a call_if_inside decorator. 
class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.func_visits: List[str] = [] self.str_visits: List[str] = [] @call_if_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.str_visits.append(node.value) def visit_FunctionDef(self, node: cst.FunctionDef) -> None: self.func_visits.append(node.name.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.func_visits, ["foo", "bar"]) self.assertEqual(visitor.str_visits, ['"baz"']) def test_multiple_visitors_collect(self) -> None: # Set up a simple visitor with multiple visit decorators. class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] @call_if_inside(m.ClassDef(m.Name("A"))) @call_if_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.visits.append(node.value) # Parse a module and verify we visited correctly. module = fixture( """ def foo() -> None: return "foo" class A: def foo(self) -> None: return "baz" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"baz"']) def test_multiple_visitors_transform(self) -> None: # Set up a simple visitor with multiple visit decorators. class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] @call_if_inside(m.ClassDef(m.Name("A"))) @call_if_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.visits.append(node.value) # Parse a module and verify we visited correctly. module = fixture( """ def foo() -> None: return "foo" class A: def foo(self) -> None: return "baz" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"baz"']) def test_call_if_not_inside_transform_simple(self) -> None: # Set up a simple visitor with a call_if_not_inside decorator. class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @call_if_not_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.visits.append(node.value) @call_if_not_inside(m.FunctionDef()) def leave_SimpleString( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: self.leaves.append(updated_node.value) return updated_node # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"foo"', '"bar"', '"foobar"']) self.assertEqual(visitor.leaves, ['"foo"', '"bar"']) def test_visit_if_not_inside_verify_original_transform(self) -> None: # Set up a simple visitor with a call_if_not_inside decorator. 
class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.func_visits: List[str] = [] self.str_visits: List[str] = [] @call_if_not_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.str_visits.append(node.value) def visit_FunctionDef(self, node: cst.FunctionDef) -> None: self.func_visits.append(node.name.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.func_visits, ["foo", "bar"]) self.assertEqual(visitor.str_visits, ['"foo"', '"bar"', '"foobar"']) def test_call_if_not_inside_collect_simple(self) -> None: # Set up a simple visitor with a call_if_not_inside decorator. class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @call_if_not_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.visits.append(node.value) @call_if_not_inside(m.FunctionDef()) def leave_SimpleString(self, original_node: cst.SimpleString) -> None: self.leaves.append(original_node.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"foo"', '"bar"', '"foobar"']) self.assertEqual(visitor.leaves, ['"foo"', '"bar"']) def test_visit_if_not_inside_verify_original_collect(self) -> None: # Set up a simple visitor with a call_if_not_inside decorator. class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.func_visits: List[str] = [] self.str_visits: List[str] = [] @call_if_not_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.str_visits.append(node.value) def visit_FunctionDef(self, node: cst.FunctionDef) -> None: self.func_visits.append(node.name.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.func_visits, ["foo", "bar"]) self.assertEqual(visitor.str_visits, ['"foo"', '"bar"', '"foobar"']) class MatchersVisitLeaveDecoratorsTest(UnitTest): def test_visit_transform(self) -> None: # Set up a simple visitor with a visit and leave decorator. class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @visit(m.FunctionDef(m.Name("foo") | m.Name("bar"))) def visit_function(self, node: cst.FunctionDef) -> None: self.visits.append(node.name.value) @leave(m.FunctionDef(m.Name("bar") | m.Name("baz"))) def leave_function( self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef ) -> cst.FunctionDef: self.leaves.append(updated_node.name.value) return updated_node # Parse a module and verify we visited correctly. 
module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" def baz() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ["foo", "bar"]) self.assertEqual(visitor.leaves, ["bar", "baz"]) def test_visit_collector(self) -> None: # Set up a simple visitor with a visit and leave decorator. class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @visit(m.FunctionDef(m.Name("foo") | m.Name("bar"))) def visit_function(self, node: cst.FunctionDef) -> None: self.visits.append(node.name.value) @leave(m.FunctionDef(m.Name("bar") | m.Name("baz"))) def leave_function(self, original_node: cst.FunctionDef) -> None: self.leaves.append(original_node.name.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" def baz() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ["foo", "bar"]) self.assertEqual(visitor.leaves, ["bar", "baz"]) def test_stacked_visit_transform(self) -> None: # Set up a simple visitor with a visit and leave decorator. class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @visit(m.FunctionDef(m.Name("foo"))) @visit(m.FunctionDef(m.Name("bar"))) def visit_function(self, node: cst.FunctionDef) -> None: self.visits.append(node.name.value) @leave(m.FunctionDef(m.Name("bar"))) @leave(m.FunctionDef(m.Name("baz"))) def leave_function( self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef ) -> cst.FunctionDef: self.leaves.append(updated_node.name.value) return updated_node # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" def baz() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ["foo", "bar"]) self.assertEqual(visitor.leaves, ["bar", "baz"]) def test_stacked_visit_collector(self) -> None: # Set up a simple visitor with a visit and leave decorator. class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @visit(m.FunctionDef(m.Name("foo"))) @visit(m.FunctionDef(m.Name("bar"))) def visit_function(self, node: cst.FunctionDef) -> None: self.visits.append(node.name.value) @leave(m.FunctionDef(m.Name("bar"))) @leave(m.FunctionDef(m.Name("baz"))) def leave_function(self, original_node: cst.FunctionDef) -> None: self.leaves.append(original_node.name.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" def baz() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ["foo", "bar"]) self.assertEqual(visitor.leaves, ["bar", "baz"]) def test_duplicate_visit_transform(self) -> None: # Set up a simple visitor with a visit and leave decorator. 
class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: Set[str] = set() self.leaves: Set[str] = set() @visit(m.FunctionDef(m.Name("foo"))) def visit_function1(self, node: cst.FunctionDef) -> None: self.visits.add(node.name.value + "1") @visit(m.FunctionDef(m.Name("foo"))) def visit_function2(self, node: cst.FunctionDef) -> None: self.visits.add(node.name.value + "2") @leave(m.FunctionDef(m.Name("bar"))) def leave_function1( self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef ) -> cst.FunctionDef: self.leaves.add(updated_node.name.value + "1") return updated_node @leave(m.FunctionDef(m.Name("bar"))) def leave_function2( self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef ) -> cst.FunctionDef: self.leaves.add(updated_node.name.value + "2") return updated_node # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" def baz() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, {"foo1", "foo2"}) self.assertEqual(visitor.leaves, {"bar1", "bar2"}) def test_duplicate_visit_collector(self) -> None: # Set up a simple visitor with a visit and leave decorator. class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.visits: Set[str] = set() self.leaves: Set[str] = set() @visit(m.FunctionDef(m.Name("foo"))) def visit_function1(self, node: cst.FunctionDef) -> None: self.visits.add(node.name.value + "1") @visit(m.FunctionDef(m.Name("foo"))) def visit_function2(self, node: cst.FunctionDef) -> None: self.visits.add(node.name.value + "2") @leave(m.FunctionDef(m.Name("bar"))) def leave_function1(self, original_node: cst.FunctionDef) -> None: self.leaves.add(original_node.name.value + "1") @leave(m.FunctionDef(m.Name("bar"))) def leave_function2(self, original_node: cst.FunctionDef) -> None: self.leaves.add(original_node.name.value + "2") # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" def baz() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, {"foo1", "foo2"}) self.assertEqual(visitor.leaves, {"bar1", "bar2"}) def test_gated_visit_transform(self) -> None: # Set up a simple visitor with a visit and leave decorator. 
class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: Set[str] = set() self.leaves: Set[str] = set() @call_if_inside(m.FunctionDef(m.Name("foo"))) @visit(m.SimpleString()) def visit_string1(self, node: cst.SimpleString) -> None: self.visits.add(literal_eval(node.value) + "1") @call_if_not_inside(m.FunctionDef(m.Name("bar"))) @visit(m.SimpleString()) def visit_string2(self, node: cst.SimpleString) -> None: self.visits.add(literal_eval(node.value) + "2") @call_if_inside(m.FunctionDef(m.Name("baz"))) @leave(m.SimpleString()) def leave_string1( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: self.leaves.add(literal_eval(updated_node.value) + "1") return updated_node @call_if_not_inside(m.FunctionDef(m.Name("foo"))) @leave(m.SimpleString()) def leave_string2( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: self.leaves.add(literal_eval(updated_node.value) + "2") return updated_node # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" def baz() -> None: return "foobarbaz" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, {"baz1", "foo2", "bar2", "baz2", "foobarbaz2"}) self.assertEqual( visitor.leaves, {"foobarbaz1", "foo2", "bar2", "foobar2", "foobarbaz2"} ) def test_gated_visit_collect(self) -> None: # Set up a simple visitor with a visit and leave decorator. class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.visits: Set[str] = set() self.leaves: Set[str] = set() @call_if_inside(m.FunctionDef(m.Name("foo"))) @visit(m.SimpleString()) def visit_string1(self, node: cst.SimpleString) -> None: self.visits.add(literal_eval(node.value) + "1") @call_if_not_inside(m.FunctionDef(m.Name("bar"))) @visit(m.SimpleString()) def visit_string2(self, node: cst.SimpleString) -> None: self.visits.add(literal_eval(node.value) + "2") @call_if_inside(m.FunctionDef(m.Name("baz"))) @leave(m.SimpleString()) def leave_string1(self, original_node: cst.SimpleString) -> None: self.leaves.add(literal_eval(original_node.value) + "1") @call_if_not_inside(m.FunctionDef(m.Name("foo"))) @leave(m.SimpleString()) def leave_string2(self, original_node: cst.SimpleString) -> None: self.leaves.add(literal_eval(original_node.value) + "2") # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" def baz() -> None: return "foobarbaz" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, {"baz1", "foo2", "bar2", "baz2", "foobarbaz2"}) self.assertEqual( visitor.leaves, {"foobarbaz1", "foo2", "bar2", "foobar2", "foobarbaz2"} ) def test_transform_order(self) -> None: # Set up a simple visitor with a visit and leave decorator. 
class TestVisitor(MatcherDecoratableTransformer): @call_if_inside(m.FunctionDef(m.Name("bar"))) @leave(m.SimpleString()) def leave_string1( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: return updated_node.with_changes( value=f'"prefix{literal_eval(updated_node.value)}"' ) @call_if_inside(m.FunctionDef(m.Name("bar"))) @leave(m.SimpleString()) def leave_string2( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: return updated_node.with_changes( value=f'"{literal_eval(updated_node.value)}suffix"' ) @call_if_inside(m.FunctionDef(m.Name("bar"))) def leave_SimpleString( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: return updated_node.with_changes( value=f'"{"".join(reversed(literal_eval(updated_node.value)))}"' ) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" def baz() -> None: return "foobarbaz" """ ) visitor = TestVisitor() actual = module.visit(visitor) expected = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "prefixraboofsuffix" def baz() -> None: return "foobarbaz" """ ) self.assertTrue(expected.deep_equals(actual)) def test_call_if_inside_visitor_attribute(self) -> None: # Set up a simple visitor with a call_if_inside decorator. class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @call_if_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString_lpar(self, node: cst.SimpleString) -> None: self.visits.append(node.value) @call_if_inside(m.FunctionDef()) def leave_SimpleString_lpar(self, node: cst.SimpleString) -> None: self.leaves.append(node.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"baz"']) self.assertEqual(visitor.leaves, ['"baz"', '"foobar"']) def test_call_if_inside_transform_attribute(self) -> None: # Set up a simple visitor with a call_if_inside decorator. class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @call_if_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString_lpar(self, node: cst.SimpleString) -> None: self.visits.append(node.value) @call_if_inside(m.FunctionDef()) def leave_SimpleString_lpar(self, node: cst.SimpleString) -> None: self.leaves.append(node.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"baz"']) self.assertEqual(visitor.leaves, ['"baz"', '"foobar"']) def test_call_if_not_inside_visitor_attribute(self) -> None: # Set up a simple visitor with a call_if_inside decorator. 
class TestVisitor(MatcherDecoratableVisitor): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @call_if_not_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString_lpar(self, node: cst.SimpleString) -> None: self.visits.append(node.value) @call_if_not_inside(m.FunctionDef()) def leave_SimpleString_lpar(self, node: cst.SimpleString) -> None: self.leaves.append(node.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"foo"', '"bar"', '"foobar"']) self.assertEqual(visitor.leaves, ['"foo"', '"bar"']) def test_call_if_not_inside_transform_attribute(self) -> None: # Set up a simple visitor with a call_if_inside decorator. class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] self.leaves: List[str] = [] @call_if_not_inside(m.FunctionDef(m.Name("foo"))) def visit_SimpleString_lpar(self, node: cst.SimpleString) -> None: self.visits.append(node.value) @call_if_not_inside(m.FunctionDef()) def leave_SimpleString_lpar(self, node: cst.SimpleString) -> None: self.leaves.append(node.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"foo"', '"bar"', '"foobar"']) self.assertEqual(visitor.leaves, ['"foo"', '"bar"']) def test_init_with_unhashable_types(self) -> None: # Set up a simple visitor with a call_if_inside decorator. class TestVisitor(MatcherDecoratableTransformer): def __init__(self) -> None: super().__init__() self.visits: List[str] = [] @call_if_inside( m.FunctionDef(m.Name("foo"), params=m.Parameters([m.ZeroOrMore()])) ) def visit_SimpleString(self, node: cst.SimpleString) -> None: self.visits.append(node.value) # Parse a module and verify we visited correctly. module = fixture( """ a = "foo" b = "bar" def foo() -> None: return "baz" def bar() -> None: return "foobar" """ ) visitor = TestVisitor() module.visit(visitor) # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"baz"']) class MatchersUnionDecoratorsTest(UnitTest): @skipIf(bool(sys.version_info < (3, 10)), "new union syntax not available") def test_init_with_new_union_annotation(self) -> None: class TransformerWithUnionReturnAnnotation(m.MatcherDecoratableTransformer): @m.leave(m.ImportFrom(module=m.Name(value="typing"))) def test( self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom ) -> cst.ImportFrom | cst.RemovalSentinel: pass # assert that init (specifically _check_types on return annotation) passes TransformerWithUnionReturnAnnotation() LibCST-1.2.0/libcst/matchers/tests/test_extract.py000066400000000000000000000366211456464173300221320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
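# ---------------------------------------------------------------------------
# Editor's note: a hedged, standalone sketch (not part of the LibCST sources)
# of the m.extract() API the tests below exercise: m.SaveMatchedNode(...)
# records whichever node matched under the given name, and extract() returns
# a dict of those captures on success, or None when the overall match fails.
# The capture key "fn" is an arbitrary example name.

import libcst as cst
import libcst.matchers as m

_expr = cst.parse_expression("foo(1)")
_captures = m.extract(_expr, m.Call(func=m.SaveMatchedNode(m.Name(), "fn")))
assert _captures is not None
assert cst.ensure_type(_captures["fn"], cst.Name).value == "foo"
# ---------------------------------------------------------------------------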
from typing import Tuple import libcst as cst import libcst.matchers as m import libcst.metadata as meta from libcst.testing.utils import UnitTest class MatchersExtractTest(UnitTest): def _make_coderange( self, start: Tuple[int, int], end: Tuple[int, int] ) -> meta.CodeRange: return meta.CodeRange( start=meta.CodePosition(line=start[0], column=start[1]), end=meta.CodePosition(line=end[0], column=end[1]), ) def test_extract_sentinel(self) -> None: # Verify behavior when provided a sentinel nothing = m.extract( cst.RemovalSentinel.REMOVE, m.Call(func=m.SaveMatchedNode(m.Name(), name="func")), ) self.assertIsNone(nothing) nothing = m.extract( cst.MaybeSentinel.DEFAULT, m.Call(func=m.SaveMatchedNode(m.Name(), name="func")), ) self.assertIsNone(nothing) def test_extract_tautology(self) -> None: expression = cst.parse_expression("a + b[c], d(e, f * g)") nodes = m.extract( expression, m.SaveMatchedNode( m.Tuple(elements=[m.Element(m.BinaryOperation()), m.Element(m.Call())]), name="node", ), ) self.assertEqual(nodes, {"node": expression}) def test_extract_simple(self) -> None: # Verify true behavior expression = cst.parse_expression("a + b[c], d(e, f * g)") nodes = m.extract( expression, m.Tuple( elements=[ m.Element( m.BinaryOperation(left=m.SaveMatchedNode(m.Name(), "left")) ), m.Element(m.Call()), ] ), ) extracted_node = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[0].value, cst.BinaryOperation, ).left self.assertEqual(nodes, {"left": extracted_node}) # Verify false behavior nodes = m.extract( expression, m.Tuple( elements=[ m.Element( m.BinaryOperation(left=m.SaveMatchedNode(m.Subscript(), "left")) ), m.Element(m.Call()), ] ), ) self.assertIsNone(nodes) def test_extract_multiple(self) -> None: expression = cst.parse_expression("a + b[c], d(e, f * g)") nodes = m.extract( expression, m.Tuple( elements=[ m.Element( m.BinaryOperation(left=m.SaveMatchedNode(m.Name(), "left")) ), m.Element(m.Call(func=m.SaveMatchedNode(m.Name(), "func"))), ] ), ) extracted_node_left = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[0].value, cst.BinaryOperation, ).left extracted_node_func = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call ).func self.assertEqual( nodes, {"left": extracted_node_left, "func": extracted_node_func} ) def test_extract_predicates(self) -> None: expression = cst.parse_expression("a + b[c], d(e, f * g)") nodes = m.extract( expression, m.Tuple( elements=[ m.Element( m.BinaryOperation(left=m.SaveMatchedNode(m.Name(), "left")) ), m.Element( m.Call( func=m.SaveMatchedNode(m.Name(), "func") | m.SaveMatchedNode(m.Attribute(), "attr") ) ), ] ), ) extracted_node_left = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[0].value, cst.BinaryOperation, ).left extracted_node_func = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call ).func self.assertEqual( nodes, {"left": extracted_node_left, "func": extracted_node_func} ) expression = cst.parse_expression("a + b[c], d.z(e, f * g)") nodes = m.extract( expression, m.Tuple( elements=[ m.Element( m.BinaryOperation(left=m.SaveMatchedNode(m.Name(), "left")) ), m.Element( m.Call( func=m.SaveMatchedNode(m.Name(), "func") | m.SaveMatchedNode(m.Attribute(), "attr") ) ), ] ), ) extracted_node_left = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[0].value, cst.BinaryOperation, ).left extracted_node_attr = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call ).func self.assertEqual( nodes, 
{"left": extracted_node_left, "attr": extracted_node_attr} ) def test_extract_metadata(self) -> None: # Verify true behavior module = cst.parse_module("a + b[c], d(e, f * g)") wrapper = cst.MetadataWrapper(module) expression = cst.ensure_type( cst.ensure_type(wrapper.module.body[0], cst.SimpleStatementLine).body[0], cst.Expr, ).value nodes = m.extract( expression, m.Tuple( elements=[ m.Element( m.BinaryOperation( left=m.Name( metadata=m.SaveMatchedNode( m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)), ), "left", ) ) ) ), m.Element(m.Call()), ] ), metadata_resolver=wrapper, ) extracted_node = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[0].value, cst.BinaryOperation, ).left self.assertEqual(nodes, {"left": extracted_node}) # Verify false behavior nodes = m.extract( expression, m.Tuple( elements=[ m.Element( m.BinaryOperation( left=m.Name( metadata=m.SaveMatchedNode( m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 2)), ), "left", ) ) ) ), m.Element(m.Call()), ] ), metadata_resolver=wrapper, ) self.assertIsNone(nodes) def test_extract_precedence_parent(self) -> None: expression = cst.parse_expression("a + b[c], d(e, f * g)") nodes = m.extract( expression, m.Tuple( elements=[ m.DoNotCare(), m.Element( m.SaveMatchedNode( m.Call( args=[ m.Arg(m.SaveMatchedNode(m.Name(), "name")), m.DoNotCare(), ] ), "name", ) ), ] ), ) extracted_node = cst.ensure_type(expression, cst.Tuple).elements[1].value self.assertEqual(nodes, {"name": extracted_node}) def test_extract_precedence_sequence(self) -> None: expression = cst.parse_expression("a + b[c], d(e, f * g)") nodes = m.extract( expression, m.Tuple( elements=[ m.DoNotCare(), m.Element( m.Call( args=[ m.Arg(m.SaveMatchedNode(m.DoNotCare(), "arg")), m.Arg(m.SaveMatchedNode(m.DoNotCare(), "arg")), ] ) ), ] ), ) extracted_node = ( cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call ) .args[1] .value ) self.assertEqual(nodes, {"arg": extracted_node}) def test_extract_precedence_sequence_wildcard(self) -> None: expression = cst.parse_expression("a + b[c], d(e, f * g)") nodes = m.extract( expression, m.Tuple( elements=[ m.DoNotCare(), m.Element( m.Call( args=[ m.ZeroOrMore( m.Arg(m.SaveMatchedNode(m.DoNotCare(), "arg")) ) ] ) ), ] ), ) extracted_node = ( cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call ) .args[1] .value ) self.assertEqual(nodes, {"arg": extracted_node}) def test_extract_optional_wildcard(self) -> None: expression = cst.parse_expression("a + b[c], d(e, f * g)") nodes = m.extract( expression, m.Tuple( elements=[ m.DoNotCare(), m.Element( m.Call( args=[ m.ZeroOrMore(), m.ZeroOrOne( m.Arg(m.SaveMatchedNode(m.Attribute(), "arg")) ), ] ) ), ] ), ) self.assertEqual(nodes, {}) def test_extract_optional_wildcard_head(self) -> None: expression = cst.parse_expression("[3]") nodes = m.extract( expression, m.List( elements=[ m.SaveMatchedNode(m.ZeroOrMore(), "head1"), m.SaveMatchedNode(m.ZeroOrMore(), "head2"), m.Element(value=m.Integer(value="3")), ] ), ) self.assertEqual(nodes, {"head1": (), "head2": ()}) def test_extract_optional_wildcard_tail(self) -> None: expression = cst.parse_expression("[3]") nodes = m.extract( expression, m.List( elements=[ m.Element(value=m.Integer(value="3")), m.SaveMatchedNode(m.ZeroOrMore(), "tail1"), m.SaveMatchedNode(m.ZeroOrMore(), "tail2"), ] ), ) self.assertEqual(nodes, {"tail1": (), "tail2": ()}) def test_extract_optional_wildcard_present(self) -> None: expression = 
cst.parse_expression("a + b[c], d(e, f * g, h.i.j)") nodes = m.extract( expression, m.Tuple( elements=[ m.DoNotCare(), m.Element( m.Call( args=[ m.DoNotCare(), m.DoNotCare(), m.ZeroOrOne( m.Arg(m.SaveMatchedNode(m.Attribute(), "arg")) ), ] ) ), ] ), ) extracted_node = ( cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call ) .args[2] .value ) self.assertEqual(nodes, {"arg": extracted_node}) def test_extract_sequence(self) -> None: expression = cst.parse_expression("a + b[c], d(e, f * g, h.i.j)") nodes = m.extract( expression, m.Tuple( elements=[ m.DoNotCare(), m.Element(m.Call(args=m.SaveMatchedNode([m.ZeroOrMore()], "args"))), ] ), ) extracted_seq = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call ).args self.assertEqual(nodes, {"args": extracted_seq}) def test_extract_sequence_element(self) -> None: # Verify true behavior expression = cst.parse_expression("a + b[c], d(e, f * g, h.i.j)") nodes = m.extract( expression, m.Tuple( elements=[ m.DoNotCare(), m.Element(m.Call(args=[m.SaveMatchedNode(m.ZeroOrMore(), "args")])), ] ), ) extracted_seq = tuple( cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call ).args ) self.assertEqual(nodes, {"args": extracted_seq}) # Verify false behavior nodes = m.extract( expression, m.Tuple( elements=[ m.DoNotCare(), m.Element( m.Call( args=[ m.SaveMatchedNode( m.ZeroOrMore(m.Arg(m.Subscript())), "args" ) ] ) ), ] ), ) self.assertIsNone(nodes) def test_extract_sequence_multiple_wildcards(self) -> None: expression = cst.parse_expression("1, 2, 3, 4") nodes = m.extract( expression, m.Tuple( elements=( m.SaveMatchedNode(m.ZeroOrMore(), "head"), m.SaveMatchedNode(m.Element(value=m.Integer(value="3")), "element"), m.SaveMatchedNode(m.ZeroOrMore(), "tail"), ) ), ) tuple_elements = cst.ensure_type(expression, cst.Tuple).elements self.assertEqual( nodes, { "head": tuple(tuple_elements[:2]), "element": tuple_elements[2], "tail": tuple(tuple_elements[3:]), }, ) LibCST-1.2.0/libcst/matchers/tests/test_findall.py000066400000000000000000000151011456464173300220570ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
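# ---------------------------------------------------------------------------
# Editor's note: a small standalone sketch (not part of the LibCST sources)
# of the findall() + metadata pattern the tests below rely on: passing a
# MetadataWrapper as the tree lets MatchMetadata matchers resolve their
# providers implicitly, without an explicit metadata_resolver argument.

import libcst as cst
import libcst.matchers as m
import libcst.metadata as meta

_wrapper = meta.MetadataWrapper(cst.parse_module("a = 1\n"))
_stores = m.findall(
    _wrapper,
    m.MatchMetadata(meta.ExpressionContextProvider, meta.ExpressionContext.STORE),
)
# Only the assignment target `a` appears in a STORE context.
assert [cst.ensure_type(n, cst.Name).value for n in _stores] == ["a"]
# ---------------------------------------------------------------------------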
from textwrap import dedent from typing import Optional, Sequence import libcst as cst import libcst.matchers as m import libcst.metadata as meta from libcst.matchers import extractall, findall from libcst.testing.utils import UnitTest class MatchersFindAllTest(UnitTest): def assertNodeSequenceEqual( self, seq1: Sequence[cst.CSTNode], seq2: Sequence[cst.CSTNode], msg: Optional[str] = None, ) -> None: suffix = "" if msg is None else f"\n{msg}" if len(seq1) != len(seq2): raise AssertionError( f"\n{seq1!r}\nis not deeply equal to \n{seq2!r}{suffix}" ) for node1, node2 in zip(seq1, seq2): if not node1.deep_equals(node2): raise AssertionError( f"\n{seq1!r}\nis not deeply equal to \n{seq2!r}{suffix}" ) def test_findall_with_sentinels(self) -> None: # Verify behavior when provided a sentinel nothing = findall(cst.RemovalSentinel.REMOVE, m.Name("True") | m.Name("False")) self.assertNodeSequenceEqual(nothing, []) nothing = findall(cst.MaybeSentinel.DEFAULT, m.Name("True") | m.Name("False")) self.assertNodeSequenceEqual(nothing, []) def test_simple_findall(self) -> None: # Find all booleans in a tree code = """ a = 1 b = True def foo(bar: int) -> bool: return False """ module = cst.parse_module(dedent(code)) booleans = findall(module, m.Name("True") | m.Name("False")) self.assertNodeSequenceEqual(booleans, [cst.Name("True"), cst.Name("False")]) def test_findall_with_metadata_wrapper(self) -> None: # Find all assignments in a tree code = """ a = 1 b = True def foo(bar: int) -> bool: return False """ module = cst.parse_module(dedent(code)) wrapper = meta.MetadataWrapper(module) # Test that when we find over a wrapper, we implicitly use it for # metadata as well as traversal. booleans = findall( wrapper, m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ), ) self.assertNodeSequenceEqual( booleans, [ cst.Name("a"), cst.Name("b"), cst.Name("foo"), cst.Name("bar"), ], ) # Test that we can provide an explicit resolver and tree booleans = findall( wrapper.module, m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ), metadata_resolver=wrapper, ) self.assertNodeSequenceEqual( booleans, [ cst.Name("a"), cst.Name("b"), cst.Name("foo"), cst.Name("bar"), ], ) # Test that failing to provide metadata leads to raising an informative exception with self.assertRaises( LookupError, msg="ExpressionContextProvider is not resolved; did you forget a MetadataWrapper?", ): booleans = findall( wrapper.module, m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ), ) def test_findall_with_visitors(self) -> None: # Find all assignments in a tree class TestVisitor(m.MatcherDecoratableVisitor): METADATA_DEPENDENCIES: Sequence[meta.ProviderT] = ( meta.ExpressionContextProvider, ) def __init__(self) -> None: super().__init__() self.results: Sequence[cst.CSTNode] = () def visit_Module(self, node: cst.Module) -> None: self.results = self.findall( node, m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ), ) code = """ a = 1 b = True def foo(bar: int) -> bool: return False """ module = cst.parse_module(dedent(code)) wrapper = meta.MetadataWrapper(module) visitor = TestVisitor() wrapper.visit(visitor) self.assertNodeSequenceEqual( visitor.results, [ cst.Name("a"), cst.Name("b"), cst.Name("foo"), cst.Name("bar"), ], ) def test_findall_with_transformers(self) -> None: # Find all assignments in a tree class TestTransformer(m.MatcherDecoratableTransformer): METADATA_DEPENDENCIES: Sequence[meta.ProviderT] = ( 
meta.ExpressionContextProvider, ) def __init__(self) -> None: super().__init__() self.results: Sequence[cst.CSTNode] = () def visit_Module(self, node: cst.Module) -> None: self.results = self.findall( node, m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ), ) code = """ a = 1 b = True def foo(bar: int) -> bool: return False """ module = cst.parse_module(dedent(code)) wrapper = meta.MetadataWrapper(module) visitor = TestTransformer() wrapper.visit(visitor) self.assertNodeSequenceEqual( visitor.results, [ cst.Name("a"), cst.Name("b"), cst.Name("foo"), cst.Name("bar"), ], ) class MatchersExtractAllTest(UnitTest): def test_extractall_simple(self) -> None: expression = cst.parse_expression("a + b[c], d(e, f * g, h.i.j)") matches = extractall(expression, m.Arg(m.SaveMatchedNode(~m.Name(), "expr"))) extracted_args = cst.ensure_type( cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call ).args self.assertEqual( matches, [{"expr": extracted_args[1].value}, {"expr": extracted_args[2].value}], ) LibCST-1.2.0/libcst/matchers/tests/test_matchers.py000066400000000000000000001512241456464173300222630ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import dataclasses import libcst as cst import libcst.matchers as m from libcst.matchers import matches from libcst.testing.utils import UnitTest class MatchersMatcherTest(UnitTest): def test_simple_matcher_true(self) -> None: # Match based on identical attributes. self.assertTrue(matches(cst.Name("foo"), m.Name("foo"))) def test_simple_matcher_false(self) -> None: # Fail to match due to incorrect value on Name. self.assertFalse(matches(cst.Name("foo"), m.Name("bar"))) def test_complex_matcher_true(self) -> None: # Match on any Call, not caring about arguments. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(), ) ) # Match on any Call to a function named "foo". self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(m.Name("foo")), ) ) # Match on any Call to a function named "foo" with three arguments. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(func=m.Name("foo"), args=(m.Arg(), m.Arg(), m.Arg())), ) ) # Match any Call to a function named "foo" with three integer arguments. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer()), m.Arg(m.Integer()), m.Arg(m.Integer())), ), ) ) # Match any Call to a function named "foo" with integer arguments 1, 2, 3. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=( m.Arg(m.Integer("1")), m.Arg(m.Integer("2")), m.Arg(m.Integer("3")), ), ), ) ) # Match any Call to a function named "foo" with three arguments, the last one # being the integer 3. 
self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.DoNotCare(), m.DoNotCare(), m.Arg(m.Integer("3"))), ), ) ) def test_complex_matcher_false(self) -> None: # Fail to match since this is a Call, not a FunctionDef. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.FunctionDef(), ) ) # Fail to match a function named "bar". self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(m.Name("bar")), ) ) # Fail to match a function named "foo" with two arguments. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(func=m.Name("foo"), args=(m.Arg(), m.Arg())), ) ) # Fail to match a function named "foo" with three integer arguments # 3, 2, 1. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=( m.Arg(m.Integer("3")), m.Arg(m.Integer("2")), m.Arg(m.Integer("1")), ), ), ) ) # Fail to match a function named "foo" with three arguments, the last one # being the integer 1. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.DoNotCare(), m.DoNotCare(), m.Arg(m.Integer("1"))), ), ) ) def test_type_of_matcher_true(self) -> None: self.assertTrue(matches(cst.Name("true"), m.TypeOf(m.Name))) self.assertTrue(matches(cst.Name("true"), m.TypeOf(m.Name)(value="true"))) self.assertTrue(matches(cst.Name("true"), m.Name | m.Float | m.SimpleString)) self.assertTrue( matches(cst.SimpleString("'foo'"), m.TypeOf(m.Name, m.SimpleString)) ) self.assertTrue( matches( cst.SimpleString("'foo'"), m.TypeOf(m.Name, m.SimpleString)(value="'foo'"), ) ) with self.assertRaises(Exception): # pyre-ignore m.TypeOf(cst.Float)(value=1.0) | cst.Name with self.assertRaises(TypeError): # pyre-ignore m.TypeOf(cst.Float) & cst.SimpleString for case in ( cst.BinaryOperation( left=cst.Name("foo"), operator=cst.Add(), right=cst.Name("bar") ), cst.BooleanOperation( left=cst.Name("foo"), operator=cst.Or(), right=cst.Name("bar") ), ): self.assertTrue( matches( case, (m.BinaryOperation | m.BooleanOperation)(left=m.Name("foo")) ) ) new_case = dataclasses.replace(case, left=case.right, right=case.left) self.assertTrue( matches( new_case, ~(m.BinaryOperation | m.BooleanOperation)(left=m.Name("foo")), ) ) def test_type_of_matcher_false(self) -> None: self.assertFalse(matches(cst.Name("true"), m.TypeOf(m.SimpleString))) self.assertFalse(matches(cst.Name("true"), m.TypeOf(m.Name)(value="false"))) self.assertFalse( matches(cst.Name("true"), m.TypeOf(m.SimpleString)(value="true")) ) self.assertFalse( matches(cst.SimpleString("'foo'"), m.TypeOf(m.Name, m.Attribute)) ) self.assertFalse( matches( cst.SimpleString("'foo'"), m.TypeOf(m.Name, m.Attribute)(value="'foo'") ) ) self.assertFalse( matches( cst.SimpleString("'foo'"), m.TypeOf(m.Name, m.SimpleString)(value="'bar'"), ) ) for case in ( cst.BinaryOperation( left=cst.Name("foo"), operator=cst.Add(), right=cst.Name("bar") ), cst.BooleanOperation( left=cst.Name("foo"), operator=cst.Or(), 
right=cst.Name("bar") ), ): self.assertFalse( matches( case, (m.BinaryOperation | m.BooleanOperation)(left=m.Name("bar")) ) ) self.assertFalse( matches( case, ~(m.BinaryOperation | m.BooleanOperation)(left=m.Name("foo")) ) ) def test_or_matcher_true(self) -> None: # Match on either True or False identifier. self.assertTrue( matches(cst.Name("True"), m.OneOf(m.Name("True"), m.Name("False"))) ) # Match when one of the option is a TypeOf self.assertTrue( matches( cst.Name("True"), m.OneOf(m.TypeOf(m.Name, m.NameItem)("True"), m.Name("False")), ) ) # Match any assignment that assigns a value of True or False to an # unspecified target. self.assertTrue( matches( cst.Assign((cst.AssignTarget(cst.Name("x")),), cst.Name("True")), m.Assign(value=m.OneOf(m.Name("True"), m.Name("False"))), ) ) self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=m.OneOf( ( m.Arg(m.Integer("3")), m.Arg(m.Integer("2")), m.Arg(m.Integer("1")), ), ( m.Arg(m.Integer("1")), m.Arg(m.Integer("2")), m.Arg(m.Integer("3")), ), ), ), ) ) def test_or_matcher_false(self) -> None: # Fail to match since None is not True or False. self.assertFalse( matches(cst.Name("None"), m.OneOf(m.Name("True"), m.Name("False"))) ) # Fail to match since assigning None to a target is not the same as # assigning True or False to a target. self.assertFalse( matches( cst.Assign((cst.AssignTarget(cst.Name("x")),), cst.Name("None")), m.Assign(value=m.OneOf(m.Name("True"), m.Name("False"))), ) ) self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=m.OneOf( ( m.Arg(m.Integer("3")), m.Arg(m.Integer("2")), m.Arg(m.Integer("1")), ), ( m.Arg(m.Integer("4")), m.Arg(m.Integer("5")), m.Arg(m.Integer("6")), ), ), ), ) ) def test_or_operator_matcher_true(self) -> None: # Match on either True or False identifier. self.assertTrue(matches(cst.Name("True"), m.Name("True") | m.Name("False"))) # Match on either True or False identifier. self.assertTrue(matches(cst.Name("False"), m.Name("True") | m.Name("False"))) # Match on either True, False or None identifier. self.assertTrue( matches(cst.Name("None"), m.Name("True") | m.Name("False") | m.Name("None")) ) # Match any assignment that assigns a value of True or False to an # unspecified target. self.assertTrue( matches( cst.Assign((cst.AssignTarget(cst.Name("x")),), cst.Name("True")), m.Assign(value=m.Name("True") | m.Name("False")), ) ) def test_or_operator_matcher_false(self) -> None: # Fail to match since None is not True or False. self.assertFalse(matches(cst.Name("None"), m.Name("True") | m.Name("False"))) # Fail to match since assigning None to a target is not the same as # assigning True or False to a target. self.assertFalse( matches( cst.Assign((cst.AssignTarget(cst.Name("x")),), cst.Name("None")), m.Assign(value=m.Name("True") | m.Name("False")), ) ) def test_zero_or_more_matcher_no_args_true(self) -> None: # Match a function call to "foo" with any number of arguments as # long as the first one is an integer with the value 1. 
self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.ZeroOrMore()) ), ) ) # Match a function call to "foo" with any number of arguments as # long as one of them is an integer with the value 1. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.ZeroOrMore(), m.Arg(m.Integer("1")), m.ZeroOrMore()), ), ) ) # Match a function call to "foo" with any number of arguments as # long as one of them is an integer with the value 2. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.ZeroOrMore(), m.Arg(m.Integer("2")), m.ZeroOrMore()), ), ) ) # Match a function call to "foo" with any number of arguments as # long as one of them is an integer with the value 3. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.ZeroOrMore(), m.Arg(m.Integer("3")), m.ZeroOrMore()), ), ) ) # Match a function call to "foo" with any number of arguments as # long as the last one is an integer with the value 3. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.ZeroOrMore(), m.Arg(m.Integer("3"))) ), ) ) # Match a function call to "foo" with any number of arguments as # long as there are two arguments with the values 1 and 3 anywhere # in the argument list, respecting order. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=( m.ZeroOrMore(), m.Arg(m.Integer("1")), m.ZeroOrMore(), m.Arg(m.Integer("3")), m.ZeroOrMore(), ), ), ) ) # Match a function call to "foo" with any number of arguments as # long as there are three arguments with the values 1, 2 and 3 anywhere # in the argument list, respecting order. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=( m.ZeroOrMore(), m.Arg(m.Integer("1")), m.ZeroOrMore(), m.Arg(m.Integer("2")), m.ZeroOrMore(), m.Arg(m.Integer("3")), m.ZeroOrMore(), ), ), ) ) def test_at_least_n_matcher_no_args_true(self) -> None: # Match a function call to "foo" with at least one argument. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(func=m.Name("foo"), args=(m.AtLeastN(n=1),)), ) ) # Match a function call to "foo" with at least two arguments. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(func=m.Name("foo"), args=(m.AtLeastN(n=2),)), ) ) # Match a function call to "foo" with at least three arguments. 
self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(func=m.Name("foo"), args=(m.AtLeastN(n=3),)), ) ) # Match a function call to "foo" with at least two arguments, the # first one being the integer 1. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.AtLeastN(n=1)) ), ) ) # Match a function call to "foo" with at least three arguments, the # first one being the integer 1. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.AtLeastN(n=2)) ), ) ) # Match a function call to "foo" with at least three arguments. # There should be an argument with the value 2, which should have # at least one argument before and one argument after. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.AtLeastN(n=1), m.Arg(m.Integer("2")), m.AtLeastN(n=1)), ), ) ) # Match a function call to "foo" with at least two arguments, the last # one being the value 3. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.AtLeastN(n=1), m.Arg(m.Integer("3"))) ), ) ) # Match a function call to "foo" with at least three arguments, the last # one being the value 3. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.AtLeastN(n=2), m.Arg(m.Integer("3"))) ), ) ) def test_at_least_n_matcher_no_args_false(self) -> None: # Fail to match a function call to "foo" with at least four arguments. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(func=m.Name("foo"), args=(m.AtLeastN(n=4),)), ) ) # Fail to match a function call to "foo" with at least four arguments, # the first one being the value 1. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.AtLeastN(n=3)) ), ) ) # Fail to match a function call to "foo" with at least three arguments, # the last one being the value 2. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.AtLeastN(n=2), m.Arg(m.Integer("2"))) ), ) ) def test_zero_or_more_matcher_args_true(self) -> None: # Match a function call to "foo" where the first argument is the integer # value 1, and the rest of the arguments are wildcards. 
self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.ZeroOrMore(m.Arg())), ), ) ) # Match a function call to "foo" where the first argument is the integer # value 1, and the rest of the arguments are integers of any value. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.ZeroOrMore(m.Arg(m.Integer()))), ), ) ) # Match a function call to "foo" with zero or more arguments, where the # first argument can optionally be the integer 1 or 2, and the second # can only be the integer 2. This case verifies non-greedy behavior in the # matcher. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=( m.ZeroOrMore(m.Arg(m.OneOf(m.Integer("1"), m.Integer("2")))), m.Arg(m.Integer("2")), m.ZeroOrMore(), ), ), ) ) # Match a function call to "foo" where the first argument is the integer # value 1, and the rest of the arguments are integers with the value # 2 or 3. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=( m.Arg(m.Integer("1")), m.ZeroOrMore(m.Arg(m.OneOf(m.Integer("2"), m.Integer("3")))), ), ), ) ) def test_zero_or_more_matcher_args_false(self) -> None: # Fail to match a function call to "foo" where the first argument is the # integer value 1, and the rest of the arguments are strings. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.ZeroOrMore(m.Arg(m.SimpleString()))), ), ) ) # Fail to match a function call to "foo" where the first argument is the # integer value 1, and the rest of the arguments are integers with the # value 2. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.ZeroOrMore(m.Arg(m.Integer("2")))), ), ) ) def test_at_least_n_matcher_args_true(self) -> None: # Match a function call to "foo" where the first argument is the integer # value 1, and there are at least two wildcard arguments after. 
self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=( m.Arg(m.Integer("1")), m.AtLeastN(m.Arg(m.OneOf(m.Integer("2"), m.Integer("3"))), n=2), ), ), ) ) def test_at_least_n_matcher_args_false(self) -> None: # Fail to match a function call to "foo" where the first argument is the # integer value 1, and there are at least two arguments after that are # strings. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=( m.Arg(m.Integer("1")), m.AtLeastN(m.Arg(m.SimpleString()), n=2), ), ), ) ) # Fail to match a function call to "foo" where the first argument is the integer # value 1, and there are at least three wildcard arguments after. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.AtLeastN(m.Arg(), n=3)), ), ) ) # Fail to match a function call to "foo" where the first argument is the # integer value 1, and there are at least two arguements that are integers with # the value 2 after. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=( m.Arg(m.Integer("1")), m.AtLeastN(m.Arg(m.Integer("2")), n=2), ), ), ) ) def test_at_most_n_matcher_no_args_true(self) -> None: # Match a function call to "foo" with at most two arguments. self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call(func=m.Name("foo"), args=(m.AtMostN(n=2),)), ) ) # Match a function call to "foo" with at most two arguments. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2"))), ), m.Call(func=m.Name("foo"), args=(m.AtMostN(n=2),)), ) ) # Match a function call to "foo" with at most six arguments, the last # one being the integer 1. self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call( func=m.Name("foo"), args=[m.AtMostN(n=5), m.Arg(m.Integer("1"))] ), ) ) # Match a function call to "foo" with at most six arguments, the last # one being the integer 1. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2"))), ), m.Call( func=m.Name("foo"), args=(m.AtMostN(n=5), m.Arg(m.Integer("2"))) ), ) ) # Match a function call to "foo" with at most six arguments, the first # one being the integer 1. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2"))), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.AtMostN(n=5)) ), ) ) # Match a function call to "foo" with at most six arguments, the first # one being the integer 1. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2"))), ), m.Call(func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.ZeroOrOne())), ) ) def test_at_most_n_matcher_no_args_false(self) -> None: # Fail to match a function call to "foo" with at most two arguments. 
self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(func=m.Name("foo"), args=(m.AtMostN(n=2),)), ) ) # Fail to match a function call to "foo" with at most two arguments, # the last one being the integer 3. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.AtMostN(n=1), m.Arg(m.Integer("3"))) ), ) ) # Fail to match a function call to "foo" with at most two arguments, # the last one being the integer 3. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call(func=m.Name("foo"), args=(m.ZeroOrOne(), m.Arg(m.Integer("3")))), ) ) def test_at_most_n_matcher_args_true(self) -> None: # Match a function call to "foo" with at most two arguments, both of which # are the integer 1. self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call( func=m.Name("foo"), args=(m.AtMostN(m.Arg(m.Integer("1")), n=2),) ), ) ) # Match a function call to "foo" with at most two arguments, both of which # can be the integer 1 or 2. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2"))), ), m.Call( func=m.Name("foo"), args=( m.AtMostN(m.Arg(m.OneOf(m.Integer("1"), m.Integer("2"))), n=2), ), ), ) ) # Match a function call to "foo" with at most two arguments, the first # one being the integer 1 and the second one, if included, being the # integer 2. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2"))), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.ZeroOrOne(m.Arg(m.Integer("2")))), ), ) ) # Match a function call to "foo" with at most six arguments, the first # one being the integer 1 and any later ones being the # integer 2. self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2"))), ), m.Call( func=m.Name("foo"), args=(m.Arg(m.Integer("1")), m.AtMostN(m.Arg(m.Integer("2")), n=5)), ), ) ) def test_at_most_n_matcher_args_false(self) -> None: # Fail to match a function call to "foo" with at most three arguments, # all of which are the integer 4. self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=(m.AtMostN(m.Arg(m.Integer("4")), n=3),) ), ) ) def test_lambda_matcher_true(self) -> None: # Match based on identical attributes. self.assertTrue( matches( cst.Name("foo"), m.Name(value=m.MatchIfTrue(lambda value: "o" in value)) ) ) def test_lambda_matcher_false(self) -> None: # Fail to match due to incorrect value on Name. self.assertFalse( matches( cst.Name("foo"), m.Name(value=m.MatchIfTrue(lambda value: "a" in value)) ) ) def test_regex_matcher_true(self) -> None: # Match based on identical attributes. self.assertTrue(matches(cst.Name("foo"), m.Name(value=m.MatchRegex(r".*o.*")))) def test_regex_matcher_false(self) -> None: # Fail to match due to incorrect value on Name. self.assertFalse(matches(cst.Name("foo"), m.Name(value=m.MatchRegex(r".*a.*")))) def test_and_matcher_true(self) -> None: # Match on True identifier in roundabout way.
self.assertTrue( matches( cst.Name("True"), m.AllOf(m.Name(), m.Name(value=m.MatchRegex(r"True"))) ) ) self.assertTrue( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=m.AllOf( (m.Arg(), m.Arg(), m.Arg()), ( m.Arg(m.Integer("1")), m.Arg(m.Integer("2")), m.Arg(m.Integer("3")), ), ), ), ) ) def test_and_matcher_false(self) -> None: # Fail to match since True and False cannot match. self.assertFalse( matches(cst.Name("None"), m.AllOf(m.Name("True"), m.Name("False"))) ) self.assertFalse( matches( cst.Call( func=cst.Name("foo"), args=( cst.Arg(cst.Integer("1")), cst.Arg(cst.Integer("2")), cst.Arg(cst.Integer("3")), ), ), m.Call( func=m.Name("foo"), args=m.AllOf( (m.Arg(), m.Arg(), m.Arg()), ( m.Arg(m.Integer("3")), m.Arg(m.Integer("2")), m.Arg(m.Integer("1")), ), ), ), ) ) def test_and_operator_matcher_true(self) -> None: # Match on True identifier in roundabout way. self.assertTrue( matches(cst.Name("True"), m.Name() & m.Name(value=m.MatchRegex(r"True"))) ) # Match in a really roundabout way that verifies the __and__ behavior on # AllOf itself. self.assertTrue( matches( cst.Name("True"), m.Name() & m.Name(value=m.MatchRegex(r"True")) & m.Name("True"), ) ) # Verify that MatchIfTrue works with __and__ behavior properly. self.assertTrue( matches( cst.Name("True"), m.MatchIfTrue(lambda x: isinstance(x, cst.Name)) & m.Name(value=m.MatchRegex(r"True")), ) ) self.assertTrue( matches( cst.Name("True"), m.Name(value=m.MatchRegex(r"True")) & m.MatchIfTrue(lambda x: isinstance(x, cst.Name)), ) ) def test_and_operator_matcher_false(self) -> None: # Fail to match since True and False cannot match. self.assertFalse(matches(cst.Name("None"), m.Name("True") & m.Name("False"))) def test_does_not_match_true(self) -> None: # Match on any call that takes one argument that isn't the value None. self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("True")),)), m.Call(args=(m.Arg(value=m.DoesNotMatch(m.Name("None"))),)), ) ) self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call(args=(m.DoesNotMatch(m.Arg(m.Name("None"))),)), ) ) self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call(args=m.DoesNotMatch((m.Arg(m.Integer("2")),))), ) ) # Match any call that takes an argument which isn't True or False. self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call( args=( m.Arg( value=m.DoesNotMatch( m.OneOf(m.Name("True"), m.Name("False")) ) ), ) ), ) ) # Match any name node that doesn't match the regex for True self.assertTrue( matches( cst.Name("False"), m.Name(value=m.DoesNotMatch(m.MatchRegex(r"True"))) ) ) def test_does_not_match_operator_true(self) -> None: # Match on any call that takes one argument that isn't the value None. self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("True")),)), m.Call(args=(m.Arg(value=~(m.Name("None"))),)), ) ) self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call(args=(~(m.Arg(m.Name("None"))),)), ) ) # Match any call that takes an argument which isn't True or False.
self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call(args=(m.Arg(value=~(m.Name("True") | m.Name("False"))),)), ) ) self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("None")),)), m.Call(args=(m.Arg(value=(~(m.Name("True"))) & (~(m.Name("False")))),)), ) ) # Roundabout way to verify that or operator works with inverted nodes. self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("False")),)), m.Call(args=(m.Arg(value=(~(m.Name("True"))) | (~(m.Name("True")))),)), ) ) # Roundabout way to verify that inverse operator works properly on AllOf. self.assertTrue( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call(args=(m.Arg(value=~(m.Name() & m.Name("True"))),)), ) ) # Match any name node that doesn't match the regex for True self.assertTrue( matches(cst.Name("False"), m.Name(value=~(m.MatchRegex(r"True")))) ) def test_does_not_match_false(self) -> None: # Match on any call that takes one argument that isn't the value None. self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("None")),)), m.Call(args=(m.Arg(value=m.DoesNotMatch(m.Name("None"))),)), ) ) self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call(args=(m.DoesNotMatch(m.Arg(m.Integer("1"))),)), ) ) self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call(args=m.DoesNotMatch((m.Arg(m.Integer("1")),))), ) ) # Match any call that takes an argument which isn't True or False. self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("False")),)), m.Call( args=( m.Arg( value=m.DoesNotMatch( m.OneOf(m.Name("True"), m.Name("False")) ) ), ) ), ) ) self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("True")),)), m.Call(args=(m.Arg(value=(~(m.Name("True"))) & (~(m.Name("False")))),)), ) ) # Match any name node that doesn't match the regex for True self.assertFalse( matches( cst.Name("True"), m.Name(value=m.DoesNotMatch(m.MatchRegex(r"True"))) ) ) def test_does_not_match_operator_false(self) -> None: # Match on any call that takes one argument that isn't the value None. self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("None")),)), m.Call(args=(m.Arg(value=~(m.Name("None"))),)), ) ) self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Integer("1")),)), m.Call(args=((~(m.Arg(m.Integer("1")))),)), ) ) # Match any call that takes an argument which isn't True or False. self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("False")),)), m.Call(args=(m.Arg(value=~(m.Name("True") | m.Name("False"))),)), ) ) # Roundabout way of verifying ~(x&y) behavior. self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("False")),)), m.Call(args=(m.Arg(value=~(m.Name() & m.Name("False"))),)), ) ) # Roundabout way of verifying (~x)|(~y) behavior self.assertFalse( matches( cst.Call(func=cst.Name("foo"), args=(cst.Arg(cst.Name("True")),)), m.Call(args=(m.Arg(value=(~(m.Name("True"))) | (~(m.Name("True")))),)), ) ) # Match any name node that doesn't match the regex for True self.assertFalse( matches(cst.Name("True"), m.Name(value=~(m.MatchRegex(r"True")))) ) def test_inverse_inverse_is_identity(self) -> None: # Verify that we don't wrap an InverseOf in an InverseOf in normal circumstances. 
identity = m.Name("True") self.assertTrue(m.DoesNotMatch(m.DoesNotMatch(identity)) is identity) self.assertTrue((~(~identity)) is identity) LibCST-1.2.0/libcst/matchers/tests/test_matchers_with_metadata.py000066400000000000000000000552251456464173300251620ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from textwrap import dedent from typing import Sequence, Set, Tuple import libcst as cst import libcst.matchers as m import libcst.metadata as meta from libcst.matchers import matches from libcst.testing.utils import UnitTest class MatchersMetadataTest(UnitTest): def _make_fixture( self, code: str ) -> Tuple[cst.BaseExpression, meta.MetadataWrapper]: module = cst.parse_module(dedent(code)) wrapper = cst.MetadataWrapper(module) return ( cst.ensure_type( cst.ensure_type(wrapper.module.body[0], cst.SimpleStatementLine).body[ 0 ], cst.Expr, ).value, wrapper, ) def _make_coderange( self, start: Tuple[int, int], end: Tuple[int, int] ) -> meta.CodeRange: return meta.CodeRange( start=meta.CodePosition(line=start[0], column=start[1]), end=meta.CodePosition(line=end[0], column=end[1]), ) def test_simple_matcher_true(self) -> None: # Match on a simple node based on the type and the position. node, wrapper = self._make_fixture("foo") self.assertTrue( matches( node, m.Name( value="foo", metadata=m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 3)), ), ), metadata_resolver=wrapper, ) ) # Match on any binary expression where the two children are in exact spots. node, wrapper = self._make_fixture("a + b") self.assertTrue( matches( node, m.BinaryOperation( left=m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)), ), right=m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 4), (1, 5)), ), ), metadata_resolver=wrapper, ) ) def test_simple_matcher_false(self) -> None: # Fail to match on a simple node based on the type and the position. node, wrapper = self._make_fixture("foo") self.assertFalse( matches( node, m.Name( value="foo", metadata=m.MatchMetadata( meta.PositionProvider, self._make_coderange((2, 0), (2, 3)), ), ), metadata_resolver=wrapper, ) ) # Fail to match on any binary expression where the two children are in exact spots. node, wrapper = self._make_fixture("foo + bar") self.assertFalse( matches( node, m.BinaryOperation( left=m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)), ), right=m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 4), (1, 5)), ), ), metadata_resolver=wrapper, ) ) def test_predicate_logic(self) -> None: # Verify that we can or things together. 
matcher = m.BinaryOperation( left=m.OneOf( m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)) ), m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 2)) ), ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("12 + 3") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("123 + 4") self.assertFalse(matches(node, matcher, metadata_resolver=wrapper)) # Verify that we can and things together matcher = m.BinaryOperation( left=m.AllOf( m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)) ), m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.LOAD ), ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("ab + cd") self.assertFalse(matches(node, matcher, metadata_resolver=wrapper)) # Verify that we can not things matcher = m.BinaryOperation( left=m.DoesNotMatch( m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) def test_predicate_logic_operators(self) -> None: # Verify that we can or things together. matcher = m.BinaryOperation( left=( m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)) ) | m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 2)) ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("12 + 3") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("123 + 4") self.assertFalse(matches(node, matcher, metadata_resolver=wrapper)) # Verify that we can and things together matcher = m.BinaryOperation( left=( m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)) ) & m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.LOAD ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("ab + cd") self.assertFalse(matches(node, matcher, metadata_resolver=wrapper)) # Verify that we can not things matcher = m.BinaryOperation( left=( ~( m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ) ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) def test_predicate_logic_on_attributes(self) -> None: # Verify that we can or things together. 
matcher = m.BinaryOperation( left=m.Name( metadata=m.OneOf( m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)), ), m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 2)), ), ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) matcher = m.BinaryOperation( left=m.Integer( metadata=m.OneOf( m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)), ), m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 2)), ), ) ) ) node, wrapper = self._make_fixture("12 + 3") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("123 + 4") self.assertFalse(matches(node, matcher, metadata_resolver=wrapper)) # Verify that we can and things together matcher = m.BinaryOperation( left=m.Name( metadata=m.AllOf( m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)), ), m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.LOAD ), ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("ab + cd") self.assertFalse(matches(node, matcher, metadata_resolver=wrapper)) # Verify that we can not things matcher = m.BinaryOperation( left=m.Name( metadata=m.DoesNotMatch( m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ) ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) def test_predicate_logic_operators_on_attributes(self) -> None: # Verify that we can or things together. matcher = m.BinaryOperation( left=m.Name( metadata=m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)) ) | m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 2)) ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) matcher = m.BinaryOperation( left=m.Integer( metadata=m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)) ) | m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 2)) ) ) ) node, wrapper = self._make_fixture("12 + 3") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("123 + 4") self.assertFalse(matches(node, matcher, metadata_resolver=wrapper)) # Verify that we can and things together matcher = m.BinaryOperation( left=m.Name( metadata=m.MatchMetadata( meta.PositionProvider, self._make_coderange((1, 0), (1, 1)) ) & m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.LOAD ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) node, wrapper = self._make_fixture("ab + cd") self.assertFalse(matches(node, matcher, metadata_resolver=wrapper)) # Verify that we can not things matcher = m.BinaryOperation( left=m.Name( metadata=~( m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ) ) ) ) node, wrapper = self._make_fixture("a + b") self.assertTrue(matches(node, matcher, metadata_resolver=wrapper)) def test_lambda_metadata_matcher(self) -> None: # Match on qualified name provider module = cst.parse_module( "from typing import List\n\ndef foo() -> None: pass\n" ) wrapper = cst.MetadataWrapper(module) functiondef = cst.ensure_type(wrapper.module.body[1], cst.FunctionDef) self.assertTrue( matches( functiondef, m.FunctionDef( 
name=m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any( n.name in {"foo", "bar", "baz"} for n in qualnames ), ) ), metadata_resolver=wrapper, ) ) self.assertFalse( matches( functiondef, m.FunctionDef( name=m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any( n.name in {"bar", "baz"} for n in qualnames ), ) ), metadata_resolver=wrapper, ) ) def test_lambda_metadata_matcher_with_unresolved_metadata(self) -> None: # Match on qualified name provider module = cst.parse_module( "from typing import List\n\ndef foo() -> None: pass\n" ) functiondef = cst.ensure_type(module.body[1], cst.FunctionDef) # Test that when the metadata is unresolved, raise an informative exception. with self.assertRaises( LookupError, msg="QualifiedNameProvider is not resolved; did you forget a MetadataWrapper?", ): matches( functiondef, m.FunctionDef( name=m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any( n.name in {"foo", "bar", "baz"} for n in qualnames ), ) ), ) def test_lambda_metadata_matcher_with_no_metadata(self) -> None: class VoidProvider(meta.BatchableMetadataProvider[object]): """A dummy metadata provider""" module = cst.parse_module( "from typing import List\n\ndef foo() -> None: pass\n" ) wrapper = cst.MetadataWrapper(module) functiondef = cst.ensure_type(wrapper.module.body[1], cst.FunctionDef) # Test that when the node has no corresponding metadata, there is no match. self.assertFalse( matches( functiondef, m.FunctionDef(name=m.MatchMetadataIfTrue(VoidProvider, lambda _: True)), metadata_resolver=wrapper, ) ) def test_lambda_metadata_matcher_operators(self) -> None: # Match on qualified name provider module = cst.parse_module( "from typing import List\n\ndef bar() -> None: pass\n" ) wrapper = cst.MetadataWrapper(module) functiondef = cst.ensure_type(wrapper.module.body[1], cst.FunctionDef) self.assertTrue( matches( functiondef, m.FunctionDef( name=m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any(n.name == "foo" for n in qualnames), ) | m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any(n.name == "bar" for n in qualnames), ) ), metadata_resolver=wrapper, ) ) self.assertFalse( matches( functiondef, m.FunctionDef( name=m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any(n.name == "foo" for n in qualnames), ) & m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any(n.name == "bar" for n in qualnames), ) ), metadata_resolver=wrapper, ) ) self.assertTrue( matches( functiondef, m.FunctionDef( name=( ~m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any(n.name == "foo" for n in qualnames), ) ) & m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any(n.name == "bar" for n in qualnames), ) ), metadata_resolver=wrapper, ) ) class MatchersVisitorMetadataTest(UnitTest): def _make_fixture(self, code: str) -> cst.MetadataWrapper: return cst.MetadataWrapper(cst.parse_module(dedent(code))) def test_matches_on_visitors(self) -> None: # Set up a simple visitor that has a metadata dependency, try to use it in matchers. class TestVisitor(m.MatcherDecoratableVisitor): METADATA_DEPENDENCIES: Sequence[meta.ProviderT] = ( meta.ExpressionContextProvider, ) def __init__(self) -> None: super().__init__() self.match_names: Set[str] = set() def visit_Name(self, node: cst.Name) -> None: # Only match name nodes that are being assigned to. 
if self.matches( node, m.Name( metadata=m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ) ), ): self.match_names.add(node.value) module = self._make_fixture( """ a = 1 + 2 b = 3 + 4 + d + e def foo() -> str: c = "baz" return c def bar() -> int: return b del foo del bar """ ) visitor = TestVisitor() module.visit(visitor) self.assertEqual(visitor.match_names, {"a", "b", "c", "foo", "bar"}) def test_matches_on_transformers(self) -> None: # Set up a simple visitor that has a metadata dependency, try to use it in matchers. class TestTransformer(m.MatcherDecoratableTransformer): METADATA_DEPENDENCIES: Sequence[meta.ProviderT] = ( meta.ExpressionContextProvider, ) def __init__(self) -> None: super().__init__() self.match_names: Set[str] = set() def visit_Name(self, node: cst.Name) -> None: # Only match name nodes that are being assigned to. if self.matches( node, m.Name( metadata=m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ) ), ): self.match_names.add(node.value) module = self._make_fixture( """ a = 1 + 2 b = 3 + 4 + d + e def foo() -> str: c = "baz" return c def bar() -> int: return b del foo del bar """ ) visitor = TestTransformer() module.visit(visitor) self.assertEqual(visitor.match_names, {"a", "b", "c", "foo", "bar"}) def test_matches_decorator_on_visitors(self) -> None: # Set up a simple visitor that has a metadata dependency, try to use it in matchers. class TestVisitor(m.MatcherDecoratableVisitor): METADATA_DEPENDENCIES: Sequence[meta.ProviderT] = ( meta.ExpressionContextProvider, ) def __init__(self) -> None: super().__init__() self.match_names: Set[str] = set() @m.visit( m.Name( metadata=m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ) ) ) def _visit_assignments(self, node: cst.Name) -> None: # Only match name nodes that are being assigned to. self.match_names.add(node.value) module = self._make_fixture( """ a = 1 + 2 b = 3 + 4 + d + e def foo() -> str: c = "baz" return c def bar() -> int: return b del foo del bar """ ) visitor = TestVisitor() module.visit(visitor) self.assertEqual(visitor.match_names, {"a", "b", "c", "foo", "bar"}) def test_matches_decorator_on_transformers(self) -> None: # Set up a simple visitor that has a metadata dependency, try to use it in matchers. class TestTransformer(m.MatcherDecoratableTransformer): METADATA_DEPENDENCIES: Sequence[meta.ProviderT] = ( meta.ExpressionContextProvider, ) def __init__(self) -> None: super().__init__() self.match_names: Set[str] = set() @m.visit( m.Name( metadata=m.MatchMetadata( meta.ExpressionContextProvider, meta.ExpressionContext.STORE ) ) ) def _visit_assignments(self, node: cst.Name) -> None: # Only match name nodes that are being assigned to. self.match_names.add(node.value) module = self._make_fixture( """ a = 1 + 2 b = 3 + 4 + d + e def foo() -> str: c = "baz" return c def bar() -> int: return b del foo del bar """ ) visitor = TestTransformer() module.visit(visitor) self.assertEqual(visitor.match_names, {"a", "b", "c", "foo", "bar"}) LibCST-1.2.0/libcst/matchers/tests/test_replace.py000066400000000000000000000252571456464173300220760ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
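# A minimal standalone sketch of the `m.replace` API exercised by this suite
# (illustrative only; the module text below is a made-up example, not one of
# the fixtures used in the tests that follow):
#
#     import libcst as cst
#     import libcst.matchers as m
#
#     module = cst.parse_module("x = True\n")
#     # A static node may be supplied as the replacement value.
#     updated = m.replace(module, m.Name("True"), cst.Name("False"))
#     assert cst.ensure_type(updated, cst.Module).code == "x = False\n"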
from typing import Dict, Sequence, Union import libcst as cst import libcst.matchers as m import libcst.metadata as meta from libcst.testing.utils import UnitTest class MatchersReplaceTest(UnitTest): def test_replace_sentinel(self) -> None: def _swap_bools( node: cst.CSTNode, extraction: Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]], ) -> cst.CSTNode: return cst.Name( "True" if cst.ensure_type(node, cst.Name).value == "False" else "False" ) # Verify behavior when provided a sentinel replaced = m.replace( cst.RemovalSentinel.REMOVE, m.Name("True") | m.Name("False"), _swap_bools ) self.assertEqual(replaced, cst.RemovalSentinel.REMOVE) replaced = m.replace( cst.MaybeSentinel.DEFAULT, m.Name("True") | m.Name("False"), _swap_bools ) self.assertEqual(replaced, cst.MaybeSentinel.DEFAULT) def test_replace_noop(self) -> None: def _swap_bools( node: cst.CSTNode, extraction: Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]], ) -> cst.CSTNode: return cst.Name( "True" if cst.ensure_type(node, cst.Name).value == "False" else "False" ) # Verify behavior when there's nothing to replace. original = cst.parse_module("foo: int = 5\ndef bar() -> str:\n return 's'\n") replaced = cst.ensure_type( m.replace(original, m.Name("True") | m.Name("False"), _swap_bools), cst.Module, ) # Should be identical tree contents self.assertTrue(original.deep_equals(replaced)) # However, should be a new tree by identity self.assertNotEqual(id(original), id(replaced)) def test_replace_simple(self) -> None: # Verify behavior when there's a static node as a replacement original = cst.parse_module( "foo: bool = True\ndef bar() -> bool:\n return False\n" ) replaced = cst.ensure_type( m.replace(original, m.Name("True") | m.Name("False"), cst.Name("boolean")), cst.Module, ).code self.assertEqual( replaced, "foo: bool = boolean\ndef bar() -> bool:\n return boolean\n" ) def test_replace_simple_sentinel(self) -> None: # Verify behavior when there's a sentinel as a replacement original = cst.parse_module( "def bar(x: int, y: int) -> bool:\n return False\n" ) replaced = cst.ensure_type( m.replace(original, m.Param(), cst.RemoveFromParent()), cst.Module ).code self.assertEqual(replaced, "def bar() -> bool:\n return False\n") def test_replace_actual(self) -> None: def _swap_bools( node: cst.CSTNode, extraction: Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]], ) -> cst.CSTNode: return cst.Name( "True" if cst.ensure_type(node, cst.Name).value == "False" else "False" ) # Verify behavior when there's lots to replace. original = cst.parse_module( "foo: bool = True\ndef bar() -> bool:\n return False\n" ) replaced = cst.ensure_type( m.replace(original, m.Name("True") | m.Name("False"), _swap_bools), cst.Module, ).code self.assertEqual( replaced, "foo: bool = False\ndef bar() -> bool:\n return True\n" ) def test_replace_add_one(self) -> None: def _add_one( node: cst.CSTNode, extraction: Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]], ) -> cst.CSTNode: return cst.Integer(str(int(cst.ensure_type(node, cst.Integer).value) + 1)) # Verify slightly more complex transform behavior.
original = cst.parse_module("foo: int = 36\ndef bar() -> int:\n return 41\n") replaced = cst.ensure_type( m.replace(original, m.Integer(), _add_one), cst.Module ).code self.assertEqual(replaced, "foo: int = 37\ndef bar() -> int:\n return 42\n") def test_replace_add_one_to_foo_args(self) -> None: def _add_one_to_arg( node: cst.CSTNode, extraction: Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]], ) -> cst.CSTNode: return node.deep_replace( # This can be either a node or a sequence, pyre doesn't know. cst.ensure_type(extraction["arg"], cst.CSTNode), # Grab the arg and add one to its value. cst.Integer( str(int(cst.ensure_type(extraction["arg"], cst.Integer).value) + 1) ), ) # Verify way more complex transform behavior. original = cst.parse_module( "foo: int = 37\ndef bar(baz: int) -> int:\n return baz\n\nbiz: int = bar(41)\n" ) replaced = cst.ensure_type( m.replace( original, m.Call( func=m.Name("bar"), args=[m.Arg(m.SaveMatchedNode(m.Integer(), "arg"))], ), _add_one_to_arg, ), cst.Module, ).code self.assertEqual( replaced, "foo: int = 37\ndef bar(baz: int) -> int:\n return baz\n\nbiz: int = bar(42)\n", ) def test_replace_sequence_extract(self) -> None: def _reverse_params( node: cst.CSTNode, extraction: Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]], ) -> cst.CSTNode: return cst.ensure_type(node, cst.FunctionDef).with_changes( # pyre-ignore We know "params" is a Sequence[Parameters] but asserting that # to pyre is difficult. params=cst.Parameters(params=list(reversed(extraction["params"]))) ) # Verify that we can still extract sequences with replace. original = cst.parse_module( "def bar(baz: int, foo: int, ) -> int:\n return baz + foo\n" ) replaced = cst.ensure_type( m.replace( original, m.FunctionDef( params=m.Parameters( params=m.SaveMatchedNode([m.ZeroOrMore(m.Param())], "params") ) ), _reverse_params, ), cst.Module, ).code self.assertEqual( replaced, "def bar(foo: int, baz: int, ) -> int:\n return baz + foo\n" ) def test_replace_metadata(self) -> None: def _rename_foo( node: cst.CSTNode, extraction: Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]], ) -> cst.CSTNode: return cst.ensure_type(node, cst.Name).with_changes(value="replaced") original = cst.parse_module( "foo: int = 37\ndef bar(foo: int) -> int:\n return foo\n\nbiz: int = bar(42)\n" ) wrapper = cst.MetadataWrapper(original) replaced = cst.ensure_type( m.replace( wrapper, m.Name( metadata=m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any(n.name == "foo" for n in qualnames), ) ), _rename_foo, ), cst.Module, ).code self.assertEqual( replaced, "replaced: int = 37\ndef bar(foo: int) -> int:\n return foo\n\nbiz: int = bar(42)\n", ) def test_replace_metadata_on_transform(self) -> None: def _rename_foo( node: cst.CSTNode, extraction: Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]], ) -> cst.CSTNode: return cst.ensure_type(node, cst.Name).with_changes(value="replaced") original = cst.parse_module( "foo: int = 37\ndef bar(foo: int) -> int:\n return foo\n\nbiz: int = bar(42)\n" ) wrapper = cst.MetadataWrapper(original) class TestTransformer(m.MatcherDecoratableTransformer): METADATA_DEPENDENCIES: Sequence[meta.ProviderT] = ( meta.QualifiedNameProvider, ) def leave_Module( self, original_node: cst.Module, updated_node: cst.Module ) -> cst.Module: # Somewhat contrived scenario to test codepaths. 
return cst.ensure_type( self.replace( original_node, m.Name( metadata=m.MatchMetadataIfTrue( meta.QualifiedNameProvider, lambda qualnames: any( n.name == "foo" for n in qualnames ), ) ), _rename_foo, ), cst.Module, ) replaced = cst.ensure_type(wrapper.visit(TestTransformer()), cst.Module).code self.assertEqual( replaced, "replaced: int = 37\ndef bar(foo: int) -> int:\n return foo\n\nbiz: int = bar(42)\n", ) def test_replace_updated_node_changes(self) -> None: def _replace_nested( node: cst.CSTNode, extraction: Dict[str, Union[cst.CSTNode, Sequence[cst.CSTNode]]], ) -> cst.CSTNode: return cst.ensure_type(node, cst.Call).with_changes( args=[ cst.Arg( cst.Name( value=cst.ensure_type( cst.ensure_type(extraction["inner"], cst.Call).func, cst.Name, ).value + "_immediate" ) ) ] ) original = cst.parse_module( "def foo(val: int) -> int:\n return val\nbar = foo\nbaz = foo\nbiz = foo\nfoo(bar(baz(biz(5))))\n" ) replaced = cst.ensure_type( m.replace( original, m.Call(args=[m.Arg(m.SaveMatchedNode(m.Call(), "inner"))]), _replace_nested, ), cst.Module, ).code self.assertEqual( replaced, "def foo(val: int) -> int:\n return val\nbar = foo\nbaz = foo\nbiz = foo\nfoo(bar_immediate)\n", ) LibCST-1.2.0/libcst/matchers/tests/test_visitors.py000066400000000000000000000473361456464173300223470ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import pickle from typing import Union import libcst as cst import libcst.matchers as m from libcst.matchers import ( leave, MatchDecoratorMismatch, MatcherDecoratableTransformer, MatcherDecoratableVisitor, visit, ) from libcst.testing.utils import UnitTest class MatchersVisitLeaveDecoratorTypingTest(UnitTest): def test_valid_collector_simple(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @visit(m.SimpleString()) def _string_visit(self, node: cst.SimpleString) -> None: pass @leave(m.SimpleString()) def _string_leave(self, original_node: cst.SimpleString) -> None: pass # Instantiating this class should not raise any errors TestVisitor() def test_valid_transformer_simple(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @visit(m.SimpleString()) def _string_visit(self, node: cst.SimpleString) -> None: pass @leave(m.SimpleString()) def _string_leave( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: return updated_node # Instantiating this class should not raise any errors TestVisitor() def test_valid_transformer_base_class(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString()) def _string_leave( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.BaseExpression: return updated_node # Instantiating this class should not raise any errors TestVisitor() def test_valid_collector_visit_union(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @visit(m.SimpleString() | m.Name()) def _string_visit(self, node: Union[cst.SimpleString, cst.Name]) -> None: pass # Instantiating this class should not raise any errors TestVisitor() def test_valid_transformer_visit_union(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @visit(m.SimpleString() | m.Name()) def _string_visit(self, node: Union[cst.SimpleString, cst.Name]) -> None: pass # Instantiating this class should not raise any errors TestVisitor() def test_valid_collector_visit_superclass(self) -> None: class 
TestVisitor(MatcherDecoratableVisitor): @visit(m.SimpleString() | m.Name()) def _string_visit(self, node: cst.BaseExpression) -> None: pass # Instantiating this class should not raise any errors TestVisitor() def test_valid_transformer_visit_superclass(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @visit(m.SimpleString() | m.Name()) def _string_visit(self, node: cst.BaseExpression) -> None: pass # Instantiating this class should not raise any errors TestVisitor() def test_valid_collector_leave_union(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @leave(m.SimpleString() | m.Name()) def _string_leave(self, node: Union[cst.SimpleString, cst.Name]) -> None: pass # Instantiating this class should not raise any errors TestVisitor() def test_valid_transformer_leave_union(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString() | m.Name()) def _string_leave( self, original_node: Union[cst.SimpleString, cst.Name], updated_node: Union[cst.SimpleString, cst.Name], ) -> Union[cst.SimpleString, cst.Name]: return updated_node # Instantiating this class should not raise any errors TestVisitor() def test_valid_collector_leave_superclass(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @leave(m.SimpleString() | m.Name()) def _string_leave(self, node: cst.BaseExpression) -> None: pass # Instantiating this class should not raise any errors TestVisitor() def test_valid_transformer_leave_superclass(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString() | m.Name()) def _string_leave( self, original_node: cst.BaseExpression, updated_node: cst.BaseExpression, ) -> cst.BaseExpression: return updated_node # Instantiating this class should not raise any errors TestVisitor() def test_valid_transformer_leave_return_maybe(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.AssignEqual()) def _assign_equal_leave( self, original_node: cst.AssignEqual, updated_node: cst.AssignEqual ) -> Union[cst.AssignEqual, cst.MaybeSentinel]: return updated_node # Instantiating this class should not raise any errors TestVisitor() def test_valid_transformer_leave_return_remove(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.AssignTarget()) def _string_visit( self, original_node: cst.AssignTarget, updated_node: cst.AssignTarget ) -> Union[cst.AssignTarget, cst.RemovalSentinel]: return updated_node # Instantiating this class should not raise any errors TestVisitor() def test_invalid_collector_visit_return(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @visit(m.SimpleString()) def _string_visit(self, node: cst.SimpleString) -> bool: return False # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit should only decorate functions that do not return", ): TestVisitor() def test_invalid_transformer_visit_return(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @visit(m.SimpleString()) def _string_visit(self, node: cst.SimpleString) -> bool: return False # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit should only decorate functions that do not return", ): TestVisitor() def test_invalid_transformer_visit_num_params(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @visit(m.SimpleString()) def _string_visit( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> None: pass # Instantiating this 
class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit should decorate functions which take 1 parameter", ): TestVisitor() def test_invalid_collector_visit_num_params(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @visit(m.SimpleString()) def _string_visit( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit should decorate functions which take 1 parameter", ): TestVisitor() def test_invalid_transformer_leave_num_params(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString()) def _string_leave( self, original_node: cst.SimpleString ) -> cst.SimpleString: return original_node # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave should decorate functions which take 2 parameters", ): TestVisitor() def test_invalid_collector_leave_num_params(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @leave(m.SimpleString()) def _string_leave( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave should decorate functions which take 1 parameter", ): TestVisitor() def test_invalid_collector_leave_return(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @leave(m.SimpleString()) def _string_leave(self, original_node: cst.SimpleString) -> bool: return False # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave should only decorate functions that do not return", ): TestVisitor() def test_invalid_transformer_leave_return_invalid_superclass(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString()) def _string_visit( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.BaseParenthesizableWhitespace: return cst.SimpleWhitespace("") # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave decorated function cannot return the type BaseParenthesizableWhitespace", ): TestVisitor() def test_invalid_transformer_leave_return_wrong_type(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString()) def _string_visit( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.Pass: return cst.Pass() # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave decorated function cannot return the type Pass", ): TestVisitor() def test_invalid_transformer_leave_return_invalid_maybe(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString()) def _string_visit( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> Union[cst.SimpleString, cst.MaybeSentinel]: return updated_node # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave decorated function cannot return the type MaybeSentinel", ): TestVisitor() def test_invalid_transformer_leave_return_invalid_remove(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString()) def _string_visit( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> Union[cst.SimpleString, 
cst.RemovalSentinel]: return updated_node # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave decorated function cannot return the type RemovalSentinel", ): TestVisitor() def test_invalid_transformer_leave_return_invalid_union(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString() | m.Name()) def _string_leave( self, original_node: Union[cst.SimpleString, cst.Name], updated_node: Union[cst.SimpleString, cst.Name], ) -> Union[cst.SimpleString, cst.Pass]: return cst.SimpleString('""') # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave decorated function cannot return the type Pass", ): TestVisitor() def test_invalid_collector_visit_union(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @visit(m.SimpleString() | m.Name()) def _string_visit(self, node: cst.SimpleString) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit can be called with Name but the decorated function parameter annotations do not include this type", ): TestVisitor() def test_invalid_transformer_visit_union(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @visit(m.SimpleString() | m.Name()) def _string_visit(self, node: cst.SimpleString) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit can be called with Name but the decorated function parameter annotations do not include this type", ): TestVisitor() def test_invalid_collector_visit_superclass(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @visit(m.SimpleString() | m.Pass()) def _string_visit(self, node: cst.BaseExpression) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit can be called with Pass but the decorated function parameter annotations do not include this type", ): TestVisitor() def test_invalid_transformer_visit_superclass(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @visit(m.SimpleString() | m.Pass()) def _string_visit(self, node: cst.BaseExpression) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit can be called with Pass but the decorated function parameter annotations do not include this type", ): TestVisitor() def test_invalid_collector_leave_union(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @leave(m.SimpleString() | m.Name()) def _string_leave(self, node: cst.SimpleString) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave can be called with Name but the decorated function parameter annotations do not include this type", ): TestVisitor() def test_invalid_transformer_leave_union(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString() | m.Name()) def _string_leave( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.BaseExpression: return updated_node # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave can be called with Name but the decorated function parameter annotations do not include this type", ): TestVisitor() def test_invalid_collector_leave_superclass(self) -> None: class 
TestVisitor(MatcherDecoratableVisitor): @leave(m.SimpleString() | m.Pass()) def _string_leave(self, node: cst.BaseExpression) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave can be called with Pass but the decorated function parameter annotations do not include this type", ): TestVisitor() def test_invalid_transformer_leave_superclass(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString() | m.Pass()) def _string_leave( self, original_node: cst.BaseExpression, updated_node: cst.BaseExpression, ) -> cst.BaseExpression: return updated_node # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave can be called with Pass but the decorated function parameter annotations do not include this type", ): TestVisitor() def test_bad_visit_collector_decorator(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @visit(m.SimpleString()) def visit_SimpleString(self, node: cst.SimpleString) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit should not decorate functions that are concrete visit or leave methods", ): TestVisitor() def test_bad_leave_collector_decorator(self) -> None: class TestVisitor(MatcherDecoratableVisitor): @leave(m.SimpleString()) def leave_SimpleString( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave should not decorate functions that are concrete visit or leave methods", ): TestVisitor() def test_bad_visit_transform_decorator(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @visit(m.SimpleString()) def visit_SimpleString(self, node: cst.SimpleString) -> None: pass # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@visit should not decorate functions that are concrete visit or leave methods", ): TestVisitor() def test_bad_leave_transform_decorator(self) -> None: class TestVisitor(MatcherDecoratableTransformer): @leave(m.SimpleString()) def leave_SimpleString( self, original_node: cst.SimpleString, updated_node: cst.SimpleString ) -> cst.SimpleString: return updated_node # Instantiating this class should raise a runtime error with self.assertRaisesRegex( MatchDecoratorMismatch, "@leave should not decorate functions that are concrete visit or leave methods", ): TestVisitor() def test_pickleable_exception(self) -> None: original = MatchDecoratorMismatch("func", "message") serialized = pickle.dumps(original) unserialized = pickle.loads(serialized) self.assertEqual(original.message, unserialized.message) self.assertEqual(original.func, unserialized.func) LibCST-1.2.0/libcst/metadata/000077500000000000000000000000001456464173300156475ustar00rootroot00000000000000LibCST-1.2.0/libcst/metadata/__init__.py000066400000000000000000000050311456464173300177570ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
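# A short orientation sketch for this package (illustrative only, mirroring
# the patterns exercised in the test suites above):
#
#     import libcst as cst
#     from libcst.metadata import MetadataWrapper, PositionProvider
#
#     wrapper = MetadataWrapper(cst.parse_module("a = 1\n"))
#     # resolve() runs the provider and returns an immutable mapping from
#     # nodes to their computed metadata values.
#     positions = wrapper.resolve(PositionProvider)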
from libcst._position import CodePosition, CodeRange from libcst.metadata.accessor_provider import AccessorProvider from libcst.metadata.base_provider import ( BaseMetadataProvider, BatchableMetadataProvider, ProviderT, VisitorMetadataProvider, ) from libcst.metadata.expression_context_provider import ( ExpressionContext, ExpressionContextProvider, ) from libcst.metadata.file_path_provider import FilePathProvider from libcst.metadata.full_repo_manager import FullRepoManager from libcst.metadata.name_provider import ( FullyQualifiedNameProvider, QualifiedNameProvider, ) from libcst.metadata.parent_node_provider import ParentNodeProvider from libcst.metadata.position_provider import ( PositionProvider, WhitespaceInclusivePositionProvider, ) from libcst.metadata.reentrant_codegen import ( CodegenPartial, ExperimentalReentrantCodegenProvider, ) from libcst.metadata.scope_provider import ( Access, Accesses, Assignment, Assignments, BaseAssignment, BuiltinAssignment, BuiltinScope, ClassScope, ComprehensionScope, FunctionScope, GlobalScope, ImportAssignment, QualifiedName, QualifiedNameSource, Scope, ScopeProvider, ) from libcst.metadata.span_provider import ByteSpanPositionProvider, CodeSpan from libcst.metadata.type_inference_provider import TypeInferenceProvider from libcst.metadata.wrapper import MetadataWrapper __all__ = [ "CodePosition", "CodeRange", "CodeSpan", "WhitespaceInclusivePositionProvider", "PositionProvider", "ByteSpanPositionProvider", "BaseMetadataProvider", "ExpressionContext", "ExpressionContextProvider", "BaseAssignment", "Assignment", "BuiltinAssignment", "ImportAssignment", "BuiltinScope", "Access", "Scope", "GlobalScope", "FunctionScope", "ClassScope", "ComprehensionScope", "ScopeProvider", "ParentNodeProvider", "QualifiedName", "QualifiedNameSource", "MetadataWrapper", "BatchableMetadataProvider", "VisitorMetadataProvider", "QualifiedNameProvider", "FullyQualifiedNameProvider", "ProviderT", "Assignments", "Accesses", "TypeInferenceProvider", "FullRepoManager", "AccessorProvider", "FilePathProvider", # Experimental APIs: "ExperimentalReentrantCodegenProvider", "CodegenPartial", ] LibCST-1.2.0/libcst/metadata/accessor_provider.py000066400000000000000000000010421456464173300217320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import dataclasses import libcst as cst from libcst.metadata.base_provider import VisitorMetadataProvider class AccessorProvider(VisitorMetadataProvider[str]): def on_visit(self, node: cst.CSTNode) -> bool: for f in dataclasses.fields(node): child = getattr(node, f.name) self.set_metadata(child, f.name) return True LibCST-1.2.0/libcst/metadata/base_provider.py000066400000000000000000000125431456464173300210520ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
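# A minimal sketch of a custom provider built on the classes defined below
# (illustrative only; `IsCallProvider` is a hypothetical name, not part of
# LibCST):
#
#     import libcst as cst
#     from libcst.metadata import BatchableMetadataProvider, MetadataWrapper
#
#     class IsCallProvider(BatchableMetadataProvider[bool]):
#         def visit_Call(self, node: cst.Call) -> None:
#             # set_metadata records a value for this node in the provider's
#             # computed-metadata mapping.
#             self.set_metadata(node, True)
#
#     wrapper = MetadataWrapper(cst.parse_module("foo()\n"))
#     is_call = wrapper.resolve(IsCallProvider)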
from pathlib import Path from types import MappingProxyType from typing import ( Callable, Generic, List, Mapping, MutableMapping, Optional, Type, TYPE_CHECKING, TypeVar, Union, ) from libcst._batched_visitor import BatchableCSTVisitor from libcst._metadata_dependent import ( _T as _MetadataT, _UNDEFINED_DEFAULT, LazyValue, MetadataDependent, ) from libcst._visitors import CSTVisitor if TYPE_CHECKING: from libcst._nodes.base import CSTNode from libcst._nodes.module import _ModuleSelfT as _ModuleT, Module from libcst.metadata.wrapper import MetadataWrapper ProviderT = Type["BaseMetadataProvider[object]"] # BaseMetadataProvider[int] would be a subtype of BaseMetadataProvider[object], so the # typevar is covariant. _ProvidedMetadataT = TypeVar("_ProvidedMetadataT", covariant=True) MaybeLazyMetadataT = Union[LazyValue[_ProvidedMetadataT], _ProvidedMetadataT] # We can't use an ABCMeta here, because of metaclass conflicts class BaseMetadataProvider(MetadataDependent, Generic[_ProvidedMetadataT]): """ The low-level base class for all metadata providers. This class should be extended for metadata providers that are not visitor-based. This class is generic. A subclass of ``BaseMetadataProvider[T]`` will provide metadata of type ``T``. """ #: Cache of metadata computed by this provider # # N.B. This has some typing variance problems. See `set_metadata` for an # explanation. _computed: MutableMapping["CSTNode", MaybeLazyMetadataT] #: Implement gen_cache to indicate the metadata provider depends on a cache from an #: external system. This function will be called by :class:`~libcst.metadata.FullRepoManager` #: to compute the required cache object per file path. gen_cache: Optional[Callable[[Path, List[str], int], Mapping[str, object]]] = None def __init__(self, cache: object = None) -> None: super().__init__() self._computed: MutableMapping["CSTNode", MaybeLazyMetadataT] = {} if self.gen_cache and cache is None: # The metadata provider implementation is responsible for storing and using the cache. raise Exception( f"Cache is required for initializing {self.__class__.__name__}." ) self.cache = cache def _gen( self, wrapper: "MetadataWrapper" ) -> Mapping["CSTNode", MaybeLazyMetadataT]: """ Resolves and returns the metadata mapping for the module in ``wrapper``. This method is used by the metadata resolver and should not be called directly. """ self._computed = {} # Resolve metadata dependencies for this provider with self.resolve(wrapper): self._gen_impl(wrapper.module) # Copy into a mapping proxy to ensure immutability return MappingProxyType(dict(self._computed)) def _gen_impl(self, module: "Module") -> None: """ Override this method with a metadata computation implementation. """ ... def set_metadata(self, node: "CSTNode", value: MaybeLazyMetadataT) -> None: """ Record a metadata value ``value`` for ``node``. """ self._computed[node] = value def get_metadata( self, key: Type["BaseMetadataProvider[_MetadataT]"], node: "CSTNode", default: Union[ MaybeLazyMetadataT, Type[_UNDEFINED_DEFAULT] ] = _UNDEFINED_DEFAULT, ) -> _MetadataT: """ The same method as :func:`~libcst.MetadataDependent.get_metadata` except metadata is accessed from ``self._computed`` in addition to ``self.metadata``. See :func:`~libcst.MetadataDependent.get_metadata`.
""" if key is type(self): if default is not _UNDEFINED_DEFAULT: ret = self._computed.get(node, default) else: ret = self._computed[node] if isinstance(ret, LazyValue): return ret() return ret return super().get_metadata(key, node, default) class VisitorMetadataProvider(CSTVisitor, BaseMetadataProvider[_ProvidedMetadataT]): """ The low-level base class for all non-batchable visitor-based metadata providers. Inherits from :class:`~libcst.CSTVisitor`. This class is generic. A subclass of ``VisitorMetadataProvider[T]`` will provider metadata of type ``T``. """ def _gen_impl(self, module: "_ModuleT") -> None: module.visit(self) class BatchableMetadataProvider( BatchableCSTVisitor, BaseMetadataProvider[_ProvidedMetadataT] ): """ The low-level base class for all batchable visitor-based metadata providers. Batchable providers should be preferred when possible as they are more efficient to run compared to non-batchable visitor-based providers. Inherits from :class:`~libcst.BatchableCSTVisitor`. This class is generic. A subclass of ``BatchableMetadataProvider[T]`` will provider metadata of type ``T``. """ def _gen_impl(self, module: "Module") -> None: """ Batchables providers are resolved through _gen_batchable] so no implementation should be provided in _gen_impl. """ pass LibCST-1.2.0/libcst/metadata/expression_context_provider.py000066400000000000000000000172211456464173300241010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from enum import auto, Enum from typing import Optional, Sequence import libcst as cst from libcst.metadata.base_provider import BatchableMetadataProvider class ExpressionContext(Enum): """Used in :class:`ExpressionContextProvider` to represent context of a variable reference.""" #: Load the value of a variable reference. #: #: >>> libcst.MetadataWrapper(libcst.parse_module("a")).resolve(libcst.ExpressionContextProvider) #: mappingproxy({Name( #: value='a', #: lpar=[], #: rpar=[], #: ): }) LOAD = auto() #: Store a value to a variable reference by :class:`~libcst.Assign` (``=``), #: :class:`~libcst.AugAssign` (e.g. ``+=``, ``-=``, etc), or #: :class:`~libcst.AnnAssign`. #: #: >>> libcst.MetadataWrapper(libcst.parse_module("a = b")).resolve(libcst.ExpressionContextProvider) #: mappingproxy({Name( #: value='a', #: lpar=[], #: rpar=[], #: ): , Name( #: value='b', #: lpar=[], #: rpar=[], #: ): }) STORE = auto() #: Delete value of a variable reference by ``del``. 
#: #: >>> libcst.MetadataWrapper(libcst.parse_module("del a")).resolve(libcst.ExpressionContextProvider) #: mappingproxy({Name( #: value='a', #: lpar=[], #: rpar=[], #: ): < ExpressionContext.DEL: 3 >}) DEL = auto() class ExpressionContextVisitor(cst.CSTVisitor): def __init__( self, provider: "ExpressionContextProvider", context: ExpressionContext ) -> None: self.provider = provider self.context = context def visit_Assign(self, node: cst.Assign) -> bool: for target in node.targets: target.visit( ExpressionContextVisitor(self.provider, ExpressionContext.STORE) ) node.value.visit(self) return False def visit_AnnAssign(self, node: cst.AnnAssign) -> bool: node.target.visit( ExpressionContextVisitor(self.provider, ExpressionContext.STORE) ) node.annotation.visit(self) value = node.value if value: value.visit(self) return False def visit_AugAssign(self, node: cst.AugAssign) -> bool: node.target.visit( ExpressionContextVisitor(self.provider, ExpressionContext.STORE) ) node.value.visit(self) return False def visit_NamedExpr(self, node: cst.NamedExpr) -> bool: node.target.visit( ExpressionContextVisitor(self.provider, ExpressionContext.STORE) ) node.value.visit(self) return False def visit_Name(self, node: cst.Name) -> bool: self.provider.set_metadata(node, self.context) return False def visit_AsName(self, node: cst.AsName) -> Optional[bool]: node.name.visit( ExpressionContextVisitor(self.provider, ExpressionContext.STORE) ) return False def visit_CompFor(self, node: cst.CompFor) -> bool: node.target.visit( ExpressionContextVisitor(self.provider, ExpressionContext.STORE) ) node.iter.visit(self) for i in node.ifs: i.visit(self) inner_for_in = node.inner_for_in if inner_for_in: inner_for_in.visit(self) return False def visit_For(self, node: cst.For) -> bool: node.target.visit( ExpressionContextVisitor(self.provider, ExpressionContext.STORE) ) node.iter.visit(self) node.body.visit(self) orelse = node.orelse if orelse: orelse.visit(self) return False def visit_Del(self, node: cst.Del) -> bool: node.target.visit( ExpressionContextVisitor(self.provider, ExpressionContext.DEL) ) return False def visit_Attribute(self, node: cst.Attribute) -> bool: self.provider.set_metadata(node, self.context) node.value.visit( ExpressionContextVisitor(self.provider, ExpressionContext.LOAD) ) # don't visit attr (Name), so attr has no context return False def visit_Subscript(self, node: cst.Subscript) -> bool: self.provider.set_metadata(node, self.context) node.value.visit( ExpressionContextVisitor(self.provider, ExpressionContext.LOAD) ) slice = node.slice if isinstance(slice, Sequence): for sli in slice: sli.visit( ExpressionContextVisitor(self.provider, ExpressionContext.LOAD) ) else: slice.visit(ExpressionContextVisitor(self.provider, ExpressionContext.LOAD)) return False def visit_Tuple(self, node: cst.Tuple) -> Optional[bool]: self.provider.set_metadata(node, self.context) def visit_List(self, node: cst.List) -> Optional[bool]: self.provider.set_metadata(node, self.context) def visit_StarredElement(self, node: cst.StarredElement) -> Optional[bool]: self.provider.set_metadata(node, self.context) def visit_ClassDef(self, node: cst.ClassDef) -> Optional[bool]: node.name.visit( ExpressionContextVisitor(self.provider, ExpressionContext.STORE) ) node.body.visit(self) for base in node.bases: base.visit(self) for keyword in node.keywords: keyword.visit(self) for decorator in node.decorators: decorator.visit(self) return False def visit_FunctionDef(self, node: cst.FunctionDef) -> Optional[bool]: node.name.visit( 
            ExpressionContextVisitor(self.provider, ExpressionContext.STORE)
        )
        node.params.visit(self)
        node.body.visit(self)
        for decorator in node.decorators:
            decorator.visit(self)
        returns = node.returns
        if returns:
            returns.visit(self)
        return False

    def visit_Param(self, node: cst.Param) -> Optional[bool]:
        node.name.visit(
            ExpressionContextVisitor(self.provider, ExpressionContext.STORE)
        )
        annotation = node.annotation
        if annotation:
            annotation.visit(self)
        default = node.default
        if default:
            default.visit(self)
        return False


class ExpressionContextProvider(BatchableMetadataProvider[ExpressionContext]):
    """
    Provides :class:`ExpressionContext` metadata (mimicking ``expr_context`` in ast)
    for the following node types: :class:`~libcst.Attribute`,
    :class:`~libcst.Subscript`, :class:`~libcst.StarredElement`,
    :class:`~libcst.List`, :class:`~libcst.Tuple` and :class:`~libcst.Name`.
    Note that a :class:`~libcst.Name` may not always have context because of the
    differences between ast and LibCST. E.g. :attr:`~libcst.Attribute.attr` is a
    :class:`~libcst.Name` in LibCST but a str in ast. To honor the ast
    implementation, we don't assign context to :attr:`~libcst.Attribute.attr`.

    Three context types :attr:`ExpressionContext.STORE`,
    :attr:`ExpressionContext.LOAD` and :attr:`ExpressionContext.DEL` are provided.
    """

    def visit_Module(self, node: cst.Module) -> Optional[bool]:
        node.visit(ExpressionContextVisitor(self, ExpressionContext.LOAD))
LibCST-1.2.0/libcst/metadata/file_path_provider.py000066400000000000000000000035071456464173300220730ustar00rootroot00000000000000
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
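
# Editor's note: a minimal usage sketch for the ExpressionContextProvider defined in
# expression_context_provider.py above (illustrative only; the mapping shapes are
# documented in that class's docstring).
import libcst as cst
from libcst.metadata import (
    ExpressionContext,
    ExpressionContextProvider,
    MetadataWrapper,
)

wrapper = MetadataWrapper(cst.parse_module("a = b\ndel c\n"))
contexts = wrapper.resolve(ExpressionContextProvider)
# Name nodes map to STORE ("a"), LOAD ("b"), or DEL ("c").
by_name = {
    node.value: ctx for node, ctx in contexts.items() if isinstance(node, cst.Name)
}
assert by_name == {
    "a": ExpressionContext.STORE,
    "b": ExpressionContext.LOAD,
    "c": ExpressionContext.DEL,
}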
from pathlib import Path from typing import Collection, Dict, List, Mapping, TYPE_CHECKING import libcst as cst from libcst._types import StrPath from libcst.metadata.wrapper import MetadataWrapper if TYPE_CHECKING: from libcst.metadata.base_provider import ProviderT # noqa: F401 class FullRepoManager: def __init__( self, repo_root_dir: StrPath, paths: Collection[str], providers: Collection["ProviderT"], timeout: int = 5, ) -> None: """ Given project root directory with pyre and watchman setup, :class:`~libcst.metadata.FullRepoManager` handles the inter process communication to read the required full repository cache data for metadata provider like :class:`~libcst.metadata.TypeInferenceProvider`. :param paths: a collection of paths to access full repository data. :param providers: a collection of metadata provider classes require accessing full repository data, currently supports :class:`~libcst.metadata.TypeInferenceProvider` and :class:`~libcst.metadata.FullyQualifiedNameProvider`. :param timeout: number of seconds. Raises `TimeoutExpired `_ when timeout. """ self.root_path: Path = Path(repo_root_dir) self._cache: Dict["ProviderT", Mapping[str, object]] = {} self._timeout = timeout self._providers = providers self._paths: List[str] = list(paths) @property def cache(self) -> Dict["ProviderT", Mapping[str, object]]: """ The full repository cache data for all metadata providers passed in the ``providers`` parameter when constructing :class:`~libcst.metadata.FullRepoManager`. Each provider is mapped to a mapping of path to cache. """ # Make sure that the cache is available to us. If resolve_cache() was called manually then this is a noop. self.resolve_cache() return self._cache def resolve_cache(self) -> None: """ Resolve cache for all providers that require it. Normally this is called by :meth:`~FullRepoManager.get_cache_for_path` so you do not need to call it manually. However, if you intend to do a single cache resolution pass before forking, it is a good idea to call this explicitly to control when cache resolution happens. """ if not self._cache: cache: Dict["ProviderT", Mapping[str, object]] = {} for provider in self._providers: handler = provider.gen_cache if handler: cache[provider] = handler( self.root_path, self._paths, self._timeout ) self._cache = cache def get_cache_for_path(self, path: str) -> Mapping["ProviderT", object]: """ Retrieve cache for a source file. The file needs to appear in the ``paths`` parameter when constructing :class:`~libcst.metadata.FullRepoManager`. .. code-block:: python manager = FullRepoManager(".", {"a.py", "b.py"}, {TypeInferenceProvider}) MetadataWrapper(module, cache=manager.get_cache_for_path("a.py")) """ if path not in self._paths: raise Exception( "The path needs to be in paths parameter when constructing FullRepoManager for efficient batch processing." ) # Make sure that the cache is available to us. If the user called # resolve_cache() manually then this is a noop. self.resolve_cache() return { provider: data for provider, files in self._cache.items() for _path, data in files.items() if _path == path } def get_metadata_wrapper_for_path(self, path: str) -> MetadataWrapper: """ Create a :class:`~libcst.metadata.MetadataWrapper` given a source file path. The path needs to be a path relative to project root directory. The source code is read and parsed as :class:`~libcst.Module` for :class:`~libcst.metadata.MetadataWrapper`. .. 
code-block:: python manager = FullRepoManager(".", {"a.py", "b.py"}, {TypeInferenceProvider}) wrapper = manager.get_metadata_wrapper_for_path("a.py") """ module = cst.parse_module((self.root_path / path).read_text()) cache = self.get_cache_for_path(path) return MetadataWrapper(module, True, cache) LibCST-1.2.0/libcst/metadata/name_provider.py000066400000000000000000000163111456464173300210550ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import dataclasses from pathlib import Path from typing import Collection, List, Mapping, Optional, Union import libcst as cst from libcst._metadata_dependent import LazyValue, MetadataDependent from libcst.helpers.module import calculate_module_and_package, ModuleNameAndPackage from libcst.metadata.base_provider import BatchableMetadataProvider from libcst.metadata.scope_provider import ( QualifiedName, QualifiedNameSource, ScopeProvider, ) class QualifiedNameProvider(BatchableMetadataProvider[Collection[QualifiedName]]): """ Compute possible qualified names of a variable CSTNode (extends `PEP-3155 `_). It uses the :func:`~libcst.metadata.Scope.get_qualified_names_for` underlying to get qualified names. Multiple qualified names may be returned, such as when we have conditional imports or an import shadows another. E.g., the provider finds ``a.b``, ``d.e`` and ``f.g`` as possible qualified names of ``c``:: >>> wrapper = MetadataWrapper( >>> cst.parse_module(dedent( >>> ''' >>> if something: >>> from a import b as c >>> elif otherthing: >>> from d import e as c >>> else: >>> from f import g as c >>> c() >>> ''' >>> )) >>> ) >>> call = wrapper.module.body[1].body[0].value >>> wrapper.resolve(QualifiedNameProvider)[call], { QualifiedName(name="a.b", source=QualifiedNameSource.IMPORT), QualifiedName(name="d.e", source=QualifiedNameSource.IMPORT), QualifiedName(name="f.g", source=QualifiedNameSource.IMPORT), } For qualified name of a variable in a function or a comprehension, please refer :func:`~libcst.metadata.Scope.get_qualified_names_for` for more detail. """ METADATA_DEPENDENCIES = (ScopeProvider,) def visit_Module(self, node: cst.Module) -> Optional[bool]: visitor = QualifiedNameVisitor(self) node.visit(visitor) @staticmethod def has_name( visitor: MetadataDependent, node: cst.CSTNode, name: Union[str, QualifiedName] ) -> bool: """Check if any of qualified name has the str name or :class:`~libcst.metadata.QualifiedName` name.""" qualified_names = visitor.get_metadata(QualifiedNameProvider, node, set()) if isinstance(name, str): return any(qn.name == name for qn in qualified_names) else: return any(qn == name for qn in qualified_names) class QualifiedNameVisitor(cst.CSTVisitor): def __init__(self, provider: "QualifiedNameProvider") -> None: self.provider: QualifiedNameProvider = provider def on_visit(self, node: cst.CSTNode) -> bool: scope = self.provider.get_metadata(ScopeProvider, node, None) if scope: self.provider.set_metadata( node, LazyValue(lambda: scope.get_qualified_names_for(node)) ) else: self.provider.set_metadata(node, set()) super().on_visit(node) return True class FullyQualifiedNameProvider(BatchableMetadataProvider[Collection[QualifiedName]]): """ Provide fully qualified names for CST nodes. Like :class:`QualifiedNameProvider`, but the provided :class:`QualifiedName` instances have absolute identifier names instead of local to the current module. 
This provider is initialized with the current module's fully qualified name, and can be used with :class:`~libcst.metadata.FullRepoManager`. The module's fully qualified name itself is stored as a metadata of the :class:`~libcst.Module` node. Compared to :class:`QualifiedNameProvider`, it also resolves relative imports. Example usage:: >>> mgr = FullRepoManager(".", {"dir/a.py"}, {FullyQualifiedNameProvider}) >>> wrapper = mgr.get_metadata_wrapper_for_path("dir/a.py") >>> fqnames = wrapper.resolve(FullyQualifiedNameProvider) >>> {type(k): v for (k, v) in fqnames.items()} {: {QualifiedName(name='dir.a', source=)}} """ METADATA_DEPENDENCIES = (QualifiedNameProvider,) @classmethod def gen_cache( cls, root_path: Path, paths: List[str], timeout: Optional[int] = None ) -> Mapping[str, ModuleNameAndPackage]: cache = {path: calculate_module_and_package(root_path, path) for path in paths} return cache def __init__(self, cache: ModuleNameAndPackage) -> None: super().__init__(cache) self.module_name: str = cache.name self.package_name: str = cache.package def visit_Module(self, node: cst.Module) -> bool: visitor = FullyQualifiedNameVisitor(self, self.module_name, self.package_name) node.visit(visitor) self.set_metadata( node, {QualifiedName(name=self.module_name, source=QualifiedNameSource.LOCAL)}, ) return True class FullyQualifiedNameVisitor(cst.CSTVisitor): @staticmethod def _fully_qualify_local(module_name: str, package_name: str, name: str) -> str: abs_name = name.lstrip(".") num_dots = len(name) - len(abs_name) # handle relative import if num_dots > 0: name = abs_name # see importlib._bootstrap._resolve_name # https://github.com/python/cpython/blob/3.10/Lib/importlib/_bootstrap.py#L902 bits = package_name.rsplit(".", num_dots - 1) if len(bits) < num_dots: raise ImportError("attempted relative import beyond top-level package") module_name = bits[0] return f"{module_name}.{name}" @staticmethod def _fully_qualify( module_name: str, package_name: str, qname: QualifiedName ) -> QualifiedName: if qname.source == QualifiedNameSource.BUILTIN: # builtins are already fully qualified return qname name = qname.name if qname.source == QualifiedNameSource.IMPORT and not name.startswith("."): # non-relative imports are already fully qualified return qname new_name = FullyQualifiedNameVisitor._fully_qualify_local( module_name, package_name, qname.name ) return dataclasses.replace(qname, name=new_name) def __init__( self, provider: FullyQualifiedNameProvider, module_name: str, package_name: str ) -> None: self.module_name = module_name self.package_name = package_name self.provider = provider def on_visit(self, node: cst.CSTNode) -> bool: qnames = self.provider.get_metadata(QualifiedNameProvider, node) if qnames is not None: self.provider.set_metadata( node, { FullyQualifiedNameVisitor._fully_qualify( self.module_name, self.package_name, qname ) for qname in qnames }, ) return True LibCST-1.2.0/libcst/metadata/parent_node_provider.py000066400000000000000000000015451456464173300224360ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
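
# Editor's note: a hedged sketch of using the FullyQualifiedNameProvider defined in
# name_provider.py above without a FullRepoManager, by generating its cache by hand.
# The path "pkg/mod.py" and the name "helper" are hypothetical; as shown in
# gen_cache above, only the path string is needed (no file I/O for this provider).
from pathlib import Path

import libcst as cst
from libcst.metadata import FullyQualifiedNameProvider, MetadataWrapper

cache = FullyQualifiedNameProvider.gen_cache(Path("."), ["pkg/mod.py"], None)
wrapper = MetadataWrapper(
    cst.parse_module("from . import helper\nhelper.run()\n"),
    cache={FullyQualifiedNameProvider: cache["pkg/mod.py"]},
)
fqnames = wrapper.resolve(FullyQualifiedNameProvider)
# The relative import resolves against the module's package ("pkg"), so the call
# node's fully qualified name is "pkg.helper.run".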
from typing import Optional import libcst as cst from libcst.metadata.base_provider import BatchableMetadataProvider class ParentNodeVisitor(cst.CSTVisitor): def __init__(self, provider: "ParentNodeProvider") -> None: self.provider: ParentNodeProvider = provider super().__init__() def on_leave(self, original_node: cst.CSTNode) -> None: for child in original_node.children: self.provider.set_metadata(child, original_node) super().on_leave(original_node) class ParentNodeProvider(BatchableMetadataProvider[cst.CSTNode]): def visit_Module(self, node: cst.Module) -> Optional[bool]: node.visit(ParentNodeVisitor(self)) LibCST-1.2.0/libcst/metadata/position_provider.py000066400000000000000000000112311456464173300217750ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re from contextlib import contextmanager from dataclasses import dataclass, field from typing import Iterator, List, Optional, Pattern from libcst._add_slots import add_slots from libcst._nodes.base import CSTNode from libcst._nodes.internal import CodegenState from libcst._nodes.module import Module from libcst._position import CodePosition, CodeRange from libcst.metadata.base_provider import BaseMetadataProvider NEWLINE_RE: Pattern[str] = re.compile(r"\r\n?|\n") @add_slots @dataclass(frozen=False) class WhitespaceInclusivePositionProvidingCodegenState(CodegenState): # These are derived from a Module default_indent: str default_newline: str provider: BaseMetadataProvider[CodeRange] indent_tokens: List[str] = field(default_factory=list) tokens: List[str] = field(default_factory=list) line: int = 1 # one-indexed column: int = 0 # zero-indexed _stack: List[CodePosition] = field(init=False, default_factory=list) def add_indent_tokens(self) -> None: self.tokens.extend(self.indent_tokens) for token in self.indent_tokens: self._update_position(token) def add_token(self, value: str) -> None: self.tokens.append(value) self._update_position(value) def _update_position(self, value: str) -> None: """ Computes new line and column numbers from adding the token [value]. """ segments = NEWLINE_RE.split(value) if len(segments) == 1: # contains no newlines # no change to self.lines self.column += len(value) else: self.line += len(segments) - 1 # newline resets column back to 0, but a trailing token may shift column self.column = len(segments[-1]) def before_codegen(self, node: "CSTNode") -> None: self._stack.append(CodePosition(self.line, self.column)) def after_codegen(self, node: "CSTNode") -> None: # we must unconditionally pop the stack, else we could end up in a broken state start_pos = self._stack.pop() # Don't overwrite existing position information # (i.e. semantic position has already been recorded) if node not in self.provider._computed: end_pos = CodePosition(self.line, self.column) node_range = CodeRange(start_pos, end_pos) self.provider._computed[node] = node_range class WhitespaceInclusivePositionProvider(BaseMetadataProvider[CodeRange]): """ Generates line and column metadata. The start and ending bounds of the positions produced by this provider include all whitespace owned by the node. 
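
    A short sketch (editor's addition)::

        wrapper = MetadataWrapper(cst.parse_module("x = 1  # comment\n"))
        positions = wrapper.resolve(WhitespaceInclusivePositionProvider)
        # The range of the statement also covers "  # comment\n", since that
        # whitespace is owned by the statement node.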
""" def _gen_impl(self, module: Module) -> None: state = WhitespaceInclusivePositionProvidingCodegenState( default_indent=module.default_indent, default_newline=module.default_newline, provider=self, ) module._codegen(state) @add_slots @dataclass(frozen=False) class PositionProvidingCodegenState(WhitespaceInclusivePositionProvidingCodegenState): @contextmanager def record_syntactic_position( self, node: CSTNode, *, start_node: Optional[CSTNode] = None, end_node: Optional[CSTNode] = None, ) -> Iterator[None]: start = CodePosition(self.line, self.column) try: yield finally: end = CodePosition(self.line, self.column) # Override with positions hoisted from child nodes if provided start = ( self.provider._computed[start_node].start if start_node is not None else start ) end = self.provider._computed[end_node].end if end_node is not None else end self.provider._computed[node] = CodeRange(start, end) class PositionProvider(BaseMetadataProvider[CodeRange]): """ Generates line and column metadata. These positions are defined by the start and ending bounds of a node ignoring most instances of leading and trailing whitespace when it is not syntactically significant. The positions provided by this provider should eventually match the positions used by `Pyre `__ for equivalent nodes. """ def _gen_impl(self, module: Module) -> None: state = PositionProvidingCodegenState( default_indent=module.default_indent, default_newline=module.default_newline, provider=self, ) module._codegen(state) LibCST-1.2.0/libcst/metadata/reentrant_codegen.py000066400000000000000000000203071456464173300217110ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import List, Optional, Sequence from libcst import BaseStatement, CSTNode, Module from libcst._add_slots import add_slots from libcst._nodes.internal import CodegenState from libcst.metadata import BaseMetadataProvider class CodegenPartial: """ Provided by :class:`ExperimentalReentrantCodegenProvider`. Stores enough information to generate either a small patch (:meth:`get_modified_code_range`) or a new file (:meth:`get_modified_code`) by replacing the old node at this position. """ __slots__ = [ "start_offset", "end_offset", "has_trailing_newline", "_indent_tokens", "_prev_codegen_state", ] def __init__(self, state: "_ReentrantCodegenState") -> None: # store a frozen copy of these values, since they change over time self.start_offset: int = state.start_offset_stack[-1] self.end_offset: int = state.char_offset self.has_trailing_newline: bool = True # this may get updated to False later self._indent_tokens: Sequence[str] = tuple(state.indent_tokens) # everything else can be accessed from the codegen state object self._prev_codegen_state: _ReentrantCodegenState = state def get_original_module_code(self) -> str: """ Equivalent to :meth:`libcst.Module.bytes` on the top-level module that contains this statement, except that it uses the cached result from our previous code generation pass, so it's faster. """ return self._prev_codegen_state.get_code() def get_original_module_bytes(self) -> bytes: """ Equivalent to :meth:`libcst.Module.bytes` on the top-level module that contains this statement, except that it uses the cached result from our previous code generation pass, so it's faster. 
""" return self.get_original_module_code().encode(self._prev_codegen_state.encoding) def get_original_statement_code(self) -> str: """ Equivalent to :meth:`libcst.Module.code_for_node` on the current statement, except that it uses the cached result from our previous code generation pass, so it's faster. """ return self._prev_codegen_state.get_code()[self.start_offset : self.end_offset] def get_modified_statement_code(self, node: BaseStatement) -> str: """ Gets the new code for ``node`` as if it were in same location as the old statement being replaced. This means that it inherits details like the old statement's indentation. """ new_codegen_state = CodegenState( default_indent=self._prev_codegen_state.default_indent, default_newline=self._prev_codegen_state.default_newline, indent_tokens=list(self._indent_tokens), ) node._codegen(new_codegen_state) if not self.has_trailing_newline: new_codegen_state.pop_trailing_newline() return "".join(new_codegen_state.tokens) def get_modified_module_code(self, node: BaseStatement) -> str: """ Gets the new code for the module at the root of this statement's tree, but with the supplied replacement ``node`` in its place. """ original = self.get_original_module_code() patch = self.get_modified_statement_code(node) return f"{original[:self.start_offset]}{patch}{original[self.end_offset:]}" def get_modified_module_bytes(self, node: BaseStatement) -> bytes: """ Gets the new bytes for the module at the root of this statement's tree, but with the supplied replacement ``node`` in its place. """ return self.get_modified_module_code(node).encode( self._prev_codegen_state.encoding ) @add_slots @dataclass(frozen=False) class _ReentrantCodegenState(CodegenState): provider: BaseMetadataProvider[CodegenPartial] encoding: str = "utf-8" indent_size: int = 0 char_offset: int = 0 start_offset_stack: List[int] = field(default_factory=list) cached_code: Optional[str] = None trailing_partials: List[CodegenPartial] = field(default_factory=list) def increase_indent(self, value: str) -> None: super(_ReentrantCodegenState, self).increase_indent(value) self.indent_size += len(value) def decrease_indent(self) -> None: self.indent_size -= len(self.indent_tokens[-1]) super(_ReentrantCodegenState, self).decrease_indent() def add_indent_tokens(self) -> None: super(_ReentrantCodegenState, self).add_indent_tokens() self.char_offset += self.indent_size def add_token(self, value: str) -> None: super(_ReentrantCodegenState, self).add_token(value) self.char_offset += len(value) self.trailing_partials.clear() def before_codegen(self, node: CSTNode) -> None: if not isinstance(node, BaseStatement): return self.start_offset_stack.append(self.char_offset) def after_codegen(self, node: CSTNode) -> None: if not isinstance(node, BaseStatement): return partial = CodegenPartial(self) self.provider.set_metadata(node, partial) self.start_offset_stack.pop() self.trailing_partials.append(partial) def pop_trailing_newline(self) -> None: """ :class:`libcst.Module` contains a hack where it removes the last token (a newline) if the original file didn't have a newline. If this happens, we need to go back through every node at the end of the file, and fix their `end_offset`. """ for tp in self.trailing_partials: tp.end_offset -= len(self.tokens[-1]) tp.has_trailing_newline = False super(_ReentrantCodegenState, self).pop_trailing_newline() def get_code(self) -> str: # Ideally this would use functools.cached_property, but that's only in # Python 3.8+. 
# # This is a little ugly to make pyre's attribute refinement checks happy. cached_code = self.cached_code if cached_code is not None: return cached_code cached_code = "".join(self.tokens) self.cached_code = cached_code return cached_code class ExperimentalReentrantCodegenProvider(BaseMetadataProvider[CodegenPartial]): """ An experimental API that allows fast generation of modified code by recording an initial code-generation pass, and incrementally applying updates. It is a performance optimization for a few niche use-cases and is not user-friendly. **This API may change at any time without warning (including in minor releases).** This is rarely useful. Instead you should make multiple modifications to a single syntax tree, and generate the code once. However, we can think of a few use-cases for this API (hence, why it exists): - When linting a file, you might generate multiple independent patches that a user can accept or reject. Depending on your architecture, it may be advantageous to avoid regenerating the file when computing each patch. - You might want to call out to an external utility (e.g. a typechecker, such as pyre or mypy) to validate a small change. You may need to generate and test lots of these patches. Restrictions: - For safety and sanity reasons, the smallest/only level of granularity is a statement. If you need to patch part of a statement, you regenerate the entire statement. If you need to regenerate an entire module, just call :meth:`libcst.Module.code`. - This does not (currently) operate recursively. You can patch an unpatched piece of code multiple times, but you can't layer additional patches on an already patched piece of code. """ def _gen_impl(self, module: Module) -> None: state = _ReentrantCodegenState( default_indent=module.default_indent, default_newline=module.default_newline, provider=self, encoding=module.encoding, ) module._codegen(state) LibCST-1.2.0/libcst/metadata/scope_provider.py000066400000000000000000001355321456464173300212550ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import abc import builtins from collections import defaultdict from contextlib import contextmanager, ExitStack from dataclasses import dataclass from enum import auto, Enum from typing import ( Collection, Dict, Iterator, List, Mapping, MutableMapping, Optional, Set, Tuple, Type, Union, ) import libcst as cst from libcst import ensure_type from libcst._add_slots import add_slots from libcst.helpers import get_full_name_for_node from libcst.metadata.base_provider import BatchableMetadataProvider from libcst.metadata.expression_context_provider import ( ExpressionContext, ExpressionContextProvider, ) # Comprehensions are handled separately in _visit_comp_alike due to # the complexity of the semantics _ASSIGNMENT_LIKE_NODES = ( cst.AnnAssign, cst.AsName, cst.Assign, cst.AugAssign, cst.ClassDef, cst.CompFor, cst.FunctionDef, cst.Global, cst.Import, cst.ImportFrom, cst.NamedExpr, cst.Nonlocal, cst.Parameters, cst.WithItem, cst.TypeVar, cst.TypeAlias, cst.TypeVarTuple, cst.ParamSpec, ) @add_slots @dataclass(frozen=False) class Access: """ An Access records an access of an assignment. .. note:: This scope analysis only analyzes access via a :class:`~libcst.Name` or a :class:`~libcst.Name` node embedded in other node like :class:`~libcst.Call` or :class:`~libcst.Attribute`. 
    It doesn't support type annotation using a :class:`~libcst.SimpleString`
    literal for forward references. E.g. in this example, the ``"Tree"`` isn't
    parsed as an access::

        class Tree:
            def __new__(cls) -> "Tree":
                ...
    """

    #: The node of the access. A name is an access when the expression context is
    #: :attr:`ExpressionContext.LOAD`. This is usually the name node representing the
    #: access, except for: 1) dotted imports, when it might be the attribute that
    #: represents the most specific part of the imported symbol; and 2) string
    #: annotations, when it is the entire string literal
    node: Union[cst.Name, cst.Attribute, cst.BaseString]

    #: The scope of the access. Note that an access could be in a child scope of
    #: its assignment.
    scope: "Scope"

    is_annotation: bool
    is_type_hint: bool

    __assignments: Set["BaseAssignment"]
    __index: int

    def __init__(
        self, node: cst.Name, scope: "Scope", is_annotation: bool, is_type_hint: bool
    ) -> None:
        self.node = node
        self.scope = scope
        self.is_annotation = is_annotation
        self.is_type_hint = is_type_hint
        self.__assignments = set()
        self.__index = scope._assignment_count

    def __hash__(self) -> int:
        return id(self)

    @property
    def referents(self) -> Collection["BaseAssignment"]:
        """Return all assignments of the access."""
        return self.__assignments

    @property
    def _index(self) -> int:
        return self.__index

    def record_assignment(self, assignment: "BaseAssignment") -> None:
        if assignment.scope != self.scope or assignment._index < self.__index:
            self.__assignments.add(assignment)

    def record_assignments(self, name: str) -> None:
        assignments = self.scope._resolve_scope_for_access(name, self.scope)
        # filter out assignments that happened later than this access
        previous_assignments = {
            assignment
            for assignment in assignments
            if assignment.scope != self.scope or assignment._index < self.__index
        }
        if not previous_assignments and assignments and self.scope.parent != self.scope:
            previous_assignments = self.scope.parent._resolve_scope_for_access(
                name, self.scope
            )
        self.__assignments |= previous_assignments


class QualifiedNameSource(Enum):
    IMPORT = auto()
    BUILTIN = auto()
    LOCAL = auto()


@add_slots
@dataclass(frozen=True)
class QualifiedName:
    #: Qualified name, e.g. ``a.b.c`` or ``fn.<locals>.var``.
    name: str

    #: Source of the name, either :attr:`QualifiedNameSource.IMPORT`,
    #: :attr:`QualifiedNameSource.BUILTIN` or :attr:`QualifiedNameSource.LOCAL`.
    source: QualifiedNameSource


class BaseAssignment(abc.ABC):
    """Abstract base class of :class:`Assignment` and :class:`BuiltinAssignment`."""

    #: The name of the assignment.
    name: str

    #: The scope associated with the assignment.
scope: "Scope" __accesses: Set[Access] def __init__(self, name: str, scope: "Scope") -> None: self.name = name self.scope = scope self.__accesses = set() def record_access(self, access: Access) -> None: if access.scope != self.scope or self._index < access._index: self.__accesses.add(access) def record_accesses(self, accesses: Set[Access]) -> None: later_accesses = { access for access in accesses if access.scope != self.scope or self._index < access._index } self.__accesses |= later_accesses earlier_accesses = accesses - later_accesses if earlier_accesses and self.scope.parent != self.scope: # Accesses "earlier" than the relevant assignment should be attached # to assignments of the same name in the parent for shadowed_assignment in self.scope.parent[self.name]: shadowed_assignment.record_accesses(earlier_accesses) @property def references(self) -> Collection[Access]: """Return all accesses of the assignment.""" # we don't want to publicly expose the mutable version of this return self.__accesses def __hash__(self) -> int: return id(self) @property def _index(self) -> int: """Return an integer that represents the order of assignments in `scope`""" return -1 @abc.abstractmethod def get_qualified_names_for(self, full_name: str) -> Set[QualifiedName]: ... class Assignment(BaseAssignment): """An assignment records the name, CSTNode and its accesses.""" #: The node of assignment, it could be a :class:`~libcst.Import`, :class:`~libcst.ImportFrom`, #: :class:`~libcst.Name`, :class:`~libcst.FunctionDef`, or :class:`~libcst.ClassDef`. node: cst.CSTNode __index: int def __init__( self, name: str, scope: "Scope", node: cst.CSTNode, index: int ) -> None: self.node = node self.__index = index super().__init__(name, scope) @property def _index(self) -> int: return self.__index def get_qualified_names_for(self, full_name: str) -> Set[QualifiedName]: return { QualifiedName( f"{self.scope._name_prefix}.{full_name}" if self.scope._name_prefix else full_name, QualifiedNameSource.LOCAL, ) } # even though we don't override the constructor. class BuiltinAssignment(BaseAssignment): """ A BuiltinAssignment represents an value provide by Python as a builtin, including `functions `_, `constants `_, and `types `_. """ def get_qualified_names_for(self, full_name: str) -> Set[QualifiedName]: return {QualifiedName(f"builtins.{self.name}", QualifiedNameSource.BUILTIN)} class ImportAssignment(Assignment): """An assignment records the import node and it's alias""" as_name: cst.CSTNode def __init__( self, name: str, scope: "Scope", node: cst.CSTNode, index: int, as_name: cst.CSTNode, ) -> None: super().__init__(name, scope, node, index) self.as_name = as_name def get_module_name_for_import(self) -> str: module = "" if isinstance(self.node, cst.ImportFrom): module_attr = self.node.module relative = self.node.relative if module_attr: module = get_full_name_for_node(module_attr) or "" if relative: module = "." 
* len(relative) + module return module def get_qualified_names_for(self, full_name: str) -> Set[QualifiedName]: module = self.get_module_name_for_import() results = set() assert isinstance(self.node, (cst.ImportFrom, cst.Import)) import_names = self.node.names if not isinstance(import_names, cst.ImportStar): for name in import_names: real_name = get_full_name_for_node(name.name) if not real_name: continue # real_name can contain `.` for dotted imports # for these we want to find the longest prefix that matches full_name parts = real_name.split(".") real_names = [".".join(parts[:i]) for i in range(len(parts), 0, -1)] for real_name in real_names: as_name = real_name if module and module.endswith("."): # from . import a # real_name should be ".a" real_name = f"{module}{real_name}" elif module: real_name = f"{module}.{real_name}" if name and name.asname: eval_alias = name.evaluated_alias if eval_alias is not None: as_name = eval_alias if full_name.startswith(as_name): remaining_name = full_name.split(as_name, 1)[1] if remaining_name and not remaining_name.startswith("."): continue remaining_name = remaining_name.lstrip(".") results.add( QualifiedName( f"{real_name}.{remaining_name}" if remaining_name else real_name, QualifiedNameSource.IMPORT, ) ) break return results class Assignments: """A container to provide all assignments in a scope.""" def __init__(self, assignments: Mapping[str, Collection[BaseAssignment]]) -> None: self._assignments = assignments def __iter__(self) -> Iterator[BaseAssignment]: """Iterate through all assignments by ``for i in scope.assignments``.""" for assignments in self._assignments.values(): for assignment in assignments: yield assignment def __getitem__(self, node: Union[str, cst.CSTNode]) -> Collection[BaseAssignment]: """Get assignments given a name str or :class:`~libcst.CSTNode` by ``scope.assignments[node]``""" name = get_full_name_for_node(node) return set(self._assignments[name]) if name in self._assignments else set() def __contains__(self, node: Union[str, cst.CSTNode]) -> bool: """Check if a name str or :class:`~libcst.CSTNode` has any assignment by ``node in scope.assignments``""" return len(self[node]) > 0 class Accesses: """A container to provide all accesses in a scope.""" def __init__(self, accesses: Mapping[str, Collection[Access]]) -> None: self._accesses = accesses def __iter__(self) -> Iterator[Access]: """Iterate through all accesses by ``for i in scope.accesses``.""" for accesses in self._accesses.values(): for access in accesses: yield access def __getitem__(self, node: Union[str, cst.CSTNode]) -> Collection[Access]: """Get accesses given a name str or :class:`~libcst.CSTNode` by ``scope.accesses[node]``""" name = get_full_name_for_node(node) return self._accesses[name] if name in self._accesses else set() def __contains__(self, node: Union[str, cst.CSTNode]) -> bool: """Check if a name str or :class:`~libcst.CSTNode` has any access by ``node in scope.accesses``""" return len(self[node]) > 0 class Scope(abc.ABC): """ Base class of all scope classes. Scope object stores assignments from imports, variable assignments, function definition or class definition. A scope has a parent scope which represents the inheritance relationship. That means an assignment in parent scope is viewable to the child scope and the child scope may overwrites the assignment by using the same name. Use ``name in scope`` to check whether a name is viewable in the scope. Use ``scope[name]`` to retrieve all viewable assignments in the scope. .. 
note:: This scope analysis module only analyzes local variable names and it doesn't handle attribute names; for example, given ``a.b.c = 1``, local variable name ``a`` is recorded as an assignment instead of ``c`` or ``a.b.c``. To analyze the assignment/access of arbitrary object attributes, we leave the job to type inference metadata provider coming in the future. """ #: Parent scope. Note the parent scope of a GlobalScope is itself. parent: "Scope" #: Refers to the GlobalScope. globals: "GlobalScope" _assignments: MutableMapping[str, Set[BaseAssignment]] _assignment_count: int _accesses_by_name: MutableMapping[str, Set[Access]] _accesses_by_node: MutableMapping[cst.CSTNode, Set[Access]] _name_prefix: str def __init__(self, parent: "Scope") -> None: super().__init__() self.parent = parent self.globals = parent.globals self._assignments = defaultdict(set) self._assignment_count = 0 self._accesses_by_name = defaultdict(set) self._accesses_by_node = defaultdict(set) self._name_prefix = "" def record_assignment(self, name: str, node: cst.CSTNode) -> None: target = self._find_assignment_target(name) target._assignments[name].add( Assignment( name=name, scope=target, node=node, index=target._assignment_count ) ) def record_import_assignment( self, name: str, node: cst.CSTNode, as_name: cst.CSTNode ) -> None: target = self._find_assignment_target(name) target._assignments[name].add( ImportAssignment( name=name, scope=target, node=node, as_name=as_name, index=target._assignment_count, ) ) def _find_assignment_target(self, name: str) -> "Scope": return self def record_access(self, name: str, access: Access) -> None: self._accesses_by_name[name].add(access) self._accesses_by_node[access.node].add(access) def _is_visible_from_children(self, from_scope: "Scope") -> bool: """Returns if the assignments in this scope can be accessed from children. This is normally True, except for class scopes:: def outer_fn(): v = ... # outer_fn's declaration class InnerCls: v = ... # shadows outer_fn's declaration class InnerInnerCls: v = ... # shadows all previous declarations of v def inner_fn(): nonlocal v v = ... # this refers to outer_fn's declaration # and not to any of the inner classes' as those are # hidden from their children. """ return True def _next_visible_parent( self, from_scope: "Scope", first: Optional["Scope"] = None ) -> "Scope": parent = first if first is not None else self.parent while not parent._is_visible_from_children(from_scope): parent = parent.parent return parent @abc.abstractmethod def __contains__(self, name: str) -> bool: """Check if the name str exist in current scope by ``name in scope``.""" ... def __getitem__(self, name: str) -> Set[BaseAssignment]: """ Get assignments given a name str by ``scope[name]``. .. note:: *Why does it return a list of assignments given a name instead of just one assignment?* Many programming languages differentiate variable declaration and assignment. Further, those programming languages often disallow duplicate declarations within the same scope, and will often hoist the declaration (without its assignment) to the top of the scope. These design decisions make static analysis much easier, because it's possible to match a name against its single declaration for a given scope. As an example, the following code would be valid in JavaScript:: function fn() { console.log(value); // value is defined here, because the declaration is hoisted, but is currently 'undefined'. var value = 5; // A function-scoped declaration. } fn(); // prints 'undefined'. 
        In contrast, Python's declaration and assignment are identical and are not
        hoisted::

            if conditional_value:
                value = 5
            elif other_conditional_value:
                value = 10
            print(value)  # possibly valid, depending on conditional execution

        This code may throw a ``NameError`` if both conditional values are falsy.
        It also means that depending on the codepath taken, the original
        declaration could come from either ``value = ...`` assignment node. As a
        result, instead of returning a single declaration, we're forced to return
        a collection of all of the assignments we think could have defined a given
        name by the time a piece of code is executed. For the above example,
        ``value`` would resolve to a set of both assignments.
        """
        return self._resolve_scope_for_access(name, self)

    @abc.abstractmethod
    def _resolve_scope_for_access(
        self, name: str, from_scope: "Scope"
    ) -> Set[BaseAssignment]:
        ...

    def __hash__(self) -> int:
        return id(self)

    @abc.abstractmethod
    def record_global_overwrite(self, name: str) -> None:
        ...

    @abc.abstractmethod
    def record_nonlocal_overwrite(self, name: str) -> None:
        ...

    def get_qualified_names_for(
        self, node: Union[str, cst.CSTNode]
    ) -> Collection[QualifiedName]:
        """Get all :class:`~libcst.metadata.QualifiedName` in current scope given a
        :class:`~libcst.CSTNode`.
        The source of a qualified name can be either
        :attr:`QualifiedNameSource.IMPORT`, :attr:`QualifiedNameSource.BUILTIN` or
        :attr:`QualifiedNameSource.LOCAL`.
        Given the following example, ``c`` has qualified name ``a.b.c`` with source
        ``IMPORT``, ``f`` has qualified name ``Cls.f`` with source ``LOCAL``, ``a``
        has qualified name ``Cls.f.<locals>.a``, ``i`` has qualified name
        ``Cls.f.<locals>.<comprehension>.i``, and the builtin ``int`` has qualified
        name ``builtins.int`` with source ``BUILTIN``::

            from a.b import c

            class Cls:
                def f(self) -> "c":
                    c()
                    a = int("1")
                    [i for i in c()]

        We extend `PEP-3155 <https://www.python.org/dev/peps/pep-3155/>`_ (which
        defines ``__qualname__`` for class and function only; a function namespace
        is followed by a ``<locals>``) to provide a qualified name for every
        :class:`~libcst.CSTNode` recorded by :class:`~libcst.metadata.Assignment`
        and :class:`~libcst.metadata.Access`. The namespace of a comprehension
        (:class:`~libcst.ListComp`, :class:`~libcst.SetComp`,
        :class:`~libcst.DictComp`) is represented with ``<comprehension>``.

        An imported name may be used for a type annotation as a
        :class:`~libcst.SimpleString`, and resolving the qualified name given a
        :class:`~libcst.SimpleString` is currently not supported, considering it
        could be a complex type annotation in the string which is hard to resolve,
        e.g. ``List[Union[int, str]]``.
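
        A usage sketch (editor's addition; mirrors the example above)::

            wrapper = MetadataWrapper(cst.parse_module("from a.b import c\nc()\n"))
            scopes = wrapper.resolve(ScopeProvider)
            call = wrapper.module.body[-1].body[0].value
            scopes[call].get_qualified_names_for(call)
            # {QualifiedName(name='a.b.c', source=<QualifiedNameSource.IMPORT: 1>)}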
""" # if this node is an access we know the assignment and we can use that name node_accesses = ( self._accesses_by_node.get(node) if isinstance(node, cst.CSTNode) else None ) if node_accesses: return { qname for access in node_accesses for referent in access.referents for qname in referent.get_qualified_names_for(referent.name) } full_name = get_full_name_for_node(node) if full_name is None: return set() assignments = set() prefix = full_name while prefix: if prefix in self: assignments = self[prefix] break idx = prefix.rfind(".") prefix = None if idx == -1 else prefix[:idx] if not isinstance(node, str): for assignment in assignments: if isinstance(assignment, Assignment) and _is_assignment( node, assignment.node ): return assignment.get_qualified_names_for(full_name) results = set() for assignment in assignments: results |= assignment.get_qualified_names_for(full_name) return results @property def assignments(self) -> Assignments: """Return an :class:`~libcst.metadata.Assignments` contains all assignmens in current scope.""" return Assignments(self._assignments) @property def accesses(self) -> Accesses: """Return an :class:`~libcst.metadata.Accesses` contains all accesses in current scope.""" return Accesses(self._accesses_by_name) class BuiltinScope(Scope): """ A BuiltinScope represents python builtin declarations. See https://docs.python.org/3/library/builtins.html """ def __init__(self, globals: Scope) -> None: self.globals: Scope = globals # must be defined before Scope.__init__ is called super().__init__(parent=self) def __contains__(self, name: str) -> bool: return hasattr(builtins, name) def _resolve_scope_for_access( self, name: str, from_scope: "Scope" ) -> Set[BaseAssignment]: if name in self._assignments: return self._assignments[name] if hasattr(builtins, name): # note - we only see the builtin assignments during the deferred # access resolution. unfortunately that means we have to create the # assignment here, which can cause the set to mutate during iteration self._assignments[name].add(BuiltinAssignment(name, self)) return self._assignments[name] return set() def record_global_overwrite(self, name: str) -> None: raise NotImplementedError("global overwrite in builtin scope are not allowed") def record_nonlocal_overwrite(self, name: str) -> None: raise NotImplementedError("declarations in builtin scope are not allowed") def _find_assignment_target(self, name: str) -> "Scope": raise NotImplementedError("assignments in builtin scope are not allowed") class GlobalScope(Scope): """ A GlobalScope is the scope of module. All module level assignments are recorded in GlobalScope. """ def __init__(self) -> None: super().__init__(parent=BuiltinScope(self)) def __contains__(self, name: str) -> bool: if name in self._assignments: return len(self._assignments[name]) > 0 return name in self._next_visible_parent(self) def _resolve_scope_for_access( self, name: str, from_scope: "Scope" ) -> Set[BaseAssignment]: if name in self._assignments: return self._assignments[name] parent = self._next_visible_parent(from_scope) return parent[name] def record_global_overwrite(self, name: str) -> None: pass def record_nonlocal_overwrite(self, name: str) -> None: raise NotImplementedError("nonlocal declaration not allowed at module level") class LocalScope(Scope, abc.ABC): _scope_overwrites: Dict[str, Scope] #: Name of function. Used as qualified name. name: Optional[str] #: The :class:`~libcst.CSTNode` node defines the current scope. 
    node: cst.CSTNode

    def __init__(
        self, parent: Scope, node: cst.CSTNode, name: Optional[str] = None
    ) -> None:
        super().__init__(parent)
        self.name = name
        self.node = node
        self._scope_overwrites = {}
        # pyre-fixme[4]: Attribute `_name_prefix` of class `LocalScope` has type
        # `str` but no type is specified.
        self._name_prefix = self._make_name_prefix()

    def record_global_overwrite(self, name: str) -> None:
        self._scope_overwrites[name] = self.globals

    def record_nonlocal_overwrite(self, name: str) -> None:
        self._scope_overwrites[name] = self.parent

    def _find_assignment_target(self, name: str) -> "Scope":
        if name in self._scope_overwrites:
            scope = self._scope_overwrites[name]
            return self._next_visible_parent(self, scope)._find_assignment_target(name)
        else:
            return super()._find_assignment_target(name)

    def __contains__(self, name: str) -> bool:
        if name in self._scope_overwrites:
            return name in self._scope_overwrites[name]
        if name in self._assignments:
            return len(self._assignments[name]) > 0
        return name in self._next_visible_parent(self)

    def _resolve_scope_for_access(
        self, name: str, from_scope: "Scope"
    ) -> Set[BaseAssignment]:
        if name in self._scope_overwrites:
            scope = self._scope_overwrites[name]
            return self._next_visible_parent(
                from_scope, scope
            )._resolve_scope_for_access(name, from_scope)
        if name in self._assignments:
            return self._assignments[name]
        else:
            return self._next_visible_parent(from_scope)._resolve_scope_for_access(
                name, from_scope
            )

    def _make_name_prefix(self) -> str:
        # filter falsey strings out
        return ".".join(
            filter(None, [self.parent._name_prefix, self.name, "<locals>"])
        )


# even though we don't override the constructor.
class FunctionScope(LocalScope):
    """
    When a function is defined, it creates a FunctionScope.
    """

    pass


# even though we don't override the constructor.
class ClassScope(LocalScope):
    """
    When a class is defined, it creates a ClassScope.
    """

    def _is_visible_from_children(self, from_scope: "Scope") -> bool:
        return from_scope.parent is self and isinstance(from_scope, AnnotationScope)

    def _make_name_prefix(self) -> str:
        # filter falsey strings out
        return ".".join(filter(None, [self.parent._name_prefix, self.name]))


# even though we don't override the constructor.
class ComprehensionScope(LocalScope):
    """
    Comprehensions and generator expressions create their own scope.

    For example, in

    [i for i in range(10)]

    The variable ``i`` is only viewable within the ComprehensionScope.
    """

    # TODO: Assignment expressions (Python 3.8) will complicate ComprehensionScopes,
    # and will require us to handle such assignments as non-local.
    # https://www.python.org/dev/peps/pep-0572/#scope-of-the-target
    def _make_name_prefix(self) -> str:
        # filter falsey strings out
        return ".".join(filter(None, [self.parent._name_prefix, "<comprehension>"]))


class AnnotationScope(LocalScope):
    """
    Scopes used for type aliases and type parameters as defined by PEP-695.

    These scopes are created for type parameters using the special syntax, as well
    as type aliases. See https://peps.python.org/pep-0695/#scoping-behavior for
    more.
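
    For example (editor's sketch; requires a parser with PEP 695 support)::

        type Alias[T] = list[T]      # "T" is declared in an AnnotationScope
        def f[T](x: T) -> T: ...     # function type parameters live there too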
""" def _make_name_prefix(self) -> str: # these scopes are transparent for the purposes of qualified names return self.parent._name_prefix # Generates dotted names from an Attribute or Name node: # Attribute(value=Name(value="a"), attr=Name(value="b")) -> ("a.b", "a") # each string has the corresponding CSTNode attached to it def _gen_dotted_names( node: Union[cst.Attribute, cst.Name] ) -> Iterator[Tuple[str, Union[cst.Attribute, cst.Name]]]: if isinstance(node, cst.Name): yield node.value, node else: value = node.value if isinstance(value, cst.Call): value = value.func if isinstance(value, (cst.Attribute, cst.Name)): name_values = _gen_dotted_names(value) try: next_name, next_node = next(name_values) except StopIteration: return else: yield next_name, next_node yield from name_values elif isinstance(value, (cst.Attribute, cst.Name)): name_values = _gen_dotted_names(value) try: next_name, next_node = next(name_values) except StopIteration: return else: yield f"{next_name}.{node.attr.value}", node yield next_name, next_node yield from name_values def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool: """ Returns true if ``node`` is part of the assignment at ``assignment_node``. Normally this is just a simple identity check, except for imports where the assignment is attached to the entire import statement but we are interested in ``Name`` nodes inside the statement. """ if node is assignment_node: return True if isinstance(assignment_node, (cst.Import, cst.ImportFrom)): aliases = assignment_node.names if isinstance(aliases, cst.ImportStar): return False for alias in aliases: if alias.name is node: return True asname = alias.asname if asname is not None: if asname.name is node: return True return False @dataclass(frozen=True) class DeferredAccess: access: Access enclosing_attribute: Optional[cst.Attribute] enclosing_string_annotation: Optional[cst.BaseString] class ScopeVisitor(cst.CSTVisitor): # since it's probably not useful. That can makes this visitor cleaner. 
def __init__(self, provider: "ScopeProvider") -> None: super().__init__() self.provider: ScopeProvider = provider self.scope: Scope = GlobalScope() self.__deferred_accesses: List[DeferredAccess] = [] self.__top_level_attribute_stack: List[Optional[cst.Attribute]] = [None] self.__in_annotation_stack: List[bool] = [False] self.__in_type_hint_stack: List[bool] = [False] self.__in_ignored_subscript: Set[cst.Subscript] = set() self.__last_string_annotation: Optional[cst.BaseString] = None self.__ignore_annotation: int = 0 @contextmanager def _new_scope( self, kind: Type[LocalScope], node: cst.CSTNode, name: Optional[str] = None ) -> Iterator[None]: parent_scope = self.scope self.scope = kind(parent_scope, node, name) try: yield finally: self.scope = parent_scope @contextmanager def _switch_scope(self, scope: Scope) -> Iterator[None]: current_scope = self.scope self.scope = scope try: yield finally: self.scope = current_scope def _visit_import_alike(self, node: Union[cst.Import, cst.ImportFrom]) -> bool: names = node.names if isinstance(names, cst.ImportStar): return False # make sure node.names is Sequence[ImportAlias] for name in names: self.provider.set_metadata(name, self.scope) asname = name.asname if asname is not None: name_values = _gen_dotted_names(cst.ensure_type(asname.name, cst.Name)) import_node_asname = asname.name else: name_values = _gen_dotted_names(name.name) import_node_asname = name.name for name_value, _ in name_values: self.scope.record_import_assignment( name_value, node, import_node_asname ) return False def visit_Import(self, node: cst.Import) -> Optional[bool]: return self._visit_import_alike(node) def visit_ImportFrom(self, node: cst.ImportFrom) -> Optional[bool]: return self._visit_import_alike(node) def visit_Attribute(self, node: cst.Attribute) -> Optional[bool]: if self.__top_level_attribute_stack[-1] is None: self.__top_level_attribute_stack[-1] = node node.value.visit(self) # explicitly not visiting attr if self.__top_level_attribute_stack[-1] is node: self.__top_level_attribute_stack[-1] = None return False def visit_Call(self, node: cst.Call) -> Optional[bool]: self.__top_level_attribute_stack.append(None) self.__in_type_hint_stack.append(False) qnames = {qn.name for qn in self.scope.get_qualified_names_for(node)} if "typing.NewType" in qnames or "typing.TypeVar" in qnames: node.func.visit(self) self.__in_type_hint_stack[-1] = True for arg in node.args[1:]: arg.visit(self) return False if "typing.cast" in qnames: node.func.visit(self) if len(node.args) > 0: self.__in_type_hint_stack.append(True) node.args[0].visit(self) self.__in_type_hint_stack.pop() for arg in node.args[1:]: arg.visit(self) return False return True def leave_Call(self, original_node: cst.Call) -> None: self.__top_level_attribute_stack.pop() self.__in_type_hint_stack.pop() def visit_Annotation(self, node: cst.Annotation) -> Optional[bool]: self.__in_annotation_stack.append(True) def leave_Annotation(self, original_node: cst.Annotation) -> None: self.__in_annotation_stack.pop() def visit_SimpleString(self, node: cst.SimpleString) -> Optional[bool]: self._handle_string_annotation(node) return False def visit_ConcatenatedString(self, node: cst.ConcatenatedString) -> Optional[bool]: return not self._handle_string_annotation(node) def _handle_string_annotation( self, node: Union[cst.SimpleString, cst.ConcatenatedString] ) -> bool: """Returns whether it successfully handled the string annotation""" if ( self.__in_type_hint_stack[-1] or self.__in_annotation_stack[-1] ) and not 
self.__in_ignored_subscript: value = node.evaluated_value if value: top_level_annotation = self.__last_string_annotation is None if top_level_annotation: self.__last_string_annotation = node try: mod = cst.parse_module(value) mod.visit(self) except cst.ParserSyntaxError: # swallow string annotation parsing errors # this is the same behavior as cPython pass if top_level_annotation: self.__last_string_annotation = None return True return False def visit_Subscript(self, node: cst.Subscript) -> Optional[bool]: in_type_hint = False if isinstance(node.value, cst.Name): qnames = {qn.name for qn in self.scope.get_qualified_names_for(node.value)} if any(qn.startswith(("typing.", "typing_extensions.")) for qn in qnames): in_type_hint = True if "typing.Literal" in qnames or "typing_extensions.Literal" in qnames: self.__in_ignored_subscript.add(node) self.__in_type_hint_stack.append(in_type_hint) return True def leave_Subscript(self, original_node: cst.Subscript) -> None: self.__in_type_hint_stack.pop() self.__in_ignored_subscript.discard(original_node) def visit_Name(self, node: cst.Name) -> Optional[bool]: # not all Name have ExpressionContext context = self.provider.get_metadata(ExpressionContextProvider, node, None) if context == ExpressionContext.STORE: self.scope.record_assignment(node.value, node) elif context in (ExpressionContext.LOAD, ExpressionContext.DEL, None): access = Access( node, self.scope, is_annotation=bool( self.__in_annotation_stack[-1] and not self.__ignore_annotation ), is_type_hint=bool(self.__in_type_hint_stack[-1]), ) self.__deferred_accesses.append( DeferredAccess( access=access, enclosing_attribute=self.__top_level_attribute_stack[-1], enclosing_string_annotation=self.__last_string_annotation, ) ) def visit_FunctionDef(self, node: cst.FunctionDef) -> Optional[bool]: self.scope.record_assignment(node.name.value, node) self.provider.set_metadata(node.name, self.scope) with ExitStack() as stack: if node.type_parameters: stack.enter_context(self._new_scope(AnnotationScope, node, None)) node.type_parameters.visit(self) with self._new_scope( FunctionScope, node, get_full_name_for_node(node.name) ): node.params.visit(self) node.body.visit(self) for decorator in node.decorators: decorator.visit(self) returns = node.returns if returns: returns.visit(self) return False def visit_Lambda(self, node: cst.Lambda) -> Optional[bool]: with self._new_scope(FunctionScope, node): node.params.visit(self) node.body.visit(self) return False def visit_Param(self, node: cst.Param) -> Optional[bool]: self.scope.record_assignment(node.name.value, node) self.provider.set_metadata(node.name, self.scope) with self._switch_scope(self.scope.parent): for field in [node.default, node.annotation]: if field: field.visit(self) return False def visit_Arg(self, node: cst.Arg) -> bool: # The keyword of Arg is neither an Assignment nor an Access and we explicitly don't visit it. 
value = node.value if value: value.visit(self) return False def visit_ClassDef(self, node: cst.ClassDef) -> Optional[bool]: self.scope.record_assignment(node.name.value, node) self.provider.set_metadata(node.name, self.scope) for decorator in node.decorators: decorator.visit(self) with ExitStack() as stack: if node.type_parameters: stack.enter_context(self._new_scope(AnnotationScope, node, None)) node.type_parameters.visit(self) for base in node.bases: base.visit(self) for keyword in node.keywords: keyword.visit(self) with self._new_scope(ClassScope, node, get_full_name_for_node(node.name)): for statement in node.body.body: statement.visit(self) return False def visit_ClassDef_bases(self, node: cst.ClassDef) -> None: self.__ignore_annotation += 1 def leave_ClassDef_bases(self, node: cst.ClassDef) -> None: self.__ignore_annotation -= 1 def visit_Global(self, node: cst.Global) -> Optional[bool]: for name_item in node.names: self.scope.record_global_overwrite(name_item.name.value) return False def visit_Nonlocal(self, node: cst.Nonlocal) -> Optional[bool]: for name_item in node.names: self.scope.record_nonlocal_overwrite(name_item.name.value) return False def visit_ListComp(self, node: cst.ListComp) -> Optional[bool]: return self._visit_comp_alike(node) def visit_SetComp(self, node: cst.SetComp) -> Optional[bool]: return self._visit_comp_alike(node) def visit_DictComp(self, node: cst.DictComp) -> Optional[bool]: return self._visit_comp_alike(node) def visit_GeneratorExp(self, node: cst.GeneratorExp) -> Optional[bool]: return self._visit_comp_alike(node) def _visit_comp_alike( self, node: Union[cst.ListComp, cst.SetComp, cst.DictComp, cst.GeneratorExp] ) -> bool: """ Cheat sheet: `[elt for target in iter if ifs]` Terminology: target: The variable or pattern we're storing each element of the iter in. iter: The thing we're iterating over. ifs: A list of `if` conditions provided after the iterator. elt: The value that will be computed and "yielded" each time the loop iterates. For most comprehensions, this is just the `node.elt`, but DictComp has `key` and `value`, which behave like `node.elt` would. Nested Comprehension: ``[a for b in c for a in b]`` is a "nested" ListComp. The outer iterator is in ``node.for_in`` and the inner iterator is in ``node.for_in.inner_for_in``. The first comprehension object's iter in generators is evaluated outside of the ComprehensionScope. Every other comprehension's iter is evaluated inside the ComprehensionScope. That doesn't seem very sane, but it appears to be how it works. non_flat = [ [1,2,3], [4,5,6], [7,8] ] flat = [y for x in non_flat for y in x] # this works fine # This will give a "NameError: name 'x' is not defined": flat = [y for x in x for y in x] # x isn't defined, because the first iter is evaluated outside the scope. # This will give an UnboundLocalError, indicating that the second # comprehension's iter value is evaluated inside the scope as its elt. # UnboundLocalError: local variable 'y' referenced before assignment flat = [y for x in non_flat for y in y] """ for_in = node.for_in for_in.iter.visit(self) self.provider.set_metadata(for_in, self.scope) with self._new_scope(ComprehensionScope, node): for_in.target.visit(self) # Things from here on can refer to the target.
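# Stamp the comprehension target's assignment before visiting the ifs and
# elt: the scope-wide assignment counter orders assignments relative to
# accesses, so accesses recorded later can prefer assignments that
# precede them.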
self.scope._assignment_count += 1 for condition in for_in.ifs: condition.visit(self) inner_for_in = for_in.inner_for_in if inner_for_in: inner_for_in.visit(self) if isinstance(node, cst.DictComp): node.key.visit(self) node.value.visit(self) else: node.elt.visit(self) return False def visit_For(self, node: cst.For) -> Optional[bool]: node.target.visit(self) self.scope._assignment_count += 1 for child in [node.iter, node.body, node.orelse, node.asynchronous]: if child is not None: child.visit(self) return False def infer_accesses(self) -> None: # Aggregate accesses with the same name and batch-add them with set union as an optimization. # In the worst case, all accesses (m) and assignments (n) refer to the same name, # making the time complexity O(m x n); batching reduces it to O(m + n). scope_name_accesses = defaultdict(set) for def_access in self.__deferred_accesses: access, enclosing_attribute, enclosing_string_annotation = ( def_access.access, def_access.enclosing_attribute, def_access.enclosing_string_annotation, ) name = ensure_type(access.node, cst.Name).value if enclosing_attribute is not None: # if _gen_dotted_names doesn't generate any values, fall back to # the original name node above for attr_name, node in _gen_dotted_names(enclosing_attribute): if attr_name in access.scope: access.node = node name = attr_name break if enclosing_string_annotation is not None: access.node = enclosing_string_annotation scope_name_accesses[(access.scope, name)].add(access) access.record_assignments(name) access.scope.record_access(name, access) for (scope, name), accesses in scope_name_accesses.items(): for assignment in scope._resolve_scope_for_access(name, scope): assignment.record_accesses(accesses) self.__deferred_accesses = [] def on_leave(self, original_node: cst.CSTNode) -> None: self.provider.set_metadata(original_node, self.scope) if isinstance(original_node, _ASSIGNMENT_LIKE_NODES): self.scope._assignment_count += 1 super().on_leave(original_node) def visit_TypeAlias(self, node: cst.TypeAlias) -> Optional[bool]: self.scope.record_assignment(node.name.value, node) with self._new_scope(AnnotationScope, node, None): if node.type_parameters is not None: node.type_parameters.visit(self) node.value.visit(self) return False def visit_TypeVar(self, node: cst.TypeVar) -> Optional[bool]: self.scope.record_assignment(node.name.value, node) if node.bound is not None: node.bound.visit(self) return False def visit_TypeVarTuple(self, node: cst.TypeVarTuple) -> Optional[bool]: self.scope.record_assignment(node.name.value, node) return False def visit_ParamSpec(self, node: cst.ParamSpec) -> Optional[bool]: self.scope.record_assignment(node.name.value, node) return False class ScopeProvider(BatchableMetadataProvider[Optional[Scope]]): """ :class:`ScopeProvider` traverses the entire module and creates the scope inheritance structure. It provides the scope for each name assignment and access. It is useful for more advanced static analysis. E.g. given a :class:`~libcst.FunctionDef` node, we can check the type of its Scope to figure out whether it is a class method (:class:`ClassScope`) or a regular function (:class:`GlobalScope`). Scope metadata is available for most node types other than formatting information nodes (whitespace, parentheses, etc.).
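    A rough usage sketch (the module text and variable names here are
    illustrative, not part of the provider's API)::

        import libcst as cst
        from libcst.metadata import ClassScope, MetadataWrapper, ScopeProvider

        wrapper = MetadataWrapper(cst.parse_module("class A:\n    def f(self): pass\n"))
        scopes = wrapper.resolve(ScopeProvider)
        for node, scope in scopes.items():
            if isinstance(node, cst.FunctionDef):
                # "f" is defined in A's class body, so its scope is a ClassScope
                print(node.name.value, isinstance(scope, ClassScope))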
""" METADATA_DEPENDENCIES = (ExpressionContextProvider,) def visit_Module(self, node: cst.Module) -> Optional[bool]: visitor = ScopeVisitor(self) node.visit(visitor) visitor.infer_accesses() LibCST-1.2.0/libcst/metadata/span_provider.py000066400000000000000000000070261456464173300211010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from contextlib import contextmanager from dataclasses import dataclass, field from typing import Callable, Iterator, List, Optional from libcst import CSTNode, Module from libcst._nodes.internal import CodegenState from libcst.metadata.base_provider import BaseMetadataProvider @dataclass(frozen=True) class CodeSpan: """ Represents the position of a piece of code by its starting position and length. Note: This class does not specify the unit of distance - it can be bytes, Unicode characters, or something else entirely. """ #: Offset of the code from the beginning of the file. Can be 0. start: int #: Length of the span length: int @dataclass(frozen=False) class SpanProvidingCodegenState(CodegenState): provider: BaseMetadataProvider[CodeSpan] get_length: Optional[Callable[[str], int]] = None position: int = 0 _stack: List[int] = field(default_factory=list) def add_indent_tokens(self) -> None: super().add_indent_tokens() for token in self.indent_tokens: self._update_position(token) def add_token(self, value: str) -> None: super().add_token(value) self._update_position(value) def _update_position(self, value: str) -> None: get_length = self.get_length or len self.position += get_length(value) def before_codegen(self, node: CSTNode) -> None: self._stack.append(self.position) def after_codegen(self, node: CSTNode) -> None: start = self._stack.pop() if node not in self.provider._computed: end = self.position self.provider._computed[node] = CodeSpan(start, length=end - start) @contextmanager def record_syntactic_position( self, node: CSTNode, *, start_node: Optional[CSTNode] = None, end_node: Optional[CSTNode] = None, ) -> Iterator[None]: start = self.position try: yield finally: end = self.position start = ( self.provider._computed[start_node].start if start_node is not None else start ) if end_node is not None: end_span = self.provider._computed[end_node] length = (end_span.start + end_span.length) - start else: length = end - start self.provider._computed[node] = CodeSpan(start, length=length) def byte_length_in_utf8(value: str) -> int: return len(value.encode("utf8")) class ByteSpanPositionProvider(BaseMetadataProvider[CodeSpan]): """ Generates offset and length metadata for nodes' positions. For each :class:`CSTNode` this provider generates a :class:`CodeSpan` that contains the byte-offset of the node from the start of the file, and its length (also in bytes). The whitespace owned by the node is not included in this length. 
Note: offset and length measure bytes, not characters (which is significant, for example, in the case of Unicode characters encoded in more than one byte) """ def _gen_impl(self, module: Module) -> None: state = SpanProvidingCodegenState( default_indent=module.default_indent, default_newline=module.default_newline, provider=self, get_length=byte_length_in_utf8, ) module._codegen(state) LibCST-1.2.0/libcst/metadata/tests/000077500000000000000000000000001456464173300170115ustar00rootroot00000000000000LibCST-1.2.0/libcst/metadata/tests/__init__.py000066400000000000000000000002631456464173300211230ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/metadata/tests/test_accessor_provider.py000066400000000000000000000037421456464173300241440ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import dataclasses from textwrap import dedent import libcst as cst from libcst.metadata import AccessorProvider, MetadataWrapper from libcst.testing.utils import data_provider, UnitTest class DependentVisitor(cst.CSTVisitor): METADATA_DEPENDENCIES = (AccessorProvider,) def __init__(self, *, test: UnitTest) -> None: self.test = test def on_visit(self, node: cst.CSTNode) -> bool: for f in dataclasses.fields(node): child = getattr(node, f.name) if isinstance(child, cst.CSTNode): accessor = self.get_metadata(AccessorProvider, child) self.test.assertEqual(accessor, f.name) return True class AccessorProviderTest(UnitTest): @data_provider( ( ( """ foo = 'toplevel' fn1(foo) fn2(foo) def fn_def(): foo = 'shadow' fn3(foo) """, ), ( """ global_var = None @cls_attr class Cls(cls_attr, kwarg=cls_attr): cls_attr = 5 def f(): pass """, ), ( """ iterator = None condition = None [elt for target in iterator if condition] {elt for target in iterator if condition} {elt: target for target in iterator if condition} (elt for target in iterator if condition) """, ), ) ) def test_accessor_provider(self, code: str) -> None: wrapper = MetadataWrapper(cst.parse_module(dedent(code))) wrapper.visit(DependentVisitor(test=self)) LibCST-1.2.0/libcst/metadata/tests/test_base_provider.py000066400000000000000000000130071456464173300232470ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import cast import libcst as cst from libcst import parse_module from libcst._metadata_dependent import LazyValue from libcst.metadata import ( BatchableMetadataProvider, MetadataWrapper, VisitorMetadataProvider, ) from libcst.metadata.wrapper import _gen_batchable from libcst.testing.utils import UnitTest class BaseMetadataProviderTest(UnitTest): def test_visitor_provider(self) -> None: class SimpleProvider(VisitorMetadataProvider[int]): """ Sets metadata on every node to 1.
""" def on_visit(self, node: cst.CSTNode) -> bool: self.set_metadata(node, 1) return True wrapper = MetadataWrapper(parse_module("pass; return")) module = wrapper.module pass_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] return_ = cast(cst.SimpleStatementLine, module.body[0]).body[1] provider = SimpleProvider() metadata = provider._gen(wrapper) # Check access on provider self.assertEqual(provider.get_metadata(SimpleProvider, module), 1) self.assertEqual(provider.get_metadata(SimpleProvider, pass_), 1) self.assertEqual(provider.get_metadata(SimpleProvider, return_), 1) # Check returned mapping self.assertEqual(metadata[module], 1) self.assertEqual(metadata[pass_], 1) self.assertEqual(metadata[return_], 1) def test_batchable_provider(self) -> None: class SimpleProvider(BatchableMetadataProvider[int]): """ Sets metadata on every pass node to 1 and every return node to 2. """ def visit_Pass(self, node: cst.Pass) -> None: self.set_metadata(node, 1) def visit_Return(self, node: cst.Return) -> None: self.set_metadata(node, 2) wrapper = MetadataWrapper(parse_module("pass; return; pass")) module = wrapper.module pass_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] return_ = cast(cst.SimpleStatementLine, module.body[0]).body[1] pass_2 = cast(cst.SimpleStatementLine, module.body[0]).body[2] provider = SimpleProvider() metadata = _gen_batchable(wrapper, [provider]) # Check access on provider self.assertEqual(provider.get_metadata(SimpleProvider, pass_), 1) self.assertEqual(provider.get_metadata(SimpleProvider, return_), 2) self.assertEqual(provider.get_metadata(SimpleProvider, pass_2), 1) # Check returned mapping self.assertEqual(metadata[SimpleProvider][pass_], 1) self.assertEqual(metadata[SimpleProvider][return_], 2) self.assertEqual(metadata[SimpleProvider][pass_2], 1) def test_lazy_visitor_provider(self) -> None: class SimpleLazyProvider(VisitorMetadataProvider[int]): """ Sets metadata on every node to a callable that returns 1. """ def on_visit(self, node: cst.CSTNode) -> bool: self.set_metadata(node, LazyValue(lambda: 1)) return True wrapper = MetadataWrapper(parse_module("pass; return")) module = wrapper.module pass_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] return_ = cast(cst.SimpleStatementLine, module.body[0]).body[1] provider = SimpleLazyProvider() metadata = provider._gen(wrapper) # Check access on provider self.assertEqual(provider.get_metadata(SimpleLazyProvider, module), 1) self.assertEqual(provider.get_metadata(SimpleLazyProvider, pass_), 1) self.assertEqual(provider.get_metadata(SimpleLazyProvider, return_), 1) # Check returned mapping self.assertTrue(isinstance(metadata[module], LazyValue)) self.assertTrue(isinstance(metadata[pass_], LazyValue)) self.assertTrue(isinstance(metadata[return_], LazyValue)) def testlazy_batchable_provider(self) -> None: class SimpleLazyProvider(BatchableMetadataProvider[int]): """ Sets metadata on every pass node to a callable that returns 1, and every return node to a callable that returns 2. 
""" def visit_Pass(self, node: cst.Pass) -> None: self.set_metadata(node, LazyValue(lambda: 1)) def visit_Return(self, node: cst.Return) -> None: self.set_metadata(node, LazyValue(lambda: 2)) wrapper = MetadataWrapper(parse_module("pass; return; pass")) module = wrapper.module pass_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] return_ = cast(cst.SimpleStatementLine, module.body[0]).body[1] pass_2 = cast(cst.SimpleStatementLine, module.body[0]).body[2] provider = SimpleLazyProvider() metadata = _gen_batchable(wrapper, [provider]) # Check access on provider self.assertEqual(provider.get_metadata(SimpleLazyProvider, pass_), 1) self.assertEqual(provider.get_metadata(SimpleLazyProvider, return_), 2) self.assertEqual(provider.get_metadata(SimpleLazyProvider, pass_2), 1) # Check returned mapping self.assertTrue(isinstance(metadata[SimpleLazyProvider][pass_], LazyValue)) self.assertTrue(isinstance(metadata[SimpleLazyProvider][return_], LazyValue)) self.assertTrue(isinstance(metadata[SimpleLazyProvider][pass_2], LazyValue)) LibCST-1.2.0/libcst/metadata/tests/test_expression_context_provider.py000066400000000000000000000351231456464173300263030ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from textwrap import dedent from typing import cast, Dict, Optional import libcst as cst from libcst import parse_module from libcst._visitors import CSTVisitor from libcst.metadata import ( ExpressionContext, ExpressionContextProvider, MetadataWrapper, ) from libcst.testing.utils import UnitTest class DependentVisitor(CSTVisitor): METADATA_DEPENDENCIES = (ExpressionContextProvider,) def __init__( self, *, test: UnitTest, name_to_context: Dict[str, Optional[ExpressionContext]] = {}, attribute_to_context: Dict[str, ExpressionContext] = {}, subscript_to_context: Dict[str, ExpressionContext] = {}, starred_element_to_context: Dict[str, ExpressionContext] = {}, tuple_to_context: Dict[str, ExpressionContext] = {}, list_to_context: Dict[str, ExpressionContext] = {}, ) -> None: self.test = test self.name_to_context = name_to_context self.attribute_to_context = attribute_to_context self.subscript_to_context = subscript_to_context self.starred_element_to_context = starred_element_to_context self.tuple_to_context = tuple_to_context self.list_to_context = list_to_context def visit_Name(self, node: cst.Name) -> None: self.test.assertEqual( self.get_metadata(ExpressionContextProvider, node, None), self.name_to_context[node.value], f"Context doesn't match for Name {node.value}", ) def visit_Attribute(self, node: cst.Attribute) -> None: self.test.assertEqual( self.get_metadata(ExpressionContextProvider, node), self.attribute_to_context[cst.Module([]).code_for_node(node)], ) def visit_Subscript(self, node: cst.Subscript) -> None: self.test.assertEqual( self.get_metadata(ExpressionContextProvider, node), # to test it easier, assuming we only use a Name as Subscript value self.subscript_to_context[cst.Module([]).code_for_node(node)], ) def visit_StarredElement(self, node: cst.StarredElement) -> None: self.test.assertEqual( self.get_metadata(ExpressionContextProvider, node), # to test it easier, assuming we only use a Name as StarredElement value self.starred_element_to_context[cast(cst.Name, node.value).value], ) def visit_Tuple(self, node: cst.Tuple) -> None: self.test.assertEqual( self.get_metadata(ExpressionContextProvider, node), # to test it easier, assuming 
we only use Name as Tuple elements self.tuple_to_context[cst.Module([]).code_for_node(node)], ) def visit_List(self, node: cst.List) -> None: self.test.assertEqual( self.get_metadata(ExpressionContextProvider, node), # to test it easier, assuming we only use Name as List elements self.list_to_context[cst.Module([]).code_for_node(node)], ) def visit_Call(self, node: cst.Call) -> None: with self.test.assertRaises(KeyError): self.get_metadata(ExpressionContextProvider, node) class ExpressionContextProviderTest(UnitTest): def test_simple_load(self) -> None: wrapper = MetadataWrapper(parse_module("a")) wrapper.visit( DependentVisitor(test=self, name_to_context={"a": ExpressionContext.LOAD}) ) def test_simple_assign(self) -> None: wrapper = MetadataWrapper(parse_module("a = b")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.STORE, "b": ExpressionContext.LOAD, }, ) ) def test_assign_to_attribute(self) -> None: wrapper = MetadataWrapper(parse_module("a.b = c.d")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.LOAD, "b": None, "c": ExpressionContext.LOAD, "d": None, }, attribute_to_context={ "a.b": ExpressionContext.STORE, "c.d": ExpressionContext.LOAD, }, ) ) wrapper = MetadataWrapper(parse_module("a.b.c = d.e.f")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.LOAD, "b": None, "c": None, "d": ExpressionContext.LOAD, "e": None, "f": None, }, attribute_to_context={ "a.b": ExpressionContext.LOAD, "a.b.c": ExpressionContext.STORE, "d.e": ExpressionContext.LOAD, "d.e.f": ExpressionContext.LOAD, }, ) ) def test_assign_with_subscript(self) -> None: wrapper = MetadataWrapper(parse_module("a[b] = c[d]")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.LOAD, "b": ExpressionContext.LOAD, "c": ExpressionContext.LOAD, "d": ExpressionContext.LOAD, }, subscript_to_context={ "a[b]": ExpressionContext.STORE, "c[d]": ExpressionContext.LOAD, }, ) ) wrapper = MetadataWrapper(parse_module("x.y[start:end, idx]")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "x": ExpressionContext.LOAD, "y": None, "start": ExpressionContext.LOAD, "end": ExpressionContext.LOAD, "idx": ExpressionContext.LOAD, }, subscript_to_context={"x.y[start:end, idx]": ExpressionContext.LOAD}, attribute_to_context={"x.y": ExpressionContext.LOAD}, ) ) def test_augassign(self) -> None: wrapper = MetadataWrapper(parse_module("a += b")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.STORE, "b": ExpressionContext.LOAD, }, ) ) def test_annassign(self) -> None: wrapper = MetadataWrapper(parse_module("a: str = b")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.STORE, "b": ExpressionContext.LOAD, "str": ExpressionContext.LOAD, }, ) ) def test_starred_element_with_assign(self) -> None: wrapper = MetadataWrapper(parse_module("*a = b")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.STORE, "b": ExpressionContext.LOAD, }, starred_element_to_context={"a": ExpressionContext.STORE}, ) ) def test_del_simple(self) -> None: wrapper = MetadataWrapper(parse_module("del a")) wrapper.visit( DependentVisitor(test=self, name_to_context={"a": ExpressionContext.DEL}) ) def test_del_with_subscript(self) -> None: wrapper = MetadataWrapper(parse_module("del a[b]")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.LOAD, "b": ExpressionContext.LOAD, }, 
subscript_to_context={"a[b]": ExpressionContext.DEL}, ) ) def test_del_with_tuple(self) -> None: wrapper = MetadataWrapper(parse_module("del a, b")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.DEL, "b": ExpressionContext.DEL, }, tuple_to_context={"a, b": ExpressionContext.DEL}, ) ) def test_tuple_with_assign(self) -> None: wrapper = MetadataWrapper(parse_module("a, = b")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.STORE, "b": ExpressionContext.LOAD, }, tuple_to_context={"a,": ExpressionContext.STORE}, ) ) def test_nested_tuple_with_assign(self) -> None: wrapper = MetadataWrapper(parse_module("((a, b), c) = ((1, 2), 3)")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.STORE, "b": ExpressionContext.STORE, "c": ExpressionContext.STORE, }, tuple_to_context={ "(a, b)": ExpressionContext.STORE, "((a, b), c)": ExpressionContext.STORE, "(1, 2)": ExpressionContext.LOAD, "((1, 2), 3)": ExpressionContext.LOAD, }, ) ) def test_list_with_assign(self) -> None: wrapper = MetadataWrapper(parse_module("[a] = [b]")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.STORE, "b": ExpressionContext.LOAD, }, list_to_context={ "[a]": ExpressionContext.STORE, "[b]": ExpressionContext.LOAD, }, ) ) def test_nested_list_with_assign(self) -> None: wrapper = MetadataWrapper(parse_module("[[a, b], c] = [[d, e], f]")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.STORE, "b": ExpressionContext.STORE, "c": ExpressionContext.STORE, "d": ExpressionContext.LOAD, "e": ExpressionContext.LOAD, "f": ExpressionContext.LOAD, }, list_to_context={ "[a, b]": ExpressionContext.STORE, "[[a, b], c]": ExpressionContext.STORE, "[d, e]": ExpressionContext.LOAD, "[[d, e], f]": ExpressionContext.LOAD, }, ) ) def test_expressions_with_assign(self) -> None: wrapper = MetadataWrapper(parse_module("f(a)[b] = c")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.LOAD, "b": ExpressionContext.LOAD, "c": ExpressionContext.LOAD, "f": ExpressionContext.LOAD, }, subscript_to_context={"f(a)[b]": ExpressionContext.STORE}, ) ) def test_invalid_type_for_context(self) -> None: wrapper = MetadataWrapper(parse_module("a()")) wrapper.visit( DependentVisitor(test=self, name_to_context={"a": ExpressionContext.LOAD}) ) def test_with_as(self) -> None: wrapper = MetadataWrapper(parse_module("with a() as b:\n pass")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "a": ExpressionContext.LOAD, "b": ExpressionContext.STORE, }, ) ) def test_except_as(self) -> None: wrapper = MetadataWrapper( parse_module("try: ...\nexcept Exception as ex:\n pass") ) wrapper.visit( DependentVisitor( test=self, name_to_context={ "Exception": ExpressionContext.LOAD, "ex": ExpressionContext.STORE, }, ) ) def test_for(self) -> None: wrapper = MetadataWrapper(parse_module("for i in items:\n j = 1")) wrapper.visit( DependentVisitor( test=self, name_to_context={ "i": ExpressionContext.STORE, "items": ExpressionContext.LOAD, "j": ExpressionContext.STORE, }, ) ) def test_class(self) -> None: code = """ class Foo(Bar): x = y """ wrapper = MetadataWrapper(parse_module(dedent(code))) wrapper.visit( DependentVisitor( test=self, name_to_context={ "Foo": ExpressionContext.STORE, "Bar": ExpressionContext.LOAD, "x": ExpressionContext.STORE, "y": ExpressionContext.LOAD, }, ) ) def test_function(self) -> None: code = """def foo(x: int = y) -> None: pass""" wrapper = 
MetadataWrapper(parse_module(code)) wrapper.visit( DependentVisitor( test=self, name_to_context={ "foo": ExpressionContext.STORE, "x": ExpressionContext.STORE, "int": ExpressionContext.LOAD, "y": ExpressionContext.LOAD, "None": ExpressionContext.LOAD, }, ) ) def test_walrus(self) -> None: code = """ if x := y: pass """ wrapper = MetadataWrapper( parse_module( dedent(code), config=cst.PartialParserConfig(python_version="3.8") ) ) wrapper.visit( DependentVisitor( test=self, name_to_context={ "x": ExpressionContext.STORE, "y": ExpressionContext.LOAD, }, ) ) LibCST-1.2.0/libcst/metadata/tests/test_file_path_provider.py000066400000000000000000000120411456464173300242650ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path from tempfile import TemporaryDirectory from typing import Set import libcst from libcst._visitors import CSTVisitor from libcst.helpers.paths import chdir from libcst.metadata import FilePathProvider, FullRepoManager, MetadataWrapper from libcst.testing.utils import UnitTest class FilePathProviderTest(UnitTest): def setUp(self) -> None: self.td = TemporaryDirectory() self.tdp = Path(self.td.name).resolve() self.addCleanup(self.td.cleanup) def test_provider_cache(self) -> None: pkg = self.tdp / "pkg" pkg.mkdir() files = [Path(pkg / name) for name in ("file1.py", "file2.py", "file3.py")] [file.write_text("print('hello')\n") for file in files] with self.subTest("absolute paths"): repo_manager = FullRepoManager( self.tdp, [f.as_posix() for f in files], {FilePathProvider} ) repo_manager.resolve_cache() expected = { FilePathProvider: {f.as_posix(): f for f in files}, } self.assertDictEqual(expected, repo_manager.cache) with self.subTest("repo relative paths"): repo_manager = FullRepoManager( self.tdp, [f.relative_to(self.tdp).as_posix() for f in files], {FilePathProvider}, ) repo_manager.resolve_cache() expected = { FilePathProvider: { f.relative_to(self.tdp).as_posix(): f for f in files }, } self.assertDictEqual(expected, repo_manager.cache) with self.subTest("dot relative paths"): with chdir(self.tdp): repo_manager = FullRepoManager( ".", [f.relative_to(self.tdp).as_posix() for f in files], {FilePathProvider}, ) repo_manager.resolve_cache() expected = { FilePathProvider: { f.relative_to(self.tdp).as_posix(): f for f in files }, } self.assertDictEqual(expected, repo_manager.cache) def test_visitor(self) -> None: pkg = self.tdp / "pkg" pkg.mkdir() files = [Path(pkg / name) for name in ("file1.py", "file2.py", "file3.py")] [file.write_text("print('hello')\n") for file in files] seen: Set[Path] = set() class FakeVisitor(CSTVisitor): METADATA_DEPENDENCIES = [FilePathProvider] def visit_Module(self, node: libcst.Module) -> None: seen.add(self.get_metadata(FilePathProvider, node)) with self.subTest("absolute paths"): seen.clear() repo_manager = FullRepoManager( self.tdp, [f.as_posix() for f in files], {FilePathProvider} ) repo_manager.resolve_cache() for file in files: module = libcst.parse_module(file.read_bytes()) wrapper = MetadataWrapper( module, cache=repo_manager.get_cache_for_path(file.as_posix()) ) wrapper.visit(FakeVisitor()) expected = set(files) self.assertSetEqual(expected, seen) with self.subTest("repo relative paths"): seen.clear() repo_manager = FullRepoManager( self.tdp, [f.relative_to(self.tdp).as_posix() for f in files], {FilePathProvider}, ) repo_manager.resolve_cache() for file in files: 
module = libcst.parse_module(file.read_bytes()) wrapper = MetadataWrapper( module, cache=repo_manager.get_cache_for_path( file.relative_to(self.tdp).as_posix() ), ) wrapper.visit(FakeVisitor()) expected = set(files) self.assertSetEqual(expected, seen) with self.subTest("dot relative paths"): with chdir(self.tdp): seen.clear() repo_manager = FullRepoManager( ".", [f.relative_to(self.tdp).as_posix() for f in files], {FilePathProvider}, ) repo_manager.resolve_cache() for file in files: module = libcst.parse_module(file.read_bytes()) wrapper = MetadataWrapper( module, cache=repo_manager.get_cache_for_path( file.relative_to(self.tdp).as_posix() ), ) wrapper.visit(FakeVisitor()) expected = set(files) self.assertSetEqual(expected, seen) LibCST-1.2.0/libcst/metadata/tests/test_full_repo_manager.py000066400000000000000000000053371456464173300241130ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json from pathlib import Path from unittest.mock import Mock, patch from libcst.metadata.full_repo_manager import FullRepoManager from libcst.metadata.tests.test_type_inference_provider import _test_simple_class_helper from libcst.metadata.type_inference_provider import TypeInferenceProvider from libcst.testing.utils import UnitTest REPO_ROOT_DIR: str = str(Path(__file__).parent.parent.parent.resolve()) class FullRepoManagerTest(UnitTest): @patch.object(TypeInferenceProvider, "gen_cache") def test_get_metadata_wrapper_with_empty_cache(self, gen_cache: Mock) -> None: path = "tests/pyre/simple_class.py" gen_cache.return_value = {path: {"types": []}} manager = FullRepoManager(REPO_ROOT_DIR, [path], [TypeInferenceProvider]) wrapper = manager.get_metadata_wrapper_for_path(path) self.assertEqual(wrapper.resolve(TypeInferenceProvider), {}) @patch.object(TypeInferenceProvider, "gen_cache") def test_get_metadata_wrapper_with_patched_cache(self, gen_cache: Mock) -> None: path_prefix = "tests/pyre/simple_class" path = f"{path_prefix}.py" gen_cache.return_value = { path: json.loads((Path(REPO_ROOT_DIR) / f"{path_prefix}.json").read_text()) } manager = FullRepoManager(REPO_ROOT_DIR, [path], [TypeInferenceProvider]) wrapper = manager.get_metadata_wrapper_for_path(path) _test_simple_class_helper(self, wrapper) @patch.object(TypeInferenceProvider, "gen_cache") def test_get_metadata_wrapper_with_invalid_path(self, gen_cache: Mock) -> None: path = "tests/pyre/simple_class.py" gen_cache.return_value = {path: {"types": []}} manager = FullRepoManager( REPO_ROOT_DIR, ["invalid_path.py"], [TypeInferenceProvider] ) with self.assertRaisesRegex( Exception, "The path needs to be in paths parameter when constructing FullRepoManager for efficient batch processing.", ): manager.get_metadata_wrapper_for_path(path) @patch.object(TypeInferenceProvider, "gen_cache") def test_get_full_repo_cache(self, gen_cache: Mock) -> None: path_prefix = "tests/pyre/simple_class" path = f"{path_prefix}.py" mock_cache = { path: json.loads((Path(REPO_ROOT_DIR) / f"{path_prefix}.json").read_text()) } gen_cache.return_value = mock_cache manager = FullRepoManager(REPO_ROOT_DIR, path, [TypeInferenceProvider]) cache = manager.cache self.assertEqual(cache, {TypeInferenceProvider: mock_cache}) LibCST-1.2.0/libcst/metadata/tests/test_metadata_provider.py000066400000000000000000000276461456464173300241330ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from unittest.mock import Mock import libcst as cst from libcst import parse_module from libcst._exceptions import MetadataException from libcst._visitors import CSTTransformer from libcst.metadata import ( BatchableMetadataProvider, MetadataWrapper, VisitorMetadataProvider, ) from libcst.testing.utils import UnitTest class MetadataProviderTest(UnitTest): def test_visitor_provider(self) -> None: """ Tests that visitor providers are resolved correctly. Sets 2 metadata entries for every node: SimpleProvider -> 1 DependentProvider -> 2 """ test = self class SimpleProvider(VisitorMetadataProvider[int]): def on_visit(self, node: cst.CSTNode) -> bool: self.set_metadata(node, 1) return True class DependentProvider(VisitorMetadataProvider[int]): METADATA_DEPENDENCIES = (SimpleProvider,) def on_visit(self, node: cst.CSTNode) -> bool: self.set_metadata(node, self.get_metadata(SimpleProvider, node) + 1) return True class DependentVisitor(CSTTransformer): # Declare both providers so the visitor has access to both types of metadata METADATA_DEPENDENCIES = (DependentProvider, SimpleProvider) def visit_Module(self, node: cst.Module) -> None: # Check metadata is set test.assertEqual(self.get_metadata(SimpleProvider, node), 1) test.assertEqual(self.get_metadata(DependentProvider, node), 2) def visit_Pass(self, node: cst.Pass) -> None: # Check metadata is set test.assertEqual(self.get_metadata(SimpleProvider, node), 1) test.assertEqual(self.get_metadata(DependentProvider, node), 2) module = parse_module("pass") MetadataWrapper(module).visit(DependentVisitor()) def test_batched_provider(self) -> None: """ Tests that batchable providers are resolved correctly. Sets metadata on: - pass: BatchedProviderA -> 1 BatchedProviderB -> "a" """ test = self mock = Mock() class BatchedProviderA(BatchableMetadataProvider[int]): def visit_Pass(self, node: cst.Pass) -> None: mock.visited_a() self.set_metadata(node, 1) class BatchedProviderB(BatchableMetadataProvider[str]): def visit_Pass(self, node: cst.Pass) -> None: mock.visited_b() self.set_metadata(node, "a") class DependentVisitor(CSTTransformer): METADATA_DEPENDENCIES = (BatchedProviderA, BatchedProviderB) def visit_Pass(self, node: cst.Pass) -> None: # Check metadata is set test.assertEqual(self.get_metadata(BatchedProviderA, node), 1) test.assertEqual(self.get_metadata(BatchedProviderB, node), "a") module = parse_module("pass") MetadataWrapper(module).visit(DependentVisitor()) # Check that each batchable visitor is only called once mock.visited_a.assert_called_once() mock.visited_b.assert_called_once() def test_mixed_providers(self) -> None: """ Tests that a mixed set of providers is resolved properly.
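        Provider dependency graph exercised here (a sketch of the classes
        defined below)::

            SimpleProvider -> BatchedProviderA -> BatchedProviderC
            SimpleProvider -> BatchedProviderB
            {BatchedProviderA, BatchedProviderB} -> DependentProvider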
Sets metadata on pass: BatchedProviderA -> 2 BatchedProviderB -> 3 DependentProvider -> 5 DependentBatched -> 4 """ test = self mock = Mock() class SimpleProvider(VisitorMetadataProvider[int]): def visit_Pass(self, node: cst.CSTNode) -> None: mock.visited_simple() self.set_metadata(node, 1) class BatchedProviderA(BatchableMetadataProvider[int]): METADATA_DEPENDENCIES = (SimpleProvider,) def visit_Pass(self, node: cst.Pass) -> None: mock.visited_a() self.set_metadata(node, 2) class BatchedProviderB(BatchableMetadataProvider[int]): METADATA_DEPENDENCIES = (SimpleProvider,) def visit_Pass(self, node: cst.Pass) -> None: mock.visited_b() self.set_metadata(node, 3) class DependentProvider(VisitorMetadataProvider[int]): METADATA_DEPENDENCIES = (BatchedProviderA, BatchedProviderB) def on_visit(self, node: cst.CSTNode) -> bool: sum = self.get_metadata(BatchedProviderA, node, 0) + self.get_metadata( BatchedProviderB, node, 0 ) self.set_metadata(node, sum) return True class BatchedProviderC(BatchableMetadataProvider[int]): METADATA_DEPENDENCIES = (BatchedProviderA,) def visit_Pass(self, node: cst.Pass) -> None: mock.visited_c() self.set_metadata(node, self.get_metadata(BatchedProviderA, node) * 2) class DependentVisitor(CSTTransformer): METADATA_DEPENDENCIES = ( BatchedProviderA, BatchedProviderB, BatchedProviderC, DependentProvider, ) def visit_Module(self, node: cst.Module) -> None: # Dependent visitor set metadata on all nodes but for module it # defaulted to 0 because BatchedProviderA/B only set metadata on # pass nodes test.assertEqual(self.get_metadata(DependentProvider, node), 0) def visit_Pass(self, node: cst.Pass) -> None: # Check metadata is set test.assertEqual(self.get_metadata(BatchedProviderA, node), 2) test.assertEqual(self.get_metadata(BatchedProviderB, node), 3) test.assertEqual(self.get_metadata(BatchedProviderC, node), 4) test.assertEqual(self.get_metadata(DependentProvider, node), 5) module = parse_module("pass") MetadataWrapper(module).visit(DependentVisitor()) # Check each visitor is called once mock.visited_simple.assert_called_once() mock.visited_a.assert_called_once() mock.visited_b.assert_called_once() mock.visited_c.assert_called_once() def test_inherited_metadata(self) -> None: """ Tests that classes inherit access to metadata declared by their base classes. """ test_runner = self mock = Mock() class SimpleProvider(VisitorMetadataProvider[int]): def visit_Pass(self, node: cst.Pass) -> None: mock.visited_simple() self.set_metadata(node, 1) class VisitorA(CSTTransformer): METADATA_DEPENDENCIES = (SimpleProvider,) class VisitorB(VisitorA): def visit_Pass(self, node: cst.Pass) -> None: test_runner.assertEqual(self.get_metadata(SimpleProvider, node), 1) module = parse_module("pass") MetadataWrapper(module).visit(VisitorB()) # Check each visitor is called once mock.visited_simple.assert_called_once() def test_provider_inherited_metadata(self) -> None: """ Tests that providers inherit access to metadata declared by their base classes. 
""" test_runner = self mock = Mock() class ProviderA(VisitorMetadataProvider[int]): def visit_Pass(self, node: cst.Pass) -> None: mock.visited_a() self.set_metadata(node, 1) class ProviderB(VisitorMetadataProvider[int]): METADATA_DEPENDENCIES = (ProviderA,) class ProviderC(ProviderB): def visit_Pass(self, node: cst.Pass) -> None: mock.visited_c() test_runner.assertEqual(self.get_metadata(ProviderA, node), 1) class Visitor(CSTTransformer): METADATA_DEPENDENCIES = (ProviderC,) module = parse_module("pass") MetadataWrapper(module).visit(Visitor()) # Check each visitor is called once mock.visited_a.assert_called_once() mock.visited_c.assert_called_once() def test_batchable_provider_inherited_metadata(self) -> None: """ Tests that batchable providers inherit access to metadata declared by their base classes. """ test_runner = self mock = Mock() class ProviderA(VisitorMetadataProvider[int]): def visit_Pass(self, node: cst.Pass) -> None: mock.visited_a() self.set_metadata(node, 1) class ProviderB(BatchableMetadataProvider[int]): METADATA_DEPENDENCIES = (ProviderA,) class ProviderC(ProviderB): def visit_Pass(self, node: cst.Pass) -> None: mock.visited_c() test_runner.assertEqual(self.get_metadata(ProviderA, node), 1) class VisitorA(CSTTransformer): METADATA_DEPENDENCIES = (ProviderC,) module = parse_module("pass") MetadataWrapper(module).visit(VisitorA()) # Check each visitor is called once mock.visited_a.assert_called_once() mock.visited_c.assert_called_once() def test_self_metadata(self) -> None: """ Tests a provider can access its own metadata (assuming it has been set properly.) """ test_runner = self class ProviderA(VisitorMetadataProvider[bool]): def on_visit(self, node: cst.CSTNode) -> bool: self.set_metadata(node, True) return True def on_leave(self, original_node: cst.CSTNode) -> None: test_runner.assertEqual( self.get_metadata(type(self), original_node), True ) class AVisitor(CSTTransformer): METADATA_DEPENDENCIES = (ProviderA,) cst.Module([]).visit(AVisitor()) def test_unset_metadata(self) -> None: """ Tests that access to unset metadata throws a key error. """ class ProviderA(VisitorMetadataProvider[bool]): pass class AVisitor(CSTTransformer): METADATA_DEPENDENCIES = (ProviderA,) def on_visit(self, node: cst.CSTNode) -> bool: self.get_metadata(ProviderA, node) return True with self.assertRaisesRegex( KeyError, "ProviderA is a dependency, but not set; did you forget a MetadataWrapper?", ): cst.Module([]).visit(AVisitor()) def test_undeclared_metadata(self) -> None: """ Tests that access to undeclared metadata throws a key error. """ class ProviderA(VisitorMetadataProvider[bool]): pass class ProviderB(VisitorMetadataProvider[bool]): pass class AVisitor(CSTTransformer): METADATA_DEPENDENCIES = (ProviderA,) def on_visit(self, node: cst.CSTNode) -> bool: self.get_metadata(ProviderA, node, True) self.get_metadata(ProviderB, node) return True with self.assertRaisesRegex( KeyError, "ProviderB is not declared as a dependency in AVisitor.METADATA_DEPENDENCIES.", ): MetadataWrapper(cst.Module([])).visit(AVisitor()) def test_circular_dependency(self) -> None: """ Tests that circular dependencies are detected. 
""" class ProviderA(VisitorMetadataProvider[str]): pass ProviderA.METADATA_DEPENDENCIES = (ProviderA,) class BadVisitor(CSTTransformer): METADATA_DEPENDENCIES = (ProviderA,) with self.assertRaisesRegex( MetadataException, "Detected circular dependencies in ProviderA" ): MetadataWrapper(cst.Module([])).visit(BadVisitor()) LibCST-1.2.0/libcst/metadata/tests/test_metadata_wrapper.py000066400000000000000000000101001456464173300237320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional from unittest.mock import Mock import libcst as cst from libcst.metadata import ( BatchableMetadataProvider, MetadataWrapper, VisitorMetadataProvider, ) from libcst.testing.utils import UnitTest class MetadataWrapperTest(UnitTest): def test_copies_tree(self) -> None: m = cst.parse_module("pass") mw = MetadataWrapper(m) self.assertTrue(mw.module.deep_equals(m)) self.assertIsNot(mw.module, m) def test_unsafe_skip_copy(self) -> None: m = cst.parse_module("pass") mw = MetadataWrapper(m, unsafe_skip_copy=True) self.assertIs(mw.module, m) def test_equality_by_identity(self) -> None: m = cst.parse_module("pass") mw1 = MetadataWrapper(m) mw2 = MetadataWrapper(m) self.assertEqual(mw1, mw1) self.assertEqual(mw2, mw2) self.assertNotEqual(mw1, mw2) def test_hash_by_identity(self) -> None: m = cst.parse_module("pass") mw1 = MetadataWrapper(m) mw2 = MetadataWrapper(m, unsafe_skip_copy=True) mw3 = MetadataWrapper(m, unsafe_skip_copy=True) self.assertEqual(hash(mw1), hash(mw1)) self.assertEqual(hash(mw2), hash(mw2)) self.assertEqual(hash(mw3), hash(mw3)) self.assertNotEqual(hash(mw1), hash(mw2)) self.assertNotEqual(hash(mw1), hash(mw3)) self.assertNotEqual(hash(mw2), hash(mw3)) def test_metadata_cache(self) -> None: class DummyMetadataProvider(BatchableMetadataProvider[None]): gen_cache = tuple m = cst.parse_module("pass") mw = MetadataWrapper(m) with self.assertRaisesRegex( Exception, "Cache is required for initializing DummyMetadataProvider." 
): mw.resolve(DummyMetadataProvider) class SimpleCacheMetadataProvider(BatchableMetadataProvider[object]): gen_cache = tuple def __init__(self, cache: object) -> None: super().__init__(cache) self.cache = cache def visit_Pass(self, node: cst.Pass) -> Optional[bool]: self.set_metadata(node, self.cache) cached_data = object() mw = MetadataWrapper(m, cache={SimpleCacheMetadataProvider: cached_data}) pass_node = cst.ensure_type(mw.module.body[0], cst.SimpleStatementLine).body[0] self.assertEqual( mw.resolve(SimpleCacheMetadataProvider)[pass_node], cached_data ) def test_resolve_provider_twice(self) -> None: """ Tests that resolving the same provider twice is a no-op """ mock = Mock() class ProviderA(VisitorMetadataProvider[bool]): def visit_Pass(self, node: cst.Pass) -> None: mock.visited_a() module = cst.parse_module("pass") wrapper = MetadataWrapper(module) wrapper.resolve(ProviderA) mock.visited_a.assert_called_once() wrapper.resolve(ProviderA) mock.visited_a.assert_called_once() def test_resolve_dependent_provider_twice(self) -> None: """ Tests that resolving a provider with already-resolved dependencies doesn't re-run those dependencies """ mock = Mock() class ProviderA(VisitorMetadataProvider[bool]): def visit_Pass(self, node: cst.Pass) -> None: mock.visited_a() class ProviderB(VisitorMetadataProvider[bool]): METADATA_DEPENDENCIES = (ProviderA,) def visit_Pass(self, node: cst.Pass) -> None: mock.visited_b() module = cst.parse_module("pass") wrapper = MetadataWrapper(module) wrapper.resolve(ProviderA) mock.visited_a.assert_called_once() wrapper.resolve(ProviderB) mock.visited_a.assert_called_once() mock.visited_b.assert_called_once() wrapper.resolve(ProviderA) mock.visited_a.assert_called_once() mock.visited_b.assert_called_once() LibCST-1.2.0/libcst/metadata/tests/test_name_provider.py000066400000000000000000000467751456464173300232630ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
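# A rough sketch of the behavior exercised below (illustrative only, not
# executed as part of this module):
#
#     import libcst as cst
#     from libcst.metadata import MetadataWrapper, QualifiedNameProvider
#
#     wrapper = MetadataWrapper(cst.parse_module("from a.b import c as d\nd\n"))
#     qnames = wrapper.resolve(QualifiedNameProvider)
#     # the Name "d" resolves to QualifiedName("a.b.c", QualifiedNameSource.IMPORT)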
from pathlib import Path from tempfile import TemporaryDirectory from textwrap import dedent from typing import Collection, Dict, Mapping, Optional, Set, Tuple import libcst as cst from libcst import ensure_type from libcst._nodes.base import CSTNode from libcst.metadata import ( FullyQualifiedNameProvider, MetadataWrapper, QualifiedName, QualifiedNameProvider, QualifiedNameSource, ) from libcst.metadata.full_repo_manager import FullRepoManager from libcst.metadata.name_provider import FullyQualifiedNameVisitor from libcst.testing.utils import data_provider, UnitTest class QNameVisitor(cst.CSTVisitor): METADATA_DEPENDENCIES = (QualifiedNameProvider,) def __init__(self) -> None: self.qnames: Dict["CSTNode", Collection[QualifiedName]] = {} def on_visit(self, node: cst.CSTNode) -> bool: qname = self.get_metadata(QualifiedNameProvider, node) self.qnames[node] = qname return True def get_qualified_name_metadata_provider( module_str: str, ) -> Tuple[cst.Module, Mapping[cst.CSTNode, Collection[QualifiedName]]]: wrapper = MetadataWrapper(cst.parse_module(dedent(module_str))) visitor = QNameVisitor() wrapper.visit(visitor) return wrapper.module, visitor.qnames def get_qualified_names(module_str: str) -> Set[QualifiedName]: _, qnames_map = get_qualified_name_metadata_provider(module_str) return {qname for qnames in qnames_map.values() for qname in qnames} def get_fully_qualified_names(file_path: str, module_str: str) -> Set[QualifiedName]: wrapper = cst.MetadataWrapper( cst.parse_module(dedent(module_str)), cache={ FullyQualifiedNameProvider: FullyQualifiedNameProvider.gen_cache( Path(""), [file_path], None ).get(file_path, "") }, ) return { qname for qnames in wrapper.resolve(FullyQualifiedNameProvider).values() for qname in qnames } class QualifiedNameProviderTest(UnitTest): def test_imports(self) -> None: qnames = get_qualified_names( """ from a.b import c as d d """ ) self.assertEqual({"a.b.c"}, {qname.name for qname in qnames}) for qname in qnames: self.assertEqual(qname.source, QualifiedNameSource.IMPORT, msg=f"{qname}") def test_builtins(self) -> None: qnames = get_qualified_names( """ int(None) """ ) self.assertEqual( {"builtins.int", "builtins.None"}, {qname.name for qname in qnames} ) for qname in qnames: self.assertEqual(qname.source, QualifiedNameSource.BUILTIN, msg=f"{qname}") def test_locals(self) -> None: qnames = get_qualified_names( """ class X: a: "X" """ ) self.assertEqual({"X", "X.a"}, {qname.name for qname in qnames}) for qname in qnames: self.assertEqual(qname.source, QualifiedNameSource.LOCAL, msg=f"{qname}") def test_simple_qualified_names(self) -> None: m, names = get_qualified_name_metadata_provider( """ from a.b import c class Cls: def f(self) -> "c": c() d = {} d['key'] = 0 def g(): pass g() """ ) cls = ensure_type(m.body[1], cst.ClassDef) f = ensure_type(cls.body.body[0], cst.FunctionDef) self.assertEqual( names[ensure_type(f.returns, cst.Annotation).annotation], {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)}, ) c_call = ensure_type( ensure_type(f.body.body[0], cst.SimpleStatementLine).body[0], cst.Expr ).value self.assertEqual( names[c_call], {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)} ) self.assertEqual( names[c_call], {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)} ) g_call = ensure_type( ensure_type(m.body[3], cst.SimpleStatementLine).body[0], cst.Expr ).value self.assertEqual(names[g_call], {QualifiedName("g", QualifiedNameSource.LOCAL)}) d_name = ( ensure_type( ensure_type(f.body.body[1], cst.SimpleStatementLine).body[0], cst.Assign ) 
.targets[0] .target ) self.assertEqual( names[d_name], {QualifiedName("Cls.f.<locals>.d", QualifiedNameSource.LOCAL)}, ) d_subscript = ( ensure_type( ensure_type(f.body.body[2], cst.SimpleStatementLine).body[0], cst.Assign ) .targets[0] .target ) self.assertEqual( names[d_subscript], {QualifiedName("Cls.f.<locals>.d", QualifiedNameSource.LOCAL)}, ) def test_nested_qualified_names(self) -> None: m, names = get_qualified_name_metadata_provider( """ class A: def f1(self): def f2(): pass f2() def f3(self): class B(): ... B() def f4(): def f5(): class C: pass C() f5() """ ) cls_a = ensure_type(m.body[0], cst.ClassDef) self.assertEqual(names[cls_a], {QualifiedName("A", QualifiedNameSource.LOCAL)}) func_f1 = ensure_type(cls_a.body.body[0], cst.FunctionDef) self.assertEqual( names[func_f1], {QualifiedName("A.f1", QualifiedNameSource.LOCAL)} ) func_f2_call = ensure_type( ensure_type(func_f1.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value self.assertEqual( names[func_f2_call], {QualifiedName("A.f1.<locals>.f2", QualifiedNameSource.LOCAL)}, ) func_f3 = ensure_type(cls_a.body.body[1], cst.FunctionDef) self.assertEqual( names[func_f3], {QualifiedName("A.f3", QualifiedNameSource.LOCAL)} ) call_b = ensure_type( ensure_type(func_f3.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value self.assertEqual( names[call_b], {QualifiedName("A.f3.<locals>.B", QualifiedNameSource.LOCAL)} ) func_f4 = ensure_type(m.body[1], cst.FunctionDef) self.assertEqual( names[func_f4], {QualifiedName("f4", QualifiedNameSource.LOCAL)} ) func_f5 = ensure_type(func_f4.body.body[0], cst.FunctionDef) self.assertEqual( names[func_f5], {QualifiedName("f4.<locals>.f5", QualifiedNameSource.LOCAL)} ) cls_c = func_f5.body.body[0] self.assertEqual( names[cls_c], {QualifiedName("f4.<locals>.f5.<locals>.C", QualifiedNameSource.LOCAL)}, ) def test_multiple_assignments(self) -> None: m, names = get_qualified_name_metadata_provider( """ if 1: from a import b as c elif 2: from d import e as c c() """ ) call = ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value self.assertEqual( names[call], { QualifiedName(name="a.b", source=QualifiedNameSource.IMPORT), QualifiedName(name="d.e", source=QualifiedNameSource.IMPORT), }, ) def test_comprehension(self) -> None: m, names = get_qualified_name_metadata_provider( """ class C: def fn(self) -> None: [[k for k in i] for i in [j for j in range(10)]] # Note: # The qualified name of i is straightforwardly "C.fn.<locals>.<comprehension>.i". # ListComp j is evaluated outside of the ListComp i. # so j has qualified name "C.fn.<locals>.<comprehension>.j". # ListComp k is evaluated inside ListComp i. # so k has qualified name "C.fn.<locals>.<comprehension>.<comprehension>.k".
""" ) cls_def = ensure_type(m.body[0], cst.ClassDef) fn_def = ensure_type(cls_def.body.body[0], cst.FunctionDef) outer_comp = ensure_type( ensure_type( ensure_type(fn_def.body.body[0], cst.SimpleStatementLine).body[0], cst.Expr, ).value, cst.ListComp, ) i = outer_comp.for_in.target self.assertEqual( names[i], { QualifiedName( name="C.fn...i", source=QualifiedNameSource.LOCAL, ) }, ) inner_comp_j = ensure_type(outer_comp.for_in.iter, cst.ListComp) j = inner_comp_j.for_in.target self.assertEqual( names[j], { QualifiedName( name="C.fn...j", source=QualifiedNameSource.LOCAL, ) }, ) inner_comp_k = ensure_type(outer_comp.elt, cst.ListComp) k = inner_comp_k.for_in.target self.assertEqual( names[k], { QualifiedName( name="C.fn....k", source=QualifiedNameSource.LOCAL, ) }, ) def test_has_name_helper(self) -> None: class TestVisitor(cst.CSTVisitor): METADATA_DEPENDENCIES = (QualifiedNameProvider,) def __init__(self, test: UnitTest) -> None: self.test = test def visit_Call(self, node: cst.Call) -> Optional[bool]: self.test.assertTrue( QualifiedNameProvider.has_name(self, node, "a.b.c") ) self.test.assertFalse(QualifiedNameProvider.has_name(self, node, "a.b")) self.test.assertTrue( QualifiedNameProvider.has_name( self, node, QualifiedName("a.b.c", QualifiedNameSource.IMPORT) ) ) self.test.assertFalse( QualifiedNameProvider.has_name( self, node, QualifiedName("a.b.c", QualifiedNameSource.LOCAL) ) ) MetadataWrapper(cst.parse_module("import a;a.b.c()")).visit(TestVisitor(self)) def test_name_in_attribute(self) -> None: m, names = get_qualified_name_metadata_provider( """ obj = object() obj.eval """ ) attr = ensure_type( ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.Attribute, ) self.assertEqual( names[attr], {QualifiedName(name="obj.eval", source=QualifiedNameSource.LOCAL)}, ) eval = attr.attr self.assertEqual(names[eval], set()) def test_repeated_values_in_qualified_name(self) -> None: m, names = get_qualified_name_metadata_provider( """ import a class Foo: bar: a.aa.aaa """ ) foo = ensure_type(m.body[1], cst.ClassDef) bar = ensure_type( ensure_type( ensure_type(foo.body, cst.IndentedBlock).body[0], cst.SimpleStatementLine, ).body[0], cst.AnnAssign, ) annotation = ensure_type(bar.annotation, cst.Annotation) attribute = ensure_type(annotation.annotation, cst.Attribute) self.assertEqual( names[attribute], {QualifiedName("a.aa.aaa", QualifiedNameSource.IMPORT)} ) def test_multiple_qualified_names(self) -> None: m, names = get_qualified_name_metadata_provider( """ if False: def f(): pass elif False: from b import f else: import f import a.b as f f() """ ) if_ = ensure_type(m.body[0], cst.If) first_f = ensure_type(if_.body.body[0], cst.FunctionDef) second_f_alias = ensure_type( ensure_type( ensure_type(if_.orelse, cst.If).body.body[0], cst.SimpleStatementLine, ).body[0], cst.ImportFrom, ).names self.assertFalse(isinstance(second_f_alias, cst.ImportStar)) second_f = second_f_alias[0].name third_f_alias = ensure_type( ensure_type( ensure_type(ensure_type(if_.orelse, cst.If).orelse, cst.Else).body.body[ 0 ], cst.SimpleStatementLine, ).body[0], cst.Import, ).names self.assertFalse(isinstance(third_f_alias, cst.ImportStar)) third_f = third_f_alias[0].name fourth_f = ensure_type( ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Import ) .names[0] .asname, cst.AsName, ).name call = ensure_type( ensure_type( ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.Call, ) self.assertEqual( names[first_f], {QualifiedName("f", 
QualifiedNameSource.LOCAL)} ) self.assertEqual(names[second_f], set()) self.assertEqual(names[third_f], set()) self.assertEqual(names[fourth_f], set()) self.assertEqual( names[call], { QualifiedName("f", QualifiedNameSource.IMPORT), QualifiedName("b.f", QualifiedNameSource.IMPORT), QualifiedName("f", QualifiedNameSource.LOCAL), QualifiedName("a.b", QualifiedNameSource.IMPORT), }, ) def test_shadowed_assignments(self) -> None: m, names = get_qualified_name_metadata_provider( """ from lib import a,b,c a = a class Test: b = b def func(): c = c """ ) # pyre-fixme[53]: Captured variable `names` is not annotated. def test_name(node: cst.CSTNode, qnames: Set[QualifiedName]) -> None: name = ensure_type( ensure_type(node, cst.SimpleStatementLine).body[0], cst.Assign ).value self.assertEqual(names[name], qnames) test_name(m.body[1], {QualifiedName("lib.a", QualifiedNameSource.IMPORT)}) cls = ensure_type(m.body[2], cst.ClassDef) test_name( cls.body.body[0], {QualifiedName("lib.b", QualifiedNameSource.IMPORT)} ) func = ensure_type(m.body[3], cst.FunctionDef) test_name( func.body.body[0], {QualifiedName("lib.c", QualifiedNameSource.IMPORT)} ) class FullyQualifiedNameProviderTest(UnitTest): @data_provider( ( # test module names ("a/b/c.py", "", {"a.b.c": QualifiedNameSource.LOCAL}), ("a/b.py", "", {"a.b": QualifiedNameSource.LOCAL}), ("a.py", "", {"a": QualifiedNameSource.LOCAL}), ("a/b/__init__.py", "", {"a.b": QualifiedNameSource.LOCAL}), ("a/b/__main__.py", "", {"a.b": QualifiedNameSource.LOCAL}), # test builtins ( "test/module.py", "int(None)", { "test.module": QualifiedNameSource.LOCAL, "builtins.int": QualifiedNameSource.BUILTIN, "builtins.None": QualifiedNameSource.BUILTIN, }, ), # test imports ( "some/test/module.py", """ from a.b import c as d from . import rel from .lol import rel2 from .. import thing as rel3 d, rel, rel2, rel3 """, { "some.test.module": QualifiedNameSource.LOCAL, "a.b.c": QualifiedNameSource.IMPORT, "some.test.rel": QualifiedNameSource.IMPORT, "some.test.lol.rel2": QualifiedNameSource.IMPORT, "some.thing": QualifiedNameSource.IMPORT, }, ), # test more imports ( "some/test/module/__init__.py", """ from .
import rel from .lol import rel2 rel, rel2 """, { "some.test.module": QualifiedNameSource.LOCAL, "some.test.module.rel": QualifiedNameSource.IMPORT, "some.test.module.lol.rel2": QualifiedNameSource.IMPORT, }, ), # test locals ( "some/test/module.py", """ class X: a: X """, { "some.test.module": QualifiedNameSource.LOCAL, "some.test.module.X": QualifiedNameSource.LOCAL, "some.test.module.X.a": QualifiedNameSource.LOCAL, }, ), ) ) def test_qnames( self, file: str, code: str, names: Dict[str, QualifiedNameSource] ) -> None: qnames = get_fully_qualified_names(file, code) self.assertSetEqual( set(names.keys()), {qname.name for qname in qnames}, ) for qname in qnames: self.assertEqual(qname.source, names[qname.name], msg=f"{qname}") def test_local_qualification(self) -> None: module_name = "some.test.module" package_name = "some.test" for name, expected in [ (".foo", "some.test.foo"), ("..bar", "some.bar"), ("foo", "some.test.module.foo"), ]: with self.subTest(name=name): self.assertEqual( FullyQualifiedNameVisitor._fully_qualify_local( module_name, package_name, name ), expected, ) class FullyQualifiedNameIntegrationTest(UnitTest): def test_with_full_repo_manager(self) -> None: with TemporaryDirectory() as dir: root = Path(dir) file_path = root / "pkg/mod.py" file_path.parent.mkdir() file_path.touch() file_path_str = file_path.as_posix() mgr = FullRepoManager(root, [file_path_str], [FullyQualifiedNameProvider]) wrapper = mgr.get_metadata_wrapper_for_path(file_path_str) fqnames = wrapper.resolve(FullyQualifiedNameProvider) (mod, names) = next(iter(fqnames.items())) self.assertIsInstance(mod, cst.Module) self.assertEqual( names, {QualifiedName(name="pkg.mod", source=QualifiedNameSource.LOCAL)} ) LibCST-1.2.0/libcst/metadata/tests/test_parent_node_provider.py000066400000000000000000000035571456464173300246440ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from textwrap import dedent import libcst as cst from libcst.metadata import MetadataWrapper, ParentNodeProvider from libcst.testing.utils import data_provider, UnitTest class DependentVisitor(cst.CSTVisitor): METADATA_DEPENDENCIES = (ParentNodeProvider,) def __init__(self, *, test: UnitTest) -> None: self.test = test def on_visit(self, node: cst.CSTNode) -> bool: for child in node.children: parent = self.get_metadata(ParentNodeProvider, child) self.test.assertEqual(parent, node) return True class ParentNodeProviderTest(UnitTest): @data_provider( ( ( """ foo = 'toplevel' fn1(foo) fn2(foo) def fn_def(): foo = 'shadow' fn3(foo) """, ), ( """ global_var = None @cls_attr class Cls(cls_attr, kwarg=cls_attr): cls_attr = 5 def f(): pass """, ), ( """ iterator = None condition = None [elt for target in iterator if condition] {elt for target in iterator if condition} {elt: target for target in iterator if condition} (elt for target in iterator if condition) """, ), ) ) def test_parent_node_provier(self, code: str) -> None: wrapper = MetadataWrapper(cst.parse_module(dedent(code))) wrapper.visit(DependentVisitor(test=self)) LibCST-1.2.0/libcst/metadata/tests/test_position_provider.py000066400000000000000000000134431456464173300242050ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
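# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): a minimal example of
# resolving PositionProvider on a MetadataWrapper and reading back the
# CodeRange it computes for a node. The module text and the helper name
# `_example_position_lookup` are hypothetical, chosen only to make the API
# exercised by the tests below concrete.
# ---------------------------------------------------------------------------
def _example_position_lookup() -> None:
    import libcst as cst
    from libcst.metadata import MetadataWrapper, PositionProvider

    wrapper = MetadataWrapper(cst.parse_module("pass\n"))
    # resolve() returns a mapping from each CSTNode to its CodeRange.
    positions = wrapper.resolve(PositionProvider)
    for node, code_range in positions.items():
        if isinstance(node, cst.Pass):
            # Lines are 1-based and columns are 0-based, so a module-level
            # `pass` spans (1, 0) through (1, 4).
            assert (code_range.start.line, code_range.start.column) == (1, 0)
            assert (code_range.end.line, code_range.end.column) == (1, 4)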
from typing import Tuple import libcst as cst from libcst import parse_module from libcst._batched_visitor import BatchableCSTVisitor from libcst._visitors import CSTVisitor from libcst.metadata import ( CodeRange, MetadataWrapper, PositionProvider, WhitespaceInclusivePositionProvider, ) from libcst.metadata.position_provider import ( PositionProvidingCodegenState, WhitespaceInclusivePositionProvidingCodegenState, ) from libcst.testing.utils import UnitTest def position( state: WhitespaceInclusivePositionProvidingCodegenState, ) -> Tuple[int, int]: return state.line, state.column class PositionProviderTest(UnitTest): def test_visitor_provider(self) -> None: """ Sets 2 metadata entries for every node: SimpleProvider -> 1 DependentProvider - > 2 """ test = self class DependentVisitor(CSTVisitor): METADATA_DEPENDENCIES = (PositionProvider,) def visit_Pass(self, node: cst.Pass) -> None: test.assertEqual( self.get_metadata(PositionProvider, node), CodeRange((1, 0), (1, 4)) ) wrapper = MetadataWrapper(parse_module("pass")) wrapper.visit(DependentVisitor()) def test_equal_range(self) -> None: test = self expected_range = CodeRange((1, 4), (1, 6)) class EqualPositionVisitor(CSTVisitor): METADATA_DEPENDENCIES = (PositionProvider,) def visit_Equal(self, node: cst.Equal) -> None: test.assertEqual( self.get_metadata(PositionProvider, node), expected_range ) def visit_NotEqual(self, node: cst.NotEqual) -> None: test.assertEqual( self.get_metadata(PositionProvider, node), expected_range ) MetadataWrapper(parse_module("var == 1")).visit(EqualPositionVisitor()) MetadataWrapper(parse_module("var != 1")).visit(EqualPositionVisitor()) def test_batchable_provider(self) -> None: test = self class ABatchable(BatchableCSTVisitor): METADATA_DEPENDENCIES = (PositionProvider,) def visit_Pass(self, node: cst.Pass) -> None: test.assertEqual( self.get_metadata(PositionProvider, node), CodeRange((1, 0), (1, 4)) ) wrapper = MetadataWrapper(parse_module("pass")) wrapper.visit_batched([ABatchable()]) class PositionProvidingCodegenStateTest(UnitTest): def test_codegen_initial_position(self) -> None: state = WhitespaceInclusivePositionProvidingCodegenState( " " * 4, "\n", WhitespaceInclusivePositionProvider() ) self.assertEqual(position(state), (1, 0)) def test_codegen_add_token(self) -> None: state = WhitespaceInclusivePositionProvidingCodegenState( " " * 4, "\n", WhitespaceInclusivePositionProvider() ) state.add_token("1234") self.assertEqual(position(state), (1, 4)) def test_codegen_add_tokens(self) -> None: state = WhitespaceInclusivePositionProvidingCodegenState( " " * 4, "\n", WhitespaceInclusivePositionProvider() ) state.add_token("1234\n1234") self.assertEqual(position(state), (2, 4)) def test_codegen_add_newline(self) -> None: state = WhitespaceInclusivePositionProvidingCodegenState( " " * 4, "\n", WhitespaceInclusivePositionProvider() ) state.add_token("\n") self.assertEqual(position(state), (2, 0)) def test_codegen_add_indent_tokens(self) -> None: state = WhitespaceInclusivePositionProvidingCodegenState( " " * 4, "\n", WhitespaceInclusivePositionProvider() ) state.increase_indent(state.default_indent) state.add_indent_tokens() self.assertEqual(position(state), (1, 4)) def test_codegen_decrease_indent(self) -> None: state = WhitespaceInclusivePositionProvidingCodegenState( " " * 4, "\n", WhitespaceInclusivePositionProvider() ) state.increase_indent(state.default_indent) state.increase_indent(state.default_indent) state.increase_indent(state.default_indent) state.decrease_indent() state.add_indent_tokens() 
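# Three indent levels were pushed and one popped, so two levels of the
# four-space default indent remain; add_indent_tokens() therefore advances
# the column to 8, which is what the assertion below checks.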
self.assertEqual(position(state), (1, 8)) def test_whitespace_inclusive_position(self) -> None: # create a dummy node node = cst.Pass() # simulate codegen behavior for the dummy node # generates the code " pass " state = WhitespaceInclusivePositionProvidingCodegenState( " " * 4, "\n", WhitespaceInclusivePositionProvider() ) state.before_codegen(node) state.add_token(" ") with state.record_syntactic_position(node): state.add_token("pass") state.add_token(" ") state.after_codegen(node) # check whitespace is correctly recorded self.assertEqual(state.provider._computed[node], CodeRange((1, 0), (1, 6))) def test_position(self) -> None: # create a dummy node node = cst.Pass() # simulate codegen behavior for the dummy node # generates the code " pass " state = PositionProvidingCodegenState(" " * 4, "\n", PositionProvider()) state.before_codegen(node) state.add_token(" ") with state.record_syntactic_position(node): state.add_token("pass") state.add_token(" ") state.after_codegen(node) # check syntactic position ignores whitespace self.assertEqual(state.provider._computed[node], CodeRange((1, 1), (1, 5))) LibCST-1.2.0/libcst/metadata/tests/test_reentrant_codegen.py000066400000000000000000000074141456464173300241160ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from textwrap import dedent from typing import Callable import libcst as cst from libcst.metadata import ExperimentalReentrantCodegenProvider, MetadataWrapper from libcst.testing.utils import data_provider, UnitTest class ExperimentalReentrantCodegenProviderTest(UnitTest): @data_provider( { "simple_top_level_statement": { "old_module": ( """\ import math c = math.sqrt(a*a + b*b) """ ), "new_module": ( """\ import math c = math.hypot(a, b) """ ), "old_node": lambda m: m.body[1], "new_node": cst.parse_statement("c = math.hypot(a, b)"), }, "replacement_inside_block": { "old_module": ( """\ import math def do_math(a, b): c = math.sqrt(a*a + b*b) return c """ ), "new_module": ( """\ import math def do_math(a, b): c = math.hypot(a, b) return c """ ), "old_node": lambda m: m.body[1].body.body[0], "new_node": cst.parse_statement("c = math.hypot(a, b)"), }, "missing_trailing_newline": { "old_module": "old_fn()", # this module has no trailing newline "new_module": "new_fn()", "old_node": lambda m: m.body[0], "new_node": cst.parse_statement("new_fn()\n"), }, "nested_blocks_with_missing_trailing_newline": { "old_module": ( """\ if outer: if inner: old_fn()""" # this module has no trailing newline ), "new_module": ( """\ if outer: if inner: new_fn()""" ), "old_node": lambda m: m.body[0].body.body[0].body.body[0], "new_node": cst.parse_statement("new_fn()\n"), }, } ) def test_provider( self, old_module: str, new_module: str, old_node: Callable[[cst.Module], cst.CSTNode], new_node: cst.BaseStatement, ) -> None: old_module = dedent(old_module) new_module = dedent(new_module) mw = MetadataWrapper(cst.parse_module(old_module)) codegen_partial = mw.resolve(ExperimentalReentrantCodegenProvider)[ old_node(mw.module) ] self.assertEqual(codegen_partial.get_original_module_code(), old_module) self.assertEqual(codegen_partial.get_modified_module_code(new_node), new_module) def test_byte_conversion( self, ) -> None: module_bytes = "fn()\n".encode("utf-16") mw = MetadataWrapper( cst.parse_module("fn()\n", cst.PartialParserConfig(encoding="utf-16")) ) codegen_partial = 
mw.resolve(ExperimentalReentrantCodegenProvider)[ mw.module.body[0] ] self.assertEqual(codegen_partial.get_original_module_bytes(), module_bytes) self.assertEqual( codegen_partial.get_modified_module_bytes(cst.parse_statement("fn2()\n")), "fn2()\n".encode("utf-16"), ) LibCST-1.2.0/libcst/metadata/tests/test_scope_provider.py000066400000000000000000002506451456464173300234610ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys from textwrap import dedent from typing import cast, Mapping, Sequence, Tuple from unittest import mock import libcst as cst from libcst import ensure_type from libcst._parser.entrypoints import is_native from libcst.metadata import MetadataWrapper from libcst.metadata.scope_provider import ( _gen_dotted_names, AnnotationScope, Assignment, BuiltinAssignment, BuiltinScope, ClassScope, ComprehensionScope, FunctionScope, GlobalScope, ImportAssignment, LocalScope, QualifiedName, QualifiedNameSource, Scope, ScopeProvider, ) from libcst.testing.utils import data_provider, UnitTest class DependentVisitor(cst.CSTVisitor): METADATA_DEPENDENCIES = (ScopeProvider,) def get_scope_metadata_provider( module_str: str, ) -> Tuple[cst.Module, Mapping[cst.CSTNode, Scope]]: wrapper = MetadataWrapper(cst.parse_module(dedent(module_str))) return ( wrapper.module, cast( Mapping[cst.CSTNode, Scope], wrapper.resolve(ScopeProvider) ), # we're sure every node has an associated scope ) class ScopeProviderTest(UnitTest): def test_not_in_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ pass """ ) global_scope = scopes[m] self.assertEqual(global_scope["not_in_scope"], set()) def test_accesses(self) -> None: m, scopes = get_scope_metadata_provider( """ foo = 'toplevel' fn1(foo) fn2(foo) def fn_def(): foo = 'shadow' fn3(foo) """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) global_foo_assignments = list(scope_of_module["foo"]) self.assertEqual(len(global_foo_assignments), 1) foo_assignment = global_foo_assignments[0] self.assertEqual(len(foo_assignment.references), 2) fn1_call_arg = ensure_type( ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.Call, ).args[0] fn2_call_arg = ensure_type( ensure_type( ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.Call, ).args[0] self.assertEqual( {access.node for access in foo_assignment.references}, {fn1_call_arg.value, fn2_call_arg.value}, ) func_body = ensure_type(m.body[3], cst.FunctionDef).body func_foo_statement = func_body.body[0] scope_of_func_statement = scopes[func_foo_statement] self.assertIsInstance(scope_of_func_statement, FunctionScope) func_foo_assignments = scope_of_func_statement["foo"] self.assertEqual(len(func_foo_assignments), 1) foo_assignment = list(func_foo_assignments)[0] self.assertEqual(len(foo_assignment.references), 1) fn3_call_arg = ensure_type( ensure_type( ensure_type(func_body.body[1], cst.SimpleStatementLine).body[0], cst.Expr, ).value, cst.Call, ).args[0] self.assertEqual( {access.node for access in foo_assignment.references}, {fn3_call_arg.value} ) wrapper = MetadataWrapper(cst.parse_module("from a import b\n")) wrapper.visit(DependentVisitor()) wrapper = MetadataWrapper(cst.parse_module("def a():\n from b import c\n\n")) wrapper.visit(DependentVisitor()) def test_fstring_accesses(self) -> None: m, scopes = get_scope_metadata_provider( """ from 
a import b f"{b}" "hello" """ ) global_scope = scopes[m] self.assertIsInstance(global_scope, GlobalScope) global_accesses = list(global_scope.accesses) self.assertEqual(len(global_accesses), 1) import_node = ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.ImportFrom ) b_referent = list(global_accesses[0].referents)[0] self.assertIsInstance(b_referent, Assignment) if isinstance(b_referent, Assignment): # for the typechecker's eyes self.assertEqual(b_referent.node, import_node) @data_provider((("any",), ("True",), ("Exception",), ("__name__",))) def test_builtins(self, builtin: str) -> None: m, scopes = get_scope_metadata_provider( """ def fn(): pass """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) self.assertEqual(len(scope_of_module[builtin]), 1) self.assertEqual(len(scope_of_module["something_not_a_builtin"]), 0) scope_of_builtin = scope_of_module.parent self.assertIsInstance(scope_of_builtin, BuiltinScope) self.assertEqual(len(scope_of_builtin[builtin]), 1) self.assertEqual(len(scope_of_builtin["something_not_a_builtin"]), 0) func_body = ensure_type(m.body[0], cst.FunctionDef).body func_pass_statement = func_body.body[0] scope_of_func_statement = scopes[func_pass_statement] self.assertIsInstance(scope_of_func_statement, FunctionScope) self.assertEqual(len(scope_of_func_statement[builtin]), 1) self.assertEqual(len(scope_of_func_statement["something_not_a_builtin"]), 0) def test_import(self) -> None: m, scopes = get_scope_metadata_provider( """ import foo.bar import fizz.buzz as fizzbuzz import a.b.c import d.e.f as g """ ) scope_of_module = scopes[m] import_0 = cst.ensure_type( cst.ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.Import ) self.assertEqual(scopes[import_0], scope_of_module) import_aliases = import_0.names if not isinstance(import_aliases, cst.ImportStar): for alias in import_aliases: self.assertEqual(scopes[alias], scope_of_module) for idx, in_scopes in enumerate( [ ["foo", "foo.bar"], ["fizzbuzz"], ["a", "a.b", "a.b.c"], ["g"], ] ): for in_scope in in_scopes: self.assertEqual( len(scope_of_module[in_scope]), 1, f"{in_scope} should be in scope." 
) assignment = cast(ImportAssignment, list(scope_of_module[in_scope])[0]) self.assertEqual( assignment.name, in_scope, f"ImportAssignment name {assignment.name} should equal to {in_scope}.", ) import_node = ensure_type(m.body[idx], cst.SimpleStatementLine).body[0] self.assertEqual( assignment.node, import_node, f"The node of ImportAssignment {assignment.node} should equal to {import_node}", ) self.assertTrue(isinstance(import_node, (cst.Import, cst.ImportFrom))) names = import_node.names self.assertFalse(isinstance(names, cst.ImportStar)) alias = names[0] as_name = alias.asname.name if alias.asname else alias.name self.assertEqual( assignment.as_name, as_name, f"The alias name of ImportAssignment {assignment.as_name} should equal to {as_name}", ) def test_dotted_import_access(self) -> None: m, scopes = get_scope_metadata_provider( """ import a.b.c, x.y a.b.c(x.z) """ ) scope_of_module = scopes[m] first_statement = ensure_type(m.body[1], cst.SimpleStatementLine) call = ensure_type( ensure_type(first_statement.body[0], cst.Expr).value, cst.Call ) self.assertTrue("a.b.c" in scope_of_module) self.assertTrue("a" in scope_of_module) self.assertEqual(scope_of_module.accesses["a"], set()) a_b_c_assignment = cast(ImportAssignment, list(scope_of_module["a.b.c"])[0]) a_b_c_access = list(a_b_c_assignment.references)[0] self.assertEqual(scope_of_module.accesses["a.b.c"], {a_b_c_access}) self.assertEqual(a_b_c_access.node, call.func) x_assignment = cast(Assignment, list(scope_of_module["x"])[0]) x_access = list(x_assignment.references)[0] self.assertEqual(scope_of_module.accesses["x"], {x_access}) self.assertEqual( x_access.node, ensure_type(call.args[0].value, cst.Attribute).value ) self.assertTrue("x.y" in scope_of_module) self.assertEqual(list(scope_of_module["x.y"])[0].references, set()) self.assertEqual(scope_of_module.accesses["x.y"], set()) def test_dotted_import_access_reference_by_node(self) -> None: m, scopes = get_scope_metadata_provider( """ import a.b.c a.b.c() """ ) scope_of_module = scopes[m] first_statement = ensure_type(m.body[1], cst.SimpleStatementLine) call = ensure_type( ensure_type(first_statement.body[0], cst.Expr).value, cst.Call ) a_b_c_assignment = cast(ImportAssignment, list(scope_of_module["a.b.c"])[0]) a_b_c_access = list(a_b_c_assignment.references)[0] self.assertEqual(scope_of_module.accesses[call], {a_b_c_access}) self.assertEqual(a_b_c_access.node, call.func) def test_decorator_access_reference_by_node(self) -> None: m, scopes = get_scope_metadata_provider( """ import decorator @decorator def f(): pass """ ) scope_of_module = scopes[m] function_def = ensure_type(m.body[1], cst.FunctionDef) decorator = function_def.decorators[0] self.assertTrue("decorator" in scope_of_module) decorator_assignment = cast( ImportAssignment, list(scope_of_module["decorator"])[0] ) decorator_access = list(decorator_assignment.references)[0] self.assertEqual(scope_of_module.accesses[decorator], {decorator_access}) def test_dotted_import_with_call_access(self) -> None: m, scopes = get_scope_metadata_provider( """ import os.path os.path.join("A", "B").lower() """ ) scope_of_module = scopes[m] first_statement = ensure_type(m.body[1], cst.SimpleStatementLine) attr = ensure_type( ensure_type( ensure_type( ensure_type( ensure_type(first_statement.body[0], cst.Expr).value, cst.Call ).func, cst.Attribute, ).value, cst.Call, ).func, cst.Attribute, ).value self.assertTrue("os.path" in scope_of_module) self.assertTrue("os" in scope_of_module) os_path_join_assignment = cast( ImportAssignment, 
list(scope_of_module["os.path"])[0] ) os_path_join_assignment_references = list(os_path_join_assignment.references) self.assertNotEqual(len(os_path_join_assignment_references), 0) os_path_join_access = os_path_join_assignment_references[0] self.assertEqual(scope_of_module.accesses["os"], set()) self.assertEqual(scope_of_module.accesses["os.path"], {os_path_join_access}) self.assertEqual(scope_of_module.accesses["os.path.join"], set()) self.assertEqual(os_path_join_access.node, attr) def test_import_from(self) -> None: m, scopes = get_scope_metadata_provider( """ from foo.bar import a, b as b_renamed from . import c from .foo import d """ ) scope_of_module = scopes[m] import_from = cst.ensure_type( cst.ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.ImportFrom ) self.assertEqual(scopes[import_from], scope_of_module) import_aliases = import_from.names if not isinstance(import_aliases, cst.ImportStar): for alias in import_aliases: self.assertEqual(scopes[alias], scope_of_module) for idx, in_scope, imported_object_idx in [ (0, "a", 0), (0, "b_renamed", 1), (1, "c", 0), (2, "d", 0), ]: self.assertEqual( len(scope_of_module[in_scope]), 1, f"{in_scope} should be in scope." ) import_assignment = cast( ImportAssignment, list(scope_of_module[in_scope])[0] ) self.assertEqual( import_assignment.name, in_scope, f"The name of ImportAssignment {import_assignment.name} should equal to {in_scope}.", ) import_node = ensure_type(m.body[idx], cst.SimpleStatementLine).body[0] self.assertEqual( import_assignment.node, import_node, f"The node of ImportAssignment {import_assignment.node} should equal to {import_node}", ) self.assertTrue(isinstance(import_node, (cst.Import, cst.ImportFrom))) names = import_node.names self.assertFalse(isinstance(names, cst.ImportStar)) alias = names[imported_object_idx] as_name = alias.asname.name if alias.asname else alias.name self.assertEqual( import_assignment.as_name, as_name, f"The alias name of ImportAssignment {import_assignment.as_name} should equal to {as_name}", ) for not_in_scope in ["foo", "bar", "foo.bar", "b"]: self.assertEqual( len(scope_of_module[not_in_scope]), 0, f"{not_in_scope} should not be in scope.", ) def test_function_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ global_var = None def foo(arg, **kwargs): local_var = 5 """ ) scope_of_module = scopes[m] func_def = ensure_type(m.body[1], cst.FunctionDef) self.assertEqual(scopes[func_def], scopes[func_def.name]) func_body_statement = func_def.body.body[0] scope_of_func = scopes[func_body_statement] self.assertIsInstance(scope_of_func, FunctionScope) self.assertTrue("global_var" in scope_of_module) self.assertTrue("global_var" in scope_of_func) self.assertTrue("arg" not in scope_of_module) self.assertTrue("arg" in scope_of_func) self.assertTrue("kwargs" not in scope_of_module) self.assertTrue("kwargs" in scope_of_func) self.assertTrue("local_var" not in scope_of_module) self.assertTrue("local_var" in scope_of_func) def test_class_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ global_var = None @cls_attr class Cls(cls_attr, kwarg=cls_attr): cls_attr = 5 def f(): pass """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) cls_assignments = list(scope_of_module["Cls"]) self.assertEqual(len(cls_assignments), 1) cls_assignment = cast(Assignment, cls_assignments[0]) cls_def = ensure_type(m.body[1], cst.ClassDef) self.assertEqual(cls_assignment.node, cls_def) self.assertEqual(scopes[cls_def], scopes[cls_def.name]) cls_body = 
cls_def.body cls_body_statement = cls_body.body[0] scope_of_class = scopes[cls_body_statement] self.assertIsInstance(scope_of_class, ClassScope) func_body = ensure_type(cls_body.body[1], cst.FunctionDef).body func_body_statement = func_body.body[0] scope_of_func = scopes[func_body_statement] self.assertIsInstance(scope_of_func, FunctionScope) self.assertTrue("global_var" in scope_of_module) self.assertTrue("global_var" in scope_of_class) self.assertTrue("global_var" in scope_of_func) self.assertTrue("Cls" in scope_of_module) self.assertTrue("Cls" in scope_of_class) self.assertTrue("Cls" in scope_of_func) self.assertTrue("cls_attr" not in scope_of_module) self.assertTrue("cls_attr" in scope_of_class) self.assertTrue("cls_attr" not in scope_of_func) def test_comprehension_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ iterator = None condition = None [elt for target in iterator if condition] {elt for target in iterator if condition} {elt: target for target in iterator if condition} (elt for target in iterator if condition) """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) list_comp = ensure_type( ensure_type( ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.ListComp, ) scope_of_list_comp = scopes[list_comp.elt] self.assertIsInstance(scope_of_list_comp, ComprehensionScope) set_comp = ensure_type( ensure_type( ensure_type(m.body[3], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.SetComp, ) scope_of_set_comp = scopes[set_comp.elt] self.assertIsInstance(scope_of_set_comp, ComprehensionScope) dict_comp = ensure_type( ensure_type( ensure_type(m.body[4], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.DictComp, ) scope_of_dict_comp = scopes[dict_comp.key] self.assertIsInstance(scope_of_dict_comp, ComprehensionScope) generator_expr = ensure_type( ensure_type( ensure_type(m.body[5], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.GeneratorExp, ) scope_of_generator_expr = scopes[generator_expr.elt] self.assertIsInstance(scope_of_generator_expr, ComprehensionScope) self.assertTrue("iterator" in scope_of_module) self.assertTrue("iterator" in scope_of_list_comp) self.assertTrue("iterator" in scope_of_set_comp) self.assertTrue("iterator" in scope_of_dict_comp) self.assertTrue("iterator" in scope_of_generator_expr) self.assertTrue("condition" in scope_of_module) self.assertTrue("condition" in scope_of_list_comp) self.assertTrue("condition" in scope_of_set_comp) self.assertTrue("condition" in scope_of_dict_comp) self.assertTrue("condition" in scope_of_generator_expr) self.assertTrue("elt" not in scope_of_module) self.assertTrue("elt" not in scope_of_list_comp) self.assertTrue("elt" not in scope_of_set_comp) self.assertTrue("elt" not in scope_of_dict_comp) self.assertTrue("elt" not in scope_of_generator_expr) self.assertTrue("target" not in scope_of_module) self.assertTrue("target" in scope_of_list_comp) self.assertTrue("target" in scope_of_set_comp) self.assertTrue("target" in scope_of_dict_comp) self.assertTrue("target" in scope_of_generator_expr) def test_nested_comprehension_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ [y for x in iterator for y in x] """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) list_comp = ensure_type( ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.ListComp, ) scope_of_list_comp = scopes[list_comp.elt] self.assertIsInstance(scope_of_list_comp, ComprehensionScope) 
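# For `[y for x in iterator for y in x]`, the ListComp node, the outer
# CompFor, and the outermost iterable all evaluate in the enclosing module
# scope, while the element expression, both targets, and the inner CompFor
# (including its iterable) belong to the ComprehensionScope. The assertIs
# checks below pin down exactly that split.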
self.assertIs(scopes[list_comp], scope_of_module) self.assertIs(scopes[list_comp.elt], scope_of_list_comp) self.assertIs(scopes[list_comp.for_in], scope_of_module) self.assertIs(scopes[list_comp.for_in.iter], scope_of_module) self.assertIs(scopes[list_comp.for_in.target], scope_of_list_comp) inner_for_in = ensure_type(list_comp.for_in.inner_for_in, cst.CompFor) self.assertIs(scopes[inner_for_in], scope_of_list_comp) self.assertIs(scopes[inner_for_in.iter], scope_of_list_comp) self.assertIs(scopes[inner_for_in.target], scope_of_list_comp) def test_global_scope_overwrites(self) -> None: codes = ( """ class Cls: def f(): global var var = ... """, """ class Cls: def f(): global var import f as var """, ) for code in codes: m, scopes = get_scope_metadata_provider(code) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) self.assertTrue("var" in scope_of_module) cls = ensure_type(m.body[0], cst.ClassDef) scope_of_cls = scopes[cls.body.body[0]] self.assertIsInstance(scope_of_cls, ClassScope) self.assertTrue("var" in scope_of_cls) f = ensure_type(cls.body.body[0], cst.FunctionDef) scope_of_f = scopes[f.body.body[0]] self.assertIsInstance(scope_of_f, FunctionScope) self.assertTrue("var" in scope_of_f) self.assertEqual(scope_of_f["var"], scope_of_module["var"]) def test_nonlocal_scope_overwrites(self) -> None: codes = ( """ def outer_f(): var = ... class Cls: var = ... def inner_f(): nonlocal var var = ... """, """ def outer_f(): import f as var class Cls: var = ... def inner_f(): nonlocal var var = ... """, """ def outer_f(): var = ... class Cls: var = ... def inner_f(): nonlocal var import f as var """, ) for code in codes: m, scopes = get_scope_metadata_provider(code) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) self.assertTrue("var" not in scope_of_module) outer_f = ensure_type(m.body[0], cst.FunctionDef) outer_f_body_var = ensure_type( ensure_type(outer_f.body.body[0], cst.SimpleStatementLine).body[0], cst.CSTNode, ) scope_of_outer_f = scopes[outer_f_body_var] self.assertIsInstance(scope_of_outer_f, FunctionScope) self.assertTrue("var" in scope_of_outer_f) self.assertEqual(len(scope_of_outer_f["var"]), 2) cls = ensure_type(outer_f.body.body[1], cst.ClassDef) scope_of_cls = scopes[cls.body.body[0]] self.assertIsInstance(scope_of_cls, ClassScope) self.assertTrue("var" in scope_of_cls) inner_f = ensure_type(cls.body.body[1], cst.FunctionDef) inner_f_body_var = ensure_type( ensure_type(inner_f.body.body[1], cst.SimpleStatementLine).body[0], cst.CSTNode, ) scope_of_inner_f = scopes[inner_f_body_var] self.assertIsInstance(scope_of_inner_f, FunctionScope) self.assertTrue("var" in scope_of_inner_f) self.assertEqual(len(scope_of_inner_f["var"]), 2) self.assertEqual( { cast(Assignment, assignment).node for assignment in scope_of_outer_f["var"] }, { outer_f_body_var.targets[0].target if isinstance(outer_f_body_var, cst.Assign) else outer_f_body_var, inner_f_body_var.targets[0].target if isinstance(inner_f_body_var, cst.Assign) else inner_f_body_var, }, ) def test_local_scope_shadowing_with_functions(self) -> None: m, scopes = get_scope_metadata_provider( """ def f(): def f(): f = ... 
""" ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) self.assertTrue("f" in scope_of_module) outer_f = ensure_type(m.body[0], cst.FunctionDef) scope_of_outer_f = scopes[outer_f.body.body[0]] self.assertIsInstance(scope_of_outer_f, FunctionScope) self.assertTrue("f" in scope_of_outer_f) out_f_assignment = list(scope_of_module["f"])[0] self.assertEqual(cast(Assignment, out_f_assignment).node, outer_f) inner_f = ensure_type(outer_f.body.body[0], cst.FunctionDef) scope_of_inner_f = scopes[inner_f.body.body[0]] self.assertIsInstance(scope_of_inner_f, FunctionScope) self.assertTrue("f" in scope_of_inner_f) inner_f_assignment = list(scope_of_outer_f["f"])[0] self.assertEqual(cast(Assignment, inner_f_assignment).node, inner_f) def test_func_param_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ @decorator def f(x: T=1, *vararg, y: T=2, z, **kwarg) -> RET: pass """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) self.assertTrue("f" in scope_of_module) f = ensure_type(m.body[0], cst.FunctionDef) scope_of_f = scopes[f.body.body[0]] self.assertIsInstance(scope_of_f, FunctionScope) decorator = f.decorators[0] x = f.params.params[0] xT = ensure_type(x.annotation, cst.Annotation) one = ensure_type(x.default, cst.BaseExpression) vararg = ensure_type(f.params.star_arg, cst.Param) y = f.params.kwonly_params[0] yT = ensure_type(y.annotation, cst.Annotation) two = ensure_type(y.default, cst.BaseExpression) z = f.params.kwonly_params[1] kwarg = ensure_type(f.params.star_kwarg, cst.Param) ret = ensure_type(f.returns, cst.Annotation).annotation self.assertEqual(scopes[decorator], scope_of_module) self.assertEqual(scopes[x], scope_of_f) self.assertEqual(scopes[xT], scope_of_module) self.assertEqual(scopes[one], scope_of_module) self.assertEqual(scopes[vararg], scope_of_f) self.assertEqual(scopes[y], scope_of_f) self.assertEqual(scopes[yT], scope_of_module) self.assertEqual(scopes[z], scope_of_f) self.assertEqual(scopes[two], scope_of_module) self.assertEqual(scopes[kwarg], scope_of_f) self.assertEqual(scopes[ret], scope_of_module) self.assertTrue("x" not in scope_of_module) self.assertTrue("x" in scope_of_f) self.assertTrue("vararg" not in scope_of_module) self.assertTrue("vararg" in scope_of_f) self.assertTrue("y" not in scope_of_module) self.assertTrue("y" in scope_of_f) self.assertTrue("z" not in scope_of_module) self.assertTrue("z" in scope_of_f) self.assertTrue("kwarg" not in scope_of_module) self.assertTrue("kwarg" in scope_of_f) self.assertEqual(cast(Assignment, list(scope_of_f["x"])[0]).node, x) self.assertEqual(cast(Assignment, list(scope_of_f["vararg"])[0]).node, vararg) self.assertEqual(cast(Assignment, list(scope_of_f["y"])[0]).node, y) self.assertEqual(cast(Assignment, list(scope_of_f["z"])[0]).node, z) self.assertEqual(cast(Assignment, list(scope_of_f["kwarg"])[0]).node, kwarg) def test_lambda_param_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ lambda x=1, *vararg, y=2, z, **kwarg:x """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) f = ensure_type( ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.Lambda, ) scope_of_f = scopes[f.body] self.assertIsInstance(scope_of_f, FunctionScope) x = f.params.params[0] one = ensure_type(x.default, cst.BaseExpression) vararg = ensure_type(f.params.star_arg, cst.Param) y = f.params.kwonly_params[0] two = ensure_type(y.default, cst.BaseExpression) z = f.params.kwonly_params[1] kwarg = 
ensure_type(f.params.star_kwarg, cst.Param) self.assertEqual(scopes[x], scope_of_f) self.assertEqual(scopes[one], scope_of_module) self.assertEqual(scopes[vararg], scope_of_f) self.assertEqual(scopes[y], scope_of_f) self.assertEqual(scopes[z], scope_of_f) self.assertEqual(scopes[two], scope_of_module) self.assertEqual(scopes[kwarg], scope_of_f) self.assertTrue("x" not in scope_of_module) self.assertTrue("x" in scope_of_f) self.assertTrue("vararg" not in scope_of_module) self.assertTrue("vararg" in scope_of_f) self.assertTrue("y" not in scope_of_module) self.assertTrue("y" in scope_of_f) self.assertTrue("z" not in scope_of_module) self.assertTrue("z" in scope_of_f) self.assertTrue("kwarg" not in scope_of_module) self.assertTrue("kwarg" in scope_of_f) self.assertEqual(cast(Assignment, list(scope_of_f["x"])[0]).node, x) self.assertEqual(cast(Assignment, list(scope_of_f["vararg"])[0]).node, vararg) self.assertEqual(cast(Assignment, list(scope_of_f["y"])[0]).node, y) self.assertEqual(cast(Assignment, list(scope_of_f["z"])[0]).node, z) self.assertEqual(cast(Assignment, list(scope_of_f["kwarg"])[0]).node, kwarg) def test_except_handler(self) -> None: """ The ``except as`` is a special case. The asname is only available in the except body block and it'll be removed when exiting the block. See https://docs.python.org/3.4/reference/compound_stmts.html#except We don't create a new block for except body because we don't handle del in our Scope Analysis. """ m, scopes = get_scope_metadata_provider( """ try: ... except Exception as ex: ... """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) self.assertTrue("ex" in scope_of_module) self.assertEqual( cast(Assignment, list(scope_of_module["ex"])[0]).node, ensure_type( ensure_type(m.body[0], cst.Try).handlers[0].name, cst.AsName ).name, ) def test_with_asname(self) -> None: m, scopes = get_scope_metadata_provider( """ with open(file_name) as f: ...
""" ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) self.assertTrue("f" in scope_of_module) self.assertEqual( cast(Assignment, list(scope_of_module["f"])[0]).node, ensure_type( ensure_type(m.body[0], cst.With).items[0].asname, cst.AsName ).name, ) def test_get_qualified_names_for(self) -> None: m, scopes = get_scope_metadata_provider( """ from a.b import c class Cls: def f(self) -> "c": c() d = {} d['key'] = 0 def g(): pass g() """ ) cls = ensure_type(m.body[1], cst.ClassDef) f = ensure_type(cls.body.body[0], cst.FunctionDef) scope_of_module = scopes[m] self.assertEqual( scope_of_module.get_qualified_names_for( ensure_type(f.returns, cst.Annotation).annotation ), set(), "Get qualified name given a SimpleString type annotation is not supported", ) c_call = ensure_type( ensure_type(f.body.body[0], cst.SimpleStatementLine).body[0], cst.Expr ).value scope_of_f = scopes[c_call] self.assertIsInstance(scope_of_f, FunctionScope) self.assertEqual( scope_of_f.get_qualified_names_for(c_call), {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)}, ) self.assertEqual( scope_of_f.get_qualified_names_for(c_call), {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)}, ) g_call = ensure_type( ensure_type(m.body[3], cst.SimpleStatementLine).body[0], cst.Expr ).value self.assertIsInstance(scope_of_module, GlobalScope) self.assertEqual( scope_of_module.get_qualified_names_for(g_call), {QualifiedName("g", QualifiedNameSource.LOCAL)}, ) d_name = ( ensure_type( ensure_type(f.body.body[1], cst.SimpleStatementLine).body[0], cst.Assign ) .targets[0] .target ) self.assertEqual( scope_of_f.get_qualified_names_for(d_name), {QualifiedName("Cls.f..d", QualifiedNameSource.LOCAL)}, ) d_subscript = ( ensure_type( ensure_type(f.body.body[2], cst.SimpleStatementLine).body[0], cst.Assign ) .targets[0] .target ) self.assertEqual( scope_of_f.get_qualified_names_for(d_subscript), {QualifiedName("Cls.f..d", QualifiedNameSource.LOCAL)}, ) for builtin in ["map", "int", "dict"]: self.assertEqual( scope_of_f.get_qualified_names_for(cst.Name(value=builtin)), {QualifiedName(f"builtins.{builtin}", QualifiedNameSource.BUILTIN)}, f"Test builtin: {builtin}.", ) self.assertEqual( scope_of_module.get_qualified_names_for(cst.Name(value="d")), set(), "Test variable d in global scope.", ) def test_get_qualified_names_for_nested_cases(self) -> None: m, scopes = get_scope_metadata_provider( """ class A: def f1(self): def f2(): pass f2() def f3(self): class B(): ... 
B() def f4(): def f5(): class C: pass C() f5() """ ) cls_a = ensure_type(m.body[0], cst.ClassDef) func_f1 = ensure_type(cls_a.body.body[0], cst.FunctionDef) scope_of_cls_a = scopes[func_f1] self.assertIsInstance(scope_of_cls_a, ClassScope) self.assertEqual( scope_of_cls_a.get_qualified_names_for(func_f1), {QualifiedName("A.f1", QualifiedNameSource.LOCAL)}, ) func_f2_call = ensure_type( ensure_type(func_f1.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value scope_of_f1 = scopes[func_f2_call] self.assertIsInstance(scope_of_f1, FunctionScope) self.assertEqual( scope_of_f1.get_qualified_names_for(func_f2_call), {QualifiedName("A.f1.<locals>.f2", QualifiedNameSource.LOCAL)}, ) func_f3 = ensure_type(cls_a.body.body[1], cst.FunctionDef) call_b = ensure_type( ensure_type(func_f3.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value scope_of_f3 = scopes[call_b] self.assertIsInstance(scope_of_f3, FunctionScope) self.assertEqual( scope_of_f3.get_qualified_names_for(call_b), {QualifiedName("A.f3.<locals>.B", QualifiedNameSource.LOCAL)}, ) func_f4 = ensure_type(m.body[1], cst.FunctionDef) func_f5 = ensure_type(func_f4.body.body[0], cst.FunctionDef) scope_of_f4 = scopes[func_f5] self.assertIsInstance(scope_of_f4, FunctionScope) self.assertEqual( scope_of_f4.get_qualified_names_for(func_f5), {QualifiedName("f4.<locals>.f5", QualifiedNameSource.LOCAL)}, ) cls_c = func_f5.body.body[0] scope_of_f5 = scopes[cls_c] self.assertIsInstance(scope_of_f5, FunctionScope) self.assertEqual( scope_of_f5.get_qualified_names_for(cls_c), {QualifiedName("f4.<locals>.f5.<locals>.C", QualifiedNameSource.LOCAL)}, ) def test_get_qualified_names_for_the_same_prefix(self) -> None: m, scopes = get_scope_metadata_provider( """ from a import b, bc bc() """ ) call = ensure_type( ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.Call, ) module_scope = scopes[m] self.assertEqual( module_scope.get_qualified_names_for(call.func), {QualifiedName("a.bc", QualifiedNameSource.IMPORT)}, ) def test_get_qualified_names_for_dotted_imports(self) -> None: m, scopes = get_scope_metadata_provider( """ import a.b.c a(a.b.d) """ ) call = ensure_type( ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.Call, ) module_scope = scopes[m] self.assertEqual( module_scope.get_qualified_names_for(call.func), {QualifiedName("a", QualifiedNameSource.IMPORT)}, ) self.assertEqual( module_scope.get_qualified_names_for(call.args[0].value), {QualifiedName("a.b.d", QualifiedNameSource.IMPORT)}, ) import_stmt = ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.Import ) a_b_c = ensure_type(import_stmt.names[0].name, cst.Attribute) a_b = ensure_type(a_b_c.value, cst.Attribute) a = a_b.value self.assertEqual( module_scope.get_qualified_names_for(a_b_c), {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)}, ) self.assertEqual( module_scope.get_qualified_names_for(a_b), {QualifiedName("a.b", QualifiedNameSource.IMPORT)}, ) self.assertEqual( module_scope.get_qualified_names_for(a), {QualifiedName("a", QualifiedNameSource.IMPORT)}, ) def test_multiple_assignments(self) -> None: m, scopes = get_scope_metadata_provider( """ if 1: from a import b as c elif 2: from d import e as c c() """ ) call = ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr ).value scope = scopes[call] self.assertIsInstance(scope, GlobalScope) self.assertEqual( scope.get_qualified_names_for(call), { QualifiedName(name="a.b", source=QualifiedNameSource.IMPORT), QualifiedName(name="d.e",
source=QualifiedNameSource.IMPORT), }, ) self.assertEqual( scope.get_qualified_names_for("c"), { QualifiedName(name="a.b", source=QualifiedNameSource.IMPORT), QualifiedName(name="d.e", source=QualifiedNameSource.IMPORT), }, ) def test_assignments_and_accesses(self) -> None: m, scopes = get_scope_metadata_provider( """ a = 1 def f(): a = 2 a, b def g(): b = a a """ ) a_outer_assign = ( ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.Assign ) .targets[0] .target ) a_outer_access = ensure_type( ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr ).value scope_of_module = scopes[a_outer_assign] a_outer_assignments = scope_of_module.assignments[a_outer_access] self.assertEqual(len(a_outer_assignments), 1) a_outer_assignment = list(a_outer_assignments)[0] self.assertEqual(cast(Assignment, a_outer_assignment).node, a_outer_assign) self.assertEqual( {i.node for i in a_outer_assignment.references}, {a_outer_access} ) a_outer_assesses = scope_of_module.accesses[a_outer_assign] self.assertEqual(len(a_outer_assesses), 1) self.assertEqual(list(a_outer_assesses)[0].node, a_outer_access) self.assertEqual( {cast(Assignment, i).node for i in list(a_outer_assesses)[0].referents}, {a_outer_assign}, ) self.assertTrue(a_outer_assign in scope_of_module.accesses) self.assertTrue(a_outer_assign in scope_of_module.assignments) self.assertTrue(a_outer_access in scope_of_module.accesses) self.assertTrue(a_outer_access in scope_of_module.assignments) f = ensure_type(m.body[1], cst.FunctionDef) a_inner_assign = ( ensure_type( ensure_type( ensure_type(f.body, cst.IndentedBlock).body[0], cst.SimpleStatementLine, ).body[0], cst.Assign, ) .targets[0] .target ) scope_of_f = scopes[a_inner_assign] a_inner_assignments = scope_of_f.assignments["a"] self.assertEqual(len(a_inner_assignments), 1) self.assertEqual( cast(Assignment, list(a_inner_assignments)[0]).node, a_inner_assign ) tup = ensure_type( ensure_type( ensure_type( ensure_type(f.body, cst.IndentedBlock).body[1], cst.SimpleStatementLine, ).body[0], cst.Expr, ).value, cst.Tuple, ) a_inner_access = tup.elements[0].value b_inner_access = tup.elements[1].value all_inner_accesses = [i for i in scope_of_f.accesses] self.assertEqual(len(all_inner_accesses), 2) self.assertEqual( {i.node for i in all_inner_accesses}, {a_inner_access, b_inner_access} ) g = ensure_type(ensure_type(f.body, cst.IndentedBlock).body[2], cst.FunctionDef) inner_most_assign = ensure_type( ensure_type(g.body.body[0], cst.SimpleStatementLine).body[0], cst.Assign ) b_inner_most_assign = inner_most_assign.targets[0].target a_inner_most_access = inner_most_assign.value scope_of_g = scopes[b_inner_most_assign] self.assertEqual({i.node for i in scope_of_g.accesses}, {a_inner_most_access}) self.assertEqual( {cast(Assignment, i).node for i in scope_of_g.assignments}, {b_inner_most_assign}, ) self.assertEqual(len(set(scopes.values())), 3) def test_annotation_access(self) -> None: m, scopes = get_scope_metadata_provider( """ from typing import Literal, NewType, Optional, TypeVar, Callable, cast from a import A, B, C, D, D2, E, E2, F, G, G2, H, I, J, K, K2, L, M def x(a: A): pass def y(b: "B"): pass def z(c: Literal["C"]): pass DType = TypeVar("D2", bound=D) EType = TypeVar("E2", bound="E") FType = TypeVar("F") GType = NewType("G2", "Optional[G]") HType = Optional["H"] IType = Callable[..., I] class Test(Generic[J]): pass castedK = cast("K", "K2") castedL = cast("L", M) """ ) imp = ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.ImportFrom ) scope = 
scopes[imp] assignment = list(scope["A"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertTrue(references[0].is_annotation) assignment = list(scope["B"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertTrue(references[0].is_annotation) reference_node = references[0].node self.assertIsInstance(reference_node, cst.SimpleString) if isinstance(reference_node, cst.SimpleString): self.assertEqual(reference_node.evaluated_value, "B") assignment = list(scope["C"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 0) assignment = list(scope["D"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertFalse(references[0].is_annotation) self.assertTrue(references[0].is_type_hint) assignment = list(scope["D2"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 0) assignment = list(scope["E"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertFalse(references[0].is_annotation) self.assertTrue(references[0].is_type_hint) reference_node = references[0].node self.assertIsInstance(reference_node, cst.SimpleString) if isinstance(reference_node, cst.SimpleString): self.assertEqual(reference_node.evaluated_value, "E") assignment = list(scope["E2"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 0) assignment = list(scope["F"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 0) assignment = list(scope["G"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertFalse(references[0].is_annotation) self.assertTrue(references[0].is_type_hint) reference_node = references[0].node self.assertIsInstance(reference_node, cst.SimpleString) if isinstance(reference_node, cst.SimpleString): self.assertEqual(reference_node.evaluated_value, "Optional[G]") assignment = list(scope["G2"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 0) assignment = list(scope["H"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertFalse(references[0].is_annotation) self.assertTrue(references[0].is_type_hint) reference_node = references[0].node self.assertIsInstance(reference_node, cst.SimpleString) if isinstance(reference_node, cst.SimpleString): self.assertEqual(reference_node.evaluated_value, "H") assignment = list(scope["I"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertFalse(references[0].is_annotation) assignment = list(scope["J"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertFalse(references[0].is_annotation) assignment = list(scope["K"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertFalse(references[0].is_annotation) reference_node 
= references[0].node self.assertIsInstance(reference_node, cst.SimpleString) if isinstance(reference_node, cst.SimpleString): self.assertEqual(reference_node.evaluated_value, "K") assignment = list(scope["K2"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 0) assignment = list(scope["L"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) reference_node = references[0].node self.assertIsInstance(reference_node, cst.SimpleString) if isinstance(reference_node, cst.SimpleString): self.assertEqual(reference_node.evaluated_value, "L") assignment = list(scope["M"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) def test_insane_annotation_access(self) -> None: m, scopes = get_scope_metadata_provider( r""" from typing import TypeVar, Optional from a import G TypeVar("G2", bound="Optional[\"G\"]") """ ) imp = ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.ImportFrom ) call = ensure_type( ensure_type( ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.Call, ) bound = call.args[1].value scope = scopes[imp] assignment = next(iter(scope["G"])) self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) self.assertEqual(list(assignment.references)[0].node, bound) def test_dotted_annotation_access(self) -> None: m, scopes = get_scope_metadata_provider( r""" from typing import TypeVar import a.G TypeVar("G2", bound="a.G") """ ) imp = ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Import ) call = ensure_type( ensure_type( ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr ).value, cst.Call, ) bound = call.args[1].value scope = scopes[imp] assignment = next(iter(scope["a.G"])) self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) self.assertEqual(list(assignment.references)[0].node, bound) def test_node_of_scopes(self) -> None: m, scopes = get_scope_metadata_provider( """ def f1(): target() class C: attr = target() """ ) f1 = ensure_type(m.body[0], cst.FunctionDef) target_call = ensure_type( ensure_type(f1.body.body[0], cst.SimpleStatementLine).body[0], cst.Expr ).value f1_scope = scopes[target_call] self.assertIsInstance(f1_scope, FunctionScope) self.assertEqual(cast(FunctionScope, f1_scope).node, f1) c = ensure_type(m.body[1], cst.ClassDef) target_call_2 = ensure_type( ensure_type(c.body.body[0], cst.SimpleStatementLine).body[0], cst.Assign ).value c_scope = scopes[target_call_2] self.assertIsInstance(c_scope, ClassScope) self.assertEqual(cast(ClassScope, c_scope).node, c) def test_with_statement(self) -> None: m, scopes = get_scope_metadata_provider( """ import unittest.mock with unittest.mock.patch("something") as obj: obj.f1() unittest.mock """ ) import_ = ensure_type(m.body[0], cst.SimpleStatementLine).body[0] assignments = scopes[import_]["unittest"] self.assertEqual(len(assignments), 1) self.assertEqual(cast(Assignment, list(assignments)[0]).node, import_) with_ = ensure_type(m.body[1], cst.With) fn_call = with_.items[0].item self.assertEqual( scopes[fn_call].get_qualified_names_for(fn_call), { QualifiedName( name="unittest.mock.patch", source=QualifiedNameSource.IMPORT ) }, ) mock = ensure_type( ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr ).value self.assertEqual( 
scopes[fn_call].get_qualified_names_for(mock), {QualifiedName(name="unittest.mock", source=QualifiedNameSource.IMPORT)}, ) def test_del_context_names(self) -> None: m, scopes = get_scope_metadata_provider( """ import a dic = {} del dic del dic["key"] del a.b """ ) dic = ensure_type( ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Assign ).targets[0], cst.AssignTarget, ).target del_dic = ensure_type( ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Del ) scope = scopes[del_dic] assignments = list(scope["dic"]) self.assertEqual(len(assignments), 1) dic_assign = assignments[0] self.assertIsInstance(dic_assign, Assignment) self.assertEqual(cast(Assignment, dic_assign).node, dic) self.assertEqual(len(dic_assign.references), 2) del_dic_subscript = ensure_type( ensure_type( ensure_type(m.body[3], cst.SimpleStatementLine).body[0], cst.Del ).target, cst.Subscript, ) self.assertSetEqual( {i.node for i in dic_assign.references}, {del_dic.target, del_dic_subscript.value}, ) assignments = list(scope["a"]) self.assertEqual(len(assignments), 1) a_assign = assignments[0] self.assertIsInstance(a_assign, Assignment) import_a = ensure_type(m.body[0], cst.SimpleStatementLine).body[0] self.assertEqual(cast(Assignment, a_assign).node, import_a) self.assertEqual(len(a_assign.references), 1) del_a_b = ensure_type( ensure_type(m.body[4], cst.SimpleStatementLine).body[0], cst.Del ) self.assertEqual( {i.node for i in a_assign.references}, {ensure_type(del_a_b.target, cst.Attribute).value}, ) self.assertEqual(scope["b"], set()) def test_keyword_arg_in_call(self) -> None: m, scopes = get_scope_metadata_provider("call(arg=val)") call = ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.Expr ).value scope = scopes[call] self.assertIsInstance(scope, GlobalScope) self.assertEqual(len(scope["arg"]), 0) # no assignment should exist def test_global_contains_is_read_only(self) -> None: gscope = GlobalScope() before_assignments = list(gscope.assignments) before_accesses = list(gscope.accesses) self.assertFalse("doesnt_exist" in gscope) self.assertEqual(list(gscope.accesses), before_accesses) self.assertEqual(list(gscope.assignments), before_assignments) def test_contains_is_read_only(self) -> None: for s in [LocalScope, FunctionScope, ClassScope, ComprehensionScope]: with self.subTest(scope=s): gscope = GlobalScope() scope = s(parent=gscope, node=cst.Name("lol")) before_assignments = list(scope.assignments) before_accesses = list(scope.accesses) before_overwrites = list(scope._scope_overwrites.items()) before_parent_assignments = list(scope.parent.assignments) before_parent_accesses = list(scope.parent.accesses) self.assertFalse("doesnt_exist" in scope) self.assertEqual(list(scope.accesses), before_accesses) self.assertEqual(list(scope.assignments), before_assignments) self.assertEqual( list(scope._scope_overwrites.items()), before_overwrites ) self.assertEqual( list(scope.parent.assignments), before_parent_assignments ) self.assertEqual(list(scope.parent.accesses), before_parent_accesses) def test_attribute_of_function_call(self) -> None: get_scope_metadata_provider("foo().bar") def test_attribute_of_subscript_called(self) -> None: m, scopes = get_scope_metadata_provider("foo[0].bar.baz()") scope = scopes[m] self.assertIn("foo", scope.accesses) def test_self(self) -> None: with open(__file__) as f: get_scope_metadata_provider(f.read()) def test_get_qualified_names_for_is_read_only(self) -> None: m, scopes = get_scope_metadata_provider( """ import a import b """ ) a 
= m.body[0] scope = scopes[a] assignments_before = list(scope.assignments) accesses_before = list(scope.accesses) scope.get_qualified_names_for("doesnt_exist") self.assertEqual(list(scope.assignments), assignments_before) self.assertEqual(list(scope.accesses), accesses_before) def test_gen_dotted_names(self) -> None: names = {name for name, node in _gen_dotted_names(cst.Name(value="a"))} self.assertEqual(names, {"a"}) names = { name for name, node in _gen_dotted_names( cst.Attribute(value=cst.Name(value="a"), attr=cst.Name(value="b")) ) } self.assertEqual(names, {"a.b", "a"}) names = { name for name, node in _gen_dotted_names( cst.Attribute( value=cst.Call( func=cst.Attribute( value=cst.Attribute( value=cst.Name(value="a"), attr=cst.Name(value="b") ), attr=cst.Name(value="c"), ), args=[], ), attr=cst.Name(value="d"), ) ) } self.assertEqual(names, {"a.b.c", "a.b", "a"}) def test_ordering(self) -> None: m, scopes = get_scope_metadata_provider( """ from a import b class X: x = b b = b y = b """ ) global_scope = scopes[m] import_stmt = ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.ImportFrom ) first_assignment = list(global_scope.assignments)[0] assert isinstance(first_assignment, cst.metadata.Assignment) self.assertEqual(first_assignment.node, import_stmt) global_refs = first_assignment.references self.assertEqual(len(global_refs), 2) global_refs_nodes = {x.node for x in global_refs} class_def = ensure_type(m.body[1], cst.ClassDef) x = ensure_type( ensure_type(class_def.body.body[0], cst.SimpleStatementLine).body[0], cst.Assign, ) self.assertIn(x.value, global_refs_nodes) class_b = ensure_type( ensure_type(class_def.body.body[1], cst.SimpleStatementLine).body[0], cst.Assign, ) self.assertIn(class_b.value, global_refs_nodes) class_accesses = list(scopes[x].accesses) self.assertEqual(len(class_accesses), 3) self.assertIn( class_b.targets[0].target, [ ref.node for acc in class_accesses for ref in acc.referents if isinstance(ref, Assignment) ], ) y = ensure_type( ensure_type(class_def.body.body[2], cst.SimpleStatementLine).body[0], cst.Assign, ) self.assertIn(y.value, [access.node for access in class_accesses]) def test_ordering_between_scopes(self) -> None: m, scopes = get_scope_metadata_provider( """ def f(a): print(a) print(b) a = 1 b = 1 """ ) f = cst.ensure_type(m.body[0], cst.FunctionDef) a_param = f.params.params[0].name a_param_assignment = list(scopes[a_param]["a"])[0] a_param_refs = list(a_param_assignment.references) first_print = cst.ensure_type( cst.ensure_type( cst.ensure_type(f.body.body[0], cst.SimpleStatementLine).body[0], cst.Expr, ).value, cst.Call, ) second_print = cst.ensure_type( cst.ensure_type( cst.ensure_type(f.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr, ).value, cst.Call, ) self.assertEqual( first_print.args[0].value, a_param_refs[0].node, ) a_global = ( cst.ensure_type( cst.ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Assign ) .targets[0] .target ) a_global_assignment = list(scopes[a_global]["a"])[0] a_global_refs = list(a_global_assignment.references) self.assertEqual(a_global_refs, []) b_global = ( cst.ensure_type( cst.ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Assign ) .targets[0] .target ) b_global_assignment = list(scopes[b_global]["b"])[0] b_global_refs = list(b_global_assignment.references) self.assertEqual(len(b_global_refs), 1) self.assertEqual(b_global_refs[0].node, second_print.args[0].value) def test_ordering_comprehension(self) -> None: m, scopes = get_scope_metadata_provider( """ 
def f(a): [a for a in [] for b in a] [b for a in [] for b in a] [a for a in [] for a in []] a = 1 """ ) f = cst.ensure_type(m.body[0], cst.FunctionDef) a_param = f.params.params[0].name a_param_assignment = list(scopes[a_param]["a"])[0] a_param_refs = list(a_param_assignment.references) self.assertEqual(a_param_refs, []) first_comp = cst.ensure_type( cst.ensure_type( cst.ensure_type(f.body.body[0], cst.SimpleStatementLine).body[0], cst.Expr, ).value, cst.ListComp, ) a_comp_assignment = list(scopes[first_comp.elt]["a"])[0] self.assertEqual(len(a_comp_assignment.references), 2) self.assertIn( first_comp.elt, [ref.node for ref in a_comp_assignment.references] ) second_comp = cst.ensure_type( cst.ensure_type( cst.ensure_type(f.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr, ).value, cst.ListComp, ) b_comp_assignment = list(scopes[second_comp.elt]["b"])[0] self.assertEqual(len(b_comp_assignment.references), 1) a_second_comp_assignment = list(scopes[second_comp.elt]["a"])[0] self.assertEqual(len(a_second_comp_assignment.references), 1) third_comp = cst.ensure_type( cst.ensure_type( cst.ensure_type(f.body.body[2], cst.SimpleStatementLine).body[0], cst.Expr, ).value, cst.ListComp, ) a_third_comp_assignments = list(scopes[third_comp.elt]["a"]) self.assertEqual(len(a_third_comp_assignments), 2) a_third_comp_access = list(scopes[third_comp.elt].accesses)[0] self.assertEqual(a_third_comp_access.node, third_comp.elt) # We record both assignments because it's impossible to know which one # the access refers to without running the program self.assertEqual(len(a_third_comp_access.referents), 2) inner_for_in = third_comp.for_in.inner_for_in self.assertIsNotNone(inner_for_in) if inner_for_in: self.assertIn( inner_for_in.target, { ref.node for ref in a_third_comp_access.referents if isinstance(ref, Assignment) }, ) a_global = ( cst.ensure_type( cst.ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Assign ) .targets[0] .target ) a_global_assignment = list(scopes[a_global]["a"])[0] a_global_refs = list(a_global_assignment.references) self.assertEqual(a_global_refs, []) def test_ordering_comprehension_confusing(self) -> None: m, scopes = get_scope_metadata_provider( """ def f(a): [a for a in a] a = 1 """ ) f = cst.ensure_type(m.body[0], cst.FunctionDef) a_param = f.params.params[0].name a_param_assignment = list(scopes[a_param]["a"])[0] a_param_refs = list(a_param_assignment.references) self.assertEqual(len(a_param_refs), 1) comp = cst.ensure_type( cst.ensure_type( cst.ensure_type(f.body.body[0], cst.SimpleStatementLine).body[0], cst.Expr, ).value, cst.ListComp, ) a_comp_assignment = list(scopes[comp.elt]["a"])[0] self.assertEqual(list(a_param_refs)[0].node, comp.for_in.iter) self.assertEqual(len(a_comp_assignment.references), 1) self.assertEqual(list(a_comp_assignment.references)[0].node, comp.elt) def test_for_scope_ordering(self) -> None: m, scopes = get_scope_metadata_provider( """ def f(): for x in []: x class X: def f(): for x in []: x """ ) for scope in scopes.values(): for acc in scope.accesses: self.assertEqual( len(acc.referents), 1, msg=( "Access for node has incorrect number of referents: " + f"{acc.node}" ), ) def test_no_out_of_order_references_in_global_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ x = y y = 1 """ ) for scope in scopes.values(): for acc in scope.accesses: self.assertEqual( len(acc.referents), 0, msg=( "Access for node has incorrect number of referents: " + f"{acc.node}" ), ) def test_walrus_accesses(self) -> None: if sys.version_info < (3, 
8): self.skipTest("This python version doesn't support :=") m, scopes = get_scope_metadata_provider( """ if x := y: y = 1 x """ ) for scope in scopes.values(): for acc in scope.accesses: self.assertEqual( len(acc.referents), 1 if getattr(acc.node, "value", None) == "x" else 0, msg=( "Access for node has incorrect number of referents: " + f"{acc.node}" ), ) @data_provider( { "TypeVar": { "code": """ from typing import TypeVar TypeVar("Name", "int") """, "calls": [mock.call("int")], }, "Dict": { "code": """ from typing import Dict Dict["str", "int"] """, "calls": [mock.call("str"), mock.call("int")], }, "cast_no_annotation": { "code": """ from typing import Dict, cast cast(Dict[str, str], {})["3rr0r"] """, "calls": [], }, "cast_second_arg": { "code": """ from typing import cast cast(str, "foo") """, "calls": [], }, "cast_first_arg": { "code": """ from typing import cast cast("int", "foo") """, "calls": [ mock.call("int"), ], }, "typevar_func": { "code": """ from typing import TypeVar TypeVar("Name", func("int")) """, "calls": [], }, "literal": { "code": """ from typing import Literal Literal[\"G\"] """, "calls": [], }, "nested_str": { "code": r""" from typing import TypeVar, Optional from a import G TypeVar("G2", bound="Optional[\"G\"]") """, "calls": [mock.call('Optional["G"]'), mock.call("G")], }, "class_self_ref": { "code": """ from typing import TypeVar class HelperClass: value: TypeVar("THelperClass", bound="HelperClass") """, "calls": [mock.call("HelperClass")], }, } ) def test_parse_string_annotations( self, *, code: str, calls: Sequence[mock._Call] ) -> None: parse = cst.parse_module with mock.patch("libcst.parse_module") as parse_mock: parse_mock.side_effect = parse get_scope_metadata_provider(dedent(code)) calls = [mock.call(dedent(code))] + list(calls) self.assertEqual(parse_mock.call_count, len(calls)) parse_mock.assert_has_calls(calls) def test_builtin_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ a = pow(1, 2) def foo(): b = pow(2, 3) """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) self.assertEqual(len(scope_of_module["pow"]), 1) builtin_pow_assignment = list(scope_of_module["pow"])[0] self.assertIsInstance(builtin_pow_assignment, BuiltinAssignment) self.assertIsInstance(builtin_pow_assignment.scope, BuiltinScope) global_a_assignments = scope_of_module["a"] self.assertEqual(len(global_a_assignments), 1) a_assignment = list(global_a_assignments)[0] self.assertIsInstance(a_assignment, Assignment) func_body = ensure_type(m.body[1], cst.FunctionDef).body func_statement = func_body.body[0] scope_of_func_statement = scopes[func_statement] self.assertIsInstance(scope_of_func_statement, FunctionScope) func_b_assignments = scope_of_func_statement["b"] self.assertEqual(len(func_b_assignments), 1) b_assignment = list(func_b_assignments)[0] self.assertIsInstance(b_assignment, Assignment) builtin_pow_accesses = list(builtin_pow_assignment.references) self.assertEqual(len(builtin_pow_accesses), 2) def test_override_builtin_scope(self) -> None: m, scopes = get_scope_metadata_provider( """ def pow(x, y): return x ** y a = pow(1, 2) def foo(): b = pow(2, 3) """ ) scope_of_module = scopes[m] self.assertIsInstance(scope_of_module, GlobalScope) self.assertEqual(len(scope_of_module["pow"]), 1) global_pow_assignment = list(scope_of_module["pow"])[0] self.assertIsInstance(global_pow_assignment, Assignment) self.assertIsInstance(global_pow_assignment.scope, GlobalScope) global_a_assignments = scope_of_module["a"] 
self.assertEqual(len(global_a_assignments), 1) a_assignment = list(global_a_assignments)[0] self.assertIsInstance(a_assignment, Assignment) func_body = ensure_type(m.body[2], cst.FunctionDef).body func_statement = func_body.body[0] scope_of_func_statement = scopes[func_statement] self.assertIsInstance(scope_of_func_statement, FunctionScope) func_b_assignments = scope_of_func_statement["b"] self.assertEqual(len(func_b_assignments), 1) b_assignment = list(func_b_assignments)[0] self.assertIsInstance(b_assignment, Assignment) global_pow_accesses = list(global_pow_assignment.references) self.assertEqual(len(global_pow_accesses), 2) def test_annotation_access_in_typevar_bound(self) -> None: m, scopes = get_scope_metadata_provider( """ from typing import TypeVar class Test: var: TypeVar("T", bound="Test") """ ) imp = ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.ImportFrom ) scope = scopes[imp] assignment = list(scope["Test"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertTrue(references[0].is_annotation) def test_prefix_match(self) -> None: """Verify that a name doesn't overmatch on prefix""" m, scopes = get_scope_metadata_provider( """ def something(): ... """ ) scope = scopes[m] self.assertEqual( scope.get_qualified_names_for(cst.Name("something")), {QualifiedName(name="something", source=QualifiedNameSource.LOCAL)}, ) self.assertEqual( scope.get_qualified_names_for(cst.Name("something_else")), set(), ) def test_type_alias_scope(self) -> None: if not is_native(): self.skipTest("type aliases are only supported in the native parser") m, scopes = get_scope_metadata_provider( """ type A = C lol: A """ ) alias = ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.TypeAlias ) self.assertIsInstance(scopes[alias], GlobalScope) a_assignments = list(scopes[alias]["A"]) self.assertEqual(len(a_assignments), 1) lol = ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.AnnAssign ) self.assertEqual(len(a_references := list(a_assignments[0].references)), 1) self.assertEqual(a_references[0].node, lol.annotation.annotation) self.assertIsInstance(scopes[alias.value], AnnotationScope) def test_type_alias_param(self) -> None: if not is_native(): self.skipTest("type parameters are only supported in the native parser") m, scopes = get_scope_metadata_provider( """ B = int type A[T: B] = T lol: T """ ) alias = ensure_type( ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.TypeAlias ) assert alias.type_parameters param_scope = scopes[alias.type_parameters] self.assertEqual(len(t_assignments := list(param_scope["T"])), 1) self.assertEqual(len(t_refs := list(t_assignments[0].references)), 1) self.assertEqual(t_refs[0].node, alias.value) b = ( ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.Assign ) .targets[0] .target ) b_assignment = list(scopes[b]["B"])[0] self.assertEqual( {ref.node for ref in b_assignment.references}, {ensure_type(alias.type_parameters.params[0].param, cst.TypeVar).bound}, ) def test_type_alias_tuple_and_paramspec(self) -> None: if not is_native(): self.skipTest("type parameters are only supported in the native parser") m, scopes = get_scope_metadata_provider( """ type A[*T] = T lol: T type A[**T] = T lol: T """ ) alias_tuple = ensure_type( ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.TypeAlias ) assert alias_tuple.type_parameters param_scope = 
scopes[alias_tuple.type_parameters] self.assertEqual(len(t_assignments := list(param_scope["T"])), 1) self.assertEqual(len(t_refs := list(t_assignments[0].references)), 1) self.assertEqual(t_refs[0].node, alias_tuple.value) alias_paramspec = ensure_type( ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.TypeAlias ) assert alias_paramspec.type_parameters param_scope = scopes[alias_paramspec.type_parameters] self.assertEqual(len(t_assignments := list(param_scope["T"])), 1) self.assertEqual(len(t_refs := list(t_assignments[0].references)), 1) self.assertEqual(t_refs[0].node, alias_paramspec.value) def test_class_type_params(self) -> None: if not is_native(): self.skipTest("type parameters are only supported in the native parser") m, scopes = get_scope_metadata_provider( """ class W[T]: def f() -> T: pass def g[T]() -> T: pass """ ) cls = ensure_type(m.body[0], cst.ClassDef) cls_scope = scopes[cls.body.body[0]] self.assertEqual(len(t_assignments_in_cls := list(cls_scope["T"])), 1) assert cls.type_parameters self.assertEqual( ensure_type(t_assignments_in_cls[0], Assignment).node, cls.type_parameters.params[0].param, ) self.assertEqual( len(t_refs_in_cls := list(t_assignments_in_cls[0].references)), 1 ) f = ensure_type(cls.body.body[0], cst.FunctionDef) assert f.returns self.assertEqual(t_refs_in_cls[0].node, f.returns.annotation) g = ensure_type(cls.body.body[1], cst.FunctionDef) assert g.type_parameters assert g.returns self.assertEqual(len(t_assignments_in_g := list(scopes[g.body]["T"])), 1) self.assertEqual( ensure_type(t_assignments_in_g[0], Assignment).node, g.type_parameters.params[0].param, ) self.assertEqual(len(t_refs_in_g := list(t_assignments_in_g[0].references)), 1) self.assertEqual(t_refs_in_g[0].node, g.returns.annotation) def test_nested_class_type_params(self) -> None: if not is_native(): self.skipTest("type parameters are only supported in the native parser") m, scopes = get_scope_metadata_provider( """ class Outer: class Nested[T: Outer]: pass """ ) outer = ensure_type(m.body[0], cst.ClassDef) outer_refs = list(list(scopes[outer]["Outer"])[0].references) self.assertEqual(len(outer_refs), 1) inner = ensure_type(outer.body.body[0], cst.ClassDef) assert inner.type_parameters self.assertEqual( outer_refs[0].node, ensure_type(inner.type_parameters.params[0].param, cst.TypeVar).bound, ) def test_annotation_refers_to_nested_class(self) -> None: if not is_native(): self.skipTest("type parameters are only supported in the native parser") m, scopes = get_scope_metadata_provider( """ class Outer: class Nested: pass type Alias = Nested def meth1[T: Nested](self): pass def meth2[T](self, arg: Nested): pass """ ) outer = ensure_type(m.body[0], cst.ClassDef) nested = ensure_type(outer.body.body[0], cst.ClassDef) alias = ensure_type( ensure_type(outer.body.body[1], cst.SimpleStatementLine).body[0], cst.TypeAlias, ) self.assertIsInstance(scopes[alias.value], AnnotationScope) nested_refs_within_alias = list(scopes[alias.value].accesses["Nested"]) self.assertEqual(len(nested_refs_within_alias), 1) self.assertEqual( { ensure_type(ref, Assignment).node for ref in nested_refs_within_alias[0].referents }, {nested}, ) meth1 = ensure_type(outer.body.body[2], cst.FunctionDef) self.assertIsInstance(scopes[meth1], ClassScope) assert meth1.type_parameters meth1_typevar = ensure_type(meth1.type_parameters.params[0].param, cst.TypeVar) meth1_typevar_scope = scopes[meth1_typevar] self.assertIsInstance(meth1_typevar_scope, AnnotationScope) nested_refs_within_meth1 = 
list(meth1_typevar_scope.accesses["Nested"]) self.assertEqual(len(nested_refs_within_meth1), 1) self.assertEqual( { ensure_type(ref, Assignment).node for ref in nested_refs_within_meth1[0].referents }, {nested}, ) meth2 = ensure_type(outer.body.body[3], cst.FunctionDef) meth2_annotation = meth2.params.params[1].annotation assert meth2_annotation nested_refs_within_meth2 = list(scopes[meth2_annotation].accesses["Nested"]) self.assertEqual(len(nested_refs_within_meth2), 1) self.assertEqual( { ensure_type(ref, Assignment).node for ref in nested_refs_within_meth2[0].referents }, {nested}, ) def test_body_isnt_subject_to_special_annotation_rule(self) -> None: if not is_native(): self.skipTest("type parameters are only supported in the native parser") m, scopes = get_scope_metadata_provider( """ class Outer: class Inner: pass def f[T: Inner](self): Inner """ ) outer = ensure_type(m.body[0], cst.ClassDef) # note: this is different from global scope outer_scope = scopes[outer.body.body[0]] inner_assignment = list(outer_scope["Inner"])[0] self.assertEqual(len(inner_assignment.references), 1) f = ensure_type(outer.body.body[1], cst.FunctionDef) assert f.type_parameters T = ensure_type(f.type_parameters.params[0].param, cst.TypeVar) self.assertIs(list(inner_assignment.references)[0].node, T.bound) inner_in_func_body = ensure_type(f.body.body[0], cst.Expr) f_scope = scopes[inner_in_func_body] self.assertIn(inner_in_func_body.value, f_scope.accesses) self.assertEqual(list(f_scope.accesses)[0].referents, set()) LibCST-1.2.0/libcst/metadata/tests/test_span_provider.py000066400000000000000000000064311456464173300233010ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
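# A minimal usage sketch of the scope API exercised by the tests above,
# assuming only public libcst APIs; the module text and variable names are
# illustrative, not part of the test suite.
import libcst as cst
from libcst.metadata import MetadataWrapper, ScopeProvider

wrapper = MetadataWrapper(cst.parse_module("x = 1\nprint(x)\n"))
scopes = wrapper.resolve(ScopeProvider)  # Mapping[CSTNode, Scope]
global_scope = scopes[wrapper.module]  # the Module node maps to its GlobalScope
for assignment in global_scope["x"]:
    # `x = 1` yields a single Assignment whose one reference is `print(x)`
    print(type(assignment).__name__, len(assignment.references))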
import libcst as cst from libcst.metadata.span_provider import ( byte_length_in_utf8, ByteSpanPositionProvider, CodeSpan, SpanProvidingCodegenState, ) from libcst.testing.utils import UnitTest class SpanProvidingCodegenStateTest(UnitTest): def test_initial_position(self) -> None: state = SpanProvidingCodegenState( " " * 4, "\n", get_length=byte_length_in_utf8, provider=ByteSpanPositionProvider(), ) self.assertEqual(state.position, 0) def test_add_token(self) -> None: state = SpanProvidingCodegenState( " " * 4, "\n", get_length=byte_length_in_utf8, provider=ByteSpanPositionProvider(), ) state.add_token("12") self.assertEqual(state.position, 2) def test_add_non_ascii_token(self) -> None: state = SpanProvidingCodegenState( " " * 4, "\n", get_length=byte_length_in_utf8, provider=ByteSpanPositionProvider(), ) state.add_token("🤡") self.assertEqual(state.position, 4) def test_add_indent_tokens(self) -> None: state = SpanProvidingCodegenState( " " * 4, "\n", get_length=byte_length_in_utf8, provider=ByteSpanPositionProvider(), ) state.increase_indent(state.default_indent) state.add_indent_tokens() self.assertEqual(state.position, 4) def test_span(self) -> None: node = cst.Pass() state = SpanProvidingCodegenState( " " * 4, "\n", get_length=byte_length_in_utf8, provider=ByteSpanPositionProvider(), ) state.before_codegen(node) state.add_token(" ") with state.record_syntactic_position(node): state.add_token("pass") state.add_token(" ") state.after_codegen(node) span = state.provider._computed[node] self.assertEqual(span.start, 1) self.assertEqual(span.length, 4) class ByteSpanPositionProviderTest(UnitTest): def test_visitor_provider(self) -> None: test = self class SomeVisitor(cst.CSTVisitor): METADATA_DEPENDENCIES = (ByteSpanPositionProvider,) def visit_Pass(self, node: cst.Pass) -> None: test.assertEqual( self.get_metadata(ByteSpanPositionProvider, node), CodeSpan(start=0, length=4), ) wrapper = cst.MetadataWrapper(cst.parse_module("pass")) wrapper.visit(SomeVisitor()) def test_batchable_provider(self) -> None: test = self class SomeVisitor(cst.BatchableCSTVisitor): METADATA_DEPENDENCIES = (ByteSpanPositionProvider,) def visit_Pass(self, node: cst.Pass) -> None: test.assertEqual( self.get_metadata(ByteSpanPositionProvider, node), CodeSpan(start=0, length=4), ) wrapper = cst.MetadataWrapper(cst.parse_module("pass")) wrapper.visit_batched([SomeVisitor()]) LibCST-1.2.0/libcst/metadata/tests/test_type_inference_provider.py000066400000000000000000000073561456464173300253460ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
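# A small sketch of ByteSpanPositionProvider used as ordinary metadata, as the
# tests above exercise it: each node resolves to a CodeSpan giving its byte
# offset and byte length. The snippet and printed values are illustrative.
import libcst as cst
from libcst.metadata import MetadataWrapper
from libcst.metadata.span_provider import ByteSpanPositionProvider

wrapper = MetadataWrapper(cst.parse_module("pass\n"))
spans = wrapper.resolve(ByteSpanPositionProvider)
for node, span in spans.items():
    if isinstance(node, cst.Pass):
        print(span.start, span.length)  # 0 4, i.e. CodeSpan(start=0, length=4)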
import json import os import subprocess import sys from pathlib import Path from typing import cast, Mapping, Optional from unittest import skipIf import libcst as cst from libcst import MetadataWrapper from libcst.metadata.type_inference_provider import PyreData, TypeInferenceProvider from libcst.testing.utils import data_provider, UnitTest from libcst.tests.test_pyre_integration import TEST_SUITE_PATH def _test_simple_class_helper(test: UnitTest, wrapper: MetadataWrapper) -> None: types = wrapper.resolve(TypeInferenceProvider) m = wrapper.module assign = cst.ensure_type( cst.ensure_type( cst.ensure_type( cst.ensure_type(m.body[1].body, cst.IndentedBlock).body[0], cst.FunctionDef, ).body.body[0], cst.SimpleStatementLine, ).body[0], cst.AnnAssign, ) self_number_attr = cst.ensure_type(assign.target, cst.Attribute) test.assertEqual(types[self_number_attr], "int") value = assign.value if value: test.assertEqual(types[value], "int") # self test.assertEqual(types[self_number_attr.value], "simple_class.Item") collector_assign = cst.ensure_type( cst.ensure_type(m.body[3], cst.SimpleStatementLine).body[0], cst.Assign ) collector = collector_assign.targets[0].target test.assertEqual(types[collector], "simple_class.ItemCollector") items_assign = cst.ensure_type( cst.ensure_type(m.body[4], cst.SimpleStatementLine).body[0], cst.AnnAssign ) items = items_assign.target test.assertEqual(types[items], "typing.Sequence[simple_class.Item]") @skipIf( sys.version_info < (3, 7), "TypeInferenceProvider doesn't support 3.6 and below" ) @skipIf(sys.platform == "win32", "TypeInferenceProvider doesn't support windows") class TypeInferenceProviderTest(UnitTest): maxDiff: Optional[int] = None @classmethod def setUpClass(cls) -> None: os.chdir(TEST_SUITE_PATH) try: subprocess.run(["pyre", "-n", "start", "--no-watchman"]) except subprocess.TimeoutExpired as exc: raise exc @classmethod def tearDownClass(cls) -> None: try: subprocess.run(["pyre", "-n", "stop"], cwd=TEST_SUITE_PATH) except subprocess.TimeoutExpired as exc: raise exc @data_provider( ((TEST_SUITE_PATH / "simple_class.py", TEST_SUITE_PATH / "simple_class.json"),) ) def test_gen_cache(self, source_path: Path, data_path: Path) -> None: cache = TypeInferenceProvider.gen_cache( root_path=source_path.parent, paths=[source_path.name], timeout=None ) result = cast(Mapping[str, object], cache[source_path.name]) data: PyreData = json.loads(data_path.read_text()) self.assertDictEqual( data, result, "Pyre query result mismatch, try running `scripts/regenerate-fixtures.py`?", ) @data_provider( ((TEST_SUITE_PATH / "simple_class.py", TEST_SUITE_PATH / "simple_class.json"),) ) def test_simple_class_types(self, source_path: Path, data_path: Path) -> None: data: PyreData = json.loads(data_path.read_text()) wrapper = MetadataWrapper( cst.parse_module(source_path.read_text()), cache={TypeInferenceProvider: data}, ) _test_simple_class_helper(self, wrapper) def test_with_empty_cache(self) -> None: tip = TypeInferenceProvider({}) self.assertEqual(tip.lookup, {}) tip = TypeInferenceProvider(PyreData()) self.assertEqual(tip.lookup, {}) LibCST-1.2.0/libcst/metadata/type_inference_provider.py000066400000000000000000000110171456464173300231320ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
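# A hedged sketch of feeding pre-computed Pyre data to TypeInferenceProvider
# through the MetadataWrapper cache, mirroring test_simple_class_types above.
# The hand-written `data` dict stands in for a real `pyre query "types(...)"`
# response and is illustrative only.
import libcst as cst
from libcst import MetadataWrapper
from libcst.metadata.type_inference_provider import TypeInferenceProvider

src = "x = 1\n"
data = {
    "types": [
        {
            "location": {
                "path": "example.py",  # hypothetical path
                "start": {"line": 1, "column": 0},
                "stop": {"line": 1, "column": 1},
            },
            "annotation": "int",
        }
    ]
}
wrapper = MetadataWrapper(cst.parse_module(src), cache={TypeInferenceProvider: data})
types = wrapper.resolve(TypeInferenceProvider)
stmt = cst.ensure_type(wrapper.module.body[0], cst.SimpleStatementLine)
target = cst.ensure_type(stmt.body[0], cst.Assign).targets[0].target
print(types[target])  # "int"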
import json import subprocess from pathlib import Path from typing import Dict, List, Mapping, Optional, Sequence, Tuple from mypy_extensions import TypedDict import libcst as cst from libcst._position import CodePosition, CodeRange from libcst.metadata.base_provider import BatchableMetadataProvider from libcst.metadata.position_provider import PositionProvider class Position(TypedDict): line: int column: int class Location(TypedDict): path: str start: Position stop: Position class InferredType(TypedDict): location: Location annotation: str class PyreData(TypedDict, total=False): types: Sequence[InferredType] class TypeInferenceProvider(BatchableMetadataProvider[str]): """ Access inferred type annotation through `Pyre Query API `_. It requires `setup watchman `_ and start pyre server by running ``pyre`` command. The inferred type is a string of `type annotation `_. E.g. ``typing.List[libcst._nodes.expression.Name]`` is the inferred type of name ``n`` in expression ``n = [cst.Name("")]``. All name references use the fully qualified name regardless how the names are imported. (e.g. ``import libcst; libcst.Name`` and ``import libcst as cst; cst.Name`` refer to the same name.) Pyre infers the type of :class:`~libcst.Name`, :class:`~libcst.Attribute` and :class:`~libcst.Call` nodes. The inter process communication to Pyre server is managed by :class:`~libcst.metadata.FullRepoManager`. """ METADATA_DEPENDENCIES = (PositionProvider,) @staticmethod # pyre-fixme[40]: Static method `gen_cache` cannot override a non-static method # defined in `cst.metadata.base_provider.BaseMetadataProvider`. def gen_cache( root_path: Path, paths: List[str], timeout: Optional[int] ) -> Mapping[str, object]: params = ",".join(f"path='{root_path / path}'" for path in paths) cmd_args = ["pyre", "--noninteractive", "query", f"types({params})"] try: stdout, stderr, return_code = run_command(cmd_args, timeout=timeout) except subprocess.TimeoutExpired as exc: raise exc if return_code != 0: raise Exception(f"stderr:\n {stderr}\nstdout:\n {stdout}") try: resp = json.loads(stdout)["response"] except Exception as e: raise Exception(f"{e}\n\nstderr:\n {stderr}\nstdout:\n {stdout}") return {path: _process_pyre_data(data) for path, data in zip(paths, resp)} def __init__(self, cache: PyreData) -> None: super().__init__(cache) lookup: Dict[CodeRange, str] = {} cache_types = cache.get("types", []) for item in cache_types: location = item["location"] start = location["start"] end = location["stop"] lookup[ CodeRange( start=CodePosition(start["line"], start["column"]), end=CodePosition(end["line"], end["column"]), ) ] = item["annotation"] self.lookup: Dict[CodeRange, str] = lookup def _parse_metadata(self, node: cst.CSTNode) -> None: range = self.get_metadata(PositionProvider, node) if range in self.lookup: self.set_metadata(node, self.lookup.pop(range)) def visit_Name(self, node: cst.Name) -> Optional[bool]: self._parse_metadata(node) def visit_Attribute(self, node: cst.Attribute) -> Optional[bool]: self._parse_metadata(node) def visit_Call(self, node: cst.Call) -> Optional[bool]: self._parse_metadata(node) def run_command( cmd_args: List[str], timeout: Optional[int] = None ) -> Tuple[str, str, int]: process = subprocess.run(cmd_args, capture_output=True, timeout=timeout) return process.stdout.decode(), process.stderr.decode(), process.returncode class RawPyreData(TypedDict): path: str types: Sequence[InferredType] def _process_pyre_data(data: RawPyreData) -> PyreData: return {"types": sorted(data["types"], key=_sort_by_position)} def 
_sort_by_position(data: InferredType) -> Tuple[int, int, int, int]: start = data["location"]["start"] stop = data["location"]["stop"] return start["line"], start["column"], stop["line"], stop["column"] LibCST-1.2.0/libcst/metadata/wrapper.py000066400000000000000000000166271456464173300177150ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import textwrap from contextlib import ExitStack from types import MappingProxyType from typing import ( Any, cast, Collection, Iterable, Mapping, MutableMapping, MutableSet, Optional, Type, TYPE_CHECKING, TypeVar, ) from libcst._batched_visitor import BatchableCSTVisitor, visit_batched, VisitorMethod from libcst._exceptions import MetadataException from libcst.metadata.base_provider import BatchableMetadataProvider if TYPE_CHECKING: from libcst._nodes.base import CSTNode # noqa: F401 from libcst._nodes.module import Module # noqa: F401 from libcst._visitors import CSTVisitorT # noqa: F401 from libcst.metadata.base_provider import ( # noqa: F401 BaseMetadataProvider, ProviderT, ) _T = TypeVar("_T") def _gen_batchable( wrapper: "MetadataWrapper", # pyre-fixme[2]: Parameter `providers` must have a type that does not contain `Any` providers: Iterable[BatchableMetadataProvider[Any]], ) -> Mapping["ProviderT", Mapping["CSTNode", object]]: """ Returns map of metadata mappings from resolving ``providers`` on ``wrapper``. """ wrapper.visit_batched(providers) # Make immutable metadata mapping # pyre-ignore[7] return {type(p): MappingProxyType(dict(p._computed)) for p in providers} def _gather_providers( providers: Collection["ProviderT"], gathered: MutableSet["ProviderT"] ) -> MutableSet["ProviderT"]: """ Recursively gathers all the given providers and their dependencies. """ for P in providers: if P not in gathered: gathered.add(P) _gather_providers(P.METADATA_DEPENDENCIES, gathered) return gathered def _resolve_impl( wrapper: "MetadataWrapper", providers: Collection["ProviderT"] ) -> None: """ Updates the _metadata map on wrapper with metadata from the given providers as well as their dependencies. """ completed = set(wrapper._metadata.keys()) remaining = _gather_providers(set(providers), set()) - completed while len(remaining) > 0: batchable = set() for P in remaining: if set(P.METADATA_DEPENDENCIES).issubset(completed): if issubclass(P, BatchableMetadataProvider): batchable.add(P) else: wrapper._metadata[P] = ( P(wrapper._cache.get(P))._gen(wrapper) if P.gen_cache else P()._gen(wrapper) ) completed.add(P) initialized_batchable = [ p(wrapper._cache.get(p)) if p.gen_cache else p() for p in batchable ] metadata_batch = _gen_batchable(wrapper, initialized_batchable) wrapper._metadata.update(metadata_batch) completed |= batchable if len(completed) == 0 and len(batchable) == 0: # remaining must be non-empty at this point names = ", ".join([P.__name__ for P in remaining]) raise MetadataException(f"Detected circular dependencies in {names}") remaining -= completed class MetadataWrapper: """ A wrapper around a :class:`~libcst.Module` that stores associated metadata for that module. When a :class:`MetadataWrapper` is constructed over a module, the wrapper will store a deep copy of the original module. This means ``MetadataWrapper(module).module == module`` is ``False``. This copying operation ensures that a node will never appear twice (by identity) in the same tree. 
This allows us to uniquely look up metadata for a node based on a node's identity. """ __slots__ = ["__module", "_metadata", "_cache"] __module: "Module" _metadata: MutableMapping["ProviderT", Mapping["CSTNode", object]] _cache: Mapping["ProviderT", object] def __init__( self, module: "Module", unsafe_skip_copy: bool = False, cache: Mapping["ProviderT", object] = {}, ) -> None: """ :param module: The module to wrap. This is deeply copied by default. :param unsafe_skip_copy: When true, this skips the deep cloning of the module. This can provide a small performance benefit, but you should only use this if you know that there are no duplicate nodes in your tree (e.g. this module came from the parser). :param cache: Pass the needed cache to the wrapper to be used when resolving metadata. """ # Ensure that module is safe to use by copying the module to remove # any duplicate nodes. if not unsafe_skip_copy: module = module.deep_clone() self.__module = module self._metadata = {} self._cache = cache def __repr__(self) -> str: return f"MetadataWrapper(\n{textwrap.indent(repr(self.module), ' ' * 4)},\n)" @property def module(self) -> "Module": """ The module that's wrapped by this MetadataWrapper. By default, this is a deep copy of the passed in module. :: mw = MetadataWrapper(module) # Because `mw.module is not module`, you probably want to visit and do # your analysis on `mw.module`, not `module`. mw.module.visit(DoSomeAnalysisVisitor) """ # use a property getter to enforce that this is a read-only variable return self.__module def resolve( self, provider: Type["BaseMetadataProvider[_T]"] ) -> Mapping["CSTNode", _T]: """ Returns a copy of the metadata mapping computed by ``provider``. """ if provider in self._metadata: metadata = self._metadata[provider] else: metadata = self.resolve_many([provider])[provider] return cast(Mapping["CSTNode", _T], metadata) def resolve_many( self, providers: Collection["ProviderT"] ) -> Mapping["ProviderT", Mapping["CSTNode", object]]: """ Returns a copy of the map of metadata mapping computed by each provider in ``providers``. The returned map does not contain any metadata from undeclared metadata dependencies that ``providers`` has. """ _resolve_impl(self, providers) # Only return what was declared in providers return {k: self._metadata[k] for k in providers} def visit(self, visitor: "CSTVisitorT") -> "Module": """ Convenience method to resolve metadata before performing a traversal over ``self.module`` with ``visitor``. See :func:`~libcst.Module.visit`. """ with visitor.resolve(self): return self.module.visit(visitor) def visit_batched( self, visitors: Iterable[BatchableCSTVisitor], before_visit: Optional[VisitorMethod] = None, after_leave: Optional[VisitorMethod] = None, ) -> "CSTNode": """ Convenience method to resolve metadata before performing a traversal over ``self.module`` with ``visitors``. See :func:`~libcst.visit_batched`. """ with ExitStack() as stack: # Resolve dependencies of visitors for v in visitors: stack.enter_context(v.resolve(self)) return visit_batched(self.module, visitors, before_visit, after_leave) LibCST-1.2.0/libcst/py.typed000066400000000000000000000000001456464173300155640ustar00rootroot00000000000000LibCST-1.2.0/libcst/testing/000077500000000000000000000000001456464173300155445ustar00rootroot00000000000000LibCST-1.2.0/libcst/testing/__init__.py000066400000000000000000000002631456464173300176560ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/testing/utils.py000066400000000000000000000137321456464173300172640ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe import inspect import re from functools import wraps from typing import ( Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, ) from unittest import TestCase DATA_PROVIDER_DATA_ATTR_NAME = "__data_provider_data" DATA_PROVIDER_DESCRIPTION_PREFIX = "_data_provider_" PROVIDER_TEST_LIMIT_ATTR_NAME = "__provider_test_limit" DEFAULT_TEST_LIMIT = 256 T = TypeVar("T") def none_throws(value: Optional[T], message: str = "Unexpected None value") -> T: assert value is not None, message return value def update_test_limit(test_method: Any, test_limit: int) -> None: # Store the maximum number of generated tests on the test_method. Since # contextmanager_provider can be specified multiple times, we need to # take the maximum of the existing attribute and the current value existing_test_limit = getattr( test_method, PROVIDER_TEST_LIMIT_ATTR_NAME, test_limit ) setattr( test_method, PROVIDER_TEST_LIMIT_ATTR_NAME, max(existing_test_limit, test_limit) ) def try_get_provider_attr( member_name: str, member: Any, attr_name: str ) -> Optional[Any]: if inspect.isfunction(member) and member_name.startswith("test"): return getattr(member, attr_name, None) return None def populate_data_provider_tests(dct: Dict[str, Any]) -> None: test_methods_to_add: Dict[str, Callable] = {} test_methods_to_remove: List[str] = [] for member_name, member in dct.items(): provider_data = try_get_provider_attr( member_name, member, DATA_PROVIDER_DATA_ATTR_NAME ) if provider_data is not None: for description, data in ( provider_data.items() if isinstance(provider_data, dict) else enumerate(provider_data) ): if isinstance(provider_data, dict): description = f"{DATA_PROVIDER_DESCRIPTION_PREFIX}{description}" assert re.fullmatch( r"[a-zA-Z0-9_]+", str(description) ), f"Testcase description must be a valid python identifier: '{description}'" @wraps(member) def new_test( self: object, data: Iterable[object] = data, member: Callable[..., object] = member, ) -> object: if isinstance(data, dict): return member(self, **data) else: return member(self, *data) name = f"{member_name}_{description}" new_test.__name__ = name test_methods_to_add[name] = new_test if not test_methods_to_add: raise ValueError( f"No data_provider tests were created for {member_name}! Please double check your data." 
) test_methods_to_remove.append(member_name) dct.update(test_methods_to_add) # Remove all old methods for test_name in test_methods_to_remove: del dct[test_name] def validate_provider_tests(dct: Dict[str, Any]) -> None: members_to_replace = {} for member_name, member in dct.items(): test_limit = try_get_provider_attr( member_name, member, PROVIDER_TEST_LIMIT_ATTR_NAME ) if test_limit is not None: data = try_get_provider_attr( member_name, member, DATA_PROVIDER_DATA_ATTR_NAME ) num_tests = len(data) if data else 1 if num_tests > test_limit: # We don't use wraps() here so that the test isn't expanded # as it normally would be by whichever provider it uses def test_replacement( self: Any, member_name: Any = member_name, num_tests: Any = num_tests, test_limit: Any = test_limit, ) -> None: raise AssertionError( f"{member_name} generated {num_tests} tests but the limit is " + f"{test_limit}. You can increase the number of " + "allowed tests by specifying test_limit, but please " + "consider whether you really need to test all of " + "these combinations." ) setattr(test_replacement, "__name__", member_name) members_to_replace[member_name] = test_replacement for member_name, new_member in members_to_replace.items(): dct[member_name] = new_member TestCaseType = Union[Sequence[object], Mapping[str, object]] # Can't use Sequence[TestCaseType] here as some clients may pass in a Generator[TestCaseType] StaticDataType = Union[Iterable[TestCaseType], Mapping[str, TestCaseType]] def data_provider( static_data: StaticDataType, *, test_limit: int = DEFAULT_TEST_LIMIT ) -> Callable[[Callable], Callable]: # We need to be able to iterate over static_data more than once # (for validation), so if we weren't passed in a dict, list, or tuple # then we'll just create a list from the data if not isinstance(static_data, (dict, list, tuple)): static_data = list(static_data) def test_decorator(test_method: Callable) -> Callable: update_test_limit(test_method, test_limit) setattr(test_method, DATA_PROVIDER_DATA_ATTR_NAME, static_data) return test_method return test_decorator class BaseTestMeta(type): def __new__(mcs, name: str, bases: Tuple[type, ...], dct: Dict[str, Any]) -> object: validate_provider_tests(dct) populate_data_provider_tests(dct) return super().__new__(mcs, name, bases, dict(dct)) class UnitTest(TestCase, metaclass=BaseTestMeta): pass LibCST-1.2.0/libcst/tests/000077500000000000000000000000001456464173300152315ustar00rootroot00000000000000LibCST-1.2.0/libcst/tests/__init__.py000066400000000000000000000002631456464173300173430ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. LibCST-1.2.0/libcst/tests/__main__.py000066400000000000000000000006571456464173300173330ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
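# A usage sketch of the data_provider machinery defined above: BaseTestMeta
# expands the decorated method into one test per entry, passing each dict
# value as positional arguments. The class and data names are invented for
# illustration.
from libcst.testing.utils import data_provider, UnitTest

class AdditionTest(UnitTest):
    @data_provider(
        {
            "positives": (1, 2, 3),
            "negatives": (-1, -2, -3),
        }
    )
    def test_add(self, a: int, b: int, expected: int) -> None:
        self.assertEqual(a + b, expected)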
from unittest import main from libcst._parser.entrypoints import is_native if __name__ == "__main__": parser_type = "native" if is_native() else "pure" print(f"running tests with {parser_type!r} parser") main(module=None, verbosity=2) LibCST-1.2.0/libcst/tests/pyre/000077500000000000000000000000001456464173300162105ustar00rootroot00000000000000LibCST-1.2.0/libcst/tests/pyre/.pyre_configuration000066400000000000000000000000771456464173300221230ustar00rootroot00000000000000{ "source_directories": [ "." ], "search_path": [] } LibCST-1.2.0/libcst/tests/pyre/simple_class.json000066400000000000000000000214701456464173300215650ustar00rootroot00000000000000{ "types": [ { "annotation": "typing.Type[typing.Sequence]", "location": { "start": { "column": 19, "line": 7 }, "stop": { "column": 27, "line": 7 } } }, { "annotation": "typing.Type[simple_class.Item]", "location": { "start": { "column": 6, "line": 10 }, "stop": { "column": 10, "line": 10 } } }, { "annotation": "typing.Callable(simple_class.Item.__init__)[[Named(self, simple_class.Item), Named(n, int)], None]", "location": { "start": { "column": 8, "line": 11 }, "stop": { "column": 16, "line": 11 } } }, { "annotation": "simple_class.Item", "location": { "start": { "column": 17, "line": 11 }, "stop": { "column": 21, "line": 11 } } }, { "annotation": "int", "location": { "start": { "column": 23, "line": 11 }, "stop": { "column": 24, "line": 11 } } }, { "annotation": "typing.Type[int]", "location": { "start": { "column": 26, "line": 11 }, "stop": { "column": 29, "line": 11 } } }, { "annotation": "typing.Type[None]", "location": { "start": { "column": 34, "line": 11 }, "stop": { "column": 38, "line": 11 } } }, { "annotation": "simple_class.Item", "location": { "start": { "column": 8, "line": 12 }, "stop": { "column": 12, "line": 12 } } }, { "annotation": "int", "location": { "start": { "column": 8, "line": 12 }, "stop": { "column": 19, "line": 12 } } }, { "annotation": "typing.Type[int]", "location": { "start": { "column": 21, "line": 12 }, "stop": { "column": 24, "line": 12 } } }, { "annotation": "int", "location": { "start": { "column": 27, "line": 12 }, "stop": { "column": 28, "line": 12 } } }, { "annotation": "typing.Type[simple_class.ItemCollector]", "location": { "start": { "column": 6, "line": 15 }, "stop": { "column": 19, "line": 15 } } }, { "annotation": "typing.Callable(simple_class.ItemCollector.get_items)[[Named(self, simple_class.ItemCollector), Named(n, int)], typing.Sequence[simple_class.Item]]", "location": { "start": { "column": 8, "line": 16 }, "stop": { "column": 17, "line": 16 } } }, { "annotation": "simple_class.ItemCollector", "location": { "start": { "column": 18, "line": 16 }, "stop": { "column": 22, "line": 16 } } }, { "annotation": "int", "location": { "start": { "column": 24, "line": 16 }, "stop": { "column": 25, "line": 16 } } }, { "annotation": "typing.Type[int]", "location": { "start": { "column": 27, "line": 16 }, "stop": { "column": 30, "line": 16 } } }, { "annotation": "typing.Type[typing.Sequence[simple_class.Item]]", "location": { "start": { "column": 35, "line": 16 }, "stop": { "column": 49, "line": 16 } } }, { "annotation": "typing.List[simple_class.Item]", "location": { "start": { "column": 15, "line": 17 }, "stop": { "column": 42, "line": 17 } } }, { "annotation": "typing.Type[simple_class.Item]", "location": { "start": { "column": 16, "line": 17 }, "stop": { "column": 20, "line": 17 } } }, { "annotation": "simple_class.Item", "location": { "start": { "column": 16, "line": 17 }, "stop": { "column": 23, "line": 
17 } } }, { "annotation": "int", "location": { "start": { "column": 28, "line": 17 }, "stop": { "column": 29, "line": 17 } } }, { "annotation": "typing.Type[range]", "location": { "start": { "column": 33, "line": 17 }, "stop": { "column": 38, "line": 17 } } }, { "annotation": "range", "location": { "start": { "column": 33, "line": 17 }, "stop": { "column": 41, "line": 17 } } }, { "annotation": "int", "location": { "start": { "column": 39, "line": 17 }, "stop": { "column": 40, "line": 17 } } }, { "annotation": "simple_class.ItemCollector", "location": { "start": { "column": 0, "line": 20 }, "stop": { "column": 9, "line": 20 } } }, { "annotation": "typing.Type[simple_class.ItemCollector]", "location": { "start": { "column": 12, "line": 20 }, "stop": { "column": 25, "line": 20 } } }, { "annotation": "simple_class.ItemCollector", "location": { "start": { "column": 12, "line": 20 }, "stop": { "column": 27, "line": 20 } } }, { "annotation": "typing.Sequence[simple_class.Item]", "location": { "start": { "column": 0, "line": 21 }, "stop": { "column": 5, "line": 21 } } }, { "annotation": "typing.Type[typing.Sequence[simple_class.Item]]", "location": { "start": { "column": 7, "line": 21 }, "stop": { "column": 21, "line": 21 } } }, { "annotation": "simple_class.ItemCollector", "location": { "start": { "column": 24, "line": 21 }, "stop": { "column": 33, "line": 21 } } }, { "annotation": "BoundMethod[typing.Callable(simple_class.ItemCollector.get_items)[[Named(self, simple_class.ItemCollector), Named(n, int)], typing.Sequence[simple_class.Item]], simple_class.ItemCollector]", "location": { "start": { "column": 24, "line": 21 }, "stop": { "column": 43, "line": 21 } } }, { "annotation": "typing.Sequence[simple_class.Item]", "location": { "start": { "column": 24, "line": 21 }, "stop": { "column": 46, "line": 21 } } }, { "annotation": "typing_extensions.Literal[3]", "location": { "start": { "column": 44, "line": 21 }, "stop": { "column": 45, "line": 21 } } }, { "annotation": "simple_class.Item", "location": { "start": { "column": 4, "line": 22 }, "stop": { "column": 8, "line": 22 } } }, { "annotation": "simple_class.Item", "location": { "start": { "column": 12, "line": 22 }, "stop": { "column": 17, "line": 22 } } }, { "annotation": "simple_class.Item", "location": { "start": { "column": 4, "line": 23 }, "stop": { "column": 8, "line": 23 } } }, { "annotation": "int", "location": { "start": { "column": 4, "line": 23 }, "stop": { "column": 15, "line": 23 } } } ] }LibCST-1.2.0/libcst/tests/pyre/simple_class.py000066400000000000000000000010231456464173300212340ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # fmt: off from typing import Sequence class Item: def __init__(self, n: int) -> None: self.number: int = n class ItemCollector: def get_items(self, n: int) -> Sequence[Item]: return [Item(i) for i in range(n)] collector = ItemCollector() items: Sequence[Item] = collector.get_items(3) for item in items: item.number LibCST-1.2.0/libcst/tests/test_add_slots.py000066400000000000000000000020661456464173300206220ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
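# The slot tests below rely on @add_slots rewriting a dataclass so its fields
# live in __slots__. A minimal sketch of the observable effect, importing the
# same private helper the test uses; the Point class is illustrative.
from dataclasses import dataclass
from libcst._add_slots import add_slots

@add_slots
@dataclass(frozen=True)
class Point:
    x: int
    y: int

p = Point(1, 2)
assert p.x == 1
assert not hasattr(p, "__dict__")  # slotted instances carry no instance dict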
import pickle from dataclasses import dataclass from typing import ClassVar from libcst._add_slots import add_slots from libcst.testing.utils import UnitTest # this test class needs to be defined at module level to test pickling. @add_slots @dataclass(frozen=True) class A: x: int y: str Z: ClassVar[int] = 5 class AddSlotsTest(UnitTest): def test_pickle(self) -> None: a = A(1, "foo") self.assertEqual(a, pickle.loads(pickle.dumps(a))) object.__delattr__(a, "y") self.assertEqual(a.x, pickle.loads(pickle.dumps(a)).x) def test_prevents_slots_overlap(self) -> None: class A: __slots__ = ("x",) class B(A): __slots__ = ("z",) @add_slots @dataclass class C(B): x: int y: str z: bool self.assertSequenceEqual(C.__slots__, ("y",)) LibCST-1.2.0/libcst/tests/test_batched_visitor.py000066400000000000000000000054161456464173300220210ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import cast from unittest.mock import Mock import libcst as cst from libcst import BatchableCSTVisitor, parse_module, visit_batched from libcst.testing.utils import UnitTest class BatchedVisitorTest(UnitTest): def test_simple(self) -> None: mock = Mock() class ABatchable(BatchableCSTVisitor): def visit_Del(self, node: cst.Del) -> None: object.__setattr__(node, "target", mock.visited_a()) class BBatchable(BatchableCSTVisitor): def visit_Del(self, node: cst.Del) -> None: object.__setattr__(node, "semicolon", mock.visited_b()) module = visit_batched(parse_module("del a"), [ABatchable(), BBatchable()]) del_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] # Check that each visitor was only called once mock.visited_a.assert_called_once() mock.visited_b.assert_called_once() # Check properties were set self.assertEqual(object.__getattribute__(del_, "target"), mock.visited_a()) self.assertEqual(object.__getattribute__(del_, "semicolon"), mock.visited_b()) def test_all_visits(self) -> None: mock = Mock() class Batchable(BatchableCSTVisitor): def visit_If(self, node: cst.If) -> None: object.__setattr__(node, "test", mock.visit_If()) def visit_If_body(self, node: cst.If) -> None: object.__setattr__(node, "leading_lines", mock.visit_If_body()) def leave_If_body(self, node: cst.If) -> None: object.__setattr__(node, "orelse", mock.leave_If_body()) def leave_If(self, original_node: cst.If) -> None: object.__setattr__( original_node, "whitespace_before_test", mock.leave_If() ) module = visit_batched(parse_module("if True: pass"), [Batchable()]) if_ = cast(cst.SimpleStatementLine, module.body[0]) # Check that each visitor was only called once mock.visit_If.assert_called_once() mock.leave_If.assert_called_once() mock.visit_If_body.assert_called_once() mock.leave_If_body.assert_called_once() # Check properties were set self.assertEqual(object.__getattribute__(if_, "test"), mock.visit_If()) self.assertEqual( object.__getattribute__(if_, "leading_lines"), mock.visit_If_body() ) self.assertEqual(object.__getattribute__(if_, "orelse"), mock.leave_If_body()) self.assertEqual( object.__getattribute__(if_, "whitespace_before_test"), mock.leave_If() ) LibCST-1.2.0/libcst/tests/test_deep_clone.py000066400000000000000000000031561456464173300207440ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
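# The batched-visitor tests above mutate nodes to prove each visitor method
# fires exactly once per traversal. A more typical, read-only sketch of
# visit_batched (names and the sample module are illustrative):
from typing import List

import libcst as cst
from libcst import BatchableCSTVisitor, parse_module, visit_batched

class NameCollector(BatchableCSTVisitor):
    def __init__(self) -> None:
        super().__init__()
        self.names: List[str] = []

    def visit_Name(self, node: cst.Name) -> None:
        self.names.append(node.value)

collector = NameCollector()
visit_batched(parse_module("x = y\n"), [collector])
print(collector.names)  # ["x", "y"], both names seen in a single traversal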
from textwrap import dedent from typing import Set import libcst as cst from libcst.testing.utils import data_provider, UnitTest class DeepCloneTest(UnitTest): @data_provider( ( # Simple program ( """ foo = 'toplevel' fn1(foo) fn2(foo) def fn_def(): foo = 'shadow' fn3(foo) """, ), ) ) def test_deep_clone(self, code: str) -> None: test_case = self class NodeGatherVisitor(cst.CSTVisitor): def __init__(self) -> None: self.nodes: Set[int] = set() def on_visit(self, node: cst.CSTNode) -> bool: self.nodes.add(id(node)) return True class NodeVerifyVisitor(cst.CSTVisitor): def __init__(self, nodes: Set[int]) -> None: self.nodes = nodes def on_visit(self, node: cst.CSTNode) -> bool: test_case.assertFalse( id(node) in self.nodes, f"Node {node} was not cloned properly!" ) return True module = cst.parse_module(dedent(code)) gatherer = NodeGatherVisitor() module.visit(gatherer) new_module = module.deep_clone() self.assertTrue(module.deep_equals(new_module)) new_module.visit(NodeVerifyVisitor(gatherer.nodes)) LibCST-1.2.0/libcst/tests/test_deep_replace.py000066400000000000000000000106671456464173300212640ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from textwrap import dedent from typing import Optional import libcst as cst from libcst.testing.utils import UnitTest class DeepReplaceTest(UnitTest): def test_deep_replace_simple(self) -> None: old_code = """ pass """ new_code = """ break """ module = cst.parse_module(dedent(old_code)) pass_stmt = cst.ensure_type(module.body[0], cst.SimpleStatementLine).body[0] new_module = cst.ensure_type( module.deep_replace(pass_stmt, cst.Break()), cst.Module ) self.assertEqual(new_module.code, dedent(new_code)) def test_deep_replace_complex(self) -> None: old_code = """ def a(): def b(): def c(): pass """ new_code = """ def a(): def b(): def d(): break """ module = cst.parse_module(dedent(old_code)) outer_fun = cst.ensure_type(module.body[0], cst.FunctionDef) middle_fun = cst.ensure_type( cst.ensure_type(outer_fun.body, cst.IndentedBlock).body[0], cst.FunctionDef ) inner_fun = cst.ensure_type( cst.ensure_type(middle_fun.body, cst.IndentedBlock).body[0], cst.FunctionDef ) new_module = cst.ensure_type( module.deep_replace( inner_fun, cst.FunctionDef( name=cst.Name("d"), params=cst.Parameters(), body=cst.SimpleStatementSuite(body=(cst.Break(),)), ), ), cst.Module, ) self.assertEqual(new_module.code, dedent(new_code)) def test_deep_replace_identity(self) -> None: old_code = """ pass """ new_code = """ break """ module = cst.parse_module(dedent(old_code)) new_module = module.deep_replace( module, cst.Module( header=(cst.EmptyLine(),), body=(cst.SimpleStatementLine(body=(cst.Break(),)),), ), ) self.assertEqual(new_module.code, dedent(new_code)) def test_deep_remove_complex(self) -> None: old_code = """ def a(): def b(): def c(): print("Hello, world!") """ new_code = """ def a(): def b(): pass """ module = cst.parse_module(dedent(old_code)) outer_fun = cst.ensure_type(module.body[0], cst.FunctionDef) middle_fun = cst.ensure_type( cst.ensure_type(outer_fun.body, cst.IndentedBlock).body[0], cst.FunctionDef ) inner_fun = cst.ensure_type( cst.ensure_type(middle_fun.body, cst.IndentedBlock).body[0], cst.FunctionDef ) new_module = cst.ensure_type(module.deep_remove(inner_fun), cst.Module) self.assertEqual(new_module.code, dedent(new_code)) def test_with_deep_changes_complex(self) -> None: old_code = """ def a(): def b(): 
def c(): print("Hello, world!") """ new_code = """ def a(): def b(): def c(): print("Goodbye, world!") """ class NodeFinder(cst.CSTVisitor): # I wrote this so I wouldn't have to do a nasty multi-level # tree walk, but it is also a nice example of how to implement # a simple node find in the absence of official support. def __init__(self) -> None: super().__init__() self.node: Optional[cst.CSTNode] = None def visit_SimpleString(self, node: cst.SimpleString) -> None: self.node = node module = cst.parse_module(dedent(old_code)) node_finder = NodeFinder() module.visit(node_finder) node = node_finder.node assert node is not None, "Expected to find a string node!" new_module = cst.ensure_type( module.with_deep_changes(node, value='"Goodbye, world!"'), cst.Module ) self.assertEqual(new_module.code, dedent(new_code)) LibCST-1.2.0/libcst/tests/test_e2e.py000066400000000000000000000051221456464173300173150ustar00rootroot00000000000000import contextlib import os from pathlib import Path from tempfile import TemporaryDirectory from typing import Generator from unittest import TestCase from libcst import BaseExpression, Call, matchers as m, Name from libcst.codemod import ( CodemodContext, gather_files, parallel_exec_transform_with_prettyprint, VisitorBasedCodemodCommand, ) from libcst.codemod.visitors import AddImportsVisitor class PrintToPPrintCommand(VisitorBasedCodemodCommand): def leave_Call(self, original_node: Call, updated_node: Call) -> BaseExpression: if m.matches(updated_node, m.Call(func=m.Name("print"))): AddImportsVisitor.add_needed_import( self.context, "pprint", "pprint", ) return updated_node.with_changes(func=Name("pprint")) return super().leave_Call(original_node, updated_node) @contextlib.contextmanager def temp_workspace() -> Generator[Path, None, None]: cwd = os.getcwd() with TemporaryDirectory() as temp_dir: try: ws = Path(temp_dir).resolve() os.chdir(ws) yield ws finally: os.chdir(cwd) class ToolE2ETest(TestCase): def test_leaky_codemod(self) -> None: with temp_workspace() as tmp: # File to trigger codemod example: Path = tmp / "example.py" example.write_text("""print("Hello")""") # File that should not be modified other = tmp / "other.py" other.touch() # Just a dir named "dir.py", should be ignored adir = tmp / "dir.py" adir.mkdir() # Run command command_instance = PrintToPPrintCommand(CodemodContext()) files = gather_files(".") result = parallel_exec_transform_with_prettyprint( command_instance, files, format_code=False, hide_progress=True, ) print(result) # Check results self.assertEqual(2, result.successes) self.assertEqual(0, result.skips) self.assertEqual(0, result.failures) # Expect example.py to be modified self.assertIn( "from pprint import pprint", example.read_text(), "import missing in example.py", ) # Expect other.py to NOT be modified self.assertNotIn( "from pprint import pprint", other.read_text(), "import found in other.py", ) LibCST-1.2.0/libcst/tests/test_exceptions.py000066400000000000000000000055631456464173300210340ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import pickle from textwrap import dedent import libcst as cst from libcst.testing.utils import data_provider, UnitTest class ExceptionsTest(UnitTest): @data_provider( { "simple": ( cst.ParserSyntaxError( "some message", lines=["abcd"], raw_line=1, raw_column=0 ), dedent( """ Syntax Error @ 1:1. 
some message abcd ^ """ ).strip(), ), "tab_expansion": ( cst.ParserSyntaxError( "some message", lines=["\tabcd\r\n"], raw_line=1, raw_column=2 ), dedent( """ Syntax Error @ 1:10. some message abcd ^ """ ).strip(), ), "shows_last_line_with_text": ( cst.ParserSyntaxError( "some message", lines=["abcd\n", "efgh\n", "\n", "\n", "\n", "\n", "\n"], raw_line=5, raw_column=0, ), dedent( """ Syntax Error @ 5:1. some message efgh ^ """ ).strip(), ), "empty_file": ( cst.ParserSyntaxError( "some message", lines=[""], raw_line=1, raw_column=0 ), dedent( """ Syntax Error @ 1:1. some message """ # There's no code snippet here because the input file was empty. ).strip(), ), } ) def test_parser_syntax_error_str( self, err: cst.ParserSyntaxError, expected: str ) -> None: self.assertEqual(str(err), expected) def test_pickle(self) -> None: """ It's common to use LibCST with multiprocessing to process files in parallel. Multiprocessing uses pickle by default, so we should make sure our errors can be pickled/unpickled. """ orig_exception = cst.ParserSyntaxError( "some message", lines=["abcd"], raw_line=1, raw_column=0 ) pickled_blob = pickle.dumps(orig_exception) new_exception = pickle.loads(pickled_blob) self.assertEqual(repr(orig_exception), repr(new_exception)) self.assertEqual(str(orig_exception), str(new_exception)) LibCST-1.2.0/libcst/tests/test_fuzz.py000066400000000000000000000176461456464173300176560ustar00rootroot00000000000000# Copyright (c) Zac Hatfield-Dodds # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Fuzz-tests for libCST, by Zac Hatfield-Dodds (zac@hypothesis.works) For Hypothesis documentation, see https://hypothesis.readthedocs.io/ For my Python code generator, see https://pypi.org/project/hypothesmith/ """ import ast import os import unittest from datetime import timedelta import hypothesis from hypothesmith import from_grammar import libcst # If in doubt, you should use these "unit test" settings. They tune the timeouts # and example-reproduction behaviour for these tests' unusually large inputs. hypothesis.settings.register_profile( name="settings-for-unit-tests", print_blob=True, deadline=timedelta(seconds=1800), suppress_health_check=[ hypothesis.HealthCheck.too_slow, hypothesis.HealthCheck.filter_too_much, ], ) hypothesis.settings.load_profile("settings-for-unit-tests") # When the test settings stop finding new bugs, you can run `python test_fuzz.py` # to find more. We turn the number of examples up, and skip the initial "reuse" # phase in favor of looking for new bugs... but put everything we find in the # database so it will be replayed next time we use the normal settings. hypothesis.settings.register_profile( name="settings-for-fuzzing", parent=hypothesis.settings.get_profile("settings-for-unit-tests"), max_examples=1_000_000_000, phases=(hypothesis.Phase.generate, hypothesis.Phase.shrink), ) class FuzzTest(unittest.TestCase): """Fuzz-tests based on Hypothesis and Hypothesmith.""" @unittest.skipUnless( bool(os.environ.get("HYPOTHESIS", False)), "Hypothesis not requested" ) @hypothesis.given(source_code=from_grammar(start="file_input")) def test_parsing_compilable_module_strings(self, source_code: str) -> None: """The `from_grammar()` strategy generates strings from Python's grammar. This has a bunch of weird edge cases because "strings accepted by Cpython" isn't actually the same set as "strings accepted by the grammar", and for a few other weird reasons you can ask Zac about if you're interested. 
Valid values for ``start`` are ``"single_input"``, ``"file_input"``, or ``"eval_input"``; respectively a single interactive statement, a module or sequence of commands read from a file, and input for the eval() function. .. warning:: DO NOT EXECUTE CODE GENERATED BY THE `from_grammar()` STRATEGY. It could do literally anything that running Python code is able to do, including changing, deleting, or uploading important data. Arbitrary code can be useful, but "arbitrary code execution" can be very, very bad. """ self.reject_invalid_code(source_code, mode="exec") self.reject_unsupported_code(source_code) tree = libcst.parse_module(source_code) self.assertEqual(source_code, tree.code) @unittest.skipUnless( bool(os.environ.get("HYPOTHESIS", False)), "Hypothesis not requested" ) @hypothesis.given(source_code=from_grammar(start="eval_input").map(str.strip)) def test_parsing_compilable_expression_strings(self, source_code: str) -> None: """Much like statements, but for expressions this time. We change the start production of the grammar, the compile mode, and the libCST parse function, but codegen is as for statements. """ self.reject_invalid_code(source_code, mode="eval") self.reject_unsupported_code(source_code) try: tree = libcst.parse_expression(source_code) self.verify_identical_asts( source_code, libcst.Module([]).code_for_node(tree), mode="eval" ) except libcst.ParserSyntaxError: # Unlike statements, which allow us to strip trailing whitespace, # expressions require no whitespace or newlines. It's much more work # to attempt to detect and strip comments and whitespace at the end # of expressions, so instead we will reject this input. There's a # chance we could miss some stuff here, but it should be caught by # statement or module fuzzers. We will still catch any instance where # expressions are parsed and rendered by LibCST in a way that changes # the AST. hypothesis.reject() @unittest.skipUnless( bool(os.environ.get("HYPOTHESIS", False)), "Hypothesis not requested" ) @hypothesis.given( source_code=from_grammar(start="single_input").map( lambda s: s.replace("\n", "") + "\n" ) ) def test_parsing_compilable_statement_strings(self, source_code: str) -> None: """Just like the above, but for statements. We change the start production of the grammar, the compile mode, the libCST parse function, and the codegen method. """ self.reject_invalid_code(source_code, mode="single") self.reject_unsupported_code(source_code) tree = libcst.parse_statement(source_code) self.verify_identical_asts( source_code, libcst.Module([]).code_for_node(tree), mode="single" ) def verify_identical_asts( self, original_code: str, rendered_code: str, mode: str ) -> None: assert mode in {"eval", "exec", "single"} self.assertEqual( ast.dump(ast.parse(original_code, mode=mode)), ast.dump(ast.parse(rendered_code, mode=mode)), ) @staticmethod def reject_invalid_code(source_code: str, mode: str) -> None: """Input validation helper shared by modules, statements, and expressions.""" # We start by compiling our source code to byte code, and rejecting inputs # where this fails. This is to guard against spurious failures due to # e.g. `eval` only being a keyword in Python 3.7 assert mode in {"eval", "exec", "single"} hypothesis.note(source_code) try: compile(source_code, "", mode) except Exception: # We're going to check here that libCST also rejects this string. # If libCST parses it, that's a test failure; if not, we reject this input # so Hypothesis spends as little time as possible exploring invalid # code. 
(usually I'd use a custom mutator, but this is free so...) parser = dict( eval=libcst.parse_expression, exec=libcst.parse_module, single=libcst.parse_statement, ) try: tree = parser[mode](source_code) msg = f"libCST parsed a string rejected by compile() into {tree!r}" assert False, msg except Exception: hypothesis.reject() assert False, "unreachable" @staticmethod def reject_unsupported_code(source_code: str) -> None: """ There are a few edge cases in Python that are too obscure and too hard to support reasonably. If we encounter code that is generated by Hypothesis which contains such features, we should reject it so we don't get failures that we aren't going to fix. """ if "\f" in source_code: # This is standard whitespace, but it resets the indentation. So it # takes the unique position of being allowed in an un-indented prefix # while still making the program parse. We don't have a concept for # such whitespace, so we have nowhere to put it. Consequently, while # we can parse such code, we cannot round-trip it without losing the # \f characters. So, since part of these fuzz tests verifies that we # round trip perfectly, reject this. hypothesis.reject() if __name__ == "__main__": hypothesis.settings.load_profile("settings-for-fuzzing") unittest.main() LibCST-1.2.0/libcst/tests/test_pyre_integration.py000066400000000000000000000102111456464173300222210ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json from pathlib import Path from typing import Dict, List, Mapping, Optional, Tuple, Union import libcst as cst from libcst.metadata import MetadataWrapper, PositionProvider from libcst.metadata.type_inference_provider import PyreData from libcst.testing.utils import data_provider, UnitTest TEST_SUITE_PATH: Path = Path(__file__).parent / "pyre" class TypeVerificationVisitor(cst.CSTVisitor): METADATA_DEPENDENCIES = (PositionProvider,) def __init__( self, lookup: Mapping[Tuple[int, int, int, int], str], test: UnitTest ) -> None: self.lookup = lookup self.test = test self.attributes: List[cst.Attribute] = [] # stack of Attribute self.imports: List[Union[cst.Import, cst.ImportFrom]] = [] # stack of imports self.annotations: List[cst.Annotation] = [] # stack of Annotation super().__init__() def visit_Attribute(self, node: cst.Attribute) -> Optional[bool]: pos = self.get_metadata(PositionProvider, node) start = pos.start end = pos.end self.attributes.append(node) tup = (start.line, start.column, end.line, end.column) # remove this if condition when the type issues are fixed. self.test.assertIn( tup, self.lookup, f"Attribute node {node} at {tup} found without inferred type.", ) def leave_Attribute(self, original_node: cst.Attribute) -> None: self.attributes.pop() def visit_Name(self, node: cst.Name) -> Optional[bool]: if ( len(self.imports) > 0 or len(self.attributes) > 0 or len(self.annotations) > 0 ): return pos = self.get_metadata(PositionProvider, node) start = pos.start end = pos.end tup = (start.line, start.column, end.line, end.column) # remove this if condition when the type issues are fixed. 
if node.value not in {"n", "i"}: self.test.assertIn( tup, self.lookup, f"Name node {node.value} at {tup} found without inferred type.", ) def visit_Import(self, node: cst.Import) -> Optional[bool]: self.imports.append(node) def leave_Import(self, original_node: cst.Import) -> None: self.imports.pop() def visit_ImportFrom(self, node: cst.ImportFrom) -> Optional[bool]: self.imports.append(node) def leave_ImportFrom(self, original_node: cst.ImportFrom) -> None: self.imports.pop() def visit_Annotation(self, node: cst.Annotation) -> Optional[bool]: self.annotations.append(node) def leave_Annotation(self, original_node: cst.Annotation) -> None: self.annotations.pop() class PyreIntegrationTest(UnitTest): # pyre-fixme[56]: Pyre was not able to infer the type of argument # `comprehension((source_path, data_path) for generators(generator((source_path, # data_path) in zip(TEST_SUITE_PATH.glob("*.py"), TEST_SUITE_PATH.glob("*.json")) # if )))` to decorator factory `libcst.testing.utils.data_provider`. @data_provider( ( (source_path, data_path) for source_path, data_path in zip( TEST_SUITE_PATH.glob("*.py"), TEST_SUITE_PATH.glob("*.json") ) ) ) def test_type_availability(self, source_path: Path, data_path: Path) -> None: module = cst.parse_module(source_path.read_text()) data: PyreData = json.loads(data_path.read_text()) lookup: Dict[Tuple[int, int, int, int], str] = {} for t in data["types"]: loc = t["location"] start = loc["start"] stop = loc["stop"] lookup[(start["line"], start["column"], stop["line"], stop["column"])] = t[ "annotation" ] MetadataWrapper(module).visit(TypeVerificationVisitor(lookup, self)) if __name__ == "__main__": import sys print("run `scripts/regenerate-fixtures.py` instead") sys.exit(1) LibCST-1.2.0/libcst/tests/test_roundtrip.py000066400000000000000000000017221456464173300206720ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path from unittest import TestCase from libcst import parse_module from libcst._parser.entrypoints import is_native fixtures: Path = Path(__file__).parent.parent.parent / "native/libcst/tests/fixtures" class RoundTripTests(TestCase): def test_clean_roundtrip(self) -> None: if not is_native(): self.skipTest("pure python parser doesn't work with this") self.assertTrue(fixtures.exists(), f"{fixtures} should exist") files = list(fixtures.iterdir()) self.assertGreater(len(files), 0) for file in files: with self.subTest(file=str(file)): src = file.read_text(encoding="utf-8") mod = parse_module(src) self.assertEqual(mod.code, src) LibCST-1.2.0/libcst/tests/test_tabs.py000066400000000000000000000013771456464173300176030ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
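# A minimal illustrative sketch of the rule exercised by the suite below:
# expand_tabs advances each tab to the next multiple of 8 columns, so the
# padding a tab produces depends on how many characters precede it. (The
# aliased import just avoids clashing with this file's own import.)
from libcst._tabs import expand_tabs as _expand_tabs_example

assert _expand_tabs_example("\t") == " " * 8  # a lone tab fills a whole 8-column stop
assert _expand_tabs_example("abcd\t") == "abcd" + " " * 4  # pads only up to the next stop
assert _expand_tabs_example("abcdefgh\t") == "abcdefgh" + " " * 8  # already at a stop: full tab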
from libcst._tabs import expand_tabs from libcst.testing.utils import data_provider, UnitTest class ExpandTabsTest(UnitTest): @data_provider( [ ("\t", " " * 8), ("\t\t", " " * 16), (" \t", " " * 8), ("\t ", " " * 12), ("abcd\t", "abcd "), ("abcdefg\t", "abcdefg "), ("abcdefgh\t", "abcdefgh "), ("\tsuffix", " suffix"), ] ) def test_expand_tabs(self, input: str, output: str) -> None: self.assertEqual(expand_tabs(input), output) LibCST-1.2.0/libcst/tests/test_tool.py000066400000000000000000000622101456464173300176200ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from textwrap import dedent from libcst import parse_module from libcst.testing.utils import UnitTest from libcst.tool import dump class PrettyPrintNodesTest(UnitTest): def test_full_tree(self) -> None: module = r""" Module( body=[ FunctionDef( name=Name( value='foo', lpar=[], rpar=[], ), params=Parameters( params=[ Param( name=Name( value='a', lpar=[], rpar=[], ), annotation=Annotation( annotation=Name( value='str', lpar=[], rpar=[], ), whitespace_before_indicator=SimpleWhitespace( value='', ), whitespace_after_indicator=SimpleWhitespace( value=' ', ), ), equal=MaybeSentinel.DEFAULT, default=None, comma=MaybeSentinel.DEFAULT, star='', whitespace_after_star=SimpleWhitespace( value='', ), whitespace_after_param=SimpleWhitespace( value='', ), ), ], star_arg=MaybeSentinel.DEFAULT, kwonly_params=[], star_kwarg=None, posonly_params=[], posonly_ind=MaybeSentinel.DEFAULT, ), body=IndentedBlock( body=[ SimpleStatementLine( body=[ Pass( semicolon=Semicolon( whitespace_before=SimpleWhitespace( value=' ', ), whitespace_after=SimpleWhitespace( value=' ', ), ), ), Pass( semicolon=MaybeSentinel.DEFAULT, ), ], leading_lines=[], trailing_whitespace=TrailingWhitespace( whitespace=SimpleWhitespace( value='', ), comment=None, newline=Newline( value=None, ), ), ), SimpleStatementLine( body=[ Return( value=None, whitespace_after_return=SimpleWhitespace( value='', ), semicolon=MaybeSentinel.DEFAULT, ), ], leading_lines=[], trailing_whitespace=TrailingWhitespace( whitespace=SimpleWhitespace( value='', ), comment=None, newline=Newline( value=None, ), ), ), ], header=TrailingWhitespace( whitespace=SimpleWhitespace( value='', ), comment=None, newline=Newline( value=None, ), ), indent=None, footer=[], ), decorators=[], returns=Annotation( annotation=Name( value='None', lpar=[], rpar=[], ), whitespace_before_indicator=SimpleWhitespace( value=' ', ), whitespace_after_indicator=SimpleWhitespace( value=' ', ), ), asynchronous=None, leading_lines=[], lines_after_decorators=[], whitespace_after_def=SimpleWhitespace( value=' ', ), whitespace_after_name=SimpleWhitespace( value='', ), whitespace_before_params=SimpleWhitespace( value='', ), whitespace_before_colon=SimpleWhitespace( value='', ), type_parameters=None, whitespace_after_type_parameters=SimpleWhitespace( value='', ), ), ], header=[], footer=[], encoding='utf-8', default_indent=' ', default_newline='\n', has_trailing_newline=True, ) """ # Compare against a known string representation, as unmangled from # python indent. 
self.assertEqual( dedent(module[1:])[:-1], dump( parse_module("def foo(a: str) -> None:\n pass ; pass\n return\n"), show_whitespace=True, show_defaults=True, show_syntax=True, ), ) def test_hidden_whitespace(self) -> None: module = r""" Module( body=[ FunctionDef( name=Name( value='foo', lpar=[], rpar=[], ), params=Parameters( params=[ Param( name=Name( value='a', lpar=[], rpar=[], ), annotation=Annotation( annotation=Name( value='str', lpar=[], rpar=[], ), ), equal=MaybeSentinel.DEFAULT, default=None, comma=MaybeSentinel.DEFAULT, star='', ), ], star_arg=MaybeSentinel.DEFAULT, kwonly_params=[], star_kwarg=None, posonly_params=[], posonly_ind=MaybeSentinel.DEFAULT, ), body=IndentedBlock( body=[ SimpleStatementLine( body=[ Pass( semicolon=Semicolon(), ), Pass( semicolon=MaybeSentinel.DEFAULT, ), ], ), SimpleStatementLine( body=[ Return( value=None, semicolon=MaybeSentinel.DEFAULT, ), ], ), ], ), decorators=[], returns=Annotation( annotation=Name( value='None', lpar=[], rpar=[], ), ), asynchronous=None, type_parameters=None, ), ], encoding='utf-8', default_indent=' ', default_newline='\n', has_trailing_newline=True, ) """ # Compare against a known string representation, as unmangled from # python indent. self.assertEqual( dedent(module[1:])[:-1], dump( parse_module("def foo(a: str) -> None:\n pass ; pass\n return\n"), show_whitespace=False, show_defaults=True, show_syntax=True, ), ) def test_hidden_defaults(self) -> None: module = r""" Module( body=[ FunctionDef( name=Name( value='foo', ), params=Parameters( params=[ Param( name=Name( value='a', ), annotation=Annotation( annotation=Name( value='str', ), whitespace_before_indicator=SimpleWhitespace( value='', ), ), star='', ), ], ), body=IndentedBlock( body=[ SimpleStatementLine( body=[ Pass( semicolon=Semicolon( whitespace_before=SimpleWhitespace( value=' ', ), whitespace_after=SimpleWhitespace( value=' ', ), ), ), Pass(), ], ), SimpleStatementLine( body=[ Return( whitespace_after_return=SimpleWhitespace( value='', ), ), ], ), ], ), returns=Annotation( annotation=Name( value='None', ), whitespace_before_indicator=SimpleWhitespace( value=' ', ), ), ), ], ) """ # Compare against a known string representation, as unmangled from # python indent. self.assertEqual( dedent(module[1:])[:-1], dump( parse_module("def foo(a: str) -> None:\n pass ; pass\n return\n"), show_whitespace=True, show_defaults=False, show_syntax=True, ), ) def test_hidden_whitespace_and_defaults(self) -> None: module = r""" Module( body=[ FunctionDef( name=Name( value='foo', ), params=Parameters( params=[ Param( name=Name( value='a', ), annotation=Annotation( annotation=Name( value='str', ), ), star='', ), ], ), body=IndentedBlock( body=[ SimpleStatementLine( body=[ Pass( semicolon=Semicolon(), ), Pass(), ], ), SimpleStatementLine( body=[ Return(), ], ), ], ), returns=Annotation( annotation=Name( value='None', ), ), ), ], ) """ # Compare against a known string representation, as unmangled from # python indent. 
self.assertEqual( dedent(module[1:])[:-1], dump( parse_module("def foo(a: str) -> None:\n pass ; pass\n return\n"), show_whitespace=False, show_defaults=False, show_syntax=True, ), ) def test_hidden_syntax(self) -> None: module = r""" Module( body=[ FunctionDef( name=Name( value='foo', lpar=[], rpar=[], ), params=Parameters( params=[ Param( name=Name( value='a', lpar=[], rpar=[], ), annotation=Annotation( annotation=Name( value='str', lpar=[], rpar=[], ), whitespace_before_indicator=SimpleWhitespace( value='', ), whitespace_after_indicator=SimpleWhitespace( value=' ', ), ), default=None, star='', whitespace_after_star=SimpleWhitespace( value='', ), whitespace_after_param=SimpleWhitespace( value='', ), ), ], star_arg=MaybeSentinel.DEFAULT, kwonly_params=[], star_kwarg=None, posonly_params=[], posonly_ind=MaybeSentinel.DEFAULT, ), body=IndentedBlock( body=[ SimpleStatementLine( body=[ Pass(), Pass(), ], leading_lines=[], trailing_whitespace=TrailingWhitespace( whitespace=SimpleWhitespace( value='', ), comment=None, newline=Newline( value=None, ), ), ), SimpleStatementLine( body=[ Return( value=None, whitespace_after_return=SimpleWhitespace( value='', ), ), ], leading_lines=[], trailing_whitespace=TrailingWhitespace( whitespace=SimpleWhitespace( value='', ), comment=None, newline=Newline( value=None, ), ), ), ], header=TrailingWhitespace( whitespace=SimpleWhitespace( value='', ), comment=None, newline=Newline( value=None, ), ), indent=None, footer=[], ), decorators=[], returns=Annotation( annotation=Name( value='None', lpar=[], rpar=[], ), whitespace_before_indicator=SimpleWhitespace( value=' ', ), whitespace_after_indicator=SimpleWhitespace( value=' ', ), ), asynchronous=None, leading_lines=[], lines_after_decorators=[], whitespace_after_def=SimpleWhitespace( value=' ', ), whitespace_after_name=SimpleWhitespace( value='', ), whitespace_before_params=SimpleWhitespace( value='', ), whitespace_before_colon=SimpleWhitespace( value='', ), type_parameters=None, whitespace_after_type_parameters=SimpleWhitespace( value='', ), ), ], header=[], footer=[], ) """ # Compare against a known string representation, as unmangled from # python indent. self.assertEqual( dedent(module[1:])[:-1], dump( parse_module("def foo(a: str) -> None:\n pass ; pass\n return\n"), show_whitespace=True, show_defaults=True, show_syntax=False, ), ) def test_hidden_whitespace_and_syntax(self) -> None: module = r""" Module( body=[ FunctionDef( name=Name( value='foo', lpar=[], rpar=[], ), params=Parameters( params=[ Param( name=Name( value='a', lpar=[], rpar=[], ), annotation=Annotation( annotation=Name( value='str', lpar=[], rpar=[], ), ), default=None, star='', ), ], star_arg=MaybeSentinel.DEFAULT, kwonly_params=[], star_kwarg=None, posonly_params=[], posonly_ind=MaybeSentinel.DEFAULT, ), body=IndentedBlock( body=[ SimpleStatementLine( body=[ Pass(), Pass(), ], ), SimpleStatementLine( body=[ Return( value=None, ), ], ), ], ), decorators=[], returns=Annotation( annotation=Name( value='None', lpar=[], rpar=[], ), ), asynchronous=None, type_parameters=None, ), ], ) """ # Compare against a known string representation, as unmangled from # python indent. 
self.assertEqual( dedent(module[1:])[:-1], dump( parse_module("def foo(a: str) -> None:\n pass ; pass\n return\n"), show_whitespace=False, show_defaults=True, show_syntax=False, ), ) def test_hidden_defaults_and_syntax(self) -> None: module = r""" Module( body=[ FunctionDef( name=Name( value='foo', ), params=Parameters( params=[ Param( name=Name( value='a', ), annotation=Annotation( annotation=Name( value='str', ), whitespace_before_indicator=SimpleWhitespace( value='', ), ), star='', ), ], ), body=IndentedBlock( body=[ SimpleStatementLine( body=[ Pass(), Pass(), ], ), SimpleStatementLine( body=[ Return( whitespace_after_return=SimpleWhitespace( value='', ), ), ], ), ], ), returns=Annotation( annotation=Name( value='None', ), whitespace_before_indicator=SimpleWhitespace( value=' ', ), ), ), ], ) """ # Compare against a known string representation, as unmangled from # python indent. self.assertEqual( dedent(module[1:])[:-1], dump( parse_module("def foo(a: str) -> None:\n pass ; pass\n return\n"), show_whitespace=True, show_defaults=False, show_syntax=False, ), ) def test_hidden_whitespace_and_defaults_and_syntax(self) -> None: module = r""" Module( body=[ FunctionDef( name=Name( value='foo', ), params=Parameters( params=[ Param( name=Name( value='a', ), annotation=Annotation( annotation=Name( value='str', ), ), star='', ), ], ), body=IndentedBlock( body=[ SimpleStatementLine( body=[ Pass(), Pass(), ], ), SimpleStatementLine( body=[ Return(), ], ), ], ), returns=Annotation( annotation=Name( value='None', ), ), ), ], ) """ # Compare against a known string representation, as unmangled from # python indent. self.assertEqual( dedent(module[1:])[:-1], dump( parse_module("def foo(a: str) -> None:\n pass ; pass\n return\n"), show_whitespace=False, show_defaults=False, show_syntax=False, ), ) LibCST-1.2.0/libcst/tests/test_type_enforce.py000066400000000000000000000175341456464173300213360ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
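# A minimal illustrative sketch of is_value_of_type, which the suite below
# exercises exhaustively: it is a runtime structural check of a value against
# a typing annotation. (The aliased imports just avoid clashing with this
# file's own imports.)
from typing import List as _ListExample, Optional as _OptionalExample
from libcst._type_enforce import is_value_of_type as _is_value_of_type_example

assert _is_value_of_type_example([1, 2], _ListExample[int])  # every element is checked
assert _is_value_of_type_example(None, _OptionalExample[str])  # None satisfies Optional
assert not _is_value_of_type_example([1, "a"], _ListExample[int])  # one bad element fails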
from types import MappingProxyType from typing import ( Any, AsyncGenerator, ClassVar, Dict, Iterable, List, Mapping, MutableMapping, NamedTuple, Optional, Sequence, Set, Tuple, Type, TYPE_CHECKING, Union, ) from typing_extensions import Literal from libcst._type_enforce import is_value_of_type from libcst.testing.utils import data_provider, UnitTest if TYPE_CHECKING: from collections import Counter # noqa: F401 class MyExampleClass: pass class MyExampleChildClass(MyExampleClass): pass class WeirdSubclassOfList(List[int]): pass class MyExampleMetaclass(type): pass class MyExampleClassWithMetaclass(metaclass=MyExampleMetaclass): pass # lint-ignore: NoNamedTupleRule class NamedTupleSubclass(NamedTuple): a: str b: int class TypeEnforcementTest(UnitTest): @data_provider( [ # basic types and Optionals ("foo", str), (123, int), (123, Optional[int]), # int is compatible with float (123, float), # ClassVar (123, ClassVar[int]), (123, ClassVar), # implicitly ClassVar[Any] # Literals (123, Literal[123]), ("abc", Literal["abc"]), (True, Literal[True]), ("of", Literal["one", "of", "many"]), ("of", Union[Literal["one"], Literal["of"], Literal["many"]]), # Unions are supported (123, Union[str, int, None, MyExampleClass]), ("foo", Union[str, int, None, MyExampleClass]), (None, Union[str, int, None, MyExampleClass]), (MyExampleClass(), Union[str, int, None, MyExampleClass]), # And unions are supported recursively (None, Union[Optional[str], Optional[int]]), (123, Union[Optional[str], Optional[int]]), # Iterables are supported and must match the type covariantly ([123], List[int]), ([123], Iterable[int]), # pyre-ignore This is a type specification ([123], Iterable), ((123,), Iterable[int]), ([123], Sequence[int]), ((123,), Sequence[int]), ({123}, Set[int]), (WeirdSubclassOfList([123]), List[int]), (WeirdSubclassOfList([123]), WeirdSubclassOfList), # Tuples must match the number of args and each type ((123,), Tuple[int]), ((123, "foo", None), Tuple[int, str, Optional[str]]), ((123,), Tuple), (NamedTupleSubclass("foo", 123), Tuple[str, int]), (NamedTupleSubclass("foo", 123), NamedTupleSubclass), # forward references should just pass for anything # pyre-ignore Pyre doesn't think a forwardref is a typevar. (MyExampleClass(), Optional["Counter"]), # class variables get unwrapped, and behave like their underlying type (MyExampleClass(), ClassVar[MyExampleClass]), # dicts work ({123: "foo"}, Dict[int, str]), ({123: None}, Dict[int, Optional[str]]), ({123: "foo"}, Dict), ({123: None}, Mapping[int, Optional[str]]), ({123: "foo"}, Mapping), ({123: {234: MyExampleClass()}}, Mapping[int, Dict[int, MyExampleClass]]), (MappingProxyType({}), Mapping), (MappingProxyType({}), MappingProxyType), (MappingProxyType({123: None}), Mapping[int, Optional[str]]), # covariance vs invariance: for non-mutable types we allow subclasses. Here # our type is iterable lists of ints, which means we allow an iterable of a subclass of list (of ints) ([WeirdSubclassOfList([123])], Iterable[List[int]]), # or a bit more clearly with some simple classes: ({"foo": MyExampleChildClass()}, Mapping[str, MyExampleClass]), ([MyExampleChildClass()], Iterable[MyExampleClass]), ([MyExampleClass()], Iterable[MyExampleClass]), # note that the invariant check doesn't propagate unnecessarily. If we have # an expected type like: # List[ -> means it's invariant # Iterable[ -> means it's covariant # MyExampleClass -> has a subclass # # We would still allow List[Iterable[MyExampleChildClass]]. 
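            # The List layer is invariant because a List[MyExampleChildClass]
            # accepted where List[MyExampleClass] is expected could later have
            # a plain MyExampleClass appended to it, breaking the stronger
            # element guarantee; read-only layers like Iterable cannot be
            # written through, which is why covariance is safe there.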
([[MyExampleChildClass()]], List[Iterable[MyExampleClass]]), (None, Any), (123, Any), (MyExampleClassWithMetaclass(), Any), ] ) def test_basic_pass(self, value: object, expected_type: object) -> None: self.assertTrue( is_value_of_type(value, expected_type), f"value {value!r} was supposed to be of type {expected_type!r}", ) @data_provider( [ # basic types and Optionals ("foo", int), (123, str), (None, int), ("foo", Optional[int]), (MyExampleClassWithMetaclass(), int), # ClassVar (123, ClassVar[str]), # Literals (123, Literal[321]), (123, Literal["123"]), ("abc", Literal["cba"]), ("abc", Literal[123]), (True, Literal[False]), ("missing", Literal["one", "of", "many"]), ("missing", Union[Literal["one"], Literal["of"], Literal["many"]]), # If nothing matches a Union it will fail (123, Union[str, None, MyExampleClass]), (None, Union[str, int, MyExampleClass]), (MyExampleClass(), Union[str, int, None]), (MyExampleClass(), Union[Optional[str], Optional[int]]), # Tuples require the number of args to match, as well # as each value mapped to each arg ((123,), Tuple[str]), ((123,), Tuple[str, int, str]), ((123,), Tuple[str, int, str]), (("foo", 123), NamedTupleSubclass), # class variables get unwrapped, and behave like their underlying type (MyExampleClass(), ClassVar[MyExampleChildClass]), # check mapping subclasses (MappingProxyType({}), Dict), (MappingProxyType({}), MutableMapping), # we check each key and value ({123: "foo", 234: None}, Dict[int, str]), ({123: None, 234: 9001}, Dict[int, Optional[str]]), # covariance vs invariance: for mutable types we have *invariant* asserts, so # for a mutable list of class X we do not allow subclasses of X ({"foo": MyExampleChildClass()}, Dict[str, MyExampleClass]), ([MyExampleChildClass()], List[MyExampleClass]), # like the invariant propagation check above, we do respect this flag # deeper inside types. So # Iterable[ -> means it's covariant # List[ -> means it's invariant # MyExampleClass -> has a subclass # does not allow List[List[MyExampleChildClass]] # pyre-ignore This is a type specification ([[MyExampleChildClass()]], Iterable[List[MyExampleClass]]), # Iterables allow subclassing, but sets are not lists and vice versa. ([123], Set[int]), ({123}, List[int]), (WeirdSubclassOfList([123]), Set[int]), ] ) def test_basic_fail(self, value: object, expected_type: Type[object]) -> None: self.assertFalse(is_value_of_type(value, expected_type)) def test_not_implemented(self) -> None: with self.assertRaises(NotImplementedError): # pyre-ignore Pyre doesn't like the params to AsyncGenerator is_value_of_type("something", AsyncGenerator[None, None]) LibCST-1.2.0/libcst/tests/test_visitor.py000066400000000000000000000062061456464173300203450ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
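# A minimal illustrative sketch of the hook ordering asserted by the tests
# below: LibCST walks the tree depth-first, calling visit_<Node> on the way
# down and leave_<Node> on the way back up, visiting children in source order.
# (The aliased import just avoids clashing with this file's own imports.)
import libcst as _cst_example

class _NameCollector(_cst_example.CSTVisitor):
    def __init__(self) -> None:
        super().__init__()
        self.collected = []

    def visit_Name(self, node: _cst_example.Name) -> None:
        self.collected.append(node.value)

_collector = _NameCollector()
_cst_example.parse_module("x = y + z\n").visit(_collector)
assert _collector.collected == ["x", "y", "z"]  # depth-first, source order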
from typing import List import libcst as cst from libcst import CSTTransformer, CSTVisitor, parse_module from libcst.testing.utils import UnitTest class VisitorTest(UnitTest): def test_visitor(self) -> None: class SomeVisitor(CSTVisitor): def __init__(self) -> None: self.visit_order: List[str] = [] def visit_If(self, node: cst.If) -> None: self.visit_order.append("visit_If") def leave_If(self, original_node: cst.If) -> None: self.visit_order.append("leave_If") def visit_If_test(self, node: cst.If) -> None: self.visit_order.append("visit_If_test") def leave_If_test(self, node: cst.If) -> None: self.visit_order.append("leave_If_test") def visit_Name(self, node: cst.Name) -> None: self.visit_order.append("visit_Name") def leave_Name(self, original_node: cst.Name) -> None: self.visit_order.append("leave_Name") # Create and visit a simple module. module = parse_module("if True:\n pass") visitor = SomeVisitor() module.visit(visitor) # Check that visits worked. self.assertEqual( visitor.visit_order, [ "visit_If", "visit_If_test", "visit_Name", "leave_Name", "leave_If_test", "leave_If", ], ) def test_transformer(self) -> None: class SomeTransformer(CSTTransformer): def __init__(self) -> None: self.visit_order: List[str] = [] def visit_If(self, node: cst.If) -> None: self.visit_order.append("visit_If") def leave_If(self, original_node: cst.If, updated_node: cst.If) -> cst.If: self.visit_order.append("leave_If") return updated_node def visit_If_test(self, node: cst.If) -> None: self.visit_order.append("visit_If_test") def leave_If_test(self, node: cst.If) -> None: self.visit_order.append("leave_If_test") def visit_Name(self, node: cst.Name) -> None: self.visit_order.append("visit_Name") def leave_Name( self, original_node: cst.Name, updated_node: cst.Name ) -> cst.Name: self.visit_order.append("leave_Name") return updated_node # Create and visit a simple module. module = parse_module("if True:\n pass") transformer = SomeTransformer() module.visit(transformer) # Check that visits worked. self.assertEqual( transformer.visit_order, [ "visit_If", "visit_If_test", "visit_Name", "leave_Name", "leave_If_test", "leave_If", ], ) LibCST-1.2.0/libcst/tool.py000066400000000000000000000753231456464173300154300ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Usage: # # python -m libcst.tool --help # python -m libcst.tool print python_file.py import argparse import dataclasses import importlib import inspect import os import os.path import shutil import sys import textwrap from abc import ABC, abstractmethod from typing import Any, Callable, Dict, List, Sequence, Tuple, Type import yaml from libcst import ( CSTNode, IndentedBlock, LIBCST_VERSION, Module, parse_module, PartialParserConfig, ) from libcst._nodes.deep_equals import deep_equals from libcst._parser.parso.utils import parse_version_string from libcst.codemod import ( CodemodCommand, CodemodContext, diff_code, exec_transform_with_prettyprint, gather_files, parallel_exec_transform_with_prettyprint, ) _DEFAULT_INDENT: str = " " def _node_repr_recursive( # noqa: C901 node: object, *, indent: str = _DEFAULT_INDENT, show_defaults: bool = False, show_syntax: bool = False, show_whitespace: bool = False, ) -> List[str]: if isinstance(node, CSTNode): # This is a CSTNode, we must pretty-print it. 
tokens: List[str] = [node.__class__.__name__] fields: Sequence["dataclasses.Field[object]"] = dataclasses.fields(node) # Hide all fields prefixed with "_" fields = [f for f in fields if f.name[0] != "_"] # Filter whitespace nodes if needed if not show_whitespace: def _is_whitespace(field: "dataclasses.Field[object]") -> bool: if "whitespace" in field.name: return True if "leading_lines" in field.name: return True if "lines_after_decorators" in field.name: return True if isinstance(node, (IndentedBlock, Module)) and field.name in [ "header", "footer", ]: return True if isinstance(node, IndentedBlock) and field.name == "indent": return True return False fields = [f for f in fields if not _is_whitespace(f)] # Filter values which aren't changed from their defaults if not show_defaults: def _get_default(fld: "dataclasses.Field[object]") -> object: if fld.default_factory is not dataclasses.MISSING: # pyre-fixme[29]: `Union[dataclasses._MISSING_TYPE, # dataclasses._DefaultFactory[object]]` is not a function. return fld.default_factory() return fld.default fields = [ f for f in fields if not deep_equals(getattr(node, f.name), _get_default(f)) ] # Filter out values which aren't interesting if needed if not show_syntax: def _is_syntax(field: "dataclasses.Field[object]") -> bool: if isinstance(node, Module) and field.name in [ "encoding", "default_indent", "default_newline", "has_trailing_newline", ]: return True type_str = repr(field.type) if ( "Sentinel" in type_str and field.name not in ["star_arg", "star", "posonly_ind"] and "whitespace" not in field.name ): # This is a value that can optionally be specified, so it's # definitely syntax. return True for name in ["Semicolon", "Colon", "Comma", "Dot", "AssignEqual"]: # These are all nodes that exist for separation syntax if name in type_str: return True return False fields = [f for f in fields if not _is_syntax(f)] if len(fields) == 0: tokens.append("()") else: tokens.append("(\n") for field in fields: child_tokens: List[str] = [field.name, "="] value = getattr(node, field.name) if isinstance(value, (str, bytes)) or not isinstance(value, Sequence): # Render out the node contents child_tokens.extend( _node_repr_recursive( value, indent=indent, show_whitespace=show_whitespace, show_defaults=show_defaults, show_syntax=show_syntax, ) ) elif isinstance(value, Sequence): # Render out a list of individual nodes if len(value) > 0: child_tokens.append("[\n") list_tokens: List[str] = [] last_value = len(value) - 1 for j, v in enumerate(value): list_tokens.extend( _node_repr_recursive( v, indent=indent, show_whitespace=show_whitespace, show_defaults=show_defaults, show_syntax=show_syntax, ) ) if j != last_value: list_tokens.append(",\n") else: list_tokens.append(",") split_by_line = "".join(list_tokens).split("\n") child_tokens.append( "\n".join(f"{indent}{t}" for t in split_by_line) ) child_tokens.append("\n]") else: child_tokens.append("[]") else: raise Exception("Logic error!") # Handle indentation and trailing comma. 
split_by_line = "".join(child_tokens).split("\n") tokens.append("\n".join(f"{indent}{t}" for t in split_by_line)) tokens.append(",\n") tokens.append(")") return tokens else: # This is a python value, just return the repr return [repr(node)] def dump( node: CSTNode, *, indent: str = _DEFAULT_INDENT, show_defaults: bool = False, show_syntax: bool = False, show_whitespace: bool = False, ) -> str: """ Returns a string representation of the node that contains minimal differences from the default contruction of the node while also hiding whitespace and syntax fields. Setting ``show_default`` to ``True`` will add fields regardless if their value is different from the default value. Setting ``show_whitespace`` will add whitespace fields and setting ``show_syntax`` will add syntax fields while respecting the value of ``show_default``. When all keyword args are set to true, the output of this function is indentical to the __repr__ method of the node. """ return "".join( _node_repr_recursive( node, indent=indent, show_defaults=show_defaults, show_syntax=show_syntax, show_whitespace=show_whitespace, ) ) def _print_tree_impl(proc_name: str, command_args: List[str]) -> int: parser = argparse.ArgumentParser( description="Print the LibCST tree representation of a file.", prog=f"{proc_name} print", fromfile_prefix_chars="@", ) parser.add_argument( "infile", metavar="INFILE", help='File to print tree for. Use "-" for stdin', type=str, ) parser.add_argument( "--show-whitespace", action="store_true", help="Show whitespace nodes in printed tree", ) parser.add_argument( "--show-defaults", action="store_true", help="Show values that are unchanged from the default", ) parser.add_argument( "--show-syntax", action="store_true", help="Show values that exist only for syntax, like commas or semicolons", ) parser.add_argument( "--indent-string", default=_DEFAULT_INDENT, help=f"String to use for indenting levels, defaults to {_DEFAULT_INDENT!r}", ) parser.add_argument( "-p", "--python-version", metavar="VERSION", help=( "Override the version string used for parsing Python source files. Defaults " + "to the version of python used to run this tool." ), type=str, default=None, ) args = parser.parse_args(command_args) infile = args.infile # Grab input file if infile == "-": code = sys.stdin.read() else: with open(infile, "rb") as fp: code = fp.read() tree = parse_module( code, config=( PartialParserConfig(python_version=args.python_version) if args.python_version is not None else PartialParserConfig() ), ) print( dump( tree, indent=args.indent_string, show_defaults=args.show_defaults, show_syntax=args.show_syntax, show_whitespace=args.show_whitespace, ) ) return 0 def _default_config() -> Dict[str, Any]: return { "generated_code_marker": f"@gen{''}erated", "formatter": ["black", "-"], "blacklist_patterns": [], "modules": ["libcst.codemod.commands"], "repo_root": ".", } CONFIG_FILE_NAME = ".libcst.codemod.yaml" def _find_and_load_config(proc_name: str) -> Dict[str, Any]: # Initialize with some sane defaults. config = _default_config() # Walk up the filesystem looking for a config file. current_dir = os.path.abspath(os.getcwd()) previous_dir = None found_config = False while current_dir != previous_dir: # See if the config file exists config_file = os.path.join(current_dir, CONFIG_FILE_NAME) if os.path.isfile(config_file): # Load it, override defaults with what is in the config. with open(config_file, "r") as fp: possible_config = yaml.safe_load(fp.read()) # Lets be careful with all user input so we don't crash. 
if isinstance(possible_config, dict): # Grab the generated code marker. for str_setting in ["generated_code_marker"]: if str_setting in possible_config and isinstance( possible_config[str_setting], str ): config[str_setting] = possible_config[str_setting] # Grab the formatter, blacklisted patterns and module directories. for list_setting in ["formatter", "blacklist_patterns", "modules"]: if ( list_setting in possible_config and isinstance(possible_config[list_setting], list) and all( isinstance(s, str) for s in possible_config[list_setting] ) ): config[list_setting] = possible_config[list_setting] # Grab the repo root config. for path_setting in ["repo_root"]: if path_setting in possible_config and isinstance( possible_config[path_setting], str ): config[path_setting] = os.path.abspath( os.path.join(current_dir, possible_config[path_setting]), ) # We successfully located a file, stop traversing. found_config = True break # Try the parent directory. previous_dir = current_dir current_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) requires_config = bool(os.environ.get("LIBCST_TOOL_REQUIRE_CONFIG", "")) if requires_config and not found_config: raise Exception( f"Did not find a {CONFIG_FILE_NAME} in current directory or any " + "parent directory! Perhaps you meant to run this command from a " + "configured subdirectory, or you need to initialize a new project " + f'using "{proc_name} initialize"?' ) # Make sure that the formatter is findable. if config["formatter"]: exe = shutil.which(config["formatter"][0]) or config["formatter"][0] config["formatter"] = [os.path.abspath(exe), *config["formatter"][1:]] return config def _codemod_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901 # Grab the configuration for running this, if it exists. config = _find_and_load_config(proc_name) # First, try to grab the command with a first pass. We aren't going to react # to user input here, so refuse to add help. Help will be parsed in the # full parser below once we know the command and have added its arguments. parser = argparse.ArgumentParser(add_help=False, fromfile_prefix_chars="@") parser.add_argument("command", metavar="COMMAND", type=str, nargs="?", default=None) ext_action = parser.add_argument( "-x", "--external", action="store_true", default=False, help="Interpret `command` as just a module/class specifier", ) args, _ = parser.parse_known_args(command_args) # Now, try to load the class and get its arguments for help purposes. if args.command is not None: command_module_name, _, command_class_name = args.command.rpartition(".") if not (command_module_name and command_class_name): print(f"{args.command} is not a valid codemod command", file=sys.stderr) return 1 if args.external: # There's no error handling here on purpose; if the user opted in for `-x`, # they'll probably want to see the exact import error too. command_class = getattr( importlib.import_module(command_module_name), command_class_name, ) else: command_class = None for module in config["modules"]: try: command_class = getattr( importlib.import_module(f"{module}.{command_module_name}"), command_class_name, ) break # Only swallow known import errors, show the rest of the exceptions # to the user who is trying to run the codemod. except AttributeError: continue except ModuleNotFoundError: continue if command_class is None: print( f"Could not find {command_module_name} in any configured modules", file=sys.stderr, ) return 1 else: # Dummy, specifically to allow for running --help with no arguments. 
command_class = CodemodCommand # Now, construct the full parser, parse the args and run the class. parser = argparse.ArgumentParser( description=( "Execute a codemod against a series of files." if command_class is CodemodCommand else command_class.DESCRIPTION ), prog=f"{proc_name} codemod", fromfile_prefix_chars="@", ) parser._add_action(ext_action) parser.add_argument( "command", metavar="COMMAND", type=str, help=( "The name of the file (minus the path and extension) and class joined with " + "a '.' that defines your command (e.g. strip_strings_from_types.StripStringsCommand)" ), ) parser.add_argument( "path", metavar="PATH", nargs="+", help=( "Path to codemod. Can be a directory, file, or multiple of either. To " + 'instead read from stdin and write to stdout, use "-"' ), ) parser.add_argument( "-j", "--jobs", metavar="JOBS", help="Number of jobs to use when processing files. Defaults to number of cores", type=int, default=None, ) parser.add_argument( "-p", "--python-version", metavar="VERSION", help=( "Override the version string used for parsing Python source files. Defaults " + "to the version of python used to run this tool." ), type=str, default=None, ) parser.add_argument( "-u", "--unified-diff", metavar="CONTEXT", help="Output unified diff instead of contents. Implies outputting to stdout", type=int, nargs="?", default=None, const=5, ) parser.add_argument( "--include-generated", action="store_true", help="Codemod generated files." ) parser.add_argument( "--include-stubs", action="store_true", help="Codemod typing stub files." ) parser.add_argument( "--no-format", action="store_true", help="Don't format resulting codemod with configured formatter.", ) parser.add_argument( "--show-successes", action="store_true", help="Print files successfully codemodded with no warnings.", ) parser.add_argument( "--hide-generated-warnings", action="store_true", help="Do not print files that are skipped for being autogenerated.", ) parser.add_argument( "--hide-blacklisted-warnings", action="store_true", help="Do not print files that are skipped for being blacklisted.", ) parser.add_argument( "--hide-progress", action="store_true", help="Do not print progress indicator. Useful if calling from a script.", ) command_class.add_args(parser) args = parser.parse_args(command_args) codemod_args = { k: v for k, v in vars(args).items() if k not in { "command", "external", "hide_blacklisted_warnings", "hide_generated_warnings", "hide_progress", "include_generated", "include_stubs", "jobs", "no_format", "path", "python_version", "show_successes", "unified_diff", } } command_instance = command_class(CodemodContext(), **codemod_args) # Specify target version for black formatter if os.path.basename(config["formatter"][0]) in ("black", "black.exe"): parsed_version = parse_version_string(args.python_version) config["formatter"] = [ config["formatter"][0], "--target-version", f"py{parsed_version.major}{parsed_version.minor}", ] + config["formatter"][1:] # Special case for allowing stdin/stdout. Note that this does not allow for # full-repo metadata since there is no path. 
if any(p == "-" for p in args.path): if len(args.path) > 1: raise Exception("Cannot specify multiple paths when reading from stdin!") print("Codemodding from stdin", file=sys.stderr) oldcode = sys.stdin.read() newcode = exec_transform_with_prettyprint( command_instance, oldcode, include_generated=args.include_generated, generated_code_marker=config["generated_code_marker"], format_code=not args.no_format, formatter_args=config["formatter"], python_version=args.python_version, ) if not newcode: print("Failed to codemod from stdin", file=sys.stderr) return 1 # Now, either print or diff the code if args.unified_diff: print(diff_code(oldcode, newcode, args.unified_diff, filename="stdin")) else: print(newcode) return 0 # Let's run it! files = gather_files(args.path, include_stubs=args.include_stubs) try: result = parallel_exec_transform_with_prettyprint( command_instance, files, jobs=args.jobs, unified_diff=args.unified_diff, include_generated=args.include_generated, generated_code_marker=config["generated_code_marker"], format_code=not args.no_format, formatter_args=config["formatter"], show_successes=args.show_successes, hide_generated=args.hide_generated_warnings, hide_blacklisted=args.hide_blacklisted_warnings, hide_progress=args.hide_progress, blacklist_patterns=config["blacklist_patterns"], python_version=args.python_version, repo_root=config["repo_root"], ) except KeyboardInterrupt: print("Interrupted!", file=sys.stderr) return 2 # Print a fancy summary at the end. print( f"Finished codemodding {result.successes + result.skips + result.failures} files!", file=sys.stderr, ) print(f" - Transformed {result.successes} files successfully.", file=sys.stderr) print(f" - Skipped {result.skips} files.", file=sys.stderr) print(f" - Failed to codemod {result.failures} files.", file=sys.stderr) print(f" - {result.warnings} warnings were generated.", file=sys.stderr) return 1 if result.failures > 0 else 0 class _SerializerBase(ABC): def __init__(self, comment: str) -> None: self.comment = comment def serialize(self, key: str, value: object) -> str: comments = os.linesep.join( f"# {comment}" for comment in textwrap.wrap(self.comment) ) return f"{comments}{os.linesep}{self._serialize_impl(key, value)}{os.linesep}" @abstractmethod def _serialize_impl(self, key: str, value: object) -> str: ... class _StrSerializer(_SerializerBase): def _serialize_impl(self, key: str, value: object) -> str: return f"{key}: {value!r}" class _ListSerializer(_SerializerBase): def __init__(self, comment: str, *, newlines: bool = False) -> None: super().__init__(comment) self.newlines = newlines def _serialize_impl(self, key: str, value: object) -> str: if not isinstance(value, list): raise Exception("Can only serialize lists!") if self.newlines: values = [f"- {v!r}" for v in value] return f"{key}:{os.linesep}{os.linesep.join(values)}" else: values = [repr(v) for v in value] return f"{key}: [{', '.join(values)}]" def _initialize_impl(proc_name: str, command_args: List[str]) -> int: # Now, construct the full parser, parse the args and run the class. parser = argparse.ArgumentParser( description="Initialize a directory by writing a default LibCST config to it.", prog=f"{proc_name} initialize", fromfile_prefix_chars="@", ) parser.add_argument( "path", metavar="PATH", type=str, help="Path to initialize with a default LibCST codemod configuration", ) args = parser.parse_args(command_args) # Get default configuration file, write it to the YAML file we # recognize as our config. 
default_config = _default_config() # We serialize for ourselves here, since PyYAML doesn't allow # us to control comments in the default file. serializers: Dict[str, _SerializerBase] = { "generated_code_marker": _StrSerializer( "String that LibCST should look for in code which indicates " + "that the module is generated code." ), "formatter": _ListSerializer( "Command line and arguments for invoking a code formatter. " + "Anything specified here must be capable of taking code via " + "stdin and returning formatted code via stdout." ), "blacklist_patterns": _ListSerializer( "List of regex patterns which LibCST will evaluate against " + "filenames to determine if the module should be touched." ), "modules": _ListSerializer( "List of modules that contain codemods inside of them.", newlines=True ), "repo_root": _StrSerializer( "Absolute or relative path of the repository root, used for " + "providing full-repo metadata. Relative paths should be " + "specified with this file location as the base." ), } config_str = "".join( serializers[key].serialize(key, val) for key, val in default_config.items() ) # For safety, verify that it parses to the identical file. actual_config = yaml.safe_load(config_str) if actual_config != default_config: raise Exception("Logic error, serialization is invalid!") config_file = os.path.abspath(os.path.join(args.path, CONFIG_FILE_NAME)) with open(config_file, "w") as fp: fp.write(config_str) print(f"Successfully wrote default config file to {config_file}") return 0 def _recursive_find(base_dir: str, base_module: str) -> List[Tuple[str, object]]: """ Given a base directory and a base module, recursively walk the directory looking for importable python modules, returning them and their relative module name based off of the base_module. """ modules: List[Tuple[str, object]] = [] for path in os.listdir(base_dir): full_path = os.path.join(base_dir, path) if os.path.isdir(full_path): # Recursively add files in subdirectories. additions = _recursive_find(full_path, f"{base_module}.{path}") for module_name, module_object in additions: modules.append((f"{path}.{module_name}", module_object)) continue if not os.path.isfile(full_path) or not path.endswith(".py"): continue try: module_name = path[:-3] potential_codemod = importlib.import_module(f"{base_module}.{module_name}") modules.append((module_name, potential_codemod)) except Exception: # Unlike running a codemod, listing shouldn't crash with exceptions. continue return modules def _list_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901 # Grab the configuration so we can determine which modules to list from config = _find_and_load_config(proc_name) parser = argparse.ArgumentParser( description="List all codemods available to run.", prog=f"{proc_name} list", fromfile_prefix_chars="@", ) _ = parser.parse_args(command_args) # Now, import each of the modules to determine their paths. codemods: Dict[Type[CodemodCommand], str] = {} for module in config["modules"]: try: imported_module = importlib.import_module(module) except Exception: # Unlike running a codemod, listing shouldn't crash with exceptions. imported_module = None if not imported_module: print( f"Could not import {module}, cannot list codemods inside it", file=sys.stderr, ) continue # Grab the path, try to import all of the files inside of it. # pyre-fixme[6]: For 1st argument expected `PathLike[Variable[AnyStr <: # [str, bytes]]]` but got `Optional[str]`. 
path = os.path.dirname(os.path.abspath(imported_module.__file__)) for name, imported_module in _recursive_find(path, module): for objname in dir(imported_module): try: obj = getattr(imported_module, objname) if not issubclass(obj, CodemodCommand): continue if inspect.isabstract(obj): continue # isabstract is broken for direct subclasses of ABC which # don't themselves define any abstract methods, so lets # check for that here. if any(cls[0] is ABC for cls in inspect.getclasstree([obj])): continue # Deduplicate any codemods that were referenced in other # codemods. Always take the shortest name. fullname = f"{name}.{obj.__name__}" if obj in codemods: if len(fullname) < len(codemods[obj]): codemods[obj] = fullname else: codemods[obj] = fullname except TypeError: continue printable_codemods: List[str] = [ f"{name} - {obj.DESCRIPTION}" for obj, name in codemods.items() ] print("\n".join(sorted(printable_codemods))) return 0 def main(proc_name: str, cli_args: List[str]) -> int: # Hack to allow "--help" to print out generic help, but also allow subcommands # to customize their parsing and help messages. first_arg = cli_args[0] if cli_args else "--help" add_help = first_arg in {"--help", "-h"} # Create general parser to determine which command we are invoking. parser: argparse.ArgumentParser = argparse.ArgumentParser( description="Collection of utilities that ship with LibCST.", add_help=add_help, prog=proc_name, fromfile_prefix_chars="@", ) parser.add_argument( "--version", help="Print current version of LibCST toolset.", action="version", version=f"LibCST version {LIBCST_VERSION}", # pyre-ignore[16] pyre bug? ) parser.add_argument( "action", help="Action to take. Valid options include: print, codemod, list, initialize.", choices=["print", "codemod", "list", "initialize"], ) args, command_args = parser.parse_known_args(cli_args) # Create a dummy command in case the user manages to get into # this state. def _invalid_command(proc_name: str, command_args: List[str]) -> int: print("Please specify a command!\n", file=sys.stderr) parser.print_help(sys.stderr) return 1 # Look up the command and delegate parsing/running. lookup: Dict[str, Callable[[str, List[str]], int]] = { "print": _print_tree_impl, "codemod": _codemod_impl, "initialize": _initialize_impl, "list": _list_impl, } return lookup.get(args.action or None, _invalid_command)(proc_name, command_args) if __name__ == "__main__": sys.exit( main(os.environ.get("LIBCST_TOOL_COMMAND_NAME", "libcst.tool"), sys.argv[1:]) ) LibCST-1.2.0/native/000077500000000000000000000000001456464173300140755ustar00rootroot00000000000000LibCST-1.2.0/native/Cargo.lock000066400000000000000000000637051456464173300160150ustar00rootroot00000000000000# This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "aho-corasick" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" dependencies = [ "memchr", ] [[package]] name = "anes" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "annotate-snippets" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7021ce4924a3f25f802b2cccd1af585e39ea1a363a1aa2e72afe54b67a3a7a7" [[package]] name = "anstyle" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "bumpalo" version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "libc", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chic" version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5b5db619f3556839cb2223ae86ff3f9a09da2c5013be42bc9af08c9589bf70c" dependencies = [ "annotate-snippets", ] [[package]] name = "ciborium" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" dependencies = [ "ciborium-io", "ciborium-ll", "serde", ] [[package]] name = "ciborium-io" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" [[package]] name = "ciborium-ll" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" dependencies = [ "ciborium-io", "half", ] [[package]] name = "clap" version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d5f1946157a96594eb2d2c10eb7ad9a2b27518cb3000209dec700c35df9197d" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78116e32a042dd73c2901f0dc30790d20ff3447f3e3472fad359e8c3d282bcd6" dependencies = [ "anstyle", "clap_lex", ] [[package]] name = 
"clap_lex" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" [[package]] name = "criterion" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ "anes", "cast", "ciborium", "clap", "criterion-plot", "is-terminal", "itertools 0.10.5", "num-traits", "once_cell", "oorandom", "plotters", "rayon", "regex", "serde", "serde_derive", "serde_json", "tinytemplate", "walkdir", ] [[package]] name = "criterion-plot" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", "itertools 0.10.5", ] [[package]] name = "crossbeam-channel" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" dependencies = [ "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset 0.6.5", "once_cell", "scopeguard", ] [[package]] name = "crossbeam-utils" version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d82ee10ce34d7bc12c2122495e7593a9c41347ecdd64185af4ecf72cb1a7f83" dependencies = [ "cfg-if", "once_cell", ] [[package]] name = "difference" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" [[package]] name = "either" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "errno" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", "windows-sys", ] [[package]] name = "errno-dragonfly" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ "cc", "libc", ] [[package]] name = "glob" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "half" version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] 
[[package]] name = "hermit-abi" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "indoc" version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8" [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "is-terminal" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", "rustix", "windows-sys", ] [[package]] name = "itertools" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itertools" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "js-sys" version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" dependencies = [ "wasm-bindgen", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libcst" version = "1.2.0" dependencies = [ "chic", "criterion", "difference", "itertools 0.11.0", "libcst_derive", "memchr", "paste", "peg", "pyo3", "rayon", "regex", "thiserror", ] [[package]] name = "libcst_derive" version = "1.2.0" dependencies = [ "quote", "syn 2.0.41", "trybuild", ] [[package]] name = "linux-raw-sys" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] [[package]] name = "memoffset" version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] [[package]] name = "num-traits" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi 0.1.19", "libc", ] [[package]] name = "once_cell" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "oorandom" version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "parking_lot" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ "cfg-if", "instant", "libc", "redox_syscall", "smallvec", "winapi", ] [[package]] name = "paste" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "peg" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a07f2cafdc3babeebc087e499118343442b742cc7c31b4d054682cc598508554" dependencies = [ "peg-macros", "peg-runtime", ] [[package]] name = "peg-macros" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a90084dc05cf0428428e3d12399f39faad19b0909f64fb9170c9fdd6d9cd49b" dependencies = [ "peg-runtime", "proc-macro2", "quote", ] [[package]] name = "peg-runtime" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa00462b37ead6d11a82c9d568b26682d78e0477dc02d1966c013af80969739" [[package]] name = "plotters" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" dependencies = [ "num-traits", "plotters-backend", "plotters-svg", "wasm-bindgen", "web-sys", ] [[package]] name = "plotters-backend" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" [[package]] name = "plotters-svg" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" dependencies = [ "plotters-backend", ] [[package]] name = "proc-macro2" version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" dependencies = [ "unicode-ident", ] [[package]] name = "pyo3" version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9a89dc7a5850d0e983be1ec2a463a171d20990487c3cfcd68b5363f1ee3d6fe0" dependencies = [ "cfg-if", "indoc", "libc", "memoffset 0.9.0", "parking_lot", "pyo3-build-config", "pyo3-ffi", "pyo3-macros", "unindent", ] [[package]] name = "pyo3-build-config" version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07426f0d8fe5a601f26293f300afd1a7b1ed5e78b2a705870c5f30893c5163be" dependencies = [ "once_cell", "target-lexicon", ] [[package]] name = "pyo3-ffi" version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb7dec17e17766b46bca4f1a4215a85006b4c2ecde122076c562dd058da6cf1" dependencies = [ "libc", "pyo3-build-config", ] [[package]] name = "pyo3-macros" version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f738b4e40d50b5711957f142878cfa0f28e054aa0ebdfc3fd137a843f74ed3" dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", "syn 2.0.41", ] [[package]] name = "pyo3-macros-backend" version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc910d4851847827daf9d6cdd4a823fbdaab5b8818325c5e97a86da79e8881f" dependencies = [ "heck", "proc-macro2", "quote", "syn 2.0.41", ] [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "rayon" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", "num_cpus", ] [[package]] name = "redox_syscall" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "regex" version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "rustix" version = "0.38.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" dependencies = [ "bitflags 2.4.0", "errno", "libc", "linux-raw-sys", "windows-sys", ] [[package]] name = "ryu" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "serde_json" version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ "itoa", "ryu", "serde", ] [[package]] name = "smallvec" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc88c725d61fc6c3132893370cac4a0200e3fedf5da8331c570664b1987f5ca2" [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "target-lexicon" version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1" [[package]] name = "termcolor" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" dependencies = [ "winapi-util", ] [[package]] name = "thiserror" version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "tinytemplate" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ "serde", "serde_json", ] [[package]] name = "toml" version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] [[package]] name = "trybuild" version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea496675d71016e9bc76aa42d87f16aefd95447cc5818e671e12b2d7e269075d" dependencies = [ "glob", "once_cell", "serde", "serde_derive", "serde_json", "termcolor", "toml", ] [[package]] name = "unicode-ident" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" 
[[package]] name = "unindent" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" [[package]] name = "walkdir" version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ "same-file", "winapi", "winapi-util", ] [[package]] name = "wasm-bindgen" version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" dependencies = [ "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" dependencies = [ "bumpalo", "lazy_static", "log", "proc-macro2", "quote", "syn 1.0.109", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" [[package]] name = "web-sys" version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] 
[[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" LibCST-1.2.0/native/Cargo.toml000066400000000000000000000000761456464173300160300ustar00rootroot00000000000000[workspace] members = [ "libcst", "libcst_derive", ] LibCST-1.2.0/native/libcst/000077500000000000000000000000001456464173300153555ustar00rootroot00000000000000LibCST-1.2.0/native/libcst/Cargo.toml000066400000000000000000000026241456464173300173110ustar00rootroot00000000000000# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. [package] name = "libcst" version = "1.2.0" authors = ["LibCST Developers"] edition = "2018" rust-version = "1.70" description = "A Python parser and Concrete Syntax Tree library." license-file = "LICENSE" repository = "https://github.com/Instagram/LibCST" documentation = "https://libcst.rtfd.org" keywords = ["python", "cst", "ast"] categories = ["parser-implementations"] [lib] name = "libcst_native" crate-type = ["cdylib", "rlib"] [[bin]] name = "parse" path = "src/bin.rs" [features] # This is a bit of a hack, since `cargo test` doesn't work with `extension-module`. # To run tests, use `cargo test --no-default-features`. # # Once https://github.com/PyO3/pyo3/pull/1123 lands, it may be better to use # `-Zextra-link-arg` for this instead. 
default = ["py"] py = ["pyo3", "pyo3/extension-module"] trace = ["peg/trace"] [dependencies] paste = "1.0.9" pyo3 = { version = "0.20", optional = true } thiserror = "1.0.37" peg = "0.8.1" chic = "1.2.2" regex = "1.9.3" memchr = "2.5.0" libcst_derive = { path = "../libcst_derive", version = "1.2.0" } [dev-dependencies] criterion = { version = "0.5.1", features = ["html_reports"] } difference = "2.0.0" rayon = "1.7.0" itertools = "0.11.0" [[bench]] name = "parser_benchmark" harness = false LibCST-1.2.0/native/libcst/Grammar000066400000000000000000000772331456464173300167020ustar00rootroot00000000000000# PEG grammar for Python 3.9 @trailer ''' void * _PyPegen_parse(Parser *p) { // Initialize keywords p->keywords = reserved_keywords; p->n_keyword_lists = n_keyword_lists; // Run parser void *result = NULL; if (p->start_rule == Py_file_input) { result = file_rule(p); } else if (p->start_rule == Py_single_input) { result = interactive_rule(p); } else if (p->start_rule == Py_eval_input) { result = eval_rule(p); } else if (p->start_rule == Py_func_type_input) { result = func_type_rule(p); } else if (p->start_rule == Py_fstring_input) { result = fstring_rule(p); } return result; } // The end ''' file[mod_ty]: a=[statements] ENDMARKER { _PyPegen_make_module(p, a) } interactive[mod_ty]: a=statement_newline { Interactive(a, p->arena) } eval[mod_ty]: a=expressions NEWLINE* ENDMARKER { Expression(a, p->arena) } func_type[mod_ty]: '(' a=[type_expressions] ')' '->' b=expression NEWLINE* ENDMARKER { FunctionType(a, b, p->arena) } fstring[expr_ty]: star_expressions # type_expressions allow */** but ignore them type_expressions[asdl_seq*]: | a=','.expression+ ',' '*' b=expression ',' '**' c=expression { _PyPegen_seq_append_to_end(p, CHECK(_PyPegen_seq_append_to_end(p, a, b)), c) } | a=','.expression+ ',' '*' b=expression { _PyPegen_seq_append_to_end(p, a, b) } | a=','.expression+ ',' '**' b=expression { _PyPegen_seq_append_to_end(p, a, b) } | '*' a=expression ',' '**' b=expression { _PyPegen_seq_append_to_end(p, CHECK(_PyPegen_singleton_seq(p, a)), b) } | '*' a=expression { _PyPegen_singleton_seq(p, a) } | '**' a=expression { _PyPegen_singleton_seq(p, a) } | ','.expression+ statements[asdl_seq*]: a=statement+ { _PyPegen_seq_flatten(p, a) } statement[asdl_seq*]: a=compound_stmt { _PyPegen_singleton_seq(p, a) } | simple_stmt statement_newline[asdl_seq*]: | a=compound_stmt NEWLINE { _PyPegen_singleton_seq(p, a) } | simple_stmt | NEWLINE { _PyPegen_singleton_seq(p, CHECK(_Py_Pass(EXTRA))) } | ENDMARKER { _PyPegen_interactive_exit(p) } simple_stmt[asdl_seq*]: | a=small_stmt !';' NEWLINE { _PyPegen_singleton_seq(p, a) } # Not needed, there for speedup | a=';'.small_stmt+ [';'] NEWLINE { a } # NOTE: assignment MUST precede expression, else parsing a simple assignment # will throw a SyntaxError. 
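# (Editorial worked example for the note above; not part of the upstream
# grammar. PEG alternatives are tried in order and the first match wins:
# for input `x = 1`, if `small_stmt` tried `star_expressions` before
# `assignment`, it would match just `x`, leaving `= 1` behind, and the
# surrounding `simple_stmt` rule would then fail at the stray `=` with a
# SyntaxError. Listing `assignment` first avoids that.)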
small_stmt[stmt_ty] (memo): | assignment | e=star_expressions { _Py_Expr(e, EXTRA) } | &'return' return_stmt | &('import' | 'from') import_stmt | &'raise' raise_stmt | 'pass' { _Py_Pass(EXTRA) } | &'del' del_stmt | &'yield' yield_stmt | &'assert' assert_stmt | 'break' { _Py_Break(EXTRA) } | 'continue' { _Py_Continue(EXTRA) } | &'global' global_stmt | &'nonlocal' nonlocal_stmt compound_stmt[stmt_ty]: | &('def' | '@' | ASYNC) function_def | &'if' if_stmt | &('class' | '@') class_def | &('with' | ASYNC) with_stmt | &('for' | ASYNC) for_stmt | &'try' try_stmt | &'while' while_stmt # NOTE: annotated_rhs may start with 'yield'; yield_expr must start with 'yield' assignment[stmt_ty]: | a=NAME ':' b=expression c=['=' d=annotated_rhs { d }] { CHECK_VERSION( 6, "Variable annotation syntax is", _Py_AnnAssign(CHECK(_PyPegen_set_expr_context(p, a, Store)), b, c, 1, EXTRA) ) } | a=('(' b=single_target ')' { b } | single_subscript_attribute_target) ':' b=expression c=['=' d=annotated_rhs { d }] { CHECK_VERSION(6, "Variable annotations syntax is", _Py_AnnAssign(a, b, c, 0, EXTRA)) } | a=(z=star_targets '=' { z })+ b=(yield_expr | star_expressions) !'=' tc=[TYPE_COMMENT] { _Py_Assign(a, b, NEW_TYPE_COMMENT(p, tc), EXTRA) } | a=single_target b=augassign ~ c=(yield_expr | star_expressions) { _Py_AugAssign(a, b->kind, c, EXTRA) } | invalid_assignment augassign[AugOperator*]: | '+=' { _PyPegen_augoperator(p, Add) } | '-=' { _PyPegen_augoperator(p, Sub) } | '*=' { _PyPegen_augoperator(p, Mult) } | '@=' { CHECK_VERSION(5, "The '@' operator is", _PyPegen_augoperator(p, MatMult)) } | '/=' { _PyPegen_augoperator(p, Div) } | '%=' { _PyPegen_augoperator(p, Mod) } | '&=' { _PyPegen_augoperator(p, BitAnd) } | '|=' { _PyPegen_augoperator(p, BitOr) } | '^=' { _PyPegen_augoperator(p, BitXor) } | '<<=' { _PyPegen_augoperator(p, LShift) } | '>>=' { _PyPegen_augoperator(p, RShift) } | '**=' { _PyPegen_augoperator(p, Pow) } | '//=' { _PyPegen_augoperator(p, FloorDiv) } global_stmt[stmt_ty]: 'global' a=','.NAME+ { _Py_Global(CHECK(_PyPegen_map_names_to_ids(p, a)), EXTRA) } nonlocal_stmt[stmt_ty]: 'nonlocal' a=','.NAME+ { _Py_Nonlocal(CHECK(_PyPegen_map_names_to_ids(p, a)), EXTRA) } yield_stmt[stmt_ty]: y=yield_expr { _Py_Expr(y, EXTRA) } assert_stmt[stmt_ty]: 'assert' a=expression b=[',' z=expression { z }] { _Py_Assert(a, b, EXTRA) } del_stmt[stmt_ty]: | 'del' a=del_targets &(';' | NEWLINE) { _Py_Delete(a, EXTRA) } | invalid_del_stmt import_stmt[stmt_ty]: import_name | import_from import_name[stmt_ty]: 'import' a=dotted_as_names { _Py_Import(a, EXTRA) } # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS import_from[stmt_ty]: | 'from' a=('.' | '...')* b=dotted_name 'import' c=import_from_targets { _Py_ImportFrom(b->v.Name.id, c, _PyPegen_seq_count_dots(a), EXTRA) } | 'from' a=('.' | '...')+ 'import' b=import_from_targets { _Py_ImportFrom(NULL, b, _PyPegen_seq_count_dots(a), EXTRA) } import_from_targets[asdl_seq*]: | '(' a=import_from_as_names [','] ')' { a } | import_from_as_names !',' | '*' { _PyPegen_singleton_seq(p, CHECK(_PyPegen_alias_for_star(p))) } | invalid_import_from_targets import_from_as_names[asdl_seq*]: | a=','.import_from_as_name+ { a } import_from_as_name[alias_ty]: | a=NAME b=['as' z=NAME { z }] { _Py_alias(a->v.Name.id, (b) ? ((expr_ty) b)->v.Name.id : NULL, p->arena) } dotted_as_names[asdl_seq*]: | a=','.dotted_as_name+ { a } dotted_as_name[alias_ty]: | a=dotted_name b=['as' z=NAME { z }] { _Py_alias(a->v.Name.id, (b) ? 
((expr_ty) b)->v.Name.id : NULL, p->arena) } dotted_name[expr_ty]: | a=dotted_name '.' b=NAME { _PyPegen_join_names_with_dot(p, a, b) } | NAME if_stmt[stmt_ty]: | 'if' a=named_expression ':' b=block c=elif_stmt { _Py_If(a, b, CHECK(_PyPegen_singleton_seq(p, c)), EXTRA) } | 'if' a=named_expression ':' b=block c=[else_block] { _Py_If(a, b, c, EXTRA) } elif_stmt[stmt_ty]: | 'elif' a=named_expression ':' b=block c=elif_stmt { _Py_If(a, b, CHECK(_PyPegen_singleton_seq(p, c)), EXTRA) } | 'elif' a=named_expression ':' b=block c=[else_block] { _Py_If(a, b, c, EXTRA) } else_block[asdl_seq*]: 'else' ':' b=block { b } while_stmt[stmt_ty]: | 'while' a=named_expression ':' b=block c=[else_block] { _Py_While(a, b, c, EXTRA) } for_stmt[stmt_ty]: | 'for' t=star_targets 'in' ~ ex=star_expressions ':' tc=[TYPE_COMMENT] b=block el=[else_block] { _Py_For(t, ex, b, el, NEW_TYPE_COMMENT(p, tc), EXTRA) } | ASYNC 'for' t=star_targets 'in' ~ ex=star_expressions ':' tc=[TYPE_COMMENT] b=block el=[else_block] { CHECK_VERSION(5, "Async for loops are", _Py_AsyncFor(t, ex, b, el, NEW_TYPE_COMMENT(p, tc), EXTRA)) } | invalid_for_target with_stmt[stmt_ty]: | 'with' '(' a=','.with_item+ ','? ')' ':' b=block { _Py_With(a, b, NULL, EXTRA) } | 'with' a=','.with_item+ ':' tc=[TYPE_COMMENT] b=block { _Py_With(a, b, NEW_TYPE_COMMENT(p, tc), EXTRA) } | ASYNC 'with' '(' a=','.with_item+ ','? ')' ':' b=block { CHECK_VERSION(5, "Async with statements are", _Py_AsyncWith(a, b, NULL, EXTRA)) } | ASYNC 'with' a=','.with_item+ ':' tc=[TYPE_COMMENT] b=block { CHECK_VERSION(5, "Async with statements are", _Py_AsyncWith(a, b, NEW_TYPE_COMMENT(p, tc), EXTRA)) } with_item[withitem_ty]: | e=expression 'as' t=star_target &(',' | ')' | ':') { _Py_withitem(e, t, p->arena) } | invalid_with_item | e=expression { _Py_withitem(e, NULL, p->arena) } try_stmt[stmt_ty]: | 'try' ':' b=block f=finally_block { _Py_Try(b, NULL, NULL, f, EXTRA) } | 'try' ':' b=block ex=except_block+ el=[else_block] f=[finally_block] { _Py_Try(b, ex, el, f, EXTRA) } except_block[excepthandler_ty]: | 'except' e=expression t=['as' z=NAME { z }] ':' b=block { _Py_ExceptHandler(e, (t) ? ((expr_ty) t)->v.Name.id : NULL, b, EXTRA) } | 'except' ':' b=block { _Py_ExceptHandler(NULL, NULL, b, EXTRA) } finally_block[asdl_seq*]: 'finally' ':' a=block { a } return_stmt[stmt_ty]: | 'return' a=[star_expressions] { _Py_Return(a, EXTRA) } raise_stmt[stmt_ty]: | 'raise' a=expression b=['from' z=expression { z }] { _Py_Raise(a, b, EXTRA) } | 'raise' { _Py_Raise(NULL, NULL, EXTRA) } function_def[stmt_ty]: | d=decorators f=function_def_raw { _PyPegen_function_def_decorators(p, d, f) } | function_def_raw function_def_raw[stmt_ty]: | 'def' n=NAME '(' params=[params] ')' a=['->' z=expression { z }] ':' tc=[func_type_comment] b=block { _Py_FunctionDef(n->v.Name.id, (params) ? params : CHECK(_PyPegen_empty_arguments(p)), b, NULL, a, NEW_TYPE_COMMENT(p, tc), EXTRA) } | ASYNC 'def' n=NAME '(' params=[params] ')' a=['->' z=expression { z }] ':' tc=[func_type_comment] b=block { CHECK_VERSION( 5, "Async functions are", _Py_AsyncFunctionDef(n->v.Name.id, (params) ? 
params : CHECK(_PyPegen_empty_arguments(p)), b, NULL, a, NEW_TYPE_COMMENT(p, tc), EXTRA) ) } func_type_comment[Token*]: | NEWLINE t=TYPE_COMMENT &(NEWLINE INDENT) { t } # Must be followed by indented block | invalid_double_type_comments | TYPE_COMMENT params[arguments_ty]: | invalid_parameters | parameters parameters[arguments_ty]: | a=slash_no_default b=param_no_default* c=param_with_default* d=[star_etc] { _PyPegen_make_arguments(p, a, NULL, b, c, d) } | a=slash_with_default b=param_with_default* c=[star_etc] { _PyPegen_make_arguments(p, NULL, a, NULL, b, c) } | a=param_no_default+ b=param_with_default* c=[star_etc] { _PyPegen_make_arguments(p, NULL, NULL, a, b, c) } | a=param_with_default+ b=[star_etc] { _PyPegen_make_arguments(p, NULL, NULL, NULL, a, b)} | a=star_etc { _PyPegen_make_arguments(p, NULL, NULL, NULL, NULL, a) } # Some duplication here because we can't write (',' | &')'), # which is because we don't support empty alternatives (yet). # slash_no_default[asdl_seq*]: | a=param_no_default+ '/' ',' { a } | a=param_no_default+ '/' &')' { a } slash_with_default[SlashWithDefault*]: | a=param_no_default* b=param_with_default+ '/' ',' { _PyPegen_slash_with_default(p, a, b) } | a=param_no_default* b=param_with_default+ '/' &')' { _PyPegen_slash_with_default(p, a, b) } star_etc[StarEtc*]: | '*' a=param_no_default b=param_maybe_default* c=[kwds] { _PyPegen_star_etc(p, a, b, c) } | '*' ',' b=param_maybe_default+ c=[kwds] { _PyPegen_star_etc(p, NULL, b, c) } | a=kwds { _PyPegen_star_etc(p, NULL, NULL, a) } | invalid_star_etc kwds[arg_ty]: '**' a=param_no_default { a } # One parameter. This *includes* a following comma and type comment. # # There are three styles: # - No default # - With default # - Maybe with default # # There are two alternative forms of each, to deal with type comments: # - Ends in a comma followed by an optional type comment # - No comma, optional type comment, must be followed by close paren # The latter form is for a final parameter without trailing comma. # param_no_default[arg_ty]: | a=param ',' tc=TYPE_COMMENT? { _PyPegen_add_type_comment_to_arg(p, a, tc) } | a=param tc=TYPE_COMMENT? &')' { _PyPegen_add_type_comment_to_arg(p, a, tc) } param_with_default[NameDefaultPair*]: | a=param c=default ',' tc=TYPE_COMMENT? { _PyPegen_name_default_pair(p, a, c, tc) } | a=param c=default tc=TYPE_COMMENT? &')' { _PyPegen_name_default_pair(p, a, c, tc) } param_maybe_default[NameDefaultPair*]: | a=param c=default? ',' tc=TYPE_COMMENT? { _PyPegen_name_default_pair(p, a, c, tc) } | a=param c=default? tc=TYPE_COMMENT? &')' { _PyPegen_name_default_pair(p, a, c, tc) } param[arg_ty]: a=NAME b=annotation? { _Py_arg(a->v.Name.id, b, NULL, EXTRA) } annotation[expr_ty]: ':' a=expression { a } default[expr_ty]: '=' a=expression { a } decorators[asdl_seq*]: a=('@' f=named_expression NEWLINE { f })+ { a } class_def[stmt_ty]: | a=decorators b=class_def_raw { _PyPegen_class_def_decorators(p, a, b) } | class_def_raw class_def_raw[stmt_ty]: | 'class' a=NAME b=['(' z=[arguments] ')' { z }] ':' c=block { _Py_ClassDef(a->v.Name.id, (b) ? ((expr_ty) b)->v.Call.args : NULL, (b) ? 
((expr_ty) b)->v.Call.keywords : NULL, c, NULL, EXTRA) } block[asdl_seq*] (memo): | NEWLINE INDENT a=statements DEDENT { a } | simple_stmt | invalid_block star_expressions[expr_ty]: | a=star_expression b=(',' c=star_expression { c })+ [','] { _Py_Tuple(CHECK(_PyPegen_seq_insert_in_front(p, a, b)), Load, EXTRA) } | a=star_expression ',' { _Py_Tuple(CHECK(_PyPegen_singleton_seq(p, a)), Load, EXTRA) } | star_expression star_expression[expr_ty] (memo): | '*' a=bitwise_or { _Py_Starred(a, Load, EXTRA) } | expression star_named_expressions[asdl_seq*]: a=','.star_named_expression+ [','] { a } star_named_expression[expr_ty]: | '*' a=bitwise_or { _Py_Starred(a, Load, EXTRA) } | named_expression named_expression[expr_ty]: | a=NAME ':=' ~ b=expression { _Py_NamedExpr(CHECK(_PyPegen_set_expr_context(p, a, Store)), b, EXTRA) } | expression !':=' | invalid_named_expression annotated_rhs[expr_ty]: yield_expr | star_expressions expressions[expr_ty]: | a=expression b=(',' c=expression { c })+ [','] { _Py_Tuple(CHECK(_PyPegen_seq_insert_in_front(p, a, b)), Load, EXTRA) } | a=expression ',' { _Py_Tuple(CHECK(_PyPegen_singleton_seq(p, a)), Load, EXTRA) } | expression expression[expr_ty] (memo): | a=disjunction 'if' b=disjunction 'else' c=expression { _Py_IfExp(b, a, c, EXTRA) } | disjunction | lambdef lambdef[expr_ty]: | 'lambda' a=[lambda_params] ':' b=expression { _Py_Lambda((a) ? a : CHECK(_PyPegen_empty_arguments(p)), b, EXTRA) } lambda_params[arguments_ty]: | invalid_lambda_parameters | lambda_parameters # lambda_parameters etc. duplicates parameters but without annotations # or type comments, and if there's no comma after a parameter, we expect # a colon, not a close parenthesis. (For more, see parameters above.) # lambda_parameters[arguments_ty]: | a=lambda_slash_no_default b=lambda_param_no_default* c=lambda_param_with_default* d=[lambda_star_etc] { _PyPegen_make_arguments(p, a, NULL, b, c, d) } | a=lambda_slash_with_default b=lambda_param_with_default* c=[lambda_star_etc] { _PyPegen_make_arguments(p, NULL, a, NULL, b, c) } | a=lambda_param_no_default+ b=lambda_param_with_default* c=[lambda_star_etc] { _PyPegen_make_arguments(p, NULL, NULL, a, b, c) } | a=lambda_param_with_default+ b=[lambda_star_etc] { _PyPegen_make_arguments(p, NULL, NULL, NULL, a, b)} | a=lambda_star_etc { _PyPegen_make_arguments(p, NULL, NULL, NULL, NULL, a) } lambda_slash_no_default[asdl_seq*]: | a=lambda_param_no_default+ '/' ',' { a } | a=lambda_param_no_default+ '/' &':' { a } lambda_slash_with_default[SlashWithDefault*]: | a=lambda_param_no_default* b=lambda_param_with_default+ '/' ',' { _PyPegen_slash_with_default(p, a, b) } | a=lambda_param_no_default* b=lambda_param_with_default+ '/' &':' { _PyPegen_slash_with_default(p, a, b) } lambda_star_etc[StarEtc*]: | '*' a=lambda_param_no_default b=lambda_param_maybe_default* c=[lambda_kwds] { _PyPegen_star_etc(p, a, b, c) } | '*' ',' b=lambda_param_maybe_default+ c=[lambda_kwds] { _PyPegen_star_etc(p, NULL, b, c) } | a=lambda_kwds { _PyPegen_star_etc(p, NULL, NULL, a) } | invalid_lambda_star_etc lambda_kwds[arg_ty]: '**' a=lambda_param_no_default { a } lambda_param_no_default[arg_ty]: | a=lambda_param ',' { a } | a=lambda_param &':' { a } lambda_param_with_default[NameDefaultPair*]: | a=lambda_param c=default ',' { _PyPegen_name_default_pair(p, a, c, NULL) } | a=lambda_param c=default &':' { _PyPegen_name_default_pair(p, a, c, NULL) } lambda_param_maybe_default[NameDefaultPair*]: | a=lambda_param c=default? 
',' { _PyPegen_name_default_pair(p, a, c, NULL) } | a=lambda_param c=default? &':' { _PyPegen_name_default_pair(p, a, c, NULL) } lambda_param[arg_ty]: a=NAME { _Py_arg(a->v.Name.id, NULL, NULL, EXTRA) } disjunction[expr_ty] (memo): | a=conjunction b=('or' c=conjunction { c })+ { _Py_BoolOp( Or, CHECK(_PyPegen_seq_insert_in_front(p, a, b)), EXTRA) } | conjunction conjunction[expr_ty] (memo): | a=inversion b=('and' c=inversion { c })+ { _Py_BoolOp( And, CHECK(_PyPegen_seq_insert_in_front(p, a, b)), EXTRA) } | inversion inversion[expr_ty] (memo): | 'not' a=inversion { _Py_UnaryOp(Not, a, EXTRA) } | comparison comparison[expr_ty]: | a=bitwise_or b=compare_op_bitwise_or_pair+ { _Py_Compare(a, CHECK(_PyPegen_get_cmpops(p, b)), CHECK(_PyPegen_get_exprs(p, b)), EXTRA) } | bitwise_or compare_op_bitwise_or_pair[CmpopExprPair*]: | eq_bitwise_or | noteq_bitwise_or | lte_bitwise_or | lt_bitwise_or | gte_bitwise_or | gt_bitwise_or | notin_bitwise_or | in_bitwise_or | isnot_bitwise_or | is_bitwise_or eq_bitwise_or[CmpopExprPair*]: '==' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, Eq, a) } noteq_bitwise_or[CmpopExprPair*]: | (tok='!=' { _PyPegen_check_barry_as_flufl(p, tok) ? NULL : tok}) a=bitwise_or {_PyPegen_cmpop_expr_pair(p, NotEq, a) } lte_bitwise_or[CmpopExprPair*]: '<=' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, LtE, a) } lt_bitwise_or[CmpopExprPair*]: '<' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, Lt, a) } gte_bitwise_or[CmpopExprPair*]: '>=' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, GtE, a) } gt_bitwise_or[CmpopExprPair*]: '>' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, Gt, a) } notin_bitwise_or[CmpopExprPair*]: 'not' 'in' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, NotIn, a) } in_bitwise_or[CmpopExprPair*]: 'in' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, In, a) } isnot_bitwise_or[CmpopExprPair*]: 'is' 'not' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, IsNot, a) } is_bitwise_or[CmpopExprPair*]: 'is' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, Is, a) } bitwise_or[expr_ty]: | a=bitwise_or '|' b=bitwise_xor { _Py_BinOp(a, BitOr, b, EXTRA) } | bitwise_xor bitwise_xor[expr_ty]: | a=bitwise_xor '^' b=bitwise_and { _Py_BinOp(a, BitXor, b, EXTRA) } | bitwise_and bitwise_and[expr_ty]: | a=bitwise_and '&' b=shift_expr { _Py_BinOp(a, BitAnd, b, EXTRA) } | shift_expr shift_expr[expr_ty]: | a=shift_expr '<<' b=sum { _Py_BinOp(a, LShift, b, EXTRA) } | a=shift_expr '>>' b=sum { _Py_BinOp(a, RShift, b, EXTRA) } | sum sum[expr_ty]: | a=sum '+' b=term { _Py_BinOp(a, Add, b, EXTRA) } | a=sum '-' b=term { _Py_BinOp(a, Sub, b, EXTRA) } | term term[expr_ty]: | a=term '*' b=factor { _Py_BinOp(a, Mult, b, EXTRA) } | a=term '/' b=factor { _Py_BinOp(a, Div, b, EXTRA) } | a=term '//' b=factor { _Py_BinOp(a, FloorDiv, b, EXTRA) } | a=term '%' b=factor { _Py_BinOp(a, Mod, b, EXTRA) } | a=term '@' b=factor { CHECK_VERSION(5, "The '@' operator is", _Py_BinOp(a, MatMult, b, EXTRA)) } | factor factor[expr_ty] (memo): | '+' a=factor { _Py_UnaryOp(UAdd, a, EXTRA) } | '-' a=factor { _Py_UnaryOp(USub, a, EXTRA) } | '~' a=factor { _Py_UnaryOp(Invert, a, EXTRA) } | power power[expr_ty]: | a=await_primary '**' b=factor { _Py_BinOp(a, Pow, b, EXTRA) } | await_primary await_primary[expr_ty] (memo): | AWAIT a=primary { CHECK_VERSION(5, "Await expressions are", _Py_Await(a, EXTRA)) } | primary primary[expr_ty]: | invalid_primary # must be before 'primary genexp' because of invalid_genexp | a=primary '.'
b=NAME { _Py_Attribute(a, b->v.Name.id, Load, EXTRA) } | a=primary b=genexp { _Py_Call(a, CHECK(_PyPegen_singleton_seq(p, b)), NULL, EXTRA) } | a=primary '(' b=[arguments] ')' { _Py_Call(a, (b) ? ((expr_ty) b)->v.Call.args : NULL, (b) ? ((expr_ty) b)->v.Call.keywords : NULL, EXTRA) } | a=primary '[' b=slices ']' { _Py_Subscript(a, b, Load, EXTRA) } | atom slices[expr_ty]: | a=slice !',' { a } | a=','.slice+ [','] { _Py_Tuple(a, Load, EXTRA) } slice[expr_ty]: | a=[expression] ':' b=[expression] c=[':' d=[expression] { d }] { _Py_Slice(a, b, c, EXTRA) } | a=expression { a } atom[expr_ty]: | NAME | 'True' { _Py_Constant(Py_True, NULL, EXTRA) } | 'False' { _Py_Constant(Py_False, NULL, EXTRA) } | 'None' { _Py_Constant(Py_None, NULL, EXTRA) } | '__peg_parser__' { RAISE_SYNTAX_ERROR("You found it!") } | &STRING strings | NUMBER | &'(' (tuple | group | genexp) | &'[' (list | listcomp) | &'{' (dict | set | dictcomp | setcomp) | '...' { _Py_Constant(Py_Ellipsis, NULL, EXTRA) } strings[expr_ty] (memo): a=STRING+ { _PyPegen_concatenate_strings(p, a) } list[expr_ty]: | '[' a=[star_named_expressions] ']' { _Py_List(a, Load, EXTRA) } listcomp[expr_ty]: | '[' a=named_expression ~ b=for_if_clauses ']' { _Py_ListComp(a, b, EXTRA) } | invalid_comprehension tuple[expr_ty]: | '(' a=[y=star_named_expression ',' z=[star_named_expressions] { _PyPegen_seq_insert_in_front(p, y, z) } ] ')' { _Py_Tuple(a, Load, EXTRA) } group[expr_ty]: | '(' a=(yield_expr | named_expression) ')' { a } | invalid_group genexp[expr_ty]: | '(' a=named_expression ~ b=for_if_clauses ')' { _Py_GeneratorExp(a, b, EXTRA) } | invalid_comprehension set[expr_ty]: '{' a=star_named_expressions '}' { _Py_Set(a, EXTRA) } setcomp[expr_ty]: | '{' a=named_expression ~ b=for_if_clauses '}' { _Py_SetComp(a, b, EXTRA) } | invalid_comprehension dict[expr_ty]: | '{' a=[double_starred_kvpairs] '}' { _Py_Dict(CHECK(_PyPegen_get_keys(p, a)), CHECK(_PyPegen_get_values(p, a)), EXTRA) } dictcomp[expr_ty]: | '{' a=kvpair b=for_if_clauses '}' { _Py_DictComp(a->key, a->value, b, EXTRA) } | invalid_dict_comprehension double_starred_kvpairs[asdl_seq*]: a=','.double_starred_kvpair+ [','] { a } double_starred_kvpair[KeyValuePair*]: | '**' a=bitwise_or { _PyPegen_key_value_pair(p, NULL, a) } | kvpair kvpair[KeyValuePair*]: a=expression ':' b=expression { _PyPegen_key_value_pair(p, a, b) } for_if_clauses[asdl_seq*]: | for_if_clause+ for_if_clause[comprehension_ty]: | ASYNC 'for' a=star_targets 'in' ~ b=disjunction c=('if' z=disjunction { z })* { CHECK_VERSION(6, "Async comprehensions are", _Py_comprehension(a, b, c, 1, p->arena)) } | 'for' a=star_targets 'in' ~ b=disjunction c=('if' z=disjunction { z })* { _Py_comprehension(a, b, c, 0, p->arena) } | invalid_for_target yield_expr[expr_ty]: | 'yield' 'from' a=expression { _Py_YieldFrom(a, EXTRA) } | 'yield' a=[star_expressions] { _Py_Yield(a, EXTRA) } arguments[expr_ty] (memo): | a=args [','] &')' { a } | invalid_arguments args[expr_ty]: | a=','.(starred_expression | named_expression !'=')+ b=[',' k=kwargs {k}] { _PyPegen_collect_call_seqs(p, a, b, EXTRA) } | a=kwargs { _Py_Call(_PyPegen_dummy_name(p), CHECK_NULL_ALLOWED(_PyPegen_seq_extract_starred_exprs(p, a)), CHECK_NULL_ALLOWED(_PyPegen_seq_delete_starred_exprs(p, a)), EXTRA) } kwargs[asdl_seq*]: | a=','.kwarg_or_starred+ ',' b=','.kwarg_or_double_starred+ { _PyPegen_join_sequences(p, a, b) } | ','.kwarg_or_starred+ | ','.kwarg_or_double_starred+ starred_expression[expr_ty]: | '*' a=expression { _Py_Starred(a, Load, EXTRA) } kwarg_or_starred[KeywordOrStarred*]: | a=NAME 
'=' b=expression { _PyPegen_keyword_or_starred(p, CHECK(_Py_keyword(a->v.Name.id, b, EXTRA)), 1) } | a=starred_expression { _PyPegen_keyword_or_starred(p, a, 0) } | invalid_kwarg kwarg_or_double_starred[KeywordOrStarred*]: | a=NAME '=' b=expression { _PyPegen_keyword_or_starred(p, CHECK(_Py_keyword(a->v.Name.id, b, EXTRA)), 1) } | '**' a=expression { _PyPegen_keyword_or_starred(p, CHECK(_Py_keyword(NULL, a, EXTRA)), 1) } | invalid_kwarg # NOTE: star_targets may contain *bitwise_or, targets may not. star_targets[expr_ty]: | a=star_target !',' { a } | a=star_target b=(',' c=star_target { c })* [','] { _Py_Tuple(CHECK(_PyPegen_seq_insert_in_front(p, a, b)), Store, EXTRA) } star_targets_list_seq[asdl_seq*]: a=','.star_target+ [','] { a } star_targets_tuple_seq[asdl_seq*]: | a=star_target b=(',' c=star_target { c })+ [','] { _PyPegen_seq_insert_in_front(p, a, b) } | a=star_target ',' { _PyPegen_singleton_seq(p, a) } star_target[expr_ty] (memo): | '*' a=(!'*' star_target) { _Py_Starred(CHECK(_PyPegen_set_expr_context(p, a, Store)), Store, EXTRA) } | target_with_star_atom target_with_star_atom[expr_ty] (memo): | a=t_primary '.' b=NAME !t_lookahead { _Py_Attribute(a, b->v.Name.id, Store, EXTRA) } | a=t_primary '[' b=slices ']' !t_lookahead { _Py_Subscript(a, b, Store, EXTRA) } | star_atom star_atom[expr_ty]: | a=NAME { _PyPegen_set_expr_context(p, a, Store) } | '(' a=target_with_star_atom ')' { _PyPegen_set_expr_context(p, a, Store) } | '(' a=[star_targets_tuple_seq] ')' { _Py_Tuple(a, Store, EXTRA) } | '[' a=[star_targets_list_seq] ']' { _Py_List(a, Store, EXTRA) } single_target[expr_ty]: | single_subscript_attribute_target | a=NAME { _PyPegen_set_expr_context(p, a, Store) } | '(' a=single_target ')' { a } single_subscript_attribute_target[expr_ty]: | a=t_primary '.' b=NAME !t_lookahead { _Py_Attribute(a, b->v.Name.id, Store, EXTRA) } | a=t_primary '[' b=slices ']' !t_lookahead { _Py_Subscript(a, b, Store, EXTRA) } del_targets[asdl_seq*]: a=','.del_target+ [','] { a } del_target[expr_ty] (memo): | a=t_primary '.' b=NAME !t_lookahead { _Py_Attribute(a, b->v.Name.id, Del, EXTRA) } | a=t_primary '[' b=slices ']' !t_lookahead { _Py_Subscript(a, b, Del, EXTRA) } | del_t_atom del_t_atom[expr_ty]: | a=NAME { _PyPegen_set_expr_context(p, a, Del) } | '(' a=del_target ')' { _PyPegen_set_expr_context(p, a, Del) } | '(' a=[del_targets] ')' { _Py_Tuple(a, Del, EXTRA) } | '[' a=[del_targets] ']' { _Py_List(a, Del, EXTRA) } targets[asdl_seq*]: a=','.target+ [','] { a } target[expr_ty] (memo): | a=t_primary '.' b=NAME !t_lookahead { _Py_Attribute(a, b->v.Name.id, Store, EXTRA) } | a=t_primary '[' b=slices ']' !t_lookahead { _Py_Subscript(a, b, Store, EXTRA) } | t_atom t_primary[expr_ty]: | a=t_primary '.' b=NAME &t_lookahead { _Py_Attribute(a, b->v.Name.id, Load, EXTRA) } | a=t_primary '[' b=slices ']' &t_lookahead { _Py_Subscript(a, b, Load, EXTRA) } | a=t_primary b=genexp &t_lookahead { _Py_Call(a, CHECK(_PyPegen_singleton_seq(p, b)), NULL, EXTRA) } | a=t_primary '(' b=[arguments] ')' &t_lookahead { _Py_Call(a, (b) ? ((expr_ty) b)->v.Call.args : NULL, (b) ? ((expr_ty) b)->v.Call.keywords : NULL, EXTRA) } | a=atom &t_lookahead { a } t_lookahead: '(' | '[' | '.' 
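# (Editorial note; not part of the upstream grammar. The &t_lookahead /
# !t_lookahead guards above split assignment targets in two: in
# `a.b.c = 1`, `t_primary` matches `a.b` because each step is still
# followed by '.', '[' or '(', and the final `.c` is consumed by the
# target rule's own `'.' b=NAME !t_lookahead`. A bare NAME target skips
# t_primary entirely and falls through to star_atom / t_atom.)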
t_atom[expr_ty]: | a=NAME { _PyPegen_set_expr_context(p, a, Store) } | '(' a=target ')' { _PyPegen_set_expr_context(p, a, Store) } | '(' b=[targets] ')' { _Py_Tuple(b, Store, EXTRA) } | '[' b=[targets] ']' { _Py_List(b, Store, EXTRA) } # From here on, there are rules for invalid syntax with specialised error messages invalid_arguments: | args ',' '*' { RAISE_SYNTAX_ERROR("iterable argument unpacking follows keyword argument unpacking") } | a=expression for_if_clauses ',' [args | expression for_if_clauses] { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "Generator expression must be parenthesized") } | a=args for_if_clauses { _PyPegen_nonparen_genexp_in_call(p, a) } | args ',' a=expression for_if_clauses { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "Generator expression must be parenthesized") } | a=args ',' args { _PyPegen_arguments_parsing_error(p, a) } invalid_kwarg: | !(NAME '=') a=expression b='=' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION( a, "expression cannot contain assignment, perhaps you meant \"==\"?") } invalid_named_expression: | a=expression ':=' expression { RAISE_SYNTAX_ERROR_KNOWN_LOCATION( a, "cannot use assignment expressions with %s", _PyPegen_get_expr_name(a)) } invalid_assignment: | a=invalid_ann_assign_target ':' expression { RAISE_SYNTAX_ERROR_KNOWN_LOCATION( a, "only single target (not %s) can be annotated", _PyPegen_get_expr_name(a) )} | a=star_named_expression ',' star_named_expressions* ':' expression { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "only single target (not tuple) can be annotated") } | a=expression ':' expression { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "illegal target for annotation") } | (star_targets '=')* a=star_expressions '=' { RAISE_SYNTAX_ERROR_INVALID_TARGET(STAR_TARGETS, a) } | (star_targets '=')* a=yield_expr '=' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "assignment to yield expression not possible") } | a=star_expressions augassign (yield_expr | star_expressions) { RAISE_SYNTAX_ERROR_KNOWN_LOCATION( a, "'%s' is an illegal expression for augmented assignment", _PyPegen_get_expr_name(a) )} invalid_ann_assign_target[expr_ty]: | list | tuple | '(' a=invalid_ann_assign_target ')' { a } invalid_del_stmt: | 'del' a=star_expressions { RAISE_SYNTAX_ERROR_INVALID_TARGET(DEL_TARGETS, a) } invalid_block: | NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block") } invalid_primary: | primary a='{' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "invalid syntax") } invalid_comprehension: | ('[' | '(' | '{') a=starred_expression for_if_clauses { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "iterable unpacking cannot be used in comprehension") } invalid_dict_comprehension: | '{' a='**' bitwise_or for_if_clauses '}' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "dict unpacking cannot be used in dict comprehension") } invalid_parameters: | param_no_default* (slash_with_default | param_with_default+) param_no_default { RAISE_SYNTAX_ERROR("non-default argument follows default argument") } invalid_lambda_parameters: | lambda_param_no_default* (lambda_slash_with_default | lambda_param_with_default+) lambda_param_no_default { RAISE_SYNTAX_ERROR("non-default argument follows default argument") } invalid_star_etc: | '*' (')' | ',' (')' | '**')) { RAISE_SYNTAX_ERROR("named arguments must follow bare *") } | '*' ',' TYPE_COMMENT { RAISE_SYNTAX_ERROR("bare * has associated type comment") } invalid_lambda_star_etc: | '*' (':' | ',' (':' | '**')) { RAISE_SYNTAX_ERROR("named arguments must follow bare *") } invalid_double_type_comments: | TYPE_COMMENT NEWLINE TYPE_COMMENT NEWLINE INDENT { 
RAISE_SYNTAX_ERROR("Cannot have two type comments on def") } invalid_with_item: | expression 'as' a=expression { RAISE_SYNTAX_ERROR_INVALID_TARGET(STAR_TARGETS, a) } invalid_for_target: | ASYNC? 'for' a=star_expressions { RAISE_SYNTAX_ERROR_INVALID_TARGET(FOR_TARGETS, a) } invalid_group: | '(' a=starred_expression ')' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "can't use starred expression here") } invalid_import_from_targets: | import_from_as_names ',' { RAISE_SYNTAX_ERROR("trailing comma not allowed without surrounding parentheses") }LibCST-1.2.0/native/libcst/LICENSE000066400000000000000000000111341456464173300163620ustar00rootroot00000000000000All contributions towards LibCST are MIT licensed. Some Python files have been derived from the standard library and are therefore PSF licensed. Modifications on these files are dual licensed (both MIT and PSF). These files are: - libcst/_parser/base_parser.py - libcst/_parser/parso/utils.py - libcst/_parser/parso/pgen2/generator.py - libcst/_parser/parso/pgen2/grammar_parser.py - libcst/_parser/parso/python/py_token.py - libcst/_parser/parso/python/tokenize.py - libcst/_parser/parso/tests/test_fstring.py - libcst/_parser/parso/tests/test_tokenize.py - libcst/_parser/parso/tests/test_utils.py - native/libcst/src/tokenizer/core/mod.rs - native/libcst/src/tokenizer/core/string_types.rs Some Python files have been taken from dataclasses and are therefore Apache licensed. Modifications on these files are licensed under Apache 2.0 license. These files are: - libcst/_add_slots.py ------------------------------------------------------------------------------- MIT License Copyright (c) Meta Platforms, Inc. and affiliates. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------- PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. 2. 
Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. ------------------------------------------------------------------------------- APACHE LICENSE, VERSION 2.0 http://www.apache.org/licenses/LICENSE-2.0 LibCST-1.2.0/native/libcst/README.md000066400000000000000000000143711456464173300166420ustar00rootroot00000000000000# libcst/native A native extension to enable parsing of new Python grammar in LibCST. The extension is written in Rust, and exposed to Python using [PyO3](https://pyo3.rs/). This is packaged together with libcst, and can be imported from `libcst.native`. By default the LibCST APIs use this module for all parsing. Later on, the parser library might be packaged separately as [a Rust crate](https://crates.io). Pull requests towards this are much appreciated. ## Goals 1. Adopt the CPython grammar definition as closely as possible to reduce maintenance burden. This means using a PEG parser. 2. Feature-parity with the pure-python LibCST parser: the API should be easy to use from Python, support parsing with a target version, bytes and strings as inputs, etc. 3. [future] Performance. The aspirational goal is to be within 2x CPython performance, which would enable LibCST to be used in interactive use cases (think IDEs). 4. [future] Error recovery. The parser should be able to handle partially complete documents, returning a CST for the syntactically correct parts, and a list of errors found. 
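
For orientation, here is a minimal sketch of driving the crate from Rust. This is assumed usage, not part of the crate or its tests; `parse_module`, `prettify_error`, the `Codegen` trait, and `CodegenState`'s `Display` impl are the public entry points defined in `src/lib.rs` and `src/nodes/codegen.rs`, and exercised by `src/bin.rs` below:

```rust
use libcst_native::{parse_module, prettify_error, Codegen};

/// Parse `src` into a CST and generate it back, demonstrating a lossless roundtrip.
fn roundtrip(src: &str) -> String {
    match parse_module(src, None) {
        Ok(module) => {
            // CodegenState collects the emitted tokens; Display turns it into a String.
            let mut state = Default::default();
            module.codegen(&mut state);
            state.to_string()
        }
        Err(e) => panic!("{}", prettify_error(e, "example")),
    }
}

fn main() {
    let src = "x = (1 + 2)  # comment\n";
    assert_eq!(roundtrip(src), src);
}
```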
## Structure

The extension is organized into two Rust crates: `libcst_derive` contains some
macros to facilitate various features of CST nodes, and `libcst` contains the
`parser` itself (including the Python grammar), a `tokenizer` implementation by
@bgw, and a very basic representation of CST `nodes`.

Parsing is done by

1. **tokenizing** the input UTF-8 string (bytes are not supported at the Rust
   layer; they are converted to UTF-8 strings by the Python wrapper)
2. running the **PEG parser** on the tokenized input, which also captures
   certain anchor tokens in the resulting syntax tree
3. using the anchor tokens to **inflate** the syntax tree into a proper CST

These steps are wrapped into a high-level `parse_module` API
[here](https://github.com/Instagram/LibCST/blob/main/native/libcst/src/lib.rs#L43),
along with `parse_statement` and `parse_expression` functions which all accept
the input string and an optional encoding. These Rust functions are exposed to
Python [here](https://github.com/Instagram/LibCST/blob/main/native/libcst/src/py.rs)
using the excellent [PyO3](https://pyo3.rs/) library, plus an `IntoPy` trait
which is mostly implemented via a macro in `libcst_derive`.

## Hacking

### Nodes

All CST nodes are marked with the `#[cst_node]` proc macro, which duplicates the
node types; for a node named `Foo`, there's:

- `DeflatedFoo`, which is the output of the parsing phase and isn't exposed
  through the API of the crate.
  - it has two lifetime parameters: `'r` (or `'input` in the grammar) is the
    lifetime of `Token` references, and `'a` is the lifetime of `str` slices
    from the original input
  - `TokenRef` fields are contained here, while whitespace fields aren't
  - if there aren't any fields that refer to other CST nodes or `TokenRef`s,
    there's an extra (private) `_phantom` field that "contains" the two
    lifetime parameters (this is to make the type parameters of all
    `DeflatedFoo` types uniform)
  - it implements the `Inflate` trait, which converts `DeflatedFoo` into `Foo`
    (see the sketch at the end of this section)
- `Foo`, which is what's publicly exposed in the crate and is the output of
  `Inflate`ing `DeflatedFoo`.
  - it only retains the second (`'a`) lifetime parameter of `DeflatedFoo` to
    refer back to slices of the original input string
  - whitespace fields are contained here, but `TokenRef`s aren't
  - `IntoPy` is implemented for it (assuming the `py` crate feature is
    enabled), which contains code to translate `Foo` back into a Python
    object; hence, the fields on `Foo` match the Python CST node
    implementations (barring fields marked with `#[skip_py]`)

### Grammar

The grammar is mostly a straightforward translation from the
[CPython grammar](https://github.com/python/cpython/blob/main/Grammar/python.gram),
with some exceptions:

* The output of a grammar rule is a deflated CST node that captures the AST
  plus additional anchor token references used for whitespace parsing later on.
* Rules in the grammar must be strongly typed, as enforced by the Rust
  compiler. The CPython grammar rules are a bit more loosely typed in
  comparison.
* Some features of the CPython PEG parser are not supported by rust-peg:
  keywords, mutually recursive rules, special `invalid_` rules, the `~`
  operator, and terminating the parser early.

The PEG parser is run on a `Vec` of `Token`s (more precisely
`&'input Vec<Token<'input>>`), and tries its best to avoid allocating any
strings, working only with references. As such, the output nodes don't own any
strings, but refer to slices of the original input (hence the `'input, 'a`
lifetime parameters on almost all nodes).
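
To make the `DeflatedFoo`/`Foo` split concrete, here is a hand-written sketch of
roughly what the two generated types and their `Inflate` implementation look
like for a hypothetical node `Foo`. This is illustration only; the real
`#[cst_node]` output differs in details. Compare with real nodes like
`LeftParen` in `src/nodes/expression.rs`:

```rust
// Hypothetical node, for illustration only -- not actual macro output.
pub struct DeflatedFoo<'r, 'a> {
    pub value: &'a str,            // borrowed slice of the original input
    pub(crate) tok: &'r Token<'a>, // anchor token; exists only pre-inflation
}

pub struct Foo<'a> {
    pub value: &'a str,
    // whitespace is parsed out of the anchor token during inflation
    pub whitespace_after: ParenthesizableWhitespace<'a>,
}

impl<'r, 'a> Inflate<'a> for DeflatedFoo<'r, 'a> {
    type Inflated = Foo<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        // Consume the deflated node, mutating the whitespace state stored in
        // the referenced token so it can't be assigned to a second node.
        let whitespace_after = parse_parenthesizable_whitespace(
            config,
            &mut self.tok.whitespace_after.borrow_mut(),
        )?;
        Ok(Foo {
            value: self.value,
            whitespace_after,
        })
    }
}
```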

### Whitespace parsing

The `Inflate` trait is responsible for taking a "deflated" skeleton CST node
and parsing out the relevant whitespace from the anchor tokens, to produce an
"inflated" (normal) CST node. In addition to the deflated node, inflation
requires a whitespace config object which contains global information required
for certain aspects of whitespace parsing, like the default indentation.
Inflation consumes the deflated node, while mutating the tokens referenced by
it. This is important to make sure whitespace is only ever assigned to at most
one CST node.

The `Inflate` trait implementation needs to ensure that all whitespace is
assigned to a CST node; this is generally verified using roundtrip tests
(i.e., parsing code, generating it back, and asserting that the original and
generated sources are byte-by-byte equal).

The general convention is that the top-most possible node owns a certain piece
of whitespace, which should be straightforward to achieve in a top-down parser
like `Inflate`. In cases where whitespace is shared between sibling nodes, the
leftmost node usually owns the whitespace, except in the case of trailing
commas and closing parentheses, where the latter owns the whitespace (for
backwards compatibility with the pure Python parser). See the implementation
of `inflate_element` for how this is done.

### Tests

In addition to running the Python test suite, you can run some tests written
in Rust with

```
cd native
cargo test
```

These include unit and roundtrip tests. Additionally, some benchmarks can be
run on x86-based architectures using `cargo bench`.

### Code Formatting

Use `cargo fmt` to format your code.
LibCST-1.2.0/native/libcst/benches/000077500000000000000000000000001456464173300167645ustar00rootroot00000000000000LibCST-1.2.0/native/libcst/benches/parser_benchmark.rs000066400000000000000000000123761456464173300226471ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree

use std::{
    path::{Component, PathBuf},
    time::Duration,
};

use criterion::{
    black_box, criterion_group, criterion_main, measurement::Measurement, BatchSize, BenchmarkId,
    Criterion, Throughput,
};
use itertools::Itertools;
use rayon::prelude::*;

use libcst_native::{
    parse_module, parse_tokens_without_whitespace, tokenize, Codegen, Config, Inflate,
};

#[cfg(not(windows))]
const NEWLINE: &str = "\n";
#[cfg(windows)]
const NEWLINE: &str = "\r\n";

fn load_all_fixtures_vec() -> Vec<String> {
    let mut path = PathBuf::from(file!());
    path.pop();
    path.pop();
    path = path
        .components()
        .skip(1)
        .chain(
            vec!["tests".as_ref(), "fixtures".as_ref()]
                .into_iter()
                .map(Component::Normal),
        )
        .collect();

    path.read_dir()
        .expect("read_dir")
        .into_iter()
        .map(|file| {
            let path = file.unwrap().path();
            std::fs::read_to_string(&path).expect("reading_file")
        })
        .collect()
}

fn load_all_fixtures() -> String {
    load_all_fixtures_vec().join(NEWLINE)
}

pub fn inflate_benchmarks<M: Measurement>(c: &mut Criterion<M>) {
    let fixture = load_all_fixtures();
    let tokens = tokenize(fixture.as_str()).expect("tokenize failed");
    let tokvec = tokens.clone().into();
    let mut group = c.benchmark_group("inflate");
    group.bench_function("all", |b| {
        b.iter_batched(
            || {
                let conf = Config::new(fixture.as_str(), &tokens);
                let m = parse_tokens_without_whitespace(&tokvec, fixture.as_str(), None)
                    .expect("parse failed");
                (conf, m)
            },
            |(conf, m)| black_box(m.inflate(&conf)),
            BatchSize::SmallInput,
        )
    });
    group.finish();
}

pub fn parser_benchmarks<M: Measurement>(c: &mut Criterion<M>) {
    let fixture = load_all_fixtures();
    let mut group = c.benchmark_group("parse");
    group.measurement_time(Duration::from_secs(15));
    group.bench_function("all", |b| {
        b.iter_batched(
            || tokenize(fixture.as_str()).expect("tokenize failed").into(),
            |tokens| {
                black_box(drop(parse_tokens_without_whitespace(
                    &tokens,
                    fixture.as_str(),
                    None,
                )))
            },
            BatchSize::SmallInput,
        )
    });
    group.finish();
}

pub fn codegen_benchmarks<M: Measurement>(c: &mut Criterion<M>) {
    let input = load_all_fixtures();
    let m = parse_module(input.as_str(), None).expect("parse failed");
    let mut group = c.benchmark_group("codegen");
    group.bench_function("all", |b| {
        b.iter(|| {
            let mut state = Default::default();
            #[allow(clippy::unit_arg)]
            black_box(m.codegen(&mut state));
        })
    });
    group.finish();
}

pub fn tokenize_benchmarks<M: Measurement>(c: &mut Criterion<M>) {
    let input = load_all_fixtures();
    let mut group = c.benchmark_group("tokenize");
    group.measurement_time(Duration::from_secs(15));
    group.bench_function("all", |b| b.iter(|| black_box(tokenize(input.as_str()))));
    group.finish();
}

pub fn parse_into_cst_benchmarks<M: Measurement>(c: &mut Criterion<M>) {
    let fixture = load_all_fixtures();
    let mut group = c.benchmark_group("parse_into_cst");
    group.measurement_time(Duration::from_secs(15));
    group.bench_function("all", |b| {
        b.iter(|| black_box(parse_module(&fixture, None)))
    });
    group.finish();
}
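
// Parses the full fixture set on rayon thread pools of increasing size
// (1 to 9 threads) to measure how CST parsing scales across cores;
// throughput is reported per parsed fixture.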
pub fn parse_into_cst_multithreaded_benchmarks<M: Measurement>(
    c: &mut Criterion<M>,
) where
    <M as Measurement>::Value: Send,
{
    let fixtures = load_all_fixtures_vec();
    let mut group = c.benchmark_group("parse_into_cst_parallel");
    group.measurement_time(Duration::from_secs(15));
    group.warm_up_time(Duration::from_secs(5));
    for thread_count in 1..10 {
        let expanded_fixtures = (0..thread_count)
            .flat_map(|_| fixtures.clone())
            .collect_vec();
        group.throughput(Throughput::Elements(expanded_fixtures.len() as u64));
        group.bench_with_input(
            BenchmarkId::from_parameter(thread_count),
            &thread_count,
            |b, thread_count| {
                let thread_pool = rayon::ThreadPoolBuilder::new()
                    .num_threads(*thread_count)
                    .build()
                    .unwrap();
                thread_pool.install(|| {
                    b.iter_with_large_drop(|| {
                        expanded_fixtures
                            .par_iter()
                            .map(|contents| black_box(parse_module(&contents, None)))
                            .collect::<Vec<_>>()
                    });
                });
            },
        );
    }
    group.finish();
}

criterion_group!(
    name=benches;
    config=Criterion::default();
    targets=parser_benchmarks, codegen_benchmarks, inflate_benchmarks, tokenize_benchmarks, parse_into_cst_benchmarks, parse_into_cst_multithreaded_benchmarks
);
criterion_main!(benches);
LibCST-1.2.0/native/libcst/src/000077500000000000000000000000001456464173300161445ustar00rootroot00000000000000LibCST-1.2.0/native/libcst/src/bin.rs000066400000000000000000000016441456464173300172670ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree

use libcst_native::*;
use std::{
    env,
    io::{self, Read},
    process::exit,
};

pub fn main() {
    let mut str = std::string::String::new();
    io::stdin().read_to_string(&mut str).unwrap();
    match parse_module(str.as_ref(), None) {
        Err(e) => {
            eprintln!("{}", prettify_error(e, "stdin"));
            exit(1);
        }
        Ok(m) => {
            let first_arg = env::args().nth(1).unwrap_or_else(|| "".to_string());
            if first_arg == "-d" {
                println!("{:#?}", m);
            }
            if first_arg != "-n" {
                let mut state = Default::default();
                m.codegen(&mut state);
                print!("{}", state.to_string());
            }
        }
    };
}
LibCST-1.2.0/native/libcst/src/lib.rs000066400000000000000000000130661456464173300172660ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use std::cmp::{max, min};

mod tokenizer;
pub use tokenizer::whitespace_parser::Config;
use tokenizer::{whitespace_parser, TokConfig, Token, TokenIterator};

mod nodes;
use nodes::deflated::Module as DeflatedModule;
pub use nodes::*;

mod parser;
use parser::{ParserError, Result, TokVec};

#[cfg(feature = "py")]
pub mod py;

pub fn tokenize(text: &str) -> Result<Vec<Token>> {
    let iter = TokenIterator::new(
        text,
        &TokConfig {
            async_hacks: false,
            split_fstring: true,
        },
    );
    iter.collect::<std::result::Result<Vec<_>, _>>()
        .map_err(|err| ParserError::TokenizerError(err, text))
}

pub fn parse_module<'a>(
    mut module_text: &'a str,
    encoding: Option<&str>,
) -> Result<'a, Module<'a>> {
    // Strip UTF-8 BOM
    if let Some(stripped) = module_text.strip_prefix('\u{feff}') {
        module_text = stripped;
    }
    let tokens = tokenize(module_text)?;
    let conf = whitespace_parser::Config::new(module_text, &tokens);
    let tokvec = tokens.into();
    let m = parse_tokens_without_whitespace(&tokvec, module_text, encoding)?;
    Ok(m.inflate(&conf)?)
}
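
// The entry points below expose the individual parsing phases:
// `parse_tokens_without_whitespace` runs only the PEG pass over an existing
// token vector and returns a deflated syntax tree, while `parse_statement`
// and `parse_expression` are conveniences that run the full
// tokenize -> parse -> inflate pipeline on a smaller grammar entry point.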
pub fn parse_tokens_without_whitespace<'r, 'a>(
    tokens: &'r TokVec<'a>,
    module_text: &'a str,
    encoding: Option<&str>,
) -> Result<'a, DeflatedModule<'r, 'a>> {
    let m = parser::python::file(tokens, module_text, encoding)
        .map_err(|err| ParserError::ParserError(err, module_text))?;
    Ok(m)
}

pub fn parse_statement(text: &str) -> Result<Statement> {
    let tokens = tokenize(text)?;
    let conf = whitespace_parser::Config::new(text, &tokens);
    let tokvec = tokens.into();
    let stm = parser::python::statement_input(&tokvec, text)
        .map_err(|err| ParserError::ParserError(err, text))?;
    Ok(stm.inflate(&conf)?)
}

pub fn parse_expression(text: &str) -> Result<Expression> {
    let tokens = tokenize(text)?;
    let conf = whitespace_parser::Config::new(text, &tokens);
    let tokvec = tokens.into();
    let expr = parser::python::expression_input(&tokvec, text)
        .map_err(|err| ParserError::ParserError(err, text))?;
    Ok(expr.inflate(&conf)?)
}

// n starts from 1
fn bol_offset(source: &str, n: i32) -> usize {
    if n <= 1 {
        return 0;
    }
    source
        .match_indices('\n')
        .nth((n - 2) as usize)
        .map(|(index, _)| index + 1)
        .unwrap_or_else(|| source.len())
}

pub fn prettify_error(err: ParserError, label: &str) -> std::string::String {
    match err {
        ParserError::ParserError(e, module_text) => {
            let loc = e.location;
            let context = 1;
            let start_offset = bol_offset(module_text, loc.start_pos.line as i32 - context);
            let end_offset = bol_offset(module_text, loc.end_pos.line as i32 + context + 1);
            let source = &module_text[start_offset..end_offset];
            let start = loc.start_pos.offset - start_offset;
            let end = loc.end_pos.offset - start_offset;
            chic::Error::new(label)
                .error(
                    max(
                        1,
                        loc.start_pos
                            .line
                            .checked_sub(context as usize)
                            .unwrap_or(1),
                    ),
                    start,
                    if start == end {
                        min(end + 1, end_offset - start_offset + 1)
                    } else {
                        end
                    },
                    source,
                    format!(
                        "expected {} {} -> {}",
                        e.expected, loc.start_pos, loc.end_pos
                    ),
                )
                .to_string()
        }
        e => format!("Parse error for {}: {}", label, e),
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use tokenizer::TokError;

    #[test]
    fn test_simple() {
        let n = parse_module("1_", None);
        assert_eq!(
            n.err().unwrap(),
            ParserError::TokenizerError(TokError::BadDecimal, "1_")
        );
    }

    #[test]
    fn test_bare_minimum_funcdef() {
        parse_module("def f(): ...", None).expect("parse error");
    }

    #[test]
    fn test_funcdef_params() {
        parse_module("def g(a, b): ...", None).expect("parse error");
    }

    #[test]
    fn test_single_statement_with_no_newline() {
        for src in &[
            "(\n \\\n)",
            "(\n \\\n)",
            "(\n '''\n''')",
            "del _",
            "if _:\n '''\n)'''",
            "if _:\n ('''\n''')",
            "if _:\n '''\n '''",
            "if _:\n '''\n ''' ",
        ] {
            parse_module(src, None).unwrap_or_else(|e| panic!("'{}' doesn't parse: {}", src, e));
        }
    }

    #[test]
    fn bol_offset_first_line() {
        assert_eq!(0, bol_offset("hello", 1));
        assert_eq!(0, bol_offset("hello", 0));
        assert_eq!(0, bol_offset("hello\nhello", 1));
        assert_eq!(0, bol_offset("hello\nhello", 0));
    }

    #[test]
    fn bol_offset_second_line() {
        assert_eq!(5, bol_offset("hello", 2));
        assert_eq!(6, bol_offset("hello\nhello", 2));
        assert_eq!(6, bol_offset("hello\nhello\nhello", 2));
    }

    #[test]
    fn bol_offset_last_line() {
        assert_eq!(5, bol_offset("hello", 3));
        assert_eq!(11, bol_offset("hello\nhello", 3));
        assert_eq!(12, bol_offset("hello\nhello\nhello", 3));
    }
}
LibCST-1.2.0/native/libcst/src/nodes/000077500000000000000000000000001456464173300172545ustar00rootroot00000000000000LibCST-1.2.0/native/libcst/src/nodes/codegen.rs000066400000000000000000000030321456464173300212240ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
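
// `CodegenState` accumulates the generated source text: `add_token` appends a
// token to the output buffer, while `indent`/`dedent` push and pop a stack of
// indentation strings that `add_indent` replays at the start of each line.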

use std::fmt;

#[derive(Debug)]
pub struct CodegenState<'a> {
    pub tokens: String,
    pub indent_tokens: Vec<&'a str>,
    pub default_newline: &'a str,
    pub default_indent: &'a str,
}

impl<'a> CodegenState<'a> {
    pub fn indent(&mut self, v: &'a str) {
        self.indent_tokens.push(v);
    }
    pub fn dedent(&mut self) {
        self.indent_tokens.pop();
    }
    pub fn add_indent(&mut self) {
        self.tokens.extend(self.indent_tokens.iter().cloned());
    }
    pub fn add_token(&mut self, tok: &'a str) {
        self.tokens.push_str(tok);
    }
}

impl<'a> fmt::Display for CodegenState<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.tokens)
    }
}

pub trait Codegen<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>);
}

impl<'a, T> Codegen<'a> for Option<T>
where
    T: Codegen<'a>,
{
    fn codegen(&self, state: &mut CodegenState<'a>) {
        if let Some(s) = &self {
            s.codegen(state);
        }
    }
}

#[cfg(windows)]
const LINE_ENDING: &str = "\r\n";
#[cfg(not(windows))]
const LINE_ENDING: &str = "\n";

impl<'a> Default for CodegenState<'a> {
    fn default() -> Self {
        Self {
            default_newline: LINE_ENDING,
            default_indent: "    ",
            indent_tokens: Default::default(),
            tokens: Default::default(),
        }
    }
}
LibCST-1.2.0/native/libcst/src/nodes/expression.rs000066400000000000000000002344011456464173300220250ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use std::mem::swap;

use crate::{
    inflate_helpers::adjust_parameters_trailing_whitespace,
    nodes::{
        op::*,
        statement::*,
        traits::{Inflate, ParenthesizedDeflatedNode, ParenthesizedNode, Result, WithComma},
        whitespace::ParenthesizableWhitespace,
        Annotation, AssignEqual, AssignTargetExpression, BinaryOp, BooleanOp, Codegen,
        CodegenState, Colon, Comma, CompOp, Dot, UnaryOp,
    },
    tokenizer::{
        whitespace_parser::{parse_parenthesizable_whitespace, Config},
        Token,
    },
};

#[cfg(feature = "py")]
use libcst_derive::TryIntoPy;
use libcst_derive::{cst_node, Codegen, Inflate, ParenthesizedDeflatedNode, ParenthesizedNode};

type TokenRef<'r, 'a> = &'r Token<'a>;

#[cst_node(Default)]
pub struct Parameters<'a> {
    pub params: Vec<Param<'a>>,
    pub star_arg: Option<StarArg<'a>>,
    pub kwonly_params: Vec<Param<'a>>,
    pub star_kwarg: Option<Param<'a>>,
    pub posonly_params: Vec<Param<'a>>,
    pub posonly_ind: Option<ParamSlash<'a>>,
}

impl<'a> Parameters<'a> {
    pub fn is_empty(&self) -> bool {
        self.params.is_empty()
            && self.star_arg.is_none()
            && self.kwonly_params.is_empty()
            && self.star_kwarg.is_none()
            && self.posonly_params.is_empty()
            && self.posonly_ind.is_none()
    }
}

impl<'r, 'a> DeflatedParameters<'r, 'a> {
    pub fn is_empty(&self) -> bool {
        self.params.is_empty()
            && self.star_arg.is_none()
            && self.kwonly_params.is_empty()
            && self.star_kwarg.is_none()
            && self.posonly_params.is_empty()
            && self.posonly_ind.is_none()
    }
}

impl<'r, 'a> Inflate<'a> for DeflatedParameters<'r, 'a> {
    type Inflated = Parameters<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        let posonly_params = self.posonly_params.inflate(config)?;
        let posonly_ind = self.posonly_ind.inflate(config)?;
        let params = self.params.inflate(config)?;
        let star_arg = self.star_arg.inflate(config)?;
        let kwonly_params = self.kwonly_params.inflate(config)?;
        let star_kwarg = self.star_kwarg.inflate(config)?;
        Ok(Self::Inflated {
            params,
            star_arg,
            kwonly_params,
            star_kwarg,
            posonly_params,
            posonly_ind,
        })
    }
}

#[cst_node(Inflate)]
pub enum StarArg<'a> {
    Star(Box<ParamStar<'a>>),
    Param(Box<Param<'a>>),
}

impl<'a> Codegen<'a> for Parameters<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        let params_after_kwonly =
self.star_kwarg.is_some(); let params_after_regular = !self.kwonly_params.is_empty() || params_after_kwonly; let params_after_posonly = !self.params.is_empty() || params_after_regular; let star_included = self.star_arg.is_some() || !self.kwonly_params.is_empty(); for p in &self.posonly_params { p.codegen(state, None, true); } match &self.posonly_ind { Some(ind) => ind.codegen(state, params_after_posonly), _ => { if !self.posonly_params.is_empty() { if params_after_posonly { state.add_token("/, "); } else { state.add_token("/"); } } } } let param_size = self.params.len(); for (i, p) in self.params.iter().enumerate() { p.codegen(state, None, params_after_regular || i < param_size - 1); } let kwonly_size = self.kwonly_params.len(); match &self.star_arg { None => { if star_included { state.add_token("*, ") } } Some(StarArg::Param(p)) => p.codegen( state, Some("*"), kwonly_size > 0 || self.star_kwarg.is_some(), ), Some(StarArg::Star(s)) => s.codegen(state), } for (i, p) in self.kwonly_params.iter().enumerate() { p.codegen(state, None, params_after_kwonly || i < kwonly_size - 1); } if let Some(star) = &self.star_kwarg { star.codegen(state, Some("**"), false) } } } #[cst_node] pub struct ParamSlash<'a> { pub comma: Option>, pub whitespace_after: ParenthesizableWhitespace<'a>, pub(crate) tok: TokenRef<'a>, } impl<'a> ParamSlash<'a> { fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { state.add_token("/"); self.whitespace_after.codegen(state); match (&self.comma, default_comma) { (Some(comma), _) => comma.codegen(state), (None, true) => state.add_token(", "), _ => {} } } } impl<'r, 'a> Inflate<'a> for DeflatedParamSlash<'r, 'a> { type Inflated = ParamSlash<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after = parse_parenthesizable_whitespace(config, &mut self.tok.whitespace_after.borrow_mut())?; let comma = self.comma.inflate(config)?; Ok(Self::Inflated { comma, whitespace_after, }) } } #[cst_node] pub struct ParamStar<'a> { pub comma: Comma<'a>, } impl<'a> Codegen<'a> for ParamStar<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("*"); self.comma.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedParamStar<'r, 'a> { type Inflated = ParamStar<'a>; fn inflate(self, config: &Config<'a>) -> Result { let comma = self.comma.inflate(config)?; Ok(Self::Inflated { comma }) } } #[cst_node(ParenthesizedNode, Default)] pub struct Name<'a> { pub value: &'a str, pub lpar: Vec>, pub rpar: Vec>, } impl<'r, 'a> Inflate<'a> for DeflatedName<'r, 'a> { type Inflated = Name<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { value: self.value, lpar, rpar, }) } } impl<'a> Codegen<'a> for Name<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token(self.value); }); } } #[cst_node] pub struct Param<'a> { pub name: Name<'a>, pub annotation: Option>, pub equal: Option>, pub default: Option>, pub comma: Option>, pub star: Option<&'a str>, pub whitespace_after_star: ParenthesizableWhitespace<'a>, pub whitespace_after_param: ParenthesizableWhitespace<'a>, pub(crate) star_tok: Option>, } impl<'r, 'a> Inflate<'a> for DeflatedParam<'r, 'a> { type Inflated = Param<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let name = self.name.inflate(config)?; let annotation = self.annotation.inflate(config)?; let equal = self.equal.inflate(config)?; let default = self.default.inflate(config)?; let comma = 
self.comma.inflate(config)?; let whitespace_after_star = if let Some(star_tok) = self.star_tok.as_mut() { parse_parenthesizable_whitespace(config, &mut star_tok.whitespace_after.borrow_mut())? } else { Default::default() }; let whitespace_after_param = Default::default(); // TODO Ok(Self::Inflated { name, annotation, equal, default, comma, star: self.star, whitespace_after_star, whitespace_after_param, }) } } impl<'r, 'a> Default for DeflatedParam<'r, 'a> { fn default() -> Self { Self { name: Default::default(), annotation: None, equal: None, default: None, comma: None, star: Some(""), // Note: this preserves a quirk of the pure python parser star_tok: None, } } } impl<'a> Param<'a> { fn codegen( &self, state: &mut CodegenState<'a>, default_star: Option<&'a str>, default_comma: bool, ) { match (self.star, default_star) { (Some(star), _) => state.add_token(star), (None, Some(star)) => state.add_token(star), _ => {} } self.whitespace_after_star.codegen(state); self.name.codegen(state); if let Some(ann) = &self.annotation { ann.codegen(state, ":"); } match (&self.equal, &self.default) { (Some(equal), Some(def)) => { equal.codegen(state); def.codegen(state); } (None, Some(def)) => { state.add_token(" = "); def.codegen(state); } _ => {} } match &self.comma { Some(comma) => comma.codegen(state), None if default_comma => state.add_token(", "), _ => {} } self.whitespace_after_param.codegen(state); } } #[cst_node] pub struct Arg<'a> { pub value: Expression<'a>, pub keyword: Option>, pub equal: Option>, pub comma: Option>, pub star: &'a str, pub whitespace_after_star: ParenthesizableWhitespace<'a>, pub whitespace_after_arg: ParenthesizableWhitespace<'a>, pub(crate) star_tok: Option>, } impl<'r, 'a> Inflate<'a> for DeflatedArg<'r, 'a> { type Inflated = Arg<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let whitespace_after_star = if let Some(star_tok) = self.star_tok.as_mut() { parse_parenthesizable_whitespace(config, &mut star_tok.whitespace_after.borrow_mut())? } else { Default::default() }; let keyword = self.keyword.inflate(config)?; let equal = self.equal.inflate(config)?; let value = self.value.inflate(config)?; let comma = self.comma.inflate(config)?; // whitespace_after_arg is handled in Call let whitespace_after_arg = Default::default(); Ok(Self::Inflated { value, keyword, equal, comma, star: self.star, whitespace_after_star, whitespace_after_arg, }) } } impl<'a> Arg<'a> { pub fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { state.add_token(self.star); self.whitespace_after_star.codegen(state); if let Some(kw) = &self.keyword { kw.codegen(state); } if let Some(eq) = &self.equal { eq.codegen(state); } else if self.keyword.is_some() { state.add_token(" = "); } self.value.codegen(state); if let Some(comma) = &self.comma { comma.codegen(state); } else if default_comma { state.add_token(", "); } self.whitespace_after_arg.codegen(state); } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedArg<'r, 'a> { fn with_comma(self, c: DeflatedComma<'r, 'a>) -> Self { Self { comma: Some(c), ..self } } } #[cst_node] #[derive(Default)] pub struct LeftParen<'a> { /// Any space that appears directly after this left parenthesis. 
pub whitespace_after: ParenthesizableWhitespace<'a>, pub(crate) lpar_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for LeftParen<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("("); self.whitespace_after.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedLeftParen<'r, 'a> { type Inflated = LeftParen<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*self.lpar_tok).whitespace_after.borrow_mut(), )?; Ok(Self::Inflated { whitespace_after }) } } #[cst_node] #[derive(Default)] pub struct RightParen<'a> { /// Any space that appears directly before this right parenthesis. pub whitespace_before: ParenthesizableWhitespace<'a>, pub(crate) rpar_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for RightParen<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token(")"); } } impl<'r, 'a> Inflate<'a> for DeflatedRightParen<'r, 'a> { type Inflated = RightParen<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.rpar_tok).whitespace_before.borrow_mut(), )?; Ok(Self::Inflated { whitespace_before }) } } #[cst_node(ParenthesizedNode, Codegen, Inflate)] pub enum Expression<'a> { Name(Box>), Ellipsis(Box>), Integer(Box>), Float(Box>), Imaginary(Box>), Comparison(Box>), UnaryOperation(Box>), BinaryOperation(Box>), BooleanOperation(Box>), Attribute(Box>), Tuple(Box>), Call(Box>), GeneratorExp(Box>), ListComp(Box>), SetComp(Box>), DictComp(Box>), List(Box>), Set(Box>), Dict(Box>), Subscript(Box>), StarredElement(Box>), IfExp(Box>), Lambda(Box>), Yield(Box>), Await(Box>), SimpleString(Box>), ConcatenatedString(Box>), FormattedString(Box>), NamedExpr(Box>), } #[cst_node(ParenthesizedNode)] pub struct Ellipsis<'a> { pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for Ellipsis<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token("..."); }) } } impl<'r, 'a> Inflate<'a> for DeflatedEllipsis<'r, 'a> { type Inflated = Ellipsis<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { lpar, rpar }) } } #[cst_node(ParenthesizedNode)] pub struct Integer<'a> { /// A string representation of the integer, such as ``"100000"`` or /// ``"100_000"``. pub value: &'a str, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for Integer<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token(self.value); }) } } impl<'r, 'a> Inflate<'a> for DeflatedInteger<'r, 'a> { type Inflated = Integer<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { value: self.value, lpar, rpar, }) } } #[cst_node(ParenthesizedNode)] pub struct Float<'a> { /// A string representation of the floating point number, such as ```"0.05"``, /// ``".050"``, or ``"5e-2"``. 
pub value: &'a str, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for Float<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token(self.value); }) } } impl<'r, 'a> Inflate<'a> for DeflatedFloat<'r, 'a> { type Inflated = Float<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { value: self.value, lpar, rpar, }) } } #[cst_node(ParenthesizedNode)] pub struct Imaginary<'a> { /// A string representation of the complex number, such as ``"2j"`` pub value: &'a str, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for Imaginary<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token(self.value); }) } } impl<'r, 'a> Inflate<'a> for DeflatedImaginary<'r, 'a> { type Inflated = Imaginary<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { value: self.value, lpar, rpar, }) } } #[cst_node(ParenthesizedNode)] pub struct Comparison<'a> { pub left: Box>, pub comparisons: Vec>, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for Comparison<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.left.codegen(state); for comp in &self.comparisons { comp.codegen(state); } }) } } impl<'r, 'a> Inflate<'a> for DeflatedComparison<'r, 'a> { type Inflated = Comparison<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let left = self.left.inflate(config)?; let comparisons = self.comparisons.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { left, comparisons, lpar, rpar, }) } } #[cst_node(ParenthesizedNode)] pub struct UnaryOperation<'a> { pub operator: UnaryOp<'a>, pub expression: Box>, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for UnaryOperation<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.operator.codegen(state); self.expression.codegen(state); }) } } impl<'r, 'a> Inflate<'a> for DeflatedUnaryOperation<'r, 'a> { type Inflated = UnaryOperation<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let operator = self.operator.inflate(config)?; let expression = self.expression.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { operator, expression, lpar, rpar, }) } } #[cst_node(ParenthesizedNode)] pub struct BinaryOperation<'a> { pub left: Box>, pub operator: BinaryOp<'a>, pub right: Box>, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for BinaryOperation<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.left.codegen(state); self.operator.codegen(state); self.right.codegen(state); }) } } impl<'r, 'a> Inflate<'a> for DeflatedBinaryOperation<'r, 'a> { type Inflated = BinaryOperation<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let left = self.left.inflate(config)?; let operator = self.operator.inflate(config)?; let right = self.right.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { left, operator, right, lpar, rpar, }) } } #[cst_node(ParenthesizedNode)] pub struct BooleanOperation<'a> { pub left: Box>, pub operator: BooleanOp<'a>, pub right: Box>, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for BooleanOperation<'a> { fn 
codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.left.codegen(state); self.operator.codegen(state); self.right.codegen(state); }) } } impl<'r, 'a> Inflate<'a> for DeflatedBooleanOperation<'r, 'a> { type Inflated = BooleanOperation<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let left = self.left.inflate(config)?; let operator = self.operator.inflate(config)?; let right = self.right.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { left, operator, right, lpar, rpar, }) } } #[cst_node(ParenthesizedNode)] pub struct Call<'a> { pub func: Box>, pub args: Vec>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_after_func: ParenthesizableWhitespace<'a>, pub whitespace_before_args: ParenthesizableWhitespace<'a>, pub(crate) lpar_tok: TokenRef<'a>, pub(crate) rpar_tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedCall<'r, 'a> { type Inflated = Call<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let func = self.func.inflate(config)?; let whitespace_after_func = parse_parenthesizable_whitespace( config, &mut (*self.lpar_tok).whitespace_before.borrow_mut(), )?; let whitespace_before_args = parse_parenthesizable_whitespace( config, &mut (*self.lpar_tok).whitespace_after.borrow_mut(), )?; let mut args = self.args.inflate(config)?; if let Some(arg) = args.last_mut() { if arg.comma.is_none() { arg.whitespace_after_arg = parse_parenthesizable_whitespace( config, &mut (*self.rpar_tok).whitespace_before.borrow_mut(), )?; } } let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { func, args, lpar, rpar, whitespace_after_func, whitespace_before_args, }) } } impl<'a> Codegen<'a> for Call<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.func.codegen(state); self.whitespace_after_func.codegen(state); state.add_token("("); self.whitespace_before_args.codegen(state); let arg_len = self.args.len(); for (i, arg) in self.args.iter().enumerate() { arg.codegen(state, i + 1 < arg_len); } state.add_token(")"); }) } } #[cst_node(ParenthesizedNode)] pub struct Attribute<'a> { pub value: Box>, pub attr: Name<'a>, pub dot: Dot<'a>, pub lpar: Vec>, pub rpar: Vec>, } impl<'r, 'a> Inflate<'a> for DeflatedAttribute<'r, 'a> { type Inflated = Attribute<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let value = self.value.inflate(config)?; let dot = self.dot.inflate(config)?; let attr = self.attr.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { value, attr, dot, lpar, rpar, }) } } impl<'a> Codegen<'a> for Attribute<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.value.codegen(state); self.dot.codegen(state); self.attr.codegen(state); }) } } #[cst_node(Codegen, Inflate)] pub enum NameOrAttribute<'a> { N(Box>), A(Box>), } impl<'r, 'a> std::convert::From> for DeflatedExpression<'r, 'a> { fn from(x: DeflatedNameOrAttribute<'r, 'a>) -> Self { match x { DeflatedNameOrAttribute::N(n) => Self::Name(n), DeflatedNameOrAttribute::A(a) => Self::Attribute(a), } } } #[cst_node] pub struct ComparisonTarget<'a> { pub operator: CompOp<'a>, pub comparator: Expression<'a>, } impl<'a> Codegen<'a> for ComparisonTarget<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.operator.codegen(state); self.comparator.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedComparisonTarget<'r, 'a> { type Inflated = 
ComparisonTarget<'a>; fn inflate(self, config: &Config<'a>) -> Result { let operator = self.operator.inflate(config)?; let comparator = self.comparator.inflate(config)?; Ok(Self::Inflated { operator, comparator, }) } } #[cst_node(ParenthesizedNode)] pub struct StarredElement<'a> { pub value: Box>, pub comma: Option>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_before_value: ParenthesizableWhitespace<'a>, pub(crate) star_tok: TokenRef<'a>, } impl<'r, 'a> DeflatedStarredElement<'r, 'a> { pub fn inflate_element(self, config: &Config<'a>, is_last: bool) -> Result> { let lpar = self.lpar.inflate(config)?; let whitespace_before_value = parse_parenthesizable_whitespace( config, &mut (*self.star_tok).whitespace_after.borrow_mut(), )?; let value = self.value.inflate(config)?; let rpar = self.rpar.inflate(config)?; let comma = if is_last { self.comma.map(|c| c.inflate_before(config)).transpose() } else { self.comma.inflate(config) }?; Ok(StarredElement { value, comma, lpar, rpar, whitespace_before_value, }) } } impl<'r, 'a> Inflate<'a> for DeflatedStarredElement<'r, 'a> { type Inflated = StarredElement<'a>; fn inflate(self, config: &Config<'a>) -> Result { self.inflate_element(config, false) } } impl<'a> Codegen<'a> for StarredElement<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token("*"); self.whitespace_before_value.codegen(state); self.value.codegen(state); }); if let Some(comma) = &self.comma { comma.codegen(state); } } } #[allow(clippy::large_enum_variant)] #[cst_node(NoIntoPy)] pub enum Element<'a> { Simple { value: Expression<'a>, comma: Option>, }, Starred(Box>), } impl<'a> Element<'a> { pub fn codegen( &self, state: &mut CodegenState<'a>, default_comma: bool, default_comma_whitespace: bool, ) { match self { Self::Simple { value, comma } => { value.codegen(state); if let Some(comma) = comma { comma.codegen(state) } } Self::Starred(s) => s.codegen(state), } let maybe_comma = match self { Self::Simple { comma, .. } => comma, Self::Starred(s) => &s.comma, }; if maybe_comma.is_none() && default_comma { state.add_token(if default_comma_whitespace { ", " } else { "," }); } } } impl<'r, 'a> DeflatedElement<'r, 'a> { pub fn inflate_element(self, config: &Config<'a>, is_last: bool) -> Result> { Ok(match self { Self::Starred(s) => Element::Starred(Box::new(s.inflate_element(config, is_last)?)), Self::Simple { value, comma } => Element::Simple { value: value.inflate(config)?, comma: if is_last { comma.map(|c| c.inflate_before(config)).transpose()? } else { comma.inflate(config)? }, }, }) } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedElement<'r, 'a> { fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { let comma = Some(comma); match self { Self::Simple { value, .. 
} => Self::Simple { comma, value }, Self::Starred(mut s) => { s.comma = comma; Self::Starred(s) } } } } impl<'r, 'a> std::convert::From> for DeflatedElement<'r, 'a> { fn from(e: DeflatedExpression<'r, 'a>) -> Self { match e { DeflatedExpression::StarredElement(e) => Self::Starred(e), value => Self::Simple { value, comma: None }, } } } #[cst_node(ParenthesizedNode, Default)] pub struct Tuple<'a> { pub elements: Vec>, pub lpar: Vec>, pub rpar: Vec>, } impl<'r, 'a> Inflate<'a> for DeflatedTuple<'r, 'a> { type Inflated = Tuple<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let len = self.elements.len(); let elements = self .elements .into_iter() .enumerate() .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) .collect::>>()?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { elements, lpar, rpar, }) } } impl<'a> Codegen<'a> for Tuple<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { let len = self.elements.len(); if len == 1 { self.elements.first().unwrap().codegen(state, true, false); } else { for (idx, el) in self.elements.iter().enumerate() { el.codegen(state, idx < len - 1, true); } } }); } } #[cst_node(ParenthesizedNode)] pub struct GeneratorExp<'a> { pub elt: Box>, pub for_in: Box>, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for GeneratorExp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.elt.codegen(state); self.for_in.codegen(state); }) } } impl<'r, 'a> Inflate<'a> for DeflatedGeneratorExp<'r, 'a> { type Inflated = GeneratorExp<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let elt = self.elt.inflate(config)?; let for_in = self.for_in.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { elt, for_in, lpar, rpar, }) } } #[cst_node(ParenthesizedNode)] pub struct ListComp<'a> { pub elt: Box>, pub for_in: Box>, pub lbracket: LeftSquareBracket<'a>, pub rbracket: RightSquareBracket<'a>, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for ListComp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.lbracket.codegen(state); self.elt.codegen(state); self.for_in.codegen(state); self.rbracket.codegen(state); }) } } impl<'r, 'a> Inflate<'a> for DeflatedListComp<'r, 'a> { type Inflated = ListComp<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let lbracket = self.lbracket.inflate(config)?; let elt = self.elt.inflate(config)?; let for_in = self.for_in.inflate(config)?; let rbracket = self.rbracket.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { elt, for_in, lbracket, rbracket, lpar, rpar, }) } } #[cst_node] #[derive(Default)] pub struct LeftSquareBracket<'a> { pub whitespace_after: ParenthesizableWhitespace<'a>, pub(crate) tok: TokenRef<'a>, } impl<'a> Codegen<'a> for LeftSquareBracket<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("["); self.whitespace_after.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedLeftSquareBracket<'r, 'a> { type Inflated = LeftSquareBracket<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_after.borrow_mut(), )?; Ok(Self::Inflated { whitespace_after }) } } #[cst_node] #[derive(Default)] pub struct RightSquareBracket<'a> { pub whitespace_before: ParenthesizableWhitespace<'a>, pub(crate) 
tok: TokenRef<'a>, } impl<'a> Codegen<'a> for RightSquareBracket<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token("]"); } } impl<'r, 'a> Inflate<'a> for DeflatedRightSquareBracket<'r, 'a> { type Inflated = RightSquareBracket<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_before.borrow_mut(), )?; Ok(Self::Inflated { whitespace_before }) } } #[cst_node(ParenthesizedNode)] pub struct SetComp<'a> { pub elt: Box>, pub for_in: Box>, pub lbrace: LeftCurlyBrace<'a>, pub rbrace: RightCurlyBrace<'a>, pub lpar: Vec>, pub rpar: Vec>, } impl<'r, 'a> Inflate<'a> for DeflatedSetComp<'r, 'a> { type Inflated = SetComp<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let lbrace = self.lbrace.inflate(config)?; let elt = self.elt.inflate(config)?; let for_in = self.for_in.inflate(config)?; let rbrace = self.rbrace.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { elt, for_in, lbrace, rbrace, lpar, rpar, }) } } impl<'a> Codegen<'a> for SetComp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.lbrace.codegen(state); self.elt.codegen(state); self.for_in.codegen(state); self.rbrace.codegen(state); }) } } #[cst_node(ParenthesizedNode)] pub struct DictComp<'a> { pub key: Box>, pub value: Box>, pub for_in: Box>, pub lbrace: LeftCurlyBrace<'a>, pub rbrace: RightCurlyBrace<'a>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_before_colon: ParenthesizableWhitespace<'a>, pub whitespace_after_colon: ParenthesizableWhitespace<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedDictComp<'r, 'a> { type Inflated = DictComp<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let lbrace = self.lbrace.inflate(config)?; let key = self.key.inflate(config)?; let whitespace_before_colon = parse_parenthesizable_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )?; let whitespace_after_colon = parse_parenthesizable_whitespace( config, &mut (*self.colon_tok).whitespace_after.borrow_mut(), )?; let value = self.value.inflate(config)?; let for_in = self.for_in.inflate(config)?; let rbrace = self.rbrace.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { key, value, for_in, lbrace, rbrace, lpar, rpar, whitespace_before_colon, whitespace_after_colon, }) } } impl<'a> Codegen<'a> for DictComp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.lbrace.codegen(state); self.key.codegen(state); self.whitespace_before_colon.codegen(state); state.add_token(":"); self.whitespace_after_colon.codegen(state); self.value.codegen(state); self.for_in.codegen(state); self.rbrace.codegen(state); }) } } #[cst_node] pub struct LeftCurlyBrace<'a> { pub whitespace_after: ParenthesizableWhitespace<'a>, pub(crate) tok: TokenRef<'a>, } impl<'a> Default for LeftCurlyBrace<'a> { fn default() -> Self { Self { whitespace_after: Default::default(), } } } impl<'r, 'a> Inflate<'a> for DeflatedLeftCurlyBrace<'r, 'a> { type Inflated = LeftCurlyBrace<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_after.borrow_mut(), )?; Ok(Self::Inflated { whitespace_after }) } } impl<'a> Codegen<'a> for LeftCurlyBrace<'a> { fn codegen(&self, state: &mut 
CodegenState<'a>) { state.add_token("{"); self.whitespace_after.codegen(state); } } #[cst_node] pub struct RightCurlyBrace<'a> { pub whitespace_before: ParenthesizableWhitespace<'a>, pub(crate) tok: TokenRef<'a>, } impl<'a> Default for RightCurlyBrace<'a> { fn default() -> Self { Self { whitespace_before: Default::default(), } } } impl<'r, 'a> Inflate<'a> for DeflatedRightCurlyBrace<'r, 'a> { type Inflated = RightCurlyBrace<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_before.borrow_mut(), )?; Ok(Self::Inflated { whitespace_before }) } } impl<'a> Codegen<'a> for RightCurlyBrace<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token("}"); } } #[cst_node] pub struct CompFor<'a> { pub target: AssignTargetExpression<'a>, pub iter: Expression<'a>, pub ifs: Vec>, pub inner_for_in: Option>>, pub asynchronous: Option>, pub whitespace_before: ParenthesizableWhitespace<'a>, pub whitespace_after_for: ParenthesizableWhitespace<'a>, pub whitespace_before_in: ParenthesizableWhitespace<'a>, pub whitespace_after_in: ParenthesizableWhitespace<'a>, pub(crate) async_tok: Option>, pub(crate) for_tok: TokenRef<'a>, pub(crate) in_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for CompFor<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); if let Some(asynchronous) = &self.asynchronous { asynchronous.codegen(state); } state.add_token("for"); self.whitespace_after_for.codegen(state); self.target.codegen(state); self.whitespace_before_in.codegen(state); state.add_token("in"); self.whitespace_after_in.codegen(state); self.iter.codegen(state); for if_ in &self.ifs { if_.codegen(state); } if let Some(inner) = &self.inner_for_in { inner.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedCompFor<'r, 'a> { type Inflated = CompFor<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let mut whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.for_tok).whitespace_before.borrow_mut(), )?; let asynchronous = if let Some(asy_tok) = self.async_tok.as_mut() { // If there is an async keyword, the start of the CompFor expression is // considered to be this keyword, so whitespace_before needs to adjust but // Asynchronous will own the whitespace before the for token. 
let mut asy_whitespace_after = parse_parenthesizable_whitespace( config, &mut asy_tok.whitespace_before.borrow_mut(), )?; swap(&mut asy_whitespace_after, &mut whitespace_before); Some(Asynchronous { whitespace_after: asy_whitespace_after, }) } else { None }; let whitespace_after_for = parse_parenthesizable_whitespace( config, &mut (*self.for_tok).whitespace_after.borrow_mut(), )?; let target = self.target.inflate(config)?; let whitespace_before_in = parse_parenthesizable_whitespace( config, &mut (*self.in_tok).whitespace_before.borrow_mut(), )?; let whitespace_after_in = parse_parenthesizable_whitespace( config, &mut (*self.in_tok).whitespace_after.borrow_mut(), )?; let iter = self.iter.inflate(config)?; let ifs = self.ifs.inflate(config)?; let inner_for_in = self.inner_for_in.inflate(config)?; Ok(Self::Inflated { target, iter, ifs, inner_for_in, asynchronous, whitespace_before, whitespace_after_for, whitespace_before_in, whitespace_after_in, }) } } #[cst_node] pub struct Asynchronous<'a> { pub whitespace_after: ParenthesizableWhitespace<'a>, } impl<'a> Codegen<'a> for Asynchronous<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("async"); self.whitespace_after.codegen(state); } } pub(crate) fn make_async<'r, 'a>() -> DeflatedAsynchronous<'r, 'a> { DeflatedAsynchronous { _phantom: Default::default(), } } #[cst_node] pub struct CompIf<'a> { pub test: Expression<'a>, pub whitespace_before: ParenthesizableWhitespace<'a>, pub whitespace_before_test: ParenthesizableWhitespace<'a>, pub(crate) if_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for CompIf<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token("if"); self.whitespace_before_test.codegen(state); self.test.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedCompIf<'r, 'a> { type Inflated = CompIf<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.if_tok).whitespace_before.borrow_mut(), )?; let whitespace_before_test = parse_parenthesizable_whitespace( config, &mut (*self.if_tok).whitespace_after.borrow_mut(), )?; let test = self.test.inflate(config)?; Ok(Self::Inflated { test, whitespace_before, whitespace_before_test, }) } } #[cst_node(ParenthesizedNode)] pub struct List<'a> { pub elements: Vec>, pub lbracket: LeftSquareBracket<'a>, pub rbracket: RightSquareBracket<'a>, pub lpar: Vec>, pub rpar: Vec>, } impl<'r, 'a> Inflate<'a> for DeflatedList<'r, 'a> { type Inflated = List<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let lbracket = self.lbracket.inflate(config)?; let len = self.elements.len(); let elements = self .elements .into_iter() .enumerate() .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) .collect::>>()?; let rbracket = if !elements.is_empty() { // lbracket owns all the whitespace if there are no elements self.rbracket.inflate(config)? 
} else { Default::default() }; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { elements, lbracket, rbracket, lpar, rpar, }) } } impl<'a> Codegen<'a> for List<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.lbracket.codegen(state); let len = self.elements.len(); for (idx, el) in self.elements.iter().enumerate() { el.codegen(state, idx < len - 1, true); } self.rbracket.codegen(state); }) } } #[cst_node(ParenthesizedNode)] pub struct Set<'a> { pub elements: Vec>, pub lbrace: LeftCurlyBrace<'a>, pub rbrace: RightCurlyBrace<'a>, pub lpar: Vec>, pub rpar: Vec>, } impl<'r, 'a> Inflate<'a> for DeflatedSet<'r, 'a> { type Inflated = Set<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let lbrace = self.lbrace.inflate(config)?; let len = self.elements.len(); let elements = self .elements .into_iter() .enumerate() .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) .collect::>>()?; let rbrace = if !elements.is_empty() { self.rbrace.inflate(config)? } else { Default::default() }; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { elements, lbrace, rbrace, lpar, rpar, }) } } impl<'a> Codegen<'a> for Set<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.lbrace.codegen(state); let len = self.elements.len(); for (idx, el) in self.elements.iter().enumerate() { el.codegen(state, idx < len - 1, true); } self.rbrace.codegen(state); }) } } #[cst_node(ParenthesizedNode)] pub struct Dict<'a> { pub elements: Vec>, pub lbrace: LeftCurlyBrace<'a>, pub rbrace: RightCurlyBrace<'a>, pub lpar: Vec>, pub rpar: Vec>, } impl<'r, 'a> Inflate<'a> for DeflatedDict<'r, 'a> { type Inflated = Dict<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let lbrace = self.lbrace.inflate(config)?; let len = self.elements.len(); let elements = self .elements .into_iter() .enumerate() .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) .collect::>>()?; let rbrace = if !elements.is_empty() { self.rbrace.inflate(config)? } else { Default::default() }; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { elements, lbrace, rbrace, lpar, rpar, }) } } impl<'a> Codegen<'a> for Dict<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.lbrace.codegen(state); let len = self.elements.len(); for (idx, el) in self.elements.iter().enumerate() { el.codegen(state, idx < len - 1, true); } self.rbrace.codegen(state); }) } } #[cst_node(NoIntoPy)] pub enum DictElement<'a> { Simple { key: Expression<'a>, value: Expression<'a>, comma: Option>, whitespace_before_colon: ParenthesizableWhitespace<'a>, whitespace_after_colon: ParenthesizableWhitespace<'a>, colon_tok: TokenRef<'a>, }, Starred(StarredDictElement<'a>), } impl<'r, 'a> DeflatedDictElement<'r, 'a> { pub fn inflate_element( self, config: &Config<'a>, last_element: bool, ) -> Result> { Ok(match self { Self::Starred(s) => DictElement::Starred(s.inflate_element(config, last_element)?), Self::Simple { key, value, comma, colon_tok, .. 
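// Added note (illustrative): `inflate_element`'s `last_element` flag makes a
// trailing comma (e.g. the one in `{"a": 1,}`) inflate via `inflate_before`,
// leaving the whitespace after it to the closing brace.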
} => { let whitespace_before_colon = parse_parenthesizable_whitespace( config, &mut colon_tok.whitespace_before.borrow_mut(), )?; let whitespace_after_colon = parse_parenthesizable_whitespace( config, &mut colon_tok.whitespace_after.borrow_mut(), )?; DictElement::Simple { key: key.inflate(config)?, whitespace_before_colon, whitespace_after_colon, value: value.inflate(config)?, comma: if last_element { comma.map(|c| c.inflate_before(config)).transpose() } else { comma.inflate(config) }?, } } }) } } impl<'a> DictElement<'a> { fn codegen( &self, state: &mut CodegenState<'a>, default_comma: bool, default_comma_whitespace: bool, ) { match self { Self::Simple { key, value, comma, whitespace_before_colon, whitespace_after_colon, .. } => { key.codegen(state); whitespace_before_colon.codegen(state); state.add_token(":"); whitespace_after_colon.codegen(state); value.codegen(state); if let Some(comma) = comma { comma.codegen(state) } } Self::Starred(s) => s.codegen(state), } let maybe_comma = match self { Self::Simple { comma, .. } => comma, Self::Starred(s) => &s.comma, }; if maybe_comma.is_none() && default_comma { state.add_token(if default_comma_whitespace { ", " } else { "," }); } } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedDictElement<'r, 'a> { fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { let comma = Some(comma); match self { Self::Starred(s) => Self::Starred(DeflatedStarredDictElement { comma, ..s }), Self::Simple { key, value, colon_tok, .. } => Self::Simple { comma, key, value, colon_tok, }, } } } #[cst_node] pub struct StarredDictElement<'a> { pub value: Expression<'a>, pub comma: Option>, pub whitespace_before_value: ParenthesizableWhitespace<'a>, pub(crate) star_tok: TokenRef<'a>, } impl<'r, 'a> DeflatedStarredDictElement<'r, 'a> { fn inflate_element( self, config: &Config<'a>, last_element: bool, ) -> Result> { let whitespace_before_value = parse_parenthesizable_whitespace( config, &mut (*self.star_tok).whitespace_after.borrow_mut(), )?; let value = self.value.inflate(config)?; let comma = if last_element { self.comma.map(|c| c.inflate_before(config)).transpose() } else { self.comma.inflate(config) }?; Ok(StarredDictElement { value, comma, whitespace_before_value, }) } } impl<'a> Codegen<'a> for StarredDictElement<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("**"); self.whitespace_before_value.codegen(state); self.value.codegen(state); if let Some(comma) = &self.comma { comma.codegen(state); } } } #[cst_node(Codegen, Inflate)] pub enum BaseSlice<'a> { Index(Box>), Slice(Box>), } #[cst_node] pub struct Index<'a> { pub value: Expression<'a>, pub star: Option<&'a str>, pub whitespace_after_star: Option>, pub(crate) star_tok: Option>, } impl<'r, 'a> Inflate<'a> for DeflatedIndex<'r, 'a> { type Inflated = Index<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let (star, whitespace_after_star) = if let Some(star_tok) = self.star_tok.as_mut() { ( Some(star_tok.string), Some(parse_parenthesizable_whitespace( config, &mut star_tok.whitespace_after.borrow_mut(), )?), ) } else { (None, None) }; let value = self.value.inflate(config)?; Ok(Self::Inflated { value, star, whitespace_after_star, }) } } impl<'a> Codegen<'a> for Index<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { if let Some(star) = self.star { state.add_token(star); } self.whitespace_after_star.codegen(state); self.value.codegen(state); } } #[cst_node] pub struct Slice<'a> { #[cfg_attr(feature = "py", no_py_default)] pub lower: Option>, #[cfg_attr(feature = "py", 
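// Added note (illustrative): `star` on `Index` captures the `*` of a PEP 646
// starred subscript such as `x[*ts]`; `star_tok` is None for ordinary
// indexing like `x[0]`.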
no_py_default)]
    pub upper: Option<Expression<'a>>,
    pub step: Option<Expression<'a>>,
    pub first_colon: Colon<'a>,
    pub second_colon: Option<Colon<'a>>,
}

impl<'r, 'a> Inflate<'a> for DeflatedSlice<'r, 'a> {
    type Inflated = Slice<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        let lower = self.lower.inflate(config)?;
        let first_colon = self.first_colon.inflate(config)?;
        let upper = self.upper.inflate(config)?;
        let second_colon = self.second_colon.inflate(config)?;
        let step = self.step.inflate(config)?;
        Ok(Self::Inflated {
            lower,
            upper,
            step,
            first_colon,
            second_colon,
        })
    }
}

impl<'a> Codegen<'a> for Slice<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        if let Some(lower) = &self.lower {
            lower.codegen(state);
        }
        self.first_colon.codegen(state);
        if let Some(upper) = &self.upper {
            upper.codegen(state);
        }
        if let Some(second_colon) = &self.second_colon {
            second_colon.codegen(state);
        } else if self.step.is_some() {
            state.add_token(":");
        }
        if let Some(step) = &self.step {
            step.codegen(state);
        }
    }
}

#[cst_node]
pub struct SubscriptElement<'a> {
    pub slice: BaseSlice<'a>,
    pub comma: Option<Comma<'a>>,
}

impl<'r, 'a> Inflate<'a> for DeflatedSubscriptElement<'r, 'a> {
    type Inflated = SubscriptElement<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        let slice = self.slice.inflate(config)?;
        let comma = self.comma.inflate(config)?;
        Ok(Self::Inflated { slice, comma })
    }
}

impl<'a> Codegen<'a> for SubscriptElement<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        self.slice.codegen(state);
        if let Some(comma) = &self.comma {
            comma.codegen(state);
        }
    }
}

#[cst_node(ParenthesizedNode)]
pub struct Subscript<'a> {
    pub value: Box<Expression<'a>>,
    pub slice: Vec<SubscriptElement<'a>>,
    pub lbracket: LeftSquareBracket<'a>,
    pub rbracket: RightSquareBracket<'a>,
    pub lpar: Vec<LeftParen<'a>>,
    pub rpar: Vec<RightParen<'a>>,
    pub whitespace_after_value: ParenthesizableWhitespace<'a>,
}

impl<'r, 'a> Inflate<'a> for DeflatedSubscript<'r, 'a> {
    type Inflated = Subscript<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        let lpar = self.lpar.inflate(config)?;
        let value = self.value.inflate(config)?;
        let whitespace_after_value = parse_parenthesizable_whitespace(
            config,
            &mut self.lbracket.tok.whitespace_before.borrow_mut(),
        )?;
        let lbracket = self.lbracket.inflate(config)?;
        let slice = self.slice.inflate(config)?;
        let rbracket = self.rbracket.inflate(config)?;
        let rpar = self.rpar.inflate(config)?;
        Ok(Self::Inflated {
            value,
            slice,
            lbracket,
            rbracket,
            lpar,
            rpar,
            whitespace_after_value,
        })
    }
}

impl<'a> Codegen<'a> for Subscript<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        self.parenthesize(state, |state| {
            self.value.codegen(state);
            self.whitespace_after_value.codegen(state);
            self.lbracket.codegen(state);
            let len = self.slice.len();
            for (i, slice) in self.slice.iter().enumerate() {
                slice.codegen(state);
                if slice.comma.is_none() && i + 1 < len {
                    state.add_token(", ")
                }
            }
            self.rbracket.codegen(state);
        })
    }
}

#[cst_node(ParenthesizedNode)]
pub struct IfExp<'a> {
    pub test: Box<Expression<'a>>,
    pub body: Box<Expression<'a>>,
    pub orelse: Box<Expression<'a>>,
    pub lpar: Vec<LeftParen<'a>>,
    pub rpar: Vec<RightParen<'a>>,
    pub whitespace_before_if: ParenthesizableWhitespace<'a>,
    pub whitespace_after_if: ParenthesizableWhitespace<'a>,
    pub whitespace_before_else: ParenthesizableWhitespace<'a>,
    pub whitespace_after_else: ParenthesizableWhitespace<'a>,
    pub(crate) if_tok: TokenRef<'a>,
    pub(crate) else_tok: TokenRef<'a>,
}

impl<'r, 'a> Inflate<'a> for DeflatedIfExp<'r, 'a> {
    type Inflated = IfExp<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        let lpar = self.lpar.inflate(config)?;
        let body = self.body.inflate(config)?;
        let whitespace_before_if = parse_parenthesizable_whitespace(
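// Added note (illustrative): the ":" fallback in `Slice::codegen` above covers
// trees built programmatically with a `step` but no explicit `second_colon`,
// so generated code stays valid Python, e.g. `x[1:2:3]`.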
config, &mut (*self.if_tok).whitespace_before.borrow_mut(), )?; let whitespace_after_if = parse_parenthesizable_whitespace( config, &mut (*self.if_tok).whitespace_after.borrow_mut(), )?; let test = self.test.inflate(config)?; let whitespace_before_else = parse_parenthesizable_whitespace( config, &mut (*self.else_tok).whitespace_before.borrow_mut(), )?; let whitespace_after_else = parse_parenthesizable_whitespace( config, &mut (*self.else_tok).whitespace_after.borrow_mut(), )?; let orelse = self.orelse.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { test, body, orelse, lpar, rpar, whitespace_before_if, whitespace_after_if, whitespace_before_else, whitespace_after_else, }) } } impl<'a> Codegen<'a> for IfExp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.body.codegen(state); self.whitespace_before_if.codegen(state); state.add_token("if"); self.whitespace_after_if.codegen(state); self.test.codegen(state); self.whitespace_before_else.codegen(state); state.add_token("else"); self.whitespace_after_else.codegen(state); self.orelse.codegen(state); }) } } #[cst_node(ParenthesizedNode)] pub struct Lambda<'a> { pub params: Box>, pub body: Box>, pub colon: Colon<'a>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_after_lambda: Option>, pub(crate) lambda_tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedLambda<'r, 'a> { type Inflated = Lambda<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let whitespace_after_lambda = if !self.params.is_empty() { Some(parse_parenthesizable_whitespace( config, &mut (*self.lambda_tok).whitespace_after.borrow_mut(), )?) } else { Default::default() }; let mut params = self.params.inflate(config)?; adjust_parameters_trailing_whitespace(config, &mut params, &self.colon.tok)?; let colon = self.colon.inflate(config)?; let body = self.body.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { params, body, colon, lpar, rpar, whitespace_after_lambda, }) } } impl<'a> Codegen<'a> for Lambda<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token("lambda"); if let Some(ws) = &self.whitespace_after_lambda { ws.codegen(state); } else if !self.params.is_empty() { // there's one or more params, add a space state.add_token(" ") } self.params.codegen(state); self.colon.codegen(state); self.body.codegen(state); }) } } #[cst_node] pub struct From<'a> { pub item: Expression<'a>, pub whitespace_before_from: Option>, pub whitespace_after_from: ParenthesizableWhitespace<'a>, pub(crate) tok: TokenRef<'a>, } impl<'a> From<'a> { pub fn codegen(&self, state: &mut CodegenState<'a>, default_space: &'a str) { if let Some(ws) = &self.whitespace_before_from { ws.codegen(state); } else { state.add_token(default_space); } state.add_token("from"); self.whitespace_after_from.codegen(state); self.item.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedFrom<'r, 'a> { type Inflated = From<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before_from = Some(parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_before.borrow_mut(), )?); let whitespace_after_from = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_after.borrow_mut(), )?; let item = self.item.inflate(config)?; Ok(Self::Inflated { item, whitespace_before_from, whitespace_after_from, }) } } #[cst_node] pub enum YieldValue<'a> { Expression(Box>), From(Box>), } impl<'r, 'a> Inflate<'a> 
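// Added note (illustrative): `From::codegen` above takes `default_space`
// because the node serves both `raise e from cause` and `yield from it`;
// the caller-supplied default is only emitted when `whitespace_before_from`
// is None.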
for DeflatedYieldValue<'r, 'a> { type Inflated = YieldValue<'a>; fn inflate(self, config: &Config<'a>) -> Result { Ok(match self { Self::Expression(e) => Self::Inflated::Expression(e.inflate(config)?), Self::From(e) => { let mut e = e.inflate(config)?; e.whitespace_before_from = None; Self::Inflated::From(e) } }) } } impl<'a> YieldValue<'a> { fn codegen(&self, state: &mut CodegenState<'a>, default_space: &'a str) { match self { Self::Expression(e) => e.codegen(state), Self::From(f) => f.codegen(state, default_space), } } } #[cst_node(ParenthesizedNode)] pub struct Yield<'a> { pub value: Option>>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_after_yield: Option>, pub(crate) yield_tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedYield<'r, 'a> { type Inflated = Yield<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let whitespace_after_yield = if self.value.is_some() { Some(parse_parenthesizable_whitespace( config, &mut (*self.yield_tok).whitespace_after.borrow_mut(), )?) } else { Default::default() }; let value = self.value.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { value, lpar, rpar, whitespace_after_yield, }) } } impl<'a> Codegen<'a> for Yield<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token("yield"); if let Some(ws) = &self.whitespace_after_yield { ws.codegen(state); } else if self.value.is_some() { state.add_token(" "); } if let Some(val) = &self.value { val.codegen(state, "") } }) } } #[cst_node(ParenthesizedNode)] pub struct Await<'a> { pub expression: Box>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_after_await: ParenthesizableWhitespace<'a>, pub(crate) await_tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedAwait<'r, 'a> { type Inflated = Await<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let whitespace_after_await = parse_parenthesizable_whitespace( config, &mut (*self.await_tok).whitespace_after.borrow_mut(), )?; let expression = self.expression.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { expression, lpar, rpar, whitespace_after_await, }) } } impl<'a> Codegen<'a> for Await<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token("await"); self.whitespace_after_await.codegen(state); self.expression.codegen(state); }) } } #[cst_node(Codegen, Inflate)] pub enum String<'a> { Simple(SimpleString<'a>), Concatenated(ConcatenatedString<'a>), Formatted(FormattedString<'a>), } impl<'r, 'a> std::convert::From> for DeflatedExpression<'r, 'a> { fn from(s: DeflatedString<'r, 'a>) -> Self { match s { DeflatedString::Simple(s) => Self::SimpleString(Box::new(s)), DeflatedString::Concatenated(s) => Self::ConcatenatedString(Box::new(s)), DeflatedString::Formatted(s) => Self::FormattedString(Box::new(s)), } } } #[cst_node(ParenthesizedNode)] pub struct ConcatenatedString<'a> { pub left: Box>, pub right: Box>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_between: ParenthesizableWhitespace<'a>, // we capture the next token after each string piece so Inflate can extract the // whitespace between individual pieces pub(crate) right_tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedConcatenatedString<'r, 'a> { type Inflated = ConcatenatedString<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let left = self.left.inflate(config)?; let whitespace_between = 
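// Added note (illustrative): for implicitly concatenated literals such as
// `"a"  "b"`, the gap between the pieces comes from
// `right_tok.whitespace_before` and is stored as `whitespace_between`.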
parse_parenthesizable_whitespace(
            config,
            &mut (*self.right_tok).whitespace_before.borrow_mut(),
        )?;
        let right = self.right.inflate(config)?;
        let rpar = self.rpar.inflate(config)?;
        Ok(Self::Inflated {
            left,
            right,
            lpar,
            rpar,
            whitespace_between,
        })
    }
}

impl<'a> Codegen<'a> for ConcatenatedString<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        self.parenthesize(state, |state| {
            self.left.codegen(state);
            self.whitespace_between.codegen(state);
            self.right.codegen(state);
        })
    }
}

#[cst_node(ParenthesizedNode, Default)]
pub struct SimpleString<'a> {
    /// The textual representation of the string, including quotes, prefix
    /// characters, and any escape characters present in the original source code,
    /// such as ``r"my string\n"``.
    pub value: &'a str,
    pub lpar: Vec<LeftParen<'a>>,
    pub rpar: Vec<RightParen<'a>>,
}

impl<'r, 'a> Inflate<'a> for DeflatedSimpleString<'r, 'a> {
    type Inflated = SimpleString<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        let lpar = self.lpar.inflate(config)?;
        let rpar = self.rpar.inflate(config)?;
        Ok(Self::Inflated {
            value: self.value,
            lpar,
            rpar,
        })
    }
}

impl<'a> Codegen<'a> for SimpleString<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        self.parenthesize(state, |state| state.add_token(self.value))
    }
}

#[cst_node]
pub struct FormattedStringText<'a> {
    pub value: &'a str,
}

impl<'r, 'a> Inflate<'a> for DeflatedFormattedStringText<'r, 'a> {
    type Inflated = FormattedStringText<'a>;
    fn inflate(self, _config: &Config<'a>) -> Result<Self::Inflated> {
        Ok(Self::Inflated { value: self.value })
    }
}

impl<'a> Codegen<'a> for FormattedStringText<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        state.add_token(self.value);
    }
}

pub(crate) fn make_fstringtext<'r, 'a>(value: &'a str) -> DeflatedFormattedStringText<'r, 'a> {
    DeflatedFormattedStringText {
        value,
        _phantom: Default::default(),
    }
}

#[cst_node]
pub struct FormattedStringExpression<'a> {
    pub expression: Expression<'a>,
    pub conversion: Option<&'a str>,
    pub format_spec: Option<Vec<FormattedStringContent<'a>>>,
    pub whitespace_before_expression: ParenthesizableWhitespace<'a>,
    pub whitespace_after_expression: ParenthesizableWhitespace<'a>,
    pub equal: Option<AssignEqual<'a>>,
    pub(crate) lbrace_tok: TokenRef<'a>,
    // This is None if there's an equal sign, otherwise it's the first token of
    // (conversion, format spec, right brace) in that order
    pub(crate) after_expr_tok: Option<TokenRef<'a>>,
}

impl<'r, 'a> Inflate<'a> for DeflatedFormattedStringExpression<'r, 'a> {
    type Inflated = FormattedStringExpression<'a>;
    fn inflate(mut self, config: &Config<'a>) -> Result<Self::Inflated> {
        let whitespace_before_expression = parse_parenthesizable_whitespace(
            config,
            &mut (*self.lbrace_tok).whitespace_after.borrow_mut(),
        )?;
        let expression = self.expression.inflate(config)?;
        let equal = self.equal.inflate(config)?;
        let whitespace_after_expression =
            if let Some(after_expr_tok) = self.after_expr_tok.as_mut() {
                parse_parenthesizable_whitespace(
                    config,
                    &mut after_expr_tok.whitespace_before.borrow_mut(),
                )?
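// Added note (illustrative): when the debug `=` is present, as in f"{x=}",
// `after_expr_tok` is None and any whitespace belongs to the `=` sign, so the
// `else` arm below falls back to the default.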
} else { Default::default() }; let format_spec = self.format_spec.inflate(config)?; Ok(Self::Inflated { expression, conversion: self.conversion, format_spec, whitespace_before_expression, whitespace_after_expression, equal, }) } } impl<'a> Codegen<'a> for FormattedStringExpression<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("{"); self.whitespace_before_expression.codegen(state); self.expression.codegen(state); if let Some(eq) = &self.equal { eq.codegen(state); } self.whitespace_after_expression.codegen(state); if let Some(conv) = &self.conversion { state.add_token("!"); state.add_token(conv); } if let Some(specs) = &self.format_spec { state.add_token(":"); for spec in specs { spec.codegen(state); } } state.add_token("}"); } } #[cst_node(Codegen, Inflate)] pub enum FormattedStringContent<'a> { Text(FormattedStringText<'a>), Expression(Box>), } #[cst_node(ParenthesizedNode)] pub struct FormattedString<'a> { pub parts: Vec>, pub start: &'a str, pub end: &'a str, pub lpar: Vec>, pub rpar: Vec>, } impl<'r, 'a> Inflate<'a> for DeflatedFormattedString<'r, 'a> { type Inflated = FormattedString<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let parts = self.parts.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { parts, start: self.start, end: self.end, lpar, rpar, }) } } impl<'a> Codegen<'a> for FormattedString<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { state.add_token(self.start); for part in &self.parts { part.codegen(state); } state.add_token(self.end); }) } } #[cst_node(ParenthesizedNode)] pub struct NamedExpr<'a> { pub target: Box>, pub value: Box>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_before_walrus: ParenthesizableWhitespace<'a>, pub whitespace_after_walrus: ParenthesizableWhitespace<'a>, pub(crate) walrus_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for NamedExpr<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.target.codegen(state); self.whitespace_before_walrus.codegen(state); state.add_token(":="); self.whitespace_after_walrus.codegen(state); self.value.codegen(state); }) } } impl<'r, 'a> Inflate<'a> for DeflatedNamedExpr<'r, 'a> { type Inflated = NamedExpr<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let target = self.target.inflate(config)?; let whitespace_before_walrus = parse_parenthesizable_whitespace( config, &mut self.walrus_tok.whitespace_before.borrow_mut(), )?; let whitespace_after_walrus = parse_parenthesizable_whitespace( config, &mut self.walrus_tok.whitespace_after.borrow_mut(), )?; let value = self.value.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { target, value, lpar, rpar, whitespace_before_walrus, whitespace_after_walrus, }) } } #[cfg(feature = "py")] mod py { use pyo3::types::PyModule; use super::*; use crate::nodes::traits::py::TryIntoPy; // TODO: this could be a derive helper attribute to override the python class name impl<'a> TryIntoPy for Element<'a> { fn try_into_py(self, py: pyo3::Python) -> pyo3::PyResult { match self { Self::Starred(s) => s.try_into_py(py), Self::Simple { value, comma } => { let libcst = PyModule::import(py, "libcst")?; let kwargs = [ Some(("value", value.try_into_py(py)?)), comma .map(|x| x.try_into_py(py)) .transpose()? 
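// Added note (illustrative): the kwargs array above keeps only the entries
// that are Some (e.g. a comma that actually exists) before building the
// Python dict, so libcst-side defaults are left untouched.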
.map(|x| ("comma", x)), ] .iter() .filter(|x| x.is_some()) .map(|x| x.as_ref().unwrap()) .collect::>() .into_py_dict(py); Ok(libcst .getattr("Element") .expect("no Element found in libcst") .call((), Some(kwargs))? .into()) } } } } // TODO: this could be a derive helper attribute to override the python class name impl<'a> TryIntoPy for DictElement<'a> { fn try_into_py(self, py: pyo3::Python) -> pyo3::PyResult { match self { Self::Starred(s) => s.try_into_py(py), Self::Simple { key, value, comma, whitespace_after_colon, whitespace_before_colon, .. } => { let libcst = PyModule::import(py, "libcst")?; let kwargs = [ Some(("key", key.try_into_py(py)?)), Some(("value", value.try_into_py(py)?)), Some(( "whitespace_before_colon", whitespace_before_colon.try_into_py(py)?, )), Some(( "whitespace_after_colon", whitespace_after_colon.try_into_py(py)?, )), comma .map(|x| x.try_into_py(py)) .transpose()? .map(|x| ("comma", x)), ] .iter() .filter(|x| x.is_some()) .map(|x| x.as_ref().unwrap()) .collect::>() .into_py_dict(py); Ok(libcst .getattr("DictElement") .expect("no Element found in libcst") .call((), Some(kwargs))? .into()) } } } } } LibCST-1.2.0/native/libcst/src/nodes/inflate_helpers.rs000066400000000000000000000023361456464173300227720ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree use crate::{ nodes::traits::Result, tokenizer::{ whitespace_parser::{parse_parenthesizable_whitespace, Config}, Token, }, Param, Parameters, StarArg, }; pub(crate) fn adjust_parameters_trailing_whitespace<'a>( config: &Config<'a>, parameters: &mut Parameters<'a>, next_tok: &Token<'a>, ) -> Result<()> { let do_adjust = |param: &mut Param<'a>| -> Result<()> { let whitespace_after = parse_parenthesizable_whitespace(config, &mut next_tok.whitespace_before.borrow_mut())?; if param.comma.is_none() { param.whitespace_after_param = whitespace_after; } Ok(()) }; if let Some(param) = &mut parameters.star_kwarg { do_adjust(param)?; } else if let Some(param) = parameters.kwonly_params.last_mut() { do_adjust(param)?; } else if let Some(StarArg::Param(param)) = parameters.star_arg.as_mut() { do_adjust(param)?; } else if let Some(param) = parameters.params.last_mut() { do_adjust(param)?; } Ok(()) } LibCST-1.2.0/native/libcst/src/nodes/macros.rs000066400000000000000000000026501456464173300211110ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. /// Generates a function that lazily imports and caches a module's member. This will hold a /// permanent reference to the imported member. Python's module cache is rarely purged though, so /// it typically won't matter. /// /// This cache is cheaper than looking up the module in python's module cache inspecting the /// module's `__dict__` each time you want access to the member. /// /// If you have multiple imports from the same module, we'll call `py.import` once for each member /// of the module. #[macro_export] macro_rules! py_import { ( $module_name:expr, $member_name:expr, $getter_fn:ident ) => { paste::paste! 
{ static [] : pyo3::once_cell::GILOnceCell> = pyo3::once_cell::GILOnceCell::new(); fn $getter_fn<'py>(py: pyo3::Python<'py>) -> pyo3::PyResult<&'py pyo3::PyAny> { Ok([].get_or_init(py, || { Ok(py.import($module_name)?.get($member_name)?.to_object(py)) }) .as_ref() .map_err(|err| err.clone_ref(py))? .as_ref(py)) } } }; } LibCST-1.2.0/native/libcst/src/nodes/mod.rs000066400000000000000000000172701456464173300204100ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree pub(crate) mod whitespace; pub use whitespace::{ Comment, EmptyLine, Fakeness, Newline, ParenthesizableWhitespace, ParenthesizedWhitespace, SimpleWhitespace, TrailingWhitespace, }; pub(crate) mod statement; pub use statement::{ AnnAssign, Annotation, AsName, Assert, Assign, AssignTarget, AssignTargetExpression, AugAssign, Break, ClassDef, CompoundStatement, Continue, Decorator, Del, DelTargetExpression, Else, ExceptHandler, ExceptStarHandler, Expr, Finally, For, FunctionDef, Global, If, Import, ImportAlias, ImportFrom, ImportNames, IndentedBlock, Match, MatchAs, MatchCase, MatchClass, MatchKeywordElement, MatchList, MatchMapping, MatchMappingElement, MatchOr, MatchOrElement, MatchPattern, MatchSequence, MatchSequenceElement, MatchSingleton, MatchStar, MatchTuple, MatchValue, NameItem, Nonlocal, OrElse, Pass, Raise, Return, SimpleStatementLine, SimpleStatementSuite, SmallStatement, StarrableMatchSequenceElement, Statement, Suite, Try, TryStar, While, With, WithItem, }; pub(crate) mod expression; pub use expression::{ Arg, Asynchronous, Attribute, Await, BaseSlice, BinaryOperation, BooleanOperation, Call, CompFor, CompIf, Comparison, ComparisonTarget, ConcatenatedString, Dict, DictComp, DictElement, Element, Ellipsis, Expression, Float, FormattedString, FormattedStringContent, FormattedStringExpression, FormattedStringText, From, GeneratorExp, IfExp, Imaginary, Index, Integer, Lambda, LeftCurlyBrace, LeftParen, LeftSquareBracket, List, ListComp, Name, NameOrAttribute, NamedExpr, Param, ParamSlash, ParamStar, Parameters, RightCurlyBrace, RightParen, RightSquareBracket, Set, SetComp, SimpleString, Slice, StarArg, StarredDictElement, StarredElement, String, Subscript, SubscriptElement, Tuple, UnaryOperation, Yield, YieldValue, }; pub(crate) mod op; pub use op::{ AssignEqual, AugOp, BinaryOp, BitOr, BooleanOp, Colon, Comma, CompOp, Dot, ImportStar, Semicolon, UnaryOp, }; pub(crate) mod module; pub use module::Module; mod codegen; pub use codegen::{Codegen, CodegenState}; pub(crate) mod traits; pub use traits::{Inflate, ParenthesizedNode, WithComma, WithLeadingLines}; pub(crate) mod inflate_helpers; pub(crate) mod deflated { pub use super::expression::{ DeflatedArg as Arg, DeflatedAsynchronous as Asynchronous, DeflatedAttribute as Attribute, DeflatedAwait as Await, DeflatedBaseSlice as BaseSlice, DeflatedBinaryOperation as BinaryOperation, DeflatedBooleanOperation as BooleanOperation, DeflatedCall as Call, DeflatedCompFor as CompFor, DeflatedCompIf as CompIf, DeflatedComparison as Comparison, DeflatedComparisonTarget as ComparisonTarget, DeflatedConcatenatedString as ConcatenatedString, DeflatedDict as Dict, DeflatedDictComp as DictComp, DeflatedDictElement as DictElement, DeflatedElement as Element, DeflatedEllipsis as Ellipsis, DeflatedExpression as Expression, DeflatedFloat as Float, DeflatedFormattedString as FormattedString, DeflatedFormattedStringContent as 
FormattedStringContent, DeflatedFormattedStringExpression as FormattedStringExpression, DeflatedFormattedStringText as FormattedStringText, DeflatedFrom as From, DeflatedGeneratorExp as GeneratorExp, DeflatedIfExp as IfExp, DeflatedImaginary as Imaginary, DeflatedIndex as Index, DeflatedInteger as Integer, DeflatedLambda as Lambda, DeflatedLeftCurlyBrace as LeftCurlyBrace, DeflatedLeftParen as LeftParen, DeflatedLeftSquareBracket as LeftSquareBracket, DeflatedList as List, DeflatedListComp as ListComp, DeflatedName as Name, DeflatedNameOrAttribute as NameOrAttribute, DeflatedNamedExpr as NamedExpr, DeflatedParam as Param, DeflatedParamSlash as ParamSlash, DeflatedParamStar as ParamStar, DeflatedParameters as Parameters, DeflatedRightCurlyBrace as RightCurlyBrace, DeflatedRightParen as RightParen, DeflatedRightSquareBracket as RightSquareBracket, DeflatedSet as Set, DeflatedSetComp as SetComp, DeflatedSimpleString as SimpleString, DeflatedSlice as Slice, DeflatedStarArg as StarArg, DeflatedStarredDictElement as StarredDictElement, DeflatedStarredElement as StarredElement, DeflatedString as String, DeflatedSubscript as Subscript, DeflatedSubscriptElement as SubscriptElement, DeflatedTuple as Tuple, DeflatedUnaryOperation as UnaryOperation, DeflatedYield as Yield, DeflatedYieldValue as YieldValue, }; pub use super::module::DeflatedModule as Module; pub use super::op::{ DeflatedAssignEqual as AssignEqual, DeflatedAugOp as AugOp, DeflatedBinaryOp as BinaryOp, DeflatedBitOr as BitOr, DeflatedBooleanOp as BooleanOp, DeflatedColon as Colon, DeflatedComma as Comma, DeflatedCompOp as CompOp, DeflatedDot as Dot, DeflatedImportStar as ImportStar, DeflatedSemicolon as Semicolon, DeflatedUnaryOp as UnaryOp, }; pub use super::statement::{ DeflatedAnnAssign as AnnAssign, DeflatedAnnotation as Annotation, DeflatedAsName as AsName, DeflatedAssert as Assert, DeflatedAssign as Assign, DeflatedAssignTarget as AssignTarget, DeflatedAssignTargetExpression as AssignTargetExpression, DeflatedAugAssign as AugAssign, DeflatedBreak as Break, DeflatedClassDef as ClassDef, DeflatedCompoundStatement as CompoundStatement, DeflatedContinue as Continue, DeflatedDecorator as Decorator, DeflatedDel as Del, DeflatedDelTargetExpression as DelTargetExpression, DeflatedElse as Else, DeflatedExceptHandler as ExceptHandler, DeflatedExceptStarHandler as ExceptStarHandler, DeflatedExpr as Expr, DeflatedFinally as Finally, DeflatedFor as For, DeflatedFunctionDef as FunctionDef, DeflatedGlobal as Global, DeflatedIf as If, DeflatedImport as Import, DeflatedImportAlias as ImportAlias, DeflatedImportFrom as ImportFrom, DeflatedImportNames as ImportNames, DeflatedIndentedBlock as IndentedBlock, DeflatedMatch as Match, DeflatedMatchAs as MatchAs, DeflatedMatchCase as MatchCase, DeflatedMatchClass as MatchClass, DeflatedMatchKeywordElement as MatchKeywordElement, DeflatedMatchList as MatchList, DeflatedMatchMapping as MatchMapping, DeflatedMatchMappingElement as MatchMappingElement, DeflatedMatchOr as MatchOr, DeflatedMatchOrElement as MatchOrElement, DeflatedMatchPattern as MatchPattern, DeflatedMatchSequence as MatchSequence, DeflatedMatchSequenceElement as MatchSequenceElement, DeflatedMatchSingleton as MatchSingleton, DeflatedMatchStar as MatchStar, DeflatedMatchTuple as MatchTuple, DeflatedMatchValue as MatchValue, DeflatedNameItem as NameItem, DeflatedNonlocal as Nonlocal, DeflatedOrElse as OrElse, DeflatedParamSpec as ParamSpec, DeflatedPass as Pass, DeflatedRaise as Raise, DeflatedReturn as Return, DeflatedSimpleStatementLine as 
SimpleStatementLine,
        DeflatedSimpleStatementSuite as SimpleStatementSuite,
        DeflatedSmallStatement as SmallStatement,
        DeflatedStarrableMatchSequenceElement as StarrableMatchSequenceElement,
        DeflatedStatement as Statement, DeflatedSuite as Suite, DeflatedTry as Try,
        DeflatedTryStar as TryStar, DeflatedTypeAlias as TypeAlias,
        DeflatedTypeParam as TypeParam, DeflatedTypeParameters as TypeParameters,
        DeflatedTypeVar as TypeVar, DeflatedTypeVarLike as TypeVarLike,
        DeflatedTypeVarTuple as TypeVarTuple, DeflatedWhile as While,
        DeflatedWith as With, DeflatedWithItem as WithItem,
    };
}

LibCST-1.2.0/native/libcst/src/nodes/module.rs

// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use std::mem::swap;

use crate::tokenizer::whitespace_parser::parse_empty_lines;
use crate::tokenizer::Token;
use crate::{
    nodes::{
        codegen::{Codegen, CodegenState},
        statement::*,
        whitespace::EmptyLine,
    },
    tokenizer::whitespace_parser::Config,
};
use libcst_derive::cst_node;
#[cfg(feature = "py")]
use libcst_derive::TryIntoPy;

use super::traits::{Inflate, Result, WithLeadingLines};

type TokenRef<'r, 'a> = &'r Token<'a>;

#[cst_node]
pub struct Module<'a> {
    pub body: Vec<Statement<'a>>,
    pub header: Vec<EmptyLine<'a>>,
    pub footer: Vec<EmptyLine<'a>>,

    pub default_indent: &'a str,
    pub default_newline: &'a str,
    pub has_trailing_newline: bool,
    pub encoding: String,

    pub(crate) eof_tok: TokenRef<'a>,
}

impl<'a> Codegen<'a> for Module<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        for h in &self.header {
            h.codegen(state);
        }
        for s in &self.body {
            s.codegen(state);
        }
        for nl in &self.footer {
            nl.codegen(state);
        }
    }
}

impl<'r, 'a> Inflate<'a> for DeflatedModule<'r, 'a> {
    type Inflated = Module<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        let default_indent = config.default_indent;
        let default_newline = config.default_newline;
        let has_trailing_newline = config.has_trailing_newline();
        let mut body = self.body.inflate(config)?;
        let mut footer = parse_empty_lines(
            config,
            &mut (*self.eof_tok).whitespace_before.borrow_mut(),
            Some(""),
        )?;
        let mut header = vec![];
        if let Some(stmt) = body.first_mut() {
            swap(stmt.leading_lines(), &mut header);
            let mut last_indented = None;
            for (num, line) in footer.iter().enumerate() {
                if !line.whitespace.0.is_empty() {
                    last_indented = Some(num);
                } else if line.comment.is_some() {
                    // This is a non-indented comment. Everything from here should belong in the
                    // footer.
                    break;
                }
            }
            if let Some(num) = last_indented {
                let (_, rest) = footer.split_at(num + 1);
                footer = rest.to_vec();
            }
        } else {
            swap(&mut header, &mut footer);
        }
        Ok(Self::Inflated {
            body,
            header,
            footer,
            default_indent,
            default_newline,
            has_trailing_newline,
            encoding: self.encoding,
        })
    }
}

LibCST-1.2.0/native/libcst/src/nodes/op.rs

// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
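// Illustrative round-trip sketch (added; not part of the original file). It
// assumes the crate-level `parse_module` entry point and that `CodegenState`
// implements `Default` and `ToString`, as the crate's round-trip tests do:
//
// #[cfg(test)]
// mod op_roundtrip_sketch {
//     use crate::{parse_module, Codegen};
//
//     #[test]
//     fn semicolon_and_comma_roundtrip() {
//         // Inflate + Codegen on the operator nodes below should preserve
//         // the original whitespace exactly.
//         for src in ["a = 1 ;  b = 2\n", "f(x ,y)\n"] {
//             let module = parse_module(src, None).expect("parse failed");
//             let mut state = Default::default();
//             module.codegen(&mut state);
//             assert_eq!(state.to_string(), src);
//         }
//     }
// }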
use super::{whitespace::ParenthesizableWhitespace, Codegen, CodegenState}; use crate::{ nodes::traits::{Inflate, Result}, tokenizer::{ whitespace_parser::{parse_parenthesizable_whitespace, parse_simple_whitespace, Config}, Token, }, }; use libcst_derive::cst_node; #[cfg(feature = "py")] use libcst_derive::TryIntoPy; type TokenRef<'r, 'a> = &'r Token<'a>; #[cst_node] pub struct Semicolon<'a> { /// Any space that appears directly before this semicolon. pub whitespace_before: ParenthesizableWhitespace<'a>, /// Any space that appears directly after this semicolon. pub whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] pub(crate) tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Semicolon<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token(";"); self.whitespace_after.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedSemicolon<'r, 'a> { type Inflated = Semicolon<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before = ParenthesizableWhitespace::SimpleWhitespace( parse_simple_whitespace(config, &mut (*self.tok).whitespace_before.borrow_mut())?, ); let whitespace_after = ParenthesizableWhitespace::SimpleWhitespace( parse_simple_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut())?, ); Ok(Self::Inflated { whitespace_before, whitespace_after, }) } } #[cst_node] pub struct Comma<'a> { /// Any space that appears directly before this comma. pub whitespace_before: ParenthesizableWhitespace<'a>, /// Any space that appears directly after this comma. pub whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] pub(crate) tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Comma<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token(","); self.whitespace_after.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedComma<'r, 'a> { type Inflated = Comma<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_after.borrow_mut(), )?; Ok(Self::Inflated { whitespace_before, whitespace_after, }) } } impl<'r, 'a> DeflatedComma<'r, 'a> { pub fn inflate_before(self, config: &Config<'a>) -> Result> { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_before.borrow_mut(), )?; let whitespace_after = Default::default(); Ok(Comma { whitespace_before, whitespace_after, }) } } #[cst_node] pub struct AssignEqual<'a> { /// Any space that appears directly before this equal sign. pub whitespace_before: ParenthesizableWhitespace<'a>, /// Any space that appears directly after this equal sign. 
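// Added note (illustrative): `Comma::inflate_before` above handles a trailing
// comma that directly precedes a closing delimiter, e.g. the last comma in
// `[1, 2,]`; only `whitespace_before` is parsed, since the whitespace after
// the comma is owned by the closing bracket.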
pub whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] pub(crate) tok: TokenRef<'a>, } impl<'a> Codegen<'a> for AssignEqual<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token("="); self.whitespace_after.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedAssignEqual<'r, 'a> { type Inflated = AssignEqual<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_after.borrow_mut(), )?; Ok(Self::Inflated { whitespace_before, whitespace_after, }) } } #[cst_node] pub struct Dot<'a> { /// Any space that appears directly before this dot. pub whitespace_before: ParenthesizableWhitespace<'a>, /// Any space that appears directly after this dot. pub whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] pub(crate) tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Dot<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token("."); self.whitespace_after.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedDot<'r, 'a> { type Inflated = Dot<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let whitespace_before = self.inflate_before(config)?; let whitespace_after = self.inflate_after(config)?; Ok(Self::Inflated { whitespace_before, whitespace_after, }) } } impl<'r, 'a> DeflatedDot<'r, 'a> { fn inflate_before(&mut self, config: &Config<'a>) -> Result> { parse_parenthesizable_whitespace(config, &mut (*self.tok).whitespace_before.borrow_mut()) } fn inflate_after(&mut self, config: &Config<'a>) -> Result> { parse_parenthesizable_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut()) } } #[cst_node] pub struct ImportStar {} pub(crate) fn make_importstar<'r, 'a>() -> DeflatedImportStar<'r, 'a> { DeflatedImportStar { _phantom: Default::default(), } } impl<'a> Codegen<'a> for ImportStar { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("*"); } } impl<'r, 'a> Inflate<'a> for DeflatedImportStar<'r, 'a> { type Inflated = ImportStar; fn inflate(self, _config: &Config<'a>) -> Result { Ok(ImportStar {}) } } #[cst_node] pub enum UnaryOp<'a> { Plus { whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, Minus { whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, BitInvert { whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, Not { whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, } impl<'a> Codegen<'a> for UnaryOp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { let (tok, whitespace_after) = match self { Self::Plus { whitespace_after, .. } => ("+", whitespace_after), Self::Minus { whitespace_after, .. } => ("-", whitespace_after), Self::BitInvert { whitespace_after, .. } => ("~", whitespace_after), Self::Not { whitespace_after, .. } => ("not", whitespace_after), }; state.add_token(tok); whitespace_after.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedUnaryOp<'r, 'a> { type Inflated = UnaryOp<'a>; fn inflate(self, config: &Config<'a>) -> Result { Ok(match self { Self::Plus { tok, .. 
} => { let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Plus { whitespace_after } } Self::Minus { tok, .. } => { let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Minus { whitespace_after } } Self::BitInvert { tok, .. } => { let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::BitInvert { whitespace_after } } Self::Not { tok, .. } => { let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Not { whitespace_after } } }) } } #[cst_node] pub enum BooleanOp<'a> { And { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, Or { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, } impl<'a> Codegen<'a> for BooleanOp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { let (tok, ws_bef, ws_aft) = match self { Self::And { whitespace_after, whitespace_before, .. } => ("and", whitespace_before, whitespace_after), Self::Or { whitespace_after, whitespace_before, .. } => ("or", whitespace_before, whitespace_after), }; ws_bef.codegen(state); state.add_token(tok); ws_aft.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedBooleanOp<'r, 'a> { type Inflated = BooleanOp<'a>; fn inflate(self, config: &Config<'a>) -> Result { Ok(match self { Self::And { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::And { whitespace_before, whitespace_after, } } Self::Or { tok, .. 
} => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Or { whitespace_before, whitespace_after, } } }) } } #[cst_node] pub enum BinaryOp<'a> { Add { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, Subtract { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, Multiply { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, Divide { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, FloorDivide { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, Modulo { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, Power { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, LeftShift { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, RightShift { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, BitOr { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, BitAnd { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, BitXor { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, MatrixMultiply { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, } impl<'a> Codegen<'a> for BinaryOp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { let (whitespace_before, whitespace_after) = match self { Self::Add { whitespace_before, whitespace_after, } | Self::Subtract { whitespace_before, whitespace_after, } | Self::Multiply { whitespace_before, whitespace_after, } | Self::Divide { whitespace_before, whitespace_after, } | Self::FloorDivide { whitespace_before, whitespace_after, } | Self::Modulo { whitespace_before, whitespace_after, } | Self::Power { whitespace_before, whitespace_after, } | Self::LeftShift { whitespace_before, whitespace_after, } | Self::RightShift { whitespace_before, whitespace_after, } | Self::BitOr { whitespace_before, whitespace_after, } | Self::BitAnd { whitespace_before, whitespace_after, } | Self::BitXor { whitespace_before, whitespace_after, } | Self::MatrixMultiply { whitespace_before, whitespace_after, } => (whitespace_before, whitespace_after), }; let tok = match self { BinaryOp::Add { .. } => "+", BinaryOp::Subtract { .. 
} => "-", BinaryOp::Multiply { .. } => "*", BinaryOp::Divide { .. } => "/", BinaryOp::FloorDivide { .. } => "//", BinaryOp::Modulo { .. } => "%", BinaryOp::Power { .. } => "**", BinaryOp::LeftShift { .. } => "<<", BinaryOp::RightShift { .. } => ">>", BinaryOp::BitOr { .. } => "|", BinaryOp::BitAnd { .. } => "&", BinaryOp::BitXor { .. } => "^", BinaryOp::MatrixMultiply { .. } => "@", }; whitespace_before.codegen(state); state.add_token(tok); whitespace_after.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedBinaryOp<'r, 'a> { type Inflated = BinaryOp<'a>; fn inflate(self, config: &Config<'a>) -> Result { Ok(match self { Self::Add { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Add { whitespace_before, whitespace_after, } } Self::Subtract { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Subtract { whitespace_before, whitespace_after, } } Self::Multiply { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Multiply { whitespace_before, whitespace_after, } } Self::Divide { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Divide { whitespace_before, whitespace_after, } } Self::FloorDivide { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::FloorDivide { whitespace_before, whitespace_after, } } Self::Modulo { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Modulo { whitespace_before, whitespace_after, } } Self::Power { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Power { whitespace_before, whitespace_after, } } Self::LeftShift { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::LeftShift { whitespace_before, whitespace_after, } } Self::RightShift { tok, .. 
} => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::RightShift { whitespace_before, whitespace_after, } } Self::BitOr { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::BitOr { whitespace_before, whitespace_after, } } Self::BitAnd { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::BitAnd { whitespace_before, whitespace_after, } } Self::BitXor { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::BitXor { whitespace_before, whitespace_after, } } Self::MatrixMultiply { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::MatrixMultiply { whitespace_before, whitespace_after, } } }) } } #[cst_node] pub enum CompOp<'a> { LessThan { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, GreaterThan { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, LessThanEqual { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, GreaterThanEqual { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, Equal { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, NotEqual { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, In { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, NotIn { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_between: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] not_tok: TokenRef<'a>, #[cfg_attr(feature = "py", skip_py)] in_tok: TokenRef<'a>, }, Is { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, IsNot { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_between: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] is_tok: TokenRef<'a>, #[cfg_attr(feature = "py", skip_py)] not_tok: 
TokenRef<'a>, }, } impl<'a> Codegen<'a> for CompOp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { let (bef, aft, between) = match self { Self::LessThan { whitespace_before, whitespace_after, } | Self::GreaterThan { whitespace_before, whitespace_after, } | Self::LessThanEqual { whitespace_before, whitespace_after, } | Self::GreaterThanEqual { whitespace_before, whitespace_after, } | Self::Equal { whitespace_before, whitespace_after, } | Self::NotEqual { whitespace_before, whitespace_after, } | Self::In { whitespace_before, whitespace_after, } | Self::Is { whitespace_before, whitespace_after, } => (whitespace_before, whitespace_after, None), Self::IsNot { whitespace_before, whitespace_between, whitespace_after, } => ( whitespace_before, whitespace_after, Some(whitespace_between), ), Self::NotIn { whitespace_before, whitespace_between, whitespace_after, } => ( whitespace_before, whitespace_after, Some(whitespace_between), ), }; let (first_tok, second_tok) = match self { CompOp::LessThan { .. } => ("<", None), CompOp::GreaterThan { .. } => (">", None), CompOp::LessThanEqual { .. } => ("<=", None), CompOp::GreaterThanEqual { .. } => (">=", None), CompOp::Equal { .. } => ("==", None), CompOp::NotEqual { .. } => ("!=", None), CompOp::In { .. } => ("in", None), CompOp::NotIn { .. } => ("not", Some("in")), CompOp::Is { .. } => ("is", None), CompOp::IsNot { .. } => ("is", Some("not")), }; bef.codegen(state); state.add_token(first_tok); if let (Some(btw), Some(second_tok)) = (between, second_tok) { btw.codegen(state); state.add_token(second_tok); } aft.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedCompOp<'r, 'a> { type Inflated = CompOp<'a>; fn inflate(self, config: &Config<'a>) -> Result { Ok(match self { Self::LessThan { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::LessThan { whitespace_before, whitespace_after, } } Self::GreaterThan { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::GreaterThan { whitespace_before, whitespace_after, } } Self::LessThanEqual { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::LessThanEqual { whitespace_before, whitespace_after, } } Self::GreaterThanEqual { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::GreaterThanEqual { whitespace_before, whitespace_after, } } Self::Equal { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Equal { whitespace_before, whitespace_after, } } Self::NotEqual { tok, .. 
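// Added note (illustrative): unlike the single-token operators in this match,
// `IsNot` ("is not") and `NotIn` ("not in") below span two tokens, so
// `whitespace_between` is parsed from the gap between them.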
} => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::NotEqual { whitespace_before, whitespace_after, } } Self::In { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::In { whitespace_before, whitespace_after, } } Self::Is { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::Is { whitespace_before, whitespace_after, } } Self::IsNot { is_tok, not_tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*is_tok).whitespace_before.borrow_mut(), )?; let whitespace_between = parse_parenthesizable_whitespace( config, &mut (*is_tok).whitespace_after.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*not_tok).whitespace_after.borrow_mut(), )?; Self::Inflated::IsNot { whitespace_before, whitespace_between, whitespace_after, } } Self::NotIn { not_tok, in_tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*not_tok).whitespace_before.borrow_mut(), )?; let whitespace_between = parse_parenthesizable_whitespace( config, &mut (*not_tok).whitespace_after.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*in_tok).whitespace_after.borrow_mut(), )?; Self::Inflated::NotIn { whitespace_before, whitespace_between, whitespace_after, } } }) } } #[cst_node] pub struct Colon<'a> { pub whitespace_before: ParenthesizableWhitespace<'a>, pub whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] pub(crate) tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedColon<'r, 'a> { type Inflated = Colon<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_after.borrow_mut(), )?; Ok(Self::Inflated { whitespace_before, whitespace_after, }) } } impl<'a> Codegen<'a> for Colon<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token(":"); self.whitespace_after.codegen(state); } } #[cst_node] pub enum AugOp<'a> { AddAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, SubtractAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, MultiplyAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, MatrixMultiplyAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, DivideAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: 
ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, ModuloAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, BitAndAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, BitOrAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, BitXorAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, LeftShiftAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, RightShiftAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, PowerAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, FloorDivideAssign { whitespace_before: ParenthesizableWhitespace<'a>, whitespace_after: ParenthesizableWhitespace<'a>, #[cfg_attr(feature = "py", skip_py)] tok: TokenRef<'a>, }, } impl<'r, 'a> Inflate<'a> for DeflatedAugOp<'r, 'a> { type Inflated = AugOp<'a>; fn inflate(self, config: &Config<'a>) -> Result { Ok(match self { Self::AddAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::AddAssign { whitespace_before, whitespace_after, } } Self::SubtractAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::SubtractAssign { whitespace_before, whitespace_after, } } Self::MultiplyAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::MultiplyAssign { whitespace_before, whitespace_after, } } Self::MatrixMultiplyAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::MatrixMultiplyAssign { whitespace_before, whitespace_after, } } Self::DivideAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::DivideAssign { whitespace_before, whitespace_after, } } Self::ModuloAssign { tok, .. 
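// [Editor's sketch, not LibCST source] The inflate impls above repeatedly "claim"
// whitespace out of a token's shared, mutable state via
// `parse_parenthesizable_whitespace(config, &mut tok.whitespace_before.borrow_mut())`.
// Modelling that state as RefCell<Option<String>> shows why each whitespace run
// ends up attached to exactly one CST node: the first claimant takes it, later
// claimants see nothing. (The real parser advances a cursor over the source
// rather than using Option; `MiniToken` and `claim_ws` are hypothetical names.)
use std::cell::RefCell;

struct MiniToken {
    whitespace_before: RefCell<Option<String>>,
}

fn claim_ws(slot: &RefCell<Option<String>>) -> String {
    // take() empties the slot, like the whitespace parser consuming its state.
    slot.borrow_mut().take().unwrap_or_default()
}

fn main() {
    let tok = MiniToken { whitespace_before: RefCell::new(Some("  ".to_string())) };
    assert_eq!(claim_ws(&tok.whitespace_before), "  "); // first node owns it
    assert_eq!(claim_ws(&tok.whitespace_before), ""); // nothing left for a second node
}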
} => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::ModuloAssign { whitespace_before, whitespace_after, } } Self::BitAndAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::BitAndAssign { whitespace_before, whitespace_after, } } Self::BitOrAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::BitOrAssign { whitespace_before, whitespace_after, } } Self::BitXorAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::BitXorAssign { whitespace_before, whitespace_after, } } Self::LeftShiftAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::LeftShiftAssign { whitespace_before, whitespace_after, } } Self::RightShiftAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::RightShiftAssign { whitespace_before, whitespace_after, } } Self::PowerAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::PowerAssign { whitespace_before, whitespace_after, } } Self::FloorDivideAssign { tok, .. } => { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*tok).whitespace_after.borrow_mut(), )?; Self::Inflated::FloorDivideAssign { whitespace_before, whitespace_after, } } }) } } impl<'a> Codegen<'a> for AugOp<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { let (tok, bef, aft) = match self { Self::AddAssign { whitespace_before, whitespace_after, .. } => ("+=", whitespace_before, whitespace_after), Self::SubtractAssign { whitespace_before, whitespace_after, .. } => ("-=", whitespace_before, whitespace_after), Self::MultiplyAssign { whitespace_before, whitespace_after, .. } => ("*=", whitespace_before, whitespace_after), Self::MatrixMultiplyAssign { whitespace_before, whitespace_after, .. } => ("@=", whitespace_before, whitespace_after), Self::DivideAssign { whitespace_before, whitespace_after, .. } => ("/=", whitespace_before, whitespace_after), Self::ModuloAssign { whitespace_before, whitespace_after, .. } => ("%=", whitespace_before, whitespace_after), Self::BitAndAssign { whitespace_before, whitespace_after, .. 
} => ("&=", whitespace_before, whitespace_after), Self::BitOrAssign { whitespace_before, whitespace_after, .. } => ("|=", whitespace_before, whitespace_after), Self::BitXorAssign { whitespace_before, whitespace_after, .. } => ("^=", whitespace_before, whitespace_after), Self::LeftShiftAssign { whitespace_before, whitespace_after, .. } => ("<<=", whitespace_before, whitespace_after), Self::RightShiftAssign { whitespace_before, whitespace_after, .. } => (">>=", whitespace_before, whitespace_after), Self::PowerAssign { whitespace_before, whitespace_after, .. } => ("**=", whitespace_before, whitespace_after), Self::FloorDivideAssign { whitespace_before, whitespace_after, .. } => ("//=", whitespace_before, whitespace_after), }; bef.codegen(state); state.add_token(tok); aft.codegen(state); } } #[cst_node] pub struct BitOr<'a> { pub whitespace_before: ParenthesizableWhitespace<'a>, pub whitespace_after: ParenthesizableWhitespace<'a>, pub(crate) tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedBitOr<'r, 'a> { type Inflated = BitOr<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_before.borrow_mut(), )?; let whitespace_after = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_after.borrow_mut(), )?; Ok(Self::Inflated { whitespace_before, whitespace_after, }) } } impl<'a> Codegen<'a> for BitOr<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before.codegen(state); state.add_token("|"); self.whitespace_after.codegen(state); } } LibCST-1.2.0/native/libcst/src/nodes/parser_config.rs000066400000000000000000000106571456464173300224540ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use pyo3::exceptions::PyIndexError; use pyo3::prelude::*; use pyo3::types::{IntoPyDict, PyDict, PySequence, PyString}; use pyo3::wrap_pyfunction; use crate::py_cached::PyCached; #[pyclass(subclass, module = "libcst_native.parser_config")] #[text_signature = "(*, lines, default_newline)"] pub struct BaseWhitespaceParserConfig { pub lines: PyCached>, pub default_newline: PyCached, } #[pymethods] impl BaseWhitespaceParserConfig { #[new] fn new(lines: &PySequence, default_newline: &PyString) -> PyResult { // These fields will get initialized when ParserConfig.__init__ (our subclass) runs Ok(Self { lines: lines.extract()?, default_newline: default_newline.extract()?, }) } #[getter] fn get_lines(&self, py: Python) -> PyObject { self.lines.to_object(py) } #[getter] fn get_default_newline(&self, py: Python) -> PyObject { self.default_newline.to_object(py) } } impl BaseWhitespaceParserConfig { /// Equivalent to `config.lines.unwrap()[line_number - 1]`, but it return a PyErr when we get /// an index that's out of range, instead of panicing. pub fn get_line(&self, line_number: usize) -> PyResult<&str> { let err_fn = || PyIndexError::new_err(format!("line number of {} is out of range", line_number)); self.lines .get(line_number.checked_sub(1).ok_or_else(err_fn)?) .map(|l| &l[..]) .ok_or_else(err_fn) } /// Equivalent to `config.get_line(line_number)[column_index..]`, but it return a PyErr when /// we get an column index that's out of range, instead of panicing. pub fn get_line_after_column(&self, line_number: usize, column_index: usize) -> PyResult<&str> { self.get_line(line_number)? .get(column_index..) 
.ok_or_else(|| { PyIndexError::new_err(format!("column index of {} is out of range", column_index)) }) } } // These fields are private and PyObject, since we don't currently care about using them from // within rust. #[pyclass(extends=BaseWhitespaceParserConfig, module="libcst_native.parser_config")] #[text_signature = "(*, lines, encoding, default_indent, default_newline, has_trailing_newline, version, future_imports)"] pub struct ParserConfig { // lines is inherited #[pyo3(get)] encoding: PyObject, #[pyo3(get)] default_indent: PyObject, // default_newline is inherited #[pyo3(get)] has_trailing_newline: PyObject, #[pyo3(get)] version: PyObject, #[pyo3(get)] future_imports: PyObject, } #[pymethods] impl ParserConfig { #[new] fn new( lines: &PySequence, encoding: PyObject, default_indent: PyObject, default_newline: &PyString, has_trailing_newline: PyObject, version: PyObject, future_imports: PyObject, ) -> PyResult<(Self, BaseWhitespaceParserConfig)> { Ok(( Self { encoding, default_indent, has_trailing_newline, version, future_imports, }, BaseWhitespaceParserConfig::new(lines, default_newline)?, )) } } /// An internal helper function used by python unit tests to compare configs. #[pyfunction] fn parser_config_asdict<'py>(py: Python<'py>, config: PyRef<'py, ParserConfig>) -> &'py PyDict { let super_config: &BaseWhitespaceParserConfig = config.as_ref(); vec![ ("lines", super_config.lines.to_object(py)), ("encoding", config.encoding.clone_ref(py)), ("default_indent", config.default_indent.clone_ref(py)), ( "default_newline", super_config.default_newline.to_object(py), ), ( "has_trailing_newline", config.has_trailing_newline.clone_ref(py), ), ("version", config.version.clone_ref(py)), ("future_imports", config.future_imports.clone_ref(py)), ] .into_py_dict(py) } pub fn init_module(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::()?; m.add_class::()?; m.add_function(wrap_pyfunction!(parser_config_asdict, m)?) .unwrap(); Ok(self) } LibCST-1.2.0/native/libcst/src/nodes/py_cached.rs000066400000000000000000000030641456464173300215440ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use pyo3::prelude::*; use std::convert::AsRef; use std::ops::Deref; /// An immutable wrapper around a rust type T and it's PyObject equivalent. Caches the conversion /// to and from the PyObject. pub struct PyCached { native: T, py_object: PyObject, } impl PyCached where T: ToPyObject, { pub fn new(py: Python, native: T) -> Self { Self { py_object: native.to_object(py), native, } } } impl<'source, T> FromPyObject<'source> for PyCached where T: FromPyObject<'source>, { fn extract(ob: &'source PyAny) -> PyResult { Python::with_gil(|py| { Ok(PyCached { native: ob.extract()?, py_object: ob.to_object(py), }) }) } } impl IntoPy for PyCached { fn into_py(self, _py: Python) -> PyObject { self.py_object } } impl ToPyObject for PyCached { fn to_object(&self, py: Python) -> PyObject { self.py_object.clone_ref(py) } } impl AsRef for PyCached { fn as_ref(&self) -> &T { &self.native } } impl Deref for PyCached { type Target = T; fn deref(&self) -> &Self::Target { &self.native } } impl From for PyCached where T: ToPyObject, { fn from(val: T) -> Self { Python::with_gil(|py| Self::new(py, val)) } } LibCST-1.2.0/native/libcst/src/nodes/statement.rs000066400000000000000000003363231456464173300216400ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. 
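// [Editor's sketch, not LibCST source] A small usage sketch for PyCached,
// written as a test module. It assumes only the APIs defined above plus
// pyo3's Python::with_gil and AsPyPointer, and may need pyo3's
// auto-initialize feature to run as a standalone test.
#[cfg(test)]
mod py_cached_sketch {
    use super::*;
    use pyo3::AsPyPointer;

    #[test]
    fn cached_conversion_is_reused() {
        Python::with_gil(|py| {
            // Construct once: the PyObject is built here and never again.
            let cached: PyCached<Vec<String>> = PyCached::new(py, vec!["x".to_string()]);
            // Deref reads go straight to the native value...
            assert_eq!(cached.len(), 1);
            // ...while to_object only clone_refs the cached PyObject, so both
            // calls hand back the very same Python object.
            let first = cached.to_object(py);
            let second = cached.to_object(py);
            assert_eq!(first.as_ptr(), second.as_ptr());
        })
    }
}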
LibCST-1.2.0/native/libcst/src/nodes/statement.rs000066400000000000000000003363231456464173300216340ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use std::mem::swap;

use super::{
    inflate_helpers::adjust_parameters_trailing_whitespace, Attribute, Codegen, CodegenState,
    Comma, Dot, EmptyLine, Expression, From, ImportStar, LeftParen, List, Name, NameOrAttribute,
    Parameters, ParenthesizableWhitespace, RightParen, Semicolon, SimpleWhitespace,
    StarredElement, Subscript, TrailingWhitespace, Tuple,
};
use crate::{
    nodes::{
        expression::*,
        op::*,
        traits::{
            Inflate, ParenthesizedDeflatedNode, ParenthesizedNode, Result, WithComma,
            WithLeadingLines,
        },
    },
    tokenizer::{
        whitespace_parser::{
            parse_empty_lines, parse_parenthesizable_whitespace, parse_simple_whitespace,
            parse_trailing_whitespace, Config,
        },
        Token,
    },
    LeftCurlyBrace, LeftSquareBracket, RightCurlyBrace, RightSquareBracket,
};
#[cfg(feature = "py")]
use libcst_derive::TryIntoPy;
use libcst_derive::{cst_node, Codegen, Inflate, ParenthesizedDeflatedNode, ParenthesizedNode};

type TokenRef<'r, 'a> = &'r Token<'a>;

#[allow(clippy::large_enum_variant)]
#[cst_node(Inflate, Codegen)]
pub enum Statement<'a> {
    Simple(SimpleStatementLine<'a>),
    Compound(CompoundStatement<'a>),
}

impl<'a> WithLeadingLines<'a> for Statement<'a> {
    fn leading_lines(&mut self) -> &mut Vec<EmptyLine<'a>> {
        match self {
            Self::Simple(s) => &mut s.leading_lines,
            Self::Compound(c) => c.leading_lines(),
        }
    }
}

#[allow(clippy::large_enum_variant)]
#[cst_node(Inflate, Codegen)]
pub enum CompoundStatement<'a> {
    FunctionDef(FunctionDef<'a>),
    If(If<'a>),
    For(For<'a>),
    While(While<'a>),
    ClassDef(ClassDef<'a>),
    Try(Try<'a>),
    TryStar(TryStar<'a>),
    With(With<'a>),
    Match(Match<'a>),
}

impl<'a> WithLeadingLines<'a> for CompoundStatement<'a> {
    fn leading_lines(&mut self) -> &mut Vec<EmptyLine<'a>> {
        match self {
            Self::FunctionDef(f) => &mut f.leading_lines,
            Self::If(f) => &mut f.leading_lines,
            Self::For(f) => &mut f.leading_lines,
            Self::While(f) => &mut f.leading_lines,
            Self::ClassDef(c) => &mut c.leading_lines,
            Self::Try(t) => &mut t.leading_lines,
            Self::TryStar(t) => &mut t.leading_lines,
            Self::With(w) => &mut w.leading_lines,
            Self::Match(m) => &mut m.leading_lines,
        }
    }
}

#[cst_node(Inflate, Codegen)]
pub enum Suite<'a> {
    IndentedBlock(IndentedBlock<'a>),
    SimpleStatementSuite(SimpleStatementSuite<'a>),
}

#[cst_node]
pub struct IndentedBlock<'a> {
    /// Sequence of statements belonging to this indented block.
    pub body: Vec<Statement<'a>>,
    /// Any optional trailing comment and the final ``NEWLINE`` at the end of the line.
    pub header: TrailingWhitespace<'a>,
    /// A string represents a specific indentation. A ``None`` value uses the module's
    /// default indentation. This is included because indentation is allowed to be
    /// inconsistent across a file, just not ambiguously.
    pub indent: Option<&'a str>,
    /// Any trailing comments or lines after the dedent that are owned by this indented
    /// block. Statements own preceding and same-line trailing comments, but not
    /// trailing lines, so it falls on :class:`IndentedBlock` to own it. In the case
    /// that a statement follows an :class:`IndentedBlock`, that statement will own the
    /// comments and lines that are at the same indent as the statement, and this
    /// :class:`IndentedBlock` will own the comments and lines that are indented
    /// further.
    pub footer: Vec<EmptyLine<'a>>,

    pub(crate) newline_tok: TokenRef<'a>,
    pub(crate) indent_tok: TokenRef<'a>,
    pub(crate) dedent_tok: TokenRef<'a>,
}

impl<'a> Codegen<'a> for IndentedBlock<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        self.header.codegen(state);

        let indent = match self.indent {
            Some(i) => i,
            None => state.default_indent,
        };
        state.indent(indent);

        if self.body.is_empty() {
            // Empty indented blocks are not syntactically valid in Python unless they
            // contain a 'pass' statement, so add one here.
            state.add_indent();
            state.add_token("pass");
            state.add_token(state.default_newline);
        } else {
            for stmt in &self.body {
                // IndentedBlock is responsible for adjusting the current indentation
                // level, but its children are responsible for actually adding that
                // indentation to the token list.
                stmt.codegen(state);
            }
        }
        for f in &self.footer {
            f.codegen(state);
        }

        state.dedent();
    }
}

impl<'r, 'a> Inflate<'a> for DeflatedIndentedBlock<'r, 'a> {
    type Inflated = IndentedBlock<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        let body = self.body.inflate(config)?;
        // We want to be able to only keep comments in the footer that are actually for
        // this IndentedBlock. We do so by assuming that lines which are indented to the
        // same level as the block itself are comments that go at the footer of the
        // block. Comments that are indented to less than this indent are assumed to
        // belong to the next line of code. We override the indent here because the
        // dedent node's absolute indent is the resulting indentation after the dedent
        // is performed. It's this way because the whitespace state for both the dedent's
        // whitespace_after and the next BaseCompoundStatement's whitespace_before is
        // shared. This allows us to partially parse here and parse the rest of the
        // whitespace and comments on the next line, effectively making sure that
        // comments are attached to the correct node.
        let footer = parse_empty_lines(
            config,
            &mut (*self.dedent_tok).whitespace_after.borrow_mut(),
            Some(self.indent_tok.whitespace_before.borrow().absolute_indent),
        )?;
        let header = parse_trailing_whitespace(
            config,
            &mut (*self.newline_tok).whitespace_before.borrow_mut(),
        )?;
        let mut indent = self.indent_tok.relative_indent;
        if indent == Some(config.default_indent) {
            indent = None;
        }
        Ok(Self::Inflated {
            body,
            header,
            indent,
            footer,
        })
    }
}

#[cst_node]
pub struct SimpleStatementSuite<'a> {
    /// Sequence of small statements. All but the last statement are required to have
    /// a semicolon.
    pub body: Vec<SmallStatement<'a>>,

    /// The whitespace between the colon in the parent statement and the body.
    pub leading_whitespace: SimpleWhitespace<'a>,
    /// Any optional trailing comment and the final ``NEWLINE`` at the end of the line.
pub trailing_whitespace: TrailingWhitespace<'a>, pub(crate) first_tok: TokenRef<'a>, pub(crate) newline_tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedSimpleStatementSuite<'r, 'a> { type Inflated = SimpleStatementSuite<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_whitespace = parse_simple_whitespace( config, &mut (*self.first_tok).whitespace_before.borrow_mut(), )?; let body = self.body.inflate(config)?; let trailing_whitespace = parse_trailing_whitespace( config, &mut (*self.newline_tok).whitespace_before.borrow_mut(), )?; Ok(Self::Inflated { body, leading_whitespace, trailing_whitespace, }) } } fn _simple_statement_codegen<'a>( body: &[SmallStatement<'a>], trailing_whitespace: &TrailingWhitespace<'a>, state: &mut CodegenState<'a>, ) { for stmt in body { stmt.codegen(state); // TODO: semicolon } if body.is_empty() { // Empty simple statement blocks are not syntactically valid in Python // unless they contain a 'pass' statement, so add one here. state.add_token("pass") } trailing_whitespace.codegen(state); } impl<'a> Codegen<'a> for SimpleStatementSuite<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.leading_whitespace.codegen(state); _simple_statement_codegen(&self.body, &self.trailing_whitespace, state); } } #[cst_node] pub struct SimpleStatementLine<'a> { /// Sequence of small statements. All but the last statement are required to have /// a semicolon. pub body: Vec>, /// Sequence of empty lines appearing before this simple statement line. pub leading_lines: Vec>, /// Any optional trailing comment and the final ``NEWLINE`` at the end of the line. pub trailing_whitespace: TrailingWhitespace<'a>, pub(crate) first_tok: TokenRef<'a>, pub(crate) newline_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for SimpleStatementLine<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for line in &self.leading_lines { line.codegen(state); } state.add_indent(); _simple_statement_codegen(&self.body, &self.trailing_whitespace, state); } } impl<'r, 'a> Inflate<'a> for DeflatedSimpleStatementLine<'r, 'a> { type Inflated = SimpleStatementLine<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut (*self.first_tok).whitespace_before.borrow_mut(), None, )?; let body = self.body.inflate(config)?; let trailing_whitespace = parse_trailing_whitespace( config, &mut (*self.newline_tok).whitespace_before.borrow_mut(), )?; Ok(Self::Inflated { body, leading_lines, trailing_whitespace, }) } } #[allow(dead_code, clippy::large_enum_variant)] #[cst_node(Codegen, Inflate)] pub enum SmallStatement<'a> { Pass(Pass<'a>), Break(Break<'a>), Continue(Continue<'a>), Return(Return<'a>), Expr(Expr<'a>), Assert(Assert<'a>), Import(Import<'a>), ImportFrom(ImportFrom<'a>), Assign(Assign<'a>), AnnAssign(AnnAssign<'a>), Raise(Raise<'a>), Global(Global<'a>), Nonlocal(Nonlocal<'a>), AugAssign(AugAssign<'a>), Del(Del<'a>), TypeAlias(TypeAlias<'a>), } impl<'r, 'a> DeflatedSmallStatement<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { match self { Self::Pass(p) => Self::Pass(p.with_semicolon(semicolon)), Self::Break(p) => Self::Break(p.with_semicolon(semicolon)), Self::Continue(p) => Self::Continue(p.with_semicolon(semicolon)), Self::Expr(p) => Self::Expr(p.with_semicolon(semicolon)), Self::Import(i) => Self::Import(i.with_semicolon(semicolon)), Self::ImportFrom(i) => Self::ImportFrom(i.with_semicolon(semicolon)), Self::Assign(a) => Self::Assign(a.with_semicolon(semicolon)), Self::AnnAssign(a) => 
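// [Editor's sketch, not LibCST source] Both IndentedBlock::codegen and
// _simple_statement_codegen above substitute a `pass` when the body is empty,
// since an empty block is not valid Python. A mini code generator shows the
// same rule; `emit_block` is a hypothetical helper.
fn emit_block(body: &[&str], indent: &str) -> String {
    let mut out = String::new();
    if body.is_empty() {
        // Empty indented blocks are not valid Python; substitute `pass`.
        out.push_str(indent);
        out.push_str("pass\n");
    } else {
        for stmt in body {
            out.push_str(indent);
            out.push_str(stmt);
            out.push('\n');
        }
    }
    out
}

fn main() {
    assert_eq!(emit_block(&[], "    "), "    pass\n");
    assert_eq!(emit_block(&["x = 1"], "    "), "    x = 1\n");
}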
Self::AnnAssign(a.with_semicolon(semicolon)), Self::Return(r) => Self::Return(r.with_semicolon(semicolon)), Self::Assert(a) => Self::Assert(a.with_semicolon(semicolon)), Self::Raise(r) => Self::Raise(r.with_semicolon(semicolon)), Self::Global(g) => Self::Global(g.with_semicolon(semicolon)), Self::Nonlocal(l) => Self::Nonlocal(l.with_semicolon(semicolon)), Self::AugAssign(a) => Self::AugAssign(a.with_semicolon(semicolon)), Self::Del(d) => Self::Del(d.with_semicolon(semicolon)), Self::TypeAlias(t) => Self::TypeAlias(t.with_semicolon(semicolon)), } } } #[cst_node] pub struct Pass<'a> { pub semicolon: Option>, } impl<'r, 'a> DeflatedPass<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon } } } impl<'a> Codegen<'a> for Pass<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("pass"); self.semicolon.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedPass<'r, 'a> { type Inflated = Pass<'a>; fn inflate(self, config: &Config<'a>) -> Result { let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { semicolon }) } } #[cst_node] pub struct Break<'a> { pub semicolon: Option>, } impl<'r, 'a> DeflatedBreak<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon } } } impl<'a> Codegen<'a> for Break<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("break"); self.semicolon.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedBreak<'r, 'a> { type Inflated = Break<'a>; fn inflate(self, config: &Config<'a>) -> Result { let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { semicolon }) } } #[cst_node] pub struct Continue<'a> { pub semicolon: Option>, } impl<'r, 'a> DeflatedContinue<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon } } } impl<'a> Codegen<'a> for Continue<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("continue"); self.semicolon.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedContinue<'r, 'a> { type Inflated = Continue<'a>; fn inflate(self, config: &Config<'a>) -> Result { let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { semicolon }) } } #[cst_node] pub struct Expr<'a> { pub value: Expression<'a>, pub semicolon: Option>, } impl<'r, 'a> DeflatedExpr<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } impl<'a> Codegen<'a> for Expr<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.value.codegen(state); self.semicolon.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedExpr<'r, 'a> { type Inflated = Expr<'a>; fn inflate(self, config: &Config<'a>) -> Result { let value = self.value.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { value, semicolon }) } } #[cst_node] pub struct Assign<'a> { pub targets: Vec>, pub value: Expression<'a>, pub semicolon: Option>, } impl<'a> Codegen<'a> for Assign<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for target in &self.targets { target.codegen(state); } self.value.codegen(state); if let Some(semi) = &self.semicolon { semi.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedAssign<'r, 'a> { type Inflated = Assign<'a>; fn inflate(self, config: &Config<'a>) -> Result { let targets = self.targets.inflate(config)?; let value = self.value.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { targets, value, semicolon, }) } } impl<'r, 'a> DeflatedAssign<'r, 'a> { pub fn with_semicolon(self, 
semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct AssignTarget<'a> { pub target: AssignTargetExpression<'a>, pub whitespace_before_equal: SimpleWhitespace<'a>, pub whitespace_after_equal: SimpleWhitespace<'a>, pub(crate) equal_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for AssignTarget<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.target.codegen(state); self.whitespace_before_equal.codegen(state); state.add_token("="); self.whitespace_after_equal.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedAssignTarget<'r, 'a> { type Inflated = AssignTarget<'a>; fn inflate(self, config: &Config<'a>) -> Result { let target = self.target.inflate(config)?; let whitespace_before_equal = parse_simple_whitespace( config, &mut (*self.equal_tok).whitespace_before.borrow_mut(), )?; let whitespace_after_equal = parse_simple_whitespace(config, &mut (*self.equal_tok).whitespace_after.borrow_mut())?; Ok(Self::Inflated { target, whitespace_before_equal, whitespace_after_equal, }) } } #[allow(clippy::large_enum_variant)] #[cst_node(Codegen, ParenthesizedNode, Inflate)] pub enum AssignTargetExpression<'a> { Name(Box>), Attribute(Box>), StarredElement(Box>), Tuple(Box>), List(Box>), Subscript(Box>), } #[cst_node] pub struct Import<'a> { pub names: Vec>, pub semicolon: Option>, pub whitespace_after_import: SimpleWhitespace<'a>, pub(crate) import_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Import<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("import"); self.whitespace_after_import.codegen(state); for (i, name) in self.names.iter().enumerate() { name.codegen(state); if name.comma.is_none() && i < self.names.len() - 1 { state.add_token(", "); } } if let Some(semi) = &self.semicolon { semi.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedImport<'r, 'a> { type Inflated = Import<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_import = parse_simple_whitespace( config, &mut (*self.import_tok).whitespace_after.borrow_mut(), )?; let names = self.names.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { names, semicolon, whitespace_after_import, }) } } impl<'r, 'a> DeflatedImport<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct ImportFrom<'a> { #[cfg_attr(feature = "py", no_py_default)] pub module: Option>, pub names: ImportNames<'a>, pub relative: Vec>, pub lpar: Option>, pub rpar: Option>, pub semicolon: Option>, pub whitespace_after_from: SimpleWhitespace<'a>, pub whitespace_before_import: SimpleWhitespace<'a>, pub whitespace_after_import: SimpleWhitespace<'a>, pub(crate) from_tok: TokenRef<'a>, pub(crate) import_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for ImportFrom<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("from"); self.whitespace_after_from.codegen(state); for dot in &self.relative { dot.codegen(state); } if let Some(module) = &self.module { module.codegen(state); } self.whitespace_before_import.codegen(state); state.add_token("import"); self.whitespace_after_import.codegen(state); if let Some(lpar) = &self.lpar { lpar.codegen(state); } self.names.codegen(state); if let Some(rpar) = &self.rpar { rpar.codegen(state); } if let Some(semi) = &self.semicolon { semi.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedImportFrom<'r, 'a> { type Inflated = ImportFrom<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_from = 
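// [Editor's sketch, not LibCST source] The `with_semicolon` builders above all
// rely on Rust's struct-update syntax `Self { semicolon, ..self }`, which
// replaces one field and moves every other field over unchanged. `MiniExpr`
// is a hypothetical stand-in for the Deflated* statement nodes.
#[derive(Debug, PartialEq)]
struct MiniExpr {
    value: String,
    semicolon: Option<String>,
}

impl MiniExpr {
    fn with_semicolon(self, semicolon: Option<String>) -> Self {
        // Only `semicolon` changes; `value` is moved over as-is.
        Self { semicolon, ..self }
    }
}

fn main() {
    let e = MiniExpr { value: "x".into(), semicolon: None };
    let e = e.with_semicolon(Some(";".into()));
    assert_eq!(e.semicolon.as_deref(), Some(";"));
    assert_eq!(e.value, "x");
}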
parse_simple_whitespace(config, &mut (*self.from_tok).whitespace_after.borrow_mut())?; let module = self.module.inflate(config)?; let whitespace_after_import = parse_simple_whitespace( config, &mut (*self.import_tok).whitespace_after.borrow_mut(), )?; let mut relative = inflate_dots(self.relative, config)?; let mut whitespace_before_import = Default::default(); if !relative.is_empty() && module.is_none() { // For relative-only imports relocate the space after the final dot to be owned // by the import token. if let Some(Dot { whitespace_after: ParenthesizableWhitespace::SimpleWhitespace(dot_ws), .. }) = relative.last_mut() { swap(dot_ws, &mut whitespace_before_import); } } else { whitespace_before_import = parse_simple_whitespace( config, &mut (*self.import_tok).whitespace_before.borrow_mut(), )?; } let lpar = self.lpar.inflate(config)?; let names = self.names.inflate(config)?; let rpar = self.rpar.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { module, names, relative, lpar, rpar, semicolon, whitespace_after_from, whitespace_before_import, whitespace_after_import, }) } } fn inflate_dots<'r, 'a>( dots: Vec>, config: &Config<'a>, ) -> Result>> { let mut ret: Vec> = vec![]; let mut last_tok: Option> = None; for dot in dots { if let Some(last_tokref) = &last_tok { // Consecutive dots having the same Token can only happen if `...` was // parsed as a single ELLIPSIS token. In this case the token's // whitespace_before belongs to the first dot, but the whitespace_after is // moved to the 3rd dot (by swapping it twice) if last_tokref.start_pos == dot.tok.start_pos { let mut subsequent_dot = Dot { whitespace_before: Default::default(), whitespace_after: Default::default(), }; swap( &mut ret.last_mut().unwrap().whitespace_after, &mut subsequent_dot.whitespace_after, ); ret.push(subsequent_dot); continue; } } last_tok = Some(dot.tok); ret.push(dot.inflate(config)?); } Ok(ret) } impl<'r, 'a> DeflatedImportFrom<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct ImportAlias<'a> { pub name: NameOrAttribute<'a>, pub asname: Option>, pub comma: Option>, } impl<'r, 'a> Inflate<'a> for DeflatedImportAlias<'r, 'a> { type Inflated = ImportAlias<'a>; fn inflate(self, config: &Config<'a>) -> Result { let name = self.name.inflate(config)?; let asname = self.asname.inflate(config)?; let comma = self.comma.inflate(config)?; Ok(Self::Inflated { name, asname, comma, }) } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedImportAlias<'r, 'a> { fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { let comma = Some(comma); Self { comma, ..self } } } impl<'a> Codegen<'a> for ImportAlias<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.name.codegen(state); if let Some(asname) = &self.asname { asname.codegen(state); } if let Some(comma) = &self.comma { comma.codegen(state); } } } #[cst_node] pub struct AsName<'a> { pub name: AssignTargetExpression<'a>, pub whitespace_before_as: ParenthesizableWhitespace<'a>, pub whitespace_after_as: ParenthesizableWhitespace<'a>, pub(crate) as_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for AsName<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.whitespace_before_as.codegen(state); state.add_token("as"); self.whitespace_after_as.codegen(state); self.name.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedAsName<'r, 'a> { type Inflated = AsName<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before_as = 
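// [Editor's sketch, not LibCST source] The swap-based carry in inflate_dots
// above, reduced to a standalone function: when `...` was tokenized as one
// ELLIPSIS token, three Dot nodes are synthesized and the trailing whitespace
// must end up on the *last* dot only, so each newly created dot steals the
// whitespace from its predecessor. `MiniDot` and `split_ellipsis` are
// hypothetical names.
use std::mem::swap;

#[derive(Default, Debug, PartialEq)]
struct MiniDot {
    whitespace_after: String,
}

fn split_ellipsis(ws_after: &str) -> Vec<MiniDot> {
    let mut dots = vec![MiniDot { whitespace_after: ws_after.to_string() }];
    for _ in 0..2 {
        let mut next = MiniDot::default();
        // Carry the whitespace forward so only the final dot owns it.
        swap(&mut dots.last_mut().unwrap().whitespace_after, &mut next.whitespace_after);
        dots.push(next);
    }
    dots
}

fn main() {
    let dots = split_ellipsis(" ");
    assert_eq!(dots.len(), 3);
    assert_eq!(dots[0].whitespace_after, "");
    assert_eq!(dots[1].whitespace_after, "");
    assert_eq!(dots[2].whitespace_after, " ");
}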
parse_parenthesizable_whitespace( config, &mut (*self.as_tok).whitespace_before.borrow_mut(), )?; let whitespace_after_as = parse_parenthesizable_whitespace( config, &mut (*self.as_tok).whitespace_after.borrow_mut(), )?; let name = self.name.inflate(config)?; Ok(Self::Inflated { name, whitespace_before_as, whitespace_after_as, }) } } #[cst_node(Inflate)] pub enum ImportNames<'a> { Star(ImportStar), Aliases(Vec>), } impl<'a> Codegen<'a> for ImportNames<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { match self { Self::Star(s) => s.codegen(state), Self::Aliases(aliases) => { for (i, alias) in aliases.iter().enumerate() { alias.codegen(state); if alias.comma.is_none() && i < aliases.len() - 1 { state.add_token(", "); } } } } } } #[cst_node] pub struct FunctionDef<'a> { pub name: Name<'a>, pub type_parameters: Option>, pub params: Parameters<'a>, pub body: Suite<'a>, pub decorators: Vec>, pub returns: Option>, pub asynchronous: Option>, pub leading_lines: Vec>, pub lines_after_decorators: Vec>, pub whitespace_after_def: SimpleWhitespace<'a>, pub whitespace_after_name: SimpleWhitespace<'a>, pub whitespace_after_type_parameters: SimpleWhitespace<'a>, pub whitespace_before_params: ParenthesizableWhitespace<'a>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) async_tok: Option>, pub(crate) def_tok: TokenRef<'a>, pub(crate) open_paren_tok: TokenRef<'a>, pub(crate) close_paren_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'r, 'a> DeflatedFunctionDef<'r, 'a> { pub fn with_decorators(self, decorators: Vec>) -> Self { Self { decorators, ..self } } } impl<'a> Codegen<'a> for FunctionDef<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for l in &self.leading_lines { l.codegen(state); } for dec in self.decorators.iter() { dec.codegen(state); } for l in &self.lines_after_decorators { l.codegen(state); } state.add_indent(); if let Some(asy) = &self.asynchronous { asy.codegen(state); } state.add_token("def"); self.whitespace_after_def.codegen(state); self.name.codegen(state); self.whitespace_after_name.codegen(state); if let Some(tp) = &self.type_parameters { tp.codegen(state); self.whitespace_after_type_parameters.codegen(state); } state.add_token("("); self.whitespace_before_params.codegen(state); self.params.codegen(state); state.add_token(")"); if let Some(ann) = &self.returns { ann.codegen(state, "->"); } self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedFunctionDef<'r, 'a> { type Inflated = FunctionDef<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let mut decorators = self.decorators.inflate(config)?; let (asynchronous, leading_lines) = if let Some(asy) = self.async_tok.as_mut() { let whitespace_after = parse_parenthesizable_whitespace(config, &mut asy.whitespace_after.borrow_mut())?; ( Some(Asynchronous { whitespace_after }), Some(parse_empty_lines( config, &mut asy.whitespace_before.borrow_mut(), None, )?), ) } else { (None, None) }; let mut leading_lines = if let Some(ll) = leading_lines { ll } else { parse_empty_lines( config, &mut (*self.def_tok).whitespace_before.borrow_mut(), None, )? 
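// [Editor's sketch, not LibCST source] The double-swap performed just below in
// FunctionDef's inflate (and again in ClassDef's) re-homes comment lines when
// decorators are present: lines parsed before `def` really belong to the first
// decorator, and whatever that decorator had parsed becomes the lines between
// the decorators and the `def`. Hypothetical names throughout.
use std::mem::swap;

fn rehome(
    leading_lines: &mut Vec<String>,         // parsed before the `def` keyword
    first_decorator_lines: &mut Vec<String>, // parsed before the first `@`
    lines_after_decorators: &mut Vec<String>,
) {
    // After these two swaps the decorator owns the original leading lines and
    // the old pre-`def` lines become lines_after_decorators.
    swap(lines_after_decorators, leading_lines);
    swap(first_decorator_lines, leading_lines);
}

fn main() {
    let mut leading = vec!["# before def".to_string()];
    let mut dec = vec!["# before @".to_string()];
    let mut after = Vec::new();
    rehome(&mut leading, &mut dec, &mut after);
    assert_eq!(after, vec!["# before def".to_string()]);
    assert_eq!(leading, vec!["# before @".to_string()]);
    assert!(dec.is_empty());
}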
}; let mut lines_after_decorators = Default::default(); if let Some(dec) = decorators.first_mut() { swap(&mut lines_after_decorators, &mut leading_lines); swap(&mut dec.leading_lines, &mut leading_lines); } let whitespace_after_def = parse_simple_whitespace(config, &mut (*self.def_tok).whitespace_after.borrow_mut())?; let name = self.name.inflate(config)?; let whitespace_after_name; let mut type_parameters = Default::default(); let mut whitespace_after_type_parameters = Default::default(); if let Some(tp) = self.type_parameters { let rbracket_tok = tp.rbracket.tok.clone(); whitespace_after_name = parse_simple_whitespace( config, &mut tp.lbracket.tok.whitespace_before.borrow_mut(), )?; type_parameters = Some(tp.inflate(config)?); whitespace_after_type_parameters = parse_simple_whitespace(config, &mut rbracket_tok.whitespace_after.borrow_mut())?; } else { whitespace_after_name = parse_simple_whitespace( config, &mut self.open_paren_tok.whitespace_before.borrow_mut(), )?; } let whitespace_before_params = parse_parenthesizable_whitespace( config, &mut (*self.open_paren_tok).whitespace_after.borrow_mut(), )?; let mut params = self.params.inflate(config)?; adjust_parameters_trailing_whitespace(config, &mut params, &self.close_paren_tok)?; let returns = self.returns.inflate(config)?; let whitespace_before_colon = parse_simple_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )?; let body = self.body.inflate(config)?; Ok(Self::Inflated { name, type_parameters, params, body, decorators, returns, asynchronous, leading_lines, lines_after_decorators, whitespace_after_def, whitespace_after_name, whitespace_after_type_parameters, whitespace_before_params, whitespace_before_colon, }) } } #[cst_node] pub struct Decorator<'a> { pub decorator: Expression<'a>, pub leading_lines: Vec>, pub whitespace_after_at: SimpleWhitespace<'a>, pub trailing_whitespace: TrailingWhitespace<'a>, pub(crate) at_tok: TokenRef<'a>, pub(crate) newline_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Decorator<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in self.leading_lines.iter() { ll.codegen(state); } state.add_indent(); state.add_token("@"); self.whitespace_after_at.codegen(state); self.decorator.codegen(state); self.trailing_whitespace.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedDecorator<'r, 'a> { type Inflated = Decorator<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut (*self.at_tok).whitespace_before.borrow_mut(), None, )?; let whitespace_after_at = parse_simple_whitespace(config, &mut (*self.at_tok).whitespace_after.borrow_mut())?; let decorator = self.decorator.inflate(config)?; let trailing_whitespace = parse_trailing_whitespace( config, &mut (*self.newline_tok).whitespace_before.borrow_mut(), )?; Ok(Self::Inflated { decorator, leading_lines, whitespace_after_at, trailing_whitespace, }) } } #[cst_node] pub struct If<'a> { /// The expression that, when evaluated, should give us a truthy value pub test: Expression<'a>, // The body of this compound statement. pub body: Suite<'a>, /// An optional ``elif`` or ``else`` clause. ``If`` signifies an ``elif`` block. pub orelse: Option>>, /// Sequence of empty lines appearing before this compound statement line. pub leading_lines: Vec>, /// The whitespace appearing after the ``if`` keyword but before the test /// expression. pub whitespace_before_test: SimpleWhitespace<'a>, /// The whitespace appearing after the test expression but before the colon. 
pub whitespace_after_test: SimpleWhitespace<'a>, /// Signifies if this instance represents an ``elif`` or an ``if`` block. #[cfg_attr(feature = "py", skip_py)] pub is_elif: bool, pub(crate) if_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for If<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for l in &self.leading_lines { l.codegen(state); } state.add_indent(); state.add_token(if self.is_elif { "elif" } else { "if" }); self.whitespace_before_test.codegen(state); self.test.codegen(state); self.whitespace_after_test.codegen(state); state.add_token(":"); self.body.codegen(state); if let Some(orelse) = &self.orelse { orelse.codegen(state) } } } impl<'r, 'a> Inflate<'a> for DeflatedIf<'r, 'a> { type Inflated = If<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut (*self.if_tok).whitespace_before.borrow_mut(), None, )?; let whitespace_before_test = parse_simple_whitespace(config, &mut (*self.if_tok).whitespace_after.borrow_mut())?; let test = self.test.inflate(config)?; let whitespace_after_test = parse_simple_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )?; let body = self.body.inflate(config)?; let orelse = self.orelse.inflate(config)?; Ok(Self::Inflated { test, body, orelse, leading_lines, whitespace_before_test, whitespace_after_test, is_elif: self.is_elif, }) } } #[allow(clippy::large_enum_variant)] #[cst_node(Inflate, Codegen)] pub enum OrElse<'a> { Elif(If<'a>), Else(Else<'a>), } #[cst_node] pub struct Else<'a> { pub body: Suite<'a>, /// Sequence of empty lines appearing before this compound statement line. pub leading_lines: Vec>, /// The whitespace appearing after the ``else`` keyword but before the colon. pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) else_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Else<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for l in &self.leading_lines { l.codegen(state); } state.add_indent(); state.add_token("else"); self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedElse<'r, 'a> { type Inflated = Else<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut (*self.else_tok).whitespace_before.borrow_mut(), None, )?; let whitespace_before_colon = parse_simple_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )?; let body = self.body.inflate(config)?; Ok(Self::Inflated { body, leading_lines, whitespace_before_colon, }) } } #[cst_node] pub struct Annotation<'a> { pub annotation: Expression<'a>, pub whitespace_before_indicator: Option>, pub whitespace_after_indicator: ParenthesizableWhitespace<'a>, pub(crate) tok: TokenRef<'a>, } impl<'a> Annotation<'a> { pub fn codegen(&self, state: &mut CodegenState<'a>, default_indicator: &'a str) { if let Some(ws) = &self.whitespace_before_indicator { ws.codegen(state); } else if default_indicator == "->" { state.add_token(" "); } else { panic!("Variable annotation but whitespace is None"); } state.add_token(default_indicator); self.whitespace_after_indicator.codegen(state); self.annotation.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedAnnotation<'r, 'a> { type Inflated = Annotation<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_before_indicator = Some(parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_before.borrow_mut(), )?); let 
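// [Editor's sketch, not LibCST source] If::codegen above renders `elif` and
// `if` from the same node type using the `is_elif` flag, because an `elif`
// clause is parsed as a nested If inside the `orelse` chain. A trivial
// standalone version; `render_if_header` is a hypothetical name.
fn render_if_header(is_elif: bool, test: &str) -> String {
    let keyword = if is_elif { "elif" } else { "if" };
    format!("{} {}:", keyword, test)
}

fn main() {
    assert_eq!(render_if_header(false, "x"), "if x:");
    assert_eq!(render_if_header(true, "y"), "elif y:");
}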
whitespace_after_indicator = parse_parenthesizable_whitespace( config, &mut (*self.tok).whitespace_after.borrow_mut(), )?; let annotation = self.annotation.inflate(config)?; Ok(Self::Inflated { annotation, whitespace_before_indicator, whitespace_after_indicator, }) } } #[cst_node] pub struct AnnAssign<'a> { pub target: AssignTargetExpression<'a>, pub annotation: Annotation<'a>, pub value: Option>, pub equal: Option>, pub semicolon: Option>, } impl<'a> Codegen<'a> for AnnAssign<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.target.codegen(state); self.annotation.codegen(state, ":"); if let Some(eq) = &self.equal { eq.codegen(state); } else if self.value.is_some() { state.add_token(" = "); } if let Some(value) = &self.value { value.codegen(state); } if let Some(semi) = &self.semicolon { semi.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedAnnAssign<'r, 'a> { type Inflated = AnnAssign<'a>; fn inflate(self, config: &Config<'a>) -> Result { let target = self.target.inflate(config)?; let annotation = self.annotation.inflate(config)?; let value = self.value.inflate(config)?; let equal = self.equal.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { target, annotation, value, equal, semicolon, }) } } impl<'r, 'a> DeflatedAnnAssign<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct Return<'a> { pub value: Option>, pub whitespace_after_return: Option>, pub semicolon: Option>, pub(crate) return_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Return<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("return"); if let Some(ws) = &self.whitespace_after_return { ws.codegen(state); } else if self.value.is_some() { state.add_token(" "); } if let Some(val) = &self.value { val.codegen(state); } if let Some(semi) = &self.semicolon { semi.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedReturn<'r, 'a> { type Inflated = Return<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_return = if self.value.is_some() { Some(parse_simple_whitespace( config, &mut (*self.return_tok).whitespace_after.borrow_mut(), )?) 
} else { // otherwise space is owned by semicolon or small statement // whitespace is not None to preserve a quirk of the pure python parser Some(Default::default()) }; let value = self.value.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { value, whitespace_after_return, semicolon, }) } } impl<'r, 'a> DeflatedReturn<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct Assert<'a> { pub test: Expression<'a>, pub msg: Option>, pub comma: Option>, pub whitespace_after_assert: SimpleWhitespace<'a>, pub semicolon: Option>, pub(crate) assert_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Assert<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("assert"); self.whitespace_after_assert.codegen(state); self.test.codegen(state); if let Some(comma) = &self.comma { comma.codegen(state); } else if self.msg.is_some() { state.add_token(", "); } if let Some(msg) = &self.msg { msg.codegen(state); } if let Some(semi) = &self.semicolon { semi.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedAssert<'r, 'a> { type Inflated = Assert<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_assert = parse_simple_whitespace( config, &mut (*self.assert_tok).whitespace_after.borrow_mut(), )?; let test = self.test.inflate(config)?; let comma = self.comma.inflate(config)?; let msg = self.msg.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { test, msg, comma, whitespace_after_assert, semicolon, }) } } impl<'r, 'a> DeflatedAssert<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct Raise<'a> { pub exc: Option>, pub cause: Option>, pub whitespace_after_raise: Option>, pub semicolon: Option>, pub(crate) raise_tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedRaise<'r, 'a> { type Inflated = Raise<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_raise = if self.exc.is_some() { Some(parse_simple_whitespace( config, &mut (*self.raise_tok).whitespace_after.borrow_mut(), )?) 
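// [Editor's sketch, not LibCST source] Return, Raise, and Assert above all use
// the same codegen fallback: when a whitespace node was never populated (for
// example, in a synthesized tree) but an operand follows, a single space is
// emitted so `return x` does not come out as `returnx`. Hypothetical names.
fn render_return(ws: Option<&str>, value: Option<&str>) -> String {
    let mut out = String::from("return");
    match (ws, value) {
        (Some(ws), _) => out.push_str(ws), // explicit whitespace wins
        (None, Some(_)) => out.push(' '),  // fallback separator
        (None, None) => {}                 // bare `return`
    }
    if let Some(v) = value {
        out.push_str(v);
    }
    out
}

fn main() {
    assert_eq!(render_return(None, Some("x")), "return x");
    assert_eq!(render_return(None, None), "return");
    assert_eq!(render_return(Some("  "), Some("x")), "return  x");
}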
} else { Default::default() }; let exc = self.exc.inflate(config)?; let mut cause = self.cause.inflate(config)?; if exc.is_none() { if let Some(cause) = cause.as_mut() { // in `raise from`, `raise` owns the shared whitespace cause.whitespace_before_from = None; } } let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { exc, cause, whitespace_after_raise, semicolon, }) } } impl<'a> Codegen<'a> for Raise<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("raise"); if let Some(ws) = &self.whitespace_after_raise { ws.codegen(state); } else if self.exc.is_some() { state.add_token(" "); } if let Some(exc) = &self.exc { exc.codegen(state); } if let Some(cause) = &self.cause { cause.codegen(state, " "); } if let Some(semi) = &self.semicolon { semi.codegen(state); } } } impl<'r, 'a> DeflatedRaise<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct NameItem<'a> { pub name: Name<'a>, pub comma: Option>, } impl<'r, 'a> Inflate<'a> for DeflatedNameItem<'r, 'a> { type Inflated = NameItem<'a>; fn inflate(self, config: &Config<'a>) -> Result { let name = self.name.inflate(config)?; let comma = self.comma.inflate(config)?; Ok(Self::Inflated { name, comma }) } } impl<'a> NameItem<'a> { fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { self.name.codegen(state); if let Some(comma) = &self.comma { comma.codegen(state); } else if default_comma { state.add_token(", "); } } } #[cst_node] pub struct Global<'a> { pub names: Vec>, pub whitespace_after_global: SimpleWhitespace<'a>, pub semicolon: Option>, pub(crate) tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedGlobal<'r, 'a> { type Inflated = Global<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_global = parse_simple_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut())?; let names = self.names.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { names, whitespace_after_global, semicolon, }) } } impl<'a> Codegen<'a> for Global<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("global"); self.whitespace_after_global.codegen(state); let len = self.names.len(); for (i, name) in self.names.iter().enumerate() { name.codegen(state, i + 1 != len); } if let Some(semicolon) = &self.semicolon { semicolon.codegen(state); } } } impl<'r, 'a> DeflatedGlobal<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct Nonlocal<'a> { pub names: Vec>, pub whitespace_after_nonlocal: SimpleWhitespace<'a>, pub semicolon: Option>, pub(crate) tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedNonlocal<'r, 'a> { type Inflated = Nonlocal<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_nonlocal = parse_simple_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut())?; let names = self.names.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { names, whitespace_after_nonlocal, semicolon, }) } } impl<'a> Codegen<'a> for Nonlocal<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("nonlocal"); self.whitespace_after_nonlocal.codegen(state); let len = self.names.len(); for (i, name) in self.names.iter().enumerate() { name.codegen(state, i + 1 != len); } if let Some(semicolon) = &self.semicolon { semicolon.codegen(state); } } } impl<'r, 'a> DeflatedNonlocal<'r, 'a> { pub fn with_semicolon(self, semicolon: 
Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct For<'a> { pub target: AssignTargetExpression<'a>, pub iter: Expression<'a>, pub body: Suite<'a>, pub orelse: Option>, pub asynchronous: Option>, pub leading_lines: Vec>, pub whitespace_after_for: SimpleWhitespace<'a>, pub whitespace_before_in: SimpleWhitespace<'a>, pub whitespace_after_in: SimpleWhitespace<'a>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) async_tok: Option>, pub(crate) for_tok: TokenRef<'a>, pub(crate) in_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for For<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in &self.leading_lines { ll.codegen(state); } state.add_indent(); if let Some(asy) = &self.asynchronous { asy.codegen(state); } state.add_token("for"); self.whitespace_after_for.codegen(state); self.target.codegen(state); self.whitespace_before_in.codegen(state); state.add_token("in"); self.whitespace_after_in.codegen(state); self.iter.codegen(state); self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); if let Some(e) = &self.orelse { e.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedFor<'r, 'a> { type Inflated = For<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let (asynchronous, leading_lines) = if let Some(asy) = self.async_tok.as_mut() { let whitespace_after = parse_parenthesizable_whitespace(config, &mut asy.whitespace_after.borrow_mut())?; ( Some(Asynchronous { whitespace_after }), Some(parse_empty_lines( config, &mut asy.whitespace_before.borrow_mut(), None, )?), ) } else { (None, None) }; let leading_lines = if let Some(ll) = leading_lines { ll } else { parse_empty_lines( config, &mut (*self.for_tok).whitespace_before.borrow_mut(), None, )? 
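// [Editor's sketch, not LibCST source] How an `async` token shifts whitespace
// ownership during inflation, as in For (and FunctionDef) above: with `async`
// present, the statement's leading lines are parsed before `async` rather than
// before `for`, and the space between `async` and `for` becomes the
// Asynchronous node's whitespace_after. Hypothetical names.
struct MiniAsync {
    whitespace_after: String,
}

fn inflate_async_prefix(
    async_ws: Option<(String, String)>, // (before `async`, after `async`)
    before_for: String,
) -> (Option<MiniAsync>, String) {
    match async_ws {
        Some((before, after)) => (Some(MiniAsync { whitespace_after: after }), before),
        None => (None, before_for),
    }
}

fn main() {
    let (asy, leading) = inflate_async_prefix(Some(("\n".into(), " ".into())), String::new());
    assert_eq!(leading, "\n"); // leading lines owned by `async`
    assert_eq!(asy.unwrap().whitespace_after, " ");

    let (asy, leading) = inflate_async_prefix(None, "\n\n".into());
    assert!(asy.is_none());
    assert_eq!(leading, "\n\n"); // fall back to the `for` token
}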
}; let whitespace_after_for = parse_simple_whitespace(config, &mut (*self.for_tok).whitespace_after.borrow_mut())?; let target = self.target.inflate(config)?; let whitespace_before_in = parse_simple_whitespace(config, &mut (*self.in_tok).whitespace_before.borrow_mut())?; let whitespace_after_in = parse_simple_whitespace(config, &mut (*self.in_tok).whitespace_after.borrow_mut())?; let iter = self.iter.inflate(config)?; let whitespace_before_colon = parse_simple_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )?; let body = self.body.inflate(config)?; let orelse = self.orelse.inflate(config)?; Ok(Self::Inflated { target, iter, body, orelse, asynchronous, leading_lines, whitespace_after_for, whitespace_before_in, whitespace_after_in, whitespace_before_colon, }) } } #[cst_node] pub struct While<'a> { pub test: Expression<'a>, pub body: Suite<'a>, pub orelse: Option>, pub leading_lines: Vec>, pub whitespace_after_while: SimpleWhitespace<'a>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) while_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for While<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in &self.leading_lines { ll.codegen(state); } state.add_indent(); state.add_token("while"); self.whitespace_after_while.codegen(state); self.test.codegen(state); self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); if let Some(orelse) = &self.orelse { orelse.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedWhile<'r, 'a> { type Inflated = While<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut (*self.while_tok).whitespace_before.borrow_mut(), None, )?; let whitespace_after_while = parse_simple_whitespace(config, &mut (*self.while_tok).whitespace_after.borrow_mut())?; let test = self.test.inflate(config)?; let whitespace_before_colon = parse_simple_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )?; let body = self.body.inflate(config)?; let orelse = self.orelse.inflate(config)?; Ok(Self::Inflated { test, body, orelse, leading_lines, whitespace_after_while, whitespace_before_colon, }) } } #[cst_node] pub struct ClassDef<'a> { pub name: Name<'a>, pub type_parameters: Option>, pub body: Suite<'a>, pub bases: Vec>, pub keywords: Vec>, pub decorators: Vec>, pub lpar: Option>, pub rpar: Option>, pub leading_lines: Vec>, pub lines_after_decorators: Vec>, pub whitespace_after_class: SimpleWhitespace<'a>, pub whitespace_after_name: SimpleWhitespace<'a>, pub whitespace_after_type_parameters: SimpleWhitespace<'a>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) class_tok: TokenRef<'a>, pub(crate) lpar_tok: Option>, pub(crate) rpar_tok: Option>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for ClassDef<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in &self.leading_lines { ll.codegen(state); } for dec in &self.decorators { dec.codegen(state); } for lad in &self.lines_after_decorators { lad.codegen(state); } state.add_indent(); state.add_token("class"); self.whitespace_after_class.codegen(state); self.name.codegen(state); self.whitespace_after_name.codegen(state); if let Some(tp) = &self.type_parameters { tp.codegen(state); self.whitespace_after_type_parameters.codegen(state); } let need_parens = !self.bases.is_empty() || !self.keywords.is_empty(); if let Some(lpar) = &self.lpar { lpar.codegen(state); } else if need_parens { state.add_token("("); } let 
args = self.bases.iter().chain(self.keywords.iter()); let len = self.bases.len() + self.keywords.len(); for (i, arg) in args.enumerate() { arg.codegen(state, i + 1 < len); } if let Some(rpar) = &self.rpar { rpar.codegen(state); } else if need_parens { state.add_token(")"); } self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedClassDef<'r, 'a> { type Inflated = ClassDef<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let mut leading_lines = parse_empty_lines( config, &mut (*self.class_tok).whitespace_before.borrow_mut(), None, )?; let mut decorators = self.decorators.inflate(config)?; let mut lines_after_decorators = Default::default(); if let Some(dec) = decorators.first_mut() { swap(&mut lines_after_decorators, &mut leading_lines); swap(&mut dec.leading_lines, &mut leading_lines); } let whitespace_after_class = parse_simple_whitespace(config, &mut (*self.class_tok).whitespace_after.borrow_mut())?; let name = self.name.inflate(config)?; let (mut whitespace_after_name, mut type_parameters, mut whitespace_after_type_parameters) = Default::default(); if let Some(tparams) = self.type_parameters { let rbracket_tok = tparams.rbracket.tok.clone(); whitespace_after_name = parse_simple_whitespace( config, &mut tparams.lbracket.tok.whitespace_before.borrow_mut(), )?; type_parameters = Some(tparams.inflate(config)?); whitespace_after_type_parameters = parse_simple_whitespace(config, &mut rbracket_tok.whitespace_after.borrow_mut())?; } else if let Some(lpar_tok) = self.lpar_tok.as_mut() { whitespace_after_name = parse_simple_whitespace(config, &mut lpar_tok.whitespace_before.borrow_mut())?; } let lpar = self.lpar.inflate(config)?; let bases = self.bases.inflate(config)?; let keywords = self.keywords.inflate(config)?; let rpar = self.rpar.inflate(config)?; let whitespace_before_colon = parse_simple_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )?; let body = self.body.inflate(config)?; Ok(Self::Inflated { name, type_parameters, body, bases, keywords, decorators, lpar, rpar, leading_lines, lines_after_decorators, whitespace_after_class, whitespace_after_type_parameters, whitespace_after_name, whitespace_before_colon, }) } } impl<'r, 'a> DeflatedClassDef<'r, 'a> { pub fn with_decorators(self, decorators: Vec>) -> Self { Self { decorators, ..self } } } #[cst_node] pub struct Finally<'a> { pub body: Suite<'a>, pub leading_lines: Vec>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) finally_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Finally<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in &self.leading_lines { ll.codegen(state); } state.add_indent(); state.add_token("finally"); self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedFinally<'r, 'a> { type Inflated = Finally<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut (*self.finally_tok).whitespace_before.borrow_mut(), None, )?; let whitespace_before_colon = parse_simple_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )?; let body = self.body.inflate(config)?; Ok(Self::Inflated { body, leading_lines, whitespace_before_colon, }) } } #[cst_node] pub struct ExceptHandler<'a> { pub body: Suite<'a>, pub r#type: Option>, pub name: Option>, pub leading_lines: Vec>, pub whitespace_after_except: 
SimpleWhitespace<'a>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) except_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for ExceptHandler<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in &self.leading_lines { ll.codegen(state); } state.add_indent(); state.add_token("except"); self.whitespace_after_except.codegen(state); if let Some(t) = &self.r#type { t.codegen(state); } if let Some(n) = &self.name { n.codegen(state); } self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedExceptHandler<'r, 'a> { type Inflated = ExceptHandler<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut (*self.except_tok).whitespace_before.borrow_mut(), None, )?; let whitespace_after_except = parse_simple_whitespace( config, &mut (*self.except_tok).whitespace_after.borrow_mut(), )?; let r#type = self.r#type.inflate(config)?; let name = self.name.inflate(config)?; let whitespace_before_colon = if name.is_some() { parse_simple_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )? } else { Default::default() }; let body = self.body.inflate(config)?; Ok(Self::Inflated { body, r#type, name, leading_lines, whitespace_after_except, whitespace_before_colon, }) } } #[cst_node] pub struct ExceptStarHandler<'a> { pub body: Suite<'a>, pub r#type: Expression<'a>, pub name: Option>, pub leading_lines: Vec>, pub whitespace_after_except: SimpleWhitespace<'a>, pub whitespace_after_star: SimpleWhitespace<'a>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) except_tok: TokenRef<'a>, pub(crate) star_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for ExceptStarHandler<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in &self.leading_lines { ll.codegen(state); } state.add_indent(); state.add_token("except"); self.whitespace_after_except.codegen(state); state.add_token("*"); self.whitespace_after_star.codegen(state); self.r#type.codegen(state); if let Some(n) = &self.name { n.codegen(state); } self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedExceptStarHandler<'r, 'a> { type Inflated = ExceptStarHandler<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut self.except_tok.whitespace_before.borrow_mut(), None, )?; let whitespace_after_except = parse_simple_whitespace(config, &mut self.except_tok.whitespace_after.borrow_mut())?; let whitespace_after_star = parse_simple_whitespace(config, &mut self.star_tok.whitespace_after.borrow_mut())?; let r#type = self.r#type.inflate(config)?; let name = self.name.inflate(config)?; let whitespace_before_colon = if name.is_some() { parse_simple_whitespace(config, &mut self.colon_tok.whitespace_before.borrow_mut())? 
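// Only parsed when an `as <name>` clause exists, as in
// "except ValueError as e:"; a bare "except ValueError:" falls through
// to the default (empty) whitespace below.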
} else { Default::default() }; let body = self.body.inflate(config)?; Ok(Self::Inflated { body, r#type, name, leading_lines, whitespace_after_except, whitespace_after_star, whitespace_before_colon, }) } } #[cst_node] pub struct Try<'a> { pub body: Suite<'a>, pub handlers: Vec>, pub orelse: Option>, pub finalbody: Option>, pub leading_lines: Vec>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) try_tok: TokenRef<'a>, // colon_tok unnecessary } impl<'a> Codegen<'a> for Try<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in &self.leading_lines { ll.codegen(state); } state.add_indent(); state.add_token("try"); self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); for h in &self.handlers { h.codegen(state); } if let Some(e) = &self.orelse { e.codegen(state); } if let Some(f) = &self.finalbody { f.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedTry<'r, 'a> { type Inflated = Try<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut (*self.try_tok).whitespace_before.borrow_mut(), None, )?; let whitespace_before_colon = parse_simple_whitespace(config, &mut (*self.try_tok).whitespace_after.borrow_mut())?; let body = self.body.inflate(config)?; let handlers = self.handlers.inflate(config)?; let orelse = self.orelse.inflate(config)?; let finalbody = self.finalbody.inflate(config)?; Ok(Self::Inflated { body, handlers, orelse, finalbody, leading_lines, whitespace_before_colon, }) } } #[cst_node] pub struct TryStar<'a> { pub body: Suite<'a>, pub handlers: Vec>, pub orelse: Option>, pub finalbody: Option>, pub leading_lines: Vec>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) try_tok: TokenRef<'a>, // colon_tok unnecessary } impl<'a> Codegen<'a> for TryStar<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in &self.leading_lines { ll.codegen(state); } state.add_indent(); state.add_token("try"); self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); for h in &self.handlers { h.codegen(state); } if let Some(e) = &self.orelse { e.codegen(state); } if let Some(f) = &self.finalbody { f.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedTryStar<'r, 'a> { type Inflated = TryStar<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut (*self.try_tok).whitespace_before.borrow_mut(), None, )?; let whitespace_before_colon = parse_simple_whitespace(config, &mut (*self.try_tok).whitespace_after.borrow_mut())?; let body = self.body.inflate(config)?; let handlers = self.handlers.inflate(config)?; let orelse = self.orelse.inflate(config)?; let finalbody = self.finalbody.inflate(config)?; Ok(Self::Inflated { body, handlers, orelse, finalbody, leading_lines, whitespace_before_colon, }) } } #[cst_node] pub struct AugAssign<'a> { pub target: AssignTargetExpression<'a>, pub operator: AugOp<'a>, pub value: Expression<'a>, pub semicolon: Option>, } impl<'r, 'a> Inflate<'a> for DeflatedAugAssign<'r, 'a> { type Inflated = AugAssign<'a>; fn inflate(self, config: &Config<'a>) -> Result { let target = self.target.inflate(config)?; let operator = self.operator.inflate(config)?; let value = self.value.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { target, operator, value, semicolon, }) } } impl<'a> Codegen<'a> for AugAssign<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.target.codegen(state); self.operator.codegen(state); 
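// The AugOp node owns the whitespace on both sides of the operator, so
// for "x += 1" this emits "x", then " += " via `operator`, then "1";
// no extra spacing is synthesized here.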
self.value.codegen(state); if let Some(s) = &self.semicolon { s.codegen(state); } } } impl<'r, 'a> DeflatedAugAssign<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct WithItem<'a> { pub item: Expression<'a>, pub asname: Option>, pub comma: Option>, } impl<'r, 'a> DeflatedWithItem<'r, 'a> { fn inflate_withitem(self, config: &Config<'a>, is_last: bool) -> Result> { let item = self.item.inflate(config)?; let asname = self.asname.inflate(config)?; let comma = if is_last { self.comma.map(|c| c.inflate_before(config)).transpose()? } else { self.comma.map(|c| c.inflate(config)).transpose()? }; Ok(WithItem { item, asname, comma, }) } } impl<'a> Codegen<'a> for WithItem<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.item.codegen(state); if let Some(n) = &self.asname { n.codegen(state); } if let Some(c) = &self.comma { c.codegen(state); } } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedWithItem<'r, 'a> { fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { Self { comma: Some(comma), ..self } } } #[cst_node] pub struct With<'a> { pub items: Vec>, pub body: Suite<'a>, pub asynchronous: Option>, pub leading_lines: Vec>, pub lpar: Option>, pub rpar: Option>, pub whitespace_after_with: SimpleWhitespace<'a>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) async_tok: Option>, pub(crate) with_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for With<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for ll in &self.leading_lines { ll.codegen(state); } state.add_indent(); if let Some(asy) = &self.asynchronous { asy.codegen(state); } state.add_token("with"); self.whitespace_after_with.codegen(state); // TODO: Force parens whenever there are newlines in // the commas of self.items. // // For now, only the python API does this. let need_parens = false; if let Some(lpar) = &self.lpar { lpar.codegen(state); } else if need_parens { state.add_token("("); } let len = self.items.len(); for (i, item) in self.items.iter().enumerate() { item.codegen(state); if item.comma.is_none() && i + 1 < len { state.add_token(", "); } } if let Some(rpar) = &self.rpar { rpar.codegen(state); } else if need_parens { state.add_token(")"); } self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedWith<'r, 'a> { type Inflated = With<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let (asynchronous, leading_lines) = if let Some(asy) = self.async_tok.as_mut() { let whitespace_after = parse_parenthesizable_whitespace(config, &mut asy.whitespace_after.borrow_mut())?; ( Some(Asynchronous { whitespace_after }), Some(parse_empty_lines( config, &mut asy.whitespace_before.borrow_mut(), None, )?), ) } else { (None, None) }; let leading_lines = if let Some(ll) = leading_lines { ll } else { parse_empty_lines( config, &mut (*self.with_tok).whitespace_before.borrow_mut(), None, )? }; let whitespace_after_with = parse_simple_whitespace(config, &mut (*self.with_tok).whitespace_after.borrow_mut())?; let lpar = self.lpar.map(|lpar| lpar.inflate(config)).transpose()?; let len = self.items.len(); let items = self .items .into_iter() .enumerate() .map(|(idx, el)| el.inflate_withitem(config, idx + 1 == len)) .collect::>>()?; let rpar = if !items.is_empty() { // rpar only has whitespace if items is non empty self.rpar.map(|rpar| rpar.inflate(config)).transpose()? 
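// e.g. in the parenthesized form "with (open(a) as f, open(b) as g):"
// the closing paren owns real whitespace; when `items` is empty there is
// nothing between the parens for rpar to attach to, so the Default
// branch below is taken instead.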
} else { Default::default() }; let whitespace_before_colon = parse_simple_whitespace( config, &mut (*self.colon_tok).whitespace_before.borrow_mut(), )?; let body = self.body.inflate(config)?; Ok(Self::Inflated { items, body, asynchronous, leading_lines, lpar, rpar, whitespace_after_with, whitespace_before_colon, }) } } #[cst_node(Codegen, ParenthesizedNode, Inflate)] pub enum DelTargetExpression<'a> { Name(Box>), Attribute(Box>), Tuple(Box>), List(Box>), Subscript(Box>), } impl<'r, 'a> std::convert::From> for DeflatedExpression<'r, 'a> { fn from(d: DeflatedDelTargetExpression<'r, 'a>) -> Self { match d { DeflatedDelTargetExpression::Attribute(a) => Self::Attribute(a), DeflatedDelTargetExpression::List(l) => Self::List(l), DeflatedDelTargetExpression::Name(n) => Self::Name(n), DeflatedDelTargetExpression::Subscript(s) => Self::Subscript(s), DeflatedDelTargetExpression::Tuple(t) => Self::Tuple(t), } } } impl<'r, 'a> std::convert::From> for DeflatedElement<'r, 'a> { fn from(d: DeflatedDelTargetExpression<'r, 'a>) -> Self { Self::Simple { value: d.into(), comma: None, } } } #[cst_node] pub struct Del<'a> { pub target: DelTargetExpression<'a>, pub whitespace_after_del: SimpleWhitespace<'a>, pub semicolon: Option>, pub(crate) tok: TokenRef<'a>, } impl<'r, 'a> Inflate<'a> for DeflatedDel<'r, 'a> { type Inflated = Del<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_del = parse_simple_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut())?; let target = self.target.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { target, whitespace_after_del, semicolon, }) } } impl<'a> Codegen<'a> for Del<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("del"); self.whitespace_after_del.codegen(state); self.target.codegen(state); if let Some(semi) = &self.semicolon { semi.codegen(state); } } } impl<'r, 'a> DeflatedDel<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } #[cst_node] pub struct Match<'a> { pub subject: Expression<'a>, pub cases: Vec>, pub leading_lines: Vec>, pub whitespace_after_match: SimpleWhitespace<'a>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub whitespace_after_colon: TrailingWhitespace<'a>, pub indent: Option<&'a str>, pub footer: Vec>, pub(crate) match_tok: TokenRef<'a>, pub(crate) colon_tok: TokenRef<'a>, pub(crate) indent_tok: TokenRef<'a>, pub(crate) dedent_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for Match<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for l in &self.leading_lines { l.codegen(state); } state.add_indent(); state.add_token("match"); self.whitespace_after_match.codegen(state); self.subject.codegen(state); self.whitespace_before_colon.codegen(state); state.add_token(":"); self.whitespace_after_colon.codegen(state); let indent = self.indent.unwrap_or(state.default_indent); state.indent(indent); // Note: empty cases is a syntax error for c in &self.cases { c.codegen(state); } for f in &self.footer { f.codegen(state); } state.dedent(); } } impl<'r, 'a> Inflate<'a> for DeflatedMatch<'r, 'a> { type Inflated = Match<'a>; fn inflate(self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut self.match_tok.whitespace_before.borrow_mut(), None, )?; let whitespace_after_match = parse_simple_whitespace(config, &mut self.match_tok.whitespace_after.borrow_mut())?; let subject = self.subject.inflate(config)?; let whitespace_before_colon = parse_simple_whitespace(config, &mut 
self.colon_tok.whitespace_before.borrow_mut())?; let whitespace_after_colon = parse_trailing_whitespace(config, &mut self.colon_tok.whitespace_after.borrow_mut())?; let mut indent = self.indent_tok.relative_indent; if indent == Some(config.default_indent) { indent = None; } let cases = self.cases.inflate(config)?; // See note about footers in `IndentedBlock`'s inflate fn let footer = parse_empty_lines( config, &mut self.dedent_tok.whitespace_after.borrow_mut(), Some(self.indent_tok.whitespace_before.borrow().absolute_indent), )?; Ok(Self::Inflated { subject, cases, leading_lines, whitespace_after_match, whitespace_before_colon, whitespace_after_colon, indent, footer, }) } } #[cst_node] pub struct MatchCase<'a> { pub pattern: MatchPattern<'a>, pub guard: Option>, pub body: Suite<'a>, pub leading_lines: Vec>, pub whitespace_after_case: SimpleWhitespace<'a>, pub whitespace_before_if: SimpleWhitespace<'a>, pub whitespace_after_if: SimpleWhitespace<'a>, pub whitespace_before_colon: SimpleWhitespace<'a>, pub(crate) case_tok: TokenRef<'a>, pub(crate) if_tok: Option>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for MatchCase<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { for l in &self.leading_lines { l.codegen(state); } state.add_indent(); state.add_token("case"); self.whitespace_after_case.codegen(state); self.pattern.codegen(state); if let Some(guard) = &self.guard { self.whitespace_before_if.codegen(state); state.add_token("if"); self.whitespace_after_if.codegen(state); guard.codegen(state); } self.whitespace_before_colon.codegen(state); state.add_token(":"); self.body.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedMatchCase<'r, 'a> { type Inflated = MatchCase<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let leading_lines = parse_empty_lines( config, &mut self.case_tok.whitespace_before.borrow_mut(), None, )?; let whitespace_after_case = parse_simple_whitespace(config, &mut self.case_tok.whitespace_after.borrow_mut())?; let pattern = self.pattern.inflate(config)?; let (whitespace_before_if, whitespace_after_if, guard) = if let Some(if_tok) = self.if_tok.as_mut() { ( parse_simple_whitespace(config, &mut if_tok.whitespace_before.borrow_mut())?, parse_simple_whitespace(config, &mut if_tok.whitespace_after.borrow_mut())?, self.guard.inflate(config)?, ) } else { Default::default() }; let whitespace_before_colon = parse_simple_whitespace(config, &mut self.colon_tok.whitespace_before.borrow_mut())?; let body = self.body.inflate(config)?; Ok(Self::Inflated { pattern, guard, body, leading_lines, whitespace_after_case, whitespace_before_if, whitespace_after_if, whitespace_before_colon, }) } } #[allow(clippy::large_enum_variant)] #[cst_node(Codegen, Inflate, ParenthesizedNode)] pub enum MatchPattern<'a> { Value(MatchValue<'a>), Singleton(MatchSingleton<'a>), Sequence(MatchSequence<'a>), Mapping(MatchMapping<'a>), Class(MatchClass<'a>), As(Box>), Or(Box>), } #[cst_node] pub struct MatchValue<'a> { pub value: Expression<'a>, } impl<'a> ParenthesizedNode<'a> for MatchValue<'a> { fn lpar(&self) -> &Vec> { self.value.lpar() } fn rpar(&self) -> &Vec> { self.value.rpar() } fn parenthesize(&self, state: &mut CodegenState<'a>, f: F) where F: FnOnce(&mut CodegenState<'a>), { self.value.parenthesize(state, f) } fn with_parens(self, left: LeftParen<'a>, right: RightParen<'a>) -> Self { Self { value: self.value.with_parens(left, right), } } } impl<'a> Codegen<'a> for MatchValue<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.value.codegen(state) } } 
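// `MatchValue` stores no parentheses of its own: the `ParenthesizedNode`
// impl above forwards lpar/rpar to the wrapped `Expression`, so in
// "case (42):" the parens end up on the inner literal. A rough sketch of
// the resulting shape (field names abbreviated, not real constructor calls):
//
//     MatchValue { value: Integer { value: "42", lpar: [LeftParen], rpar: [RightParen] } }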
impl<'r, 'a> Inflate<'a> for DeflatedMatchValue<'r, 'a> { type Inflated = MatchValue<'a>; fn inflate(self, config: &Config<'a>) -> Result { let value = self.value.inflate(config)?; Ok(Self::Inflated { value }) } } impl<'r, 'a> ParenthesizedDeflatedNode<'r, 'a> for DeflatedMatchValue<'r, 'a> { fn lpar(&self) -> &Vec> { self.value.lpar() } fn rpar(&self) -> &Vec> { self.value.rpar() } fn with_parens( self, left: DeflatedLeftParen<'r, 'a>, right: DeflatedRightParen<'r, 'a>, ) -> Self { Self { value: self.value.with_parens(left, right), } } } #[cst_node] pub struct MatchSingleton<'a> { pub value: Name<'a>, } impl<'a> ParenthesizedNode<'a> for MatchSingleton<'a> { fn lpar(&self) -> &Vec> { self.value.lpar() } fn rpar(&self) -> &Vec> { self.value.rpar() } fn parenthesize(&self, state: &mut CodegenState<'a>, f: F) where F: FnOnce(&mut CodegenState<'a>), { self.value.parenthesize(state, f) } fn with_parens(self, left: LeftParen<'a>, right: RightParen<'a>) -> Self { Self { value: self.value.with_parens(left, right), } } } impl<'a> Codegen<'a> for MatchSingleton<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.value.codegen(state) } } impl<'r, 'a> Inflate<'a> for DeflatedMatchSingleton<'r, 'a> { type Inflated = MatchSingleton<'a>; fn inflate(self, config: &Config<'a>) -> Result { let value = self.value.inflate(config)?; Ok(Self::Inflated { value }) } } impl<'r, 'a> ParenthesizedDeflatedNode<'r, 'a> for DeflatedMatchSingleton<'r, 'a> { fn lpar(&self) -> &Vec> { self.value.lpar() } fn rpar(&self) -> &Vec> { self.value.rpar() } fn with_parens( self, left: DeflatedLeftParen<'r, 'a>, right: DeflatedRightParen<'r, 'a>, ) -> Self { Self { value: self.value.with_parens(left, right), } } } #[allow(clippy::large_enum_variant)] #[cst_node(Codegen, Inflate, ParenthesizedNode)] pub enum MatchSequence<'a> { MatchList(MatchList<'a>), MatchTuple(MatchTuple<'a>), } #[cst_node(ParenthesizedNode)] pub struct MatchList<'a> { pub patterns: Vec>, pub lbracket: Option>, pub rbracket: Option>, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for MatchList<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.lbracket.codegen(state); let len = self.patterns.len(); if len == 1 { self.patterns.first().unwrap().codegen(state, false, false); } else { for (idx, pat) in self.patterns.iter().enumerate() { pat.codegen(state, idx < len - 1, true); } } self.rbracket.codegen(state); }) } } impl<'r, 'a> Inflate<'a> for DeflatedMatchList<'r, 'a> { type Inflated = MatchList<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let lbracket = self.lbracket.inflate(config)?; let len = self.patterns.len(); let patterns = self .patterns .into_iter() .enumerate() .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) .collect::>>()?; let rbracket = self.rbracket.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { patterns, lbracket, rbracket, lpar, rpar, }) } } #[cst_node(ParenthesizedNode)] pub struct MatchTuple<'a> { pub patterns: Vec>, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for MatchTuple<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { let len = self.patterns.len(); if len == 1 { self.patterns.first().unwrap().codegen(state, true, false); } else { for (idx, pat) in self.patterns.iter().enumerate() { pat.codegen(state, idx < len - 1, true); } } }) } } impl<'r, 'a> Inflate<'a> for DeflatedMatchTuple<'r, 'a> { type Inflated = MatchTuple<'a>; fn 
inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let len = self.patterns.len(); let patterns = self .patterns .into_iter() .enumerate() .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) .collect::>>()?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { patterns, lpar, rpar, }) } } #[allow(clippy::large_enum_variant)] #[cst_node] pub enum StarrableMatchSequenceElement<'a> { Simple(MatchSequenceElement<'a>), Starred(MatchStar<'a>), } impl<'a> StarrableMatchSequenceElement<'a> { fn codegen( &self, state: &mut CodegenState<'a>, default_comma: bool, default_comma_whitespace: bool, ) { match &self { Self::Simple(s) => s.codegen(state, default_comma, default_comma_whitespace), Self::Starred(s) => s.codegen(state, default_comma, default_comma_whitespace), } } } impl<'r, 'a> DeflatedStarrableMatchSequenceElement<'r, 'a> { fn inflate_element( self, config: &Config<'a>, last_element: bool, ) -> Result> { Ok(match self { Self::Simple(s) => { StarrableMatchSequenceElement::Simple(s.inflate_element(config, last_element)?) } Self::Starred(s) => { StarrableMatchSequenceElement::Starred(s.inflate_element(config, last_element)?) } }) } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedStarrableMatchSequenceElement<'r, 'a> { fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { match self { Self::Simple(s) => Self::Simple(s.with_comma(comma)), Self::Starred(s) => Self::Starred(s.with_comma(comma)), } } } #[cst_node] pub struct MatchSequenceElement<'a> { pub value: MatchPattern<'a>, pub comma: Option>, } impl<'a> MatchSequenceElement<'a> { fn codegen( &self, state: &mut CodegenState<'a>, default_comma: bool, default_comma_whitespace: bool, ) { self.value.codegen(state); self.comma.codegen(state); if self.comma.is_none() && default_comma { state.add_token(if default_comma_whitespace { ", " } else { "," }); } } } impl<'r, 'a> DeflatedMatchSequenceElement<'r, 'a> { fn inflate_element( self, config: &Config<'a>, last_element: bool, ) -> Result> { let value = self.value.inflate(config)?; let comma = if last_element { self.comma.map(|c| c.inflate_before(config)).transpose() } else { self.comma.inflate(config) }?; Ok(MatchSequenceElement { value, comma }) } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedMatchSequenceElement<'r, 'a> { fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { Self { comma: Some(comma), ..self } } } #[cst_node] pub struct MatchStar<'a> { pub name: Option>, pub comma: Option>, pub whitespace_before_name: ParenthesizableWhitespace<'a>, pub(crate) star_tok: TokenRef<'a>, } impl<'a> MatchStar<'a> { fn codegen( &self, state: &mut CodegenState<'a>, default_comma: bool, default_comma_whitespace: bool, ) { state.add_token("*"); self.whitespace_before_name.codegen(state); if let Some(name) = &self.name { name.codegen(state); } else { state.add_token("_"); } self.comma.codegen(state); if self.comma.is_none() && default_comma { state.add_token(if default_comma_whitespace { ", " } else { "," }); } } } impl<'r, 'a> DeflatedMatchStar<'r, 'a> { fn inflate_element(self, config: &Config<'a>, last_element: bool) -> Result> { let whitespace_before_name = parse_parenthesizable_whitespace( config, &mut self.star_tok.whitespace_after.borrow_mut(), )?; let name = self.name.inflate(config)?; let comma = if last_element { self.comma.map(|c| c.inflate_before(config)).transpose() } else { self.comma.inflate(config) }?; Ok(MatchStar { name, comma, whitespace_before_name, }) } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedMatchStar<'r, 'a> { fn 
with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { Self { comma: Some(comma), ..self } } } #[cst_node(ParenthesizedNode)] pub struct MatchMapping<'a> { pub elements: Vec>, pub rest: Option>, pub trailing_comma: Option>, pub lbrace: LeftCurlyBrace<'a>, pub rbrace: RightCurlyBrace<'a>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_before_rest: SimpleWhitespace<'a>, pub(crate) star_tok: Option>, } impl<'a> Codegen<'a> for MatchMapping<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.lbrace.codegen(state); let len = self.elements.len(); for (idx, el) in self.elements.iter().enumerate() { el.codegen(state, self.rest.is_some() || idx < len - 1); } if let Some(rest) = &self.rest { state.add_token("**"); self.whitespace_before_rest.codegen(state); rest.codegen(state); self.trailing_comma.codegen(state); } self.rbrace.codegen(state); }) } } impl<'r, 'a> Inflate<'a> for DeflatedMatchMapping<'r, 'a> { type Inflated = MatchMapping<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let lbrace = self.lbrace.inflate(config)?; let len = self.elements.len(); let no_star = self.star_tok.is_none(); let elements = self .elements .into_iter() .enumerate() .map(|(idx, el)| el.inflate_element(config, no_star && idx + 1 == len)) .collect::>>()?; let (whitespace_before_rest, rest, trailing_comma) = if let Some(star_tok) = self.star_tok.as_mut() { ( parse_simple_whitespace(config, &mut star_tok.whitespace_after.borrow_mut())?, self.rest.inflate(config)?, self.trailing_comma .map(|c| c.inflate_before(config)) .transpose()?, ) } else { Default::default() }; let rbrace = self.rbrace.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { elements, rest, trailing_comma, lbrace, rbrace, lpar, rpar, whitespace_before_rest, }) } } #[cst_node] pub struct MatchMappingElement<'a> { pub key: Expression<'a>, pub pattern: MatchPattern<'a>, pub comma: Option>, pub whitespace_before_colon: ParenthesizableWhitespace<'a>, pub whitespace_after_colon: ParenthesizableWhitespace<'a>, pub(crate) colon_tok: TokenRef<'a>, } impl<'a> MatchMappingElement<'a> { fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { self.key.codegen(state); self.whitespace_before_colon.codegen(state); state.add_token(":"); self.whitespace_after_colon.codegen(state); self.pattern.codegen(state); self.comma.codegen(state); if self.comma.is_none() && default_comma { state.add_token(", "); } } } impl<'r, 'a> DeflatedMatchMappingElement<'r, 'a> { fn inflate_element( self, config: &Config<'a>, last_element: bool, ) -> Result> { let key = self.key.inflate(config)?; let whitespace_before_colon = parse_parenthesizable_whitespace( config, &mut self.colon_tok.whitespace_before.borrow_mut(), )?; let whitespace_after_colon = parse_parenthesizable_whitespace( config, &mut self.colon_tok.whitespace_after.borrow_mut(), )?; let pattern = self.pattern.inflate(config)?; let comma = if last_element { self.comma.map(|c| c.inflate_before(config)).transpose() } else { self.comma.inflate(config) }?; Ok(MatchMappingElement { key, pattern, comma, whitespace_before_colon, whitespace_after_colon, }) } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedMatchMappingElement<'r, 'a> { fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { Self { comma: Some(comma), ..self } } } #[cst_node(ParenthesizedNode)] pub struct MatchClass<'a> { pub cls: NameOrAttribute<'a>, pub patterns: Vec>, pub kwds: Vec>, pub lpar: Vec>, pub rpar: Vec>, pub 
whitespace_after_cls: ParenthesizableWhitespace<'a>, pub whitespace_before_patterns: ParenthesizableWhitespace<'a>, pub whitespace_after_kwds: ParenthesizableWhitespace<'a>, pub(crate) lpar_tok: TokenRef<'a>, pub(crate) rpar_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for MatchClass<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { self.cls.codegen(state); self.whitespace_after_cls.codegen(state); state.add_token("("); self.whitespace_before_patterns.codegen(state); let patlen = self.patterns.len(); let kwdlen = self.kwds.len(); for (idx, pat) in self.patterns.iter().enumerate() { pat.codegen(state, idx < patlen - 1 + kwdlen, patlen == 1 && kwdlen == 0); } for (idx, kwd) in self.kwds.iter().enumerate() { kwd.codegen(state, idx < kwdlen - 1); } self.whitespace_after_kwds.codegen(state); state.add_token(")"); }) } } impl<'r, 'a> Inflate<'a> for DeflatedMatchClass<'r, 'a> { type Inflated = MatchClass<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let cls = self.cls.inflate(config)?; let whitespace_after_cls = parse_parenthesizable_whitespace( config, &mut self.lpar_tok.whitespace_before.borrow_mut(), )?; let whitespace_before_patterns = parse_parenthesizable_whitespace( config, &mut self.lpar_tok.whitespace_after.borrow_mut(), )?; let patlen = self.patterns.len(); let kwdlen = self.kwds.len(); let patterns = self .patterns .into_iter() .enumerate() .map(|(idx, pat)| pat.inflate_element(config, idx + 1 == patlen + kwdlen)) .collect::>()?; let kwds = self .kwds .into_iter() .enumerate() .map(|(idx, kwd)| kwd.inflate_element(config, idx + 1 == kwdlen)) .collect::>()?; let whitespace_after_kwds = parse_parenthesizable_whitespace( config, &mut self.rpar_tok.whitespace_before.borrow_mut(), )?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { cls, patterns, kwds, lpar, rpar, whitespace_after_cls, whitespace_before_patterns, whitespace_after_kwds, }) } } #[cst_node] pub struct MatchKeywordElement<'a> { pub key: Name<'a>, pub pattern: MatchPattern<'a>, pub comma: Option>, pub whitespace_before_equal: ParenthesizableWhitespace<'a>, pub whitespace_after_equal: ParenthesizableWhitespace<'a>, pub(crate) equal_tok: TokenRef<'a>, } impl<'a> MatchKeywordElement<'a> { fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { self.key.codegen(state); self.whitespace_before_equal.codegen(state); state.add_token("="); self.whitespace_after_equal.codegen(state); self.pattern.codegen(state); self.comma.codegen(state); if self.comma.is_none() && default_comma { state.add_token(", "); } } } impl<'r, 'a> DeflatedMatchKeywordElement<'r, 'a> { fn inflate_element( self, config: &Config<'a>, last_element: bool, ) -> Result> { let key = self.key.inflate(config)?; let whitespace_before_equal = parse_parenthesizable_whitespace( config, &mut self.equal_tok.whitespace_before.borrow_mut(), )?; let whitespace_after_equal = parse_parenthesizable_whitespace( config, &mut self.equal_tok.whitespace_after.borrow_mut(), )?; let pattern = self.pattern.inflate(config)?; let comma = if last_element { self.comma.map(|c| c.inflate_before(config)).transpose() } else { self.comma.inflate(config) }?; Ok(MatchKeywordElement { key, pattern, comma, whitespace_before_equal, whitespace_after_equal, }) } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedMatchKeywordElement<'r, 'a> { fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { Self { comma: Some(comma), ..self } } } #[cst_node(ParenthesizedNode)] pub struct MatchAs<'a> { pub 
pattern: Option>, pub name: Option>, pub lpar: Vec>, pub rpar: Vec>, pub whitespace_before_as: Option>, pub whitespace_after_as: Option>, pub(crate) as_tok: Option>, } impl<'a> Codegen<'a> for MatchAs<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { if let Some(pat) = &self.pattern { pat.codegen(state); self.whitespace_before_as.codegen(state); state.add_token("as"); self.whitespace_after_as.codegen(state); } if let Some(name) = &self.name { name.codegen(state); } else { state.add_token("_"); } }) } } impl<'r, 'a> Inflate<'a> for DeflatedMatchAs<'r, 'a> { type Inflated = MatchAs<'a>; fn inflate(mut self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let pattern = self.pattern.inflate(config)?; let (whitespace_before_as, whitespace_after_as) = if let Some(as_tok) = self.as_tok.as_mut() { ( Some(parse_parenthesizable_whitespace( config, &mut as_tok.whitespace_before.borrow_mut(), )?), Some(parse_parenthesizable_whitespace( config, &mut as_tok.whitespace_after.borrow_mut(), )?), ) } else { Default::default() }; let name = self.name.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { pattern, name, lpar, rpar, whitespace_before_as, whitespace_after_as, }) } } #[cst_node] pub struct MatchOrElement<'a> { pub pattern: MatchPattern<'a>, pub separator: Option>, } impl<'a> MatchOrElement<'a> { fn codegen(&self, state: &mut CodegenState<'a>, default_separator: bool) { self.pattern.codegen(state); self.separator.codegen(state); if self.separator.is_none() && default_separator { state.add_token(" | "); } } } impl<'r, 'a> Inflate<'a> for DeflatedMatchOrElement<'r, 'a> { type Inflated = MatchOrElement<'a>; fn inflate(self, config: &Config<'a>) -> Result { let pattern = self.pattern.inflate(config)?; let separator = self.separator.inflate(config)?; Ok(Self::Inflated { pattern, separator }) } } #[cst_node(ParenthesizedNode)] pub struct MatchOr<'a> { pub patterns: Vec>, pub lpar: Vec>, pub rpar: Vec>, } impl<'a> Codegen<'a> for MatchOr<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.parenthesize(state, |state| { let len = self.patterns.len(); for (idx, pat) in self.patterns.iter().enumerate() { pat.codegen(state, idx + 1 < len) } }) } } impl<'r, 'a> Inflate<'a> for DeflatedMatchOr<'r, 'a> { type Inflated = MatchOr<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lpar = self.lpar.inflate(config)?; let patterns = self.patterns.inflate(config)?; let rpar = self.rpar.inflate(config)?; Ok(Self::Inflated { patterns, lpar, rpar, }) } } #[cst_node] pub struct TypeVar<'a> { pub name: Name<'a>, pub bound: Option>>, pub colon: Option>, } impl<'a> Codegen<'a> for TypeVar<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.name.codegen(state); self.colon.codegen(state); if let Some(bound) = &self.bound { bound.codegen(state); } } } impl<'r, 'a> Inflate<'a> for DeflatedTypeVar<'r, 'a> { type Inflated = TypeVar<'a>; fn inflate(self, config: &Config<'a>) -> Result { let name = self.name.inflate(config)?; let colon = self.colon.inflate(config)?; let bound = self.bound.inflate(config)?; Ok(Self::Inflated { name, bound, colon }) } } #[cst_node] pub struct TypeVarTuple<'a> { pub name: Name<'a>, pub whitespace_after_star: SimpleWhitespace<'a>, pub(crate) star_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for TypeVarTuple<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("*"); self.whitespace_after_star.codegen(state); self.name.codegen(state); } } impl<'r, 'a> Inflate<'a> for 
DeflatedTypeVarTuple<'r, 'a> { type Inflated = TypeVarTuple<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_star = parse_simple_whitespace(config, &mut self.star_tok.whitespace_after.borrow_mut())?; let name = self.name.inflate(config)?; Ok(Self::Inflated { name, whitespace_after_star, }) } } #[cst_node] pub struct ParamSpec<'a> { pub name: Name<'a>, pub whitespace_after_star: SimpleWhitespace<'a>, pub(crate) star_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for ParamSpec<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("**"); self.whitespace_after_star.codegen(state); self.name.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedParamSpec<'r, 'a> { type Inflated = ParamSpec<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_star = parse_simple_whitespace(config, &mut self.star_tok.whitespace_after.borrow_mut())?; let name = self.name.inflate(config)?; Ok(Self::Inflated { name, whitespace_after_star, }) } } #[cst_node(Inflate, Codegen)] pub enum TypeVarLike<'a> { TypeVar(TypeVar<'a>), TypeVarTuple(TypeVarTuple<'a>), ParamSpec(ParamSpec<'a>), } #[cst_node] pub struct TypeParam<'a> { pub param: TypeVarLike<'a>, pub comma: Option>, } impl<'a> Codegen<'a> for TypeParam<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.param.codegen(state); self.comma.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedTypeParam<'r, 'a> { type Inflated = TypeParam<'a>; fn inflate(self, config: &Config<'a>) -> Result { let param = self.param.inflate(config)?; let comma = self.comma.inflate(config)?; Ok(Self::Inflated { param, comma }) } } impl<'r, 'a> WithComma<'r, 'a> for DeflatedTypeParam<'r, 'a> { fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { Self { comma: Some(comma), ..self } } } #[cst_node] pub struct TypeParameters<'a> { pub params: Vec>, pub lbracket: LeftSquareBracket<'a>, pub rbracket: RightSquareBracket<'a>, } impl<'a> Codegen<'a> for TypeParameters<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { self.lbracket.codegen(state); let params_len = self.params.len(); for (idx, param) in self.params.iter().enumerate() { param.codegen(state); if idx + 1 < params_len && param.comma.is_none() { state.add_token(", "); } } self.rbracket.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedTypeParameters<'r, 'a> { type Inflated = TypeParameters<'a>; fn inflate(self, config: &Config<'a>) -> Result { let lbracket = self.lbracket.inflate(config)?; let params = self.params.inflate(config)?; let rbracket = self.rbracket.inflate(config)?; Ok(Self::Inflated { params, lbracket, rbracket, }) } } #[cst_node] pub struct TypeAlias<'a> { pub name: Name<'a>, pub value: Box>, pub type_parameters: Option>, pub whitespace_after_type: SimpleWhitespace<'a>, pub whitespace_after_name: Option>, pub whitespace_after_type_parameters: Option>, pub whitespace_after_equals: SimpleWhitespace<'a>, pub semicolon: Option>, pub(crate) type_tok: TokenRef<'a>, pub(crate) lbracket_tok: Option>, pub(crate) equals_tok: TokenRef<'a>, } impl<'a> Codegen<'a> for TypeAlias<'a> { fn codegen(&self, state: &mut CodegenState<'a>) { state.add_token("type"); self.whitespace_after_type.codegen(state); self.name.codegen(state); if self.whitespace_after_name.is_none() && self.type_parameters.is_none() { state.add_token(" "); } else { self.whitespace_after_name.codegen(state); } if self.type_parameters.is_some() { self.type_parameters.codegen(state); self.whitespace_after_type_parameters.codegen(state); } state.add_token("="); 
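// The "=" has just been emitted; the branch above synthesizes the single
// space before it only when `whitespace_after_name` is None and there are
// no type parameters (e.g. a programmatically built "type X = int"),
// while `whitespace_after_equals` below is a stored field and is emitted
// verbatim.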
self.whitespace_after_equals.codegen(state); self.value.codegen(state); self.semicolon.codegen(state); } } impl<'r, 'a> Inflate<'a> for DeflatedTypeAlias<'r, 'a> { type Inflated = TypeAlias<'a>; fn inflate(self, config: &Config<'a>) -> Result { let whitespace_after_type = parse_simple_whitespace(config, &mut self.type_tok.whitespace_after.borrow_mut())?; let name = self.name.inflate(config)?; let whitespace_after_name = Some(if let Some(tok) = self.lbracket_tok { parse_simple_whitespace(config, &mut tok.whitespace_before.borrow_mut()) } else { parse_simple_whitespace(config, &mut self.equals_tok.whitespace_before.borrow_mut()) }?); let type_parameters = self.type_parameters.inflate(config)?; let whitespace_after_type_parameters = if type_parameters.is_some() { Some(parse_simple_whitespace( config, &mut self.equals_tok.whitespace_before.borrow_mut(), )?) } else { None }; let whitespace_after_equals = parse_simple_whitespace(config, &mut self.equals_tok.whitespace_after.borrow_mut())?; let value = self.value.inflate(config)?; let semicolon = self.semicolon.inflate(config)?; Ok(Self::Inflated { name, value, type_parameters, whitespace_after_type, whitespace_after_name, whitespace_after_type_parameters, whitespace_after_equals, semicolon, }) } } impl<'r, 'a> DeflatedTypeAlias<'r, 'a> { pub fn with_semicolon(self, semicolon: Option>) -> Self { Self { semicolon, ..self } } } LibCST-1.2.0/native/libcst/src/nodes/test_utils.rs000066400000000000000000000022031456464173300220160ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use pyo3::prelude::*; py_import!("libcst._nodes.deep_equals", "deep_equals", get_deep_equals); pub fn repr_or_panic(py: Python, value: T) -> String where T: ToPyObject, { value .to_object(py) .as_ref(py) .repr() .expect("failed to call repr") .extract() .expect("repr should've returned str") } pub fn py_assert_deep_equals(py: Python, left: L, right: R) where L: ToPyObject, R: ToPyObject, { let (left, right) = (left.to_object(py), right.to_object(py)); let equals = get_deep_equals(py) .expect("failed to import deep_equals") .call1((&left, &right)) .expect("failed to call deep_equals") .extract::() .expect("deep_equals should return a bool"); if !equals { panic!( "assertion failed: {} was not deeply equal to {}", repr_or_panic(py, &left), repr_or_panic(py, &right), ); } } LibCST-1.2.0/native/libcst/src/nodes/traits.rs000066400000000000000000000126411456464173300211340ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. 
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use crate::{
    nodes::expression::{DeflatedLeftParen, DeflatedRightParen},
    nodes::op::DeflatedComma,
    tokenizer::whitespace_parser::{Config, WhitespaceError},
    Codegen, CodegenState, EmptyLine, LeftParen, RightParen,
};
use std::ops::Deref;

pub trait WithComma<'r, 'a> {
    fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self;
}

pub trait ParenthesizedNode<'a> {
    fn lpar(&self) -> &Vec<LeftParen<'a>>;
    fn rpar(&self) -> &Vec<RightParen<'a>>;

    fn parenthesize<F>(&self, state: &mut CodegenState<'a>, f: F)
    where
        F: FnOnce(&mut CodegenState<'a>),
    {
        for lpar in self.lpar() {
            lpar.codegen(state);
        }
        f(state);
        for rpar in self.rpar() {
            rpar.codegen(state);
        }
    }

    fn with_parens(self, left: LeftParen<'a>, right: RightParen<'a>) -> Self;
}

impl<'a, T: ParenthesizedNode<'a>> ParenthesizedNode<'a> for Box<T> {
    fn lpar(&self) -> &Vec<LeftParen<'a>> {
        self.deref().lpar()
    }
    fn rpar(&self) -> &Vec<RightParen<'a>> {
        self.deref().rpar()
    }
    fn parenthesize<F>(&self, state: &mut CodegenState<'a>, f: F)
    where
        F: FnOnce(&mut CodegenState<'a>),
    {
        self.deref().parenthesize(state, f)
    }
    fn with_parens(self, left: LeftParen<'a>, right: RightParen<'a>) -> Self {
        Self::new((*self).with_parens(left, right))
    }
}

pub trait ParenthesizedDeflatedNode<'r, 'a> {
    fn lpar(&self) -> &Vec<DeflatedLeftParen<'r, 'a>>;
    fn rpar(&self) -> &Vec<DeflatedRightParen<'r, 'a>>;
    fn with_parens(
        self,
        left: DeflatedLeftParen<'r, 'a>,
        right: DeflatedRightParen<'r, 'a>,
    ) -> Self;
}

impl<'r, 'a, T: ParenthesizedDeflatedNode<'r, 'a>> ParenthesizedDeflatedNode<'r, 'a> for Box<T> {
    fn lpar(&self) -> &Vec<DeflatedLeftParen<'r, 'a>> {
        self.deref().lpar()
    }
    fn rpar(&self) -> &Vec<DeflatedRightParen<'r, 'a>> {
        self.deref().rpar()
    }
    fn with_parens(
        self,
        left: DeflatedLeftParen<'r, 'a>,
        right: DeflatedRightParen<'r, 'a>,
    ) -> Self {
        Self::new((*self).with_parens(left, right))
    }
}

pub trait WithLeadingLines<'a> {
    fn leading_lines(&mut self) -> &mut Vec<EmptyLine<'a>>;
}

pub type Result<T> = std::result::Result<T, WhitespaceError>;

pub trait Inflate<'a>
where
    Self: Sized,
{
    type Inflated;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated>;
}

impl<'a, T: Inflate<'a>> Inflate<'a> for Option<T> {
    type Inflated = Option<T::Inflated>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        self.map(|x| x.inflate(config)).transpose()
    }
}

impl<'a, T: Inflate<'a> + ?Sized> Inflate<'a> for Box<T> {
    type Inflated = Box<T::Inflated>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        match (*self).inflate(config) {
            Ok(a) => Ok(Box::new(a)),
            Err(e) => Err(e),
        }
    }
}

impl<'a, T: Inflate<'a>> Inflate<'a> for Vec<T> {
    type Inflated = Vec<T::Inflated>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        self.into_iter().map(|item| item.inflate(config)).collect()
    }
}

#[cfg(feature = "py")]
pub mod py {
    use pyo3::{types::PyAny, types::PyTuple, IntoPy, PyObject, PyResult, Python};

    // TODO: replace with upstream implementation once
    // https://github.com/PyO3/pyo3/issues/1813 is resolved
    pub trait TryIntoPy<T>: Sized {
        fn try_into_py(self, py: Python) -> PyResult<T>;
    }

    // I wish:
    // impl<T: IntoPy<PyObject>> TryIntoPy<PyObject> for T {
    //     fn try_into_py(self, py: Python) -> PyResult<PyObject> {
    //         Ok(self.into_py(py))
    //     }
    // }

    impl TryIntoPy<PyObject> for bool {
        fn try_into_py(self, py: Python) -> PyResult<PyObject> {
            Ok(self.into_py(py))
        }
    }

    impl<T> TryIntoPy<PyObject> for Box<T>
    where
        T: TryIntoPy<PyObject>,
    {
        fn try_into_py(self, py: Python) -> PyResult<PyObject> {
            (*self).try_into_py(py)
        }
    }

    impl<T> TryIntoPy<PyObject> for Option<T>
    where
        T: TryIntoPy<PyObject>,
    {
        fn try_into_py(self, py: Python) -> PyResult<PyObject> {
            Ok(match self {
                None => py.None(),
                Some(x) => x.try_into_py(py)?,
            })
        }
    }

    impl<T> TryIntoPy<PyObject> for Vec<T>
    where
        T: TryIntoPy<PyObject>,
    {
        fn try_into_py(self, py: Python) -> PyResult<PyObject> {
            let converted = self
                .into_iter()
                .map(|x| x.try_into_py(py))
                .collect::<PyResult<Vec<PyObject>>>()?
                .into_iter();
            Ok(PyTuple::new(py, converted).into())
        }
    }

    impl TryIntoPy<PyObject> for PyTuple {
        fn try_into_py(self, py: Python) -> PyResult<PyObject> {
            Ok(self.into_py(py))
        }
    }

    impl<'a> TryIntoPy<PyObject> for &'a str {
        fn try_into_py(self, py: Python) -> PyResult<PyObject> {
            Ok(self.into_py(py))
        }
    }

    impl<T> TryIntoPy<PyObject> for &'_ T
    where
        T: AsRef<PyAny>,
    {
        fn try_into_py(self, py: Python) -> PyResult<PyObject> {
            Ok(self.into_py(py))
        }
    }
}
LibCST-1.2.0/native/libcst/src/nodes/whitespace.rs000066400000000000000000000106371456464173300217650ustar00rootroot00000000000000
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

#[cfg(feature = "py")]
use libcst_derive::TryIntoPy;

use super::{Codegen, CodegenState};

#[derive(Debug, Eq, PartialEq, Default, Clone)]
#[cfg_attr(feature = "py", derive(TryIntoPy))]
pub struct SimpleWhitespace<'a>(pub &'a str);

impl<'a> Codegen<'a> for SimpleWhitespace<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        state.add_token(self.0);
    }
}

#[derive(Debug, Eq, PartialEq, Clone)]
#[cfg_attr(feature = "py", derive(TryIntoPy))]
pub struct Comment<'a>(pub &'a str);

impl<'a> Default for Comment<'a> {
    fn default() -> Self {
        Self("#")
    }
}

impl<'a> Codegen<'a> for Comment<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        state.add_token(self.0);
    }
}

#[derive(Debug, Eq, PartialEq, Default, Clone)]
#[cfg_attr(feature = "py", derive(TryIntoPy))]
pub struct Newline<'a>(pub Option<&'a str>, pub Fakeness);

#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Fakeness {
    Fake,
    Real,
}

impl Default for Fakeness {
    fn default() -> Self {
        Self::Real
    }
}

impl<'a> Codegen<'a> for Newline<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        if let Fakeness::Fake = self.1 {
            return;
        }
        if let Some(value) = self.0 {
            state.add_token(value);
        } else {
            state.add_token(state.default_newline);
        }
    }
}

#[derive(Debug, Eq, PartialEq, Default, Clone)]
#[cfg_attr(feature = "py", derive(TryIntoPy))]
pub struct TrailingWhitespace<'a> {
    pub whitespace: SimpleWhitespace<'a>,
    pub comment: Option<Comment<'a>>,
    pub newline: Newline<'a>,
}

impl<'a> Codegen<'a> for TrailingWhitespace<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        self.whitespace.codegen(state);
        if let Some(comment) = &self.comment {
            comment.codegen(state);
        }
        self.newline.codegen(state);
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "py", derive(TryIntoPy))]
pub struct EmptyLine<'a> {
    pub indent: bool,
    pub whitespace: SimpleWhitespace<'a>,
    pub comment: Option<Comment<'a>>,
    pub newline: Newline<'a>,
}

impl<'a> Codegen<'a> for EmptyLine<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        if self.indent {
            state.add_indent()
        }
        self.whitespace.codegen(state);
        if let Some(comment) = &self.comment {
            comment.codegen(state);
        }
        self.newline.codegen(state);
    }
}

impl<'a> Default for EmptyLine<'a> {
    fn default() -> Self {
        Self {
            indent: true,
            whitespace: Default::default(),
            comment: Default::default(),
            newline: Default::default(),
        }
    }
}

impl<'a> EmptyLine<'a> {
    pub fn new(
        indent: bool,
        whitespace: SimpleWhitespace<'a>,
        comment: Option<Comment<'a>>,
        newline: Newline<'a>,
    ) -> Self {
        Self {
            indent,
            whitespace,
            comment,
            newline,
        }
    }
}

#[derive(Debug, Eq, PartialEq, Default, Clone)]
#[cfg_attr(feature = "py", derive(TryIntoPy))]
pub struct ParenthesizedWhitespace<'a> {
    pub first_line: TrailingWhitespace<'a>,
    pub empty_lines: Vec<EmptyLine<'a>>,
    pub indent: bool,
    pub last_line: SimpleWhitespace<'a>,
}

impl<'a> Codegen<'a> for ParenthesizedWhitespace<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        self.first_line.codegen(state);
        for line in &self.empty_lines {
            line.codegen(state);
        }
        if self.indent {
            state.add_indent()
        }
        self.last_line.codegen(state);
    }
}

#[derive(Debug, Eq, PartialEq, Clone)]
#[cfg_attr(feature = "py", derive(TryIntoPy))]
pub enum ParenthesizableWhitespace<'a> {
    SimpleWhitespace(SimpleWhitespace<'a>),
    ParenthesizedWhitespace(ParenthesizedWhitespace<'a>),
}

impl<'a> Codegen<'a> for ParenthesizableWhitespace<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>) {
        match self {
            Self::SimpleWhitespace(w) => w.codegen(state),
            Self::ParenthesizedWhitespace(w) => w.codegen(state),
        }
    }
}

impl<'a> Default for ParenthesizableWhitespace<'a> {
    fn default() -> Self {
        Self::SimpleWhitespace(SimpleWhitespace(""))
    }
}
LibCST-1.2.0/native/libcst/src/parser/000077500000000000000000000000001456464173300174405ustar00rootroot00000000000000
LibCST-1.2.0/native/libcst/src/parser/errors.rs000066400000000000000000000057761456464173300213410ustar00rootroot00000000000000
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use crate::parser::grammar::TokVec;
use crate::tokenizer::whitespace_parser::WhitespaceError;
use crate::tokenizer::TokError;
use peg::Parse;
use thiserror::Error;

#[allow(clippy::enum_variant_names)]
#[derive(Debug, Error, PartialEq, Eq)]
pub enum ParserError<'a> {
    #[error("tokenizer error: {0}")]
    TokenizerError(TokError<'a>, &'a str),
    #[error("parser error: {0}")]
    ParserError(
        peg::error::ParseError<<TokVec<'a> as Parse>::PositionRepr>,
        &'a str,
    ),
    #[error(transparent)]
    WhitespaceError(#[from] WhitespaceError),
    #[error("invalid operator")]
    OperatorError,
}

#[cfg(feature = "py")]
mod py_error {
    use pyo3::types::{IntoPyDict, PyModule};
    use pyo3::{IntoPy, PyErr, PyErrArguments, Python};

    use super::ParserError;

    struct Details {
        message: String,
        lines: Vec<String>,
        raw_line: u32,
        raw_column: u32,
    }

    impl<'a> From<ParserError<'a>> for PyErr {
        fn from(e: ParserError) -> Self {
            Python::with_gil(|py| {
                let lines = match &e {
                    ParserError::TokenizerError(_, text) | ParserError::ParserError(_, text) => {
                        text.lines().collect::<Vec<_>>()
                    }
                    _ => vec![""],
                };
                let (mut line, mut col) = match &e {
                    ParserError::ParserError(err, ..) => {
                        (err.location.start_pos.line, err.location.start_pos.column)
                    }
                    _ => (0, 0),
                };
                if line + 1 > lines.len() {
                    line = lines.len() - 1;
                    col = 0;
                }
                let kwargs = [
                    ("message", e.to_string().into_py(py)),
                    ("lines", lines.into_py(py)),
                    ("raw_line", (line + 1).into_py(py)),
                    ("raw_column", col.into_py(py)),
                ]
                .into_py_dict(py);
                let libcst = PyModule::import(py, "libcst").expect("libcst cannot be imported");
                PyErr::from_value(
                    libcst
                        .getattr("ParserSyntaxError")
                        .expect("ParserSyntaxError not found")
                        .call((), Some(kwargs))
                        .expect("failed to instantiate"),
                )
            })
        }
    }

    impl PyErrArguments for Details {
        fn arguments(self, py: pyo3::Python) -> pyo3::PyObject {
            [
                ("message", self.message.into_py(py)),
                ("lines", self.lines.into_py(py)),
                ("raw_line", self.raw_line.into_py(py)),
                ("raw_column", self.raw_column.into_py(py)),
            ]
            .into_py_dict(py)
            .into_py(py)
        }
    }
}
LibCST-1.2.0/native/libcst/src/parser/grammar.rs000066400000000000000000003350101456464173300214360ustar00rootroot00000000000000
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
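// The PEG grammar below matches over whole tokens, not characters: `TokVec`
// adapts the tokenizer's output to rust-peg's `Parse`/`ParseElem` traits.
// A rough usage sketch (assuming a `tokenize` entry point standing in for
// the crate's real tokenizer API, and rust-peg's generated argument order:
// input first, then grammar parameters, then rule arguments):
//
//     let tokens: TokVec = tokenize(source)?.into();
//     let module = python::file(&tokens, source, Some("utf-8"))?;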
use std::rc::Rc; use crate::expression::make_async; use crate::nodes::deflated::*; use crate::nodes::expression::make_fstringtext; use crate::nodes::op::make_importstar; use crate::nodes::traits::ParenthesizedDeflatedNode; use crate::parser::ParserError; use crate::tokenizer::{TokType, Token}; use crate::WithComma; use peg::str::LineCol; use peg::{parser, Parse, ParseElem, RuleResult}; use TokType::{ Async, Await as AWAIT, Dedent, EndMarker, FStringEnd, FStringStart, FStringString, Indent, Name as NameTok, Newline as NL, Number, String as STRING, }; pub type Result<'a, T> = std::result::Result>; type GrammarResult = std::result::Result; #[derive(Debug)] pub struct TokVec<'a>(Vec>>); impl<'a> std::convert::From>> for TokVec<'a> { fn from(vec: Vec>) -> Self { TokVec(vec.into_iter().map(Rc::new).collect()) } } #[derive(Debug, PartialEq, Eq)] pub struct ParseLoc { pub start_pos: LineCol, pub end_pos: LineCol, } impl std::fmt::Display for ParseLoc { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.start_pos.fmt(f) } } impl<'a> Parse for TokVec<'a> { type PositionRepr = ParseLoc; fn start(&self) -> usize { 0 } fn is_eof(&self, pos: usize) -> bool { pos >= self.0.len() } fn position_repr(&self, pos: usize) -> Self::PositionRepr { let tok = self.0.get(pos).unwrap_or_else(|| self.0.last().unwrap()); ParseLoc { start_pos: LineCol { line: tok.start_pos.line_number(), column: tok.start_pos.char_column_number(), offset: tok.start_pos.byte_idx(), }, end_pos: LineCol { line: tok.end_pos.line_number(), column: tok.end_pos.char_column_number(), offset: tok.end_pos.byte_idx(), }, } } } type TokenRef<'input, 'a> = &'input Token<'a>; impl<'input, 'a: 'input> ParseElem<'input> for TokVec<'a> { type Element = TokenRef<'input, 'a>; fn parse_elem(&'input self, pos: usize) -> RuleResult { match self.0.get(pos) { Some(tok) => RuleResult::Matched(pos + 1, tok), None => RuleResult::Failed, } } } const MAX_RECURSION_DEPTH: usize = 3000; parser! { pub grammar python<'a>(input: &'a str) for TokVec<'a> { // Starting Rules pub rule file(encoding: Option<&str>) -> Module<'input, 'a> = traced(<_file(encoding.unwrap_or("utf-8"))>) pub rule expression_input() -> Expression<'input, 'a> = traced() pub rule statement_input() -> Statement<'input, 'a> = traced() rule _file(encoding: &str) -> Module<'input, 'a> = s:statements()? 
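// A completely empty module still parses: statements() is optional and
// only the tokenizer's EndMarker token remains to be consumed.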
eof:tok(EndMarker, "EOF") { make_module(s.unwrap_or_default(), eof, encoding) } // General statements rule statements() -> Vec> = statement()+ rule statement() -> Statement<'input, 'a> = c:compound_stmt() { Statement::Compound(c) } / s:simple_stmts() { Statement::Simple(make_simple_statement_line(s)) } rule simple_stmts() -> SimpleStatementParts<'input, 'a> = first_tok:&_ stmts:separated_trailer(, ) nl:tok(NL, "NEWLINE") { SimpleStatementParts { first_tok, first_statement: stmts.0, rest: stmts.1, last_semi: stmts.2, nl, } } #[cache] rule simple_stmt() -> SmallStatement<'input, 'a> = assignment() / &lit("type") s: type_stmt() {SmallStatement::TypeAlias(s)} / e:star_expressions() { SmallStatement::Expr(Expr { value: e, semicolon: None }) } / &lit("return") s:return_stmt() { SmallStatement::Return(s) } // this is expanded from the original grammar's import_stmt rule / &lit("import") i:import_name() { SmallStatement::Import(i) } / &lit("from") i:import_from() { SmallStatement::ImportFrom(i) } / &lit("raise") r:raise_stmt() { SmallStatement::Raise(r) } / lit("pass") { SmallStatement::Pass(Pass { semicolon: None }) } / &lit("del") s:del_stmt() { SmallStatement::Del(s) } / &lit("yield") s:yield_stmt() { SmallStatement::Expr(Expr { value: s, semicolon: None }) } / &lit("assert") s:assert_stmt() {SmallStatement::Assert(s)} / lit("break") { SmallStatement::Break(Break { semicolon: None })} / lit("continue") { SmallStatement::Continue(Continue { semicolon: None })} / &lit("global") s:global_stmt() {SmallStatement::Global(s)} / &lit("nonlocal") s:nonlocal_stmt() {SmallStatement::Nonlocal(s)} rule compound_stmt() -> CompoundStatement<'input, 'a> = &(lit("def") / lit("@") / tok(Async, "ASYNC")) f:function_def() { CompoundStatement::FunctionDef(f) } / &lit("if") f:if_stmt() { CompoundStatement::If(f) } / &(lit("class") / lit("@")) c:class_def() { CompoundStatement::ClassDef(c) } / &(lit("with") / tok(Async, "ASYNC")) w:with_stmt() { CompoundStatement::With(w) } / &(lit("for") / tok(Async, "ASYNC")) f:for_stmt() { CompoundStatement::For(f) } / &lit("try") t:try_stmt() { CompoundStatement::Try(t) } / &lit("try") t:try_star_stmt() { CompoundStatement::TryStar(t) } / &lit("while") w:while_stmt() { CompoundStatement::While(w) } / m:match_stmt() { CompoundStatement::Match(m) } // Simple statements rule assignment() -> SmallStatement<'input, 'a> = a:name() col:lit(":") ann:expression() rhs:(eq:lit("=") d:annotated_rhs() {(eq, d)})? { SmallStatement::AnnAssign(make_ann_assignment( AssignTargetExpression::Name(Box::new(a)), col, ann, rhs)) } // TODO: there's an extra '(' single_target ')' clause here in upstream / a:single_subscript_attribute_target() col:lit(":") ann:expression() rhs:(eq:lit("=") d:annotated_rhs() {(eq, d)})? { SmallStatement::AnnAssign(make_ann_assignment(a, col, ann, rhs)) } / lhs:(t:star_targets() eq:lit("=") {(t, eq)})+ rhs:(yield_expr() / star_expressions()) !lit("=") { SmallStatement::Assign(make_assignment(lhs, rhs)) } / t:single_target() op:augassign() rhs:(yield_expr() / star_expressions()) { SmallStatement::AugAssign(make_aug_assign(t, op, rhs)) } rule annotated_rhs() -> Expression<'input, 'a> = yield_expr() / star_expressions() rule augassign() -> AugOp<'input, 'a> = &(lit("+=") / lit("-=") / lit("*=") / lit("@=") / lit("/=") / lit("%=") / lit("&=") / lit("|=") / lit("^=") / lit("<<=") / lit(">>=") / lit("**=") / lit("//=")) tok:_ {? make_aug_op(tok).map_err(|_| "aug_op") } rule return_stmt() -> Return<'input, 'a> = kw:lit("return") a:star_expressions()? 
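// A bare `return` is allowed: the value is optional, so make_return
// receives None for `return` and Some(...) for `return x, y`.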
{ make_return(kw, a) } rule raise_stmt() -> Raise<'input, 'a> = kw:lit("raise") exc:expression() rest:(f:lit("from") cau:expression() {(f, cau)})? { make_raise(kw, Some(exc), rest) } / kw:lit("raise") { make_raise(kw, None, None) } rule global_stmt() -> Global<'input, 'a> = kw:lit("global") init:(n:name() c:comma() {(n, c)})* last:name() { make_global(kw, init, last) } rule nonlocal_stmt() -> Nonlocal<'input, 'a> = kw:lit("nonlocal") init:(n:name() c:comma() {(n, c)})* last:name() { make_nonlocal(kw, init, last) } rule del_stmt() -> Del<'input, 'a> = kw:lit("del") t:del_target() &(lit(";") / tok(NL, "NEWLINE")) { make_del(kw, t) } / kw:lit("del") t:del_targets() &(lit(";") / tok(NL, "NEWLINE")) { make_del(kw, make_del_tuple(None, t, None)) } rule yield_stmt() -> Expression<'input, 'a> = yield_expr() rule assert_stmt() -> Assert<'input, 'a> = kw:lit("assert") test:expression() rest:(c:comma() msg:expression() {(c, msg)})? { make_assert(kw, test, rest) } // Import statements rule import_name() -> Import<'input, 'a> = kw:lit("import") a:dotted_as_names() { make_import(kw, a) } rule import_from() -> ImportFrom<'input, 'a> = from:lit("from") dots:dots()? m:dotted_name() import:lit("import") als:import_from_targets() { make_import_from(from, dots.unwrap_or_default(), Some(m), import, als) } / from:lit("from") dots:dots() import:lit("import") als:import_from_targets() { make_import_from(from, dots, None, import, als) } rule import_from_targets() -> ParenthesizedImportNames<'input, 'a> = lpar:lpar() als:import_from_as_names() c:comma()? rpar:rpar() { let mut als = als; if let (comma@Some(_), Some(mut last)) = (c, als.last_mut()) { last.comma = comma; } (Some(lpar), ImportNames::Aliases(als), Some(rpar)) } / als:import_from_as_names() !lit(",") { (None, ImportNames::Aliases(als), None)} / star:lit("*") { (None, ImportNames::Star(make_importstar()), None) } rule import_from_as_names() -> Vec> = items:separated(, ) { make_import_from_as_names(items.0, items.1) } rule import_from_as_name() -> ImportAlias<'input, 'a> = n:name() asname:(kw:lit("as") z:name() {(kw, z)})? { make_import_alias(NameOrAttribute::N(Box::new(n)), asname) } rule dotted_as_names() -> Vec> = init:(d:dotted_as_name() c:comma() {d.with_comma(c)})* last:dotted_as_name() { concat(init, vec![last]) } rule dotted_as_name() -> ImportAlias<'input, 'a> = n:dotted_name() asname:(kw:lit("as") z:name() {(kw, z)})? { make_import_alias(n, asname) } // TODO: why does this diverge from CPython? rule dotted_name() -> NameOrAttribute<'input, 'a> = first:name() tail:(dot:lit(".") n:name() {(dot, n)})* { make_name_or_attr(first, tail) } // Compound statements // Common elements #[cache] rule block() -> Suite<'input, 'a> = n:tok(NL, "NEWLINE") ind:tok(Indent, "INDENT") s:statements() ded:tok(Dedent, "DEDENT") { make_indented_block(n, ind, s, ded) } / s:simple_stmts() { make_simple_statement_suite(s) } rule decorators() -> Vec> = (at:lit("@") e:named_expression() nl:tok(NL, "NEWLINE") { make_decorator(at, e, nl) } )+ // Class definitions rule class_def() -> ClassDef<'input, 'a> = d:decorators() c:class_def_raw() { c.with_decorators(d) } / class_def_raw() rule class_def_raw() -> ClassDef<'input, 'a> = kw:lit("class") n:name() t:type_params()? arg:(l:lpar() a:arguments()? r:rpar() {(l, a, r)})? col:lit(":") b:block() {? 
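// Fallible action: the `{? ... }` form lets make_class_def reject an
// otherwise token-valid class header by returning Err, which surfaces as
// a parse failure rather than a panic.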
make_class_def(kw, n, t, arg, col, b) } // Function definitions rule function_def() -> FunctionDef<'input, 'a> = d:decorators() f:function_def_raw() {f.with_decorators(d)} / function_def_raw() rule _returns() -> Annotation<'input, 'a> = l:lit("->") e:expression() { make_annotation(l, e) } rule function_def_raw() -> FunctionDef<'input, 'a> = def:lit("def") n:name() t:type_params()? op:lit("(") params:params()? cp:lit(")") ty:_returns()? c:lit(":") b:block() { make_function_def(None, def, n, t, op, params, cp, ty, c, b) } / asy:tok(Async, "ASYNC") def:lit("def") n:name() t:type_params()? op:lit("(") params:params()? cp:lit(")") ty:_returns()? c:lit(":") b:block() { make_function_def(Some(asy), def, n, t, op, params, cp, ty, c, b) } // Function parameters rule params() -> Parameters<'input, 'a> = parameters() rule parameters() -> Parameters<'input, 'a> = a:slash_no_default() b:param_no_default()* c:param_with_default()* d:star_etc()? { make_parameters(Some(a), concat(b, c), d) } / a:slash_with_default() b:param_with_default()* d:star_etc()? { make_parameters(Some(a), b, d) } / a:param_no_default()+ b:param_with_default()* d:star_etc()? { make_parameters(None, concat(a, b), d) } / a:param_with_default()+ d:star_etc()? { make_parameters(None, a, d) } / d:star_etc() { make_parameters(None, vec![], Some(d)) } rule slash_no_default() -> (Vec>, ParamSlash<'input, 'a>) = a:param_no_default()+ tok:lit("/") com:comma() { (a, ParamSlash { comma: Some(com), tok }) } / a:param_no_default()+ tok:lit("/") &lit(")") { (a, ParamSlash { comma: None, tok }) } rule slash_with_default() -> (Vec>, ParamSlash<'input, 'a>) = a:param_no_default()* b:param_with_default()+ tok:lit("/") c:comma() { (concat(a, b), ParamSlash { comma: Some(c), tok }) } / a:param_no_default()* b:param_with_default()+ tok:lit("/") &lit(")") { (concat(a, b), ParamSlash { comma: None, tok }) } rule star_etc() -> StarEtc<'input, 'a> = star:lit("*") a:param_no_default() b:param_maybe_default()* kw:kwds()? { StarEtc(Some(StarArg::Param(Box::new( add_param_star(a, star)))), b, kw) } / star:lit("*") a:param_no_default_star_annotation() b:param_maybe_default()* kw:kwds()? { StarEtc(Some(StarArg::Param(Box::new( add_param_star(a, star)))), b, kw) } / lit("*") c:comma() b:param_maybe_default()+ kw:kwds()? { StarEtc(Some(StarArg::Star(Box::new(ParamStar {comma:c }))), b, kw) } / kw:kwds() { StarEtc(None, vec![], Some(kw)) } rule kwds() -> Param<'input, 'a> = star:lit("**") a:param_no_default() { add_param_star(a, star) } rule param_no_default() -> Param<'input, 'a> = a:param() c:lit(",") { add_param_default(a, None, Some(c)) } / a:param() &lit(")") {a} rule param_no_default_star_annotation() -> Param<'input, 'a> = a:param_star_annotation() c:lit(",") { add_param_default(a, None, Some(c))} / a:param_star_annotation() &lit(")") {a} rule param_with_default() -> Param<'input, 'a> = a:param() def:default() c:lit(",") { add_param_default(a, Some(def), Some(c)) } / a:param() def:default() &lit(")") { add_param_default(a, Some(def), None) } rule param_maybe_default() -> Param<'input, 'a> = a:param() def:default()? c:lit(",") { add_param_default(a, def, Some(c)) } / a:param() def:default()? &lit(")") { add_param_default(a, def, None) } rule param() -> Param<'input, 'a> = n:name() a:annotation()? 
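// A parameter here is just `name` plus an optional `: annotation`;
// defaults and trailing commas are attached afterwards by
// add_param_default in the param_*_default rules above.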
{ Param {name: n, annotation: a, ..Default::default() } } rule param_star_annotation() -> Param<'input, 'a> = n:name() a:star_annotation() { Param {name: n, annotation: Some(a), ..Default::default() } } rule annotation() -> Annotation<'input, 'a> = col:lit(":") e:expression() { make_annotation(col, e) } rule star_annotation() -> Annotation<'input, 'a> = col:lit(":") e:star_expression() { make_annotation(col, e) } rule default() -> (AssignEqual<'input, 'a>, Expression<'input, 'a>) = eq:lit("=") ex:expression() { (make_assign_equal(eq), ex) } // If statement rule if_stmt() -> If<'input, 'a> = i:lit("if") a:named_expression() col:lit(":") b:block() elif:elif_stmt() { make_if(i, a, col, b, Some(OrElse::Elif(elif)), false) } / i:lit("if") a:named_expression() col:lit(":") b:block() el:else_block()? { make_if(i, a, col, b, el.map(OrElse::Else), false) } rule elif_stmt() -> If<'input, 'a> = i:lit("elif") a:named_expression() col:lit(":") b:block() elif:elif_stmt() { make_if(i, a, col, b, Some(OrElse::Elif(elif)), true) } / i:lit("elif") a:named_expression() col:lit(":") b:block() el:else_block()? { make_if(i, a, col, b, el.map(OrElse::Else), true) } rule else_block() -> Else<'input, 'a> = el:lit("else") col:lit(":") b:block() { make_else(el, col, b) } // While statement rule while_stmt() -> While<'input, 'a> = kw:lit("while") test:named_expression() col:lit(":") b:block() el:else_block()? { make_while(kw, test, col, b, el) } // For statement rule for_stmt() -> For<'input, 'a> = f:lit("for") t:star_targets() i:lit("in") it:star_expressions() c:lit(":") b:block() el:else_block()? { make_for(None, f, t, i, it, c, b, el) } / asy:tok(Async, "ASYNC") f:lit("for") t:star_targets() i:lit("in") it:star_expressions() c:lit(":") b:block() el:else_block()? { make_for(Some(asy), f, t, i, it, c, b, el) } // With statement rule with_stmt() -> With<'input, 'a> = kw:lit("with") l:lpar() items:separated_trailer(, ) r:rpar() col:lit(":") b:block() { make_with(None, kw, Some(l), comma_separate(items.0, items.1, items.2), Some(r), col, b) } / kw:lit("with") items:separated(, ) col:lit(":") b:block() { make_with(None, kw, None, comma_separate(items.0, items.1, None), None, col, b) } / asy:tok(Async, "ASYNC") kw:lit("with") l:lpar() items:separated_trailer(, ) r:rpar() col:lit(":") b:block() { make_with(Some(asy), kw, Some(l), comma_separate(items.0, items.1, items.2), Some(r), col, b) } / asy:tok(Async, "ASYNC") kw:lit("with") items:separated(, ) col:lit(":") b:block() { make_with(Some(asy), kw, None, comma_separate(items.0, items.1, None), None, col, b) } rule with_item() -> WithItem<'input, 'a> = e:expression() a:lit("as") t:star_target() &(lit(",") / lit(":") / rpar()) { make_with_item(e, Some(a), Some(t)) } / e:expression() { make_with_item(e, None, None) } // Try statement rule try_stmt() -> Try<'input, 'a> = kw:lit("try") lit(":") b:block() f:finally_block() { make_try(kw, b, vec![], None, Some(f)) } / kw:lit("try") lit(":") b:block() ex:except_block()+ el:else_block()? f:finally_block()? { make_try(kw, b, ex, el, f) } // Note: this is separate because TryStar is a different type in LibCST rule try_star_stmt() -> TryStar<'input, 'a> = kw:lit("try") lit(":") b:block() ex:except_star_block()+ el:else_block()? f:finally_block()? { make_try_star(kw, b, ex, el, f) } // Except statement rule except_block() -> ExceptHandler<'input, 'a> = kw:lit("except") e:expression() a:(k:lit("as") n:name() {(k, n)})? 
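// e.g. `except ValueError as e:` -- the `as name` clause is optional,
// and a bare `except:` is handled by the next alternative.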
col:lit(":") b:block() { make_except(kw, Some(e), a, col, b) } / kw:lit("except") col:lit(":") b:block() { make_except(kw, None, None, col, b) } rule except_star_block() -> ExceptStarHandler<'input, 'a> = kw:lit("except") star:lit("*") e:expression() a:(k:lit("as") n:name() {(k, n)})? col:lit(":") b:block() { make_except_star(kw, star, e, a, col, b) } rule finally_block() -> Finally<'input, 'a> = kw:lit("finally") col:lit(":") b:block() { make_finally(kw, col, b) } // Match statement rule match_stmt() -> Match<'input, 'a> = kw:lit("match") subject:subject_expr() col:lit(":") tok(NL, "NEWLINE") i:tok(Indent, "INDENT") cases:case_block()+ d:tok(Dedent, "DEDENT") { make_match(kw, subject, col, i, cases, d) } rule subject_expr() -> Expression<'input, 'a> = first:star_named_expression() c:comma() rest:star_named_expressions()? { Expression::Tuple(Box::new( make_tuple_from_elements(first.with_comma(c), rest.unwrap_or_default())) ) } / named_expression() rule case_block() -> MatchCase<'input, 'a> = kw:lit("case") pattern:patterns() guard:guard()? col:lit(":") body:block() { make_case(kw, pattern, guard, col, body) } rule guard() -> (TokenRef<'input, 'a>, Expression<'input, 'a>) = kw:lit("if") exp:named_expression() { (kw, exp) } rule patterns() -> MatchPattern<'input, 'a> = pats:open_sequence_pattern() { MatchPattern::Sequence(make_list_pattern(None, pats, None)) } / pattern() rule pattern() -> MatchPattern<'input, 'a> = as_pattern() / or_pattern() rule as_pattern() -> MatchPattern<'input, 'a> = pat:or_pattern() kw:lit("as") target:pattern_capture_target() { make_as_pattern(Some(pat), Some(kw), Some(target)) } rule or_pattern() -> MatchPattern<'input, 'a> = pats:separated(, ) { make_or_pattern(pats.0, pats.1) } rule closed_pattern() -> MatchPattern<'input, 'a> = literal_pattern() / capture_pattern() / wildcard_pattern() / value_pattern() / group_pattern() / sequence_pattern() / mapping_pattern() / class_pattern() rule literal_pattern() -> MatchPattern<'input, 'a> = val:signed_number() !(lit("+") / lit("-")) { make_match_value(val) } / val:complex_number() { make_match_value(val) } / val:strings() { make_match_value(val.into()) } / n:lit("None") { make_match_singleton(make_name(n)) } / n:lit("True") { make_match_singleton(make_name(n)) } / n:lit("False") { make_match_singleton(make_name(n)) } rule literal_expr() -> Expression<'input, 'a> = val:signed_number() !(lit("+") / lit("-")) { val } / val:complex_number() { val } / val:strings() { val.into() } / n:lit("None") { Expression::Name(Box::new(make_name(n))) } / n:lit("True") { Expression::Name(Box::new(make_name(n))) } / n:lit("False") { Expression::Name(Box::new(make_name(n))) } rule complex_number() -> Expression<'input, 'a> = re:signed_real_number() op:(lit("+")/lit("-")) im:imaginary_number() {? make_binary_op(re, op, im).map_err(|_| "complex number") } rule signed_number() -> Expression<'input, 'a> = n:tok(Number, "number") { make_number(n) } / op:lit("-") n:tok(Number, "number") {? make_unary_op(op, make_number(n)).map_err(|_| "signed number") } rule signed_real_number() -> Expression<'input, 'a> = real_number() / op:lit("-") n:real_number() {? make_unary_op(op, n).map_err(|_| "signed real number") } rule real_number() -> Expression<'input, 'a> = n:tok(Number, "number") {? ensure_real_number(n) } rule imaginary_number() -> Expression<'input, 'a> = n:tok(Number, "number") {? 
ensure_imaginary_number(n) } rule capture_pattern() -> MatchPattern<'input, 'a> = t:pattern_capture_target() { make_as_pattern(None, None, Some(t)) } rule pattern_capture_target() -> Name<'input, 'a> = !lit("_") n:name() !(lit(".") / lit("(") / lit("=")) { n } rule wildcard_pattern() -> MatchPattern<'input, 'a> = lit("_") { make_as_pattern(None, None, None) } rule value_pattern() -> MatchPattern<'input, 'a> = v:attr() !(lit(".") / lit("(") / lit("=")) { make_match_value(v.into()) } // In upstream attr and name_or_attr are mutually recursive, but rust-peg // doesn't support this yet. rule attr() -> NameOrAttribute<'input, 'a> = &(name() lit(".")) v:name_or_attr() { v } #[cache_left_rec] rule name_or_attr() -> NameOrAttribute<'input, 'a> = val:name_or_attr() d:lit(".") attr:name() { NameOrAttribute::A(Box::new(make_attribute(val.into(), d, attr))) } / n:name() { NameOrAttribute::N(Box::new(n)) } rule group_pattern() -> MatchPattern<'input, 'a> = l:lpar() pat:pattern() r:rpar() { pat.with_parens(l, r) } rule sequence_pattern() -> MatchPattern<'input, 'a> = l:lbrak() pats:maybe_sequence_pattern()? r:rbrak() { MatchPattern::Sequence( make_list_pattern(Some(l), pats.unwrap_or_default(), Some(r)) ) } / l:lpar() pats:open_sequence_pattern()? r:rpar() { MatchPattern::Sequence(make_tuple_pattern(l, pats.unwrap_or_default(), r)) } rule open_sequence_pattern() -> Vec> = pat:maybe_star_pattern() c:comma() pats:maybe_sequence_pattern()? { make_open_sequence_pattern(pat, c, pats.unwrap_or_default()) } rule maybe_sequence_pattern() -> Vec> = pats:separated_trailer(, ) { comma_separate(pats.0, pats.1, pats.2) } rule maybe_star_pattern() -> StarrableMatchSequenceElement<'input, 'a> = s:star_pattern() { StarrableMatchSequenceElement::Starred(s) } / p:pattern() { StarrableMatchSequenceElement::Simple( make_match_sequence_element(p) ) } rule star_pattern() -> MatchStar<'input, 'a> = star:lit("*") t:pattern_capture_target() {make_match_star(star, Some(t))} / star:lit("*") t:wildcard_pattern() { make_match_star(star, None) } rule mapping_pattern() -> MatchPattern<'input, 'a> = l:lbrace() r:rbrace() { make_match_mapping(l, vec![], None, None, None, None, r) } / l:lbrace() rest:double_star_pattern() trail:comma()? r:rbrace() { make_match_mapping(l, vec![], None, Some(rest.0), Some(rest.1), trail, r) } / l:lbrace() items:items_pattern() c:comma() rest:double_star_pattern() trail:comma()? r:rbrace() { make_match_mapping(l, items, Some(c), Some(rest.0), Some(rest.1), trail, r) } / l:lbrace() items:items_pattern() trail:comma()? r:rbrace() { make_match_mapping(l, items, trail, None, None, None, r) } rule items_pattern() -> Vec> = pats:separated(, ) { comma_separate(pats.0, pats.1, None) } rule key_value_pattern() -> MatchMappingElement<'input, 'a> = key:(literal_expr() / a:attr() {a.into()}) colon:lit(":") pat:pattern() { make_match_mapping_element(key, colon, pat) } rule double_star_pattern() -> (TokenRef<'input, 'a>, Name<'input, 'a>) = star:lit("**") n:pattern_capture_target() { (star, n) } rule class_pattern() -> MatchPattern<'input, 'a> = cls:name_or_attr() l:lit("(") r:lit(")") { make_class_pattern(cls, l, vec![], None, vec![], None, r) } / cls:name_or_attr() l:lit("(") pats:positional_patterns() c:comma()? r:lit(")") { make_class_pattern(cls, l, pats, c, vec![], None, r) } / cls:name_or_attr() l:lit("(") kwds:keyword_patterns() c:comma()? r:lit(")") { make_class_pattern(cls, l, vec![], None, kwds, c, r) } / cls:name_or_attr() l:lit("(") pats:positional_patterns() c:comma() kwds:keyword_patterns() trail:comma()? 
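// e.g. `case Point(0, y=1):` -- positional patterns, then keyword
// patterns, each comma-separated with an optional trailing comma.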
r:lit(")") { make_class_pattern(cls, l, pats, Some(c), kwds, trail, r) } rule positional_patterns() -> Vec> = pats:separated(, ) { comma_separate(pats.0, pats.1, None) } rule keyword_patterns() -> Vec> = pats:separated(, ) { comma_separate(pats.0, pats.1, None) } rule keyword_pattern() -> MatchKeywordElement<'input, 'a> = arg:name() eq:lit("=") value:pattern() { make_match_keyword_element(arg, eq, value) } // Type statement rule type_stmt() -> TypeAlias<'input, 'a> = t:lit("type") n:name() ps:type_params()? eq:lit("=") v:expression() { make_type_alias(t, n, ps, eq, v) } // Type parameter declaration rule type_params() -> TypeParameters<'input, 'a> = lb:lbrak() ps:separated_trailer(, ) rb:rbrak() { make_type_parameters(lb, comma_separate(ps.0, ps.1, ps.2), rb) } rule type_param() -> TypeParam<'input, 'a> = n:name() b:type_param_bound()? { make_type_var(n, b) } / s:lit("*") n:name() { make_type_var_tuple(s, n) } / s:lit("**") n:name() { make_param_spec(s, n) } rule type_param_bound() -> TypeParamBound<'input, 'a> = c:lit(":") e:expression() { make_type_param_bound(c, e) } // Expressions #[cache] rule expression() -> Expression<'input, 'a> = _conditional_expression() / lambdef() rule _conditional_expression() -> Expression<'input, 'a> = body:disjunction() i:lit("if") test:disjunction() e:lit("else") oe:expression() { Expression::IfExp(Box::new(make_ifexp(body, i, test, e, oe))) } / disjunction() rule yield_expr() -> Expression<'input, 'a> = y:lit("yield") f:lit("from") a:expression() { Expression::Yield(Box::new(make_yield(y, Some(f), Some(a)))) } / y:lit("yield") a:star_expressions()? { Expression::Yield(Box::new(make_yield(y, None, a))) } rule star_expressions() -> Expression<'input, 'a> = first:star_expression() rest:(comma:comma() e:star_expression() { (comma, expr_to_element(e)) })+ comma:comma()? { Expression::Tuple(Box::new(make_tuple(expr_to_element(first), rest, comma, None, None))) } / e:star_expression() comma:comma() { Expression::Tuple(Box::new(make_tuple(expr_to_element(e), vec![], Some(comma), None, None))) } / star_expression() #[cache] rule star_expression() -> Expression<'input, 'a> = star:lit("*") e:bitwise_or() { Expression::StarredElement(Box::new(make_starred_element(star, expr_to_element(e)))) } / expression() rule star_named_expressions() -> Vec> = exps:separated_trailer(, ) { comma_separate(exps.0, exps.1, exps.2) } rule star_named_expression() -> Element<'input, 'a> = star:lit("*") e:bitwise_or() { Element::Starred(Box::new(make_starred_element(star, expr_to_element(e)))) } / e:named_expression() { expr_to_element(e) } rule named_expression() -> Expression<'input, 'a> = a:name() op:lit(":=") b:expression() { Expression::NamedExpr(Box::new(make_named_expr(a, op, b))) } / e:expression() !lit(":=") { e } #[cache] rule disjunction() -> Expression<'input, 'a> = a:conjunction() b:(or:lit("or") inner:conjunction() { (or, inner) })+ {? make_boolean_op(a, b).map_err(|e| "expected disjunction") } / conjunction() #[cache] rule conjunction() -> Expression<'input, 'a> = a:inversion() b:(and:lit("and") inner:inversion() { (and, inner) })+ {? make_boolean_op(a, b).map_err(|e| "expected conjunction") } / inversion() #[cache] rule inversion() -> Expression<'input, 'a> = not:lit("not") a:inversion() {? 
make_unary_op(not, a).map_err(|e| "expected inversion") } / comparison() // Comparison operators #[cache] rule comparison() -> Expression<'input, 'a> = a:bitwise_or() b:compare_op_bitwise_or_pair()+ { make_comparison(a, b) } / bitwise_or() // This implementation diverges slightly from CPython (3.9) to avoid bloating // the parser cache and increase readability. #[cache] rule compare_op_bitwise_or_pair() -> (CompOp<'input, 'a>, Expression<'input, 'a>) = _op_bitwise_or("==") / _op_bitwise_or("!=") // TODO: support barry_as_flufl / _op_bitwise_or("<=") / _op_bitwise_or("<") / _op_bitwise_or(">=") / _op_bitwise_or(">") / _op_bitwise_or2("not", "in") / _op_bitwise_or("in") / _op_bitwise_or2("is", "not") / _op_bitwise_or("is") rule _op_bitwise_or(o: &'static str) -> (CompOp<'input, 'a>, Expression<'input, 'a>) = op:lit(o) e:bitwise_or() {? make_comparison_operator(op) .map(|op| (op, e)) .map_err(|_| "comparison") } rule _op_bitwise_or2(first: &'static str, second: &'static str) -> (CompOp<'input, 'a>, Expression<'input, 'a>) = f:lit(first) s:lit(second) e:bitwise_or() {? make_comparison_operator_2(f, s) .map(|op| (op, e)) .map_err(|_| "comparison") } #[cache_left_rec] rule bitwise_or() -> Expression<'input, 'a> = a:bitwise_or() op:lit("|") b:bitwise_xor() {? make_binary_op(a, op, b).map_err(|e| "expected bitwise_or") } / bitwise_xor() #[cache_left_rec] rule bitwise_xor() -> Expression<'input, 'a> = a:bitwise_xor() op:lit("^") b:bitwise_and() {? make_binary_op(a, op, b).map_err(|e| "expected bitwise_xor") } / bitwise_and() #[cache_left_rec] rule bitwise_and() -> Expression<'input, 'a> = a:bitwise_and() op:lit("&") b:shift_expr() {? make_binary_op(a, op, b).map_err(|e| "expected bitwise_and") } / shift_expr() #[cache_left_rec] rule shift_expr() -> Expression<'input, 'a> = a:shift_expr() op:lit("<<") b:sum() {? make_binary_op(a, op, b).map_err(|e| "expected shift_expr") } / a:shift_expr() op:lit(">>") b:sum() {? make_binary_op(a, op, b).map_err(|e| "expected shift_expr") } / sum() #[cache_left_rec] rule sum() -> Expression<'input, 'a> = a:sum() op:lit("+") b:term() {? make_binary_op(a, op, b).map_err(|e| "expected sum") } / a:sum() op:lit("-") b:term() {? make_binary_op(a, op, b).map_err(|e| "expected sum") } / term() #[cache_left_rec] rule term() -> Expression<'input, 'a> = a:term() op:lit("*") b:factor() {? make_binary_op(a, op, b).map_err(|e| "expected term") } / a:term() op:lit("/") b:factor() {? make_binary_op(a, op, b).map_err(|e| "expected term") } / a:term() op:lit("//") b:factor() {? make_binary_op(a, op, b).map_err(|e| "expected term") } / a:term() op:lit("%") b:factor() {? make_binary_op(a, op, b).map_err(|e| "expected term") } / a:term() op:lit("@") b:factor() {? make_binary_op(a, op, b).map_err(|e| "expected term") } / factor() #[cache] rule factor() -> Expression<'input, 'a> = op:lit("+") a:factor() {? make_unary_op(op, a).map_err(|e| "expected factor") } / op:lit("-") a:factor() {? make_unary_op(op, a).map_err(|e| "expected factor") } / op:lit("~") a:factor() {? make_unary_op(op, a).map_err(|e| "expected factor") } / power() rule power() -> Expression<'input, 'a> = a:await_primary() op:lit("**") b:factor() {? 
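// `**` is right-associative because its right operand is factor(), which
// recurses back into power(): `2 ** 3 ** 2` parses as `2 ** (3 ** 2)`.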
make_binary_op(a, op, b).map_err(|e| "expected power") } / await_primary() // Primary elements rule await_primary() -> Expression<'input, 'a> = aw:tok(AWAIT, "AWAIT") e:primary() { Expression::Await(Box::new(make_await(aw, e))) } / primary() #[cache_left_rec] rule primary() -> Expression<'input, 'a> = v:primary() dot:lit(".") attr:name() { Expression::Attribute(Box::new(make_attribute(v, dot, attr))) } / a:primary() b:genexp() { Expression::Call(Box::new(make_genexp_call(a, b))) } / f:primary() lpar:lit("(") arg:arguments()? rpar:lit(")") { Expression::Call(Box::new(make_call(f, lpar, arg.unwrap_or_default(), rpar))) } / v:primary() lbrak:lbrak() s:slices() rbrak:rbrak() { Expression::Subscript(Box::new(make_subscript(v, lbrak, s, rbrak))) } / atom() rule slices() -> Vec> = s:slice() !lit(",") { vec![SubscriptElement { slice: s, comma: None }] } / slices:separated_trailer(, ) { make_slices(slices.0, slices.1, slices.2) } rule slice() -> BaseSlice<'input, 'a> = l:expression()? col:lit(":") u:expression()? rest:(c:lit(":") s:expression()? {(c, s)})? { make_slice(l, col, u, rest) } / e:starred_expression() { make_index_from_arg(e) } / v:named_expression() { make_index(v) } rule atom() -> Expression<'input, 'a> = n:name() { Expression::Name(Box::new(n)) } / n:lit("True") { Expression::Name(Box::new(make_name(n))) } / n:lit("False") { Expression::Name(Box::new(make_name(n))) } / n:lit("None") { Expression::Name(Box::new(make_name(n))) } / &(tok(STRING, "") / tok(FStringStart, "")) s:strings() {s.into()} / n:tok(Number, "NUMBER") { make_number(n) } / &lit("(") e:(tuple() / group() / (g:genexp() {Expression::GeneratorExp(Box::new(g))})) {e} / &lit("[") e:(list() / listcomp()) {e} / &lit("{") e:(dict() / set() / dictcomp() / setcomp()) {e} / lit("...") { Expression::Ellipsis(Box::new(Ellipsis {lpar: vec![], rpar: vec![]}))} rule group() -> Expression<'input, 'a> = lpar:lpar() e:(yield_expr() / named_expression()) rpar:rpar() { e.with_parens(lpar, rpar) } // Lambda functions rule lambdef() -> Expression<'input, 'a> = kw:lit("lambda") p:lambda_params()? c:lit(":") b:expression() { Expression::Lambda(Box::new(make_lambda(kw, p.unwrap_or_default(), c, b))) } rule lambda_params() -> Parameters<'input, 'a> = lambda_parameters() // lambda_parameters etc. duplicates parameters but without annotations or type // comments, and if there's no comma after a parameter, we expect a colon, not a // close parenthesis. rule lambda_parameters() -> Parameters<'input, 'a> = a:lambda_slash_no_default() b:lambda_param_no_default()* c:lambda_param_with_default()* d:lambda_star_etc()? { make_parameters(Some(a), concat(b, c), d) } / a:lambda_slash_with_default() b:lambda_param_with_default()* d:lambda_star_etc()? { make_parameters(Some(a), b, d) } / a:lambda_param_no_default()+ b:lambda_param_with_default()* d:lambda_star_etc()? { make_parameters(None, concat(a, b), d) } / a:lambda_param_with_default()+ d:lambda_star_etc()? 
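// e.g. `lambda x, y=1, *args, **kw: body` -- the same shapes as the def
// parameter rules, minus annotations.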
{ make_parameters(None, a, d) } / d:lambda_star_etc() { make_parameters(None, vec![], Some(d)) } rule lambda_slash_no_default() -> (Vec>, ParamSlash<'input, 'a>) = a:lambda_param_no_default()+ tok:lit("/") com:comma() { (a, ParamSlash { comma: Some(com), tok } ) } / a:lambda_param_no_default()+ tok:lit("/") &lit(":") { (a, ParamSlash { comma: None, tok }) } rule lambda_slash_with_default() -> (Vec>, ParamSlash<'input, 'a>) = a:lambda_param_no_default()* b:lambda_param_with_default()+ tok:lit("/") c:comma(){ (concat(a, b), ParamSlash { comma: Some(c), tok }) } / a:lambda_param_no_default()* b:lambda_param_with_default()+ tok:lit("/") &lit(":") { (concat(a, b), ParamSlash { comma: None, tok }) } rule lambda_star_etc() -> StarEtc<'input, 'a> = star:lit("*") a:lambda_param_no_default() b:lambda_param_maybe_default()* kw:lambda_kwds()? { StarEtc(Some(StarArg::Param( Box::new(add_param_star(a, star)) )), b, kw) } / lit("*") c:comma() b:lambda_param_maybe_default()+ kw:lambda_kwds()? { StarEtc(Some(StarArg::Star(Box::new(ParamStar {comma: c}))), b, kw) } / kw:lambda_kwds() { StarEtc(None, vec![], Some(kw)) } rule lambda_kwds() -> Param<'input, 'a> = star:lit("**") a:lambda_param_no_default() { add_param_star(a, star) } rule lambda_param_no_default() -> Param<'input, 'a> = a:lambda_param() c:lit(",") { add_param_default(a, None, Some(c)) } / a:lambda_param() &lit(":") {a} rule lambda_param_with_default() -> Param<'input, 'a> = a:lambda_param() def:default() c:lit(",") { add_param_default(a, Some(def), Some(c)) } / a:lambda_param() def:default() &lit(":") { add_param_default(a, Some(def), None) } rule lambda_param_maybe_default() -> Param<'input, 'a> = a:lambda_param() def:default()? c:lit(",") { add_param_default(a, def, Some(c)) } / a:lambda_param() def:default()? &lit(":") { add_param_default(a, def, None) } rule lambda_param() -> Param<'input, 'a> = name:name() { Param { name, ..Default::default() } } // Literals rule strings() -> String<'input, 'a> = s:(str:tok(STRING, "STRING") t:&_ {(make_string(str), t)} / str:fstring() t:&_ {(String::Formatted(str), t)})+ {? make_strings(s) } rule list() -> Expression<'input, 'a> = lbrak:lbrak() e:star_named_expressions()? rbrak:rbrak() { Expression::List(Box::new( make_list(lbrak, e.unwrap_or_default(), rbrak)) ) } rule tuple() -> Expression<'input, 'a> = lpar:lpar() first:star_named_expression() &lit(",") rest:(c:comma() e:star_named_expression() {(c, e)})* trailing_comma:comma()? rpar:rpar() { Expression::Tuple(Box::new( make_tuple(first, rest, trailing_comma, Some(lpar), Some(rpar)) )) } / lpar:lpar() rpar:lit(")") { Expression::Tuple(Box::new(Tuple::default().with_parens( lpar, RightParen { rpar_tok: rpar } )))} rule set() -> Expression<'input, 'a> = lbrace:lbrace() e:star_named_expressions()? rbrace:rbrace() { Expression::Set(Box::new(make_set(lbrace, e.unwrap_or_default(), rbrace))) } // Dicts rule dict() -> Expression<'input, 'a> = lbrace:lbrace() els:double_starred_keypairs()? 
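// Covers `{}`, `{k: v}`, and unpacking entries like `{**base, "k": v}`
// via double_starred_kvpair below.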
rbrace:rbrace() { Expression::Dict(Box::new(make_dict(lbrace, els.unwrap_or_default(), rbrace))) } rule double_starred_keypairs() -> Vec> = pairs:separated_trailer(, ) { make_double_starred_keypairs(pairs.0, pairs.1, pairs.2) } rule double_starred_kvpair() -> DictElement<'input, 'a> = s:lit("**") e:bitwise_or() { DictElement::Starred(make_double_starred_element(s, e)) } / k:kvpair() { make_dict_element(k) } rule kvpair() -> (Expression<'input, 'a>, TokenRef<'input, 'a>, Expression<'input, 'a>) = k:expression() colon:lit(":") v:expression() { (k, colon, v) } // Comprehensions & generators rule for_if_clauses() -> CompFor<'input, 'a> = c:for_if_clause()+ {? merge_comp_fors(c) } rule for_if_clause() -> CompFor<'input, 'a> = asy:_async() f:lit("for") tgt:star_targets() i:lit("in") iter:disjunction() ifs:_comp_if()* { make_for_if(Some(asy), f, tgt, i, iter, ifs) } / f:lit("for") tgt:star_targets() i:lit("in") iter:disjunction() ifs:_comp_if()* { make_for_if(None, f, tgt, i, iter, ifs) } rule _comp_if() -> CompIf<'input, 'a> = kw:lit("if") cond:disjunction() { make_comp_if(kw, cond) } rule listcomp() -> Expression<'input, 'a> = lbrak:lbrak() elt:named_expression() comp:for_if_clauses() rbrak:rbrak() { Expression::ListComp(Box::new(make_list_comp(lbrak, elt, comp, rbrak))) } rule setcomp() -> Expression<'input, 'a> = l:lbrace() elt:named_expression() comp:for_if_clauses() r:rbrace() { Expression::SetComp(Box::new(make_set_comp(l, elt, comp, r))) } rule genexp() -> GeneratorExp<'input, 'a> = lpar:lpar() g:_bare_genexp() rpar:rpar() { g.with_parens(lpar, rpar) } rule _bare_genexp() -> GeneratorExp<'input, 'a> = elt:named_expression() comp:for_if_clauses() { make_bare_genexp(elt, comp) } rule dictcomp() -> Expression<'input, 'a> = lbrace:lbrace() elt:kvpair() comp:for_if_clauses() rbrace:rbrace() { Expression::DictComp(Box::new(make_dict_comp(lbrace, elt, comp, rbrace))) } // Function call arguments rule arguments() -> Vec> = a:args() trail:comma()? &lit(")") {add_arguments_trailing_comma(a, trail)} rule args() -> Vec> = first:_posarg() rest:(c:comma() a:_posarg() {(c, a)})* kw:(c:comma() k:kwargs() {(c, k)})? 
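// Positional arguments come first; the optional tail switches to
// kwargs() once the first `name=` or `**` argument is seen.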
{ let (trail, kw) = kw.map(|(x,y)| (Some(x), Some(y))).unwrap_or((None, None)); concat( comma_separate(first, rest, trail), kw.unwrap_or_default(), ) } / kwargs() rule _posarg() -> Arg<'input, 'a> = a:(starred_expression() / e:named_expression() { make_arg(e) }) !lit("=") { a } rule kwargs() -> Vec> = sitems:separated(, ) scomma:comma() ditems:separated(, ) { concat( comma_separate(sitems.0, sitems.1, Some(scomma)), comma_separate(ditems.0, ditems.1, None), ) } / items:separated(, ) { comma_separate(items.0, items.1, None) } / items:separated(, ) { comma_separate(items.0, items.1, None) } rule starred_expression() -> Arg<'input, 'a> = star:lit("*") e:expression() { make_star_arg(star, e) } rule kwarg_or_starred() -> Arg<'input, 'a> = _kwarg() / starred_expression() rule kwarg_or_double_starred() -> Arg<'input, 'a> = _kwarg() / star:lit("**") e:expression() { make_star_arg(star, e) } rule _kwarg() -> Arg<'input, 'a> = n:name() eq:lit("=") v:expression() { make_kwarg(n, eq, v) } // Assignment targets // Generic targets rule star_targets() -> AssignTargetExpression<'input, 'a> = a:star_target() !lit(",") {a} / targets:separated_trailer(, ) { AssignTargetExpression::Tuple(Box::new( make_tuple(targets.0, targets.1, targets.2, None, None) )) } rule star_targets_list_seq() -> Vec> = targets:separated_trailer(, ) { comma_separate(targets.0, targets.1, targets.2) } // This differs from star_targets below because it requires at least two items // in the tuple rule star_targets_tuple_seq() -> Tuple<'input, 'a> = first:(t:star_target() {assign_target_to_element(t)}) rest:(c:comma() t:star_target() {(c, assign_target_to_element(t))})+ trail:comma()? { make_tuple(first, rest, trail, None, None) } / t:star_target() trail:comma()? { make_tuple(assign_target_to_element(t), vec![], trail, None, None) } #[cache] rule star_target() -> AssignTargetExpression<'input, 'a> = star:lit("*") !lit("*") t:star_target() { AssignTargetExpression::StarredElement(Box::new( make_starred_element(star, assign_target_to_element(t)) )) } / target_with_star_atom() #[cache] rule target_with_star_atom() -> AssignTargetExpression<'input, 'a> = a:t_primary() dot:lit(".") n:name() !t_lookahead() { AssignTargetExpression::Attribute(Box::new(make_attribute(a, dot, n))) } / a:t_primary() lbrak:lbrak() s:slices() rbrak:rbrak() !t_lookahead() { AssignTargetExpression::Subscript(Box::new( make_subscript(a, lbrak, s, rbrak) )) } / a:star_atom() {a} rule star_atom() -> AssignTargetExpression<'input, 'a> = a:name() { AssignTargetExpression::Name(Box::new(a)) } / lpar:lpar() a:target_with_star_atom() rpar:rpar() { a.with_parens(lpar, rpar) } / lpar:lpar() a:star_targets_tuple_seq()? rpar:rpar() { AssignTargetExpression::Tuple(Box::new( a.unwrap_or_default().with_parens(lpar, rpar) )) } / lbrak:lbrak() a:star_targets_list_seq()? 
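// List-shaped assignment target, e.g. `[a, b, *rest] = value`.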
rbrak:rbrak() { AssignTargetExpression::List(Box::new( make_list(lbrak, a.unwrap_or_default(), rbrak) )) } rule single_target() -> AssignTargetExpression<'input, 'a> = single_subscript_attribute_target() / n:name() { AssignTargetExpression::Name(Box::new(n)) } / lpar:lpar() t:single_target() rpar:rpar() { t.with_parens(lpar, rpar) } rule single_subscript_attribute_target() -> AssignTargetExpression<'input, 'a> = a:t_primary() dot:lit(".") n:name() !t_lookahead() { AssignTargetExpression::Attribute(Box::new(make_attribute(a, dot, n))) } / a:t_primary() lbrak:lbrak() s:slices() rbrak:rbrak() !t_lookahead() { AssignTargetExpression::Subscript(Box::new( make_subscript(a, lbrak, s, rbrak) )) } #[cache_left_rec] rule t_primary() -> Expression<'input, 'a> = value:t_primary() dot:lit(".") attr:name() &t_lookahead() { Expression::Attribute(Box::new(make_attribute(value, dot, attr))) } / v:t_primary() l:lbrak() s:slices() r:rbrak() &t_lookahead() { Expression::Subscript(Box::new(make_subscript(v, l, s, r))) } / f:t_primary() gen:genexp() &t_lookahead() { Expression::Call(Box::new(make_genexp_call(f, gen))) } / f:t_primary() lpar:lit("(") arg:arguments()? rpar:lit(")") &t_lookahead() { Expression::Call(Box::new(make_call(f, lpar, arg.unwrap_or_default(), rpar))) } / a:atom() &t_lookahead() {a} rule t_lookahead() -> () = (lit("(") / lit("[") / lit(".")) {} // Targets for del statements rule del_targets() -> Vec> = t:separated_trailer(, ) { comma_separate(t.0, t.1, t.2) } rule del_target() -> DelTargetExpression<'input, 'a> = a:t_primary() d:lit(".") n:name() !t_lookahead() { DelTargetExpression::Attribute(Box::new(make_attribute(a, d, n))) } / a:t_primary() lbrak:lbrak() s:slices() rbrak:rbrak() !t_lookahead() { DelTargetExpression::Subscript(Box::new( make_subscript(a, lbrak, s, rbrak) )) } / del_t_atom() rule del_t_atom() -> DelTargetExpression<'input, 'a> = n:name() { DelTargetExpression::Name(Box::new(n)) } / l:lpar() d:del_target() r:rpar() { d.with_parens(l, r) } / l:lpar() d:del_targets()? r:rpar() { make_del_tuple(Some(l), d.unwrap_or_default(), Some(r)) } / l:lbrak() d:del_targets()? r:rbrak() { DelTargetExpression::List(Box::new( make_list(l, d.unwrap_or_default(), r) )) } // F-strings rule fstring() -> FormattedString<'input, 'a> = start:tok(FStringStart, "f\"") parts:(_f_string() / _f_replacement())* end:tok(FStringEnd, "\"") { make_fstring(start.string, parts, end.string) } rule _f_string() -> FormattedStringContent<'input, 'a> = t:tok(FStringString, "f-string contents") { FormattedStringContent::Text(make_fstringtext(t.string)) } rule _f_replacement() -> FormattedStringContent<'input, 'a> = lb:lit("{") e:_f_expr() eq:lit("=")? conv:(t:lit("!") c:_f_conversion() {(t,c)})? spec:(t:lit(":") s:_f_spec() {(t,s)})? 
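// e.g. f"{val!r:>10}" -- optional `=` (self-documenting form), then a
// `!r`/`!s`/`!a` conversion, then a `:` format spec, in that order.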
rb:lit("}") { FormattedStringContent::Expression(Box::new( make_fstring_expression(lb, e, eq, conv, spec, rb) )) } rule _f_expr() -> Expression<'input, 'a> = (g:_bare_genexp() {Expression::GeneratorExp(Box::new(g))}) / star_expressions() / yield_expr() rule _f_conversion() -> &'a str = lit("r") {"r"} / lit("s") {"s"} / lit("a") {"a"} rule _f_spec() -> Vec> = (_f_string() / _f_replacement())* // CST helpers rule comma() -> Comma<'input, 'a> = c:lit(",") { make_comma(c) } rule dots() -> Vec> = ds:((dot:lit(".") { make_dot(dot) })+ / tok:lit("...") { vec![make_dot(tok), make_dot(tok), make_dot(tok)]} )+ { ds.into_iter().flatten().collect() } rule lpar() -> LeftParen<'input, 'a> = a:lit("(") { make_lpar(a) } rule rpar() -> RightParen<'input, 'a> = a:lit(")") { make_rpar(a) } rule lbrak() -> LeftSquareBracket<'input, 'a> = tok:lit("[") { make_left_bracket(tok) } rule rbrak() -> RightSquareBracket<'input, 'a> = tok:lit("]") { make_right_bracket(tok) } rule lbrace() -> LeftCurlyBrace<'input, 'a> = tok:lit("{") { make_left_brace(tok) } rule rbrace() -> RightCurlyBrace<'input, 'a> = tok:lit("}") { make_right_brace(tok) } /// matches any token, not just whitespace rule _() -> TokenRef<'input, 'a> = [t] { t } rule lit(lit: &'static str) -> TokenRef<'input, 'a> = [t] {? if t.string == lit { Ok(t) } else { Err(lit) } } rule tok(tok: TokType, err: &'static str) -> TokenRef<'input, 'a> = [t] {? if t.r#type == tok { Ok(t) } else { Err(err) } } rule name() -> Name<'input, 'a> = !( lit("False") / lit("None") / lit("True") / lit("and") / lit("as") / lit("assert") / lit("async") / lit("await") / lit("break") / lit("class") / lit("continue") / lit("def") / lit("del") / lit("elif") / lit("else") / lit("except") / lit("finally") / lit("for") / lit("from") / lit("global") / lit("if") / lit("import") / lit("in") / lit("is") / lit("lambda") / lit("nonlocal") / lit("not") / lit("or") / lit("pass") / lit("raise") / lit("return") / lit("try") / lit("while") / lit("with") / lit("yield") ) t:tok(NameTok, "NAME") {make_name(t)} rule _async() -> TokenRef<'input, 'a> = tok(Async, "ASYNC") rule separated_trailer(el: rule, sep: rule) -> (El, Vec<(Sep, El)>, Option) = e:el() rest:(s:sep() e:el() {(s, e)})* trailer:sep()? {(e, rest, trailer)} rule separated(el: rule, sep: rule) -> (El, Vec<(Sep, El)>) = e:el() rest:(s:sep() e:el() {(s, e)})* {(e, rest)} rule traced(e: rule) -> T = &(_* { #[cfg(feature = "trace")] { println!("[PEG_INPUT_START]"); println!("{}", input); println!("[PEG_TRACE_START]"); } }) e:e()? {? 
#[cfg(feature = "trace")] println!("[PEG_TRACE_STOP]"); e.ok_or("") } } } #[allow(clippy::too_many_arguments)] fn make_function_def<'input, 'a>( async_tok: Option>, def_tok: TokenRef<'input, 'a>, name: Name<'input, 'a>, type_parameters: Option>, open_paren_tok: TokenRef<'input, 'a>, params: Option>, close_paren_tok: TokenRef<'input, 'a>, returns: Option>, colon_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, ) -> FunctionDef<'input, 'a> { let asynchronous = async_tok.as_ref().map(|_| make_async()); FunctionDef { name, type_parameters, params: params.unwrap_or_default(), body, decorators: Default::default(), returns, asynchronous, async_tok, def_tok, open_paren_tok, close_paren_tok, colon_tok, } } fn make_decorator<'input, 'a>( at_tok: TokenRef<'input, 'a>, name: Expression<'input, 'a>, newline_tok: TokenRef<'input, 'a>, ) -> Decorator<'input, 'a> { Decorator { decorator: name, newline_tok, at_tok, } } fn make_comparison<'input, 'a>( head: Expression<'input, 'a>, tail: Vec<(CompOp<'input, 'a>, Expression<'input, 'a>)>, ) -> Expression<'input, 'a> { let mut comparisons = vec![]; for (operator, e) in tail { comparisons.push(ComparisonTarget { operator, comparator: e, }); } Expression::Comparison(Box::new(Comparison { left: Box::new(head), comparisons, lpar: vec![], rpar: vec![], })) } fn make_comparison_operator<'input, 'a>( tok: TokenRef<'input, 'a>, ) -> Result<'a, CompOp<'input, 'a>> { match tok.string { "<" => Ok(CompOp::LessThan { tok }), ">" => Ok(CompOp::GreaterThan { tok }), "<=" => Ok(CompOp::LessThanEqual { tok }), ">=" => Ok(CompOp::GreaterThanEqual { tok }), "==" => Ok(CompOp::Equal { tok }), "!=" => Ok(CompOp::NotEqual { tok }), "in" => Ok(CompOp::In { tok }), "is" => Ok(CompOp::Is { tok }), _ => Err(ParserError::OperatorError), } } fn make_comparison_operator_2<'input, 'a>( first: TokenRef<'input, 'a>, second: TokenRef<'input, 'a>, ) -> Result<'a, CompOp<'input, 'a>> { match (first.string, second.string) { ("is", "not") => Ok(CompOp::IsNot { is_tok: first, not_tok: second, }), ("not", "in") => Ok(CompOp::NotIn { not_tok: first, in_tok: second, }), _ => Err(ParserError::OperatorError), } } fn make_boolean_op<'input, 'a>( head: Expression<'input, 'a>, tail: Vec<(TokenRef<'input, 'a>, Expression<'input, 'a>)>, ) -> Result<'a, Expression<'input, 'a>> { if tail.is_empty() { return Ok(head); } let mut expr = head; for (tok, right) in tail { expr = Expression::BooleanOperation(Box::new(BooleanOperation { left: Box::new(expr), operator: make_boolean_operator(tok)?, right: Box::new(right), lpar: vec![], rpar: vec![], })) } Ok(expr) } fn make_boolean_operator<'input, 'a>( tok: TokenRef<'input, 'a>, ) -> Result<'a, BooleanOp<'input, 'a>> { match tok.string { "and" => Ok(BooleanOp::And { tok }), "or" => Ok(BooleanOp::Or { tok }), _ => Err(ParserError::OperatorError), } } fn make_binary_op<'input, 'a>( left: Expression<'input, 'a>, op: TokenRef<'input, 'a>, right: Expression<'input, 'a>, ) -> Result<'a, Expression<'input, 'a>> { let operator = make_binary_operator(op)?; Ok(Expression::BinaryOperation(Box::new(BinaryOperation { left: Box::new(left), operator, right: Box::new(right), lpar: vec![], rpar: vec![], }))) } fn make_binary_operator<'input, 'a>(tok: TokenRef<'input, 'a>) -> Result<'a, BinaryOp<'input, 'a>> { match tok.string { "+" => Ok(BinaryOp::Add { tok }), "-" => Ok(BinaryOp::Subtract { tok }), "*" => Ok(BinaryOp::Multiply { tok }), "/" => Ok(BinaryOp::Divide { tok }), "//" => Ok(BinaryOp::FloorDivide { tok }), "%" => Ok(BinaryOp::Modulo { tok }), "**" => Ok(BinaryOp::Power { 
tok }), "<<" => Ok(BinaryOp::LeftShift { tok }), ">>" => Ok(BinaryOp::RightShift { tok }), "|" => Ok(BinaryOp::BitOr { tok }), "&" => Ok(BinaryOp::BitAnd { tok }), "^" => Ok(BinaryOp::BitXor { tok }), "@" => Ok(BinaryOp::MatrixMultiply { tok }), _ => Err(ParserError::OperatorError), } } fn make_unary_op<'input, 'a>( op: TokenRef<'input, 'a>, tail: Expression<'input, 'a>, ) -> Result<'a, Expression<'input, 'a>> { let operator = make_unary_operator(op)?; Ok(Expression::UnaryOperation(Box::new(UnaryOperation { operator, expression: Box::new(tail), lpar: vec![], rpar: vec![], }))) } fn make_unary_operator<'input, 'a>(tok: TokenRef<'input, 'a>) -> Result<'a, UnaryOp<'input, 'a>> { match tok.string { "+" => Ok(UnaryOp::Plus { tok }), "-" => Ok(UnaryOp::Minus { tok }), "~" => Ok(UnaryOp::BitInvert { tok }), "not" => Ok(UnaryOp::Not { tok }), _ => Err(ParserError::OperatorError), } } fn make_number<'input, 'a>(num: TokenRef<'input, 'a>) -> Expression<'input, 'a> { super::numbers::parse_number(num.string) } fn make_indented_block<'input, 'a>( nl: TokenRef<'input, 'a>, indent: TokenRef<'input, 'a>, statements: Vec>, dedent: TokenRef<'input, 'a>, ) -> Suite<'input, 'a> { Suite::IndentedBlock(IndentedBlock { body: statements, indent: Default::default(), newline_tok: nl, indent_tok: indent, dedent_tok: dedent, }) } struct SimpleStatementParts<'input, 'a> { first_tok: TokenRef<'input, 'a>, // The first token of the first statement. Used for its whitespace first_statement: SmallStatement<'input, 'a>, rest: Vec<(TokenRef<'input, 'a>, SmallStatement<'input, 'a>)>, // semicolon, statement pairs last_semi: Option>, nl: TokenRef<'input, 'a>, } fn make_semicolon<'input, 'a>(tok: TokenRef<'input, 'a>) -> Semicolon<'input, 'a> { Semicolon { tok } } fn _make_simple_statement<'input, 'a>( parts: SimpleStatementParts<'input, 'a>, ) -> ( TokenRef<'input, 'a>, Vec>, TokenRef<'input, 'a>, ) { let mut body = vec![]; let mut current = parts.first_statement; for (semi, next) in parts.rest { body.push(current.with_semicolon(Some(make_semicolon(semi)))); current = next; } if let Some(semi) = parts.last_semi { current = current.with_semicolon(Some(make_semicolon(semi))); } body.push(current); (parts.first_tok, body, parts.nl) } fn make_simple_statement_suite<'input, 'a>( parts: SimpleStatementParts<'input, 'a>, ) -> Suite<'input, 'a> { let (first_tok, body, newline_tok) = _make_simple_statement(parts); Suite::SimpleStatementSuite(SimpleStatementSuite { body, first_tok, newline_tok, }) } fn make_simple_statement_line<'input, 'a>( parts: SimpleStatementParts<'input, 'a>, ) -> SimpleStatementLine<'input, 'a> { let (first_tok, body, newline_tok) = _make_simple_statement(parts); SimpleStatementLine { body, first_tok, newline_tok, } } fn make_if<'input, 'a>( if_tok: TokenRef<'input, 'a>, cond: Expression<'input, 'a>, colon_tok: TokenRef<'input, 'a>, block: Suite<'input, 'a>, orelse: Option>, is_elif: bool, ) -> If<'input, 'a> { If { test: cond, body: block, orelse: orelse.map(Box::new), is_elif, if_tok, colon_tok, } } fn make_else<'input, 'a>( else_tok: TokenRef<'input, 'a>, colon_tok: TokenRef<'input, 'a>, block: Suite<'input, 'a>, ) -> Else<'input, 'a> { Else { body: block, else_tok, colon_tok, } } struct StarEtc<'input, 'a>( Option>, Vec>, Option>, ); fn make_parameters<'input, 'a>( posonly: Option<(Vec>, ParamSlash<'input, 'a>)>, params: Vec>, star_etc: Option>, ) -> Parameters<'input, 'a> { let (posonly_params, posonly_ind) = match posonly { Some((a, b)) => (a, Some(b)), None => (vec![], None), }; let (star_arg, 
kwonly_params, star_kwarg) = match star_etc { None => (None, vec![], None), Some(StarEtc(a, b, c)) => (a, b, c), }; Parameters { params, star_arg, kwonly_params, star_kwarg, posonly_params, posonly_ind, } } fn add_param_default<'input, 'a>( param: Param<'input, 'a>, def: Option<(AssignEqual<'input, 'a>, Expression<'input, 'a>)>, comma_tok: Option>, ) -> Param<'input, 'a> { let comma = comma_tok.map(make_comma); let (equal, default) = match def { Some((a, b)) => (Some(a), Some(b)), None => (None, None), }; Param { equal, default, comma, ..param } } fn add_param_star<'input, 'a>( param: Param<'input, 'a>, star: TokenRef<'input, 'a>, ) -> Param<'input, 'a> { let str = star.string; Param { star: Some(str), star_tok: Some(star), ..param } } fn make_assign_equal<'input, 'a>(tok: TokenRef<'input, 'a>) -> AssignEqual<'input, 'a> { AssignEqual { tok } } fn make_comma<'input, 'a>(tok: TokenRef<'input, 'a>) -> Comma<'input, 'a> { Comma { tok } } fn concat(a: Vec, b: Vec) -> Vec { a.into_iter().chain(b.into_iter()).collect() } fn make_name_or_attr<'input, 'a>( first_tok: Name<'input, 'a>, mut tail: Vec<(TokenRef<'input, 'a>, Name<'input, 'a>)>, ) -> NameOrAttribute<'input, 'a> { if let Some((dot, name)) = tail.pop() { let dot = make_dot(dot); return NameOrAttribute::A(Box::new(Attribute { attr: name, dot, lpar: Default::default(), rpar: Default::default(), value: Box::new(make_name_or_attr(first_tok, tail).into()), })); } else { NameOrAttribute::N(Box::new(first_tok)) } } fn make_name<'input, 'a>(tok: TokenRef<'input, 'a>) -> Name<'input, 'a> { Name { value: tok.string, ..Default::default() } } fn make_dot<'input, 'a>(tok: TokenRef<'input, 'a>) -> Dot<'input, 'a> { Dot { tok } } fn make_import_alias<'input, 'a>( name: NameOrAttribute<'input, 'a>, asname: Option<(TokenRef<'input, 'a>, Name<'input, 'a>)>, ) -> ImportAlias<'input, 'a> { ImportAlias { name, asname: asname.map(|(x, y)| make_as_name(x, AssignTargetExpression::Name(Box::new(y)))), comma: None, } } fn make_as_name<'input, 'a>( as_tok: TokenRef<'input, 'a>, name: AssignTargetExpression<'input, 'a>, ) -> AsName<'input, 'a> { AsName { name, as_tok } } type ParenthesizedImportNames<'input, 'a> = ( Option>, ImportNames<'input, 'a>, Option>, ); fn make_import_from<'input, 'a>( from_tok: TokenRef<'input, 'a>, dots: Vec>, module: Option>, import_tok: TokenRef<'input, 'a>, aliases: ParenthesizedImportNames<'input, 'a>, ) -> ImportFrom<'input, 'a> { let (lpar, names, rpar) = aliases; ImportFrom { module, names, relative: dots, lpar, rpar, semicolon: None, from_tok, import_tok, } } fn make_import<'input, 'a>( import_tok: TokenRef<'input, 'a>, names: Vec>, ) -> Import<'input, 'a> { Import { names, semicolon: None, import_tok, } } fn make_import_from_as_names<'input, 'a>( first: ImportAlias<'input, 'a>, tail: Vec<(Comma<'input, 'a>, ImportAlias<'input, 'a>)>, ) -> Vec> { let mut ret = vec![]; let mut cur = first; for (comma, alias) in tail { ret.push(cur.with_comma(comma)); cur = alias; } ret.push(cur); ret } fn make_lpar<'input, 'a>(tok: TokenRef<'input, 'a>) -> LeftParen<'input, 'a> { LeftParen { lpar_tok: tok } } fn make_rpar<'input, 'a>(tok: TokenRef<'input, 'a>) -> RightParen<'input, 'a> { RightParen { rpar_tok: tok } } fn make_module<'input, 'a>( body: Vec>, tok: TokenRef<'input, 'a>, encoding: &str, ) -> Module<'input, 'a> { Module { body, eof_tok: tok, default_indent: " ", default_newline: "\n", has_trailing_newline: false, encoding: encoding.to_string(), } } fn make_attribute<'input, 'a>( value: Expression<'input, 'a>, dot: TokenRef<'input, 'a>, 
attr: Name<'input, 'a>, ) -> Attribute<'input, 'a> { let dot = make_dot(dot); Attribute { attr, dot, lpar: Default::default(), rpar: Default::default(), value: Box::new(value), } } fn make_starred_element<'input, 'a>( star_tok: TokenRef<'input, 'a>, rest: Element<'input, 'a>, ) -> StarredElement<'input, 'a> { let value = match rest { Element::Simple { value, .. } => value, _ => panic!("Internal error while making starred element"), }; StarredElement { value: Box::new(value), lpar: Default::default(), rpar: Default::default(), comma: Default::default(), star_tok, } } fn assign_target_to_element<'input, 'a>( expr: AssignTargetExpression<'input, 'a>, ) -> Element<'input, 'a> { match expr { AssignTargetExpression::Attribute(a) => Element::Simple { value: Expression::Attribute(a), comma: Default::default(), }, AssignTargetExpression::Name(a) => Element::Simple { value: Expression::Name(a), comma: Default::default(), }, AssignTargetExpression::Tuple(a) => Element::Simple { value: Expression::Tuple(a), comma: Default::default(), }, AssignTargetExpression::StarredElement(s) => Element::Starred(s), AssignTargetExpression::List(l) => Element::Simple { value: Expression::List(l), comma: Default::default(), }, AssignTargetExpression::Subscript(s) => Element::Simple { value: Expression::Subscript(s), comma: Default::default(), }, } } fn make_assignment<'input, 'a>( lhs: Vec<(AssignTargetExpression<'input, 'a>, TokenRef<'input, 'a>)>, rhs: Expression<'input, 'a>, ) -> Assign<'input, 'a> { let mut targets = vec![]; for (target, equal_tok) in lhs { targets.push(AssignTarget { target, equal_tok }); } Assign { targets, value: rhs, semicolon: Default::default(), } } fn expr_to_element<'input, 'a>(expr: Expression<'input, 'a>) -> Element<'input, 'a> { match expr { Expression::StarredElement(inner_expr) => Element::Starred(inner_expr), _ => Element::Simple { value: expr, comma: Default::default(), }, } } fn make_tuple<'input, 'a>( first: Element<'input, 'a>, rest: Vec<(Comma<'input, 'a>, Element<'input, 'a>)>, trailing_comma: Option>, lpar: Option>, rpar: Option>, ) -> Tuple<'input, 'a> { let elements = comma_separate(first, rest, trailing_comma); let lpar = lpar.map(|l| vec![l]).unwrap_or_default(); let rpar = rpar.map(|r| vec![r]).unwrap_or_default(); Tuple { elements, lpar, rpar, } } fn make_tuple_from_elements<'input, 'a>( first: Element<'input, 'a>, mut rest: Vec>, ) -> Tuple<'input, 'a> { rest.insert(0, first); Tuple { elements: rest, lpar: Default::default(), rpar: Default::default(), } } fn make_kwarg<'input, 'a>( name: Name<'input, 'a>, eq: TokenRef<'input, 'a>, value: Expression<'input, 'a>, ) -> Arg<'input, 'a> { let equal = Some(make_assign_equal(eq)); let keyword = Some(name); Arg { value, keyword, equal, comma: None, star: "", star_tok: None, } } fn make_star_arg<'input, 'a>( star: TokenRef<'input, 'a>, expr: Expression<'input, 'a>, ) -> Arg<'input, 'a> { let str = star.string; Arg { value: expr, keyword: None, equal: None, comma: None, star: str, star_tok: Some(star), } } fn make_call<'input, 'a>( func: Expression<'input, 'a>, lpar_tok: TokenRef<'input, 'a>, args: Vec>, rpar_tok: TokenRef<'input, 'a>, ) -> Call<'input, 'a> { let lpar = vec![]; let rpar = vec![]; let func = Box::new(func); Call { func, args, lpar, rpar, lpar_tok, rpar_tok, } } fn make_genexp_call<'input, 'a>( func: Expression<'input, 'a>, mut genexp: GeneratorExp<'input, 'a>, ) -> Call<'input, 'a> { // func ( (genexp) ) // ^ // lpar_tok // lpar_tok is the same token that was used to parse genexp's first lpar. 
// Nothing owns the whitespace before lpar_tok, so the same token is passed in here // again, to be converted into whitespace_after_func. We then split off a pair of // parenthesis from genexp, since now Call will own them. let mut lpars = genexp.lpar.into_iter(); let lpar_tok = lpars.next().expect("genexp without lpar").lpar_tok; genexp.lpar = lpars.collect(); let rpar_tok = genexp.rpar.pop().expect("genexp without rpar").rpar_tok; Call { func: Box::new(func), args: vec![Arg { value: Expression::GeneratorExp(Box::new(genexp)), keyword: None, equal: None, comma: None, star: "", star_tok: None, }], lpar: vec![], rpar: vec![], lpar_tok, rpar_tok, } } fn make_arg<'input, 'a>(expr: Expression<'input, 'a>) -> Arg<'input, 'a> { Arg { value: expr, keyword: Default::default(), equal: Default::default(), comma: Default::default(), star: Default::default(), star_tok: None, } } fn make_comp_if<'input, 'a>( if_tok: TokenRef<'input, 'a>, test: Expression<'input, 'a>, ) -> CompIf<'input, 'a> { CompIf { test, if_tok } } fn make_for_if<'input, 'a>( async_tok: Option>, for_tok: TokenRef<'input, 'a>, target: AssignTargetExpression<'input, 'a>, in_tok: TokenRef<'input, 'a>, iter: Expression<'input, 'a>, ifs: Vec>, ) -> CompFor<'input, 'a> { let inner_for_in = None; let asynchronous = async_tok.as_ref().map(|_| make_async()); CompFor { target, iter, ifs, inner_for_in, asynchronous, async_tok, for_tok, in_tok, } } fn make_bare_genexp<'input, 'a>( elt: Expression<'input, 'a>, for_in: CompFor<'input, 'a>, ) -> GeneratorExp<'input, 'a> { GeneratorExp { elt: Box::new(elt), for_in: Box::new(for_in), lpar: Default::default(), rpar: Default::default(), } } fn merge_comp_fors<'input, 'a>( comp_fors: Vec>, ) -> GrammarResult> { if comp_fors.len() > MAX_RECURSION_DEPTH { return Err("shallower comprehension"); } let mut it = comp_fors.into_iter().rev(); let first = it.next().expect("cant merge empty comp_fors"); Ok(it.fold(first, |acc, curr| CompFor { inner_for_in: Some(Box::new(acc)), ..curr })) } fn make_left_bracket<'input, 'a>(tok: TokenRef<'input, 'a>) -> LeftSquareBracket<'input, 'a> { LeftSquareBracket { tok } } fn make_right_bracket<'input, 'a>(tok: TokenRef<'input, 'a>) -> RightSquareBracket<'input, 'a> { RightSquareBracket { tok } } fn make_left_brace<'input, 'a>(tok: TokenRef<'input, 'a>) -> LeftCurlyBrace<'input, 'a> { LeftCurlyBrace { tok } } fn make_right_brace<'input, 'a>(tok: TokenRef<'input, 'a>) -> RightCurlyBrace<'input, 'a> { RightCurlyBrace { tok } } fn make_list_comp<'input, 'a>( lbracket: LeftSquareBracket<'input, 'a>, elt: Expression<'input, 'a>, for_in: CompFor<'input, 'a>, rbracket: RightSquareBracket<'input, 'a>, ) -> ListComp<'input, 'a> { ListComp { elt: Box::new(elt), for_in: Box::new(for_in), lbracket, rbracket, lpar: Default::default(), rpar: Default::default(), } } fn make_set_comp<'input, 'a>( lbrace: LeftCurlyBrace<'input, 'a>, elt: Expression<'input, 'a>, for_in: CompFor<'input, 'a>, rbrace: RightCurlyBrace<'input, 'a>, ) -> SetComp<'input, 'a> { SetComp { elt: Box::new(elt), for_in: Box::new(for_in), lbrace, rbrace, lpar: Default::default(), rpar: Default::default(), } } fn make_dict_comp<'input, 'a>( lbrace: LeftCurlyBrace<'input, 'a>, kvpair: ( Expression<'input, 'a>, TokenRef<'input, 'a>, Expression<'input, 'a>, ), for_in: CompFor<'input, 'a>, rbrace: RightCurlyBrace<'input, 'a>, ) -> DictComp<'input, 'a> { let (key, colon_tok, value) = kvpair; DictComp { key: Box::new(key), value: Box::new(value), for_in: Box::new(for_in), lbrace, rbrace, lpar: vec![], rpar: vec![], colon_tok, } } 
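// Editor's note: the sketch below is illustrative and not part of the original
// grammar. It models the fold in `merge_comp_fors` above: the iterator is
// reversed, so the *first* `for` clause of `[x for a in b for c in d]` ends up
// outermost, with each later clause nested through `inner_for_in` (matching
// Python's semantics, where the first clause is the outermost loop).
#[cfg(test)]
mod comp_for_fold_sketch {
    // A toy stand-in for `CompFor`, keeping only the nesting pointer.
    struct ToyFor {
        name: &'static str,
        inner: Option<Box<ToyFor>>,
    }

    fn merge(fors: Vec<ToyFor>) -> ToyFor {
        let mut it = fors.into_iter().rev();
        let first = it.next().expect("cant merge empty fors");
        // Identical shape to `merge_comp_fors`: each earlier clause wraps the
        // accumulated later clauses.
        it.fold(first, |acc, curr| ToyFor {
            inner: Some(Box::new(acc)),
            ..curr
        })
    }

    #[test]
    fn first_clause_is_outermost() {
        let a = ToyFor { name: "a", inner: None };
        let c = ToyFor { name: "c", inner: None };
        let merged = merge(vec![a, c]);
        assert_eq!(merged.name, "a");
        assert_eq!(merged.inner.unwrap().name, "c");
    }
}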
fn make_list<'input, 'a>( lbracket: LeftSquareBracket<'input, 'a>, elements: Vec>, rbracket: RightSquareBracket<'input, 'a>, ) -> List<'input, 'a> { List { elements, lbracket, rbracket, lpar: Default::default(), rpar: Default::default(), } } fn make_set<'input, 'a>( lbrace: LeftCurlyBrace<'input, 'a>, elements: Vec>, rbrace: RightCurlyBrace<'input, 'a>, ) -> Set<'input, 'a> { Set { elements, lbrace, rbrace, lpar: Default::default(), rpar: Default::default(), } } fn comma_separate<'input, 'a, T>( first: T, rest: Vec<(Comma<'input, 'a>, T)>, trailing_comma: Option>, ) -> Vec where T: WithComma<'input, 'a>, { let mut elements = vec![]; let mut current = first; for (comma, next) in rest { elements.push(current.with_comma(comma)); current = next; } if let Some(comma) = trailing_comma { current = current.with_comma(comma); } elements.push(current); elements } fn make_dict<'input, 'a>( lbrace: LeftCurlyBrace<'input, 'a>, elements: Vec>, rbrace: RightCurlyBrace<'input, 'a>, ) -> Dict<'input, 'a> { Dict { elements, lbrace, rbrace, lpar: Default::default(), rpar: Default::default(), } } fn make_double_starred_keypairs<'input, 'a>( first: DictElement<'input, 'a>, rest: Vec<(Comma<'input, 'a>, DictElement<'input, 'a>)>, trailing_comma: Option>, ) -> Vec> { let mut elements = vec![]; let mut current = first; for (comma, next) in rest { elements.push(current.with_comma(comma)); current = next; } if let Some(comma) = trailing_comma { current = current.with_comma(comma); } elements.push(current); elements } fn make_dict_element<'input, 'a>( el: ( Expression<'input, 'a>, TokenRef<'input, 'a>, Expression<'input, 'a>, ), ) -> DictElement<'input, 'a> { let (key, colon_tok, value) = el; DictElement::Simple { key, value, comma: Default::default(), colon_tok, } } fn make_double_starred_element<'input, 'a>( star_tok: TokenRef<'input, 'a>, value: Expression<'input, 'a>, ) -> StarredDictElement<'input, 'a> { StarredDictElement { value, comma: Default::default(), star_tok, } } fn make_index<'input, 'a>(value: Expression<'input, 'a>) -> BaseSlice<'input, 'a> { BaseSlice::Index(Box::new(Index { value, star: None, star_tok: None, })) } fn make_index_from_arg<'input, 'a>(arg: Arg<'input, 'a>) -> BaseSlice<'input, 'a> { BaseSlice::Index(Box::new(Index { value: arg.value, star: Some(arg.star), star_tok: arg.star_tok, })) } fn make_colon<'input, 'a>(tok: TokenRef<'input, 'a>) -> Colon<'input, 'a> { Colon { tok } } fn make_slice<'input, 'a>( lower: Option>, first_colon: TokenRef<'input, 'a>, upper: Option>, rest: Option<(TokenRef<'input, 'a>, Option>)>, ) -> BaseSlice<'input, 'a> { let first_colon = make_colon(first_colon); let (second_colon, step) = if let Some((tok, step)) = rest { (Some(make_colon(tok)), step) } else { (None, None) }; BaseSlice::Slice(Box::new(Slice { lower, upper, step, first_colon, second_colon, })) } fn make_slices<'input, 'a>( first: BaseSlice<'input, 'a>, rest: Vec<(Comma<'input, 'a>, BaseSlice<'input, 'a>)>, trailing_comma: Option>, ) -> Vec> { let mut elements = vec![]; let mut current = first; for (comma, next) in rest { elements.push(SubscriptElement { slice: current, comma: Some(comma), }); current = next; } elements.push(SubscriptElement { slice: current, comma: trailing_comma, }); elements } fn make_subscript<'input, 'a>( value: Expression<'input, 'a>, lbracket: LeftSquareBracket<'input, 'a>, slice: Vec>, rbracket: RightSquareBracket<'input, 'a>, ) -> Subscript<'input, 'a> { Subscript { value: Box::new(value), slice, lbracket, rbracket, lpar: Default::default(), rpar: Default::default(), } } 
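// Editor's note: illustrative only, not part of the original grammar. It shows
// the comma placement contract of `comma_separate` above: each separating comma
// is attached to the element *preceding* it, and an optional trailing comma is
// attached to the final element, which is how exact source commas survive a
// round-trip. The toy `El` type below is hypothetical.
#[cfg(test)]
mod comma_separate_sketch {
    // Toy element: tracks only whether it owns a trailing comma.
    #[derive(Clone)]
    struct El {
        name: &'static str,
        has_comma: bool,
    }

    impl El {
        fn with_comma(self) -> Self {
            El { has_comma: true, ..self }
        }
    }

    // Same control flow as `comma_separate`, minus the token bookkeeping.
    fn separate(first: El, rest: Vec<El>, trailing_comma: bool) -> Vec<El> {
        let mut elements = vec![];
        let mut current = first;
        for next in rest {
            elements.push(current.with_comma());
            current = next;
        }
        if trailing_comma {
            current = current.with_comma();
        }
        elements.push(current);
        elements
    }

    #[test]
    fn trailing_comma_lands_on_last_element() {
        let els = |n| El { name: n, has_comma: false };
        // `a, b,` -> both elements own a comma; `a, b` -> only the first does.
        let with_trailing = separate(els("a"), vec![els("b")], true);
        assert!(with_trailing.iter().all(|e| e.has_comma && !e.name.is_empty()));
        let without_trailing = separate(els("a"), vec![els("b")], false);
        assert!(!without_trailing[1].has_comma);
    }
}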
fn make_ifexp<'input, 'a>( body: Expression<'input, 'a>, if_tok: TokenRef<'input, 'a>, test: Expression<'input, 'a>, else_tok: TokenRef<'input, 'a>, orelse: Expression<'input, 'a>, ) -> IfExp<'input, 'a> { IfExp { test: Box::new(test), body: Box::new(body), orelse: Box::new(orelse), lpar: Default::default(), rpar: Default::default(), if_tok, else_tok, } } fn add_arguments_trailing_comma<'input, 'a>( mut args: Vec>, trailing_comma: Option>, ) -> Vec> { if let Some(comma) = trailing_comma { let last = args.pop().unwrap(); args.push(last.with_comma(comma)); } args } fn make_lambda<'input, 'a>( lambda_tok: TokenRef<'input, 'a>, params: Parameters<'input, 'a>, colon_tok: TokenRef<'input, 'a>, expr: Expression<'input, 'a>, ) -> Lambda<'input, 'a> { let colon = make_colon(colon_tok); Lambda { params: Box::new(params), body: Box::new(expr), colon, lpar: Default::default(), rpar: Default::default(), lambda_tok, } } fn make_annotation<'input, 'a>( tok: TokenRef<'input, 'a>, ann: Expression<'input, 'a>, ) -> Annotation<'input, 'a> { Annotation { annotation: ann, tok, } } fn make_ann_assignment<'input, 'a>( target: AssignTargetExpression<'input, 'a>, col: TokenRef<'input, 'a>, ann: Expression<'input, 'a>, rhs: Option<(TokenRef<'input, 'a>, Expression<'input, 'a>)>, ) -> AnnAssign<'input, 'a> { let annotation = make_annotation(col, ann); let (eq, value) = rhs.map(|(x, y)| (Some(x), Some(y))).unwrap_or((None, None)); let equal = eq.map(make_assign_equal); AnnAssign { target, annotation, value, equal, semicolon: None, } } fn make_yield<'input, 'a>( yield_tok: TokenRef<'input, 'a>, f: Option>, e: Option>, ) -> Yield<'input, 'a> { let value = match (f, e) { (None, None) => None, (Some(f), Some(e)) => Some(YieldValue::From(Box::new(make_from(f, e)))), (None, Some(e)) => Some(YieldValue::Expression(Box::new(e))), _ => panic!("yield from without expression"), }; Yield { value: value.map(Box::new), lpar: Default::default(), rpar: Default::default(), yield_tok, } } fn make_from<'input, 'a>(tok: TokenRef<'input, 'a>, e: Expression<'input, 'a>) -> From<'input, 'a> { From { item: e, tok } } fn make_return<'input, 'a>( return_tok: TokenRef<'input, 'a>, value: Option>, ) -> Return<'input, 'a> { Return { value, semicolon: Default::default(), return_tok, } } fn make_assert<'input, 'a>( assert_tok: TokenRef<'input, 'a>, test: Expression<'input, 'a>, rest: Option<(Comma<'input, 'a>, Expression<'input, 'a>)>, ) -> Assert<'input, 'a> { let (comma, msg) = if let Some((c, msg)) = rest { (Some(c), Some(msg)) } else { (None, None) }; Assert { test, msg, comma, semicolon: Default::default(), assert_tok, } } fn make_raise<'input, 'a>( raise_tok: TokenRef<'input, 'a>, exc: Option>, rest: Option<(TokenRef<'input, 'a>, Expression<'input, 'a>)>, ) -> Raise<'input, 'a> { let cause = rest.map(|(t, e)| make_from(t, e)); Raise { exc, cause, semicolon: Default::default(), raise_tok, } } fn make_global<'input, 'a>( tok: TokenRef<'input, 'a>, init: Vec<(Name<'input, 'a>, Comma<'input, 'a>)>, last: Name<'input, 'a>, ) -> Global<'input, 'a> { let mut names: Vec> = init .into_iter() .map(|(name, c)| NameItem { name, comma: Some(c), }) .collect(); names.push(NameItem { name: last, comma: None, }); Global { names, semicolon: Default::default(), tok, } } fn make_nonlocal<'input, 'a>( tok: TokenRef<'input, 'a>, init: Vec<(Name<'input, 'a>, Comma<'input, 'a>)>, last: Name<'input, 'a>, ) -> Nonlocal<'input, 'a> { let mut names: Vec> = init .into_iter() .map(|(name, c)| NameItem { name, comma: Some(c), }) .collect(); names.push(NameItem { name: 
last, comma: None, }); Nonlocal { names, semicolon: Default::default(), tok, } } #[allow(clippy::too_many_arguments)] fn make_for<'input, 'a>( async_tok: Option>, for_tok: TokenRef<'input, 'a>, target: AssignTargetExpression<'input, 'a>, in_tok: TokenRef<'input, 'a>, iter: Expression<'input, 'a>, colon_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, orelse: Option>, ) -> For<'input, 'a> { let asynchronous = async_tok.as_ref().map(|_| make_async()); For { target, iter, body, orelse, asynchronous, async_tok, for_tok, in_tok, colon_tok, } } fn make_while<'input, 'a>( while_tok: TokenRef<'input, 'a>, test: Expression<'input, 'a>, colon_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, orelse: Option>, ) -> While<'input, 'a> { While { test, body, orelse, while_tok, colon_tok, } } fn make_await<'input, 'a>( await_tok: TokenRef<'input, 'a>, expression: Expression<'input, 'a>, ) -> Await<'input, 'a> { Await { expression: Box::new(expression), lpar: Default::default(), rpar: Default::default(), await_tok, } } fn make_class_def<'input, 'a>( class_tok: TokenRef<'input, 'a>, name: Name<'input, 'a>, type_parameters: Option>, args: Option<( LeftParen<'input, 'a>, Option>>, RightParen<'input, 'a>, )>, colon_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, ) -> std::result::Result, &'static str> { let mut bases = vec![]; let mut keywords = vec![]; let mut lpar_tok = None; let mut rpar_tok = None; let mut lpar = None; let mut rpar = None; if let Some((lpar_, args, rpar_)) = args { lpar_tok = Some(lpar_.lpar_tok); rpar_tok = Some(rpar_.rpar_tok); lpar = Some(lpar_); rpar = Some(rpar_); if let Some(args) = args { let mut current_arg = &mut bases; let mut seen_keyword = false; for arg in args { if arg.star == "**" || arg.keyword.is_some() { current_arg = &mut keywords; seen_keyword = true; } if seen_keyword && (arg.star == "*" || (arg.star.is_empty() && arg.keyword.is_none())) { return Err("Positional argument follows keyword argument"); } // TODO: libcst-python does validation here current_arg.push(arg); } } } Ok(ClassDef { name, type_parameters, body, bases, keywords, decorators: vec![], lpar, rpar, class_tok, lpar_tok, rpar_tok, colon_tok, }) } fn make_string<'input, 'a>(tok: TokenRef<'input, 'a>) -> String<'input, 'a> { String::Simple(SimpleString { value: tok.string, ..Default::default() }) } fn make_strings<'input, 'a>( s: Vec<(String<'input, 'a>, TokenRef<'input, 'a>)>, ) -> GrammarResult> { if s.len() > MAX_RECURSION_DEPTH { return Err("shorter concatenated string"); } let mut strings = s.into_iter().rev(); let (first, _) = strings.next().expect("no strings to make a string of"); Ok(strings.fold(first, |acc, (str, tok)| { let ret: String<'input, 'a> = String::Concatenated(ConcatenatedString { left: Box::new(str), right: Box::new(acc), lpar: Default::default(), rpar: Default::default(), right_tok: tok, }); ret })) } fn make_fstring_expression<'input, 'a>( lbrace_tok: TokenRef<'input, 'a>, expression: Expression<'input, 'a>, eq: Option>, conversion_pair: Option<(TokenRef<'input, 'a>, &'a str)>, format_pair: Option<( TokenRef<'input, 'a>, Vec>, )>, rbrace_tok: TokenRef<'input, 'a>, ) -> FormattedStringExpression<'input, 'a> { let equal = eq.map(make_assign_equal); let (conversion_tok, conversion) = if let Some((t, c)) = conversion_pair { (Some(t), Some(c)) } else { (None, None) }; let (format_tok, format_spec) = if let Some((t, f)) = format_pair { (Some(t), Some(f)) } else { (None, None) }; let after_expr_tok = if equal.is_some() { None } else if let Some(tok) = conversion_tok { Some(tok) } else 
if let Some(tok) = format_tok { Some(tok) } else { Some(rbrace_tok) }; FormattedStringExpression { expression, conversion, format_spec, equal, lbrace_tok, after_expr_tok, } } fn make_fstring<'input, 'a>( start: &'a str, parts: Vec>, end: &'a str, ) -> FormattedString<'input, 'a> { FormattedString { start, parts, end, lpar: Default::default(), rpar: Default::default(), } } fn make_finally<'input, 'a>( finally_tok: TokenRef<'input, 'a>, colon_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, ) -> Finally<'input, 'a> { Finally { body, finally_tok, colon_tok, } } fn make_except<'input, 'a>( except_tok: TokenRef<'input, 'a>, exp: Option>, as_: Option<(TokenRef<'input, 'a>, Name<'input, 'a>)>, colon_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, ) -> ExceptHandler<'input, 'a> { // TODO: AsName should come from outside let name = as_.map(|(x, y)| make_as_name(x, AssignTargetExpression::Name(Box::new(y)))); ExceptHandler { body, r#type: exp, name, except_tok, colon_tok, } } fn make_except_star<'input, 'a>( except_tok: TokenRef<'input, 'a>, star_tok: TokenRef<'input, 'a>, exp: Expression<'input, 'a>, as_: Option<(TokenRef<'input, 'a>, Name<'input, 'a>)>, colon_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, ) -> ExceptStarHandler<'input, 'a> { // TODO: AsName should come from outside let name = as_.map(|(x, y)| make_as_name(x, AssignTargetExpression::Name(Box::new(y)))); ExceptStarHandler { body, r#type: exp, name, except_tok, colon_tok, star_tok, } } fn make_try<'input, 'a>( try_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, handlers: Vec>, orelse: Option>, finalbody: Option>, ) -> Try<'input, 'a> { Try { body, handlers, orelse, finalbody, try_tok, } } fn make_try_star<'input, 'a>( try_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, handlers: Vec>, orelse: Option>, finalbody: Option>, ) -> TryStar<'input, 'a> { TryStar { body, handlers, orelse, finalbody, try_tok, } } fn make_aug_op<'input, 'a>(tok: TokenRef<'input, 'a>) -> Result<'a, AugOp<'input, 'a>> { Ok(match tok.string { "+=" => AugOp::AddAssign { tok }, "-=" => AugOp::SubtractAssign { tok }, "*=" => AugOp::MultiplyAssign { tok }, "@=" => AugOp::MatrixMultiplyAssign { tok }, "/=" => AugOp::DivideAssign { tok }, "%=" => AugOp::ModuloAssign { tok }, "&=" => AugOp::BitAndAssign { tok }, "|=" => AugOp::BitOrAssign { tok }, "^=" => AugOp::BitXorAssign { tok }, "<<=" => AugOp::LeftShiftAssign { tok }, ">>=" => AugOp::RightShiftAssign { tok }, "**=" => AugOp::PowerAssign { tok }, "//=" => AugOp::FloorDivideAssign { tok }, _ => return Err(ParserError::OperatorError), }) } fn make_aug_assign<'input, 'a>( target: AssignTargetExpression<'input, 'a>, operator: AugOp<'input, 'a>, value: Expression<'input, 'a>, ) -> AugAssign<'input, 'a> { AugAssign { target, operator, value, semicolon: Default::default(), } } fn make_with_item<'input, 'a>( item: Expression<'input, 'a>, as_: Option>, n: Option>, ) -> WithItem<'input, 'a> { let asname = match (as_, n) { (Some(as_), Some(n)) => Some(make_as_name(as_, n)), (None, None) => None, _ => panic!("as and name should be present or missing together"), }; WithItem { item, asname, comma: Default::default(), } } fn make_with<'input, 'a>( async_tok: Option>, with_tok: TokenRef<'input, 'a>, lpar: Option>, items: Vec>, rpar: Option>, colon_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, ) -> With<'input, 'a> { let asynchronous = async_tok.as_ref().map(|_| make_async()); With { items, body, asynchronous, lpar, rpar, async_tok, with_tok, colon_tok, } } fn make_del<'input, 'a>( tok: TokenRef<'input, 
'a>, target: DelTargetExpression<'input, 'a>, ) -> Del<'input, 'a> { Del { target, semicolon: Default::default(), tok, } } fn make_del_tuple<'input, 'a>( lpar: Option>, elements: Vec>, rpar: Option>, ) -> DelTargetExpression<'input, 'a> { DelTargetExpression::Tuple(Box::new(Tuple { elements, lpar: lpar.map(|x| vec![x]).unwrap_or_default(), rpar: rpar.map(|x| vec![x]).unwrap_or_default(), })) } fn make_named_expr<'input, 'a>( name: Name<'input, 'a>, tok: TokenRef<'input, 'a>, expr: Expression<'input, 'a>, ) -> NamedExpr<'input, 'a> { NamedExpr { target: Box::new(Expression::Name(Box::new(name))), value: Box::new(expr), lpar: Default::default(), rpar: Default::default(), walrus_tok: tok, } } fn make_match<'input, 'a>( match_tok: TokenRef<'input, 'a>, subject: Expression<'input, 'a>, colon_tok: TokenRef<'input, 'a>, indent_tok: TokenRef<'input, 'a>, cases: Vec>, dedent_tok: TokenRef<'input, 'a>, ) -> Match<'input, 'a> { Match { subject, cases, indent: Default::default(), match_tok, colon_tok, indent_tok, dedent_tok, } } fn make_case<'input, 'a>( case_tok: TokenRef<'input, 'a>, pattern: MatchPattern<'input, 'a>, guard: Option<(TokenRef<'input, 'a>, Expression<'input, 'a>)>, colon_tok: TokenRef<'input, 'a>, body: Suite<'input, 'a>, ) -> MatchCase<'input, 'a> { let (if_tok, guard) = match guard { Some((if_tok, guard)) => (Some(if_tok), Some(guard)), None => (None, None), }; MatchCase { pattern, guard, body, case_tok, if_tok, colon_tok, } } fn make_match_value<'input, 'a>(value: Expression<'input, 'a>) -> MatchPattern<'input, 'a> { MatchPattern::Value(MatchValue { value }) } fn make_match_singleton<'input, 'a>(value: Name<'input, 'a>) -> MatchPattern<'input, 'a> { MatchPattern::Singleton(MatchSingleton { value }) } fn make_list_pattern<'input, 'a>( lbracket: Option>, patterns: Vec>, rbracket: Option>, ) -> MatchSequence<'input, 'a> { MatchSequence::MatchList(MatchList { patterns, lbracket, rbracket, lpar: Default::default(), rpar: Default::default(), }) } fn make_as_pattern<'input, 'a>( pattern: Option>, as_tok: Option>, name: Option>, ) -> MatchPattern<'input, 'a> { MatchPattern::As(Box::new(MatchAs { pattern, name, lpar: Default::default(), rpar: Default::default(), as_tok, })) } fn make_bit_or<'input, 'a>(tok: TokenRef<'input, 'a>) -> BitOr<'input, 'a> { BitOr { tok } } fn make_or_pattern<'input, 'a>( first: MatchPattern<'input, 'a>, rest: Vec<(TokenRef<'input, 'a>, MatchPattern<'input, 'a>)>, ) -> MatchPattern<'input, 'a> { if rest.is_empty() { return first; } let mut patterns = vec![]; let mut current = first; for (sep, next) in rest { let op = make_bit_or(sep); patterns.push(MatchOrElement { pattern: current, separator: Some(op), }); current = next; } patterns.push(MatchOrElement { pattern: current, separator: None, }); MatchPattern::Or(Box::new(MatchOr { patterns, lpar: Default::default(), rpar: Default::default(), })) } fn ensure_real_number<'input, 'a>( tok: TokenRef<'input, 'a>, ) -> GrammarResult> { match make_number(tok) { e @ (Expression::Integer(_) | Expression::Float(_)) => Ok(e), _ => Err("real number"), } } fn ensure_imaginary_number<'input, 'a>( tok: TokenRef<'input, 'a>, ) -> GrammarResult> { match make_number(tok) { e @ Expression::Imaginary(_) => Ok(e), _ => Err("imaginary number"), } } fn make_tuple_pattern<'input, 'a>( lpar: LeftParen<'input, 'a>, patterns: Vec>, rpar: RightParen<'input, 'a>, ) -> MatchSequence<'input, 'a> { MatchSequence::MatchTuple(MatchTuple { patterns, lpar: vec![lpar], rpar: vec![rpar], }) } fn make_open_sequence_pattern<'input, 'a>( first: 
StarrableMatchSequenceElement<'input, 'a>, comma: Comma<'input, 'a>, mut rest: Vec>, ) -> Vec> { rest.insert(0, first.with_comma(comma)); rest } fn make_match_sequence_element<'input, 'a>( value: MatchPattern<'input, 'a>, ) -> MatchSequenceElement<'input, 'a> { MatchSequenceElement { value, comma: Default::default(), } } fn make_match_star<'input, 'a>( star_tok: TokenRef<'input, 'a>, name: Option>, ) -> MatchStar<'input, 'a> { MatchStar { name, comma: Default::default(), star_tok, } } fn make_match_mapping<'input, 'a>( lbrace: LeftCurlyBrace<'input, 'a>, mut elements: Vec>, el_comma: Option>, star_tok: Option>, rest: Option>, trailing_comma: Option>, rbrace: RightCurlyBrace<'input, 'a>, ) -> MatchPattern<'input, 'a> { if let Some(c) = el_comma { if let Some(el) = elements.pop() { elements.push(el.with_comma(c)); } // TODO: else raise error } MatchPattern::Mapping(MatchMapping { elements, rest, trailing_comma, lbrace, rbrace, lpar: Default::default(), rpar: Default::default(), star_tok, }) } fn make_match_mapping_element<'input, 'a>( key: Expression<'input, 'a>, colon_tok: TokenRef<'input, 'a>, pattern: MatchPattern<'input, 'a>, ) -> MatchMappingElement<'input, 'a> { MatchMappingElement { key, pattern, comma: Default::default(), colon_tok, } } fn make_class_pattern<'input, 'a>( cls: NameOrAttribute<'input, 'a>, lpar_tok: TokenRef<'input, 'a>, mut patterns: Vec>, pat_comma: Option>, mut kwds: Vec>, kwd_comma: Option>, rpar_tok: TokenRef<'input, 'a>, ) -> MatchPattern<'input, 'a> { if let Some(c) = pat_comma { if let Some(el) = patterns.pop() { patterns.push(el.with_comma(c)); } // TODO: else raise error } if let Some(c) = kwd_comma { if let Some(el) = kwds.pop() { kwds.push(el.with_comma(c)); } // TODO: else raise error } MatchPattern::Class(MatchClass { cls, patterns, kwds, lpar: Default::default(), rpar: Default::default(), lpar_tok, rpar_tok, }) } fn make_match_keyword_element<'input, 'a>( key: Name<'input, 'a>, equal_tok: TokenRef<'input, 'a>, pattern: MatchPattern<'input, 'a>, ) -> MatchKeywordElement<'input, 'a> { MatchKeywordElement { key, pattern, comma: Default::default(), equal_tok, } } struct TypeParamBound<'input, 'a>(TokenRef<'input, 'a>, Expression<'input, 'a>); fn make_type_param_bound<'input, 'a>( colon_tok: TokenRef<'input, 'a>, e: Expression<'input, 'a>, ) -> TypeParamBound<'input, 'a> { TypeParamBound(colon_tok, e) } fn make_param_spec<'input, 'a>( star_tok: TokenRef<'input, 'a>, name: Name<'input, 'a>, ) -> TypeParam<'input, 'a> { TypeParam { param: TypeVarLike::ParamSpec(ParamSpec { name, star_tok }), comma: Default::default(), } } fn make_type_var_tuple<'input, 'a>( star_tok: TokenRef<'input, 'a>, name: Name<'input, 'a>, ) -> TypeParam<'input, 'a> { TypeParam { param: TypeVarLike::TypeVarTuple(TypeVarTuple { name, star_tok }), comma: Default::default(), } } fn make_type_var<'input, 'a>( name: Name<'input, 'a>, bound: Option>, ) -> TypeParam<'input, 'a> { let (bound, colon) = match bound { Some(TypeParamBound(c, e)) => (Some(Box::new(e)), Some(make_colon(c))), _ => (None, None), }; TypeParam { param: TypeVarLike::TypeVar(TypeVar { name, bound, colon }), comma: Default::default(), } } fn make_type_parameters<'input, 'a>( lbracket: LeftSquareBracket<'input, 'a>, params: Vec>, rbracket: RightSquareBracket<'input, 'a>, ) -> TypeParameters<'input, 'a> { TypeParameters { lbracket, params, rbracket, } } fn make_type_alias<'input, 'a>( type_tok: TokenRef<'input, 'a>, name: Name<'input, 'a>, type_parameters: Option>, equals_tok: TokenRef<'input, 'a>, value: Expression<'input, 
'a>,
) -> TypeAlias<'input, 'a> {
    let lbracket_tok = if let Some(tp) = &type_parameters {
        Some(tp.lbracket.tok)
    } else {
        None
    };
    TypeAlias {
        type_tok,
        name,
        type_parameters,
        equals_tok,
        value: Box::new(value),
        semicolon: Default::default(),
        lbracket_tok,
    }
}

LibCST-1.2.0/native/libcst/src/parser/mod.rs

// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree

mod errors;
mod grammar;
mod numbers;

pub use errors::ParserError;
pub(crate) use grammar::TokVec;
pub use grammar::{python, Result};

LibCST-1.2.0/native/libcst/src/parser/numbers.rs

// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree

use regex::Regex;

use crate::nodes::deflated::{Expression, Float, Imaginary, Integer};

static HEX: &str = r"0[xX](?:_?[0-9a-fA-F])+";
static BIN: &str = r"0[bB](?:_?[01])+";
static OCT: &str = r"0[oO](?:_?[0-7])+";
static DECIMAL: &str = r"(?:0(?:_?0)*|[1-9](?:_?[0-9])*)";
static EXPONENT: &str = r"[eE][-+]?[0-9](?:_?[0-9])*";
// Note: these don't exactly match the Python implementation (exponent is not included)
static POINT_FLOAT: &str = r"([0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?|\.[0-9](?:_?[0-9])*)";
static EXP_FLOAT: &str = r"[0-9](?:_?[0-9])*";

thread_local! {
    static INTEGER_RE: Regex =
        Regex::new(format!("^({}|{}|{}|{})$", HEX, BIN, OCT, DECIMAL).as_str()).expect("regex");
    static FLOAT_RE: Regex = Regex::new(
        format!(
            "^({}({})?|{}{})$",
            POINT_FLOAT, EXPONENT, EXP_FLOAT, EXPONENT
        )
        .as_str(),
    )
    .expect("regex");
    static IMAGINARY_RE: Regex = Regex::new(
        format!(
            r"^([0-9](?:_?[0-9])*[jJ]|({}({})?|{}{})[jJ])$",
            POINT_FLOAT, EXPONENT, EXP_FLOAT, EXPONENT
        )
        .as_str(),
    )
    .expect("regex");
}

pub(crate) fn parse_number(raw: &str) -> Expression {
    if INTEGER_RE.with(|r| r.is_match(raw)) {
        Expression::Integer(Box::new(Integer {
            value: raw,
            lpar: Default::default(),
            rpar: Default::default(),
        }))
    } else if FLOAT_RE.with(|r| r.is_match(raw)) {
        Expression::Float(Box::new(Float {
            value: raw,
            lpar: Default::default(),
            rpar: Default::default(),
        }))
    } else if IMAGINARY_RE.with(|r| r.is_match(raw)) {
        Expression::Imaginary(Box::new(Imaginary {
            value: raw,
            lpar: Default::default(),
            rpar: Default::default(),
        }))
    } else {
        // Fall back to Integer if none of the patterns match; the tokenizer is
        // expected to have rejected malformed number literals before this point.
        Expression::Integer(Box::new(Integer {
            value: raw,
            lpar: Default::default(),
            rpar: Default::default(),
        }))
    }
}
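// Editor's note (illustrative, based on the regexes above): `parse_number`
// classifies a literal by the first pattern that matches the whole string:
//
//     parse_number("0x_ff")   // Integer   (HEX)
//     parse_number("1_000")   // Integer   (DECIMAL)
//     parse_number("1.5e-3")  // Float     (POINT_FLOAT + EXPONENT)
//     parse_number(".5")      // Float     (leading-dot POINT_FLOAT)
//     parse_number("2j")      // Imaginary
//
// A string matching none of the patterns falls back to `Integer`, as noted in
// the function body.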
LibCST-1.2.0/native/libcst/src/py.rs

// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree

use crate::nodes::traits::py::TryIntoPy;
use pyo3::prelude::*;

#[pymodule]
#[pyo3(name = "native")]
pub fn libcst_native(_py: Python, m: &PyModule) -> PyResult<()> {
    #[pyfn(m)]
    fn parse_module(source: String, encoding: Option<&str>) -> PyResult<PyObject> {
        let m = crate::parse_module(source.as_str(), encoding)?;
        Python::with_gil(|py| m.try_into_py(py))
    }

    #[pyfn(m)]
    fn parse_expression(source: String) -> PyResult<PyObject> {
        let expr = crate::parse_expression(source.as_str())?;
        Python::with_gil(|py| expr.try_into_py(py))
    }

    #[pyfn(m)]
    fn parse_statement(source: String) -> PyResult<PyObject> {
        let stm = crate::parse_statement(source.as_str())?;
        Python::with_gil(|py| stm.try_into_py(py))
    }

    Ok(())
}

LibCST-1.2.0/native/libcst/src/tokenizer/core/LICENSE

PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2

1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation.

2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee.

3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python.

4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.

5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.

7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.

8.
By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. LibCST-1.2.0/native/libcst/src/tokenizer/core/README.md000066400000000000000000000001641456464173300223660ustar00rootroot00000000000000Files in this directory are a derivative of CPython's tokenizer, and are therefore available under the PSF license. LibCST-1.2.0/native/libcst/src/tokenizer/core/mod.rs000066400000000000000000001400201456464173300222300ustar00rootroot00000000000000// This implementation is Copyright (c) Meta Platforms, Inc. and affiliates. // // CPython 3.10.0a5 and the original C code this is based on is // Copyright (c) 2001-2021 Python Software Foundation; All Rights Reserved // // Portions of this module (f-string splitting) are based on parso's tokenize.py, which is also PSF // licensed. /// A port of CPython's tokenizer.c to Rust, with the following significant modifications: /// /// - PEP 263 (encoding detection) support isn't implemented. We depend on other code to do this for /// us right now, and expect that the input is utf-8 by the time we see it. /// /// - Removed support for tokenizing from a file handle without reading the whole file in at once. /// This significantly complicates parsing and memory is cheap, so we require that the whole file /// is read in and converted to a unicode string before tokenization can begin. /// /// - Removed support for the interactive interpreter parsing mode. /// /// - Tweaked the `translate_newlines` functionality and moved most of it into TextPosition. `\r` /// characters are no longer removed from the input buffer, so strings may contain `\r` characters /// that should be normalized prior to being interpreted. /// /// - Added support for tracking more detailed position information via TextPosition. As a /// consequence, consuming and then backing up a character (`tok_nextc`/`tok_backup`) is more /// expensive, and we prefer to call `TextPosition::peek()` instead. /// /// - Removed support for tokenizing type comments. /// /// - Reduced the number of different supported token types to match what parso's tokenizer yields. /// /// - Uses some regular expressions. Regular expression are a good fit for a tokenizer, but we don't /// use regular expressions everywhere because we can't generate as good of error messages with /// them. /// /// - Added support for breaking apart f-strings into multiple tokens, matching Parso's tokenizer /// behavior. CPython instead runs the parser recursively to parse f-strings. /// /// Also, in general, the code is less tightly optimized. The CPython implementation is crazy /// optimized in ways that wouldn't translate well to rust (e.g. it parses the input utf-8 buffer as /// raw bytes instead of unicode codepoints). /// /// The implementation should still be faster than any pure-Python implementation, and most /// optimizations (avoiding string copies when slicing) carry over to Rust very well. /// /// Planned (not yet implemented) features: /// /// - Add more feature flags to more closely match the behavior of older versions of Python 3.x. /// /// - Support for a Python 2 mode that tokenizes Python 2.7 code and fails on certain new Python 3 /// syntax that wasn't supported in 2.7. /// /// - Maybe add back support for tokenizing type comments? /// /// This implementation is tailored to LibCST's needs. If you're looking for a more general-purpose /// pure-Rust Python parser, consider using [RustPython's parser][]. 
/// /// [RustPython's parser]: https://crates.io/crates/rustpython-parser mod string_types; use regex::Regex; use std::cell::RefCell; use std::cmp::Ordering; use std::convert::TryInto; use std::fmt::Debug; use std::fmt::Formatter; use std::rc::Rc; use crate::tokenizer::{ core::string_types::{FStringNode, StringQuoteChar, StringQuoteSize}, operators::OPERATOR_RE, text_position::{TextPosition, TextPositionSnapshot}, whitespace_parser::State as WhitespaceState, }; /// The maximum number of indentation levels at any given point in time. CPython's tokenizer.c caps /// this to avoid the complexity of allocating a dynamic array, but we're using a Vec, so it's not /// necessary, but we're keeping it to maintain compatibility. const MAX_INDENT: usize = 100; // MAX_CHAR should be std::char::MAX once assoc_char_consts is stablized. // https://github.com/rust-lang/rust/issues/71763 const MAX_CHAR: char = '\u{10ffff}'; thread_local! { static SPACE_TAB_FORMFEED_RE: Regex = Regex::new(r"\A[ \f\t]+").expect("regex"); static ANY_NON_NEWLINE_RE: Regex = Regex::new(r"\A[^\r\n]+").expect("regex"); static STRING_PREFIX_RE: Regex = Regex::new(r"\A(?i)(u|[bf]r|r[bf]|r|b|f)").expect("regex"); static POTENTIAL_IDENTIFIER_TAIL_RE: Regex = Regex::new(r"\A([a-zA-Z0-9_]|[^\x00-\x7f])+").expect("regex"); static DECIMAL_DOT_DIGIT_RE: Regex = Regex::new(r"\A\.[0-9]").expect("regex"); static DECIMAL_TAIL_RE: Regex = Regex::new(r"\A[0-9](_?[0-9])*").expect("regex"); static HEXADECIMAL_TAIL_RE: Regex = Regex::new(r"\A(_?[0-9a-fA-F])+").expect("regex"); static OCTAL_TAIL_RE: Regex = Regex::new(r"\A(_?[0-7])+").expect("regex"); static BINARY_TAIL_RE: Regex = Regex::new(r"\A(_?[01])+").expect("regex"); /// Used to verify identifiers when there's a non-ascii character in them. // This changes across unicode revisions. We'd need to ship our own unicode tables to 100% match a // given Python version's behavior. 
static UNICODE_IDENTIFIER_RE: Regex = Regex::new(r"\A[\p{XID_Start}_]\p{XID_Continue}*\z").expect("regex"); } #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum TokType { String, Name, Number, Op, Newline, Indent, Dedent, Async, Await, FStringStart, FStringString, FStringEnd, EndMarker, } #[derive(Debug, thiserror::Error, Eq, PartialEq)] pub enum TokError<'t> { #[error("inconsistent mixing of tabs and spaces")] TabSpace, #[error("too many indentation levels")] TooDeep, #[error("no matching outer block for dedent")] Dedent, #[error("unexpected characters after a line continuation")] LineContinuation, #[error("unexpected end of file after a line continuation")] LineContinuationEof, #[error("{0:?} is not a valid identifier")] BadIdentifier(&'t str), #[error("invalid decimal literal")] BadDecimal, #[error( "{}{}", "leading zeros in decimal integer literals are not permitted; use an 0o prefix for octal ", "integers" )] BadDecimalLeadingZeros, #[error("invalid hexadecimal literal")] BadHexadecimal, #[error("invalid octal literal")] BadOctal, #[error("invalid digit {0:?} in octal literal")] BadOctalDigit(char), #[error("invalid binary literal")] BadBinary, #[error("invalid digit {0:?} in binary literal")] BadBinaryDigit(char), #[error("unterminated string literal")] UnterminatedString, #[error("unterminated triple-quoted string literal")] UnterminatedTripleQuotedString, #[error("unmatched {0:?}")] UnmatchedClosingParen(char), #[error("Closing parenthesis {1:?} does not match opening parenthesis {0:?}")] MismatchedClosingParen(char, char), #[error("Closing parenthesis {1:?} does not match opening parenthesis {0:?} on line {2:}")] MismatchedClosingParenOnLine(char, char, usize), #[error("{0:?} is not a valid character in this position")] BadCharacter(char), } // Clone is used for async_hacks, which needs to speculatively look-ahead one token. #[derive(Clone)] pub struct TokState<'t> { /// The full program's source code (similar to `tok->str` or `tok->buf` in the CPython source /// code). We don't support reading the file line-by-line from a file handle like CPython does, /// so this is the whole program pre-converted to utf-8. pub text_pos: TextPosition<'t>, /// Start of the most recently returned token. pub start_pos: TextPositionSnapshot, /// True after we've encountered an error or there's no more text to process. done: bool, /// How many spaces a tab counts as (always 8) tab_size: usize, /// How many spaces a tab counts as in alt_indent_stack (always 1) alt_tab_size: usize, /// Stack of indentation levels where a tab is counted as 8 characters, used for tracking /// dedents. Length is current indentation level. Should never have more than MAX_INDENT /// entries. indent_stack: Vec, /// Used to check that tabs and spaces are not mixed. alt_indent_stack: Vec, /// Beginning of line. True if at the beginning of a new line. at_bol: bool, /// The number of bytes at the beginning of the line, as measured by consume_bol_whitespace. /// Used by libcst to capture (and then validate and parse) the indentation. pub bol_width: usize, /// Set by `consume_bol_whitespace`, true if the current line is blank. blank_line: bool, /// Pending intents (if > 0) or dedents (if < 0). Used when multiple tokens need to be produced /// at once. pending_indents: i32, /// Length is `() [] {}` parenthesis nesting level. Used to allow free continuations inside /// them. Stack entries are to verify that closing parenthesis match opening parenthesis. /// Tuple is (character, lineno). 
paren_stack: Vec<(char, usize)>, /// Whether we're in a continuation line. cont_line: bool, /// True if async/await aren't always keywords. async_hacks: bool, /// True if tokens are inside an 'async def' body. async_def: bool, /// Indentation level of the outermost 'async def'. async_def_indent: usize, /// True if the outermost 'async def' had at least one NEWLINE token after it. async_def_nl: bool, /// Splits f-strings into multiple tokens instead of a STRING token if true. /// /// CPython doesn't directly split f-strings in the tokenizer (and therefore doesn't support /// this option). Instead, when the parser encounters an f-string, it recursively re-runs the /// tokenizer and parser. /// /// Supporting this at the tokenizer-level is pretty nasty and adds a lot of complexity. /// Eventually, we should probably support this at the parser-level instead. split_fstring: bool, fstring_stack: Vec, missing_nl_before_eof: bool, } pub struct TokConfig { /// Used in Python 3.5 and 3.6. If enabled, async/await are sometimes keywords and sometimes /// identifiers, depending on if they're being used in the context of an async function. This /// breaks async comprehensions outside of async functions. pub async_hacks: bool, pub split_fstring: bool, // Not currently supported: // type_comments: bool, } fn is_digit>>(ch: C) -> bool { matches!(ch.into(), Some('0'..='9')) } #[derive(Debug)] enum NumberState { StartDigit, Fraction, Exponent, Imaginary, } impl<'t> TokState<'t> { pub fn new(text: &'t str, config: &TokConfig) -> Self { let text_pos = TextPosition::new(text); let start_pos = (&text_pos).into(); Self { text_pos, start_pos, done: false, tab_size: 8, alt_tab_size: 1, indent_stack: Vec::new(), alt_indent_stack: Vec::new(), at_bol: true, bol_width: 0, blank_line: false, pending_indents: 0, paren_stack: Vec::new(), cont_line: false, async_hacks: config.async_hacks, async_def: false, async_def_indent: 0, async_def_nl: false, split_fstring: config.split_fstring, fstring_stack: Vec::new(), missing_nl_before_eof: text.is_empty() || text.as_bytes()[text.len() - 1] != b'\n', } } pub fn is_parenthesized(&self) -> bool { !self.paren_stack.is_empty() } /// Implementation of `next()`, wrapped by next() to allow for easier error handling. Roughly /// equivalent to `tok_get` in the C source code. fn next_inner(&mut self) -> Result> { if self.split_fstring { if let Some(tos) = self.fstring_stack.last() { if !tos.is_in_expr() { self.start_pos = (&self.text_pos).into(); let is_in_format_spec = tos.is_in_format_spec(); let is_raw_string = tos.is_raw_string; if let Some(tok) = self.maybe_consume_fstring_string(is_in_format_spec, is_raw_string)? { return Ok(tok); } if let Some(tok) = self.maybe_consume_fstring_end() { return Ok(tok); } } } } // This will never consume a token, but it may set blank_line and it may set // pending_indents. 
self.consume_bol_whitespace()?; // Return pending indents/dedents if let Some(t) = self.process_pending_indents() { self.start_pos = (&self.text_pos).into(); return Ok(t); } self.maybe_close_async_def(); 'again: loop { // Skip spaces SPACE_TAB_FORMFEED_RE.with(|v| self.text_pos.consume(v)); // Skip comment, unless it's a type comment if self.text_pos.peek() == Some('#') { ANY_NON_NEWLINE_RE.with(|v| self.text_pos.consume(v)); // type_comment is not supported } // Set start of current token self.start_pos = (&self.text_pos).into(); return match self.text_pos.peek() { // Check for EOF now None => { if self.missing_nl_before_eof && !self.blank_line { self.at_bol = true; self.missing_nl_before_eof = false; Ok(TokType::Newline) } else { let hanging_indents = self.indent_stack.len() as i32; if self.pending_indents == 0 && hanging_indents != 0 { // We've reached EOF but there are still pending indents not // accounted for. Flush them out. self.pending_indents = -hanging_indents; self.indent_stack.clear(); self.alt_indent_stack.clear(); self.missing_nl_before_eof = false; } if let Some(t) = self.process_pending_indents() { Ok(t) } else { Ok(TokType::EndMarker) } } } // Identifier (most frequent token!) Some('a'..='z') | Some('A'..='Z') | Some('_') | Some('\u{80}'..=MAX_CHAR) => { self.consume_identifier_or_prefixed_string() } // Newline Some('\n') => { self.text_pos.next(); self.at_bol = true; if self.split_fstring && self.fstring_stack.last().map(|node| node.allow_multiline()) == Some(false) { Err(TokError::UnterminatedString) } else if self.blank_line || !self.paren_stack.is_empty() { // this newline doesn't count // recurse (basically `goto nextline`) self.next_inner() } else { self.cont_line = false; if self.async_def { self.async_def_nl = true; } Ok(TokType::Newline) } } // Ellipsis Some('.') if self.text_pos.consume("...") => { return Ok(TokType::Op); } // Number starting with period Some('.') if DECIMAL_DOT_DIGIT_RE.with(|r| self.text_pos.matches(r)) => { self.consume_number(NumberState::Fraction) } // Dot Some('.') => { self.text_pos.next(); Ok(TokType::Op) } // Number Some('0'..='9') => self.consume_number(NumberState::StartDigit), // String Some('\'') | Some('"') => self.consume_string(), // Line continuation Some('\\') => { self.text_pos.next(); if let Some('\n') = self.text_pos.next() { if self.text_pos.peek() == None { Err(TokError::LineContinuationEof) } else { self.cont_line = true; // Read next line continue 'again; } } else { Err(TokError::LineContinuation) } } Some(ch @ '(') | Some(ch @ '[') | Some(ch @ '{') => { self.text_pos.next(); if let Some(tos) = self.fstring_stack.last_mut() { tos.open_parentheses(); } self.paren_stack.push((ch, self.text_pos.line_number())); Ok(TokType::Op) } Some(closing @ ')') | Some(closing @ ']') | Some(closing @ '}') => { self.text_pos.next(); if let Some(tos) = self.fstring_stack.last_mut() { tos.close_parentheses(); } if let Some((opening, line_number)) = self.paren_stack.pop() { match (opening, closing) { ('(', ')') | ('[', ']') | ('{', '}') => Ok(TokType::Op), _ => { if line_number != self.text_pos.line_number() { Err(TokError::MismatchedClosingParenOnLine( opening, closing, line_number, )) } else { Err(TokError::MismatchedClosingParen(opening, closing)) } } } } else { Err(TokError::UnmatchedClosingParen(closing)) } } Some(':') if self .fstring_stack .last() .map(|tos| tos.parentheses_count - tos.format_spec_count == 1) .unwrap_or(false) => { // N.B. This may capture the walrus operator and pass it to the formatter. // That's intentional. 
PEP 572 says: "Assignment expressions inside of f-strings // require parentheses." // // >>> f'{x:=10}' # Valid, passes '=10' to formatter let tos = self .fstring_stack .last_mut() .expect("fstring_stack is not empty"); tos.format_spec_count += 1; self.text_pos.next(); Ok(TokType::Op) } // Operator Some(_) if OPERATOR_RE.with(|r| self.text_pos.consume(r)) => Ok(TokType::Op), // Bad character // If nothing works, fall back to this error. CPython returns an OP in this case, // and then just relies on the parser to generate a generic syntax error. Some(ch) => Err(TokError::BadCharacter(ch)), }; } } /// Consumes the whitespace (and comments) at the beginning of the line. May emit an error. Will /// mutate `pending_indents`, so you must check `pending_indents` after calling this. fn consume_bol_whitespace(&mut self) -> Result<(), TokError<'t>> { self.blank_line = false; if !self.at_bol { return Ok(()); } let mut col = 0; // column where tab counts as 8 characters let mut altcol = 0; // column where tab counts as 1 character self.at_bol = false; self.bol_width = 0; // consume space, tab, and formfeed characters loop { match self.text_pos.peek() { Some(' ') => { col += 1; altcol += 1; self.bol_width += 1; self.text_pos.next(); } Some('\t') => { // Increment both col and altcol using different tab sizes. Tabs snap to the // next multiple of self.tab_size. col = (col / self.tab_size + 1) * self.tab_size; // altcol will later be used for detecting mixed tabs and spaces. altcol = (altcol / self.alt_tab_size + 1) * self.alt_tab_size; self.bol_width += 1; self.text_pos.next(); } // Control-L (formfeed) for emacs users Some('\x0c') => { col = 0; altcol = 0; self.bol_width += 1; self.text_pos.next(); } _ => { break; } } } // Lines with only whitespace and/or comments and/or a line continuation // character shouldn't affect the indentation and are not passed to the parser // as NEWLINE tokens. 
self.blank_line = matches!( self.text_pos.peek(), Some('#') | Some('\n') | Some('\\') | None ); if self.blank_line || !self.paren_stack.is_empty() { return Ok(()); } let prev_col = self.indent_stack.last().unwrap_or(&0); match col.cmp(prev_col) { Ordering::Equal => { // No change if altcol != *self.alt_indent_stack.last().unwrap_or(&0) { return Err(TokError::TabSpace); } } Ordering::Greater => { // col > prev_col // Indent -- always one if self.indent_stack.len() + 1 >= MAX_INDENT { return Err(TokError::TooDeep); } // col > prev_col, therefore altcol > prev_altcol, unless there's badly mixed tabs // and spaces if altcol <= *self.alt_indent_stack.last().unwrap_or(&0) { return Err(TokError::TabSpace); } // only emit indents if we're not at EOF if self.text_pos.peek().is_some() { self.pending_indents += 1; self.indent_stack.push(col); self.alt_indent_stack.push(altcol); } } Ordering::Less => { // c < prev_col // Dedent -- any number, must be consistent while matches!(self.indent_stack.last(), Some(&ind_cols) if col < ind_cols) { self.pending_indents -= 1; self.indent_stack.pop(); self.alt_indent_stack.pop(); } if col != *self.indent_stack.last().unwrap_or(&0) { return Err(TokError::Dedent); } if altcol != *self.alt_indent_stack.last().unwrap_or(&0) { return Err(TokError::TabSpace); } } } Ok(()) } fn process_pending_indents(&mut self) -> Option { if self.pending_indents != 0 { if self.pending_indents < 0 { self.pending_indents += 1; Some(TokType::Dedent) } else { self.pending_indents -= 1; Some(TokType::Indent) } } else { None } } fn maybe_close_async_def(&mut self) { // Check if we are closing an async function if self.async_def && !self.blank_line // (This is irrelevant to the rust implementation which doesn't support type_comments // yet, but the comment is preserved for posterity) // Due to some implementation artifacts of type comments, a TYPE_COMMENT at the start of // a function won't set an indentation level and it will produce a NEWLINE after it. To // avoid spuriously ending an async function due to this, wait until we have some // non-newline char in front of us. // && self.text_pos.peek() == Some('\n') && self.paren_stack.is_empty() // There was a NEWLINE after ASYNC DEF, so we're past the signature. && self.async_def_nl // Current indentation level is less than where the async function was defined && self.async_def_indent >= self.indent_stack.len() { self.async_def = false; self.async_def_indent = 0; self.async_def_nl = false; } } fn consume_identifier_or_prefixed_string(&mut self) -> Result> { // Process the various legal combinations of b"", r"", u"", and f"". if STRING_PREFIX_RE.with(|r| self.text_pos.consume(r)) { if let Some('"') | Some('\'') = self.text_pos.peek() { // We found a string, not an identifier. Bail! 
if self.split_fstring && self .text_pos .slice_from_start_pos(&self.start_pos) .contains(&['f', 'F'][..]) { return self.consume_fstring_start(); } else { return self.consume_string(); } } } else { // the next character must be a potential identifier start, aka `[a-zA-Z_]|[^\x00-\x7f]` let first_ch = self.text_pos.next(); debug_assert!(matches!( first_ch, Some('a'..='z') | Some('A'..='Z') | Some('_') | Some('\u{80}'..=MAX_CHAR) )); } POTENTIAL_IDENTIFIER_TAIL_RE.with(|r| self.text_pos.consume(r)); let identifier_str = self.text_pos.slice_from_start_pos(&self.start_pos); if !verify_identifier(identifier_str) { // TODO: async/await return Err(TokError::BadIdentifier(identifier_str)); } let allow_async = !self.async_hacks || self.async_def; match (identifier_str, allow_async) { ("async", true) => Ok(TokType::Async), ("await", true) => Ok(TokType::Await), ("async", false) => { // The current token is 'async' and async_hacks is enabled. // Look ahead one token to see if that is 'def'. // This clone is expensive, but modern code doesn't need async_hacks. let mut lookahead_state = self.clone(); if lookahead_state.next_inner() == Ok(TokType::Name) && lookahead_state .text_pos .slice_from_start_pos(&lookahead_state.start_pos) == "def" { self.async_def = true; self.async_def_indent = self.indent_stack.len(); Ok(TokType::Async) } else { Ok(TokType::Name) } } _ => Ok(TokType::Name), } } fn consume_number(&mut self, state: NumberState) -> Result> { // This is organized as a state machine. The match could also be rewritten into multiple // functions, but this is closer to how the C code is written (with gotos). match state { NumberState::StartDigit => { let start_digit_ch = self.text_pos.peek(); debug_assert!(is_digit(start_digit_ch)); if start_digit_ch == Some('0') { self.text_pos.next(); match self.text_pos.peek() { Some('x') | Some('X') => { self.text_pos.next(); if !HEXADECIMAL_TAIL_RE.with(|r| self.text_pos.consume(r)) || self.text_pos.peek() == Some('_') { Err(TokError::BadHexadecimal) } else { Ok(TokType::Number) } } Some('o') | Some('O') => { self.text_pos.next(); if !OCTAL_TAIL_RE.with(|r| self.text_pos.consume(r)) || self.text_pos.peek() == Some('_') { return Err(TokError::BadOctal); } if let Some(next_ch) = self.text_pos.peek() { if is_digit(next_ch) { return Err(TokError::BadOctalDigit(next_ch)); } } Ok(TokType::Number) } Some('b') | Some('B') => { self.text_pos.next(); if !BINARY_TAIL_RE.with(|r| self.text_pos.consume(r)) || self.text_pos.peek() == Some('_') { return Err(TokError::BadBinary); } if let Some(next_ch) = self.text_pos.peek() { if is_digit(next_ch) { return Err(TokError::BadBinaryDigit(next_ch)); } } Ok(TokType::Number) } _ => { let mut nonzero = false; // Maybe old-style octal. 
In any case, allow '0' as a literal loop { if self.text_pos.peek() == Some('_') { self.text_pos.next(); if !is_digit(self.text_pos.peek()) { return Err(TokError::BadDecimal); } } if self.text_pos.peek() != Some('0') { break; } self.text_pos.next(); } if is_digit(self.text_pos.peek()) { nonzero = true; self.consume_decimal_tail()?; } if self.text_pos.peek() == Some('.') { self.consume_number(NumberState::Fraction) } else if let Some('e') | Some('E') = self.text_pos.peek() { self.consume_number(NumberState::Exponent) } else if let Some('j') | Some('J') = self.text_pos.peek() { self.consume_number(NumberState::Imaginary) } else if nonzero { Err(TokError::BadDecimalLeadingZeros) } else { Ok(TokType::Number) } } } } else { self.consume_decimal_tail()?; if self.text_pos.peek() == Some('.') { self.consume_number(NumberState::Fraction) } else if let Some('e') | Some('E') = self.text_pos.peek() { self.consume_number(NumberState::Exponent) } else if let Some('j') | Some('J') = self.text_pos.peek() { self.consume_number(NumberState::Imaginary) } else { Ok(TokType::Number) } } } NumberState::Fraction => { let dot_ch = self.text_pos.next(); debug_assert!(dot_ch == Some('.')); if is_digit(self.text_pos.peek()) { self.consume_decimal_tail()?; } if let Some('e') | Some('E') = self.text_pos.peek() { self.consume_number(NumberState::Exponent) } else if let Some('j') | Some('J') = self.text_pos.peek() { self.consume_number(NumberState::Imaginary) } else { Ok(TokType::Number) } } NumberState::Exponent => { let e_ch = self.text_pos.next(); debug_assert!(matches!(e_ch, Some('e') | Some('E'))); if let Some('+') | Some('-') = self.text_pos.peek() { self.text_pos.next(); if !is_digit(self.text_pos.peek()) { return Err(TokError::BadDecimal); } } else if !is_digit(self.text_pos.peek()) { // Don't consume the 'e'. It could be part of an identifier after this number. self.text_pos.backup_no_newline(); return Ok(TokType::Number); } self.consume_decimal_tail()?; if let Some('j') | Some('J') = self.text_pos.peek() { self.consume_number(NumberState::Imaginary) } else { Ok(TokType::Number) } } NumberState::Imaginary => { let j_ch = self.text_pos.next(); debug_assert!(matches!(j_ch, Some('j') | Some('J'))); Ok(TokType::Number) } } } /// Processes a decimal tail. This is the bit after the dot or after an E in a float. fn consume_decimal_tail(&mut self) -> Result<(), TokError<'t>> { let result = DECIMAL_TAIL_RE.with(|r| self.text_pos.consume(r)); // Assumption: If we've been called, the first character is an integer, so we must have a // regex match debug_assert!(result, "try_decimal_tail was called on a non-digit char"); if self.text_pos.peek() == Some('_') { Err(TokError::BadDecimal) } else { Ok(()) } } fn consume_open_quote(&mut self) -> (StringQuoteChar, StringQuoteSize) { let quote_char: StringQuoteChar = self .text_pos .peek() .try_into() .expect("the next character must be a quote when calling consume_open_quote"); let triple_quote_pattern = quote_char.triple_str(); let quote_size = if self.text_pos.consume(triple_quote_pattern) { StringQuoteSize::Triple } else { self.text_pos.next(); // consume the single character instead StringQuoteSize::Single }; (quote_char, quote_size) } fn consume_string(&mut self) -> Result> { // Assumption: The opening quote has not been consumed. Leading characters (b, r, f, etc) // have been consumed. 
let (quote_char, quote_size) = self.consume_open_quote(); let quote_raw = quote_char.into(); let mut end_quote_size: usize = 0; let quote_usize: usize = quote_size.into(); while end_quote_size != quote_usize { match (self.text_pos.next(), quote_size) { (None, StringQuoteSize::Triple) => { return Err(TokError::UnterminatedTripleQuotedString); } (None, StringQuoteSize::Single) | (Some('\n'), StringQuoteSize::Single) => { return Err(TokError::UnterminatedString); } (ch @ Some('\''), _) | (ch @ Some('"'), _) if ch == Some(quote_raw) => { end_quote_size += 1; } (Some(ch), _) => { end_quote_size = 0; if ch == '\\' { // skip escaped char self.text_pos.next(); } } } } Ok(TokType::String) } fn consume_fstring_start(&mut self) -> Result> { let (quote_char, quote_size) = self.consume_open_quote(); let is_raw_string = self .text_pos .slice_from_start_pos(&self.start_pos) .contains(&['r', 'R'][..]); self.fstring_stack .push(FStringNode::new(quote_char, quote_size, is_raw_string)); Ok(TokType::FStringStart) } fn maybe_consume_fstring_string( &mut self, is_in_format_spec: bool, is_raw_string: bool, ) -> Result, TokError<'t>> { let allow_multiline = self.fstring_stack.last().map(|node| node.allow_multiline()) == Some(true); let mut in_named_unicode: bool = false; let mut ok_result = Ok(None); // value to return if we reach the end and don't error out 'outer: loop { match (self.text_pos.peek(), allow_multiline) { (None, true) => { return Err(TokError::UnterminatedTripleQuotedString); } (None, false) | (Some('\n'), false) => { return Err(TokError::UnterminatedString); } (ch @ Some('\''), _) | (ch @ Some('"'), _) => { // see if this actually terminates the most recent fstring if let Some(node) = self.fstring_stack.last() { if ch == Some(node.quote_char.into()) { match node.quote_size { StringQuoteSize::Single => { break 'outer; } StringQuoteSize::Triple => { if self.text_pos.matches(node.quote_char.triple_str()) { break 'outer; } } } } } self.text_pos.next(); } (Some('\\'), _) if !is_raw_string => { self.text_pos.next(); if is_in_format_spec { if let Some('{') | Some('}') = self.text_pos.peek() { // don't consume { or } because we want those to be interpreted as OP // tokens } else { // skip escaped char (e.g. 
\', \", or newline/line continuation) self.text_pos.next(); } } else if let Some( '\n' | '\\' | '\'' | '"' | 'a' | 'b' | 'f' | 'n' | 'r' | 't' | 'v' | 'x' | '0'..='9' | 'N' | 'u' | 'U', ) = self.text_pos.peek() { // skip escaped char let next_ch = self.text_pos.next(); // check if this is a \N sequence if let Some('N') = next_ch { // swallow the next open curly brace if it exists if let Some('{') = self.text_pos.peek() { in_named_unicode = true; self.text_pos.next(); } } } } (Some('\\'), _) if is_raw_string => { self.text_pos.next(); // skip escaped end-of-string marker or backslash if let Some('"' | '\'' | '\\') = self.text_pos.peek() { self.text_pos.next(); } } (Some('{'), _) => { if is_in_format_spec { // don't actually consume the {, and generate an OP for it instead break 'outer; } let consumed_double = self.text_pos.consume("{{"); if !consumed_double { break 'outer; } } (Some('}'), _) => { if in_named_unicode { in_named_unicode = false; self.text_pos.next(); } else if is_in_format_spec { // don't actually consume the }, and generate an OP for it instead break 'outer; } else if !self.text_pos.consume("}}") { return Err(TokError::UnmatchedClosingParen('}')); } } _ => { self.text_pos.next(); } } ok_result = Ok(Some(TokType::FStringString)); } ok_result } fn maybe_consume_fstring_end(&mut self) -> Option { let ch = self.text_pos.peek(); if let Some(node) = self.fstring_stack.last() { if ch == Some(node.quote_char.into()) { if node.quote_size == StringQuoteSize::Triple { self.text_pos.consume(node.quote_char.triple_str()); } else { self.text_pos.next(); // already matched } self.fstring_stack.pop(); return Some(TokType::FStringEnd); } } None } } impl<'t> Iterator for TokState<'t> { type Item = Result>; /// Returns the next token type. fn next(&mut self) -> Option>> { // This implementation wraps `next_inner`, which does the actual work. if self.done { None } else { match self.next_inner() { Err(err) => { self.done = true; Some(Err(err)) } Ok(TokType::EndMarker) => { self.done = true; Some(Ok(TokType::EndMarker)) } Ok(t) => Some(Ok(t)), } } } } /// Returns true if the given string is a valid Python 3.x identifier. Follows [PEP 3131][]. /// /// [PEP 3131]: https://www.python.org/dev/peps/pep-3131/ fn verify_identifier(name: &str) -> bool { // TODO: If `name` is non-ascii, must first normalize name to NFKC. // Common case: If the entire string is ascii, we can avoid the more expensive regex check, // since the tokenizer already validates ascii characters before calling us. name.is_ascii() || UNICODE_IDENTIFIER_RE.with(|r| r.is_match(name)) } #[derive(Clone)] pub struct Token<'a> { pub r#type: TokType, pub string: &'a str, pub start_pos: TextPositionSnapshot, pub end_pos: TextPositionSnapshot, pub whitespace_before: Rc>>, pub whitespace_after: Rc>>, pub relative_indent: Option<&'a str>, } impl<'a> Debug for Token<'a> { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { write!( f, "Token({:?}, {}, start={:?}, end={:?}, relative_indent={:?}, ws_before={:?}, ws_after={:?}", self.r#type, self.string, self.start_pos, self.end_pos, self.relative_indent, self.whitespace_before, self.whitespace_after ) } } // Dummy Eq implementation. 
We never compare Tokens like this impl<'a> PartialEq for Token<'a> { fn eq(&self, _other: &Self) -> bool { true } } impl<'a> Eq for Token<'a> {} pub struct TokenIterator<'a> { previous_whitespace: Option>>>, core_state: TokState<'a>, absolute_indents: Vec<&'a str>, } impl<'a> TokenIterator<'a> { pub fn new(module_text: &'a str, config: &TokConfig) -> Self { Self { previous_whitespace: None, absolute_indents: vec![], core_state: TokState::new(module_text, config), } } } impl<'a> Iterator for TokenIterator<'a> { type Item = Result, TokError<'a>>; fn next(&mut self) -> Option { let next = self.core_state.next(); next.as_ref()?; Some((|| { let tok_type = next.unwrap()?; let relative_indent = match tok_type { TokType::Indent => { let end_idx = self.core_state.text_pos.byte_idx(); let start_idx = end_idx - self.core_state.bol_width; let absolute_indent = &self.core_state.text_pos.text()[start_idx..end_idx]; let relative_indent = if let Some(prev_absolute_indent) = self.absolute_indents.last() { if let Some(ri) = absolute_indent.strip_prefix(prev_absolute_indent) { ri } else { // TODO: return the correct exception type, improve error message return Err(TokError::Dedent); } } else { // there's no previous indent, absolute_indent is relative_indent absolute_indent }; self.absolute_indents.push(absolute_indent); // HACKY: mutate and fixup the previous whitespace state if let Some(ws) = self.previous_whitespace.as_mut() { ws.borrow_mut().absolute_indent = absolute_indent; } Some(relative_indent) } TokType::Dedent => { self.absolute_indents.pop(); // HACKY: mutate and fixup the previous whitespace state if let Some(ws) = self.previous_whitespace.as_mut() { ws.borrow_mut().absolute_indent = self.absolute_indents.last().unwrap_or(&""); } None } _ => None, }; let text_pos = &self.core_state.text_pos; let whitespace_before = self.previous_whitespace.clone().unwrap_or_default(); let whitespace_after = match tok_type { TokType::Indent | TokType::Dedent | TokType::EndMarker => whitespace_before.clone(), _ => Rc::new(RefCell::new(WhitespaceState { line: text_pos.line_number(), column: text_pos.char_column_number(), column_byte: text_pos.byte_column_number(), byte_offset: text_pos.byte_idx(), absolute_indent: self.absolute_indents.last().unwrap_or(&""), is_parenthesized: self.core_state.is_parenthesized(), })), }; self.previous_whitespace = Some(whitespace_after.clone()); Ok(Token { r#type: tok_type, string: text_pos.slice_from_start_pos(&self.core_state.start_pos), start_pos: self.core_state.start_pos.clone(), end_pos: text_pos.into(), whitespace_after: whitespace_after.clone(), whitespace_before: whitespace_before.clone(), relative_indent, }) })()) } } LibCST-1.2.0/native/libcst/src/tokenizer/core/string_types.rs000066400000000000000000000060631456464173300242130ustar00rootroot00000000000000// This implementation is Copyright (c) Meta Platforms, Inc. and affiliates. // // CPython 3.10.0a5 and the original C code this is based on is // Copyright (c) 2001-2021 Python Software Foundation; All Rights Reserved // // Portions of this module (f-string splitting) are based on parso's tokenize.py, which is also PSF // licensed. /// Helper types for string processing in the core tokenizer. 
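// Added usage sketch (illustrative; not part of the original file). The core
// tokenizer pairs these two enums to describe an opening quote:
//
//     let ch = StringQuoteChar::try_from(Some('"')).unwrap();
//     assert_eq!(char::from(ch), '"');
//     assert_eq!(StringQuoteChar::DoubleQuote.triple_str(), "\"\"\"");
//     assert_eq!(usize::from(StringQuoteSize::Triple), 3);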
use std::convert::TryFrom; use crate::tokenizer::text_position::TextPositionSnapshot; #[derive(Clone, Copy, Eq, PartialEq)] pub enum StringQuoteSize { Single, Triple, } impl From for usize { fn from(qs: StringQuoteSize) -> Self { match qs { StringQuoteSize::Single => 1, StringQuoteSize::Triple => 3, } } } #[derive(Clone, Copy)] pub enum StringQuoteChar { Apostrophe, DoubleQuote, } impl StringQuoteChar { pub fn triple_str(&self) -> &'static str { match self { Self::Apostrophe => "'''", Self::DoubleQuote => "\"\"\"", } } } impl From for char { fn from(ch: StringQuoteChar) -> Self { match ch { StringQuoteChar::Apostrophe => '\'', StringQuoteChar::DoubleQuote => '"', } } } #[derive(Debug, thiserror::Error)] #[error("{0:?} is not a valid string quote character")] pub struct StringQuoteCharConversionError(Option); impl TryFrom> for StringQuoteChar { type Error = StringQuoteCharConversionError; fn try_from(ch: Option) -> Result { match ch { Some('\'') => Ok(StringQuoteChar::Apostrophe), Some('"') => Ok(StringQuoteChar::DoubleQuote), _ => Err(StringQuoteCharConversionError(ch)), } } } #[derive(Clone)] pub struct FStringNode { pub quote_char: StringQuoteChar, pub quote_size: StringQuoteSize, pub parentheses_count: usize, pub string_start: Option, // In the syntax there can be multiple format_spec's nested: {x:{y:3}} pub format_spec_count: usize, pub is_raw_string: bool, } impl FStringNode { pub fn new( quote_char: StringQuoteChar, quote_size: StringQuoteSize, is_raw_string: bool, ) -> Self { Self { quote_char, quote_size, parentheses_count: 0, string_start: None, format_spec_count: 0, is_raw_string, } } pub fn open_parentheses(&mut self) { self.parentheses_count += 1; } pub fn close_parentheses(&mut self) { if self.is_in_format_spec() { self.format_spec_count -= 1; } self.parentheses_count -= 1; } pub fn allow_multiline(&self) -> bool { self.quote_size == StringQuoteSize::Triple || self.is_in_expr() } pub fn is_in_expr(&self) -> bool { self.parentheses_count > self.format_spec_count } pub fn is_in_format_spec(&self) -> bool { !self.is_in_expr() && self.format_spec_count > 0 } } LibCST-1.2.0/native/libcst/src/tokenizer/debug_utils.rs000066400000000000000000000007661456464173300230430ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use std::fmt; /// An empty struct that when writes "..." when using `fmt::Debug`. Useful for omitting fields when /// using `fmt::Formatter::debug_struct`. pub struct EllipsisDebug; impl fmt::Debug for EllipsisDebug { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("...") } } LibCST-1.2.0/native/libcst/src/tokenizer/mod.rs000066400000000000000000000005011456464173300212770ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. mod core; mod debug_utils; mod operators; mod text_position; pub mod whitespace_parser; pub use self::core::*; #[cfg(test)] mod tests; LibCST-1.2.0/native/libcst/src/tokenizer/operators.rs000066400000000000000000000050221456464173300225410ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. 
// // Part of this file is derived from the CPython documentation, which is available under the // zero-clause BSD license. That license does not require that derivative works cite the original // code or that we retain the original work's copyright information. // https://docs.python.org/3/license.html#zero-clause-bsd-license-for-code-in-the-python-release-documentation use regex::Regex; /// A list of strings that make up all the possible operators in a specific version of Python. /// Derived from the [CPython's token documentation](https://docs.python.org/3/library/token.html). pub const OPERATORS: &[&str] = &[ "(", // LPAR ")", // RPAR "[", // LSQB "]", // RSQB ":", // COLON ",", // COMMA ";", // SEMI "+", // PLUS "-", // MINUS "*", // STAR "/", // SLASH "|", // VBAR "&", // AMPER "<", // LESS ">", // GREATER "=", // EQUAL ".", // DOT "%", // PERCENT "{", // LBRACE "}", // RBRACE "==", // EQEQUAL "!=", // NOTEQUAL "<=", // LESSEQUAL ">=", // GREATEREQUAL "~", // TILDE "^", // CIRCUMFLEX "<<", // LEFTSHIFT ">>", // RIGHTSHIFT "**", // DOUBLESTAR "+=", // PLUSEQUAL "-=", // MINEQUAL "*=", // STAREQUAL "/=", // SLASHEQUAL "%=", // PERCENTEQUAL "&=", // AMPEREQUAL "|=", // VBAREQUAL "^=", // CIRCUMFLEXEQUAL "<<=", // LEFTSHIFTEQUAL ">>=", // RIGHTSHIFTEQUAL "**=", // DOUBLESTAREQUAL "//", // DOUBLESLASH "//=", // DOUBLESLASHEQUAL "@", // AT "@=", // ATEQUAL "->", // RARROW "...", // ELLIPSIS ":=", // COLONEQUAL // Not a real operator, but needed to support the split_fstring feature "!", // The fake operator added by PEP 401. Technically only valid if used with: // // from __future__ import barry_as_FLUFL "<>", ]; thread_local! { pub static OPERATOR_RE: Regex = { // sort operators so that we try to match the longest ones first let mut sorted_operators: Box<[&str]> = OPERATORS.into(); sorted_operators.sort_unstable_by_key(|op| usize::MAX - op.len()); Regex::new(&format!( r"\A({})", sorted_operators .iter() .map(|op| regex::escape(op)) .collect::>() .join("|") )) .expect("regex") }; } LibCST-1.2.0/native/libcst/src/tokenizer/tests.rs000066400000000000000000000565621456464173300217040ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. /// Tests for the functionality in `tokenize::core`. These tests are not part of the `core` module /// because they're not a derivative work of CPython, and are therefore not subject to the PSF /// license. use crate::tokenizer::core::{TokConfig, TokError, TokState, TokType}; fn default_config() -> TokConfig { TokConfig { async_hacks: false, split_fstring: false, } } fn tokenize_with_end_marker<'t>( text: &'t str, config: &TokConfig, ) -> Result, TokError<'t>> { let mut result = Vec::new(); let mut state = TokState::new(text, config); while let Some(tok_type) = state.next() { result.push(( tok_type?, state.text_pos.slice_from_start_pos(&state.start_pos), )); } Ok(result) } fn tokenize_all<'t>( text: &'t str, config: &TokConfig, ) -> Result, TokError<'t>> { let mut result = tokenize_with_end_marker(text, config)?; // Remove the EndMarker, since it's on every non-error token stream. 
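// (Added note) Invariant relied on here: every Ok token stream ends with
// exactly one EndMarker, and when the final logical line carries tokens but no
// terminating newline the tokenizer appends a fake ("") Newline before it (see
// the test_fake_newline* / test_no_fake_newline_* cases below).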
assert_eq!(result.pop().expect("EndMarker").0, TokType::EndMarker); // Also remove fake newline at the end if let Some((TokType::Newline, "")) = result.last() { result.pop(); } Ok(result) } #[test] fn test_indentifier() { assert_eq!( tokenize_all("test input", &default_config()), Ok(vec![(TokType::Name, "test"), (TokType::Name, "input")]) ); assert_eq!( tokenize_all("__with_underscores", &default_config()), Ok(vec![(TokType::Name, "__with_underscores")]) ); assert_eq!( tokenize_all("{ends_with_op}", &default_config()), Ok(vec![ (TokType::Op, "{"), (TokType::Name, "ends_with_op"), (TokType::Op, "}") ]) ); assert_eq!( tokenize_all("\u{0100}\u{0101}\u{0102}unicode", &default_config()), Ok(vec![(TokType::Name, "\u{0100}\u{0101}\u{0102}unicode")]) ); } #[test] fn test_async_await() { // normally async/await are keywords assert_eq!( tokenize_all("async await", &default_config()), Ok(vec![(TokType::Async, "async"), (TokType::Await, "await")]) ); // with async_hacks, async/await are handled as identifiers by default assert_eq!( tokenize_all( "async await", &TokConfig { async_hacks: true, ..default_config() } ), Ok(vec![(TokType::Name, "async"), (TokType::Name, "await")]) ); // with async_hacks, async/await are handled as keywords in functions assert_eq!( tokenize_all( "async def fn():\n await foo\nawait bar", &TokConfig { async_hacks: true, ..default_config() } ), Ok(vec![ // this async is followed by a def, so it's converted to an Async (TokType::Async, "async"), (TokType::Name, "def"), (TokType::Name, "fn"), (TokType::Op, "("), (TokType::Op, ")"), (TokType::Op, ":"), (TokType::Newline, "\n"), (TokType::Indent, ""), // this await is inside a function, and is converted into an Await (TokType::Await, "await"), (TokType::Name, "foo"), (TokType::Newline, "\n"), (TokType::Dedent, ""), // this await is outside the function, and is turned into an identifier (TokType::Name, "await"), (TokType::Name, "bar") ]) ); } #[test] fn test_blankline() { assert_eq!( tokenize_all("\n \n\t\n\x0c\n\n", &default_config()), Ok(vec![]) ); } #[test] fn test_newline() { assert_eq!( tokenize_all("a\nb\rc\r\n", &default_config()), Ok(vec![ (TokType::Name, "a"), (TokType::Newline, "\n"), (TokType::Name, "b"), (TokType::Newline, "\r"), (TokType::Name, "c"), (TokType::Newline, "\r\n") ]) ); } #[test] fn test_indent_dedent() { assert_eq!( tokenize_all("one\n two\n sameindent\n", &default_config()), Ok(vec![ (TokType::Name, "one"), (TokType::Newline, "\n"), (TokType::Indent, ""), (TokType::Name, "two"), (TokType::Newline, "\n"), (TokType::Name, "sameindent"), (TokType::Newline, "\n"), (TokType::Dedent, "") ]) ); assert_eq!( tokenize_all("one\n two\n \tthree\n", &default_config()), Ok(vec![ (TokType::Name, "one"), (TokType::Newline, "\n"), (TokType::Indent, ""), (TokType::Name, "two"), (TokType::Newline, "\n"), (TokType::Indent, ""), (TokType::Name, "three"), (TokType::Newline, "\n"), (TokType::Dedent, ""), (TokType::Dedent, "") ]) ); // indentation decreases to a new (smaller) indentation level that wasn't on the stack assert_eq!( tokenize_all(" one\n two", &default_config()), Err(TokError::Dedent), ); // TabSpace error without change in indentation assert_eq!( tokenize_all(" one\n\ttwo\n", &default_config()), Err(TokError::TabSpace), ); // TabSpace error with increase in indentation assert_eq!( tokenize_all(" one\n\t\ttwo\n", &default_config()), Err(TokError::TabSpace), ); // TabSpace error with decrease in indentation assert_eq!( tokenize_all(" one\n \ttwo\n\tthree\n", &default_config()), Err(TokError::TabSpace), ); // this 
looks like a TabSpace error, but CPython allows it, so we should too assert!(tokenize_all(" \tone\n\t two\n", &default_config()).is_ok()); } #[test] fn test_integer_decimal() { assert_eq!( tokenize_all("123456789", &default_config()), Ok(vec![(TokType::Number, "123456789")]) ); assert_eq!( tokenize_all("1_2_3", &default_config()), Ok(vec![(TokType::Number, "1_2_3")]) ); // doesn't consume trailing underscores assert_eq!( tokenize_all("123_", &default_config()), Err(TokError::BadDecimal), ); } #[test] fn test_integer_leading_zeros() { assert_eq!( tokenize_all("000", &default_config()), Ok(vec![(TokType::Number, "000")]) ); assert_eq!( tokenize_all("0_0_0", &default_config()), Ok(vec![(TokType::Number, "0_0_0")]) ); assert_eq!( tokenize_all("00123", &default_config()), Err(TokError::BadDecimalLeadingZeros) ); } #[test] fn test_integer_hexadecimal() { assert_eq!( tokenize_all("0x00Aa12Ff", &default_config()), Ok(vec![(TokType::Number, "0x00Aa12Ff")]), ); assert_eq!( tokenize_all("0x_1_2_3", &default_config()), Ok(vec![(TokType::Number, "0x_1_2_3")]), ); assert_eq!( tokenize_all("0x123_", &default_config()), Err(TokError::BadHexadecimal), ); } #[test] fn test_integer_octal() { assert_eq!( tokenize_all("0o001234567", &default_config()), Ok(vec![(TokType::Number, "0o001234567")]), ); assert_eq!( tokenize_all("0o_1_2_3", &default_config()), Ok(vec![(TokType::Number, "0o_1_2_3")]), ); assert_eq!( tokenize_all("0o123_", &default_config()), Err(TokError::BadOctal), ); assert_eq!( tokenize_all("0o789", &default_config()), Err(TokError::BadOctalDigit('8')), ); } #[test] fn test_integer_binary() { assert_eq!( tokenize_all("0b00101011", &default_config()), Ok(vec![(TokType::Number, "0b00101011")]), ); assert_eq!( tokenize_all("0b_0_1_0_1", &default_config()), Ok(vec![(TokType::Number, "0b_0_1_0_1")]), ); assert_eq!( tokenize_all("0b0101_", &default_config()), Err(TokError::BadBinary), ); assert_eq!( tokenize_all("0b0123", &default_config()), Err(TokError::BadBinaryDigit('2')), ); } #[test] fn test_fraction() { // fraction starting with a dot assert_eq!( tokenize_all(".5", &default_config()), Ok(vec![(TokType::Number, ".5")]) ); // fraction starting with a dot using E assert_eq!( tokenize_all(".5e9", &default_config()), Ok(vec![(TokType::Number, ".5e9")]) ); // fraction starting with a dot using J assert_eq!( tokenize_all(".5j", &default_config()), Ok(vec![(TokType::Number, ".5j")]) ); // fraction starting with a zero assert_eq!( tokenize_all("0.5", &default_config()), Ok(vec![(TokType::Number, "0.5")]) ); // fraction starting with a zero using E assert_eq!( tokenize_all("0.5e9", &default_config()), Ok(vec![(TokType::Number, "0.5e9")]) ); // fraction starting with a zero using J assert_eq!( tokenize_all("0.5j", &default_config()), Ok(vec![(TokType::Number, "0.5j")]) ); // fraction with underscores assert_eq!( tokenize_all("1_0.2_5", &default_config()), Ok(vec![(TokType::Number, "1_0.2_5")]) ); // underscores after the fraction are an error assert_eq!( tokenize_all(".5_", &default_config()), Err(TokError::BadDecimal), ); // doesn't consume underscores around the dot assert_eq!( tokenize_all("1_.25", &default_config()), Err(TokError::BadDecimal), ); // doesn't consume underscores around the dot assert_eq!( tokenize_all("1._25", &default_config()), Ok(vec![(TokType::Number, "1."), (TokType::Name, "_25")]) ); } #[test] fn test_string() { // empty, single quote assert_eq!( tokenize_all("''", &default_config()), Ok(vec![(TokType::String, "''")]), ); // empty, double quote assert_eq!( tokenize_all(r#""""#, 
&default_config()), Ok(vec![(TokType::String, r#""""#)]), ); // simple string assert_eq!( tokenize_all("'test'", &default_config()), Ok(vec![(TokType::String, "'test'")]), ); // mixed quotes assert_eq!( tokenize_all(r#""test'"#, &default_config()), Err(TokError::UnterminatedString), ); // single quoted strings can contain double quotes, double quoted strings can contain single // quotes assert_eq!( tokenize_all( r#"'she said "hey"' "but he'd ignored her""#, &default_config() ), Ok(vec![ (TokType::String, r#"'she said "hey"'"#), (TokType::String, r#""but he'd ignored her""#) ]), ); // escape characters assert_eq!( tokenize_all("'a\\b\\c\\d\\e\\'\\f\\g'", &default_config()), Ok(vec![(TokType::String, "'a\\b\\c\\d\\e\\'\\f\\g'"),]), ); // newline in the middle of a string causes an unterminated string assert_eq!( tokenize_all("'first\nsecond'", &default_config()), Err(TokError::UnterminatedString), ); // newlines can be escaped and are preserved in the output assert_eq!( tokenize_all("'first\\\nsecond\\\r\nthird\\\r'", &default_config()), Ok(vec![(TokType::String, "'first\\\nsecond\\\r\nthird\\\r'"),]), ); } #[test] fn test_string_triple_quoted() { // empty, single quote assert_eq!( tokenize_all("''''''", &default_config()), Ok(vec![(TokType::String, "''''''")]), ); // empty, double quote assert_eq!( tokenize_all(r#""""""""#, &default_config()), Ok(vec![(TokType::String, r#""""""""#)]), ); // simple string with newlines assert_eq!( tokenize_all("'''\nmulti\rline\r\n'''", &default_config()), Ok(vec![(TokType::String, "'''\nmulti\rline\r\n'''")]), ); // unterminated string assert_eq!( tokenize_all( "'''hey'there's''quotes'here, but not '' three'", &default_config() ), Err(TokError::UnterminatedTripleQuotedString), ); } #[test] fn test_string_prefix() { // works with double-quoted string assert_eq!( tokenize_all(r#"b"""#, &default_config()), Ok(vec![(TokType::String, r#"b"""#)]), ); // works with triple-quoted string assert_eq!( tokenize_all("b'''test'''", &default_config()), Ok(vec![(TokType::String, "b'''test'''")]), ); // prefix can be capitalized assert_eq!( tokenize_all("B'' R'' U'' F''", &default_config()), Ok(vec![ (TokType::String, "B''"), (TokType::String, "R''"), (TokType::String, "U''"), (TokType::String, "F''"), ]), ); // valid prefixes assert_eq!( tokenize_all("b'' r'' u'' f'' br'' fr'' rb'' rf''", &default_config()), Ok(vec![ (TokType::String, "b''"), (TokType::String, "r''"), (TokType::String, "u''"), (TokType::String, "f''"), (TokType::String, "br''"), (TokType::String, "fr''"), (TokType::String, "rb''"), (TokType::String, "rf''"), ]), ); // invalid prefixes assert_eq!( tokenize_all("bb'' rr'' uu'' ff'' ur'' ub'' uf'' fb''", &default_config()), Ok(vec![ (TokType::Name, "bb"), (TokType::String, "''"), (TokType::Name, "rr"), (TokType::String, "''"), (TokType::Name, "uu"), (TokType::String, "''"), (TokType::Name, "ff"), (TokType::String, "''"), (TokType::Name, "ur"), (TokType::String, "''"), (TokType::Name, "ub"), (TokType::String, "''"), (TokType::Name, "uf"), (TokType::String, "''"), (TokType::Name, "fb"), (TokType::String, "''"), ]), ); // raw string escapes assert_eq!( tokenize_all("r'\\''", &default_config()), Ok(vec![(TokType::String, "r'\\''")]), ); assert_eq!( tokenize_all(r#"r"\"""#, &default_config()), Ok(vec![(TokType::String, r#"r"\"""#)]), ); assert_eq!( tokenize_all(r#"r'\\'"#, &default_config()), Ok(vec![(TokType::String, r#"r'\\'"#)]), ); let config = TokConfig { split_fstring: true, ..default_config() }; assert_eq!( tokenize_all("rf'\\''", &config), Ok(vec![ 
(TokType::FStringStart, "rf'"), (TokType::FStringString, "\\'"), (TokType::FStringEnd, "'"), ]), ); assert_eq!( tokenize_all(r#"rf"\"""#, &config), Ok(vec![ (TokType::FStringStart, "rf\""), (TokType::FStringString, r#"\""#), (TokType::FStringEnd, "\""), ]), ); assert_eq!( tokenize_all(r#"rf'\\'"#, &config), Ok(vec![ (TokType::FStringStart, "rf'"), (TokType::FStringString, r#"\\"#), (TokType::FStringEnd, "'"), ]), ); } #[test] fn test_split_fstring() { let config = TokConfig { split_fstring: true, ..default_config() }; assert_eq!( tokenize_all("f''", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::FStringEnd, "'"), ]), ); assert_eq!( tokenize_all("f'{value}'", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::Op, "{"), (TokType::Name, "value"), (TokType::Op, "}"), (TokType::FStringEnd, "'"), ]), ); assert_eq!( tokenize_all("f'{{just a string}}'", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::FStringString, r"{{just a string}}"), (TokType::FStringEnd, "'"), ]), ); assert_eq!( tokenize_all(r"f'\N{Latin Small Letter A}'", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::FStringString, r"\N{Latin Small Letter A}"), (TokType::FStringEnd, "'"), ]), ); // format specifier assert_eq!( tokenize_all("f'result: {value:{width}.{precision}}'", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::FStringString, "result: "), (TokType::Op, "{"), (TokType::Name, "value"), (TokType::Op, ":"), (TokType::Op, "{"), (TokType::Name, "width"), (TokType::Op, "}"), (TokType::FStringString, "."), (TokType::Op, "{"), (TokType::Name, "precision"), (TokType::Op, "}"), (TokType::Op, "}"), (TokType::FStringEnd, "'"), ]), ); // the walrus operator isn't valid unless parenthesized assert_eq!( tokenize_all("f'{a := b}'", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::Op, "{"), (TokType::Name, "a"), (TokType::Op, ":"), (TokType::FStringString, "= b"), (TokType::Op, "}"), (TokType::FStringEnd, "'"), ]), ); // once parenthesized, this is recognized as the walrus operator assert_eq!( tokenize_all("f'{(a := b)}'", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::Op, "{"), (TokType::Op, "("), (TokType::Name, "a"), (TokType::Op, ":="), (TokType::Name, "b"), (TokType::Op, ")"), (TokType::Op, "}"), (TokType::FStringEnd, "'"), ]), ); } #[test] fn test_fstring_escapes() { let config = TokConfig { split_fstring: true, ..default_config() }; assert_eq!( tokenize_all("f'\\{{\\}}'", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::FStringString, "\\{{\\}}"), (TokType::FStringEnd, "'"), ]) ); assert_eq!( tokenize_all(r#"f"regexp_like(path, '.*\{file_type}$')""#, &config), Ok(vec![ (TokType::FStringStart, "f\""), (TokType::FStringString, "regexp_like(path, '.*\\"), (TokType::Op, "{"), (TokType::Name, "file_type"), (TokType::Op, "}"), (TokType::FStringString, "$')"), (TokType::FStringEnd, "\""), ]) ); } #[test] fn test_operator() { assert_eq!( tokenize_all("= == * ** **= -> . .. 
...", &default_config()), Ok(vec![ (TokType::Op, "="), (TokType::Op, "=="), (TokType::Op, "*"), (TokType::Op, "**"), (TokType::Op, "**="), (TokType::Op, "->"), (TokType::Op, "."), (TokType::Op, "."), (TokType::Op, "."), (TokType::Op, "...") ]), ); } #[test] fn test_fake_newline() { assert_eq!( tokenize_with_end_marker("foo", &default_config()), Ok(vec![ (TokType::Name, "foo"), (TokType::Newline, ""), (TokType::EndMarker, "") ]) ); } #[test] fn test_fake_newline_when_at_bol() { assert_eq!( tokenize_with_end_marker("(\n \\\n)", &default_config()), Ok(vec![ (TokType::Op, "("), (TokType::Op, ")"), (TokType::Newline, ""), (TokType::EndMarker, "") ]) ) } #[test] fn test_no_fake_newline_for_empty_input() { assert_eq!( tokenize_with_end_marker("", &default_config()), Ok(vec![(TokType::EndMarker, "")]) ); } #[test] fn test_no_fake_newline_for_only_whitespaces() { assert_eq!( tokenize_with_end_marker(" ", &default_config()), Ok(vec![(TokType::EndMarker, "")]) ); } #[test] fn test_add_dedents_after_fake_newline() { assert_eq!( tokenize_with_end_marker("if 1:\n if 2:\n foo", &default_config()), Ok(vec![ (TokType::Name, "if"), (TokType::Number, "1"), (TokType::Op, ":"), (TokType::Newline, "\n"), (TokType::Indent, ""), (TokType::Name, "if"), (TokType::Number, "2"), (TokType::Op, ":"), (TokType::Newline, "\n"), (TokType::Indent, ""), (TokType::Name, "foo"), (TokType::Newline, ""), (TokType::Dedent, ""), (TokType::Dedent, ""), (TokType::EndMarker, "") ]) ); } #[test] fn test_add_dedents_for_dangling_indent() { assert_eq!( tokenize_with_end_marker("if 1:\n if 2:\n ", &default_config()), Ok(vec![ (TokType::Name, "if"), (TokType::Number, "1"), (TokType::Op, ":"), (TokType::Newline, "\n"), (TokType::Indent, ""), (TokType::Name, "if"), (TokType::Number, "2"), (TokType::Op, ":"), (TokType::Newline, "\n"), (TokType::Dedent, ""), (TokType::EndMarker, "") ]) ); } #[test] fn test_add_dedents_for_dangling_indent_with_comment() { assert_eq!( tokenize_with_end_marker("if 1:\n if 2:\n # foo", &default_config()), Ok(vec![ (TokType::Name, "if"), (TokType::Number, "1"), (TokType::Op, ":"), (TokType::Newline, "\n"), (TokType::Indent, ""), (TokType::Name, "if"), (TokType::Number, "2"), (TokType::Op, ":"), (TokType::Newline, "\n"), (TokType::Dedent, ""), (TokType::EndMarker, "") ]) ); } #[test] fn test_inconsistent_indentation_at_eof() { assert_eq!( tokenize_all("if 1:\n pass\n ", &default_config()), Ok(vec![ (TokType::Name, "if"), (TokType::Number, "1"), (TokType::Op, ":"), (TokType::Newline, "\n"), (TokType::Indent, ""), (TokType::Name, "pass"), (TokType::Newline, "\n"), (TokType::Dedent, ""), ]) ) } #[test] fn test_nested_f_string_specs() { let config = TokConfig { split_fstring: true, ..default_config() }; assert_eq!( tokenize_all("f'{_:{_:}{_}}'", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::Op, "{"), (TokType::Name, "_"), (TokType::Op, ":"), (TokType::Op, "{"), (TokType::Name, "_"), (TokType::Op, ":"), (TokType::Op, "}"), (TokType::Op, "{"), (TokType::Name, "_"), (TokType::Op, "}"), (TokType::Op, "}"), (TokType::FStringEnd, "'") ]) ) } #[test] fn test_nested_f_strings() { let config = TokConfig { split_fstring: true, ..default_config() }; assert_eq!( tokenize_all("f'{f'{2}'}'", &config), Ok(vec![ (TokType::FStringStart, "f'"), (TokType::Op, "{"), (TokType::FStringStart, "f'"), (TokType::Op, "{"), (TokType::Number, "2"), (TokType::Op, "}"), (TokType::FStringEnd, "'"), (TokType::Op, "}"), (TokType::FStringEnd, "'") ]) ) } 
LibCST-1.2.0/native/libcst/src/tokenizer/text_position/000077500000000000000000000000001456464173300230665ustar00rootroot00000000000000LibCST-1.2.0/native/libcst/src/tokenizer/text_position/char_width.rs000066400000000000000000000220161456464173300255510ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use std::str::Chars; #[derive(Debug, Eq, PartialEq)] pub struct CharWidth { pub byte_width: usize, pub char_width: usize, pub character: char, } /// Iterates over characters (unicode codepoints) normalizing `'\r'` and `"\r\n"` to `'\n'`. Also /// gives the width of each character, but `'\r\n'` is counted as 2 bytes and 2 characters instead /// of one even after being normalized to '\n'. #[derive(Clone)] pub struct NewlineNormalizedCharWidths<'t> { iter: Chars<'t>, text: &'t str, idx: usize, } impl<'t> NewlineNormalizedCharWidths<'t> { pub fn new(text: &'t str) -> Self { Self { text, iter: text.chars(), idx: 0, } } pub fn previous(&mut self) -> Option<::Item> { // This function is called infrequently. let mut back_iter = self.text[..self.idx].chars(); let result = match back_iter.next_back() { // Unlikely: \n, normalization *may* be needed Some('\n') => { // Peek at the previous character to see we're a `\r\n` sequence match back_iter.next_back() { Some('\r') => Some(CharWidth { byte_width: '\r'.len_utf8() + '\n'.len_utf8(), char_width: 2, character: '\n', }), _ => Some(CharWidth { byte_width: '\n'.len_utf8(), char_width: 1, character: '\n', }), } } // Unlikely: \r, normalization is needed Some('\r') => Some(CharWidth { byte_width: '\n'.len_utf8(), char_width: 1, character: '\n', }), // Common case: Not \r or \n, so no normalization is needed Some(ch) => Some(CharWidth { byte_width: ch.len_utf8(), char_width: 1, character: ch, }), // Unlikely: EOF None => None, }; if let Some(r) = &result { self.idx -= r.byte_width; self.iter = self.text[self.idx..].chars(); } result } pub fn peek_character(&self) -> Option { // This function is called very frequently. // // We're not using peekable or caching here, since this should be cheap enough on it's own, // though benchmarking might prove otherwise. match self.iter.clone().next() { Some('\r') => Some('\n'), ch => ch, } } } impl<'t> Iterator for NewlineNormalizedCharWidths<'t> { type Item = CharWidth; fn next(&mut self) -> Option { // This function is called very frequently. let result = match self.iter.next() { // Unlikely: \r, normalization is needed Some('\r') => { // Peek at the next character to see if it's '\n'. 
let mut speculative = self.iter.clone(); match speculative.next() { Some('\n') => { self.iter = speculative; Some(CharWidth { byte_width: '\r'.len_utf8() + '\n'.len_utf8(), char_width: 2, character: '\n', }) } _ => Some(CharWidth { byte_width: '\r'.len_utf8(), char_width: 1, character: '\n', }), } } // Common case: Not \r, so no normalization is needed Some(ch) => Some(CharWidth { byte_width: ch.len_utf8(), char_width: 1, character: ch, }), // Unlikely: EOF None => None, }; if let Some(r) = &result { self.idx += r.byte_width; } result } } #[cfg(test)] mod tests { use super::*; #[test] fn test_ascii_no_newlines() { let mut cw = NewlineNormalizedCharWidths::new("in"); // go forward assert_eq!(cw.peek_character(), Some('i')); assert_eq!( cw.next(), Some(CharWidth { byte_width: 1, char_width: 1, character: 'i' }) ); assert_eq!(cw.peek_character(), Some('n')); assert_eq!( cw.next(), Some(CharWidth { byte_width: 1, char_width: 1, character: 'n' }) ); // end of text assert_eq!(cw.peek_character(), None); assert_eq!(cw.next(), None); // go backwards assert_eq!( cw.previous(), Some(CharWidth { byte_width: 1, char_width: 1, character: 'n' }) ); assert_eq!( cw.previous(), Some(CharWidth { byte_width: 1, char_width: 1, character: 'i' }) ); // beginning of text assert_eq!(cw.previous(), None); // try going foward again assert_eq!(cw.peek_character(), Some('i')); assert_eq!( cw.next(), Some(CharWidth { byte_width: 1, char_width: 1, character: 'i' }) ); } #[test] fn test_unicode_no_newlines() { // "test" with an accented 'e' let mut cw = NewlineNormalizedCharWidths::new("t\u{00e9}st"); // go forward assert_eq!( cw.next(), Some(CharWidth { byte_width: 1, char_width: 1, character: 't' }) ); assert_eq!(cw.peek_character(), Some('\u{00e9}')); assert_eq!( cw.next(), Some(CharWidth { byte_width: 2, char_width: 1, character: '\u{00e9}' }) ); assert_eq!(cw.peek_character(), Some('s')); assert_eq!( cw.next(), Some(CharWidth { byte_width: 1, char_width: 1, character: 's' }) ); // go backwards assert_eq!( cw.previous(), Some(CharWidth { byte_width: 1, char_width: 1, character: 's' }) ); assert_eq!( cw.previous(), Some(CharWidth { byte_width: 2, char_width: 1, character: '\u{00e9}' }) ); assert_eq!( cw.previous(), Some(CharWidth { byte_width: 1, char_width: 1, character: 't' }) ); } #[test] fn test_newlines() { let mut cw = NewlineNormalizedCharWidths::new("\n\r\r\n"); // go forward assert_eq!(cw.peek_character(), Some('\n')); assert_eq!( cw.next(), Some(CharWidth { byte_width: 1, char_width: 1, character: '\n' }) ); assert_eq!(cw.peek_character(), Some('\n')); assert_eq!( cw.next(), Some(CharWidth { byte_width: 1, char_width: 1, character: '\n' }) ); assert_eq!(cw.peek_character(), Some('\n')); assert_eq!( cw.next(), Some(CharWidth { byte_width: 2, char_width: 2, character: '\n' }) ); // end of text assert_eq!(cw.peek_character(), None); assert_eq!(cw.next(), None); // go backwards assert_eq!( cw.previous(), Some(CharWidth { byte_width: 2, char_width: 2, character: '\n' }) ); assert_eq!( cw.previous(), Some(CharWidth { byte_width: 1, char_width: 1, character: '\n' }) ); assert_eq!( cw.previous(), Some(CharWidth { byte_width: 1, char_width: 1, character: '\n' }) ); // beginning of text assert_eq!(cw.previous(), None); } #[test] fn test_empty() { let mut cw = NewlineNormalizedCharWidths::new(""); assert_eq!(cw.peek_character(), None); assert_eq!(cw.next(), None); assert_eq!(cw.previous(), None); } } 
LibCST-1.2.0/native/libcst/src/tokenizer/text_position/mod.rs000066400000000000000000000272531456464173300242240ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. mod char_width; use regex::Regex; use std::fmt; use crate::tokenizer::debug_utils::EllipsisDebug; use char_width::NewlineNormalizedCharWidths; pub trait TextPattern { fn match_len(&self, text: &str) -> Option; } impl TextPattern for &Regex { // make sure to anchor your regex with \A fn match_len(&self, text: &str) -> Option { self.find(text).map(|m| m.end()) } } impl TextPattern for &str { // make sure to anchor your regex with \A fn match_len(&self, text: &str) -> Option { if text.starts_with(self) { Some(self.len()) } else { None } } } // This is Clone, since that's needed to support async_hacks, but you probably don't usually want to // clone. Use TextPositionSnapshot instead. #[derive(Clone)] pub struct TextPosition<'t> { text: &'t str, char_widths: NewlineNormalizedCharWidths<'t>, inner_byte_idx: usize, inner_char_column_number: usize, inner_byte_column_number: usize, inner_line_number: usize, } /// A lightweight immutable version of TextPosition that's slightly /// cheaper to construct/store. Used for storing the start position of tokens. #[derive(Clone, PartialEq, Eq, Debug)] pub struct TextPositionSnapshot { pub inner_byte_idx: usize, pub inner_char_column_number: usize, pub inner_line_number: usize, } impl TextPositionSnapshot { pub fn byte_idx(&self) -> usize { self.inner_byte_idx } pub fn char_column_number(&self) -> usize { self.inner_char_column_number } pub fn line_number(&self) -> usize { self.inner_line_number } } impl<'t> TextPosition<'t> { pub fn new(text: &'t str) -> Self { Self { text, char_widths: NewlineNormalizedCharWidths::new(text), inner_byte_idx: 0, inner_char_column_number: 0, inner_byte_column_number: 0, inner_line_number: 1, } } /// Peeks at the next character. Similar to `std::iter::Peekable`, but doesn't modify our /// internal position counters like wrapping this in `Peekable` would. pub fn peek(&mut self) -> Option<::Item> { self.char_widths.peek_character() } /// Matches, but does not consume TextPattern. /// /// Caution: This does not normalize `'\r'` characters, like `peek()` and `next()` do. pub fn matches(&self, pattern: P) -> bool { let rest_of_text = &self.text[self.inner_byte_idx..]; let match_len = pattern.match_len(rest_of_text); match match_len { Some(match_len) => { assert!( !rest_of_text[..match_len].contains(|x| x == '\r' || x == '\n'), "matches pattern must not match a newline", ); true } None => false, } } /// Moves the iterator back one character. Panics if a newline is encountered or if we try to /// back up past the beginning of the text. pub fn backup_no_newline(&mut self) { if let Some(cw) = self.char_widths.previous() { // If we tried to back up across a newline, we'd have to recompute char_column_number, // which would be expensive, so it's unsupported. self.inner_char_column_number = self .inner_char_column_number .checked_sub(1) .expect("cannot back up past the beginning of a line."); self.inner_byte_column_number = self .inner_byte_column_number .checked_sub(cw.byte_width) .expect("cannot back up past the beginning of a line."); self.inner_byte_idx -= cw.byte_width; } else { panic!("Tried to backup past the beginning of the text.") } } /// Tries to consume the given TextPattern, moving the TextPosition forward. 
Returns false if no /// match was found. Does not support newlines. /// /// Panics if a newline is consumed as part of the pattern. pub fn consume(&mut self, pattern: P) -> bool { let rest_of_text = &self.text[self.inner_byte_idx..]; if let Some(len) = pattern.match_len(rest_of_text) { let new_byte_idx = self.inner_byte_idx + len; // Call next() a bunch of times to advance the character counters. There's no way to // shortcut this because we don't know how many characters are in a slice of bytes, // though we could use a faster algorithm that inspects multiple characters at once // (e.g. SIMD). while self.inner_byte_idx < new_byte_idx { // We can't support newline normalization in this API without copying the string, so // rather than exposing that (potentially dangerous) behavior, panic if it happens. assert!( self.next() != Some('\n'), "consume pattern must not match a newline", ); } // this shouldn't be possible for the provided implementations of TextPattern debug_assert!( self.inner_byte_idx == new_byte_idx, "pattern ended on a non-character boundary", ); true } else { false } } pub fn text(&self) -> &'t str { self.text } pub fn slice_from_start_pos(&self, start_pos: &TextPositionSnapshot) -> &'t str { &self.text[start_pos.byte_idx()..self.byte_idx()] } /// Returns the number of bytes we've traversed. This is useful for Rust code that needs to /// slice the input source code, since Rust slices operate on bytes and not unicode codepoints. pub fn byte_idx(&self) -> usize { self.inner_byte_idx } /// Returns the column number in terms of number of characters (unicode codepoints) past the /// beginning of the line. Zero-indexed. pub fn char_column_number(&self) -> usize { self.inner_char_column_number } pub fn byte_column_number(&self) -> usize { self.inner_byte_column_number } /// Returns the one-indexed line number. pub fn line_number(&self) -> usize { self.inner_line_number } } impl Iterator for TextPosition<'_> { type Item = char; /// Gets the next character. This has the side-effect of advancing the internal position /// counters. 
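/// (Added clarification) A normalized newline ('\n', '\r', or "\r\n") bumps
/// `line_number` and resets both column counters to zero; every other
/// character advances the column counters by its char/byte width.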
fn next(&mut self) -> Option { if let Some(cw) = self.char_widths.next() { self.inner_byte_idx += cw.byte_width; match cw.character { '\n' => { self.inner_line_number += 1; self.inner_char_column_number = 0; self.inner_byte_column_number = 0; } _ => { self.inner_char_column_number += cw.char_width; self.inner_byte_column_number += cw.byte_width; } } Some(cw.character) } else { None } } } impl fmt::Debug for TextPosition<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TextPosition") .field("text", &EllipsisDebug) .field("char_widths", &EllipsisDebug) .field("inner_byte_idx", &self.inner_byte_idx) .field("inner_char_column_number", &self.inner_char_column_number) .field("inner_byte_column_number", &self.inner_byte_column_number) .field("inner_line_number", &self.inner_line_number) .finish() } } impl From<&TextPosition<'_>> for TextPositionSnapshot { fn from(tp: &TextPosition) -> Self { Self { inner_byte_idx: tp.inner_byte_idx, inner_char_column_number: tp.inner_char_column_number, inner_line_number: tp.inner_line_number, } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_empty() { let mut pos = TextPosition::new(""); assert_eq!(pos.byte_idx(), 0); assert_eq!(pos.char_column_number(), 0); assert_eq!(pos.line_number(), 1); assert_eq!(pos.peek(), None); assert!(!pos.consume(&Regex::new(r"\Awon't match").unwrap())); assert!(pos.consume(&Regex::new(r"\A").unwrap())); assert_eq!(pos.next(), None); // call next() again to verify that it's fused assert_eq!(pos.next(), None); } #[test] fn test_ascii() { let mut pos = TextPosition::new("abcdefg"); assert_eq!(pos.peek(), Some('a')); assert_eq!(pos.next(), Some('a')); assert_eq!(pos.byte_idx(), 1); assert_eq!(pos.char_column_number(), 1); assert_eq!(pos.line_number(), 1); // consume a few characters with a regex assert!(!pos.consume(&Regex::new(r"\Awon't match").unwrap())); assert!(pos.consume(&Regex::new(r"\Abcd").unwrap())); assert_eq!(pos.byte_idx(), 4); assert_eq!(pos.char_column_number(), 4); assert_eq!(pos.line_number(), 1); // consume the rest of the text assert_eq!(pos.next(), Some('e')); assert_eq!(pos.next(), Some('f')); assert_eq!(pos.next(), Some('g')); assert_eq!(pos.next(), None); assert_eq!(pos.byte_idx(), 7); assert_eq!(pos.char_column_number(), 7); assert_eq!(pos.line_number(), 1); } #[test] fn test_unicode() { let mut pos = TextPosition::new("\u{00e9}abc"); assert_eq!(pos.peek(), Some('\u{00e9}')); assert_eq!(pos.next(), Some('\u{00e9}')); } #[test] fn test_newline_lf() { let mut pos = TextPosition::new("ab\nde"); assert_eq!(pos.next(), Some('a')); assert_eq!(pos.next(), Some('b')); assert_eq!(pos.line_number(), 1); assert_eq!(pos.char_column_number(), 2); assert_eq!(pos.next(), Some('\n')); assert_eq!(pos.line_number(), 2); assert_eq!(pos.char_column_number(), 0); assert_eq!(pos.next(), Some('d')); assert_eq!(pos.next(), Some('e')); assert_eq!(pos.next(), None); assert_eq!(pos.line_number(), 2); assert_eq!(pos.char_column_number(), 2); assert_eq!(pos.byte_idx(), 5); } #[test] fn test_newline_cr() { let mut pos = TextPosition::new("ab\rde"); assert_eq!(pos.next(), Some('a')); assert_eq!(pos.next(), Some('b')); assert_eq!(pos.line_number(), 1); assert_eq!(pos.char_column_number(), 2); assert_eq!(pos.next(), Some('\n')); assert_eq!(pos.line_number(), 2); assert_eq!(pos.char_column_number(), 0); assert_eq!(pos.next(), Some('d')); assert_eq!(pos.next(), Some('e')); assert_eq!(pos.next(), None); assert_eq!(pos.line_number(), 2); assert_eq!(pos.char_column_number(), 2); assert_eq!(pos.byte_idx(), 
5); } #[test] fn test_newline_cr_lf() { let mut pos = TextPosition::new("ab\r\nde"); assert_eq!(pos.next(), Some('a')); assert_eq!(pos.next(), Some('b')); assert_eq!(pos.line_number(), 1); assert_eq!(pos.char_column_number(), 2); assert_eq!(pos.next(), Some('\n')); assert_eq!(pos.line_number(), 2); assert_eq!(pos.char_column_number(), 0); assert_eq!(pos.next(), Some('d')); assert_eq!(pos.next(), Some('e')); assert_eq!(pos.next(), None); assert_eq!(pos.line_number(), 2); assert_eq!(pos.char_column_number(), 2); assert_eq!(pos.byte_idx(), 6); } } LibCST-1.2.0/native/libcst/src/tokenizer/whitespace_parser.rs000066400000000000000000000373651456464173300242520ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree use crate::nodes::{ Comment, EmptyLine, Fakeness, Newline, ParenthesizableWhitespace, ParenthesizedWhitespace, SimpleWhitespace, TrailingWhitespace, }; use memchr::{memchr2, memchr2_iter}; use thiserror::Error; use crate::Token; use super::TokType; #[allow(clippy::upper_case_acronyms, clippy::enum_variant_names)] #[derive(Error, Debug, PartialEq, Eq)] pub enum WhitespaceError { #[error("WTF")] WTF, #[error("Internal error while parsing whitespace: {0}")] InternalError(String), #[error("Failed to parse mandatory trailing whitespace")] TrailingWhitespaceError, } type Result = std::result::Result; #[derive(Debug, PartialEq, Eq, Clone)] pub struct State<'a> { pub line: usize, // one-indexed (to match parso's behavior) pub column: usize, // zero-indexed (to match parso's behavior) pub column_byte: usize, pub absolute_indent: &'a str, pub is_parenthesized: bool, pub byte_offset: usize, } impl<'a> Default for State<'a> { fn default() -> Self { Self { line: 1, column: 0, column_byte: 0, absolute_indent: "", is_parenthesized: false, byte_offset: 0, } } } // TODO pub struct Config<'a> { pub input: &'a str, pub lines: Vec<&'a str>, pub default_newline: &'a str, pub default_indent: &'a str, } impl<'a> Config<'a> { pub fn new(input: &'a str, tokens: &[Token<'a>]) -> Self { let mut default_indent = " "; for tok in tokens { if tok.r#type == TokType::Indent { default_indent = tok.relative_indent.unwrap(); break; } } let mut lines = Vec::new(); let mut start = 0; let mut newline_positions = memchr2_iter(b'\n', b'\r', input.as_bytes()); while let Some(newline_position) = newline_positions.next() { let newline_character = input.as_bytes()[newline_position] as char; let len = if newline_character == '\r' && input.as_bytes().get(newline_position + 1) == Some(&b'\n') { // Skip the next '\n' newline_positions.next(); 2 } else { 1 }; let end = newline_position + len; lines.push(&input[start..end]); start = end; } // Push the last line if it isn't terminated by a newline character if start < input.len() { lines.push(&input[start..]); } let default_newline = match lines.first().map(|line| line.as_bytes()).unwrap_or(&[]) { [.., b'\r', b'\n'] => "\r\n", [.., b'\n'] => "\n", [.., b'\r'] => "\r", _ => "\n", }; Self { input, lines, default_newline, default_indent, } } pub fn has_trailing_newline(&self) -> bool { self.input.ends_with('\n') && !self.input.ends_with("\\\n") && !self.input.ends_with("\\\r\n") } fn get_line(&self, line_number: usize) -> Result<&'a str> { let err_fn = || { WhitespaceError::InternalError(format!( "tried to get line {} which is out of range", line_number )) }; self.lines .get(line_number.checked_sub(1).ok_or_else(err_fn)?) 
.map(|l| &l[..]) .ok_or_else(err_fn) } fn get_line_after_column(&self, line_number: usize, column_index: usize) -> Result<&'a str> { self.get_line(line_number)? .get(column_index..) .ok_or_else(|| { WhitespaceError::InternalError(format!( "Column index {} out of range for line {}", column_index, line_number )) }) } } #[derive(Debug)] enum ParsedEmptyLine<'a> { NoIndent, Line(EmptyLine<'a>), } fn parse_empty_line<'a>( config: &Config<'a>, state: &mut State, override_absolute_indent: Option<&'a str>, ) -> Result> { let mut speculative_state = state.clone(); if let Ok(indent) = parse_indent(config, &mut speculative_state, override_absolute_indent) { let whitespace = parse_simple_whitespace(config, &mut speculative_state)?; let comment = parse_comment(config, &mut speculative_state)?; if let Some(newline) = parse_newline(config, &mut speculative_state)? { *state = speculative_state; return Ok(ParsedEmptyLine::Line(EmptyLine { indent, whitespace, comment, newline, })); } } Ok(ParsedEmptyLine::NoIndent) } fn _parse_empty_lines<'a>( config: &Config<'a>, state: &mut State<'a>, override_absolute_indent: Option<&'a str>, ) -> Result, EmptyLine<'a>)>> { let mut lines = vec![]; loop { let last_state = state.clone(); let parsed_line = parse_empty_line(config, state, override_absolute_indent)?; if *state == last_state { break; } match parsed_line { ParsedEmptyLine::NoIndent => break, ParsedEmptyLine::Line(l) => lines.push((state.clone(), l)), } } Ok(lines) } pub fn parse_empty_lines<'a>( config: &Config<'a>, state: &mut State<'a>, override_absolute_indent: Option<&'a str>, ) -> Result>> { // If override_absolute_indent is Some, then we need to parse all lines up to and including the // last line that is indented at our level. These all belong to the footer and not to the next // line's leading_lines. // // We don't know what the last line with indent=True is, and there could be indent=False lines // interspersed with indent=True lines, so we need to speculatively parse all possible empty // lines, and then unwind to find the last empty line with indent=True. let mut speculative_state = state.clone(); let mut lines = _parse_empty_lines(config, &mut speculative_state, override_absolute_indent)?; if override_absolute_indent.is_some() { // Remove elements from the end until we find an indented line. while let Some((_, empty_line)) = lines.last() { if empty_line.indent { break; } lines.pop(); } } if let Some((final_state, _)) = lines.last() { // update the state to match the last line that we captured *state = final_state.clone(); } Ok(lines.into_iter().map(|(_, e)| e).collect()) } pub fn parse_comment<'a>(config: &Config<'a>, state: &mut State) -> Result>> { let newline_after = config.get_line_after_column(state.line, state.column_byte)?; if newline_after.as_bytes().first() != Some(&b'#') { return Ok(None); } let comment_str = if let Some(idx) = memchr2(b'\n', b'\r', newline_after.as_bytes()) { &newline_after[..idx] } else { newline_after }; advance_this_line( config, state, comment_str.chars().count(), comment_str.len(), )?; Ok(Some(Comment(comment_str))) } pub fn parse_newline<'a>(config: &Config<'a>, state: &mut State) -> Result>> { let newline_after = config.get_line_after_column(state.line, state.column_byte)?; let len = match newline_after.as_bytes() { [b'\n', ..] => 1, [b'\r', b'\n', ..] => 2, [b'\r', ..] 
=> 1, _ => 0, }; if len > 0 { let newline_str = &newline_after[..len]; advance_this_line(config, state, len, len)?; if state.column_byte != config.get_line(state.line)?.len() { return Err(WhitespaceError::InternalError(format!( "Found newline at ({}, {}) but it's not EOL", state.line, state.column ))); } if state.line < config.lines.len() { advance_to_next_line(config, state)?; } return Ok(Some(Newline( if newline_str == config.default_newline { None } else { Some(newline_str) }, Fakeness::Real, ))); } // If we're at the end of the file but not on BOL, that means this is the fake // newline inserted by the tokenizer. if state.byte_offset == config.input.len() && state.column_byte != 0 { return Ok(Some(Newline(None, Fakeness::Fake))); } Ok(None) } pub fn parse_optional_trailing_whitespace<'a>( config: &Config<'a>, state: &mut State, ) -> Result>> { let mut speculative_state = state.clone(); let whitespace = parse_simple_whitespace(config, &mut speculative_state)?; let comment = parse_comment(config, &mut speculative_state)?; if let Some(newline) = parse_newline(config, &mut speculative_state)? { *state = speculative_state; Ok(Some(TrailingWhitespace { whitespace, comment, newline, })) } else { Ok(None) } } pub fn parse_trailing_whitespace<'a>( config: &Config<'a>, state: &mut State, ) -> Result> { match parse_optional_trailing_whitespace(config, state)? { Some(ws) => Ok(ws), _ => Err(WhitespaceError::TrailingWhitespaceError), } } fn parse_indent<'a>( config: &Config<'a>, state: &mut State, override_absolute_indent: Option<&'a str>, ) -> Result { let absolute_indent = override_absolute_indent.unwrap_or(state.absolute_indent); if state.column_byte != 0 { if state.column_byte == config.get_line(state.line)?.len() && state.line == config.lines.len() { Ok(false) } else { Err(WhitespaceError::InternalError( "Column should not be 0 when parsing an index".to_string(), )) } } else { Ok( if config .get_line_after_column(state.line, state.column_byte)? .starts_with(absolute_indent) { state.column_byte += absolute_indent.len(); state.column += absolute_indent.chars().count(); state.byte_offset += absolute_indent.len(); true } else { false }, ) } } fn advance_to_next_line<'a>(config: &Config<'a>, state: &mut State) -> Result<()> { let cur_line = config.get_line(state.line)?; state.byte_offset += cur_line.len() - state.column_byte; state.column = 0; state.column_byte = 0; state.line += 1; Ok(()) } fn advance_this_line<'a>( config: &Config<'a>, state: &mut State, char_count: usize, offset: usize, ) -> Result<()> { let cur_line = config.get_line(state.line)?; if cur_line.len() < state.column_byte + offset { return Err(WhitespaceError::InternalError(format!( "Tried to advance past line {}'s end", state.line ))); } state.column += char_count; state.column_byte += offset; state.byte_offset += offset; Ok(()) } pub fn parse_simple_whitespace<'a>( config: &Config<'a>, state: &mut State, ) -> Result> { let capture_ws = |line, col| -> Result<&'a str> { let line = config.get_line_after_column(line, col)?; let bytes = line.as_bytes(); let mut idx = 0; while idx < bytes.len() { match bytes[idx..] { [b' ' | b'\t' | b'\x0c', ..] => idx += 1, [b'\\', b'\r', b'\n', ..] => idx += 3, [b'\\', b'\r' | b'\n', ..] 
=> idx += 2, _ => break, } } Ok(&line[..idx]) }; let start_offset = state.byte_offset; let mut prev_line: &str; loop { prev_line = capture_ws(state.line, state.column_byte)?; if !prev_line.contains('\\') { break; } advance_to_next_line(config, state)?; } advance_this_line(config, state, prev_line.chars().count(), prev_line.len())?; Ok(SimpleWhitespace( &config.input[start_offset..state.byte_offset], )) } pub fn parse_parenthesizable_whitespace<'a>( config: &Config<'a>, state: &mut State<'a>, ) -> Result<ParenthesizableWhitespace<'a>> { if state.is_parenthesized { if let Some(ws) = parse_parenthesized_whitespace(config, state)? { return Ok(ParenthesizableWhitespace::ParenthesizedWhitespace(ws)); } } parse_simple_whitespace(config, state).map(ParenthesizableWhitespace::SimpleWhitespace) } pub fn parse_parenthesized_whitespace<'a>( config: &Config<'a>, state: &mut State<'a>, ) -> Result<Option<ParenthesizedWhitespace<'a>>> { if let Some(first_line) = parse_optional_trailing_whitespace(config, state)? { let empty_lines = _parse_empty_lines(config, state, None)? .into_iter() .map(|(_, line)| line) .collect(); let indent = parse_indent(config, state, None)?; let last_line = parse_simple_whitespace(config, state)?; Ok(Some(ParenthesizedWhitespace { first_line, empty_lines, indent, last_line, })) } else { Ok(None) } } #[cfg(test)] mod tests { use crate::{tokenize, Comment, Config, Result, SimpleWhitespace}; use super::{parse_comment, parse_simple_whitespace}; #[test] fn config_mixed_newlines() -> Result<'static, ()> { let source = "'' % {\n'test1': '',\r 'test2': '',\r\n}"; let tokens = tokenize(source)?; let config = Config::new(source, &tokens); assert_eq!( &config.lines, &["'' % {\n", "'test1': '',\r", " 'test2': '',\r\n", "}"] ); Ok(()) } fn _parse_simple_whitespace(src: &str) -> Result<SimpleWhitespace> { let tokens = tokenize(src)?; let config = Config::new(src, &tokens); let mut state = Default::default(); Ok(parse_simple_whitespace(&config, &mut state)?) } #[test] fn simple_whitespace_line_continuations() -> Result<'static, ()> { assert_eq!( _parse_simple_whitespace(" \\\n # foo")?, SimpleWhitespace(" \\\n ") ); assert_eq!( _parse_simple_whitespace(" \\\r # foo")?, SimpleWhitespace(" \\\r ") ); assert_eq!( _parse_simple_whitespace(" \\\r\n # foo")?, SimpleWhitespace(" \\\r\n ") ); assert_eq!( _parse_simple_whitespace(" \\\r\n\\\n # foo")?, SimpleWhitespace(" \\\r\n\\\n ") ); Ok(()) } #[test] fn simple_whitespace_mixed() -> Result<'static, ()> { assert_eq!( _parse_simple_whitespace(" \t\x0clol")?, SimpleWhitespace(" \t\x0c"), ); Ok(()) } fn _parse_comment(src: &str) -> Result<Option<Comment>> { let tokens = tokenize(src)?; let config = Config::new(src, &tokens); let mut state = Default::default(); Ok(parse_comment(&config, &mut state)?)
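// For example (a sketch of the behavior the tests below exercise):
// _parse_comment("# foo\n# bar") consumes text only up to the first newline
// and yields Some(Comment("# foo")), while _parse_comment("foo") yields None.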
} #[test] fn single_comment() -> Result<'static, ()> { assert_eq!(_parse_comment("# foo\n# bar")?, Some(Comment("# foo"))); Ok(()) } #[test] fn comment_until_eof() -> Result<'static, ()> { assert_eq!(_parse_comment("#")?, Some(Comment("#"))); Ok(()) } #[test] fn no_comment() -> Result<'static, ()> { assert_eq!(_parse_comment("foo")?, None); assert_eq!(_parse_comment("\n")?, None); Ok(()) } } LibCST-1.2.0/native/libcst/tests/000077500000000000000000000000001456464173300165175ustar00rootroot00000000000000LibCST-1.2.0/native/libcst/tests/.gitattributes000066400000000000000000000000511456464173300214060ustar00rootroot00000000000000fixtures/mixed_newlines.py autocrlf=falseLibCST-1.2.0/native/libcst/tests/fixtures/000077500000000000000000000000001456464173300203705ustar00rootroot00000000000000LibCST-1.2.0/native/libcst/tests/fixtures/big_binary_operator.py000066400000000000000000000020431456464173300247610ustar00rootroot00000000000000( # 350 binary operators lets go 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' + 'X' + 'Y' + 'Z' + 'Q' + 'T' ) LibCST-1.2.0/native/libcst/tests/fixtures/class_craziness.py000066400000000000000000000005701456464173300241320ustar00rootroot00000000000000class Foo: ... class Bar : ... class Old ( ) : gold : int class OO ( Foo ) : ... class OOP ( Foo , Bar, ) : pass class OOPS ( Foo , ) : pass class OOPSI ( Foo, * Bar , metaclass = foo , ): pass class OOPSIE ( list , *args, kw = arg , ** kwargs ) : what : does_this_even = mean def __init__(self) -> None: self.foo: Bar = Bar() LibCST-1.2.0/native/libcst/tests/fixtures/comments.py000066400000000000000000000037071456464173300225760ustar00rootroot00000000000000#!/usr/bin/env python3 # fmt: on # Some license here. # # Has many lines. Many, many lines. # Many, many, many lines. """Module docstring. Possibly also many, many lines. """ import os.path import sys import a from b.c.d.e import X # some noqa comment try: import fast except ImportError: import slow as fast # Some comment before a function. y = 1 ( # some strings y # type: ignore ) def function(default=None): """Docstring comes first. Possibly many lines. """ # FIXME: Some comment about why this function is crap but still in production. import inner_imports if inner_imports.are_evil(): # Explains why we have this if. # In great detail indeed. x = X() return x.method1() # type: ignore # This return is also commented for some reason. return default # Explains why we use global state. GLOBAL_STATE = {"a": a(1), "b": a(2), "c": a(3)} # Another comment! # This time two lines. class Foo: """Docstring for class Foo. Example from Sphinx docs.""" #: Doc comment for class attribute Foo.bar. #: It can have multiple lines. bar = 1 flox = 1.5 #: Doc comment for Foo.flox. 
One line only. baz = 2 """Docstring for class attribute Foo.baz.""" def __init__(self): #: Doc comment for instance attribute qux. self.qux = 3 self.spam = 4 """Docstring for instance attribute spam.""" #'

This is pweave!

@fast(really=True) async def wat(): # This comment, for some reason \ # contains a trailing backslash. async with X.open_async() as x: # Some more comments result = await x.method1() # Comment after ending a block. if result: print("A OK", file=sys.stdout) # Comment between things. print() if True: # Hanging comments # because why not pass # Some closing comments. # Maybe Vim or Emacs directives for formatting. # Who knows. LibCST-1.2.0/native/libcst/tests/fixtures/comparisons.py000066400000000000000000000007271456464173300233050ustar00rootroot00000000000000if not 1: pass if 1 and 1: pass if 1 or 1: pass if not not not 1: pass if not 1 and 1 and 1: pass if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass if 1: pass #x = (1 == 1) if 1 == 1: pass if 1 != 1: pass if 1 < 1: pass if 1 > 1: pass if 1 <= 1: pass if 1 >= 1: pass if x is x: pass #if x is not x: pass #if 1 in (): pass #if 1 not in (): pass if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 in x is x is x: pass #if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in x is x is not x: pass LibCST-1.2.0/native/libcst/tests/fixtures/dangling_indent.py000066400000000000000000000000231456464173300240610ustar00rootroot00000000000000if 1: pass LibCST-1.2.0/native/libcst/tests/fixtures/decorated_function_without_body.py000066400000000000000000000000441456464173300273770ustar00rootroot00000000000000@hello @bello def f () : ...LibCST-1.2.0/native/libcst/tests/fixtures/dysfunctional_del.py000066400000000000000000000001741456464173300244520ustar00rootroot00000000000000# dysfunctional_del.py del a del a[1] del a.b.c del ( a, b , c ) del [ a, b , c ] del a , b, c del a[1] , b [ 2]LibCST-1.2.0/native/libcst/tests/fixtures/expr.py000066400000000000000000000234151456464173300217250ustar00rootroot00000000000000... "some_string" b"\\xa3" Name None True False 1 1.0 1j True or False True or False or None True and False True and False and None (Name1 and Name2) or Name3 Name1 and Name2 or Name3 Name1 or (Name2 and Name3) Name1 or Name2 and Name3 (Name1 and Name2) or (Name3 and Name4) Name1 and Name2 or Name3 and Name4 Name1 or (Name2 and Name3) or Name4 Name1 or Name2 and Name3 or Name4 v1 << 2 1 >> v2 1 % finished 1 + v2 - v3 * 4 ^ 5 ** v6 / 7 // 8 ((1 + v2) - (v3 * 4)) ^ (((5 ** v6) / 7) // 8) not great ~great +value -1 ~int and not v1 ^ 123 + v2 | True (~int) and (not ((v1 ^ (123 + v2)) | True)) +(really ** -(confusing ** ~(operator ** -precedence))) flags & ~ select.EPOLLIN and waiters.write_task is not None lambda arg: None lambda arg : None lambda a=True: a lambda a=True : a lambda a, b, c=True: a lambda a, b, c=True, *, d=(1 << v2), e='str': a lambda a, b, c=True, *vararg, d=(v1 << 2), e='str', **kwargs: a + b lambda a, b, c=True, *vararg, d=(v1 << 2), e='str', **kwargs : a + b manylambdas = lambda x=lambda y=lambda z=1: z: y(): x() foo = (lambda port_id, ignore_missing: {"port1": port1_resource, "port2": port2_resource}[port_id]) 1 if True else 2 _ if 0else _ str or None if True else str or bytes or None (str or None) if True else (str or bytes or None) str or None if (1 if True else 2) else str or bytes or None (str or None) if (1 if True else 2) else (str or bytes or None) ((super_long_variable_name or None) if (1 if super_long_test_name else 2) else (str or bytes or None)) {'2.7': dead, '3.7': (long_live or die_hard)} {'2.7': dead, '3.7': (long_live or die_hard), **{'3.6': verygood}} {**a, **b, **c} {"2.7", "3.6", "3.7", "3.8", "3.9"} {"2.7", "3.6", "3.7", "3.8", "3.9",} {"2.7", "3.6", "3.7", "3.8", "3.9", ("4.0" if gilectomy else "3.10")} ({"a": "b"}, (True or False), 
(+value), "string", b"bytes") or None () (1,) (1, 2) (1, 2, 3) [] [ ] [ 1 , ] [1, 2, 3, 4, 5, 6, 7, 8, 9, (10 or A), (11 or B), (12 or C)] [ 1, 2, 3, ] [*a] [*range(10)] [ *a, 4, 5, ] [ 4, *a, 5, ] [ this_is_a_very_long_variable_which_will_force_a_delimiter_split, element, another, *more, ] { } { 1 , } { 1 : 2 , } {i for i in (1, 2, 3)} {(i ** 2) for i in (1, 2, 3)} {(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} {((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} [i for i in (1, 2, 3)] [(i ** 2) for i in (1, 2, 3)] [(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] [((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] {i: 0 for i in (1, 2, 3)} {i: j for i, j in ((1, "a"), (2, "b"), (3, "c"))} {a: b * 2 for a, b in dictionary.items()} {a: b * -2 for a, b in dictionary.items()} { k: v for k, v in this_is_a_very_long_variable_which_will_cause_a_trailing_comma_which_breaks_the_comprehension } Python3 > Python2 > COBOL Life is Life call() call(arg) call(kwarg="hey") call(arg, kwarg="hey") call(arg, another, kwarg="hey", **kwargs) call( this_is_a_very_long_variable_which_will_force_a_delimiter_split, arg, another, kwarg="hey", **kwargs, ) # note: no trailing comma pre-3.6 call(*gidgets[:2]) call(a, *gidgets[:2]) call(**screen_kwargs) call(b, **screen_kwargs) call()()()()()() call(**self.screen_kwargs) call(b, **self.screen_kwargs) call(a=a, *args) call(a=a, *args,) call(a=a, **kwargs) call(a=a, **kwargs,) lukasz.langa.pl call.me(maybe) 1 .real 1.0 .real ....__class__ list[str] dict[str, int] tuple[str, ...] tuple[str, int, float, dict[str, int]] tuple[ str, int, float, dict[str, int], ] very_long_variable_name_filters: t.List[ t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]], ] xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__) ) xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__) ) xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__) ) # type: ignore (str or None) if (sys.version_info[0] > (3,)) else (str or bytes or None) {"2.7": dead, "3.7": long_live or die_hard} {"2.7", "3.6", "3.7", "3.8", "3.9", "4.0" if gilectomy else "3.10"} [1, 2, 3, 4, 5, 6, 7, 8, 9, 10 or A, 11 or B, 12 or C] (SomeName) SomeName (Good, Bad, Ugly) (i for i in (1, 2, 3)) ((i ** 2) for i in (1, 2, 3)) ((i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) (((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) (*starred,) { "id": "1", "type": "type", "started_at": now(), "ended_at": now() + timedelta(days=10), "priority": 1, "import_session_id": 1, **kwargs, } a = (1,) b = (1,) c = 1 d = (1,) + a + (2,) e = (1,).count(1) f = 1, *range(10) g = 1, *"ten" what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set( vars_to_remove ) what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set( vars_to_remove ) result = ( session.query(models.Customer.id) .filter( models.Customer.account_id == account_id, models.Customer.email == email_address ) .order_by(models.Customer.id.asc()) .all() ) result = ( session.query(models.Customer.id) .filter( models.Customer.account_id == account_id, models.Customer.email == email_address ) .order_by( models.Customer.id.asc(), ) .all() ) Ø = set() authors.łukasz.say_thanks() authors.lukasz.say_thanks() mapping = { A: 0.25 * (10.0 / 12), B: 0.1 * (10.0 / 12), C: 0.1 * (10.0 / 12), D: 0.1 * (10.0 / 
12), } [ a for [ a , ] in [ [ 1 ] ] ] def gen(): if 1: if 2: if 3: if not is_value_of_type( subkey, type_args[0], # key type is always invariant invariant_check=True, ): return False yield from outside_of_generator a = yield b = yield c = yield async def f(): await some.complicated[0].call(with_args=(True or (1 is not 1))) lambda : None print(*[] or [1]) print(**{1: 3} if False else {x: x for x in range(3)}) print(*lambda x: x) assert not Test, "Short message" assert this is ComplexTest and not requirements.fit_in_a_single_line( force=False ), "Short message" assert parens is TooMany for (x,) in (1,), (2,), (3,): ... for y in (): ... for z in (i for i in (1, 2, 3)): ... for i in call(): ... for j in 1 + (2 + 3): ... else: ... while this and that: ... while this and that: ... else: ... for ( addr_family, addr_type, addr_proto, addr_canonname, addr_sockaddr, ) in socket.getaddrinfo("google.com", "http"): pass a = ( aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz ) a = ( aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp not in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz ) a = ( aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp is qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz ) a = ( aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp is not qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz ) if ( threading.current_thread() != threading.main_thread() and threading.current_thread() != threading.main_thread() or signal.getsignal(signal.SIGINT) != signal.default_int_handler ): return True if ( aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ): return True if ( aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ): return True if ( aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ): return True if ( aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ): return True if ( aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ): return True if ( aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa / aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ): return True if ( ~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n ): return True if ( ~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n ): return True if ( ~aaaaaaaaaaaaaaaa.a + aaaaaaaaaaaaaaaa.b - aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h ^ aaaaaaaaaaaaaaaa.i << aaaaaaaaaaaaaaaa.k >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n ): return True aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaa * ( aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa ) / (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa >> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa << 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbb >> bbbb * bbbb aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ^ bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ^ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa a += B a[x] @= foo().bar this.is_not >>= a.monad last_call() # standalone comment at ENDMARKER LibCST-1.2.0/native/libcst/tests/fixtures/expr_statement.py000066400000000000000000000002211456464173300237770ustar00rootroot000000000000001 1, 2, 3 x = 1 x = 1, 2, 3 x = y = z = 1, 2, 3 x, y, z = 1, 2, 3 abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4) ( ( ( ... ) ) ) a , = bLibCST-1.2.0/native/libcst/tests/fixtures/fun_with_func_defs.py000066400000000000000000000037201456464173300246030ustar00rootroot00000000000000def f(a, /,): pass def f(a, / ,): pass def f(a, / ): pass def f(a, /, c, d, e): pass def f(a, /, c, *, d, e): pass def f(a, /, c, *, d, e, **kwargs): pass def f(a=1, /,): pass def f(a=1, /, b=2, c=4): pass def f(a=1, /, b=2, *, c=4): pass def f(a=1, /, b=2, *, c): pass def f(a=1, /, b=2, *, c=4, **kwargs): pass def f(a=1, /, b=2, *, c, **kwargs,): pass def g( a, /, ): pass def f(a, /, c, d, e): pass def f(a, /, c, *, d, e): pass def foo(a, * , bar): pass def f( a, /, c, *, d, e, **kwargs, ): pass def f( a=1, /, ): pass def say_hello( self, user: str, / ): print('Hello ' + user) def f(a=1, /, b=2, c=4): pass def f(a=1, /, b=2, *, c=4): pass def f(a=1, /, b=2, *, c): pass def f( a=1, /, b=2, *, c=4, **kwargs, ): pass def f( a=1, /, b=2, *, c, **kwargs, ): pass async def foo ( bar : Baz , ) -> zooooooooom : ... async def foo(bar : Baz = 0 ) : ... async def foo() -> Bar: ... async def outer( foo ) -> Bar : def inner(lol: Lol) -> None: async def core (): await lol def second(inner): pass def stars ( yes : bool = True , / , noes : List[bool] = [ * falses ], * all : The[Rest], but : Wait[Theres[More]] , ** it : ends[now] , ) -> ret: pass def stars ( yes : bool = True , / , noes : List[bool] = [ * falses ], * all : The[Rest], but : Wait[Theres[More]] , ** it : ends[now[without_a_comma]] ) -> ret : pass def foo(bar: (yield)) -> (yield): something: (yield another) def foo( bar: (yield)) -> (yield) : something: (yield another) return 3 # no return # yes def f(): for (yield 1)[1] in [1]: pass @decorators # foo @woohoo def f(): pass @getattr(None, '', lambda a: lambda b: a(b+1)) def f(): ... @a(now_this = lol) def f(): ... LibCST-1.2.0/native/libcst/tests/fixtures/global_nonlocal.py000066400000000000000000000000651456464173300240700ustar00rootroot00000000000000global a global b , c, d nonlocal a nonlocal a , bLibCST-1.2.0/native/libcst/tests/fixtures/import.py000066400000000000000000000006401456464173300222540ustar00rootroot00000000000000# 'import' dotted_as_names import sys import time, sys # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names) from time import time from time import (time) from sys import path, argv from sys import (path, argv) from sys import (path, argv,) from sys import * from a import (b, ) from . import a from .a import b from ... import a from ...a import b from .... import a from ...... 
import aLibCST-1.2.0/native/libcst/tests/fixtures/indents_but_no_eol_before_eof.py000066400000000000000000000000561456464173300267670ustar00rootroot00000000000000if 1: if 2: if 3: passLibCST-1.2.0/native/libcst/tests/fixtures/just_a_comment_without_nl.py000066400000000000000000000000421456464173300262210ustar00rootroot00000000000000# just a comment without a newlineLibCST-1.2.0/native/libcst/tests/fixtures/malicious_match.py000066400000000000000000000016241456464173300241060ustar00rootroot00000000000000 # foo match ( foo ) : #comment # more comments case False : # comment ... case ( True ) : ... case _ : ... case ( _ ) : ... # foo # bar match x: case "StringMatchValue" : pass case [1, 2] : pass case [ 1 , * foo , * _ , ]: pass case [ [ _, ] , *_ ]: pass case {1: _, 2: _}: pass case { "foo" : bar , ** rest } : pass case { 1 : {**rest} , } : pass case Point2D(): pass case Cls ( 0 , ) : pass case Cls ( x=0, y = 2) :pass case Cls ( 0 , 1 , x = 0 , y = 2 ) : pass case [x] as y: pass case [x] as y : pass case (True)as x:pass case Foo:pass case (Foo):pass case ( Foo ) : pass case [ ( Foo ) , ]: pass case Foo|Bar|Baz : pass case Foo | Bar | ( Baz): pass case x,y , * more :pass case y.z: pass case 1, 2: pass LibCST-1.2.0/native/libcst/tests/fixtures/mixed_newlines.py000066400000000000000000000000471456464173300237550ustar00rootroot00000000000000"" % { 'test1': '', 'test2': '', } LibCST-1.2.0/native/libcst/tests/fixtures/pep646.py000066400000000000000000000012451456464173300217700ustar00rootroot00000000000000# see https://github.com/python/cpython/pull/31018/files#diff-3f516b60719dd445d33225e4f316b36e85c9c51a843a0147349d11a005c55937 A[*b] A[ * b ] A[ * b , ] A[*b] = 1 del A[*b] A[* b , * b] A[ b, *b] A[* b, b] A[ * b,b, b] A[b, *b, b] A[*A[b, *b, b], b] A[b, ...] A[*A[b, ...]] A[ * ( 1,2,3)] A[ * [ 1,2,3]] A[1:2, *t] A[1:, *t, 1:2] A[:, *t, :] A[*t, :, *t] A[* returns_list()] A[*returns_list(), * returns_list(), b] def f1(*args: *b): pass def f2(*args: *b, arg1): pass def f3(*args: *b, arg1: int): pass def f4(*args: *b, arg1: int = 1): pass def f(*args: *tuple[int, ...]): pass def f(*args: *tuple[int, *Ts]): pass def f() -> tuple[int, *tuple[int, ...]]: passLibCST-1.2.0/native/libcst/tests/fixtures/raise.py000066400000000000000000000001021456464173300220360ustar00rootroot00000000000000raise raise foo raise foo from bar raise lol() from f() + 1LibCST-1.2.0/native/libcst/tests/fixtures/smol_statements.py000066400000000000000000000001561456464173300241650ustar00rootroot00000000000000def f(): pass ; break ; continue ; return ; return foo assert foo , bar ; a += 2 LibCST-1.2.0/native/libcst/tests/fixtures/spacious_spaces.py000066400000000000000000000000061456464173300241220ustar00rootroot00000000000000 LibCST-1.2.0/native/libcst/tests/fixtures/starry_tries.py000066400000000000000000000006021456464173300234720ustar00rootroot00000000000000#foo. 
try : pass # foo except * lol as LOL : pass except * f: # foo pass else : pass finally : foo try: pass except*f: pass finally: pass try: # 1 try: # 2 pass # 3 # 4 finally: # 5 pass # 6 # 7 except *foo: #8 pass #9 LibCST-1.2.0/native/libcst/tests/fixtures/suicidal_slices.py000066400000000000000000000006501456464173300241020ustar00rootroot00000000000000slice[0] slice[0:1] slice[0:1:2] slice[:] slice[:-1] slice[1:] slice[::-1] slice[d :: d + 1] slice[:c, c - 1] numpy[:, 0:1] numpy[:, :-1] numpy[0, :] numpy[:, i] numpy[0, :2] numpy[:N, 0] numpy[:2, :4] numpy[2:4, 1:5] numpy[4:, 2:] numpy[:, (0, 1, 2, 5)] numpy[0, [0]] numpy[:, [i]] numpy[1 : c + 1, c] numpy[-(c + 1) :, d] numpy[:, l[-2]] numpy[:, ::-1] numpy[np.newaxis, :] ( spaces [:: , a : , a : a : a , ] )LibCST-1.2.0/native/libcst/tests/fixtures/super_strings.py000066400000000000000000000015701456464173300236540ustar00rootroot00000000000000_ = "" _ = '' _ = """""" _ = '''''' _ = 'a' "string" 'that' r"is" 'concatenated ' b"string " b"and non f" rb'string' ( "parenthesized" "concatenated" """triple quoted """ ) _ = f"string" f"string" "bonanza" f'starts' r"""here""" _ = f"something {{**not** an expression}} {but(this._is)} {{and this isn't.}} end" _(f"ok { expr = !r: aosidjhoi } end") print(f"{self.ERASE_CURRENT_LINE}{self._human_seconds(elapsed_time)} {percent:.{self.pretty_precision}f}% complete, {self.estimate_completion(elapsed_time, finished, left)} estimated for {left} files to go...") f"{"\n".join()}" f"___{ x }___" f"___{( x )}___" f'\{{\}}' f"regexp_like(path, '.*\{file_type}$')" f"\lfoo" f"{_:{_:}{a}}" f"foo {f"bar {x}"} baz" f'some words {a+b:.3f} more words {c+d=} final words' f"{'':*^{1:{1}}}" f"{'':*^{1:{1:{1}}}}" f"{f"{f"{f"{f"{f"{1+1}"}"}"}"}"}" LibCST-1.2.0/native/libcst/tests/fixtures/terrible_tries.py000066400000000000000000000006411456464173300237610ustar00rootroot00000000000000#foo. try : bar() finally : pass try : pass # foo except lol as LOL : pass except : # foo pass else : pass finally : foo try: pass except: pass finally: pass try: # 1 try: # 2 pass # 3 # 4 finally: # 5 pass # 6 # 7 except foo: #8 pass #9 LibCST-1.2.0/native/libcst/tests/fixtures/trailing_comment_without_nl.py000066400000000000000000000000171456464173300265470ustar00rootroot00000000000000 # hehehe >:)LibCST-1.2.0/native/libcst/tests/fixtures/trailing_whitespace.py000066400000000000000000000000341456464173300247640ustar00rootroot00000000000000 x = 42 print(x) LibCST-1.2.0/native/libcst/tests/fixtures/tuple_shenanigans.py000066400000000000000000000006071456464173300244540ustar00rootroot00000000000000(1, 2) (1, 2, 3) # alright here we go. () (()) (((())), ()) ( # evil >:) # evil >:( ) # ... (1,) ( * 1 , * 2 ,) *_ = (l,) () = x ( ) = ( x, ) (x) = (x) ( x , ) = x ( x , *y , * z , ) = l ( x , *y , * z , ) = ( x , *y , * z , ) = ( x , *y , * z , x ) ( x , # :) bar, * baz , ) =\ ( (let, *s, ( ) ) , nest , them , ( * t , * u , * p , l , * e , s , ) )LibCST-1.2.0/native/libcst/tests/fixtures/type_parameters.py000066400000000000000000000024731456464173300241540ustar00rootroot00000000000000# fmt: off type TA = int type TA1[A] = lambda A: A class Outer[A]: type TA1[A] = None type TA1[A, B] = dict[A, B] class Outer[A]: def inner[B](self): type TA1[C] = TA1[A, B] | int return TA1 def more_generic[T, *Ts, **P](): type TA[T2, *Ts2, **P2] = tuple[Callable[P, tuple[T, *Ts]], Callable[P2, tuple[T2, *Ts2]]] return TA type Recursive = Recursive def func[A](A): return A class ClassA: def func[__A](self, __A): return __A class ClassA[A, B](dict[A, B]): ... 
class ClassA[A]: def funcB[B](self): class ClassC[C]: def funcD[D](self): return lambda: (A, B, C, D) return ClassC class Child[T](Base[lambda: (int, outer_var, T)]): ... type Alias[T: ([T for T in (T, [1])[1]], T)] = [T for T in T.__name__] type Alias[T: [lambda: T for T in (T, [1])[1]]] = [lambda: T for T in T.__name__] class Foo[T: Foo, U: (Foo, Foo)]: pass def func[T](a: T = "a", *, b: T = "b"): return (a, b) def func1[A: str, B: str | int, C: (int, str)](): return (A, B, C) type A [ T , * V ] =foo;type B=A def AAAAAAAAAAAAAAAAAA [ T : int ,*Ts , ** TT ] ():pass class AAAAAAAAAAAAAAAAAA [ T : int ,*Ts , ** TT ] :pass def yikes[A:int,*B,**C](*d:*tuple[A,*B,...])->A:passLibCST-1.2.0/native/libcst/tests/fixtures/vast_emptiness.py000066400000000000000000000000001456464173300237740ustar00rootroot00000000000000LibCST-1.2.0/native/libcst/tests/fixtures/with_wickedness.py000066400000000000000000000012601456464173300241330ustar00rootroot00000000000000# with_wickedness with foo : pass with foo, bar: pass with (foo, bar): pass with (foo, bar,): pass with foo, bar as bar: pass with (foo, bar as bar): pass with (foo, bar as bar,): pass async def f(): async with foo: with bar: pass async with foo : pass async with foo, bar: pass async with (foo, bar): pass async with (foo, bar,): pass async with foo, bar as bar: pass async with (foo, bar as bar): pass async with (foo, bar as bar,): pass async with foo(1+1) as bar , 1 as (a, b, ) , 2 as [a, b] , 3 as a[b] : pass LibCST-1.2.0/native/libcst/tests/fixtures/wonky_walrus.py000066400000000000000000000003121456464173300235020ustar00rootroot00000000000000( foo := 5 ) any((lastNum := num) == 1 for num in [1, 2, 3]) [(lastNum := num) == 1 for num in [1, 2, 3]] while f := x(): pass if f := x(): pass f(y:=1) f(x, y := 1 ) _[_:=10]LibCST-1.2.0/native/libcst/tests/parser_roundtrip.rs000066400000000000000000000031511456464173300224670ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. 
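//
// Test strategy (summarized from the code below): every file under
// tests/fixtures is parsed with parse_module, re-emitted through Codegen, and
// asserted to be byte-identical to the input; on a mismatch, visualize()
// renders spaces as ▩ and line endings as ↩ so whitespace differences are
// visible in the diff output.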
// // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree use difference::assert_diff; use itertools::Itertools; use libcst_native::{parse_module, prettify_error, Codegen}; use std::{ iter::once, path::{Component, PathBuf}, }; fn all_fixtures() -> impl Iterator<Item = (PathBuf, String)> { let mut path = PathBuf::from(file!()); path.pop(); path = path .components() .skip(1) .chain(once(Component::Normal("fixtures".as_ref()))) .collect(); path.read_dir().expect("read_dir").into_iter().map(|file| { let path = file.unwrap().path(); let contents = std::fs::read_to_string(&path).expect("reading file"); (path, contents) }) } #[test] fn roundtrip_fixtures() { for (path, input) in all_fixtures() { let input = if let Some(stripped) = input.strip_prefix('\u{feff}') { stripped } else { &input }; let m = match parse_module(input, None) { Ok(m) => m, Err(e) => panic!("{}", prettify_error(e, format!("{:#?}", path).as_ref())), }; let mut state = Default::default(); m.codegen(&mut state); let generated = state.to_string(); if generated != input { let got = visualize(&generated); let expected = visualize(input); assert_diff!(expected.as_ref(), got.as_ref(), "", 0); } } } fn visualize(s: &str) -> String { s.replace(' ', "▩").lines().join("↩\n") } LibCST-1.2.0/native/libcst_derive/000077500000000000000000000000001456464173300167135ustar00rootroot00000000000000LibCST-1.2.0/native/libcst_derive/Cargo.toml000066400000000000000000000005611456464173300206450ustar00rootroot00000000000000[package] name = "libcst_derive" version = "1.2.0" edition = "2018" description = "Proc macro helpers for libcst." license-file = "LICENSE" repository = "https://github.com/Instagram/LibCST" documentation = "https://libcst.rtfd.org" keywords = ["macros", "python"] [lib] proc-macro = true [dependencies] syn = "2.0" quote = "1.0" [dev-dependencies] trybuild = "1.0" LibCST-1.2.0/native/libcst_derive/LICENSE000066400000000000000000000111341456464173300177200ustar00rootroot00000000000000All contributions towards LibCST are MIT licensed. Some Python files have been derived from the standard library and are therefore PSF licensed. Modifications on these files are dual licensed (both MIT and PSF). These files are: - libcst/_parser/base_parser.py - libcst/_parser/parso/utils.py - libcst/_parser/parso/pgen2/generator.py - libcst/_parser/parso/pgen2/grammar_parser.py - libcst/_parser/parso/python/py_token.py - libcst/_parser/parso/python/tokenize.py - libcst/_parser/parso/tests/test_fstring.py - libcst/_parser/parso/tests/test_tokenize.py - libcst/_parser/parso/tests/test_utils.py - native/libcst/src/tokenizer/core/mod.rs - native/libcst/src/tokenizer/core/string_types.rs Some Python files have been taken from dataclasses and are therefore Apache licensed. Modifications on these files are licensed under Apache 2.0 license. These files are: - libcst/_add_slots.py ------------------------------------------------------------------------------- MIT License Copyright (c) Meta Platforms, Inc. and affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------- PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. 
------------------------------------------------------------------------------- APACHE LICENSE, VERSION 2.0 http://www.apache.org/licenses/LICENSE-2.0 LibCST-1.2.0/native/libcst_derive/src/000077500000000000000000000000001456464173300175025ustar00rootroot00000000000000LibCST-1.2.0/native/libcst_derive/src/codegen.rs000066400000000000000000000042241456464173300214560ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree use proc_macro::TokenStream; use quote::{quote, quote_spanned}; use syn::{self, spanned::Spanned, Data, DataEnum, DeriveInput, Fields, FieldsUnnamed}; pub(crate) fn impl_codegen(ast: &DeriveInput) -> TokenStream { match &ast.data { Data::Enum(e) => impl_enum(ast, e), Data::Struct(s) => quote_spanned! { s.struct_token.span() => compile_error!("Struct type is not supported") } .into(), Data::Union(u) => quote_spanned! { u.union_token.span() => compile_error!("Union type is not supported") } .into(), } } fn impl_enum(ast: &DeriveInput, e: &DataEnum) -> TokenStream { let mut varnames = vec![]; for var in e.variants.iter() { match &var.fields { Fields::Named(n) => { return quote_spanned! { n.span() => compile_error!("Named enum fields not supported") } .into() } f @ Fields::Unit => { return quote_spanned! { f.span() => compile_error!("Empty enum variants not supported") } .into() } Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { if unnamed.len() > 1 { return quote_spanned! { unnamed.span() => compile_error!("Multiple unnamed fields not supported") } .into(); } varnames.push(&var.ident); } } } let ident = &ast.ident; let generics = &ast.generics; let gen = quote! { impl<'a> Codegen<'a> for #ident #generics { fn codegen(&self, state: &mut CodegenState<'a>) { match self { #(Self::#varnames(x) => x.codegen(state),)* } } } }; gen.into() } LibCST-1.2.0/native/libcst_derive/src/cstnode.rs000066400000000000000000000317601456464173300215160ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree use proc_macro::TokenStream; use quote::{format_ident, quote, quote_spanned, ToTokens}; use syn::{ self, parse::{Parse, ParseStream}, parse_quote, punctuated::{Pair, Punctuated}, spanned::Spanned, token::Comma, AngleBracketedGenericArguments, Attribute, Data, DataEnum, DataStruct, DeriveInput, Field, Fields, FieldsNamed, FieldsUnnamed, GenericArgument, Generics, Ident, Meta, Path, PathArguments, PathSegment, Token, Type, TypePath, Visibility, }; pub(crate) struct CSTNodeParams { traits: Punctuated<SupportedTrait, Comma>, } #[derive(PartialEq, Eq)] enum SupportedTrait { ParenthesizedNode, Codegen, Inflate, NoIntoPy, Default, } pub(crate) fn impl_cst_node(ast: DeriveInput, args: CSTNodeParams) -> TokenStream { match ast.data { Data::Enum(e) => impl_enum(args, ast.attrs, ast.vis, ast.ident, ast.generics, e), Data::Struct(s) => impl_struct(args, ast.attrs, ast.vis, ast.ident, ast.generics, s), Data::Union(u) => quote_spanned!
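// (Error-reporting idiom used throughout these derives: quote_spanned! stamps
// the generated compile_error! with the span of the offending token -- here
// u.union_token.span() -- so rustc points its diagnostic at the user's
// `union` keyword rather than at the macro implementation.)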
{ u.union_token.span() => compile_error!("Union type is not supported") } .into(), } } impl CSTNodeParams { fn has_trait(&self, treyt: &SupportedTrait) -> bool { self.traits.iter().any(|x| x == treyt) } } impl Parse for SupportedTrait { fn parse(input: ParseStream) -> syn::Result<Self> { if input.peek(Ident) { let id: Ident = input.parse()?; return match id.to_string().as_str() { "ParenthesizedNode" => Ok(Self::ParenthesizedNode), "Codegen" => Ok(Self::Codegen), "Inflate" => Ok(Self::Inflate), "NoIntoPy" => Ok(Self::NoIntoPy), "Default" => Ok(Self::Default), _ => Err(input.error("Not a supported trait to derive for cst_node")), }; } Err(input.error("Pass in trait names to be derived")) } } impl Parse for CSTNodeParams { fn parse(input: ParseStream) -> syn::Result<Self> { Ok(Self { traits: input.parse_terminated(SupportedTrait::parse, Token![,])?, }) } } // enum Foo<'a> { // Variant(Box<Bar<'a>>), // } // => // enum Foo<'a> { // Variant(Box<Bar<'a>>), // } // enum DeflatedFoo<'r, 'a> { // Variant(Box<DeflatedBar<'r, 'a>>), // } fn impl_enum( args: CSTNodeParams, mut attrs: Vec<Attribute>, vis: Visibility, ident: Ident, generics: Generics, mut e: DataEnum, ) -> TokenStream { let deflated_vis = vis.clone(); let deflated_ident = format_ident!("Deflated{}", &ident); let deflated_generics: Generics = parse_quote!(<'r, 'a>); let mut deflated_variant_tokens = vec![]; for var in e.variants.iter_mut() { let (inflated_fields, deflated_fields) = impl_fields(var.fields.clone()); var.fields = deflated_fields; deflated_variant_tokens.push(var.to_token_stream()); var.fields = inflated_fields; } add_inflated_attrs(&args, &mut attrs); let inflated = DeriveInput { attrs, vis, ident, generics, data: Data::Enum(e), }; let deflated_attrs = get_deflated_attrs(&args); let gen = quote! { #[derive(Debug, PartialEq, Eq, Clone)] #inflated #[derive(Debug, PartialEq, Eq, Clone)] #(#deflated_attrs)* #deflated_vis enum #deflated_ident#deflated_generics { #(#deflated_variant_tokens,)* } }; gen.into() } fn get_deflated_attrs(args: &CSTNodeParams) -> Vec<Attribute> { let mut deflated_attrs: Vec<Attribute> = vec![]; if args.has_trait(&SupportedTrait::Inflate) { deflated_attrs.push(parse_quote!(#[derive(Inflate)])); } if args.has_trait(&SupportedTrait::ParenthesizedNode) { deflated_attrs.push(parse_quote!(#[derive(ParenthesizedDeflatedNode)])) } if args.has_trait(&SupportedTrait::Default) { deflated_attrs.push(parse_quote!(#[derive(Default)])); } deflated_attrs } fn add_inflated_attrs(args: &CSTNodeParams, attrs: &mut Vec<Attribute>) { if args.has_trait(&SupportedTrait::Codegen) { attrs.push(parse_quote!(#[derive(Codegen)])); } if args.has_trait(&SupportedTrait::ParenthesizedNode) { attrs.push(parse_quote!(#[derive(ParenthesizedNode)])); } if args.has_trait(&SupportedTrait::Default) { attrs.push(parse_quote!(#[derive(Default)])); } if !args.has_trait(&SupportedTrait::NoIntoPy) { attrs.push(parse_quote!(#[cfg_attr(feature = "py", derive(TryIntoPy))])); } } // pub struct Foo<'a> { // pub bar: Bar<'a>, // pub value: &'a str, // pub whitespace_after: SimpleWhitespace<'a>, // pub(crate) tok: Option<TokenRef<'a>>, // } // => // pub struct Foo<'a> { // pub bar: Bar<'a>, // pub value: &'a str, // pub whitespace_after: SimpleWhitespace<'a>, // } // struct DeflatedFoo<'r, 'a> { // pub bar: DeflatedBar<'r, 'a>, // pub value: &'a str, // pub tok: Option<TokenRef<'r, 'a>> // } fn impl_struct( args: CSTNodeParams, mut attrs: Vec<Attribute>, vis: Visibility, ident: Ident, generics: Generics, mut s: DataStruct, ) -> TokenStream { let deflated_vis = vis.clone(); let deflated_ident = format_ident!("Deflated{}", &ident); let deflated_generics: Generics = parse_quote!(<'r,
'a>); let (inflated_fields, deflated_fields) = impl_fields(s.fields); s.fields = inflated_fields; add_inflated_attrs(&args, &mut attrs); let inflated = DeriveInput { attrs, vis, ident, generics, data: Data::Struct(s), }; let deflated_attrs = get_deflated_attrs(&args); let gen = quote! { #[derive(Debug, PartialEq, Eq, Clone)] #inflated #[derive(Debug, PartialEq, Eq, Clone)] #(#deflated_attrs)* #deflated_vis struct #deflated_ident#deflated_generics #deflated_fields }; gen.into() } fn impl_fields(fields: Fields) -> (Fields, Fields) { match &fields { Fields::Unnamed(fs) => { let deflated_fields = impl_unnamed_fields(fs.clone()); (fields, Fields::Unnamed(deflated_fields)) } Fields::Named(fs) => impl_named_fields(fs.clone()), Fields::Unit => (Fields::Unit, Fields::Unit), } } fn impl_unnamed_fields(mut deflated_fields: FieldsUnnamed) -> FieldsUnnamed { let mut added_lifetime = false; deflated_fields.unnamed = deflated_fields .unnamed .into_pairs() .map(|pair| { let (deflated, lifetime) = make_into_deflated(pair); added_lifetime |= lifetime; deflated }) .collect(); // Make sure all Deflated* types have 'r 'a lifetime params if !added_lifetime { deflated_fields.unnamed.push(parse_quote! { std::marker::PhantomData<&'r &'a ()> }); } deflated_fields } fn impl_named_fields(mut fields: FieldsNamed) -> (Fields, Fields) { let mut deflated_fields = fields.clone(); let mut added_lifetime = false; // Drop whitespace fields from deflated fields // And add lifetimes to tokenref fields deflated_fields.named = deflated_fields .named .into_pairs() .filter(|pair| { let id = pair.value().ident.as_ref().unwrap().to_string(); !id.contains("whitespace") && id != "footer" && id != "header" && id != "leading_lines" && id != "lines_after_decorators" }) .map(|pair| { if is_builtin(pair.value()) { pair } else { let (deflated, lifetime) = make_into_deflated(pair); added_lifetime |= lifetime; deflated } }) .map(|pair| { let (mut val, punct) = pair.into_tuple(); val.attrs = val.attrs.into_iter().filter(is_not_intopy_attr).collect(); Pair::new(val, punct) }) .collect(); // Make sure all Deflated* types have 'r 'a lifetime params if !added_lifetime { deflated_fields.named.push(parse_quote! { _phantom: std::marker::PhantomData<&'r &'a ()> }); } // Drop tokenref fields from inflated fields fields.named = fields .named .into_pairs() .filter(|pair| !is_token_ref(pair.value())) .collect(); (Fields::Named(fields), Fields::Named(deflated_fields)) } fn is_builtin(field: &Field) -> bool { get_pathseg(&field.ty) .map(|seg| { let segstr = seg.ident.to_string(); segstr == "str" || segstr == "bool" || segstr == "String" }) .unwrap_or_default() } fn is_token_ref(field: &Field) -> bool { if let Some(seg) = rightmost_path_segment(&field.ty) { return format!("{}", seg.ident) == "TokenRef"; } false } // foo::bar -> foo::Deflatedbar<'r, 'a> fn make_into_deflated(mut pair: Pair<Field, Comma>) -> (Pair<Field, Comma>, bool) { let mut added_lifetime = true; if let Some(seg) = rightmost_path_segment_mut(&mut pair.value_mut().ty) { let seg_name = seg.ident.to_string(); if seg_name != "TokenRef" { seg.ident = format_ident!("Deflated{}", seg_name); } match seg.arguments { PathArguments::None => { seg.arguments = PathArguments::AngleBracketed(parse_quote!(<'r, 'a>)); } PathArguments::AngleBracketed(AngleBracketedGenericArguments { ref mut args, .. }) => { args.insert(0, parse_quote!('r)); } _ => todo!(), } } else { added_lifetime = false; } (pair, added_lifetime) } // foo::bar::baz<quux<'a>> -> baz<quux<'a>> fn get_pathseg(ty: &Type) -> Option<&PathSegment> { match ty { Type::Path(TypePath { path, ..
}) => path.segments.last(), _ => None, } } // foo::bar::baz<quux<'a>> -> quux<'a> fn rightmost_path_segment(ty: &Type) -> Option<&PathSegment> { let mut candidate = get_pathseg(ty); loop { if let Some(pathseg) = candidate { if let PathArguments::AngleBracketed(AngleBracketedGenericArguments { args, .. }) = &pathseg.arguments { if let Some(GenericArgument::Type(t)) = args.last() { candidate = get_pathseg(t); continue; } } } break; } candidate } fn get_pathseg_mut(ty: &mut Type) -> Option<&mut PathSegment> { match ty { Type::Path(TypePath { path, .. }) => path.segments.last_mut(), _ => None, } } fn has_more_mut(candidate: &Option<&mut PathSegment>) -> bool { if let Some(PathArguments::AngleBracketed(AngleBracketedGenericArguments { ref args, .. })) = candidate.as_ref().map(|c| &c.arguments) { matches!(args.last(), Some(GenericArgument::Type(_))) } else { false } } fn rightmost_path_segment_mut(ty: &mut Type) -> Option<&mut PathSegment> { let mut candidate = get_pathseg_mut(ty); while has_more_mut(&candidate) { candidate = match candidate.unwrap().arguments { PathArguments::AngleBracketed(AngleBracketedGenericArguments { ref mut args, .. }) => { if let Some(GenericArgument::Type(t)) = args.last_mut() { get_pathseg_mut(t) } else { unreachable!(); } } _ => unreachable!(), }; } candidate } fn is_not_intopy_attr(attr: &Attribute) -> bool { let path = attr.path(); // support #[cfg_attr(feature = "py", skip_py)] if path.is_ident("cfg_attr") { return match attr.parse_args_with(|input: ParseStream| { let _: Meta = input.parse()?; let _: Token![,] = input.parse()?; let nested_path: Path = input.parse()?; let _: Option<Token![,]> = input.parse()?; Ok(nested_path) }) { Ok(nested_path) => !is_intopy_attr_path(&nested_path), Err(_) => false, }; } !is_intopy_attr_path(path) } fn is_intopy_attr_path(path: &Path) -> bool { path.is_ident("skip_py") || path.is_ident("no_py_default") } #[test] fn trybuild() { let t = trybuild::TestCases::new(); t.pass("tests/pass/*.rs"); } #[test] fn test_is_not_intopy_attr() { assert!(!is_not_intopy_attr(&parse_quote!(#[skip_py]))); assert!(!is_not_intopy_attr(&parse_quote!(#[no_py_default]))); assert!(!is_not_intopy_attr( &parse_quote!(#[cfg_attr(foo="bar",skip_py)]) )); assert!(!is_not_intopy_attr( &parse_quote!(#[cfg_attr(foo="bar",no_py_default)]) )); assert!(is_not_intopy_attr(&parse_quote!(#[skippy]))); assert!(is_not_intopy_attr( &parse_quote!(#[cfg_attr(foo="bar",skippy)]) )); } LibCST-1.2.0/native/libcst_derive/src/inflate.rs000066400000000000000000000051521456464173300214750ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree use proc_macro::TokenStream; use quote::{format_ident, quote, quote_spanned}; use syn::{self, spanned::Spanned, Data, DataEnum, DeriveInput, Fields, FieldsUnnamed}; pub(crate) fn impl_inflate(ast: &DeriveInput) -> TokenStream { match &ast.data { Data::Enum(e) => impl_inflate_enum(ast, e), Data::Struct(s) => quote_spanned! { s.struct_token.span() => compile_error!("Struct type is not supported") } .into(), Data::Union(u) => quote_spanned! { u.union_token.span() => compile_error!("Union type is not supported") } .into(), } } fn impl_inflate_enum(ast: &DeriveInput, e: &DataEnum) -> TokenStream { let mut varnames = vec![]; for var in e.variants.iter() { match &var.fields { Fields::Named(n) => { return quote_spanned!
{ n.span() => compile_error!("Named enum fields not supported") } .into() } f @ Fields::Unit => { return quote_spanned! { f.span() => compile_error!("Empty enum variants not supported") } .into() } Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { if unnamed.len() > 1 { return quote_spanned! { unnamed.span() => compile_error!("Multiple unnamed fields not supported") } .into(); } varnames.push(&var.ident); } } } let ident = &ast.ident; let generics = &ast.generics; let ident_str = ident.to_string(); let inflated_ident = format_ident!( "{}", ident_str .strip_prefix("Deflated") .expect("Cannot implement Inflate on a non-Deflated item") ); let gen = quote! { impl#generics Inflate<'a> for #ident #generics { type Inflated = #inflated_ident <'a>; fn inflate(mut self, config: & crate::tokenizer::whitespace_parser::Config<'a>) -> std::result::Result<Self::Inflated, crate::tokenizer::whitespace_parser::WhitespaceError> { match self { #(Self::#varnames(x) => Ok(Self::Inflated::#varnames(x.inflate(config)?)),)* } } } }; gen.into() } LibCST-1.2.0/native/libcst_derive/src/into_py.rs000066400000000000000000000151361456464173300215370ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree use proc_macro::TokenStream; use quote::{format_ident, quote, quote_spanned, ToTokens}; use syn::{ spanned::Spanned, Attribute, Data, DataEnum, DataStruct, DeriveInput, Fields, FieldsNamed, FieldsUnnamed, Type, TypePath, Visibility, }; pub(crate) fn impl_into_py(ast: &DeriveInput) -> TokenStream { match &ast.data { Data::Enum(e) => impl_into_py_enum(ast, e), Data::Struct(s) => impl_into_py_struct(ast, s), Data::Union(u) => quote_spanned! { u.union_token.span() => compile_error!("Union type is not supported") } .into(), } } fn impl_into_py_enum(ast: &DeriveInput, e: &DataEnum) -> TokenStream { let mut toks = vec![]; for var in e.variants.iter() { let varname = &var.ident; match &var.fields { Fields::Named(n) => { let mut fieldnames = vec![]; for field in n.named.iter() { if has_attr(&field.attrs, "skip_py") { continue; } fieldnames.push(field.ident.as_ref().unwrap()); } let kwargs_toks = fields_to_kwargs(&var.fields, true); toks.push(quote! { Self::#varname { #(#fieldnames,)* .. } => { let libcst = pyo3::types::PyModule::import(py, "libcst")?; let kwargs = #kwargs_toks ; Ok(libcst .getattr(stringify!(#varname)) .expect(stringify!(no #varname found in libcst)) .call((), Some(kwargs))? .into()) } }) } f @ Fields::Unit => { return quote_spanned! { f.span() => compile_error!("Empty enum variants not supported") } .into() } Fields::Unnamed(_) => { toks.push(quote! { Self::#varname(x, ..) => x.try_into_py(py), }); } } } let ident = &ast.ident; let generics = &ast.generics; let gen = quote! { use pyo3::types::IntoPyDict as _; #[automatically_derived] impl#generics crate::nodes::traits::py::TryIntoPy<pyo3::PyObject> for #ident #generics { fn try_into_py(self, py: pyo3::Python) -> pyo3::PyResult<pyo3::PyObject> { match self { #(#toks)* } } } }; gen.into() } fn impl_into_py_struct(ast: &DeriveInput, e: &DataStruct) -> TokenStream { let kwargs_toks = fields_to_kwargs(&e.fields, false); let ident = &ast.ident; let generics = &ast.generics; let gen = quote!
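// (The generated impl below mirrors the enum case above: it collects the
// struct's fields into a kwargs dict via fields_to_kwargs and instantiates
// the libcst class of the same name through getattr + call.)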
{ use pyo3::types::IntoPyDict as _; #[automatically_derived] impl#generics crate::nodes::traits::py::TryIntoPy<pyo3::PyObject> for #ident #generics { fn try_into_py(self, py: pyo3::Python) -> pyo3::PyResult<pyo3::PyObject> { let libcst = pyo3::types::PyModule::import(py, "libcst")?; let kwargs = #kwargs_toks ; Ok(libcst .getattr(stringify!(#ident)) .expect(stringify!(no #ident found in libcst)) .call((), Some(kwargs))? .into()) } } }; gen.into() } fn fields_to_kwargs(fields: &Fields, is_enum: bool) -> quote::__private::TokenStream { let mut empty_kwargs = false; let mut py_varnames = vec![]; let mut rust_varnames = vec![]; let mut optional_py_varnames = vec![]; let mut optional_rust_varnames = vec![]; match &fields { Fields::Named(FieldsNamed { named, .. }) => { for field in named.iter() { if has_attr(&field.attrs, "skip_py") { continue; } if let Some(ident) = field.ident.as_ref() { let include = if let Visibility::Public(_) = field.vis { true } else { is_enum }; if include { let pyname = format_ident!("{}", ident); let rustname = if is_enum { ident.to_token_stream() } else { quote! { self.#ident } }; if !has_attr(&field.attrs, "no_py_default") { if let Type::Path(TypePath { path, .. }) = &field.ty { if let Some(first) = path.segments.first() { if first.ident == "Option" { optional_py_varnames.push(pyname); optional_rust_varnames.push(rustname); continue; } } } } py_varnames.push(pyname); rust_varnames.push(rustname); } } } empty_kwargs = py_varnames.is_empty() && optional_py_varnames.is_empty() } Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { if unnamed.first().is_some() { py_varnames.push(format_ident!("value")); rust_varnames.push(quote! { self.0 }); } else { empty_kwargs = true; } } Fields::Unit => { empty_kwargs = true; } }; let kwargs_pairs = quote! { #(Some((stringify!(#py_varnames), #rust_varnames.try_into_py(py)?)),)* }; let optional_pairs = quote! { #(#optional_rust_varnames.map(|x| x.try_into_py(py)).transpose()?.map(|x| (stringify!(#optional_py_varnames), x)),)* }; if empty_kwargs { quote! { pyo3::types::PyDict::new(py) } } else { quote! { [ #kwargs_pairs #optional_pairs ] .iter() .filter(|x| x.is_some()) .map(|x| x.as_ref().unwrap()) .collect::<Vec<_>>() .into_py_dict(py) } } } fn has_attr(attrs: &[Attribute], name: &'static str) -> bool { attrs.iter().any(|attr| attr.path().is_ident(name)) } LibCST-1.2.0/native/libcst_derive/src/lib.rs000066400000000000000000000030341456464173300206160ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates.
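//
// Overview: this crate exports the derive macros Inflate, ParenthesizedNode,
// ParenthesizedDeflatedNode, Codegen and TryIntoPy, plus the #[cst_node]
// attribute macro, which generates a parallel `Deflated`-prefixed version of
// each annotated node type. A typical use (from tests/pass/minimal_cst.rs):
//
//     #[cst_node(Codegen)]
//     enum Expr<'a> {
//         One(Box<Expr<'a>>),
//         Two(CompOp<'a>),
//     }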
// // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree mod inflate; use inflate::impl_inflate; mod parenthesized_node; use parenthesized_node::impl_parenthesized_node; mod codegen; use codegen::impl_codegen; mod into_py; use into_py::impl_into_py; mod cstnode; use cstnode::{impl_cst_node, CSTNodeParams}; use proc_macro::TokenStream; use syn::{parse_macro_input, DeriveInput}; #[proc_macro_derive(Inflate)] pub fn inflate_derive(input: TokenStream) -> TokenStream { let ast = syn::parse(input).unwrap(); impl_inflate(&ast) } #[proc_macro_derive(ParenthesizedNode)] pub fn parenthesized_node_derive(input: TokenStream) -> TokenStream { impl_parenthesized_node(&syn::parse(input).unwrap(), false) } #[proc_macro_derive(ParenthesizedDeflatedNode)] pub fn parenthesized_deflated_node_derive(input: TokenStream) -> TokenStream { impl_parenthesized_node(&syn::parse(input).unwrap(), true) } #[proc_macro_derive(Codegen)] pub fn codegen_derive(input: TokenStream) -> TokenStream { impl_codegen(&syn::parse(input).unwrap()) } #[proc_macro_derive(TryIntoPy, attributes(skip_py, no_py_default))] pub fn into_py(input: TokenStream) -> TokenStream { impl_into_py(&syn::parse(input).unwrap()) } #[proc_macro_attribute] pub fn cst_node(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as CSTNodeParams); impl_cst_node(parse_macro_input!(input as DeriveInput), args) } LibCST-1.2.0/native/libcst_derive/src/parenthesized_node.rs000066400000000000000000000101161456464173300237210ustar00rootroot00000000000000// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree use proc_macro::TokenStream; use quote::{quote, quote_spanned}; use syn::{ parse_quote, spanned::Spanned, Data, DataEnum, DeriveInput, Fields, FieldsUnnamed, Ident, }; pub(crate) fn impl_parenthesized_node(ast: &DeriveInput, deflated: bool) -> TokenStream { match &ast.data { Data::Enum(e) => impl_enum(ast, e, deflated), Data::Struct(_) => impl_struct(ast, deflated), Data::Union(u) => quote_spanned! { u.union_token.span() => compile_error!("Union type is not supported") } .into(), } } fn idents(deflated: bool) -> (Ident, Ident, Ident) { let treyt: Ident = if deflated { parse_quote!(ParenthesizedDeflatedNode) } else { parse_quote!(ParenthesizedNode) }; let leftparen: Ident = if deflated { parse_quote!(DeflatedLeftParen) } else { parse_quote!(LeftParen) }; let rightparen: Ident = if deflated { parse_quote!(DeflatedRightParen) } else { parse_quote!(RightParen) }; (treyt, leftparen, rightparen) } fn impl_struct(ast: &DeriveInput, deflated: bool) -> TokenStream { let ident = &ast.ident; let generics = if deflated { parse_quote!(<'r, 'a>) } else { ast.generics.clone() }; let (treyt, leftparen, rightparen) = idents(deflated); let gen = quote! 
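// (Generated impl sketch: lpar()/rpar() simply expose the node's own paren
// vectors, and with_parens() prepends the new LeftParen to lpar and appends
// the new RightParen to rpar, preserving nesting order.)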
{
        impl#generics #treyt#generics for #ident #generics {
            fn lpar(&self) -> &Vec<#leftparen#generics> {
                &self.lpar
            }
            fn rpar(&self) -> &Vec<#rightparen#generics> {
                &self.rpar
            }
            fn with_parens(self, left: #leftparen#generics, right: #rightparen#generics) -> Self {
                let mut lpar = self.lpar;
                let mut rpar = self.rpar;
                lpar.insert(0, left);
                rpar.push(right);
                #[allow(clippy::needless_update)]
                Self { lpar, rpar, ..self }
            }
        }
    };
    gen.into()
}

fn impl_enum(ast: &DeriveInput, e: &DataEnum, deflated: bool) -> TokenStream {
    let mut varnames = vec![];
    for var in e.variants.iter() {
        match &var.fields {
            Fields::Named(n) => {
                return quote_spanned! {
                    n.span() =>
                    compile_error!("Named enum fields not supported")
                }
                .into()
            }
            f @ Fields::Unit => {
                return quote_spanned! {
                    f.span() =>
                    compile_error!("Empty enum variants not supported")
                }
                .into()
            }
            Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => {
                if unnamed.len() > 1 {
                    return quote_spanned! {
                        unnamed.span() =>
                        compile_error!("Multiple unnamed fields not supported")
                    }
                    .into();
                }
                varnames.push(&var.ident);
            }
        }
    }
    let ident = &ast.ident;
    let generics = if deflated {
        parse_quote!(<'r, 'a>)
    } else {
        ast.generics.clone()
    };
    let (treyt, leftparen, rightparen) = idents(deflated);
    let gen = quote! {
        impl#generics #treyt#generics for #ident #generics {
            fn lpar(&self) -> &Vec<#leftparen#generics> {
                match self {
                    #(Self::#varnames(x) => x.lpar(),)*
                }
            }
            fn rpar(&self) -> &Vec<#rightparen#generics> {
                match self {
                    #(Self::#varnames(x) => x.rpar(),)*
                }
            }
            fn with_parens(self, left: #leftparen#generics, right: #rightparen#generics) -> Self {
                match self {
                    #(Self::#varnames(x) => Self::#varnames(x.with_parens(left, right)),)*
                }
            }
        }
    };
    gen.into()
}
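As an illustration, the following is roughly the impl that the struct path above emits, hand-expanded for a toy `Name` node. The `LeftParen`, `RightParen`, and `ParenthesizedNode` definitions here are simplified local stand-ins for the real ones in the main libcst crate, so treat this as a sketch rather than the actual generated code.

// Illustrative sketch only: a hand-expanded, monomorphic version of the
// derive's output for a toy node type.
pub struct LeftParen;
pub struct RightParen;

pub trait ParenthesizedNode: Sized {
    fn lpar(&self) -> &Vec<LeftParen>;
    fn rpar(&self) -> &Vec<RightParen>;
    fn with_parens(self, left: LeftParen, right: RightParen) -> Self;
}

pub struct Name {
    pub value: String,
    pub lpar: Vec<LeftParen>,
    pub rpar: Vec<RightParen>,
}

impl ParenthesizedNode for Name {
    fn lpar(&self) -> &Vec<LeftParen> {
        &self.lpar
    }
    fn rpar(&self) -> &Vec<RightParen> {
        &self.rpar
    }
    // New parens wrap the outside: the left paren goes first, the right
    // paren goes last, mirroring the generated code above.
    fn with_parens(self, left: LeftParen, right: RightParen) -> Self {
        let mut lpar = self.lpar;
        let mut rpar = self.rpar;
        lpar.insert(0, left);
        rpar.push(right);
        Self { lpar, rpar, ..self }
    }
}

fn main() {
    let name = Name { value: "x".to_string(), lpar: vec![], rpar: vec![] };
    let wrapped = name.with_parens(LeftParen, RightParen);
    assert_eq!(wrapped.lpar.len(), 1);
    assert_eq!(wrapped.rpar.len(), 1);
}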
LibCST-1.2.0/native/libcst_derive/tests/
LibCST-1.2.0/native/libcst_derive/tests/pass/
LibCST-1.2.0/native/libcst_derive/tests/pass/minimal_cst.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree

use libcst_derive::{cst_node, Codegen};

pub enum Error {}
type TokenRef<'r, 'a> = &'r &'a str;
pub type Result<T> = std::result::Result<T, Error>;

pub struct Config<'a> {
    #[allow(dead_code)]
    foo: &'a str,
}

pub trait Inflate<'a>
where
    Self: Sized,
{
    type Inflated;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated>;
}

impl<'a, T: Inflate<'a> + ?Sized> Inflate<'a> for Box<T> {
    type Inflated = Box<T::Inflated>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        match (*self).inflate(config) {
            Ok(a) => Ok(Box::new(a)),
            Err(e) => Err(e),
        }
    }
}

pub struct CodegenState<'a> {
    #[allow(dead_code)]
    foo: &'a str,
}

pub trait Codegen<'a> {
    fn codegen(&self, state: &mut CodegenState<'a>);
}

#[derive(Debug, PartialEq, Eq, Clone)]
pub struct WS<'a> {
    pub last_line: &'a str,
}

#[cst_node]
pub struct Parameters<'a> {
    pub params: Vec<Param<'a>>,
    pub foo: Param<'a>,
}

impl<'r, 'a> Inflate<'a> for DeflatedParameters<'r, 'a> {
    type Inflated = Parameters<'a>;
    fn inflate(self, config: &Config<'a>) -> Result<Self::Inflated> {
        let params = vec![];
        #[allow(clippy::blacklisted_name)]
        let foo = self.foo.inflate(config)?;
        Ok(Self::Inflated { params, foo })
    }
}

#[cst_node]
pub struct Param<'a> {
    pub star: Option<&'a str>,
    pub(crate) star_tok: Option<TokenRef<'a>>,
}

impl<'r, 'a> Inflate<'a> for DeflatedParam<'r, 'a> {
    type Inflated = Param<'a>;
    fn inflate(self, _config: &Config<'a>) -> Result<Self::Inflated> {
        Ok(Self::Inflated { star: self.star })
    }
}

impl<'a> Codegen<'a> for Param<'a> {
    fn codegen(&self, _state: &mut CodegenState<'a>) {}
}

#[cst_node]
pub struct BitOr<'a> {
    pub whitespace_before: WS<'a>,
    pub whitespace_after: WS<'a>,
    pub(crate) tok: TokenRef<'a>,
}

#[cst_node]
pub enum CompOp<'a> {
    LessThan {
        whitespace_before: WS<'a>,
        tok: TokenRef<'a>,
    },
    GreaterThan {
        whitespace_after: WS<'a>,
        tok: TokenRef<'a>,
    },
}

impl<'r, 'a> Inflate<'a> for DeflatedCompOp<'r, 'a> {
    type Inflated = CompOp<'a>;
    fn inflate(self, _config: &Config<'a>) -> Result<Self::Inflated> {
        Ok(match self {
            Self::LessThan { tok: _, .. } => Self::Inflated::LessThan {
                whitespace_before: WS { last_line: "yo" },
            },
            Self::GreaterThan { tok: _, .. } => Self::Inflated::GreaterThan {
                whitespace_after: WS { last_line: "" },
            },
        })
    }
}

impl<'a> Codegen<'a> for CompOp<'a> {
    fn codegen(&self, _state: &mut CodegenState<'a>) {}
}

#[cst_node(Codegen)]
enum Expr<'a> {
    #[allow(dead_code)]
    One(Box<Param<'a>>),
    #[allow(dead_code)]
    Two(CompOp<'a>),
}

fn main() {}
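For readers wondering where `DeflatedParam` and friends come from in the test above: the `#[cst_node]` attribute generates a "deflated" twin of each annotated type, a parse-time variant that still carries token references. The following is a rough sketch of that twin for `Param`, inferred from how the test uses it; the exact field mapping, lifetimes, and extra derives the macro actually emits are assumptions here.

// Illustrative sketch only: approximately the pair of types that
// `#[cst_node]` is understood to produce for `Param`.
#[allow(dead_code)]
type TokenRef<'r, 'a> = &'r &'a str;

// The inflated (user-facing) node keeps only the semantic field...
#[allow(dead_code)]
pub struct Param<'a> {
    pub star: Option<&'a str>,
}

// ...while the deflated twin also holds the raw token, which the
// hand-written Inflate impl above drops during inflation.
#[allow(dead_code)]
pub struct DeflatedParam<'r, 'a> {
    pub star: Option<&'a str>,
    pub(crate) star_tok: Option<TokenRef<'r, 'a>>,
}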
LibCST-1.2.0/native/libcst_derive/tests/pass/simple.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree

use libcst_derive::cst_node;

#[derive(Debug, PartialEq, Eq, Clone)]
pub struct WS<'a>(&'a str);
type TokenRef<'r, 'a> = &'r &'a str;

#[cst_node]
pub enum Foo<'a> {
    One(One<'a>),
    Two(Box<Two<'a>>),
}

#[cst_node]
pub struct One<'a> {
    pub two: Box<Two<'a>>,
    pub header: WS<'a>,
    pub(crate) newline_tok: TokenRef<'a>,
}

#[cst_node]
pub struct Two<'a> {
    pub whitespace_before: WS<'a>,
    pub(crate) tok: TokenRef<'a>,
}

#[cst_node]
struct Thin<'a> {
    pub whitespace: WS<'a>,
}

#[cst_node]
struct Value<'a> {
    pub value: &'a str,
}

#[cst_node]
struct Empty {}

#[cst_node]
enum Smol<'a> {
    #[allow(dead_code)]
    Thin(Thin<'a>),
    #[allow(dead_code)]
    Empty(Empty),
}

fn main() {}
LibCST-1.2.0/native/roundtrip.sh
#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

PARSE=$(dirname $0)/target/release/parse
exec diff -u "$1" <($PARSE < "$1")
LibCST-1.2.0/pyproject.toml
[build-system]
requires = ["setuptools", "setuptools-scm", "setuptools-rust", "wheel"]

[project]
name = "libcst"
description = "A concrete syntax tree with AST-like properties for Python 3.0 through 3.12 programs."
readme = "README.rst"
dynamic = ["version"]
license = { file = "LICENSE" }
classifiers = [
    "License :: OSI Approved :: MIT License",
    "Topic :: Software Development :: Libraries",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]
requires-python = ">=3.9"
dependencies = [
    "typing_extensions>=3.7.4.2",
    "typing_inspect>=0.4.0",
    "pyyaml>=5.2",
]

[project.optional-dependencies]
dev = [
    "black==23.12.1",
    "coverage>=4.5.4",
    "build>=0.10.0",
    "fixit==2.1.0",
    "flake8==7.0.0",
    "Sphinx>=5.1.1",
    "hypothesis>=4.36.0",
    "hypothesmith>=0.0.4",
    "jupyter>=1.0.0",
    "maturin>=0.8.3,<1.5",
    "nbsphinx>=0.4.2",
    "prompt-toolkit>=2.0.9",
    "pyre-check==0.9.18; platform_system != 'Windows'",
    "setuptools_scm>=6.0.1",
    "sphinx-rtd-theme>=0.4.3",
    "ufmt==2.3.0",
    "usort==1.0.7",
    "setuptools-rust>=1.5.2",
    "slotscheck>=0.7.1",
    "jinja2==3.1.3",
]

[project.urls]
Documentation = "https://libcst.readthedocs.io/en/latest/"
Github = "https://github.com/Instagram/LibCST"
Changelog = "https://github.com/Instagram/LibCST/blob/main/CHANGELOG.md"

[tool.black]
target-version = ["py39"]
# Prepend "^/" to specify root file/folder. See
# https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#configuration-format
extend-exclude = '^/native/'

[tool.hatch.envs.default]
features = ["dev"]

[tool.hatch.envs.default.scripts]
docs = "sphinx-build -ab html docs/source docs/build"
fixtures = ["python scripts/regenerate-fixtures.py", "git diff --exit-code"]
format = "ufmt format libcst scripts"
lint = [
    "flake8 libcst",
    "ufmt check libcst scripts",
    "python -m slotscheck libcst",
    "python scripts/check_copyright.py",
]
test = ["python --version", "python -m libcst.tests"]
typecheck = ["pyre --version", "pyre check"]

[tool.slotscheck]
exclude-modules = '^libcst\.(testing|tests)'

[tool.ufmt]
excludes = ["native/", "stubs/"]
LibCST-1.2.0/scripts/
LibCST-1.2.0/scripts/check_copyright.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import re
import sys
from pathlib import Path
from subprocess import run
from typing import Iterable, List, Pattern

# Use the copyright header from this file as the benchmark for all files
EXPECTED_HEADER: str = "\n".join(
    line for line in Path(__file__).read_text().splitlines()[:4]
)

EXCEPTION_PATTERNS: List[Pattern[str]] = [
    re.compile(pattern)
    for pattern in (
        r"^native/libcst/tests/fixtures/",
        r"^libcst/_add_slots\.py$",
        r"^libcst/tests/test_(e2e|fuzz)\.py$",
        r"^libcst/_parser/base_parser\.py$",
        r"^libcst/_parser/parso/utils\.py$",
        r"^libcst/_parser/parso/pgen2/(generator|grammar_parser)\.py$",
        r"^libcst/_parser/parso/python/(py_token|tokenize)\.py$",
        r"^libcst/_parser/parso/tests/test_(fstring|tokenize|utils)\.py$",
    )
]


def tracked_files() -> Iterable[Path]:
    proc = run(
        ["git", "ls-tree", "-r", "--name-only", "HEAD"],
        check=True,
        capture_output=True,
        encoding="utf-8",
    )
    yield from (
        path
        for line in proc.stdout.splitlines()
        if not any(pattern.search(line) for pattern in EXCEPTION_PATTERNS)
        if (path := Path(line)) and path.is_file() and path.suffix in (".py", ".sh")
    )


def main() -> None:
    error = False
    for path in tracked_files():
        content = path.read_text("utf-8")
        if EXPECTED_HEADER not in content:
            print(f"Missing or incomplete copyright in {path}")
            error = True
    sys.exit(1 if error else 0)


if __name__ == "__main__":
    main()
LibCST-1.2.0/scripts/regenerate-fixtures.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Regenerate test fixtures, e.g. after upgrading Pyre
"""

import json
import os
from pathlib import Path
from subprocess import run

from libcst.metadata import TypeInferenceProvider


def main() -> None:
    CWD = Path.cwd()
    repo_root = Path(__file__).parent.parent
    test_root = repo_root / "libcst" / "tests" / "pyre"

    try:
        os.chdir(test_root)
        run(["pyre", "-n", "start", "--no-watchman"], check=True)
        for file_path in test_root.glob("*.py"):
            json_path = file_path.with_suffix(".json")
            print(f"generating {file_path} -> {json_path}")
            path_str = file_path.as_posix()
            cache = TypeInferenceProvider.gen_cache(test_root, [path_str], timeout=None)
            result = cache[path_str]
            json_path.write_text(json.dumps(result, sort_keys=True, indent=2))
    finally:
        run(["pyre", "-n", "stop"], check=True)
        os.chdir(CWD)


if __name__ == "__main__":
    main()
LibCST-1.2.0/setup.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from os import environ

import setuptools
from setuptools_rust import Binding, RustExtension


def no_local_scheme(version: str) -> str:
    return ""


setuptools.setup(
    setup_requires=["setuptools-rust", "setuptools_scm"],
    use_scm_version={
        "write_to": "libcst/_version.py",
        **(
            {"local_scheme": no_local_scheme}
            if "LIBCST_NO_LOCAL_SCHEME" in environ
            else {}
        ),
    },
    packages=setuptools.find_packages(),
    package_data={
        "libcst": ["py.typed"],
        "libcst.tests.pyre": ["*"],
        "libcst.codemod.tests": ["*"],
    },
    test_suite="libcst",
    rust_extensions=[
        RustExtension(
            "libcst.native",
            path="native/libcst/Cargo.toml",
            binding=Binding.PyO3,
        )
    ],
    zip_safe=False,  # for mypy compatibility https://mypy.readthedocs.io/en/latest/installed_packages.html
)
LibCST-1.2.0/stubs/
LibCST-1.2.0/stubs/hypothesis.pyi
# pyre-placeholder-stub
LibCST-1.2.0/stubs/hypothesmith.pyi
# pyre-placeholder-stub
LibCST-1.2.0/stubs/libcst/
LibCST-1.2.0/stubs/libcst/native.pyi
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional

import libcst

def parse_module(source: str, encoding: Optional[str]) -> libcst.Module: ...
def parse_expression(source: str) -> libcst.BaseExpression: ...
def parse_statement(source: str) -> libcst.BaseStatement: ...
LibCST-1.2.0/stubs/libcst_native/
LibCST-1.2.0/stubs/libcst_native/parser_config.pyi
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, FrozenSet, Mapping, Sequence

from libcst._parser.parso.utils import PythonVersionInfo

class BaseWhitespaceParserConfig:
    def __new__(
        cls,
        *,
        lines: Sequence[str],
        default_newline: str,
    ) -> BaseWhitespaceParserConfig: ...

    lines: Sequence[str]
    default_newline: str

class ParserConfig(BaseWhitespaceParserConfig):
    def __new__(
        cls,
        *,
        lines: Sequence[str],
        encoding: str,
        default_indent: str,
        default_newline: str,
        has_trailing_newline: bool,
        version: PythonVersionInfo,
        future_imports: FrozenSet[str],
    ) -> BaseWhitespaceParserConfig: ...

    # lines is inherited
    encoding: str
    default_indent: str
    # default_newline is inherited
    has_trailing_newline: bool
    version: PythonVersionInfo
    future_imports: FrozenSet[str]

def parser_config_asdict(config: ParserConfig) -> Mapping[str, Any]: ...
LibCST-1.2.0/stubs/libcst_native/token_type.pyi
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

class TokenType:
    name: str
    contains_syntax: bool

STRING: TokenType = ...
NAME: TokenType = ...
NUMBER: TokenType = ...
OP: TokenType = ...
NEWLINE: TokenType = ...
INDENT: TokenType = ...
DEDENT: TokenType = ...
ASYNC: TokenType = ...
AWAIT: TokenType = ...
FSTRING_START: TokenType = ...
FSTRING_STRING: TokenType = ...
FSTRING_END: TokenType = ...
ENDMARKER: TokenType = ...
# unused dummy tokens for backwards compat with the parso tokenizer
ERRORTOKEN: TokenType = ...
ERROR_DEDENT: TokenType = ...
LibCST-1.2.0/stubs/libcst_native/tokenize.pyi
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Iterator, Optional, Tuple

from libcst_native import token_type, whitespace_state

class Token:
    def __new__(
        cls,
        type: token_type.TokenType,
        string: str,
        start_pos: Tuple[int, int],
        end_pos: Tuple[int, int],
        whitespace_before: whitespace_state.WhitespaceState,
        whitespace_after: whitespace_state.WhitespaceState,
        relative_indent: Optional[str],
    ) -> Token: ...

    type: token_type.TokenType
    string: str
    start_pos: Tuple[int, int]
    end_pos: Tuple[int, int]
    whitespace_before: whitespace_state.WhitespaceState
    whitespace_after: whitespace_state.WhitespaceState
    relative_indent: Optional[str]

def tokenize(text: str) -> Iterator[Token]: ...
LibCST-1.2.0/stubs/libcst_native/whitespace_parser.pyi
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional, Sequence, Union

from libcst._nodes.whitespace import (
    EmptyLine,
    Newline,
    ParenthesizedWhitespace,
    SimpleWhitespace,
    TrailingWhitespace,
)
from libcst._parser.types.config import BaseWhitespaceParserConfig as Config
from libcst._parser.types.whitespace_state import WhitespaceState as State

def parse_simple_whitespace(config: Config, state: State) -> SimpleWhitespace: ...
def parse_empty_lines(
    config: Config,
    state: State,
    *,
    override_absolute_indent: Optional[str] = None,
) -> Sequence[EmptyLine]: ...
def parse_trailing_whitespace(config: Config, state: State) -> TrailingWhitespace: ...
def parse_parenthesizable_whitespace(
    config: Config, state: State
) -> Union[SimpleWhitespace, ParenthesizedWhitespace]: ...
LibCST-1.2.0/stubs/libcst_native/whitespace_state.pyi
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

class WhitespaceState:
    def __new__(
        cls, line: int, column: int, absolute_indent: str, is_parenthesized: bool
    ) -> WhitespaceState: ...

    line: int  # one-indexed (to match parso's behavior)
    column: int  # zero-indexed (to match parso's behavior)
    # What to look for when executing `_parse_indent`.
    absolute_indent: str
    is_parenthesized: bool
LibCST-1.2.0/stubs/setuptools.pyi
# pyre-placeholder-stub
LibCST-1.2.0/stubs/tokenize.pyi
from token import (
    AMPER, AMPEREQUAL, AT, ATEQUAL, CIRCUMFLEX, CIRCUMFLEXEQUAL, COLON,
    COLONEQUAL, COMMA, COMMENT, DEDENT, DOT, DOUBLESLASH, DOUBLESLASHEQUAL,
    DOUBLESTAR, DOUBLESTAREQUAL, ELLIPSIS, ENCODING, ENDMARKER, EQEQUAL,
    EQUAL, ERRORTOKEN, EXACT_TOKEN_TYPES, GREATER, GREATEREQUAL, INDENT,
    LBRACE, LEFTSHIFT, LEFTSHIFTEQUAL, LESS, LESSEQUAL, LPAR, LSQB, MINEQUAL,
    MINUS, N_TOKENS, NAME, NEWLINE, NL, NOTEQUAL, NT_OFFSET, NUMBER, OP,
    PERCENT, PERCENTEQUAL, PLUS, PLUSEQUAL, RARROW, RBRACE, RIGHTSHIFT,
    RIGHTSHIFTEQUAL, RPAR, RSQB, SEMI, SLASH, SLASHEQUAL, STAR, STAREQUAL,
    STRING, TILDE, TYPE_COMMENT, TYPE_IGNORE, VBAR, VBAREQUAL,
)
from typing import Callable, Generator, Sequence, Tuple

Hexnumber: str = ...
Binnumber: str = ...
Octnumber: str = ...
Decnumber: str = ...
Intnumber: str = ...
Exponent: str = ...
Pointfloat: str = ...
Expfloat: str = ...
Floatnumber: str = ...
Imagnumber: str = ...
Number: str = ...
Whitespace: str = ...
Comment: str = ...
Ignore: str = ...
Name: str = ...

class TokenInfo(Tuple[int, str, Tuple[int, int], Tuple[int, int], int]):
    exact_type: int = ...
    type: int = ...
    string: str = ...
    start: Tuple[int, int] = ...
    end: Tuple[int, int] = ...
    line: int = ...
    def __repr__(self) -> str: ...

def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, Sequence[bytes]]: ...
def tokenize(readline: Callable[[], bytes]) -> Generator[TokenInfo, None, None]: ...
LibCST-1.2.0/stubs/typing_inspect.pyi
# pyre-placeholder-stub