coverage-7.4.4/.editorconfig

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

# This file is for unifying the coding style for different editors and IDEs.
# More information at http://EditorConfig.org

root = true

[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
max_line_length = 80
trim_trailing_whitespace = true

[*.py]
max_line_length = 100

[*.pyi]
max_line_length = 100

[*.c]
max_line_length = 100

[*.h]
max_line_length = 100

[*.yml]
indent_size = 2

[*.rst]
max_line_length = 79

[*.tok]
trim_trailing_whitespace = false

[*_dos.tok]
end_of_line = crlf

[Makefile]
indent_style = tab
indent_size = 8

[*,cover]
trim_trailing_whitespace = false

[*.diff]
trim_trailing_whitespace = false

[.git/*]
trim_trailing_whitespace = false

coverage-7.4.4/.git-blame-ignore-revs

# Commits to ignore when doing git-blame.

# 2023-01-05 style: use good style for annotated defaults parameters
78444f4c06df6a634fa67dd99ee7c07b6b633d9e

# 2023-01-06 style(perf): blacken lab/benchmark.py
bf6c12f5da54db7c5c0cc47cbf22c70f686e8236

# 2023-03-22 style: use double-quotes
16abd82b6e87753184e8308c4b2606ff3979f8d3
b7be64538aa480fce641349d3053e9a84862d571

# 2023-04-01 style: use double-quotes in JavaScript
b03ab92bae24c54f1d5a98baa3af6b9a18de4d36

# 2023-11-04 style: ruff format igor.py, setup.py, __main__.py
acb80450d7c033a6ea6e06eb2e74d3590c268435

# 2023-11-20 style: fr"" is better than rf"", for real
d8daa08b347fe6b7099c437b09d926eb999d0803

# 2023-12-02 style: check_coverage close parens should be on their own line
5d0b5d4464b84adb6389c8894c207a323edb2b2b

# 2024-02-27 style: fix COM812 Trailing comma missing
e4e238a9ed8f2ad2b9060247591b4c057c2953bf

# 2024-02-27 style: modernize type hints, a few more f-strings
401a63bf08bdfd780b662f64d2dfe3603f2584dd
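The ignore list above only takes effect when git is told to read it; a minimal local setup, assuming you are working in a checkout of this repo, is:

    git config blame.ignoreRevsFile .git-blame-ignore-revs

or, for a one-off blame of a single file, pass it explicitly:

    git blame --ignore-revs-file .git-blame-ignore-revs coverage/control.py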
coverage-7.4.4/.github/CODE_OF_CONDUCT.md

# Treat each other well

Everyone participating in the coverage.py project, and in particular in the
issue tracker, pull requests, and social media activity, is expected to treat
other people with respect and to follow the guidelines articulated in the
[Python Community Code of Conduct][psf_coc].

[psf_coc]: https://www.python.org/psf/codeofconduct/

coverage-7.4.4/.github/FUNDING.yml

github: nedbat
tidelift: pypi/coverage

coverage-7.4.4/.github/ISSUE_TEMPLATE/bug_report.md

---
name: Bug report
about: Report a problem with coverage.py
title: ''
labels: bug, needs triage
assignees: ''
---

**Describe the bug**
A clear and concise description of the bug.

**To Reproduce**
How can we reproduce the problem? Please *be specific*. Don't link to a
failing CI job. Answer the questions below:

1. What version of Python are you using?
1. What version of coverage.py shows the problem? The output of `coverage debug sys` is helpful.
1. What versions of what packages do you have installed? The output of `pip freeze` is helpful.
1. What code shows the problem? Give us a specific commit of a specific repo
   that we can check out. If you've already worked around the problem, please
   provide a commit before that fix.
1. What commands should we run to reproduce the problem? *Be specific*.
   Include everything, even `git clone`, `pip install`, and so on.
   Explain like we're five!

**Expected behavior**
A clear and concise description of what you expected to happen.

**Additional context**
Add any other context about the problem here.

coverage-7.4.4/.github/ISSUE_TEMPLATE/config.yml

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

# https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository

blank_issues_enabled: false
contact_links:
  - name: Frequently Asked Questions
    url: https://coverage.readthedocs.io/en/latest/faq.html
    about: Some common problems are described here.
  - name: Tidelift security contact
    url: https://tidelift.com/security
    about: Please report security vulnerabilities here.

coverage-7.4.4/.github/ISSUE_TEMPLATE/feature_request.md

---
name: Feature request
about: Suggest an idea for coverage.py
title: ''
labels: enhancement, needs triage
assignees: ''
---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is.
Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features
you've considered.

**Additional context**
Add any other context about the feature request here.
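The bug-report template above asks for Python, coverage.py, and package versions; assuming coverage.py and pip are installed in the environment where the problem occurs, that output can be gathered with:

    python -m coverage debug sys
    python -m pip freeze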
coverage-7.4.4/.github/ISSUE_TEMPLATE/support.md

---
name: Support request
about: Ask for help using coverage.py
title: ''
labels: support, needs triage
assignees: ''
---

**Have you asked elsewhere?**

There are other good places to ask for help using coverage.py. These places
let other people suggest solutions, and are more likely places for people to
find your question:

- [Stack Overflow](https://stackoverflow.com/questions/tagged/coverage.py)
- [discuss.python.org](https://discuss.python.org/search?q=coverage.py)

**Describe your situation**

Wherever you ask your question, be sure to explain:

- What you did
- What happened
- How that was different than what you wanted to happen
- What kind of help you need

coverage-7.4.4/.github/SECURITY.md

# Security Disclosures

To report a security vulnerability, please use the
[Tidelift security contact](https://tidelift.com/security).
Tidelift will coordinate the fix and disclosure with maintainers.

coverage-7.4.4/.github/dependabot.yml

# From:
# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/keeping-your-actions-up-to-date-with-dependabot

# Set update schedule for GitHub Actions

version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      # Check for updates to GitHub Actions every weekday
      interval: "daily"

coverage-7.4.4/.github/workflows/codeql-analysis.yml

# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches:
      - master
  pull_request:
    # The branches below must be a subset of the branches above
    branches:
      - master
  schedule:
    - cron: '30 20 * * 6'

permissions:
  contents: read

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language:
          - python
          - javascript
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://git.io/codeql-language-support

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v3

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3

coverage-7.4.4/.github/workflows/coverage.yml

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

name: "Coverage"

on:
  # As currently structured, this adds too many jobs (checks?), so don't run it
  # on pull requests yet.
  push:
    branches:
      - master
      - "**/*metacov*"
  workflow_dispatch:

defaults:
  run:
    shell: bash

env:
  PIP_DISABLE_PIP_VERSION_CHECK: 1
  FORCE_COLOR: 1    # Get colored pytest output

permissions:
  contents: read

concurrency:
  group: "${{ github.workflow }}-${{ github.ref }}"
  cancel-in-progress: true

jobs:
  coverage:
    name: "${{ matrix.python-version }} on ${{ matrix.os }}"
    runs-on: "${{ matrix.os }}-latest"
    env:
      MATRIX_ID: "${{ matrix.python-version }}.${{ matrix.os }}"

    strategy:
      matrix:
        os:
          - ubuntu
          - macos
          - windows
        python-version:
          # When changing this list, be sure to check the [gh] list in
          # tox.ini so that tox will run properly. PYVERSIONS
          # Available versions:
          # https://github.com/actions/python-versions/blob/main/versions-manifest.json
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
          - "3.13"
          - "pypy-3.8"
          - "pypy-3.9"
          - "pypy-3.10"
        exclude:
          # Mac PyPy always takes the longest, and doesn't add anything.
          - os: macos
            python-version: "pypy-3.8"
          - os: macos
            python-version: "pypy-3.9"
          - os: macos
            python-version: "pypy-3.10"
          # Windows pypy 3.9 and 3.10 get stuck with PyPy 7.3.15. I hope to
          # unstick them, but I don't want that to block all other progress, so
          # skip them for now.
          - os: windows
            python-version: "pypy-3.9"
          - os: windows
            python-version: "pypy-3.10"
          # Skip 3.13.0a4 and pin to 3.13.0a3 for Windows due to build error.
          # Undo when 3.13.0a5 is released.
          - os: windows
            python-version: "3.13"
        include:
          - os: windows
            python-version: "3.13.0-alpha.3"
      # If one job fails, stop the whole thing.
      fail-fast: true

    steps:
      - name: "Check out the repo"
        uses: "actions/checkout@v4"

      - name: "Set up Python"
        uses: "actions/setup-python@v5"
        with:
          python-version: "${{ matrix.python-version }}"
          allow-prereleases: true
          # At a certain point, installing dependencies failed on pypy 3.9 and
          # 3.10 on Windows. Commenting out the cache here fixed it. Someday
          # try using the cache again.
          #cache: pip
          #cache-dependency-path: 'requirements/*.pip'

      - name: "Show environment"
        run: |
          set -xe
          python -VV
          python -m site
          env

      - name: "Install dependencies"
        run: |
          echo matrix id: $MATRIX_ID
          set -xe
          python -VV
          python -m site
          python -m pip install -r requirements/tox.pip

      - name: "Run tox coverage for ${{ matrix.python-version }}"
        env:
          COVERAGE_COVERAGE: "yes"
          COVERAGE_CONTEXT: "${{ matrix.python-version }}.${{ matrix.os }}"
        run: |
          set -xe
          python -m tox

      - name: "Combine data"
        env:
          COVERAGE_RCFILE: "metacov.ini"
        run: |
          python -m coverage combine
          mv .metacov .metacov.$MATRIX_ID

      - name: "Upload coverage data"
        uses: actions/upload-artifact@v4
        with:
          name: metacov-${{ env.MATRIX_ID }}
          path: .metacov.*

  combine:
    name: "Combine coverage data"
    needs: coverage
    runs-on: ubuntu-latest
    outputs:
      total: ${{ steps.total.outputs.total }}
    env:
      COVERAGE_RCFILE: "metacov.ini"

    steps:
      - name: "Check out the repo"
        uses: "actions/checkout@v4"

      - name: "Set up Python"
        uses: "actions/setup-python@v5"
        with:
          python-version: "3.8" # Minimum of PYVERSIONS
          # At a certain point, installing dependencies failed on pypy 3.9 and
          # 3.10 on Windows. Commenting out the cache here fixed it. Someday
          # try using the cache again.
          #cache: pip
          #cache-dependency-path: 'requirements/*.pip'

      - name: "Show environment"
        run: |
          set -xe
          python -VV
          python -m site
          env

      - name: "Install dependencies"
        run: |
          set -xe
          python -m pip install -e .
          python igor.py zip_mods

      - name: "Download coverage data"
        uses: actions/download-artifact@v4
        with:
          pattern: metacov-*
          merge-multiple: true

      - name: "Combine and report"
        id: combine
        env:
          COVERAGE_CONTEXT: "yes"
        run: |
          set -xe
          python igor.py combine_html

      - name: "Upload HTML report"
        uses: actions/upload-artifact@v4
        with:
          name: html_report
          path: htmlcov

      - name: "Get total"
        id: total
        run: |
          echo "total=$(python -m coverage report --format=total)" >> $GITHUB_OUTPUT

  publish:
    name: "Publish coverage report"
    needs: combine
    runs-on: ubuntu-latest

    steps:
      - name: "Show environment"
        run: |
          set -xe
          env

      - name: "Compute info for later steps"
        id: info
        run: |
          set -xe
          env
          export SHA10=$(echo ${{ github.sha }} | cut -c 1-10)
          export SLUG=$(date +'%Y%m%d')_$SHA10
          export REPORT_DIR=reports/$SLUG/htmlcov
          export REF="${{ github.ref }}"
          echo "total=${{ needs.combine.outputs.total }}" >> $GITHUB_ENV
          echo "sha10=$SHA10" >> $GITHUB_ENV
          echo "slug=$SLUG" >> $GITHUB_ENV
          echo "report_dir=$REPORT_DIR" >> $GITHUB_ENV
          echo "url=https://htmlpreview.github.io/?https://github.com/nedbat/coverage-reports/blob/main/reports/$SLUG/htmlcov/index.html" >> $GITHUB_ENV
          echo "branch=${REF#refs/heads/}" >> $GITHUB_ENV

      - name: "Summarize"
        run: |
          echo '### Total coverage: ${{ env.total }}%' >> $GITHUB_STEP_SUMMARY

      - name: "Checkout reports repo"
        if: ${{ github.ref == 'refs/heads/master' }}
        run: |
          set -xe
          git clone --depth=1 --no-checkout https://${{ secrets.COVERAGE_REPORTS_TOKEN }}@github.com/nedbat/coverage-reports reports_repo
          cd reports_repo
          git sparse-checkout init --cone
          git sparse-checkout set --skip-checks '/*' '!/reports'
          git config user.name nedbat
          git config user.email ned@nedbatchelder.com
          git checkout main

      - name: "Download coverage HTML report"
        if: ${{ github.ref == 'refs/heads/master' }}
        uses: actions/download-artifact@v4
        with:
          name: html_report
          path: reports_repo/${{ env.report_dir }}

      - name: "Push to report repo"
        if: |
          github.repository_owner == 'nedbat'
          && github.ref == 'refs/heads/master'
        env:
          COMMIT_MESSAGE: ${{ github.event.head_commit.message }}
        run: |
          set -xe
          # Make the redirect to the latest report.
          echo "<html><head>" > reports_repo/latest.html
          echo "<meta http-equiv='refresh' content='0;url=${{ env.url }}' />" >> reports_repo/latest.html
          echo "Coverage report redirect..." >> reports_repo/latest.html
          # Make the commit message.
          echo "${{ env.total }}% - $COMMIT_MESSAGE" > commit.txt
          echo "" >> commit.txt
          echo "${{ env.url }}" >> commit.txt
          echo "${{ env.sha10 }}: ${{ env.branch }}" >> commit.txt
          # Commit.
          cd ./reports_repo
          git sparse-checkout set --skip-checks '/*' '${{ env.report_dir }}'
          rm ${{ env.report_dir }}/.gitignore
          git add ${{ env.report_dir }} latest.html
          git commit --file=../commit.txt
          git push
          echo '[${{ env.url }}](${{ env.url }})' >> $GITHUB_STEP_SUMMARY

      - name: "Create badge"
        if: |
          github.repository_owner == 'nedbat'
          && github.ref == 'refs/heads/master'
        # https://gist.githubusercontent.com/nedbat/8c6980f77988a327348f9b02bbaf67f5
        uses: schneegans/dynamic-badges-action@e9a478b16159b4d31420099ba146cdc50f134483
        with:
          auth: ${{ secrets.METACOV_GIST_SECRET }}
          gistID: 8c6980f77988a327348f9b02bbaf67f5
          filename: metacov.json
          label: Coverage
          message: ${{ env.total }}%
          minColorRange: 60
          maxColorRange: 95
          valColorRange: ${{ env.total }}

coverage-7.4.4/.github/workflows/dependency-review.yml

# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request,
# surfacing known-vulnerable versions of the packages declared or updated in the PR.
# Once installed, if the workflow run is marked as required, PRs introducing
# known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement

name: 'Dependency Review'

on:
  push:
    branches:
      - master
      - nedbat/*
  pull_request:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  dependency-review:
    if: github.repository_owner == 'nedbat'
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout Repository'
        uses: actions/checkout@v4

      - name: 'Dependency Review'
        uses: actions/dependency-review-action@v4
        with:
          base-ref: ${{ github.event.pull_request.base.sha || 'master' }}
          head-ref: ${{ github.event.pull_request.head.sha || github.ref }}

coverage-7.4.4/.github/workflows/kit.yml

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

# This file is meant to be processed with cog.
# Running "make prebuild" will bring it up to date.

# Based on:
# https://github.com/joerick/cibuildwheel/blob/master/examples/github-deploy.yml

# To test installing wheels without uploading them to PyPI:
#
#   $ mkdir /tmp/pypi
#   $ cp dist/* /tmp/pypi
#   $ python -m pip install piprepo
#   $ piprepo build /tmp/pypi
#   $ python -m pip install -v coverage --index-url=file:///tmp/pypi/simple
#
# Note that cibuildwheel recommends not shipping wheels for pre-release versions
# of Python: https://cibuildwheel.readthedocs.io/en/stable/options/#prerelease-pythons
# So we don't.

name: "Kits"

on:
  push:
    branches:
      # Don't build kits all the time, but do if the branch is about kits.
      - "**/*kit*"
  workflow_dispatch:
  repository_dispatch:
    types:
      - build-kits

defaults:
  run:
    shell: bash

env:
  PIP_DISABLE_PIP_VERSION_CHECK: 1

permissions:
  contents: read

concurrency:
  group: "${{ github.workflow }}-${{ github.ref }}"
  cancel-in-progress: true

jobs:
  wheels:
    name: "${{ matrix.py }} ${{ matrix.os }} ${{ matrix.arch }} wheels"
    runs-on: ${{ matrix.os }}-latest
    env:
      MATRIX_ID: "${{ matrix.py }}-${{ matrix.os }}-${{ matrix.arch }}"
    strategy:
      matrix:
        include:
          # To change the matrix, edit the choices, then process this file with cog:
          #
          # $ make workflows
          #
          # which runs:
          #
          # $ python -m pip install cogapp
          # $ python -m cogapp -crP .github/workflows/kit.yml
          #
          # Choices come from the table on https://pypi.org/project/cibuildwheel/
          #
          # [[[cog
          #   #----- vvv Choices for the matrix vvv -----
          #
          #   # Operating systems:
          #   oss = ["ubuntu", "macos", "windows"]
          #
          #   # For each OS, what arch to use with cibuildwheel:
          #   os_archs = {
          #       "ubuntu": ["x86_64", "i686", "aarch64"],
          #       "macos": ["arm64", "x86_64"],
          #       "windows": ["x86", "AMD64"],
          #   }
          #   # PYVERSIONS. Available versions:
          #   # https://github.com/actions/python-versions/blob/main/versions-manifest.json
          #   # PyPy versions are handled further below in the "pypy" step.
          #   pys = ["cp38", "cp39", "cp310", "cp311", "cp312"]
          #
          #   # Some OS/arch combinations need overrides for the Python versions:
          #   os_arch_pys = {
          #       ("macos", "arm64"): ["cp38", "cp39", "cp310", "cp311", "cp312"],
          #   }
          #
          #   #----- ^^^ ---------------------- ^^^ -----
          #
          #   import json
          #   for the_os in oss:
          #       for the_arch in os_archs[the_os]:
          #           for the_py in os_arch_pys.get((the_os, the_arch), pys):
          #               them = {
          #                   "os": the_os,
          #                   "py": the_py,
          #                   "arch": the_arch,
          #               }
          #               print(f"- {json.dumps(them)}")
          # ]]]
          - {"os": "ubuntu", "py": "cp38", "arch": "x86_64"}
          - {"os": "ubuntu", "py": "cp39", "arch": "x86_64"}
          - {"os": "ubuntu", "py": "cp310", "arch": "x86_64"}
          - {"os": "ubuntu", "py": "cp311", "arch": "x86_64"}
          - {"os": "ubuntu", "py": "cp312", "arch": "x86_64"}
          - {"os": "ubuntu", "py": "cp38", "arch": "i686"}
          - {"os": "ubuntu", "py": "cp39", "arch": "i686"}
          - {"os": "ubuntu", "py": "cp310", "arch": "i686"}
          - {"os": "ubuntu", "py": "cp311", "arch": "i686"}
          - {"os": "ubuntu", "py": "cp312", "arch": "i686"}
          - {"os": "ubuntu", "py": "cp38", "arch": "aarch64"}
          - {"os": "ubuntu", "py": "cp39", "arch": "aarch64"}
          - {"os": "ubuntu", "py": "cp310", "arch": "aarch64"}
          - {"os": "ubuntu", "py": "cp311", "arch": "aarch64"}
          - {"os": "ubuntu", "py": "cp312", "arch": "aarch64"}
          - {"os": "macos", "py": "cp38", "arch": "arm64"}
          - {"os": "macos", "py": "cp39", "arch": "arm64"}
          - {"os": "macos", "py": "cp310", "arch": "arm64"}
          - {"os": "macos", "py": "cp311", "arch": "arm64"}
          - {"os": "macos", "py": "cp312", "arch": "arm64"}
          - {"os": "macos", "py": "cp38", "arch": "x86_64"}
          - {"os": "macos", "py": "cp39", "arch": "x86_64"}
          - {"os": "macos", "py": "cp310", "arch": "x86_64"}
          - {"os": "macos", "py": "cp311", "arch": "x86_64"}
          - {"os": "macos", "py": "cp312", "arch": "x86_64"}
          - {"os": "windows", "py": "cp38", "arch": "x86"}
          - {"os": "windows", "py": "cp39", "arch": "x86"}
          - {"os": "windows", "py": "cp310", "arch": "x86"}
          - {"os": "windows", "py": "cp311", "arch": "x86"}
          - {"os": "windows", "py": "cp312", "arch": "x86"}
          - {"os": "windows", "py": "cp38", "arch": "AMD64"}
          - {"os": "windows", "py": "cp39", "arch": "AMD64"}
          - {"os": "windows", "py": "cp310", "arch": "AMD64"}
          - {"os": "windows", "py": "cp311", "arch": "AMD64"}
          - {"os": "windows", "py": "cp312", "arch": "AMD64"}
        # [[[end]]] (checksum: a6ca53e9c620c9e5ca85e7322122056c)
      fail-fast: false

    steps:
      - name: "Setup QEMU"
        if: matrix.os == 'ubuntu'
        uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3
        with:
          platforms: arm64

      - name: "Check out the repo"
        uses: actions/checkout@v4

      - name: "Install Python 3.8"
        uses: actions/setup-python@v5
        with:
          # PYVERSIONS
          python-version: "3.8"
          cache: pip
          cache-dependency-path: 'requirements/*.pip'

      - name: "Install tools"
        run: |
          python -m pip install -r requirements/kit.pip

      - name: "Build wheels"
        env:
          CIBW_BUILD: ${{ matrix.py }}-*
          CIBW_ARCHS: ${{ matrix.arch }}
          CIBW_ENVIRONMENT: PIP_DISABLE_PIP_VERSION_CHECK=1
          CIBW_PRERELEASE_PYTHONS: True
          CIBW_TEST_COMMAND: python -c "from coverage.tracer import CTracer; print('CTracer OK!')"
        run: |
          python -m cibuildwheel --output-dir wheelhouse

      - name: "List wheels"
        run: |
          ls -al wheelhouse/

      - name: "Upload wheels"
        uses: actions/upload-artifact@v4
        with:
          name: dist-${{ env.MATRIX_ID }}
          path: wheelhouse/*.whl
          retention-days: 7

  sdist:
    name: "Source distribution"
    runs-on: ubuntu-latest
    steps:
      - name: "Check out the repo"
        uses: actions/checkout@v4

      - name: "Install Python 3.8"
        uses: actions/setup-python@v5
        with:
          # PYVERSIONS
          python-version: "3.8"
          cache: pip
          cache-dependency-path: 'requirements/*.pip'

      - name: "Install tools"
        run: |
          python -m pip install -r requirements/kit.pip

      - name: "Build sdist"
        run: |
          python -m build

      - name: "List tarballs"
        run: |
          ls -al dist/

      - name: "Upload sdist"
        uses: actions/upload-artifact@v4
        with:
          name: dist-sdist
          path: dist/*.tar.gz
          retention-days: 7

  pypy:
    name: "PyPy wheel"
    runs-on: ubuntu-latest
    steps:
      - name: "Check out the repo"
        uses: actions/checkout@v4

      - name: "Install PyPy"
        uses: actions/setup-python@v5
        with:
          python-version: "pypy-3.8" # Minimum of PyPy PYVERSIONS
          cache: pip
          cache-dependency-path: 'requirements/*.pip'

      - name: "Install requirements"
        run: |
          pypy3 -m pip install -r requirements/kit.pip

      - name: "Build wheel"
        env:
          DIST_EXTRA_CONFIG: extra.cfg
        run: |
          # One wheel works for all PyPy versions. PYVERSIONS
          # yes, this is weird syntax: https://github.com/pypa/build/issues/202
          echo -e "[bdist_wheel]\npython_tag=pp38.pp39.pp310" > $DIST_EXTRA_CONFIG
          pypy3 -m build -w

      - name: "List wheels"
        run: |
          ls -al dist/

      - name: "Upload wheels"
        uses: actions/upload-artifact@v4
        with:
          name: dist-pypy
          path: dist/*.whl
          retention-days: 7

  sign:
    # This signs our artifacts, but we don't use the signatures for anything
    # yet. Someday maybe PyPI will have a way to upload and verify them.
    name: "Sign artifacts"
    needs:
      - wheels
      - sdist
      - pypy
    runs-on: ubuntu-latest
    permissions:
      id-token: write
    steps:
      - name: "Download artifacts"
        uses: actions/download-artifact@v4
        with:
          pattern: dist-*
          merge-multiple: true

      - name: "Sign artifacts"
        uses: sigstore/gh-action-sigstore-python@v2.1.1
        with:
          inputs: coverage-*.*

      - name: "List files"
        run: |
          ls -alR

      - name: "Upload signatures"
        uses: actions/upload-artifact@v4
        with:
          name: signatures
          path: |
            *.crt
            *.sig
            *.sigstore
          retention-days: 7
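The wheel, sdist, and PyPy jobs above come down to a few commands, so roughly the same artifacts can be built locally; this is only a sketch, assuming a checkout of the repo and, for the cibuildwheel step on Linux, a working Docker install:

    python -m pip install -r requirements/kit.pip
    python -m build                                 # sdist and a wheel for the current interpreter
    python -m cibuildwheel --output-dir wheelhouse  # the full wheel matrix

The piprepo recipe in the header of kit.yml can then be used to test-install the results without uploading anything to PyPI.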
coverage-7.4.4/.github/workflows/python-nightly.yml

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

name: "Python Nightly Tests"

on:
  push:
    branches:
      - "**/*nightly*"
  schedule:
    # Run at 2:22am early every morning Eastern time (6/7:22 UTC)
    # so that we get tips of CPython development tested.
    # https://crontab.guru/#22_7_%2a_%2a_%2a
    - cron: "22 7 * * *"
  workflow_dispatch:

defaults:
  run:
    shell: bash

env:
  PIP_DISABLE_PIP_VERSION_CHECK: 1
  COVERAGE_IGOR_VERBOSE: 1

permissions:
  contents: read

concurrency:
  group: "${{ github.workflow }}-${{ github.ref }}"
  cancel-in-progress: true

jobs:
  tests:
    name: "${{ matrix.python-version }}"
    # Choose a recent Ubuntu that deadsnakes still builds all the versions for.
    # For example, deadsnakes doesn't provide 3.10 nightly for 22.04 (jammy)
    # because jammy ships 3.10, and deadsnakes doesn't want to clobber it.
    # https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly/+packages
    # https://github.com/deadsnakes/issues/issues/234
    # bionic: 18, focal: 20, jammy: 22
    runs-on: ubuntu-20.04
    # If it doesn't finish in an hour, it's not going to. Don't spin for six
    # hours needlessly.
    timeout-minutes: 60

    strategy:
      matrix:
        python-version:
          # When changing this list, be sure to check the [gh] list in
          # tox.ini so that tox will run properly. PYVERSIONS
          # Available versions:
          # https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly/+packages
          - "3.11-dev"
          - "3.12-dev"
          - "3.13-dev"
          # https://github.com/actions/setup-python#available-versions-of-pypy
          - "pypy-3.8-nightly"
          - "pypy-3.9-nightly"
          - "pypy-3.10-nightly"
      fail-fast: false

    steps:
      - name: "Check out the repo"
        uses: "actions/checkout@v4"

      - name: "Install ${{ matrix.python-version }} with deadsnakes"
        uses: deadsnakes/action@6c8b9b82fe0b4344f4b98f2775fcc395df45e494
        if: "!startsWith(matrix.python-version, 'pypy-')"
        with:
          python-version: "${{ matrix.python-version }}"

      - name: "Install ${{ matrix.python-version }} with setup-python"
        uses: "actions/setup-python@v5"
        if: "startsWith(matrix.python-version, 'pypy-')"
        with:
          python-version: "${{ matrix.python-version }}"

      - name: "Show diagnostic info"
        run: |
          set -xe
          python -VV
          python -m site
          python -m coverage debug sys
          python -m coverage debug pybehave
          env

      - name: "Install dependencies"
        run: |
          python -m pip install -r requirements/tox.pip

      - name: "Run tox"
        run: |
          python -m tox -- -rfsEX

coverage-7.4.4/.github/workflows/quality.yml

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

name: "Quality"

on:
  push:
    branches:
      - master
      - nedbat/*
  pull_request:
  workflow_dispatch:

defaults:
  run:
    shell: bash

env:
  PIP_DISABLE_PIP_VERSION_CHECK: 1

permissions:
  contents: read

concurrency:
  group: "${{ github.workflow }}-${{ github.ref }}"
  cancel-in-progress: true

jobs:
  lint:
    name: "Pylint etc"
    # Because pylint can report different things on different OS's (!)
    # (https://github.com/PyCQA/pylint/issues/3489), run this on Mac where local
    # pylint gets run.
    runs-on: macos-latest

    steps:
      - name: "Check out the repo"
        uses: "actions/checkout@v4"

      - name: "Install Python"
        uses: "actions/setup-python@v5"
        with:
          python-version: "3.8" # Minimum of PYVERSIONS
          cache: pip
          cache-dependency-path: 'requirements/*.pip'

      - name: "Install dependencies"
        run: |
          python -m pip install -r requirements/tox.pip

      - name: "Tox lint"
        run: |
          python -m tox -e lint

  mypy:
    name: "Check types"
    runs-on: ubuntu-latest

    steps:
      - name: "Check out the repo"
        uses: "actions/checkout@v4"

      - name: "Install Python"
        uses: "actions/setup-python@v5"
        with:
          python-version: "3.8" # Minimum of PYVERSIONS, but at least 3.8
          cache: pip
          cache-dependency-path: 'requirements/*.pip'

      - name: "Install dependencies"
        run: |
          # We run on 3.8, but the pins were made on 3.7, so don't insist on
          # hashes, which won't match.
          python -m pip install -r requirements/tox.pip

      - name: "Tox mypy"
        run: |
          python -m tox -e mypy

  doc:
    name: "Build docs"
    runs-on: ubuntu-latest

    steps:
      - name: "Check out the repo"
        uses: "actions/checkout@v4"

      - name: "Install Python"
        uses: "actions/setup-python@v5"
        with:
          python-version: "3.11" # Doc version from PYVERSIONS
          cache: pip
          cache-dependency-path: 'requirements/*.pip'

      - name: "Show environment"
        run: |
          set -xe
          python -VV
          python -m site
          env

      - name: "Install dependencies"
        run: |
          set -xe
          python -m pip install -r requirements/tox.pip

      - name: "Tox doc"
        run: |
          python -m tox -e doc
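The three jobs above are thin wrappers around tox environments, so the same checks can be run locally; assuming the pins from requirements/tox.pip are installed in the working environment, that is:

    python -m pip install -r requirements/tox.pip
    python -m tox -e lint
    python -m tox -e mypy
    python -m tox -e doc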
coverage-7.4.4/.github/workflows/testsuite.yml

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

name: "Tests"

on:
  push:
    branches:
      - master
      - nedbat/*
  pull_request:
  workflow_dispatch:

defaults:
  run:
    shell: bash

env:
  PIP_DISABLE_PIP_VERSION_CHECK: 1
  COVERAGE_IGOR_VERBOSE: 1
  FORCE_COLOR: 1    # Get colored pytest output

permissions:
  contents: read

concurrency:
  group: "${{ github.workflow }}-${{ github.ref }}"
  cancel-in-progress: true

jobs:
  tests:
    name: "${{ matrix.python-version }} on ${{ matrix.os }}"
    runs-on: "${{ matrix.os }}-latest"
    # Don't run tests if the branch name includes "-notests"
    if: "!contains(github.ref, '-notests')"
    strategy:
      matrix:
        os:
          - ubuntu
          - macos
          - windows
        python-version:
          # When changing this list, be sure to check the [gh] list in
          # tox.ini so that tox will run properly. PYVERSIONS
          # Available versions:
          # https://github.com/actions/python-versions/blob/main/versions-manifest.json
          # https://github.com/actions/setup-python/blob/main/docs/advanced-usage.md#available-versions-of-python-and-pypy
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
          - "3.13"
          - "pypy-3.8"
          - "pypy-3.9"
          - "pypy-3.10"
        exclude:
          # Windows pypy 3.9 and 3.10 get stuck with PyPy 7.3.15. I hope to
          # unstick them, but I don't want that to block all other progress, so
          # skip them for now.
          - os: windows
            python-version: "pypy-3.9"
          - os: windows
            python-version: "pypy-3.10"
          # Skip 3.13.0a4 and pin to 3.13.0a3 for Windows due to build error.
          # Undo when 3.13.0a5 is released.
          - os: windows
            python-version: "3.13"
        include:
          - os: windows
            python-version: "3.13.0-alpha.3"
      fail-fast: false

    steps:
      - name: "Check out the repo"
        uses: "actions/checkout@v4"

      - name: "Set up Python"
        uses: "actions/setup-python@v5"
        with:
          python-version: "${{ matrix.python-version }}"
          allow-prereleases: true
          # At a certain point, installing dependencies failed on pypy 3.9 and
          # 3.10 on Windows. Commenting out the cache here fixed it. Someday
          # try using the cache again.
          #cache: pip
          #cache-dependency-path: 'requirements/*.pip'

      - name: "Show environment"
        run: |
          set -xe
          python -VV
          python -m site
          # For extreme debugging:
          # python -c "import urllib.request as r; exec(r.urlopen('https://bit.ly/pydoctor').read())"
          env

      - name: "Install dependencies"
        run: |
          set -xe
          python -m pip install -r requirements/tox.pip

      - name: "Run tox for ${{ matrix.python-version }}"
        run: |
          python -m tox -- -rfsEX

      - name: "Retry tox for ${{ matrix.python-version }}"
        if: failure()
        run: |
          # `exit 1` makes sure that the job remains red with flaky runs
          python -m tox -- -rfsEX --lf -vvvvv && exit 1

  # This job aggregates test results. It's the required check for branch protection.
  # https://github.com/marketplace/actions/alls-green#why
  # https://github.com/orgs/community/discussions/33579
  success:
    name: Tests successful
    # The tests didn't run if the branch name includes "-notests"
    if: "!contains(github.ref, '-notests')"
    needs:
      - tests
    runs-on: ubuntu-latest
    steps:
      - name: Decide whether the needed jobs succeeded or failed
        uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe
        with:
          jobs: ${{ toJSON(needs) }}

coverage-7.4.4/.readthedocs.yaml

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
#
# ReadTheDocs configuration.
# See https://docs.readthedocs.io/en/stable/config-file/v2.html

version: 2

build:
  os: ubuntu-22.04
  tools:
    # PYVERSIONS: the version we use for building docs. Check tox.ini[doc] also.
    python: "3.11"

sphinx:
  builder: html
  configuration: doc/conf.py

# Don't build anything except HTML.
formats: []

python:
  install:
    - requirements: doc/requirements.pip
    - method: pip
      path: .

coverage-7.4.4/CHANGES.rst

.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

==============================
Change history for coverage.py
==============================

These changes are listed in decreasing version number order. Note this can be
different from a strict chronological order when there are two branches in
development at the same time, such as 4.5.x and 5.0.

See :ref:`migrating` for significant changes that might be required when
upgrading your version of coverage.py.

.. When updating the "Unreleased" header to a specific version, use this
.. format.  Don't forget the jump target:
..
..  .. _changes_9-8-1:
..
..  Version 9.8.1 — 2027-07-27
..  --------------------------

.. scriv-start-here

..
_changes_7-4-4: Version 7.4.4 โ€” 2024-03-14 -------------------------- - Fix: in some cases, even with ``[run] relative_files=True``, a data file could be created with absolute path names. When combined with other relative data files, it was random whether the absolute file names would be made relative or not. If they weren't, then a file would be listed twice in reports, as detailed in `issue 1752`_. This is now fixed: absolute file names are always made relative when combining. Thanks to Bruno Rodrigues dos Santos for support. - Fix: the last case of a match/case statement had an incorrect message if the branch was missed. It said the pattern never matched, when actually the branch is missed if the last case always matched. - Fix: clicking a line number in the HTML report now positions more accurately. - Fix: the ``report:format`` setting was defined as a boolean, but should be a string. Thanks, `Tanaydin Sirin `_. It is also now documented on the :ref:`configuration page `. .. _issue 1752: https://github.com/nedbat/coveragepy/issues/1752 .. _pull 1754: https://github.com/nedbat/coveragepy/pull/1754 .. _changes_7-4-3: Version 7.4.3 โ€” 2024-02-23 -------------------------- - Fix: in some cases, coverage could fail with a RuntimeError: "Set changed size during iteration." This is now fixed, closing `issue 1733`_. .. _issue 1733: https://github.com/nedbat/coveragepy/issues/1733 .. _changes_7-4-2: Version 7.4.2 โ€” 2024-02-20 -------------------------- - Fix: setting ``COVERAGE_CORE=sysmon`` no longer errors on 3.11 and lower, thanks `Hugo van Kemenade `_. It now issues a warning that sys.monitoring is not available and falls back to the default core instead. .. _pull 1747: https://github.com/nedbat/coveragepy/pull/1747 .. _changes_7-4-1: Version 7.4.1 โ€” 2024-01-26 -------------------------- - Python 3.13.0a3 is supported. - Fix: the JSON report now includes an explicit format version number, closing `issue 1732`_. .. _issue 1732: https://github.com/nedbat/coveragepy/issues/1732 .. _changes_7-4-0: Version 7.4.0 โ€” 2023-12-27 -------------------------- - In Python 3.12 and above, you can try an experimental core based on the new :mod:`sys.monitoring ` module by defining a ``COVERAGE_CORE=sysmon`` environment variable. This should be faster for line coverage, but not for branch coverage, and plugins and dynamic contexts are not yet supported with it. I am very interested to hear how it works (or doesn't!) for you. .. _changes_7-3-4: Version 7.3.4 โ€” 2023-12-20 -------------------------- - Fix: the change for multi-line signature exclusions in 7.3.3 broke other forms of nested clauses being excluded properly. This is now fixed, closing `issue 1713`_. - Fix: in the HTML report, selecting code for copying won't select the line numbers also. Thanks, `Robert Harris `_. .. _issue 1713: https://github.com/nedbat/coveragepy/issues/1713 .. _pull 1717: https://github.com/nedbat/coveragepy/pull/1717 .. _changes_7-3-3: Version 7.3.3 โ€” 2023-12-14 -------------------------- - Fix: function definitions with multi-line signatures can now be excluded by matching any of the lines, closing `issue 684`_. Thanks, `Jan Rusak, Maciej Kowalczyk and Joanna Ejzel `_. - Fix: XML reports could fail with a TypeError if files had numeric components that were duplicates except for leading zeroes, like ``file1.py`` and ``file001.py``. Fixes `issue 1709`_. - The ``coverage annotate`` command used to announce that it would be removed in a future version. 
Enough people got in touch to say that they use it, so it will stay. Don't expect it to keep up with other new features though. - Added new :ref:`debug options `: - ``pytest`` writes the pytest test name into the debug output. - ``dataop2`` writes the full data being added to CoverageData objects. .. _issue 684: https://github.com/nedbat/coveragepy/issues/684 .. _pull 1705: https://github.com/nedbat/coveragepy/pull/1705 .. _issue 1709: https://github.com/nedbat/coveragepy/issues/1709 .. _changes_7-3-2: Version 7.3.2 โ€” 2023-10-02 -------------------------- - The ``coverage lcov`` command ignored the ``[report] exclude_lines`` and ``[report] exclude_also`` settings (`issue 1684`_). This is now fixed, thanks `Jacqueline Lee `_. - Sometimes SQLite will create journal files alongside the coverage.py database files. These are ephemeral, but could be mistakenly included when combining data files. Now they are always ignored, fixing `issue 1605`_. Thanks to Brad Smith for suggesting fixes and providing detailed debugging. - On Python 3.12+, we now disable SQLite writing journal files, which should be a little faster. - The new 3.12 soft keyword ``type`` is properly bolded in HTML reports. - Removed the "fullcoverage" feature used by CPython to measure the coverage of early-imported standard library modules. CPython `stopped using it <88054_>`_ in 2021, and it stopped working completely in Python 3.13. .. _issue 1605: https://github.com/nedbat/coveragepy/issues/1605 .. _issue 1684: https://github.com/nedbat/coveragepy/issues/1684 .. _pull 1685: https://github.com/nedbat/coveragepy/pull/1685 .. _88054: https://github.com/python/cpython/issues/88054 .. _changes_7-3-1: Version 7.3.1 โ€” 2023-09-06 -------------------------- - The semantics of stars in file patterns has been clarified in the docs. A leading or trailing star matches any number of path components, like a double star would. This is different than the behavior of a star in the middle of a pattern. This discrepancy was `identified by Sviatoslav Sydorenko `_, who `provided patient detailed diagnosis `_ and graciously agreed to a pragmatic resolution. - The API docs were missing from the last version. They are now `restored `_. .. _apidocs: https://coverage.readthedocs.io/en/latest/api_coverage.html .. _starbad: https://github.com/nedbat/coveragepy/issues/1407#issuecomment-1631085209 .. _pull 1650: https://github.com/nedbat/coveragepy/pull/1650 .. _changes_7-3-0: Version 7.3.0 โ€” 2023-08-12 -------------------------- - Added a :meth:`.Coverage.collect` context manager to start and stop coverage data collection. - Dropped support for Python 3.7. - Fix: in unusual circumstances, SQLite cannot be set to asynchronous mode. Coverage.py would fail with the error ``Safety level may not be changed inside a transaction.`` This is now avoided, closing `issue 1646`_. Thanks to Michael Bell for the detailed bug report. - Docs: examples of configuration files now include separate examples for the different syntaxes: .coveragerc, pyproject.toml, setup.cfg, and tox.ini. - Fix: added ``nosemgrep`` comments to our JavaScript code so that semgrep-based SAST security checks won't raise false alarms about security problems that aren't problems. - Added a CITATION.cff file, thanks to `Ken Schackart `_. .. _pull 1641: https://github.com/nedbat/coveragepy/pull/1641 .. _issue 1646: https://github.com/nedbat/coveragepy/issues/1646 .. 
_changes_7-2-7: Version 7.2.7 โ€” 2023-05-29 -------------------------- - Fix: reverted a `change from 6.4.3 `_ that helped Cython, but also increased the size of data files when using dynamic contexts, as described in the now-fixed `issue 1586`_. The problem is now avoided due to a recent change (`issue 1538 `_). Thanks to `Anders Kaseorg `_ and David Szotten for persisting with problem reports and detailed diagnoses. - Wheels are now provided for CPython 3.12. .. _pull 1347b: https://github.com/nedbat/coveragepy/pull/1347 .. _issue 1538b: https://github.com/nedbat/coveragepy/issues/1538 .. _issue 1586: https://github.com/nedbat/coveragepy/issues/1586 .. _pull 1629: https://github.com/nedbat/coveragepy/pull/1629 .. _changes_7-2-6: Version 7.2.6 โ€” 2023-05-23 -------------------------- - Fix: the ``lcov`` command could raise an IndexError exception if a file is translated to Python but then executed under its own name. Jinja2 does this when rendering templates. Fixes `issue 1553`_. - Python 3.12 beta 1 now inlines comprehensions. Previously they were compiled as invisible functions and coverage.py would warn you if they weren't completely executed. This no longer happens under Python 3.12. - Fix: the ``coverage debug sys`` command includes some environment variables in its output. This could have included sensitive data. Those values are now hidden with asterisks, closing `issue 1628`_. .. _issue 1553: https://github.com/nedbat/coveragepy/issues/1553 .. _issue 1628: https://github.com/nedbat/coveragepy/issues/1628 .. _changes_7-2-5: Version 7.2.5 โ€” 2023-04-30 -------------------------- - Fix: ``html_report()`` could fail with an AttributeError on ``isatty`` if run in an unusual environment where sys.stdout had been replaced. This is now fixed. .. _changes_7-2-4: Version 7.2.4 โ€” 2023-04-28 -------------------------- PyCon 2023 sprint fixes! - Fix: with ``relative_files = true``, specifying a specific file to include or omit wouldn't work correctly (`issue 1604`_). This is now fixed, with testing help by `Marc Gibbons `_. - Fix: the XML report would have an incorrect ```` element when using relative files and the source option ended with a slash (`issue 1541`_). This is now fixed, thanks to `Kevin Brown-Silva `_. - When the HTML report location is printed to the terminal, it's now a terminal-compatible URL, so that you can click the location to open the HTML file in your browser. Finishes `issue 1523`_ thanks to `Ricardo Newbery `_. - Docs: a new :ref:`Migrating page ` with details about how to migrate between major versions of coverage.py. It currently covers the wildcard changes in 7.x. Thanks, `Brian Grohe `_. .. _issue 1523: https://github.com/nedbat/coveragepy/issues/1523 .. _issue 1541: https://github.com/nedbat/coveragepy/issues/1541 .. _issue 1604: https://github.com/nedbat/coveragepy/issues/1604 .. _pull 1608: https://github.com/nedbat/coveragepy/pull/1608 .. _pull 1609: https://github.com/nedbat/coveragepy/pull/1609 .. _pull 1610: https://github.com/nedbat/coveragepy/pull/1610 .. _pull 1613: https://github.com/nedbat/coveragepy/pull/1613 .. _changes_7-2-3: Version 7.2.3 โ€” 2023-04-06 -------------------------- - Fix: the :ref:`config_run_sigterm` setting was meant to capture data if a process was terminated with a SIGTERM signal, but it didn't always. This was fixed thanks to `Lewis Gaul `_, closing `issue 1599`_. - Performance: HTML reports with context information are now much more compact. 
File sizes are typically as small as one-third the previous size, but can be dramatically smaller. This closes `issue 1584`_ thanks to `Oleh Krehel `_. - Development dependencies no longer use hashed pins, closing `issue 1592`_. .. _issue 1584: https://github.com/nedbat/coveragepy/issues/1584 .. _pull 1587: https://github.com/nedbat/coveragepy/pull/1587 .. _issue 1592: https://github.com/nedbat/coveragepy/issues/1592 .. _issue 1599: https://github.com/nedbat/coveragepy/issues/1599 .. _pull 1600: https://github.com/nedbat/coveragepy/pull/1600 .. _changes_7-2-2: Version 7.2.2 โ€” 2023-03-16 -------------------------- - Fix: if a virtualenv was created inside a source directory, and a sourced package was installed inside the virtualenv, then all of the third-party packages inside the virtualenv would be measured. This was incorrect, but has now been fixed: only the specified packages will be measured, thanks to `Manuel Jacob `_. - Fix: the ``coverage lcov`` command could create a .lcov file with incorrect LF (lines found) and LH (lines hit) totals. This is now fixed, thanks to `Ian Moore `_. - Fix: the ``coverage xml`` command on Windows could create a .xml file with duplicate ```` elements. This is now fixed, thanks to `Benjamin Parzella `_, closing `issue 1573`_. .. _pull 1560: https://github.com/nedbat/coveragepy/pull/1560 .. _issue 1573: https://github.com/nedbat/coveragepy/issues/1573 .. _pull 1574: https://github.com/nedbat/coveragepy/pull/1574 .. _pull 1583: https://github.com/nedbat/coveragepy/pull/1583 .. _changes_7-2-1: Version 7.2.1 โ€” 2023-02-26 -------------------------- - Fix: the PyPI page had broken links to documentation pages, but no longer does, closing `issue 1566`_. - Fix: public members of the coverage module are now properly indicated so that mypy will find them, fixing `issue 1564`_. .. _issue 1564: https://github.com/nedbat/coveragepy/issues/1564 .. _issue 1566: https://github.com/nedbat/coveragepy/issues/1566 .. _changes_7-2-0: Version 7.2.0 โ€” 2023-02-22 -------------------------- - Added a new setting ``[report] exclude_also`` to let you add more exclusions without overwriting the defaults. Thanks, `Alpha Chen `_, closing `issue 1391`_. - Added a :meth:`.CoverageData.purge_files` method to remove recorded data for a particular file. Contributed by `Stephan Deibel `_. - Fix: when reporting commands fail, they will no longer congratulate themselves with messages like "Wrote XML report to file.xml" before spewing a traceback about their failure. - Fix: arguments in the public API that name file paths now accept pathlib.Path objects. This includes the ``data_file`` and ``config_file`` arguments to the Coverage constructor and the ``basename`` argument to CoverageData. Closes `issue 1552`_. - Fix: In some embedded environments, an IndexError could occur on stop() when the originating thread exits before completion. This is now fixed, thanks to `Russell Keith-Magee `_, closing `issue 1542`_. - Added a ``py.typed`` file to announce our type-hintedness. Thanks, `KotlinIsland `_. .. _issue 1391: https://github.com/nedbat/coveragepy/issues/1391 .. _issue 1542: https://github.com/nedbat/coveragepy/issues/1542 .. _pull 1543: https://github.com/nedbat/coveragepy/pull/1543 .. _pull 1547: https://github.com/nedbat/coveragepy/pull/1547 .. _pull 1550: https://github.com/nedbat/coveragepy/pull/1550 .. _issue 1552: https://github.com/nedbat/coveragepy/issues/1552 .. _pull 1557: https://github.com/nedbat/coveragepy/pull/1557 .. 
_changes_7-1-0: Version 7.1.0 โ€” 2023-01-24 -------------------------- - Added: the debug output file can now be specified with ``[run] debug_file`` in the configuration file. Closes `issue 1319`_. - Performance: fixed a slowdown with dynamic contexts that's been around since 6.4.3. The fix closes `issue 1538`_. Thankfully this doesn't break the `Cython change`_ that fixed `issue 972`_. Thanks to Mathieu Kniewallner for the deep investigative work and comprehensive issue report. - Typing: all product and test code has type annotations. .. _Cython change: https://github.com/nedbat/coveragepy/pull/1347 .. _issue 972: https://github.com/nedbat/coveragepy/issues/972 .. _issue 1319: https://github.com/nedbat/coveragepy/issues/1319 .. _issue 1538: https://github.com/nedbat/coveragepy/issues/1538 .. _changes_7-0-5: Version 7.0.5 โ€” 2023-01-10 -------------------------- - Fix: On Python 3.7, a file with type annotations but no ``from __future__ import annotations`` would be missing statements in the coverage report. This is now fixed, closing `issue 1524`_. .. _issue 1524: https://github.com/nedbat/coveragepy/issues/1524 .. _changes_7-0-4: Version 7.0.4 โ€” 2023-01-07 -------------------------- - Performance: an internal cache of file names was accidentally disabled, resulting in sometimes drastic reductions in performance. This is now fixed, closing `issue 1527`_. Thanks to Ivan Ciuvalschii for the reproducible test case. .. _issue 1527: https://github.com/nedbat/coveragepy/issues/1527 .. _changes_7-0-3: Version 7.0.3 โ€” 2023-01-03 -------------------------- - Fix: when using pytest-cov or pytest-xdist, or perhaps both, the combining step could fail with ``assert row is not None`` using 7.0.2. This was due to a race condition that has always been possible and is still possible. In 7.0.1 and before, the error was silently swallowed by the combining code. Now it will produce a message "Couldn't combine data file" and ignore the data file as it used to do before 7.0.2. Closes `issue 1522`_. .. _issue 1522: https://github.com/nedbat/coveragepy/issues/1522 .. _changes_7-0-2: Version 7.0.2 โ€” 2023-01-02 -------------------------- - Fix: when using the ``[run] relative_files = True`` setting, a relative ``[paths]`` pattern was still being made absolute. This is now fixed, closing `issue 1519`_. - Fix: if Python doesn't provide tomllib, then TOML configuration files can only be read if coverage.py is installed with the ``[toml]`` extra. Coverage.py will raise an error if TOML support is not installed when it sees your settings are in a .toml file. But it didn't understand that ``[tools.coverage]`` was a valid section header, so the error wasn't reported if you used that header, and settings were silently ignored. This is now fixed, closing `issue 1516`_. - Fix: adjusted how decorators are traced on PyPy 7.3.10, fixing `issue 1515`_. - Fix: the ``coverage lcov`` report did not properly implement the ``--fail-under=MIN`` option. This has been fixed. - Refactor: added many type annotations, including a number of refactorings. This should not affect outward behavior, but they were a bit invasive in some places, so keep your eyes peeled for oddities. - Refactor: removed the vestigial and long untested support for Jython and IronPython. .. _issue 1515: https://github.com/nedbat/coveragepy/issues/1515 .. _issue 1516: https://github.com/nedbat/coveragepy/issues/1516 .. _issue 1519: https://github.com/nedbat/coveragepy/issues/1519 .. 
_changes_7-0-1: Version 7.0.1 โ€” 2022-12-23 -------------------------- - When checking if a file mapping resolved to a file that exists, we weren't considering files in .whl files. This is now fixed, closing `issue 1511`_. - File pattern rules were too strict, forbidding plus signs and curly braces in directory and file names. This is now fixed, closing `issue 1513`_. - Unusual Unicode or control characters in source files could prevent reporting. This is now fixed, closing `issue 1512`_. - The PyPy wheel now installs on PyPy 3.7, 3.8, and 3.9, closing `issue 1510`_. .. _issue 1510: https://github.com/nedbat/coveragepy/issues/1510 .. _issue 1511: https://github.com/nedbat/coveragepy/issues/1511 .. _issue 1512: https://github.com/nedbat/coveragepy/issues/1512 .. _issue 1513: https://github.com/nedbat/coveragepy/issues/1513 .. _changes_7-0-0: Version 7.0.0 โ€” 2022-12-18 -------------------------- Nothing new beyond 7.0.0b1. .. _changes_7-0-0b1: Version 7.0.0b1 โ€” 2022-12-03 ---------------------------- A number of changes have been made to file path handling, including pattern matching and path remapping with the ``[paths]`` setting (see :ref:`config_paths`). These changes might affect you, and require you to update your settings. (This release includes the changes from `6.6.0b1`__, since 6.6.0 was never released.) __ https://coverage.readthedocs.io/en/latest/changes.html#changes-6-6-0b1 - Changes to file pattern matching, which might require updating your configuration: - Previously, ``*`` would incorrectly match directory separators, making precise matching difficult. This is now fixed, closing `issue 1407`_. - Now ``**`` matches any number of nested directories, including none. - Improvements to combining data files when using the :ref:`config_run_relative_files` setting, which might require updating your configuration: - During ``coverage combine``, relative file paths are implicitly combined without needing a ``[paths]`` configuration setting. This also fixed `issue 991`_. - A ``[paths]`` setting like ``*/foo`` will now match ``foo/bar.py`` so that relative file paths can be combined more easily. - The :ref:`config_run_relative_files` setting is properly interpreted in more places, fixing `issue 1280`_. - When remapping file paths with ``[paths]``, a path will be remapped only if the resulting path exists. The documentation has long said the prefix had to exist, but it was never enforced. This fixes `issue 608`_, improves `issue 649`_, and closes `issue 757`_. - Reporting operations now implicitly use the ``[paths]`` setting to remap file paths within a single data file. Combining multiple files still requires the ``coverage combine`` step, but this simplifies some single-file situations. Closes `issue 1212`_ and `issue 713`_. - The ``coverage report`` command now has a ``--format=`` option. The original style is now ``--format=text``, and is the default. - Using ``--format=markdown`` will write the table in Markdown format, thanks to `Steve Oswald `_, closing `issue 1418`_. - Using ``--format=total`` will write a single total number to the output. This can be useful for making badges or writing status updates. - Combining data files with ``coverage combine`` now hashes the data files to skip files that add no new information. This can reduce the time needed. Many details affect the speed-up, but for coverage.py's own test suite, combining is about 40% faster. Closes `issue 1483`_. 
- When searching for completely un-executed files, coverage.py uses the presence of ``__init__.py`` files to determine which directories have source that could have been imported. However, `implicit namespace packages`_ don't require ``__init__.py``. A new setting ``[report] include_namespace_packages`` tells coverage.py to consider these directories during reporting. Thanks to `Felix Horvat `_ for the contribution. Closes `issue 1383`_ and `issue 1024`_. - Fixed environment variable expansion in pyproject.toml files. It was overly broad, causing errors outside of coverage.py settings, as described in `issue 1481`_ and `issue 1345`_. This is now fixed, but in rare cases will require changing your pyproject.toml to quote non-string values that use environment substitution. - An empty file has a coverage total of 100%, but used to fail with ``--fail-under``. This has been fixed, closing `issue 1470`_. - The text report table no longer writes out two separator lines if there are no files listed in the table. One is plenty. - Fixed a mis-measurement of a strange use of wildcard alternatives in match/case statements, closing `issue 1421`_. - Fixed internal logic that prevented coverage.py from running on implementations other than CPython or PyPy (`issue 1474`_). - The deprecated ``[run] note`` setting has been completely removed. .. _implicit namespace packages: https://peps.python.org/pep-0420/ .. _issue 608: https://github.com/nedbat/coveragepy/issues/608 .. _issue 649: https://github.com/nedbat/coveragepy/issues/649 .. _issue 713: https://github.com/nedbat/coveragepy/issues/713 .. _issue 757: https://github.com/nedbat/coveragepy/issues/757 .. _issue 991: https://github.com/nedbat/coveragepy/issues/991 .. _issue 1024: https://github.com/nedbat/coveragepy/issues/1024 .. _issue 1212: https://github.com/nedbat/coveragepy/issues/1212 .. _issue 1280: https://github.com/nedbat/coveragepy/issues/1280 .. _issue 1345: https://github.com/nedbat/coveragepy/issues/1345 .. _issue 1383: https://github.com/nedbat/coveragepy/issues/1383 .. _issue 1407: https://github.com/nedbat/coveragepy/issues/1407 .. _issue 1418: https://github.com/nedbat/coveragepy/issues/1418 .. _issue 1421: https://github.com/nedbat/coveragepy/issues/1421 .. _issue 1470: https://github.com/nedbat/coveragepy/issues/1470 .. _issue 1474: https://github.com/nedbat/coveragepy/issues/1474 .. _issue 1481: https://github.com/nedbat/coveragepy/issues/1481 .. _issue 1483: https://github.com/nedbat/coveragepy/issues/1483 .. _pull 1387: https://github.com/nedbat/coveragepy/pull/1387 .. _pull 1479: https://github.com/nedbat/coveragepy/pull/1479 .. _changes_6-6-0b1: Version 6.6.0b1 โ€” 2022-10-31 ---------------------------- (Note: 6.6.0 final was never released. These changes are part of `7.0.0b1`__.) __ https://coverage.readthedocs.io/en/latest/changes.html#changes-7-0-0b1 - Changes to file pattern matching, which might require updating your configuration: - Previously, ``*`` would incorrectly match directory separators, making precise matching difficult. This is now fixed, closing `issue 1407`_. - Now ``**`` matches any number of nested directories, including none. - Improvements to combining data files when using the :ref:`config_run_relative_files` setting: - During ``coverage combine``, relative file paths are implicitly combined without needing a ``[paths]`` configuration setting. This also fixed `issue 991`_. - A ``[paths]`` setting like ``*/foo`` will now match ``foo/bar.py`` so that relative file paths can be combined more easily. 
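A rough sketch of the namespace-package setting and the revised wildcard rules described above; the directory names are hypothetical, and the ``omit`` example assumes those patterns follow the same matching rules::

    [report]
    # consider directories without __init__.py when looking for
    # completely un-executed files (new in 7.0.0b1)
    include_namespace_packages = True

    [run]
    # '*' no longer matches across directory separators;
    # '**' matches any number of nested directories, including none
    omit =
        */generated/**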
- The setting is properly interpreted in more places, fixing `issue 1280`_. - Fixed environment variable expansion in pyproject.toml files. It was overly broad, causing errors outside of coverage.py settings, as described in `issue 1481`_ and `issue 1345`_. This is now fixed, but in rare cases will require changing your pyproject.toml to quote non-string values that use environment substitution. - Fixed internal logic that prevented coverage.py from running on implementations other than CPython or PyPy (`issue 1474`_). .. _issue 991: https://github.com/nedbat/coveragepy/issues/991 .. _issue 1280: https://github.com/nedbat/coveragepy/issues/1280 .. _issue 1345: https://github.com/nedbat/coveragepy/issues/1345 .. _issue 1407: https://github.com/nedbat/coveragepy/issues/1407 .. _issue 1474: https://github.com/nedbat/coveragepy/issues/1474 .. _issue 1481: https://github.com/nedbat/coveragepy/issues/1481 .. _changes_6-5-0: Version 6.5.0 โ€” 2022-09-29 -------------------------- - The JSON report now includes details of which branches were taken, and which are missing for each file. Thanks, `Christoph Blessing `_. Closes `issue 1425`_. - Starting with coverage.py 6.2, ``class`` statements were marked as a branch. This wasn't right, and has been reverted, fixing `issue 1449`_. Note this will very slightly reduce your coverage total if you are measuring branch coverage. - Packaging is now compliant with `PEP 517`_, closing `issue 1395`_. - A new debug option ``--debug=pathmap`` shows details of the remapping of paths that happens during combine due to the ``[paths]`` setting. - Fix an internal problem with caching of invalid Python parsing. Found by OSS-Fuzz, fixing their `bug 50381`_. .. _bug 50381: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=50381 .. _PEP 517: https://peps.python.org/pep-0517/ .. _issue 1395: https://github.com/nedbat/coveragepy/issues/1395 .. _issue 1425: https://github.com/nedbat/coveragepy/issues/1425 .. _issue 1449: https://github.com/nedbat/coveragepy/issues/1449 .. _pull 1438: https://github.com/nedbat/coveragepy/pull/1438 .. _changes_6-4-4: Version 6.4.4 โ€” 2022-08-16 -------------------------- - Wheels are now provided for Python 3.11. .. _changes_6-4-3: Version 6.4.3 โ€” 2022-08-06 -------------------------- - Fix a failure when combining data files if the file names contained glob-like patterns. Thanks, `Michael Krebs and Benjamin Schubert `_. - Fix a messaging failure when combining Windows data files on a different drive than the current directory, closing `issue 1428`_. Thanks, `Lorenzo Micรฒ `_. - Fix path calculations when running in the root directory, as you might do in a Docker container. Thanks `Arthur Rio `_. - Filtering in the HTML report wouldn't work when reloading the index page. This is now fixed. Thanks, `Marc Legendre `_. - Fix a problem with Cython code measurement, closing `issue 972`_. Thanks, `Matus Valo `_. .. _issue 972: https://github.com/nedbat/coveragepy/issues/972 .. _issue 1428: https://github.com/nedbat/coveragepy/issues/1428 .. _pull 1347: https://github.com/nedbat/coveragepy/pull/1347 .. _pull 1403: https://github.com/nedbat/coveragepy/issues/1403 .. _pull 1405: https://github.com/nedbat/coveragepy/issues/1405 .. _pull 1413: https://github.com/nedbat/coveragepy/issues/1413 .. _pull 1430: https://github.com/nedbat/coveragepy/pull/1430 .. _changes_6-4-2: Version 6.4.2 โ€” 2022-07-12 -------------------------- - Updated for a small change in Python 3.11.0 beta 4: modules now start with a line with line number 0, which is ignored. 
This line cannot be executed, so coverage totals were thrown off. This line is now ignored by coverage.py, but this also means that truly empty modules (like ``__init__.py``) have no lines in them, rather than one phantom line. Fixes `issue 1419`_. - Internal debugging data added to sys.modules is now an actual module, to avoid confusing code that examines everything in sys.modules. Thanks, `Yilei Yang `_. .. _issue 1419: https://github.com/nedbat/coveragepy/issues/1419 .. _pull 1399: https://github.com/nedbat/coveragepy/pull/1399 .. _changes_6-4-1: Version 6.4.1 โ€” 2022-06-02 -------------------------- - Greatly improved performance on PyPy, and other environments that need the pure Python trace function. Thanks, Carl Friedrich Bolz-Tereick (`pull 1381`_ and `pull 1388`_). Slightly improved performance when using the C trace function, as most environments do. Closes `issue 1339`_. - The conditions for using tomllib from the standard library have been made more precise, so that 3.11 alphas will continue to work. Closes `issue 1390`_. .. _issue 1339: https://github.com/nedbat/coveragepy/issues/1339 .. _pull 1381: https://github.com/nedbat/coveragepy/pull/1381 .. _pull 1388: https://github.com/nedbat/coveragepy/pull/1388 .. _issue 1390: https://github.com/nedbat/coveragepy/issues/1390 .. _changes_64: Version 6.4 โ€” 2022-05-22 ------------------------ - A new setting, :ref:`config_run_sigterm`, controls whether a SIGTERM signal handler is used. In 6.3, the signal handler was always installed, to capture data at unusual process ends. Unfortunately, this introduced other problems (see `issue 1310`_). Now the signal handler is only used if you opt-in by setting ``[run] sigterm = true``. - Small changes to the HTML report: - Added links to next and previous file, and more keyboard shortcuts: ``[`` and ``]`` for next file and previous file; ``u`` for up to the index; and ``?`` to open/close the help panel. Thanks, `J. M. F. Tsang `_. - The time stamp and version are displayed at the top of the report. Thanks, `Ammar Askar `_. Closes `issue 1351`_. - A new debug option ``debug=sqldata`` adds more detail to ``debug=sql``, logging all the data being written to the database. - Previously, running ``coverage report`` (or any of the reporting commands) in an empty directory would create a .coverage data file. Now they do not, fixing `issue 1328`_. - On Python 3.11, the ``[toml]`` extra no longer installs tomli, instead using tomllib from the standard library. Thanks `Shantanu `_. - In-memory CoverageData objects now properly update(), closing `issue 1323`_. .. _issue 1310: https://github.com/nedbat/coveragepy/issues/1310 .. _issue 1323: https://github.com/nedbat/coveragepy/issues/1323 .. _issue 1328: https://github.com/nedbat/coveragepy/issues/1328 .. _issue 1351: https://github.com/nedbat/coveragepy/issues/1351 .. _pull 1354: https://github.com/nedbat/coveragepy/pull/1354 .. _pull 1359: https://github.com/nedbat/coveragepy/pull/1359 .. _pull 1364: https://github.com/nedbat/coveragepy/pull/1364 .. _changes_633: Version 6.3.3 โ€” 2022-05-12 -------------------------- - Fix: Coverage.py now builds successfully on CPython 3.11 (3.11.0b1) again. Closes `issue 1367`_. Some results for generators may have changed. .. _issue 1367: https://github.com/nedbat/coveragepy/issues/1367 .. _changes_632: Version 6.3.2 โ€” 2022-02-20 -------------------------- - Fix: adapt to pypy3.9's decorator tracing behavior. It now traces function decorators like CPython 3.8: both the @-line and the def-line are traced. 
Fixes `issue 1326`_. - Debug: added ``pybehave`` to the list of :ref:`coverage debug ` and :ref:`cmd_run_debug` options. - Fix: show an intelligible error message if ``--concurrency=multiprocessing`` is used without a configuration file. Closes `issue 1320`_. .. _issue 1320: https://github.com/nedbat/coveragepy/issues/1320 .. _issue 1326: https://github.com/nedbat/coveragepy/issues/1326 .. _changes_631: Version 6.3.1 โ€” 2022-02-01 -------------------------- - Fix: deadlocks could occur when terminating processes. Some of these deadlocks (described in `issue 1310`_) are now fixed. - Fix: a signal handler was being set from multiple threads, causing an error: "ValueError: signal only works in main thread". This is now fixed, closing `issue 1312`_. - Fix: ``--precision`` on the command-line was being ignored while considering ``--fail-under``. This is now fixed, thanks to `Marcelo Trylesinski `_. - Fix: releases no longer provide 3.11.0-alpha wheels. Coverage.py uses CPython internal fields which are moving during the alpha phase. Fixes `issue 1316`_. .. _issue 1310: https://github.com/nedbat/coveragepy/issues/1310 .. _issue 1312: https://github.com/nedbat/coveragepy/issues/1312 .. _issue 1316: https://github.com/nedbat/coveragepy/issues/1316 .. _pull 1317: https://github.com/nedbat/coveragepy/pull/1317 .. _changes_63: Version 6.3 โ€” 2022-01-25 ------------------------ - Feature: Added the ``lcov`` command to generate reports in LCOV format. Thanks, `Bradley Burns `_. Closes issues `587 `_ and `626 `_. - Feature: the coverage data file can now be specified on the command line with the ``--data-file`` option in any command that reads or writes data. This is in addition to the existing ``COVERAGE_FILE`` environment variable. Closes `issue 624`_. Thanks, `Nikita Bloshchanevich `_. - Feature: coverage measurement data will now be written when a SIGTERM signal is received by the process. This includes :meth:`Process.terminate `, and other ways to terminate a process. Currently this is only on Linux and Mac; Windows is not supported. Fixes `issue 1307`_. - Dropped support for Python 3.6, which reached end-of-life on 2021-12-23. - Updated Python 3.11 support to 3.11.0a4, fixing `issue 1294`_. - Fix: the coverage data file is now created in a more robust way, to avoid problems when multiple processes are trying to write data at once. Fixes issues `1303 `_ and `883 `_. - Fix: a .gitignore file will only be written into the HTML report output directory if the directory is empty. This should prevent certain unfortunate accidents of writing the file where it is not wanted. - Releases now have MacOS arm64 wheels for Apple Silicon, fixing `issue 1288`_. .. _issue 587: https://github.com/nedbat/coveragepy/issues/587 .. _issue 624: https://github.com/nedbat/coveragepy/issues/624 .. _issue 626: https://github.com/nedbat/coveragepy/issues/626 .. _issue 883: https://github.com/nedbat/coveragepy/issues/883 .. _issue 1288: https://github.com/nedbat/coveragepy/issues/1288 .. _issue 1294: https://github.com/nedbat/coveragepy/issues/1294 .. _issue 1303: https://github.com/nedbat/coveragepy/issues/1303 .. _issue 1307: https://github.com/nedbat/coveragepy/issues/1307 .. _pull 1289: https://github.com/nedbat/coveragepy/pull/1289 .. _pull 1304: https://github.com/nedbat/coveragepy/pull/1304 .. 
_changes_62: Version 6.2 โ€” 2021-11-26 ------------------------ - Feature: Now the ``--concurrency`` setting can have a list of values, so that threads and another lightweight threading package can be measured together, such as ``--concurrency=gevent,thread``. Closes `issue 1012`_ and `issue 1082`_. This also means that ``thread`` must be explicitly specified in some cases that used to be implicit such as ``--concurrency=multiprocessing``, which must be changed to ``--concurrency=multiprocessing,thread``. - Fix: A module specified as the ``source`` setting is imported during startup, before the user program imports it. This could cause problems if the rest of the program isn't ready yet. For example, `issue 1203`_ describes a Django setting that is accessed before settings have been configured. Now the early import is wrapped in a try/except so errors then don't stop execution. - Fix: A colon in a decorator expression would cause an exclusion to end too early, preventing the exclusion of the decorated function. This is now fixed. - Fix: The HTML report now will not overwrite a .gitignore file that already exists in the HTML output directory (follow-on for `issue 1244 `_). - API: The exceptions raised by Coverage.py have been specialized, to provide finer-grained catching of exceptions by third-party code. - API: Using ``suffix=False`` when constructing a Coverage object with multiprocessing wouldn't suppress the data file suffix (`issue 989`_). This is now fixed. - Debug: The ``coverage debug data`` command will now sniff out combinable data files, and report on all of them. - Debug: The ``coverage debug`` command used to accept a number of topics at a time, and show all of them, though this was never documented. This no longer works, to allow for command-line options in the future. .. _issue 989: https://github.com/nedbat/coveragepy/issues/989 .. _issue 1012: https://github.com/nedbat/coveragepy/issues/1012 .. _issue 1082: https://github.com/nedbat/coveragepy/issues/1082 .. _issue 1203: https://github.com/nedbat/coveragepy/issues/1203 .. _issue 1244b: https://github.com/nedbat/coveragepy/issues/1244 .. _changes_612: Version 6.1.2 โ€” 2021-11-10 -------------------------- - Python 3.11 is supported (tested with 3.11.0a2). One still-open issue has to do with `exits through with-statements `_. - Fix: When remapping file paths through the ``[paths]`` setting while combining, the ``[run] relative_files`` setting was ignored, resulting in absolute paths for remapped file names (`issue 1147`_). This is now fixed. - Fix: Complex conditionals over excluded lines could have incorrectly reported a missing branch (`issue 1271`_). This is now fixed. - Fix: More exceptions are now handled when trying to parse source files for reporting. Problems that used to terminate coverage.py can now be handled with ``[report] ignore_errors``. This helps with plugins failing to read files (`django_coverage_plugin issue 78`_). - Fix: Removed another vestige of jQuery from the source tarball (`issue 840 `_). - Fix: Added a default value for a new-to-6.x argument of an internal class. This unsupported class is being used by coveralls (`issue 1273`_). Although I'd rather not "fix" unsupported interfaces, it's actually nicer with a default value. .. _django_coverage_plugin issue 78: https://github.com/nedbat/django_coverage_plugin/issues/78 .. _issue 840b: https://github.com/nedbat/coveragepy/issues/840 .. _issue 1147: https://github.com/nedbat/coveragepy/issues/1147 .. 
_issue 1270: https://github.com/nedbat/coveragepy/issues/1270 .. _issue 1271: https://github.com/nedbat/coveragepy/issues/1271 .. _issue 1273: https://github.com/nedbat/coveragepy/issues/1273 .. _changes_611: Version 6.1.1 โ€” 2021-10-31 -------------------------- - Fix: The sticky header on the HTML report didn't work unless you had branch coverage enabled. This is now fixed: the sticky header works for everyone. (Do people still use coverage without branch measurement!? j/k) - Fix: When using explicitly declared namespace packages, the "already imported a file that will be measured" warning would be issued (`issue 888`_). This is now fixed. .. _issue 888: https://github.com/nedbat/coveragepy/issues/888 .. _changes_61: Version 6.1 โ€” 2021-10-30 ------------------------ - Deprecated: The ``annotate`` command and the ``Coverage.annotate`` function will be removed in a future version, unless people let me know that they are using it. Instead, the ``html`` command gives better-looking (and more accurate) output, and the ``report -m`` command will tell you line numbers of missing lines. Please get in touch if you have a reason to use ``annotate`` over those better options: ned@nedbatchelder.com. - Feature: Coverage now sets an environment variable, ``COVERAGE_RUN`` when running your code with the ``coverage run`` command. The value is not important, and may change in the future. Closes `issue 553`_. - Feature: The HTML report pages for Python source files now have a sticky header so the file name and controls are always visible. - Feature: The ``xml`` and ``json`` commands now describe what they wrote where. - Feature: The ``html``, ``combine``, ``xml``, and ``json`` commands all accept a ``-q/--quiet`` option to suppress the messages they write to stdout about what they are doing (`issue 1254`_). - Feature: The ``html`` command writes a ``.gitignore`` file into the HTML output directory, to prevent the report from being committed to git. If you want to commit it, you will need to delete that file. Closes `issue 1244`_. - Feature: Added support for PyPy 3.8. - Fix: More generated code is now excluded from measurement. Code such as `attrs`_ boilerplate, or doctest code, was being measured though the synthetic line numbers meant they were never reported. Once Cython was involved though, the generated .so files were parsed as Python, raising syntax errors, as reported in `issue 1160`_. This is now fixed. - Fix: When sorting human-readable names, numeric components are sorted correctly: file10.py will appear after file9.py. This applies to file names, module names, environment variables, and test contexts. - Performance: Branch coverage measurement is faster, though you might only notice on code that is executed many times, such as long-running loops. - Build: jQuery is no longer used or vendored (`issue 840`_ and `issue 1118`_). Huge thanks to Nils Kattenbeck (septatrix) for the conversion to vanilla JavaScript in `pull request 1248`_. .. _issue 553: https://github.com/nedbat/coveragepy/issues/553 .. _issue 840: https://github.com/nedbat/coveragepy/issues/840 .. _issue 1118: https://github.com/nedbat/coveragepy/issues/1118 .. _issue 1160: https://github.com/nedbat/coveragepy/issues/1160 .. _issue 1244: https://github.com/nedbat/coveragepy/issues/1244 .. _pull request 1248: https://github.com/nedbat/coveragepy/pull/1248 .. _issue 1254: https://github.com/nedbat/coveragepy/issues/1254 .. _attrs: https://www.attrs.org/ .. 
_changes_602: Version 6.0.2 โ€” 2021-10-11 -------------------------- - Namespace packages being measured weren't properly handled by the new code that ignores third-party packages. If the namespace package was installed, it was ignored as a third-party package. That problem (`issue 1231`_) is now fixed. - Packages named as "source packages" (with ``source``, or ``source_pkgs``, or pytest-cov's ``--cov``) might have been only partially measured. Their top-level statements could be marked as un-executed, because they were imported by coverage.py before measurement began (`issue 1232`_). This is now fixed, but the package will be imported twice, once by coverage.py, then again by your test suite. This could cause problems if importing the package has side effects. - The :meth:`.CoverageData.contexts_by_lineno` method was documented to return a dict, but was returning a defaultdict. Now it returns a plain dict. It also no longer returns negative numbered keys. .. _issue 1231: https://github.com/nedbat/coveragepy/issues/1231 .. _issue 1232: https://github.com/nedbat/coveragepy/issues/1232 .. _changes_601: Version 6.0.1 โ€” 2021-10-06 -------------------------- - In 6.0, the coverage.py exceptions moved from coverage.misc to coverage.exceptions. These exceptions are not part of the public supported API, CoverageException is. But a number of other third-party packages were importing the exceptions from coverage.misc, so they are now available from there again (`issue 1226`_). - Changed an internal detail of how tomli is imported, so that tomli can use coverage.py for their own test suite (`issue 1228`_). - Defend against an obscure possibility under code obfuscation, where a function can have an argument called "self", but no local named "self" (`pull request 1210`_). Thanks, Ben Carlsson. .. _pull request 1210: https://github.com/nedbat/coveragepy/pull/1210 .. _issue 1226: https://github.com/nedbat/coveragepy/issues/1226 .. _issue 1228: https://github.com/nedbat/coveragepy/issues/1228 .. _changes_60: Version 6.0 โ€” 2021-10-03 ------------------------ - The ``coverage html`` command now prints a message indicating where the HTML report was written. Fixes `issue 1195`_. - The ``coverage combine`` command now prints messages indicating each data file being combined. Fixes `issue 1105`_. - The HTML report now includes a sentence about skipped files due to ``skip_covered`` or ``skip_empty`` settings. Fixes `issue 1163`_. - Unrecognized options in the configuration file are no longer errors. They are now warnings, to ease the use of coverage across versions. Fixes `issue 1035`_. - Fix handling of exceptions through context managers in Python 3.10. A missing exception is no longer considered a missing branch from the with statement. Fixes `issue 1205`_. - Fix another rarer instance of "Error binding parameter 0 - probably unsupported type." (`issue 1010 `_). - Creating a directory for the coverage data file now is safer against conflicts when two coverage runs happen simultaneously (`pull 1220`_). Thanks, Clรฉment Pit-Claudel. .. _issue 1010b: https://github.com/nedbat/coveragepy/issues/1010 .. _issue 1035: https://github.com/nedbat/coveragepy/issues/1035 .. _issue 1105: https://github.com/nedbat/coveragepy/issues/1105 .. _issue 1163: https://github.com/nedbat/coveragepy/issues/1163 .. _issue 1195: https://github.com/nedbat/coveragepy/issues/1195 .. _issue 1205: https://github.com/nedbat/coveragepy/issues/1205 .. _pull 1220: https://github.com/nedbat/coveragepy/pull/1220 .. 
_changes_60b1: Version 6.0b1 โ€” 2021-07-18 -------------------------- - Dropped support for Python 2.7, PyPy 2, and Python 3.5. - Added support for the Python 3.10 ``match/case`` syntax. - Data collection is now thread-safe. There may have been rare instances of exceptions raised in multi-threaded programs. - Plugins (like the `Django coverage plugin`_) were generating "Already imported a file that will be measured" warnings about Django itself. These have been fixed, closing `issue 1150`_. - Warnings generated by coverage.py are now real Python warnings. - Using ``--fail-under=100`` with coverage near 100% could result in the self-contradictory message :code:`total of 100 is less than fail-under=100`. This bug (`issue 1168`_) is now fixed. - The ``COVERAGE_DEBUG_FILE`` environment variable now accepts ``stdout`` and ``stderr`` to write to those destinations. - TOML parsing now uses the `tomli`_ library. - Some minor changes to usually invisible details of the HTML report: - Use a modern hash algorithm when fingerprinting, for high-security environments (`issue 1189`_). When generating the HTML report, we save the hash of the data, to avoid regenerating an unchanged HTML page. We used to use MD5 to generate the hash, and now use SHA-3-256. This was never a security concern, but security scanners would notice the MD5 algorithm and raise a false alarm. - Change how report file names are generated, to avoid leading underscores (`issue 1167`_), to avoid rare file name collisions (`issue 584`_), and to avoid file names becoming too long (`issue 580`_). .. _Django coverage plugin: https://pypi.org/project/django-coverage-plugin/ .. _issue 580: https://github.com/nedbat/coveragepy/issues/580 .. _issue 584: https://github.com/nedbat/coveragepy/issues/584 .. _issue 1150: https://github.com/nedbat/coveragepy/issues/1150 .. _issue 1167: https://github.com/nedbat/coveragepy/issues/1167 .. _issue 1168: https://github.com/nedbat/coveragepy/issues/1168 .. _issue 1189: https://github.com/nedbat/coveragepy/issues/1189 .. _tomli: https://pypi.org/project/tomli/ .. _changes_56b1: Version 5.6b1 โ€” 2021-04-13 -------------------------- Note: 5.6 final was never released. These changes are part of 6.0. - Third-party packages are now ignored in coverage reporting. This solves a few problems: - Coverage will no longer report about other people's code (`issue 876`_). This is true even when using ``--source=.`` with a venv in the current directory. - Coverage will no longer generate "Already imported a file that will be measured" warnings about coverage itself (`issue 905`_). - The HTML report uses j/k to move up and down among the highlighted chunks of code. They used to highlight the current chunk, but 5.0 broke that behavior. Now the highlighting is working again. - The JSON report now includes ``percent_covered_display``, a string with the total percentage, rounded to the same number of decimal places as the other reports' totals. .. _issue 876: https://github.com/nedbat/coveragepy/issues/876 .. _issue 905: https://github.com/nedbat/coveragepy/issues/905 .. _changes_55: Version 5.5 โ€” 2021-02-28 ------------------------ - ``coverage combine`` has a new option, ``--keep`` to keep the original data files after combining them. The default is still to delete the files after they have been combined. This was requested in `issue 1108`_ and implemented in `pull request 1110`_. Thanks, ร‰ric Lariviรจre. - When reporting missing branches in ``coverage report``, branches aren't reported that jump to missing lines. 
This adds to the long-standing behavior of not reporting branches from missing lines. Now branches are only reported if both the source and destination lines are executed. Closes both `issue 1065`_ and `issue 955`_. - Minor improvements to the HTML report: - The state of the line visibility selector buttons is saved in local storage so you don't have to fiddle with them so often, fixing `issue 1123`_. - It has a little more room for line numbers so that 4-digit numbers work well, fixing `issue 1124`_. - Improved the error message when combining line and branch data, so that users will be more likely to understand what's happening, closing `issue 803`_. .. _issue 803: https://github.com/nedbat/coveragepy/issues/803 .. _issue 955: https://github.com/nedbat/coveragepy/issues/955 .. _issue 1065: https://github.com/nedbat/coveragepy/issues/1065 .. _issue 1108: https://github.com/nedbat/coveragepy/issues/1108 .. _pull request 1110: https://github.com/nedbat/coveragepy/pull/1110 .. _issue 1123: https://github.com/nedbat/coveragepy/issues/1123 .. _issue 1124: https://github.com/nedbat/coveragepy/issues/1124 .. _changes_54: Version 5.4 โ€” 2021-01-24 ------------------------ - The text report produced by ``coverage report`` now always outputs a TOTAL line, even if only one Python file is reported. This makes regex parsing of the output easier. Thanks, Judson Neer. This had been requested a number of times (`issue 1086`_, `issue 922`_, `issue 732`_). - The ``skip_covered`` and ``skip_empty`` settings in the configuration file can now be specified in the ``[html]`` section, so that text reports and HTML reports can use separate settings. The HTML report will still use the ``[report]`` settings if there isn't a value in the ``[html]`` section. Closes `issue 1090`_. - Combining files on Windows across drives now works properly, fixing `issue 577`_. Thanks, `Valentin Lab `_. - Fix an obscure warning from deep in the _decimal module, as reported in `issue 1084`_. - Update to support Python 3.10 alphas in progress, including `PEP 626: Precise line numbers for debugging and other tools `_. .. _issue 577: https://github.com/nedbat/coveragepy/issues/577 .. _issue 732: https://github.com/nedbat/coveragepy/issues/732 .. _issue 922: https://github.com/nedbat/coveragepy/issues/922 .. _issue 1084: https://github.com/nedbat/coveragepy/issues/1084 .. _issue 1086: https://github.com/nedbat/coveragepy/issues/1086 .. _issue 1090: https://github.com/nedbat/coveragepy/issues/1090 .. _pr1080: https://github.com/nedbat/coveragepy/pull/1080 .. _pep626: https://www.python.org/dev/peps/pep-0626/ .. _changes_531: Version 5.3.1 โ€” 2020-12-19 -------------------------- - When using ``--source`` on a large source tree, v5.x was slower than previous versions. This performance regression is now fixed, closing `issue 1037`_. - Mysterious SQLite errors can happen on PyPy, as reported in `issue 1010`_. An immediate retry seems to fix the problem, although it is an unsatisfying solution. - The HTML report now saves the sort order in a more widely supported way, fixing `issue 986`_. Thanks, Sebastiรกn Ramรญrez (`pull request 1066`_). - The HTML report pages now have a :ref:`Sleepy Snake ` favicon. - Wheels are now provided for manylinux2010, and for PyPy3 (pp36 and pp37). - Continuous integration has moved from Travis and AppVeyor to GitHub Actions. .. _issue 986: https://github.com/nedbat/coveragepy/issues/986 .. _issue 1037: https://github.com/nedbat/coveragepy/issues/1037 .. 
_issue 1010: https://github.com/nedbat/coveragepy/issues/1010 .. _pull request 1066: https://github.com/nedbat/coveragepy/pull/1066 .. _changes_53: Version 5.3 โ€” 2020-09-13 ------------------------ - The ``source`` setting has always been interpreted as either a file path or a module, depending on which existed. If both interpretations were valid, it was assumed to be a file path. The new ``source_pkgs`` setting can be used to name a package to disambiguate this case. Thanks, Thomas Grainger. Fixes `issue 268`_. - If a plugin was disabled due to an exception, we used to still try to record its information, causing an exception, as reported in `issue 1011`_. This is now fixed. .. _issue 268: https://github.com/nedbat/coveragepy/issues/268 .. _issue 1011: https://github.com/nedbat/coveragepy/issues/1011 .. scriv-end-here Older changes ------------- The complete history is available in the `coverage.py docs`__. __ https://coverage.readthedocs.io/en/latest/changes.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/CITATION.cff0000644000175100001770000000177100000000000016027 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt cff-version: 1.2.0 title: "Coverage.py: The code coverage tool for Python" message: >- If you use this software, please cite it using the metadata from this file. type: software authors: - family-names: Batchelder given-names: Ned orcid: https://orcid.org/0009-0006-2659-884X - name: "Contributors to Coverage.py" repository-code: "https://github.com/nedbat/coveragepy" url: "https://coverage.readthedocs.io/" abstract: >- Coverage.py is a tool for measuring code coverage of Python programs. It monitors your program, noting which parts of the code have been executed, then analyzes the source to identify code that could have been executed but was not. Coverage measurement is typically used to gauge the effectiveness of tests. It can show which parts of your code are being exercised by tests, and which are not. license: Apache-2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/CONTRIBUTORS.txt0000644000175100001770000000670600000000000016636 0ustar00runnerdocker00000000000000Coverage.py was originally written by Gareth Rees, and since 2004 has been extended and maintained by Ned Batchelder. Other contributions, including writing code, updating docs, and submitting useful bug reports, have been made by: Abdeali Kothari Adi Roiban Agbonze O. Jeremiah Albertas Agejevas Aleksi Torhamo Alex Gaynor Alex Groce Alex Sandro Alexander Todorov Alexander Walters Alpha Chen Ammar Askar Anders Kaseorg Andrew Hoos Anthony Sottile Arcadiy Ivanov Aron Griffis Artem Dayneko Arthur Deygin Arthur Rio Asher Foa Ben Carlsson Ben Finney Benjamin Parzella Benjamin Schubert Bernรกt Gรกbor Bill Hart Brad Smith Bradley Burns Brandon Rhodes Brett Cannon Brian Grohe Bruno Oliveira Bruno P. 
Kinoshita Bruno Rodrigues dos Santos Buck Evan Buck Golemon Calen Pennington Carl Friedrich Bolz-Tereick Carl Gieringer Catherine Proulx Charles Chan Chris Adams Chris Jerdonek Chris Rose Chris Warrick Christian Clauss Christian Heimes Christine Lytwynec Christoph Blessing Christoph Zwerschke Christopher Pickering Clรฉment Pit-Claudel Conrad Ho Cosimo Lupo Dan Hemberger Dan Riti Dan Wandschneider Danek Duvall Daniel Hahler Danny Allen David Christian David MacIver David Stanek David Szotten Dennis Sweeney Detlev Offenbach Devin Jeanpierre Dirk Thomas Dmitry Shishov Dmitry Trofimov Edgar Ramรญrez Mondragรณn Eduardo Schettino Edward Loper Eli Skeggs Emil Madsen ร‰ric Lariviรจre Federico Bond Felix Horvat Frazer McLean Geoff Bache George Paci George Song George-Cristian Bรฎrzan Greg Rogers Guido van Rossum Guillaume Chazarain Holger Krekel Hugo van Kemenade Ian Moore Ilia Meerovich Imri Goldberg Ionel Cristian Mฤƒrieศ™ Ivan Ciuvalschii J. M. F. Tsang JT Olds Jacqueline Lee Jakub Wilk Jan Rusak Janakarajan Natarajan Jerin Peter George Jessamyn Smith Joanna Ejzel Joe Doherty Joe Jevnik John Vandenberg Jon Chappell Jon Dufresne Joseph Tate Josh Williams Judson Neer Julian Berman Julien Voisin Justas Sadzeviฤius Karthikeyan Singaravelan Kassandra Keeton Ken Schackart Kevin Brown-Silva Kjell Braden Krystian Kichewko Kyle Altendorf Lars Hupfeldt Nielsen Latrice Wilgus Leonardo Pistone Lewis Gaul Lex Berezhny Loรฏc Dachary Lorenzo Micรฒ Louis Heredero Luis Nell ลukasz Stolcman Maciej Kowalczyk Manuel Jacob Marc Abramowitz Marc Gibbons Marc Legendre Marcelo Trylesinski Marcus Cobden Mariatta Marius Gedminas Mark van der Wal Martin Fuzzey Mathieu Kniewallner Matt Bachmann Matthew Boehm Matthew Desmarais Matus Valo Max Linke Mayank Singhal Michael Bell Michael Krebs Michaล‚ Bultrowicz Michaล‚ Gรณrny Mickie Betz Mike Fiedler Min ho Kim Nathan Land Naveen Srinivasan Naveen Yadav Neil Pilgrim Nicholas Nadeau Nikita Bloshchanevich Nikita Sobolev Nils Kattenbeck Noel O'Boyle Oleg Hรถfling Oleh Krehel Olivier Grisel Ori Avtalion Pablo Carballo Pankaj Pandey Patrick Mezard Pavel Tsialnou Peter Baughman Peter Ebden Peter Portante Phebe Polk Reya B Ricardo Newbery Robert Harris Rodrigue Cloutier Roger Hu Roland Illig Ross Lawley Roy Williams Russell Keith-Magee S. Y. Lee Salvatore Zagaria Sandra Martocchia Scott Belden Sebastiรกn Ramรญrez Sergey B Kirpichev Shantanu Sigve Tjora Simon Willison Stan Hu Stanisล‚aw Pitucha Stefan Behnel Stephan Deibel Stephan Richter Stephen Finucane Steve Dower Steve Leonard Steve Oswald Steve Peak Sviatoslav Sydorenko Tanaydin Sirin Teake Nutma Ted Wexler Thijs Triemstra Thomas Grainger Timo Furrer Titus Brown Tom Gurion Valentin Lab Ville Skyttรค Vince Salvino Wonwin McBrootles Xie Yanbo Yilei "Dolee" Yang Yury Selivanov Zac Hatfield-Dodds Zooko Wilcox-O'Hearn ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/LICENSE.txt0000644000175100001770000002367600000000000015770 0ustar00runnerdocker00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/MANIFEST.in0000644000175100001770000000241500000000000015667 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # MANIFEST.in file for coverage.py # This file includes everything needed to recreate the entire project, even # though many of these files are not installed by setup.py. Unpacking the # .tar.gz source distribution would give you everything needed to continue # developing the project. "pip install" will not install many of these files. 
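# Directive reminder for the rules below: "include" names a single file at the
# top level, "recursive-include" adds files matching the given patterns under a
# directory, and "prune" drops a directory tree from the source distribution.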
include .editorconfig include .git-blame-ignore-revs include .readthedocs.yaml include CHANGES.rst include CITATION.cff include CONTRIBUTORS.txt include LICENSE.txt include MANIFEST.in include Makefile include NOTICE.txt include README.rst include __main__.py include howto.txt include igor.py include metacov.ini include setup.py include tox.ini recursive-include ci * recursive-include lab * recursive-include .github * recursive-include coverage *.pyi recursive-include coverage/ctracer *.c *.h recursive-include doc *.py *.in *.pip *.rst *.txt *.png recursive-include doc/_static * prune doc/_build prune doc/_spell recursive-include requirements *.in *.pip recursive-include tests *.py *.tok recursive-include tests/gold * recursive-include tests js/* qunit/* prune tests/eggsrc/build ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/Makefile0000644000175100001770000002412700000000000015575 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Makefile for utility work on coverage.py. .DEFAULT_GOAL := help ##@ Utilities .PHONY: help clean_platform clean sterile help: ## Show this help. @# Adapted from https://www.thapaliya.com/en/writings/well-documented-makefiles/ @echo Available targets: @awk -F ':.*##' '/^[^: ]+:.*##/{printf " \033[1m%-20s\033[m %s\n",$$1,$$2} /^##@/{printf "\n%s\n",substr($$0,5)}' $(MAKEFILE_LIST) _clean_platform: @rm -f *.so */*.so @rm -f *.pyd */*.pyd @rm -rf __pycache__ */__pycache__ */*/__pycache__ */*/*/__pycache__ */*/*/*/__pycache__ */*/*/*/*/__pycache__ @rm -f *.pyc */*.pyc */*/*.pyc */*/*/*.pyc */*/*/*/*.pyc */*/*/*/*/*.pyc @rm -f *.pyo */*.pyo */*/*.pyo */*/*/*.pyo */*/*/*/*.pyo */*/*/*/*/*.pyo @rm -f *$$py.class */*$$py.class */*/*$$py.class */*/*/*$$py.class */*/*/*/*$$py.class */*/*/*/*/*$$py.class debug_clean: ## Delete various debugging artifacts. @rm -rf /tmp/dis $$COVERAGE_DEBUG_FILE clean: debug_clean _clean_platform ## Remove artifacts of test execution, installation, etc. @echo "Cleaning..." @-pip uninstall -yq coverage @mkdir -p build # so the chmod won't fail if build doesn't exist @chmod -R 777 build @rm -rf build coverage.egg-info dist htmlcov @rm -f *.bak */*.bak */*/*.bak */*/*/*.bak */*/*/*/*.bak */*/*/*/*/*.bak @rm -f coverage/*,cover @rm -f MANIFEST @rm -f .coverage .coverage.* .metacov* @rm -f coverage.xml coverage.json @rm -f .tox/*/lib/*/site-packages/zzz_metacov.pth @rm -f */.coverage */*/.coverage */*/*/.coverage */*/*/*/.coverage */*/*/*/*/.coverage */*/*/*/*/*/.coverage @rm -f tests/covmain.zip tests/zipmods.zip tests/zip1.zip @rm -rf doc/_build doc/_spell doc/sample_html_beta @rm -rf tmp @rm -rf .*cache */.*cache */*/.*cache */*/*/.*cache .hypothesis @rm -rf tests/actual @-make -C tests/gold/html clean sterile: clean ## Remove all non-controlled content, even if expensive. rm -rf .tox rm -f cheats.txt ##@ Tests and quality checks .PHONY: lint smoke lint: ## Run linters and checkers. tox -q -e lint PYTEST_SMOKE_ARGS = -n auto -m "not expensive" --maxfail=3 $(ARGS) smoke: ## Run tests quickly with the C tracer in the lowest supported Python versions. COVERAGE_TEST_CORES=ctrace tox -q -e py38 -- $(PYTEST_SMOKE_ARGS) ##@ Metacov: coverage measurement of coverage.py itself # See metacov.ini for details. .PHONY: metacov metahtml metasmoke metacov: ## Run meta-coverage, measuring ourself. 
COVERAGE_COVERAGE=yes tox -q $(ARGS) metahtml: ## Produce meta-coverage HTML reports. python igor.py combine_html metasmoke: COVERAGE_TEST_CORES=ctrace ARGS="-e py39" make metacov metahtml ##@ Requirements management # When updating requirements, a few rules to follow: # # 1) Don't install more than one .pip file at once. Always use pip-compile to # combine .in files onto a single .pip file that can be installed where needed. # # 2) Check manual pins before `make upgrade` to see if they can be removed. Look # in requirements/pins.pip, and search for "windows" in .in files to find pins # and extra requirements that have been needed, but might be obsolete. .PHONY: upgrade doc_upgrade diff_upgrade DOCBIN = .tox/doc/bin PIP_COMPILE = pip-compile ${COMPILE_OPTS} --allow-unsafe --resolver=backtracking upgrade: ## Update the *.pip files with the latest packages satisfying *.in files. $(MAKE) _upgrade COMPILE_OPTS="--upgrade" upgrade_one: ## Update the *.pip files for one package. `make upgrade-one package=...` @test -n "$(package)" || { echo "\nUsage: make upgrade-one package=...\n"; exit 1; } $(MAKE) _upgrade COMPILE_OPTS="--upgrade-package $(package)" _upgrade: export CUSTOM_COMPILE_COMMAND=make upgrade _upgrade: pip install -q -r requirements/pip-tools.pip $(PIP_COMPILE) -o requirements/pip-tools.pip requirements/pip-tools.in $(PIP_COMPILE) -o requirements/pip.pip requirements/pip.in $(PIP_COMPILE) -o requirements/pytest.pip requirements/pytest.in $(PIP_COMPILE) -o requirements/kit.pip requirements/kit.in $(PIP_COMPILE) -o requirements/tox.pip requirements/tox.in $(PIP_COMPILE) -o requirements/dev.pip requirements/dev.in $(PIP_COMPILE) -o requirements/light-threads.pip requirements/light-threads.in $(PIP_COMPILE) -o requirements/mypy.pip requirements/mypy.in doc_upgrade: export CUSTOM_COMPILE_COMMAND=make doc_upgrade doc_upgrade: $(DOCBIN) ## Update the doc/requirements.pip file $(DOCBIN)/pip install -q -r requirements/pip-tools.pip $(DOCBIN)/$(PIP_COMPILE) --upgrade -o doc/requirements.pip doc/requirements.in diff_upgrade: ## Summarize the last `make upgrade` @# The sort flags sort by the package name first, then by the -/+, and @# sort by version numbers, so we get a summary with lines like this: @# -bashlex==0.16 @# +bashlex==0.17 @# -build==0.9.0 @# +build==0.10.0 @git diff -U0 | grep -v '^@' | grep == | sort -k1.2,1.99 -k1.1,1.1r -u -V ##@ Pre-builds for prepping the code .PHONY: css workflows prebuild CSS = coverage/htmlfiles/style.css SCSS = coverage/htmlfiles/style.scss css: $(CSS) ## Compile .scss into .css. $(CSS): $(SCSS) pysassc --style=compact $(SCSS) $@ cp $@ tests/gold/html/styled workflows: ## Run cog on the workflows to keep them up-to-date. python -m cogapp -crP .github/workflows/*.yml prebuild: css workflows cogdoc ## One command for all source prep. ##@ Sample HTML reports .PHONY: _sample_cog_html sample_html sample_html_beta _sample_cog_html: clean python -m pip install -e . cd ~/cog; \ rm -rf htmlcov; \ PYTEST_ADDOPTS= coverage run --branch --source=cogapp -m pytest -k CogTestsInMemory; \ coverage combine; \ coverage html sample_html: _sample_cog_html ## Generate sample HTML report. rm -f doc/sample_html/*.* cp -r ~/cog/htmlcov/ doc/sample_html/ rm doc/sample_html/.gitignore sample_html_beta: _sample_cog_html ## Generate sample HTML report for a beta release. 
rm -f doc/sample_html_beta/*.* cp -r ~/cog/htmlcov/ doc/sample_html_beta/ rm doc/sample_html_beta/.gitignore ##@ Kitting: making releases .PHONY: edit_for_release cheats relbranch relcommit1 relcommit2 .PHONY: kit kit_upload test_upload kit_local build_kits download_kits check_kits .PHONY: tag bump_version REPO_OWNER = nedbat/coveragepy edit_for_release: #: Edit sources to insert release facts (see howto.txt). python igor.py edit_for_release cheats: ## Create some useful snippets for releasing. python igor.py cheats | tee cheats.txt relbranch: #: Create the branch for releasing (see howto.txt). git switch -c nedbat/release-$$(date +%Y%m%d) relcommit1: #: Commit the first release changes (see howto.txt). git commit -am "docs: prep for $$(python setup.py --version)" relcommit2: #: Commit the latest sample HTML report (see howto.txt). git commit -am "docs: sample HTML for $$(python setup.py --version)" kit: ## Make the source distribution. python -m build kit_upload: ## Upload the built distributions to PyPI. twine upload --verbose dist/* test_upload: ## Upload the distributions to PyPI's testing server. twine upload --verbose --repository testpypi --password $$TWINE_TEST_PASSWORD dist/* kit_local: # pip.conf looks like this: # [global] # find-links = file:///Users/ned/Downloads/local_pypi cp -v dist/* `awk -F "//" '/find-links/ {print $$2}' ~/.pip/pip.conf` # pip caches wheels of things it has installed. Clean them out so we # don't go crazy trying to figure out why our new code isn't installing. find ~/Library/Caches/pip/wheels -name 'coverage-*' -delete build_kits: ## Trigger GitHub to build kits python ci/trigger_build_kits.py $(REPO_OWNER) download_kits: ## Download the built kits from GitHub. python ci/download_gha_artifacts.py $(REPO_OWNER) 'dist-*' dist check_kits: ## Check that dist/* are well-formed. python -m twine check dist/* @echo $$(ls -1 dist | wc -l) distribution kits tag: #: Make a git tag with the version number (see howto.txt). git tag -s -m "Version $$(python setup.py --version)" $$(python setup.py --version) git push --follow-tags bump_version: #: Edit sources to bump the version after a release (see howto.txt). git switch -c nedbat/bump-version python igor.py bump_version git commit -a -m "build: bump version" git push -u origin @ ##@ Documentation .PHONY: cogdoc dochtml docdev docspell SPHINXOPTS = -aE SPHINXBUILD = $(DOCBIN)/sphinx-build $(SPHINXOPTS) SPHINXAUTOBUILD = $(DOCBIN)/sphinx-autobuild --port 9876 --ignore '.git/**' --open-browser $(DOCBIN): tox -q -e doc --notest cogdoc: $(DOCBIN) ## Run docs through cog. $(DOCBIN)/python -m cogapp -crP --verbosity=1 doc/*.rst dochtml: cogdoc $(DOCBIN) ## Build the docs HTML output. $(SPHINXBUILD) -b html doc doc/_build/html docdev: dochtml ## Build docs, and auto-watch for changes. PATH=$(DOCBIN):$(PATH) $(SPHINXAUTOBUILD) -b html doc doc/_build/html docspell: $(DOCBIN) ## Run the spell checker on the docs. # Very mac-specific... PYENCHANT_LIBRARY_PATH=/opt/homebrew/lib/libenchant-2.dylib $(SPHINXBUILD) -b spelling doc doc/_spell ##@ Publishing docs .PHONY: publish publishbeta relnotes_json github_releases comment_on_fixes WEBHOME = ~/web/stellated WEBSAMPLE = $(WEBHOME)/files/sample_coverage_html WEBSAMPLEBETA = $(WEBHOME)/files/sample_coverage_html_beta publish: ## Publish the sample HTML report. 
rm -f $(WEBSAMPLE)/*.* mkdir -p $(WEBSAMPLE) cp doc/sample_html/*.* $(WEBSAMPLE) publishbeta: rm -f $(WEBSAMPLEBETA)/*.* mkdir -p $(WEBSAMPLEBETA) cp doc/sample_html_beta/*.* $(WEBSAMPLEBETA) CHANGES_MD = tmp/rst_rst/changes.md RELNOTES_JSON = tmp/relnotes.json $(CHANGES_MD): CHANGES.rst $(DOCBIN) $(SPHINXBUILD) -b rst doc tmp/rst_rst pandoc -frst -tmarkdown_strict --markdown-headings=atx --wrap=none tmp/rst_rst/changes.rst > $(CHANGES_MD) relnotes_json: $(RELNOTES_JSON) ## Convert changelog to JSON for further parsing. $(RELNOTES_JSON): $(CHANGES_MD) $(DOCBIN)/python ci/parse_relnotes.py tmp/rst_rst/changes.md $(RELNOTES_JSON) github_releases: $(DOCBIN) ## Update GitHub releases. $(DOCBIN)/python -m scriv github-release --all comment_on_fixes: $(RELNOTES_JSON) ## Add a comment to issues that were fixed. python ci/comment_on_fixes.py $(REPO_OWNER) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/NOTICE.txt0000644000175100001770000000125100000000000015650 0ustar00runnerdocker00000000000000Copyright 2001 Gareth Rees. All rights reserved. Copyright 2004-2024 Ned Batchelder. All rights reserved. Except where noted otherwise, this software is licensed under the Apache License, Version 2.0 (the "License"); you may not use this work except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1658149 coverage-7.4.4/PKG-INFO0000644000175100001770000001776700000000000015246 0ustar00runnerdocker00000000000000Metadata-Version: 2.1 Name: coverage Version: 7.4.4 Summary: Code coverage measurement for Python Home-page: https://github.com/nedbat/coveragepy Author: Ned Batchelder and 224 others Author-email: ned@nedbatchelder.com License: Apache-2.0 Project-URL: Documentation, https://coverage.readthedocs.io/en/7.4.4 Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=pypi Project-URL: Issues, https://github.com/nedbat/coveragepy/issues Project-URL: Mastodon, https://hachyderm.io/@coveragepy Project-URL: Mastodon (nedbat), https://hachyderm.io/@nedbat Keywords: code coverage testing Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Software Development :: Quality Assurance Classifier: Topic :: Software Development :: Testing Classifier: Development Status :: 5 - Production/Stable Requires-Python: >=3.8 
Description-Content-Type: text/x-rst License-File: LICENSE.txt Provides-Extra: toml Requires-Dist: tomli; python_full_version <= "3.11.0a6" and extra == "toml" .. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt =========== Coverage.py =========== Code coverage testing for Python. .. image:: https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner2-direct.svg :target: https://vshymanskyy.github.io/StandWithUkraine :alt: Stand with Ukraine ------------- | |kit| |license| |versions| | |test-status| |quality-status| |docs| |metacov| | |tidelift| |sponsor| |stars| |mastodon-coveragepy| |mastodon-nedbat| Coverage.py measures code coverage, typically during test execution. It uses the code analysis tools and tracing hooks provided in the Python standard library to determine which lines are executable, and which have been executed. Coverage.py runs on these versions of Python: .. PYVERSIONS * Python 3.8 through 3.12, and 3.13.0a3 and up. * PyPy3 versions 3.8 through 3.10. Documentation is on `Read the Docs`_. Code repository and issue tracker are on `GitHub`_. .. _Read the Docs: https://coverage.readthedocs.io/en/7.4.4/ .. _GitHub: https://github.com/nedbat/coveragepy **New in 7.x:** experimental support for sys.monitoring; dropped support for Python 3.7; added ``Coverage.collect()`` context manager; improved data combining; ``[run] exclude_also`` setting; ``report --format=``; type annotations. **New in 6.x:** dropped support for Python 2.7, 3.5, and 3.6; write data on SIGTERM; added support for 3.10 match/case statements. For Enterprise -------------- .. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png :alt: Tidelift :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme .. list-table:: :widths: 10 100 * - |tideliftlogo| - `Available as part of the Tidelift Subscription. `_ Coverage and thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. If you want the flexibility of open source and the confidence of commercial-grade software, this is for you. `Learn more. `_ Getting Started --------------- Looking to run ``coverage`` on your test suite? See the `Quick Start section`_ of the docs. .. _Quick Start section: https://coverage.readthedocs.io/en/7.4.4/#quick-start Change history -------------- The complete history of changes is on the `change history page`_. .. _change history page: https://coverage.readthedocs.io/en/7.4.4/changes.html Code of Conduct --------------- Everyone participating in the coverage.py project is expected to treat other people with respect and to follow the guidelines articulated in the `Python Community Code of Conduct`_. .. _Python Community Code of Conduct: https://www.python.org/psf/codeofconduct/ Contributing ------------ Found a bug? Want to help improve the code or documentation? See the `Contributing section`_ of the docs. .. _Contributing section: https://coverage.readthedocs.io/en/7.4.4/contributing.html Security -------- To report a security vulnerability, please use the `Tidelift security contact`_. Tidelift will coordinate the fix and disclosure. .. _Tidelift security contact: https://tidelift.com/security License ------- Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_. .. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 .. 
_NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. |test-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=master&event=push :target: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml :alt: Test suite status .. |quality-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml/badge.svg?branch=master&event=push :target: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml :alt: Quality check status .. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat :target: https://coverage.readthedocs.io/en/7.4.4/ :alt: Documentation .. |kit| image:: https://img.shields.io/pypi/v/coverage :target: https://pypi.org/project/coverage/ :alt: PyPI status .. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072 :target: https://pypi.org/project/coverage/ :alt: Python versions supported .. |license| image:: https://img.shields.io/pypi/l/coverage.svg :target: https://pypi.org/project/coverage/ :alt: License .. |metacov| image:: https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/nedbat/8c6980f77988a327348f9b02bbaf67f5/raw/metacov.json :target: https://nedbat.github.io/coverage-reports/latest.html :alt: Coverage reports .. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme :alt: Tidelift .. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github :target: https://github.com/nedbat/coveragepy/stargazers :alt: GitHub stars .. |mastodon-nedbat| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&link=https%3A%2F%2Fhachyderm.io%2F%40nedbat&url=https%3A%2F%2Fhachyderm.io%2Fusers%2Fnedbat%2Ffollowers.json&query=totalItems&label=@nedbat :target: https://hachyderm.io/@nedbat :alt: nedbat on Mastodon .. |mastodon-coveragepy| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&link=https%3A%2F%2Fhachyderm.io%2F%40coveragepy&url=https%3A%2F%2Fhachyderm.io%2Fusers%2Fcoveragepy%2Ffollowers.json&query=totalItems&label=@coveragepy :target: https://hachyderm.io/@coveragepy :alt: coveragepy on Mastodon .. |sponsor| image:: https://img.shields.io/badge/%E2%9D%A4-Sponsor%20me-brightgreen?style=flat&logo=GitHub :target: https://github.com/sponsors/nedbat :alt: Sponsor me on GitHub ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/README.rst0000644000175100001770000001442100000000000015620 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt =========== Coverage.py =========== Code coverage testing for Python. .. image:: https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner2-direct.svg :target: https://vshymanskyy.github.io/StandWithUkraine :alt: Stand with Ukraine ------------- | |kit| |license| |versions| | |test-status| |quality-status| |docs| |metacov| | |tidelift| |sponsor| |stars| |mastodon-coveragepy| |mastodon-nedbat| Coverage.py measures code coverage, typically during test execution. 
It uses the code analysis tools and tracing hooks provided in the Python standard library to determine which lines are executable, and which have been executed. Coverage.py runs on these versions of Python: .. PYVERSIONS * Python 3.8 through 3.12, and 3.13.0a3 and up. * PyPy3 versions 3.8 through 3.10. Documentation is on `Read the Docs`_. Code repository and issue tracker are on `GitHub`_. .. _Read the Docs: https://coverage.readthedocs.io/ .. _GitHub: https://github.com/nedbat/coveragepy **New in 7.x:** experimental support for sys.monitoring; dropped support for Python 3.7; added ``Coverage.collect()`` context manager; improved data combining; ``[run] exclude_also`` setting; ``report --format=``; type annotations. **New in 6.x:** dropped support for Python 2.7, 3.5, and 3.6; write data on SIGTERM; added support for 3.10 match/case statements. For Enterprise -------------- .. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png :alt: Tidelift :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme .. list-table:: :widths: 10 100 * - |tideliftlogo| - `Available as part of the Tidelift Subscription. `_ Coverage and thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. If you want the flexibility of open source and the confidence of commercial-grade software, this is for you. `Learn more. `_ Getting Started --------------- Looking to run ``coverage`` on your test suite? See the `Quick Start section`_ of the docs. .. _Quick Start section: https://coverage.readthedocs.io/#quick-start Change history -------------- The complete history of changes is on the `change history page`_. .. _change history page: https://coverage.readthedocs.io/en/latest/changes.html Code of Conduct --------------- Everyone participating in the coverage.py project is expected to treat other people with respect and to follow the guidelines articulated in the `Python Community Code of Conduct`_. .. _Python Community Code of Conduct: https://www.python.org/psf/codeofconduct/ Contributing ------------ Found a bug? Want to help improve the code or documentation? See the `Contributing section`_ of the docs. .. _Contributing section: https://coverage.readthedocs.io/en/latest/contributing.html Security -------- To report a security vulnerability, please use the `Tidelift security contact`_. Tidelift will coordinate the fix and disclosure. .. _Tidelift security contact: https://tidelift.com/security License ------- Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_. .. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 .. _NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. |test-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=master&event=push :target: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml :alt: Test suite status .. |quality-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml/badge.svg?branch=master&event=push :target: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml :alt: Quality check status .. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat :target: https://coverage.readthedocs.io/ :alt: Documentation .. |kit| image:: https://img.shields.io/pypi/v/coverage :target: https://pypi.org/project/coverage/ :alt: PyPI status .. 
|versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072 :target: https://pypi.org/project/coverage/ :alt: Python versions supported .. |license| image:: https://img.shields.io/pypi/l/coverage.svg :target: https://pypi.org/project/coverage/ :alt: License .. |metacov| image:: https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/nedbat/8c6980f77988a327348f9b02bbaf67f5/raw/metacov.json :target: https://nedbat.github.io/coverage-reports/latest.html :alt: Coverage reports .. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme :alt: Tidelift .. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github :target: https://github.com/nedbat/coveragepy/stargazers :alt: GitHub stars .. |mastodon-nedbat| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&link=https%3A%2F%2Fhachyderm.io%2F%40nedbat&url=https%3A%2F%2Fhachyderm.io%2Fusers%2Fnedbat%2Ffollowers.json&query=totalItems&label=@nedbat :target: https://hachyderm.io/@nedbat :alt: nedbat on Mastodon .. |mastodon-coveragepy| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&link=https%3A%2F%2Fhachyderm.io%2F%40coveragepy&url=https%3A%2F%2Fhachyderm.io%2Fusers%2Fcoveragepy%2Ffollowers.json&query=totalItems&label=@coveragepy :target: https://hachyderm.io/@coveragepy :alt: coveragepy on Mastodon .. |sponsor| image:: https://img.shields.io/badge/%E2%9D%A4-Sponsor%20me-brightgreen?style=flat&logo=GitHub :target: https://github.com/sponsors/nedbat :alt: Sponsor me on GitHub ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/__main__.py0000644000175100001770000000064500000000000016226 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Be able to execute coverage.py by pointing Python at a working tree.""" import runpy import os PKG = "coverage" run_globals = runpy.run_module(PKG, run_name="__main__", alter_sys=True) executed = os.path.splitext(os.path.basename(run_globals["__file__"]))[0] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.105815 coverage-7.4.4/ci/0000755000175100001770000000000000000000000014522 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/ci/README.txt0000644000175100001770000000006100000000000016215 0ustar00runnerdocker00000000000000Files to support continuous integration systems. 
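A rough sketch of how these scripts are driven during a release -- roughly the
invocations used by the Makefile's build_kits, download_kits, relnotes_json,
and comment_on_fixes targets:

    python ci/trigger_build_kits.py nedbat/coveragepy
    python ci/download_gha_artifacts.py nedbat/coveragepy 'dist-*' dist
    python ci/parse_relnotes.py tmp/rst_rst/changes.md tmp/relnotes.json
    python ci/comment_on_fixes.py nedbat/coveragepy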
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/ci/comment_on_fixes.py0000644000175100001770000000323600000000000020434 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Add a release comment to all the issues mentioned in the latest release.""" import json import re import sys from session import get_session with open("tmp/relnotes.json") as frn: relnotes = json.load(frn) latest = relnotes[0] version = latest["version"] comment = ( f"This is now released as part of [coverage {version}]" + f"(https://pypi.org/project/coverage/{version})." ) print(f"Comment will be:\n\n{comment}\n") repo_owner = sys.argv[1] for m in re.finditer(fr"https://github.com/{repo_owner}/(issues|pull)/(\d+)", latest["text"]): kind, number = m.groups() do_comment = False if kind == "issues": url = f"https://api.github.com/repos/{repo_owner}/issues/{number}" issue_data = get_session().get(url).json() if issue_data["state"] == "closed": do_comment = True else: print(f"Still open, comment manually: {m[0]}") else: url = f"https://api.github.com/repos/{repo_owner}/pulls/{number}" pull_data = get_session().get(url).json() if pull_data["state"] == "closed": if pull_data["merged"]: do_comment = True else: print(f"Not merged, comment manually: {m[0]}") else: print(f"Still open, comment manually: {m[0]}") if do_comment: print(f"Commenting on {m[0]}") url = f"https://api.github.com/repos/{repo_owner}/issues/{number}/comments" resp = get_session().post(url, json={"body": comment}) print(resp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/ci/download_gha_artifacts.py0000644000175100001770000000662000000000000021566 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Use the GitHub API to download built artifacts.""" import collections import datetime import fnmatch import operator import os import os.path import sys import time import zipfile from session import get_session def download_url(url, filename): """Download a file from `url` to `filename`.""" response = get_session().get(url, stream=True) if response.status_code == 200: with open(filename, "wb") as f: for chunk in response.iter_content(16*1024): f.write(chunk) else: raise RuntimeError(f"Fetching {url} produced: status={response.status_code}") def unpack_zipfile(filename): """Unpack a zipfile, using the names in the zip.""" with open(filename, "rb") as fzip: z = zipfile.ZipFile(fzip) for name in z.namelist(): print(f" extracting {name}") z.extract(name) def utc2local(timestring): """ Convert a UTC time into local time in a more readable form. For example: '20201208T122900Z' to '2020-12-08 07:29:00'. """ dt = datetime.datetime utc = dt.fromisoformat(timestring.rstrip("Z")) epoch = time.mktime(utc.timetuple()) offset = dt.fromtimestamp(epoch) - dt.utcfromtimestamp(epoch) local = utc + offset return local.strftime("%Y-%m-%d %H:%M:%S") def all_items(url, key): """ Get all items from a paginated GitHub URL. `key` is the key in the top-level returned object that has a list of items. """ url += ("&" if "?" 
in url else "?") + "per_page=100" while url: response = get_session().get(url) response.raise_for_status() data = response.json() if isinstance(data, dict) and (msg := data.get("message")): raise RuntimeError(f"URL {url!r} failed: {msg}") yield from data.get(key, ()) try: url = response.links.get("next").get("url") except AttributeError: url = None def main(owner_repo, artifact_pattern, dest_dir): """ Download and unzip the latest artifacts matching a pattern. `owner_repo` is a GitHub pair for the repo, like "nedbat/coveragepy". `artifact_pattern` is a filename glob for the artifact name. `dest_dir` is the directory to unpack them into. """ # Get all artifacts matching the pattern, grouped by name. url = f"https://api.github.com/repos/{owner_repo}/actions/artifacts" artifacts_by_name = collections.defaultdict(list) for artifact in all_items(url, "artifacts"): name = artifact["name"] if not fnmatch.fnmatch(name, artifact_pattern): continue artifacts_by_name[name].append(artifact) os.makedirs(dest_dir, exist_ok=True) os.chdir(dest_dir) temp_zip = "artifacts.zip" # Download the latest of each name. for name, artifacts in artifacts_by_name.items(): artifact = max(artifacts, key=operator.itemgetter("created_at")) print( f"Downloading {artifact['name']}, " + f"size: {artifact['size_in_bytes']}, " + f"created: {utc2local(artifact['created_at'])}", ) download_url(artifact["archive_download_url"], temp_zip) unpack_zipfile(temp_zip) os.remove(temp_zip) if __name__ == "__main__": sys.exit(main(*sys.argv[1:])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/ci/ghrel_template.md.j20000644000175100001770000000064600000000000020360 0ustar00runnerdocker00000000000000{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 -#} {# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt -#} {# This file is for use with scriv to create GitHub releases. -#} {{body}} :arrow_right:  PyPI page: [coverage {{version}}](https://pypi.org/project/coverage/{{version}}). :arrow_right:  To install: `python3 -m pip install coverage=={{version}}` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/ci/parse_relnotes.py0000644000175100001770000000663000000000000020126 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Parse CHANGES.md into a JSON structure. Run with two arguments: the .md file to parse, and the JSON file to write: python parse_relnotes.py CHANGES.md relnotes.json Every section that has something that looks like a version number in it will be recorded as the release notes for that version. 
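Each entry in the resulting JSON list is an object shaped like this (a sketch;
the keys come from the relnotes() function below, and the values are only
illustrative):

    {"version": "7.4.4", "text": "...", "prerelease": false, "when": "2024-03-14"}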
""" import json import re import sys class TextChunkBuffer: """Hold onto text chunks until needed.""" def __init__(self): self.buffer = [] def append(self, text): """Add `text` to the buffer.""" self.buffer.append(text) def clear(self): """Clear the buffer.""" self.buffer = [] def flush(self): """Produce a ("text", text) tuple if there's anything here.""" buffered = "".join(self.buffer).strip() if buffered: yield ("text", buffered) self.clear() def parse_md(lines): """Parse markdown lines, producing (type, text) chunks.""" buffer = TextChunkBuffer() for line in lines: if header_match := re.search(r"^(#+) (.+)$", line): yield from buffer.flush() hashes, text = header_match.groups() yield (f"h{len(hashes)}", text) else: buffer.append(line) yield from buffer.flush() def sections(parsed_data): """Convert a stream of parsed tokens into sections with text and notes. Yields a stream of: ('h-level', 'header text', 'text') """ header = None text = [] for ttype, ttext in parsed_data: if ttype.startswith('h'): if header: yield (*header, "\n".join(text)) text = [] header = (ttype, ttext) elif ttype == "text": text.append(ttext) else: raise RuntimeError(f"Don't know ttype {ttype!r}") yield (*header, "\n".join(text)) def refind(regex, text): """Find a regex in some text, and return the matched text, or None.""" if m := re.search(regex, text): return m.group() else: return None def fix_ref_links(text, version): """Find links to .rst files, and make them full RTFD links.""" def new_link(m): return f"](https://coverage.readthedocs.io/en/{version}/{m[1]}.html{m[2]})" return re.sub(r"\]\((\w+)\.rst(#.*?)\)", new_link, text) def relnotes(mdlines): r"""Yield (version, text) pairs from markdown lines. Each tuple is a separate version mentioned in the release notes. A version is any section with \d\.\d in the header text. """ for _, htext, text in sections(parse_md(mdlines)): version = refind(r"\d+\.\d[^ ]*", htext) if version: prerelease = any(c in version for c in "abc") when = refind(r"\d+-\d+-\d+", htext) text = fix_ref_links(text, version) yield { "version": version, "text": text, "prerelease": prerelease, "when": when, } def parse(md_filename, json_filename): """Main function: parse markdown and write JSON.""" with open(md_filename) as mf: markdown = mf.read() with open(json_filename, "w") as jf: json.dump(list(relnotes(markdown.splitlines(True))), jf, indent=4) if __name__ == "__main__": parse(*sys.argv[1:3]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/ci/session.py0000644000175100001770000000150600000000000016561 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Help make a requests Session with proper authentication.""" import os import requests _SESSION = None def get_session(): """Get a properly authenticated requests Session.""" global _SESSION if _SESSION is None: # If GITHUB_TOKEN is in the environment, use it. _SESSION = requests.session() token = os.environ.get("GITHUB_TOKEN") if token is not None: _SESSION.headers["Authorization"] = f"token {token}" # requests.get() will always prefer the .netrc file even if a header # is already set. This tells it to ignore the .netrc file. 
_SESSION.trust_env = False return _SESSION ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/ci/trigger_build_kits.py0000644000175100001770000000135300000000000020752 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Trigger the GitHub action to build our kits.""" import sys from session import get_session repo_owner = sys.argv[1] # The GitHub URL makes no mention of which workflow to use. It's found based on # the event_type, which matches the types in the workflow: # # on: # repository_dispatch: # types: # - build-kits # resp = get_session().post( f"https://api.github.com/repos/{repo_owner}/dispatches", json={"event_type": "build-kits"}, ) if resp.status_code // 100 == 2: print("Success") else: print(f"Status: {resp.status_code}") print(resp.text) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1138148 coverage-7.4.4/coverage/0000755000175100001770000000000000000000000015722 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/__init__.py0000644000175100001770000000232000000000000020030 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Code coverage measurement for Python. Ned Batchelder https://coverage.readthedocs.io """ from __future__ import annotations # mypy's convention is that "import as" names are public from the module. # We import names as themselves to indicate that. Pylint sees it as pointless, # so disable its warning. # pylint: disable=useless-import-alias from coverage.version import ( __version__ as __version__, version_info as version_info, ) from coverage.control import ( Coverage as Coverage, process_startup as process_startup, ) from coverage.data import CoverageData as CoverageData from coverage.exceptions import CoverageException as CoverageException from coverage.plugin import ( CoveragePlugin as CoveragePlugin, FileReporter as FileReporter, FileTracer as FileTracer, ) # Backward compatibility. coverage = Coverage # On Windows, we encode and decode deep enough that something goes wrong and # the encodings.utf_8 module is loaded and then unloaded, I don't know why. # Adding a reference here prevents it from being unloaded. Yuk. 
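# A minimal usage sketch of the names re-exported above ("my_module" is a
# hypothetical stand-in for whatever code you want to measure):
#
#     import coverage
#     cov = coverage.Coverage()
#     with cov.collect():
#         import my_module
#     cov.report()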
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/__main__.py0000644000175100001770000000044500000000000020017 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Coverage.py's main entry point.""" from __future__ import annotations import sys from coverage.cmdline import main sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/annotate.py0000644000175100001770000000723300000000000020112 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Source file annotation for coverage.py.""" from __future__ import annotations import os import re from typing import Iterable, TYPE_CHECKING from coverage.files import flat_rootname from coverage.misc import ensure_dir, isolate_module from coverage.plugin import FileReporter from coverage.report_core import get_analysis_to_report from coverage.results import Analysis from coverage.types import TMorf if TYPE_CHECKING: from coverage import Coverage os = isolate_module(os) class AnnotateReporter: """Generate annotated source files showing line coverage. This reporter creates annotated copies of the measured source files. Each .py file is copied as a .py,cover file, with a left-hand margin annotating each line:: > def h(x): - if 0: #pragma: no cover - pass > if x == 1: ! a = 1 > else: > a = 2 > h(2) Executed lines use ">", lines not executed use "!", lines excluded from consideration use "-". """ def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.config = self.coverage.config self.directory: str | None = None blank_re = re.compile(r"\s*(#|$)") else_re = re.compile(r"\s*else\s*:\s*(#|$)") def report(self, morfs: Iterable[TMorf] | None, directory: str | None = None) -> None: """Run the report. See `coverage.report()` for arguments. """ self.directory = directory self.coverage.get_data() for fr, analysis in get_analysis_to_report(self.coverage, morfs): self.annotate_file(fr, analysis) def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None: """Annotate a single file. `fr` is the FileReporter for the file to annotate. """ statements = sorted(analysis.statements) missing = sorted(analysis.missing) excluded = sorted(analysis.excluded) if self.directory: ensure_dir(self.directory) dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename())) if dest_file.endswith("_py"): dest_file = dest_file[:-3] + ".py" dest_file += ",cover" else: dest_file = fr.filename + ",cover" with open(dest_file, "w", encoding="utf-8") as dest: i = j = 0 covered = True source = fr.source() for lineno, line in enumerate(source.splitlines(True), start=1): while i < len(statements) and statements[i] < lineno: i += 1 while j < len(missing) and missing[j] < lineno: j += 1 if i < len(statements) and statements[i] == lineno: covered = j >= len(missing) or missing[j] > lineno if self.blank_re.match(line): dest.write(" ") elif self.else_re.match(line): # Special logic for lines containing only "else:". if j >= len(missing): dest.write("> ") elif statements[i] == missing[j]: dest.write("! 
") else: dest.write("> ") elif lineno in excluded: dest.write("- ") elif covered: dest.write("> ") else: dest.write("! ") dest.write(line) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/bytecode.py0000644000175100001770000000131100000000000020066 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Bytecode manipulation for coverage.py""" from __future__ import annotations from types import CodeType from typing import Iterator def code_objects(code: CodeType) -> Iterator[CodeType]: """Iterate over all the code objects in `code`.""" stack = [code] while stack: # We're going to return the code object on the stack, but first # push its children for later returning. code = stack.pop() for c in code.co_consts: if isinstance(c, CodeType): stack.append(c) yield code ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/cmdline.py0000644000175100001770000010272400000000000017715 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Command-line support for coverage.py.""" from __future__ import annotations import glob import optparse # pylint: disable=deprecated-module import os import os.path import shlex import sys import textwrap import traceback from typing import cast, Any, NoReturn import coverage from coverage import Coverage from coverage import env from coverage.collector import HAS_CTRACER from coverage.config import CoverageConfig from coverage.control import DEFAULT_DATAFILE from coverage.data import combinable_files, debug_data_file from coverage.debug import info_header, short_stack, write_formatted_info from coverage.exceptions import _BaseCoverageException, _ExceptionDuringRun, NoSource from coverage.execfile import PyRunner from coverage.results import Numbers, should_fail_under from coverage.version import __url__ # When adding to this file, alphabetization is important. Look for # "alphabetize" comments throughout. class Opts: """A namespace class for individual options we'll build parsers from.""" # Keep these entries alphabetized (roughly) by the option name as it # appears on the command line. append = optparse.make_option( "-a", "--append", action="store_true", help="Append coverage data to .coverage, otherwise it starts clean each time.", ) branch = optparse.make_option( "", "--branch", action="store_true", help="Measure branch coverage in addition to statement coverage.", ) concurrency = optparse.make_option( "", "--concurrency", action="store", metavar="LIBS", help=( "Properly measure code using a concurrency library. " + "Valid values are: {}, or a comma-list of them." ).format(", ".join(sorted(CoverageConfig.CONCURRENCY_CHOICES))), ) context = optparse.make_option( "", "--context", action="store", metavar="LABEL", help="The context label to record for this coverage run.", ) contexts = optparse.make_option( "", "--contexts", action="store", metavar="REGEX1,REGEX2,...", help=( "Only display data from lines covered in the given contexts. " + "Accepts Python regexes, which must be quoted." ), ) datafile = optparse.make_option( "", "--data-file", action="store", metavar="DATAFILE", help=( "Base name of the data files to operate on. 
" + "Defaults to '.coverage'. [env: COVERAGE_FILE]" ), ) datafle_input = optparse.make_option( "", "--data-file", action="store", metavar="INFILE", help=( "Read coverage data for report generation from this file. " + "Defaults to '.coverage'. [env: COVERAGE_FILE]" ), ) datafile_output = optparse.make_option( "", "--data-file", action="store", metavar="OUTFILE", help=( "Write the recorded coverage data to this file. " + "Defaults to '.coverage'. [env: COVERAGE_FILE]" ), ) debug = optparse.make_option( "", "--debug", action="store", metavar="OPTS", help="Debug options, separated by commas. [env: COVERAGE_DEBUG]", ) directory = optparse.make_option( "-d", "--directory", action="store", metavar="DIR", help="Write the output files to DIR.", ) fail_under = optparse.make_option( "", "--fail-under", action="store", metavar="MIN", type="float", help="Exit with a status of 2 if the total coverage is less than MIN.", ) format = optparse.make_option( "", "--format", action="store", metavar="FORMAT", help="Output format, either text (default), markdown, or total.", ) help = optparse.make_option( "-h", "--help", action="store_true", help="Get help on this command.", ) ignore_errors = optparse.make_option( "-i", "--ignore-errors", action="store_true", help="Ignore errors while reading source files.", ) include = optparse.make_option( "", "--include", action="store", metavar="PAT1,PAT2,...", help=( "Include only files whose paths match one of these patterns. " + "Accepts shell-style wildcards, which must be quoted." ), ) keep = optparse.make_option( "", "--keep", action="store_true", help="Keep original coverage files, otherwise they are deleted.", ) pylib = optparse.make_option( "-L", "--pylib", action="store_true", help=( "Measure coverage even inside the Python installed library, " + "which isn't done by default." ), ) show_missing = optparse.make_option( "-m", "--show-missing", action="store_true", help="Show line numbers of statements in each module that weren't executed.", ) module = optparse.make_option( "-m", "--module", action="store_true", help=( " is an importable Python module, not a script path, " + "to be run as 'python -m' would run it." ), ) omit = optparse.make_option( "", "--omit", action="store", metavar="PAT1,PAT2,...", help=( "Omit files whose paths match one of these patterns. " + "Accepts shell-style wildcards, which must be quoted." ), ) output_xml = optparse.make_option( "-o", "", action="store", dest="outfile", metavar="OUTFILE", help="Write the XML report to this file. Defaults to 'coverage.xml'", ) output_json = optparse.make_option( "-o", "", action="store", dest="outfile", metavar="OUTFILE", help="Write the JSON report to this file. Defaults to 'coverage.json'", ) output_lcov = optparse.make_option( "-o", "", action="store", dest="outfile", metavar="OUTFILE", help="Write the LCOV report to this file. Defaults to 'coverage.lcov'", ) json_pretty_print = optparse.make_option( "", "--pretty-print", action="store_true", help="Format the JSON for human readers.", ) parallel_mode = optparse.make_option( "-p", "--parallel-mode", action="store_true", help=( "Append the machine name, process id and random number to the " + "data file name to simplify collecting data from " + "many processes." ), ) precision = optparse.make_option( "", "--precision", action="store", metavar="N", type=int, help=( "Number of digits after the decimal point to display for " + "reported coverage percentages." 
), ) quiet = optparse.make_option( "-q", "--quiet", action="store_true", help="Don't print messages about what is happening.", ) rcfile = optparse.make_option( "", "--rcfile", action="store", help=( "Specify configuration file. " + "By default '.coveragerc', 'setup.cfg', 'tox.ini', and " + "'pyproject.toml' are tried. [env: COVERAGE_RCFILE]" ), ) show_contexts = optparse.make_option( "--show-contexts", action="store_true", help="Show contexts for covered lines.", ) skip_covered = optparse.make_option( "--skip-covered", action="store_true", help="Skip files with 100% coverage.", ) no_skip_covered = optparse.make_option( "--no-skip-covered", action="store_false", dest="skip_covered", help="Disable --skip-covered.", ) skip_empty = optparse.make_option( "--skip-empty", action="store_true", help="Skip files with no code.", ) sort = optparse.make_option( "--sort", action="store", metavar="COLUMN", help=( "Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. " + "Default is name." ), ) source = optparse.make_option( "", "--source", action="store", metavar="SRC1,SRC2,...", help="A list of directories or importable names of code to measure.", ) timid = optparse.make_option( "", "--timid", action="store_true", help="Use the slower Python trace function core.", ) title = optparse.make_option( "", "--title", action="store", metavar="TITLE", help="A text string to use as the title on the HTML.", ) version = optparse.make_option( "", "--version", action="store_true", help="Display version information and exit.", ) class CoverageOptionParser(optparse.OptionParser): """Base OptionParser for coverage.py. Problems don't exit the program. Defaults are initialized for all options. """ def __init__(self, *args: Any, **kwargs: Any) -> None: kwargs["add_help_option"] = False super().__init__(*args, **kwargs) self.set_defaults( # Keep these arguments alphabetized by their names. action=None, append=None, branch=None, concurrency=None, context=None, contexts=None, data_file=None, debug=None, directory=None, fail_under=None, format=None, help=None, ignore_errors=None, include=None, keep=None, module=None, omit=None, parallel_mode=None, precision=None, pylib=None, quiet=None, rcfile=True, show_contexts=None, show_missing=None, skip_covered=None, skip_empty=None, sort=None, source=None, timid=None, title=None, version=None, ) self.disable_interspersed_args() class OptionParserError(Exception): """Used to stop the optparse error handler ending the process.""" pass def parse_args_ok(self, args: list[str]) -> tuple[bool, optparse.Values | None, list[str]]: """Call optparse.parse_args, but return a triple: (ok, options, args) """ try: options, args = super().parse_args(args) except self.OptionParserError: return False, None, [] return True, options, args def error(self, msg: str) -> NoReturn: """Override optparse.error so sys.exit doesn't get called.""" show_help(msg) raise self.OptionParserError class GlobalOptionParser(CoverageOptionParser): """Command-line parser for coverage.py global option arguments.""" def __init__(self) -> None: super().__init__() self.add_options([ Opts.help, Opts.version, ]) class CmdOptionParser(CoverageOptionParser): """Parse one of the new-style commands for coverage.py.""" def __init__( self, action: str, options: list[optparse.Option], description: str, usage: str | None = None, ): """Create an OptionParser for a coverage.py command. `action` is the slug to put into `options.action`. `options` is a list of Option's for the command. 
`description` is the description of the command, for the help text. `usage` is the usage string to display in help. """ if usage: usage = "%prog " + usage super().__init__( usage=usage, description=description, ) self.set_defaults(action=action) self.add_options(options) self.cmd = action def __eq__(self, other: str) -> bool: # type: ignore[override] # A convenience equality, so that I can put strings in unit test # results, and they will compare equal to objects. return (other == f"") __hash__ = None # type: ignore[assignment] def get_prog_name(self) -> str: """Override of an undocumented function in optparse.OptionParser.""" program_name = super().get_prog_name() # Include the sub-command for this parser as part of the command. return f"{program_name} {self.cmd}" # In lists of Opts, keep them alphabetized by the option names as they appear # on the command line, since these lists determine the order of the options in # the help output. # # In COMMANDS, keep the keys (command names) alphabetized. GLOBAL_ARGS = [ Opts.debug, Opts.help, Opts.rcfile, ] COMMANDS = { "annotate": CmdOptionParser( "annotate", [ Opts.directory, Opts.datafle_input, Opts.ignore_errors, Opts.include, Opts.omit, ] + GLOBAL_ARGS, usage="[options] [modules]", description=( "Make annotated copies of the given files, marking statements that are executed " + "with > and statements that are missed with !." ), ), "combine": CmdOptionParser( "combine", [ Opts.append, Opts.datafile, Opts.keep, Opts.quiet, ] + GLOBAL_ARGS, usage="[options] ... ", description=( "Combine data from multiple coverage files. " + "The combined results are written to a single " + "file representing the union of the data. The positional " + "arguments are data files or directories containing data files. " + "If no paths are provided, data files in the default data file's " + "directory are combined." ), ), "debug": CmdOptionParser( "debug", GLOBAL_ARGS, usage="", description=( "Display information about the internals of coverage.py, " + "for diagnosing problems. " + "Topics are: " + "'data' to show a summary of the collected data; " + "'sys' to show installation information; " + "'config' to show the configuration; " + "'premain' to show what is calling coverage; " + "'pybehave' to show internal flags describing Python behavior." ), ), "erase": CmdOptionParser( "erase", [ Opts.datafile, ] + GLOBAL_ARGS, description="Erase previously collected coverage data.", ), "help": CmdOptionParser( "help", GLOBAL_ARGS, usage="[command]", description="Describe how to use coverage.py", ), "html": CmdOptionParser( "html", [ Opts.contexts, Opts.directory, Opts.datafle_input, Opts.fail_under, Opts.ignore_errors, Opts.include, Opts.omit, Opts.precision, Opts.quiet, Opts.show_contexts, Opts.skip_covered, Opts.no_skip_covered, Opts.skip_empty, Opts.title, ] + GLOBAL_ARGS, usage="[options] [modules]", description=( "Create an HTML report of the coverage of the files. " + "Each file gets its own page, with the source decorated to show " + "executed, excluded, and missed lines." 
), ), "json": CmdOptionParser( "json", [ Opts.contexts, Opts.datafle_input, Opts.fail_under, Opts.ignore_errors, Opts.include, Opts.omit, Opts.output_json, Opts.json_pretty_print, Opts.quiet, Opts.show_contexts, ] + GLOBAL_ARGS, usage="[options] [modules]", description="Generate a JSON report of coverage results.", ), "lcov": CmdOptionParser( "lcov", [ Opts.datafle_input, Opts.fail_under, Opts.ignore_errors, Opts.include, Opts.output_lcov, Opts.omit, Opts.quiet, ] + GLOBAL_ARGS, usage="[options] [modules]", description="Generate an LCOV report of coverage results.", ), "report": CmdOptionParser( "report", [ Opts.contexts, Opts.datafle_input, Opts.fail_under, Opts.format, Opts.ignore_errors, Opts.include, Opts.omit, Opts.precision, Opts.sort, Opts.show_missing, Opts.skip_covered, Opts.no_skip_covered, Opts.skip_empty, ] + GLOBAL_ARGS, usage="[options] [modules]", description="Report coverage statistics on modules.", ), "run": CmdOptionParser( "run", [ Opts.append, Opts.branch, Opts.concurrency, Opts.context, Opts.datafile_output, Opts.include, Opts.module, Opts.omit, Opts.pylib, Opts.parallel_mode, Opts.source, Opts.timid, ] + GLOBAL_ARGS, usage="[options] [program options]", description="Run a Python program, measuring code execution.", ), "xml": CmdOptionParser( "xml", [ Opts.datafle_input, Opts.fail_under, Opts.ignore_errors, Opts.include, Opts.omit, Opts.output_xml, Opts.quiet, Opts.skip_empty, ] + GLOBAL_ARGS, usage="[options] [modules]", description="Generate an XML report of coverage results.", ), } def show_help( error: str | None = None, topic: str | None = None, parser: optparse.OptionParser | None = None, ) -> None: """Display an error message, or the named topic.""" assert error or topic or parser program_path = sys.argv[0] if program_path.endswith(os.path.sep + "__main__.py"): # The path is the main module of a package; get that path instead. program_path = os.path.dirname(program_path) program_name = os.path.basename(program_path) if env.WINDOWS: # entry_points={"console_scripts":...} on Windows makes files # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These # invoke coverage-script.py, coverage3-script.py, and # coverage-3.5-script.py. argv[0] is the .py file, but we want to # get back to the original form. auto_suffix = "-script.py" if program_name.endswith(auto_suffix): program_name = program_name[:-len(auto_suffix)] help_params = dict(coverage.__dict__) help_params["__url__"] = __url__ help_params["program_name"] = program_name if HAS_CTRACER: help_params["extension_modifier"] = "with C extension" else: help_params["extension_modifier"] = "without C extension" if error: print(error, file=sys.stderr) print(f"Use '{program_name} help' for help.", file=sys.stderr) elif parser: print(parser.format_help().strip()) print() else: assert topic is not None help_msg = textwrap.dedent(HELP_TOPICS.get(topic, "")).strip() if help_msg: print(help_msg.format(**help_params)) else: print(f"Don't know topic {topic!r}") print("Full documentation is at {__url__}".format(**help_params)) OK, ERR, FAIL_UNDER = 0, 1, 2 class CoverageScript: """The command-line interface to coverage.py.""" def __init__(self) -> None: self.global_option = False self.coverage: Coverage def command_line(self, argv: list[str]) -> int: """The bulk of the command line interface to coverage.py. `argv` is the argument list to process. Returns 0 if all is well, 1 if something went wrong. """ # Collect the command-line options. 
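        # (Illustrative call, not part of the parsing itself:
        # command_line(["report", "--fail-under=90"]) selects the "report"
        # sub-command parser from COMMANDS and can return FAIL_UNDER (2) if the
        # total coverage comes out below 90.)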
if not argv: show_help(topic="minimum_help") return OK # The command syntax we parse depends on the first argument. Global # switch syntax always starts with an option. parser: optparse.OptionParser | None self.global_option = argv[0].startswith("-") if self.global_option: parser = GlobalOptionParser() else: parser = COMMANDS.get(argv[0]) if not parser: show_help(f"Unknown command: {argv[0]!r}") return ERR argv = argv[1:] ok, options, args = parser.parse_args_ok(argv) if not ok: return ERR assert options is not None # Handle help and version. if self.do_help(options, args, parser): return OK # Listify the list options. source = unshell_list(options.source) omit = unshell_list(options.omit) include = unshell_list(options.include) debug = unshell_list(options.debug) contexts = unshell_list(options.contexts) if options.concurrency is not None: concurrency = options.concurrency.split(",") else: concurrency = None # Do something. self.coverage = Coverage( data_file=options.data_file or DEFAULT_DATAFILE, data_suffix=options.parallel_mode, cover_pylib=options.pylib, timid=options.timid, branch=options.branch, config_file=options.rcfile, source=source, omit=omit, include=include, debug=debug, concurrency=concurrency, check_preimported=True, context=options.context, messages=not options.quiet, ) if options.action == "debug": return self.do_debug(args) elif options.action == "erase": self.coverage.erase() return OK elif options.action == "run": return self.do_run(options, args) elif options.action == "combine": if options.append: self.coverage.load() data_paths = args or None self.coverage.combine(data_paths, strict=True, keep=bool(options.keep)) self.coverage.save() return OK # Remaining actions are reporting, with some common options. report_args = dict( morfs=unglob_args(args), ignore_errors=options.ignore_errors, omit=omit, include=include, contexts=contexts, ) # We need to be able to import from the current directory, because # plugins may try to, for example, to read Django settings. sys.path.insert(0, "") self.coverage.load() total = None if options.action == "report": total = self.coverage.report( precision=options.precision, show_missing=options.show_missing, skip_covered=options.skip_covered, skip_empty=options.skip_empty, sort=options.sort, output_format=options.format, **report_args, ) elif options.action == "annotate": self.coverage.annotate(directory=options.directory, **report_args) elif options.action == "html": total = self.coverage.html_report( directory=options.directory, precision=options.precision, skip_covered=options.skip_covered, skip_empty=options.skip_empty, show_contexts=options.show_contexts, title=options.title, **report_args, ) elif options.action == "xml": total = self.coverage.xml_report( outfile=options.outfile, skip_empty=options.skip_empty, **report_args, ) elif options.action == "json": total = self.coverage.json_report( outfile=options.outfile, pretty_print=options.pretty_print, show_contexts=options.show_contexts, **report_args, ) elif options.action == "lcov": total = self.coverage.lcov_report( outfile=options.outfile, **report_args, ) else: # There are no other possible actions. raise AssertionError if total is not None: # Apply the command line fail-under options, and then use the config # value, so we can get fail_under from the config file. 
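            # (For example, "coverage report --fail-under=90" arrives here with
            # options.fail_under == 90.0, overriding any report:fail_under
            # value read from the configuration file.)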
if options.fail_under is not None: self.coverage.set_option("report:fail_under", options.fail_under) if options.precision is not None: self.coverage.set_option("report:precision", options.precision) fail_under = cast(float, self.coverage.get_option("report:fail_under")) precision = cast(int, self.coverage.get_option("report:precision")) if should_fail_under(total, fail_under, precision): msg = "total of {total} is less than fail-under={fail_under:.{p}f}".format( total=Numbers(precision=precision).display_covered(total), fail_under=fail_under, p=precision, ) print("Coverage failure:", msg) return FAIL_UNDER return OK def do_help( self, options: optparse.Values, args: list[str], parser: optparse.OptionParser, ) -> bool: """Deal with help requests. Return True if it handled the request, False if not. """ # Handle help. if options.help: if self.global_option: show_help(topic="help") else: show_help(parser=parser) return True if options.action == "help": if args: for a in args: parser_maybe = COMMANDS.get(a) if parser_maybe is not None: show_help(parser=parser_maybe) else: show_help(topic=a) else: show_help(topic="help") return True # Handle version. if options.version: show_help(topic="version") return True return False def do_run(self, options: optparse.Values, args: list[str]) -> int: """Implementation of 'coverage run'.""" if not args: if options.module: # Specified -m with nothing else. show_help("No module specified for -m") return ERR command_line = cast(str, self.coverage.get_option("run:command_line")) if command_line is not None: args = shlex.split(command_line) if args and args[0] in {"-m", "--module"}: options.module = True args = args[1:] if not args: show_help("Nothing to do.") return ERR if options.append and self.coverage.get_option("run:parallel"): show_help("Can't append to data files in parallel mode.") return ERR if options.concurrency == "multiprocessing": # Can't set other run-affecting command line options with # multiprocessing. for opt_name in ["branch", "include", "omit", "pylib", "source", "timid"]: # As it happens, all of these options have no default, meaning # they will be None if they have not been specified. if getattr(options, opt_name) is not None: show_help( "Options affecting multiprocessing must only be specified " + "in a configuration file.\n" + f"Remove --{opt_name} from the command line.", ) return ERR os.environ["COVERAGE_RUN"] = "true" runner = PyRunner(args, as_module=bool(options.module)) runner.prepare() if options.append: self.coverage.load() # Run the script. 
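        # (For example, "coverage run --append prog.py" -- "prog.py" being any
        # script -- reaches this point with the existing .coverage data already
        # loaded, so the new measurements are added to it when saved below.)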
self.coverage.start() code_ran = True try: runner.run() except NoSource: code_ran = False raise finally: self.coverage.stop() if code_ran: self.coverage.save() return OK def do_debug(self, args: list[str]) -> int: """Implementation of 'coverage debug'.""" if not args: show_help("What information would you like: config, data, sys, premain, pybehave?") return ERR if args[1:]: show_help("Only one topic at a time, please") return ERR if args[0] == "sys": write_formatted_info(print, "sys", self.coverage.sys_info()) elif args[0] == "data": print(info_header("data")) data_file = self.coverage.config.data_file debug_data_file(data_file) for filename in combinable_files(data_file): print("-----") debug_data_file(filename) elif args[0] == "config": write_formatted_info(print, "config", self.coverage.config.debug_info()) elif args[0] == "premain": print(info_header("premain")) print(short_stack(full=True)) elif args[0] == "pybehave": write_formatted_info(print, "pybehave", env.debug_info()) else: show_help(f"Don't know what you mean by {args[0]!r}") return ERR return OK def unshell_list(s: str) -> list[str] | None: """Turn a command-line argument into a list.""" if not s: return None if env.WINDOWS: # When running coverage.py as coverage.exe, some of the behavior # of the shell is emulated: wildcards are expanded into a list of # file names. So you have to single-quote patterns on the command # line, but (not) helpfully, the single quotes are included in the # argument, so we have to strip them off here. s = s.strip("'") return s.split(",") def unglob_args(args: list[str]) -> list[str]: """Interpret shell wildcards for platforms that need it.""" if env.WINDOWS: globbed = [] for arg in args: if "?" in arg or "*" in arg: globbed.extend(glob.glob(arg)) else: globbed.append(arg) args = globbed return args HELP_TOPICS = { "help": """\ Coverage.py, version {__version__} {extension_modifier} Measure, collect, and report on code coverage in Python programs. usage: {program_name} [options] [args] Commands: annotate Annotate source files with execution information. combine Combine a number of data files. debug Display information about the internals of coverage.py erase Erase previously collected coverage data. help Get help on using coverage.py. html Create an HTML report. json Create a JSON report of coverage results. lcov Create an LCOV report of coverage results. report Report coverage stats on modules. run Run a Python program and measure code execution. xml Create an XML report of coverage results. Use "{program_name} help " for detailed help on any command. """, "minimum_help": ( "Code coverage for Python, version {__version__} {extension_modifier}. " + "Use '{program_name} help' for help." ), "version": "Coverage.py, version {__version__} {extension_modifier}", } def main(argv: list[str] | None = None) -> int | None: """The main entry point to coverage.py. This is installed as the script entry point. """ if argv is None: argv = sys.argv[1:] try: status = CoverageScript().command_line(argv) except _ExceptionDuringRun as err: # An exception was caught while running the product code. The # sys.exc_info() return tuple is packed into an _ExceptionDuringRun # exception. traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter status = ERR except _BaseCoverageException as err: # A controlled error inside coverage.py: print the message to the user. msg = err.args[0] print(msg) status = ERR except SystemExit as err: # The user called `sys.exit()`. Exit with their argument, if any. 
if err.args: status = err.args[0] else: status = None return status # Profiling using ox_profile. Install it from GitHub: # pip install git+https://github.com/emin63/ox_profile.git # # $set_env.py: COVERAGE_PROFILE - Set to use ox_profile. _profile = os.getenv("COVERAGE_PROFILE") if _profile: # pragma: debugging from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error original_main = main def main( # pylint: disable=function-redefined argv: list[str] | None = None, ) -> int | None: """A wrapper around main that profiles.""" profiler = SimpleLauncher.launch() try: return original_main(argv) finally: data, _ = profiler.query(re_filter="coverage", max_records=100) print(profiler.show(query=data, limit=100, sep="", col="")) profiler.cancel() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/collector.py0000644000175100001770000005243000000000000020266 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Raw data collector for coverage.py.""" from __future__ import annotations import functools import os import sys from types import FrameType from typing import ( cast, Any, Callable, Dict, List, Mapping, Set, TypeVar, ) from coverage import env from coverage.config import CoverageConfig from coverage.data import CoverageData from coverage.debug import short_stack from coverage.disposition import FileDisposition from coverage.exceptions import ConfigError from coverage.misc import human_sorted_items, isolate_module from coverage.plugin import CoveragePlugin from coverage.pytracer import PyTracer from coverage.sysmon import SysMonitor from coverage.types import ( TArc, TFileDisposition, TTraceData, TTraceFn, TracerCore, TWarnFn, ) os = isolate_module(os) try: # Use the C extension code when we can, for speed. from coverage.tracer import CTracer, CFileDisposition HAS_CTRACER = True except ImportError: # Couldn't import the C extension, maybe it isn't built. if os.getenv("COVERAGE_CORE") == "ctrace": # pragma: part covered # During testing, we use the COVERAGE_CORE environment variable # to indicate that we've fiddled with the environment to test this # fallback code. If we thought we had a C tracer, but couldn't import # it, then exit quickly and clearly instead of dribbling confusing # errors. I'm using sys.exit here instead of an exception because an # exception here causes all sorts of other noise in unittest. sys.stderr.write("*** COVERAGE_CORE is 'ctrace' but can't import CTracer!\n") sys.exit(1) HAS_CTRACER = False T = TypeVar("T") class Collector: """Collects trace data. Creates a Tracer object for each thread, since they track stack information. Each Tracer points to the same shared data, contributing traced data points. When the Collector is started, it creates a Tracer for the current thread, and installs a function to create Tracers for each new thread started. When the Collector is stopped, all active Tracers are stopped. Threads started while the Collector is stopped will never have Tracers associated with them. """ # The stack of active Collectors. Collectors are added here when started, # and popped when stopped. Collectors on the stack are paused when not # the top, and resumed when they become the top again. _collectors: list[Collector] = [] # The concurrency settings we support here. 
LIGHT_THREADS = {"greenlet", "eventlet", "gevent"} def __init__( self, should_trace: Callable[[str, FrameType], TFileDisposition], check_include: Callable[[str, FrameType], bool], should_start_context: Callable[[FrameType], str | None] | None, file_mapper: Callable[[str], str], timid: bool, branch: bool, warn: TWarnFn, concurrency: list[str], metacov: bool, ) -> None: """Create a collector. `should_trace` is a function, taking a file name and a frame, and returning a `coverage.FileDisposition object`. `check_include` is a function taking a file name and a frame. It returns a boolean: True if the file should be traced, False if not. `should_start_context` is a function taking a frame, and returning a string. If the frame should be the start of a new context, the string is the new context. If the frame should not be the start of a new context, return None. `file_mapper` is a function taking a filename, and returning a Unicode filename. The result is the name that will be recorded in the data file. If `timid` is true, then a slower simpler trace function will be used. This is important for some environments where manipulation of tracing functions make the faster more sophisticated trace function not operate properly. If `branch` is true, then branches will be measured. This involves collecting data on which statements followed each other (arcs). Use `get_arc_data` to get the arc data. `warn` is a warning function, taking a single string message argument and an optional slug argument which will be a string or None, to be used if a warning needs to be issued. `concurrency` is a list of strings indicating the concurrency libraries in use. Valid values are "greenlet", "eventlet", "gevent", or "thread" (the default). "thread" can be combined with one of the other three. Other values are ignored. 
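# The constructor described above is wired up with callbacks rather than
# hard-coded policy.  An illustrative (and much simplified) pair of callbacks,
# assuming a project that only wants files under "src/" traced; the real
# coverage.FileDisposition object carries more attributes than this stand-in:

from types import FrameType

class DispositionSketch:
    """Minimal stand-in for a coverage.FileDisposition result."""
    def __init__(self, original_filename: str, trace: bool, reason: str = ""):
        self.original_filename = original_filename
        self.trace = trace
        self.reason = reason

def should_trace_sketch(filename: str, frame: FrameType) -> DispositionSketch:
    if filename.replace("\\", "/").startswith("src/"):
        return DispositionSketch(filename, trace=True)
    return DispositionSketch(filename, trace=False, reason="outside src/")

def check_include_sketch(filename: str, frame: FrameType) -> bool:
    return filename.endswith(".py")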
""" self.should_trace = should_trace self.check_include = check_include self.should_start_context = should_start_context self.file_mapper = file_mapper self.branch = branch self.warn = warn self.concurrency = concurrency assert isinstance(self.concurrency, list), f"Expected a list: {self.concurrency!r}" self.pid = os.getpid() self.covdata: CoverageData self.threading = None self.static_context: str | None = None self.origin = short_stack() self.concur_id_func = None self._trace_class: type[TracerCore] self.file_disposition_class: type[TFileDisposition] core: str | None if timid: core = "pytrace" else: core = os.getenv("COVERAGE_CORE") if core == "sysmon" and not env.PYBEHAVIOR.pep669: self.warn("sys.monitoring isn't available, using default core", slug="no-sysmon") core = None if not core: # Once we're comfortable with sysmon as a default: # if env.PYBEHAVIOR.pep669 and self.should_start_context is None: # core = "sysmon" if HAS_CTRACER: core = "ctrace" else: core = "pytrace" if core == "sysmon": self._trace_class = SysMonitor self._core_kwargs = {"tool_id": 3 if metacov else 1} self.file_disposition_class = FileDisposition self.supports_plugins = False self.packed_arcs = False self.systrace = False elif core == "ctrace": self._trace_class = CTracer self._core_kwargs = {} self.file_disposition_class = CFileDisposition self.supports_plugins = True self.packed_arcs = True self.systrace = True elif core == "pytrace": self._trace_class = PyTracer self._core_kwargs = {} self.file_disposition_class = FileDisposition self.supports_plugins = False self.packed_arcs = False self.systrace = True else: raise ConfigError(f"Unknown core value: {core!r}") # We can handle a few concurrency options here, but only one at a time. concurrencies = set(self.concurrency) unknown = concurrencies - CoverageConfig.CONCURRENCY_CHOICES if unknown: show = ", ".join(sorted(unknown)) raise ConfigError(f"Unknown concurrency choices: {show}") light_threads = concurrencies & self.LIGHT_THREADS if len(light_threads) > 1: show = ", ".join(sorted(light_threads)) raise ConfigError(f"Conflicting concurrency settings: {show}") do_threading = False tried = "nothing" # to satisfy pylint try: if "greenlet" in concurrencies: tried = "greenlet" import greenlet self.concur_id_func = greenlet.getcurrent elif "eventlet" in concurrencies: tried = "eventlet" import eventlet.greenthread # pylint: disable=import-error,useless-suppression self.concur_id_func = eventlet.greenthread.getcurrent elif "gevent" in concurrencies: tried = "gevent" import gevent # pylint: disable=import-error,useless-suppression self.concur_id_func = gevent.getcurrent if "thread" in concurrencies: do_threading = True except ImportError as ex: msg = f"Couldn't trace with concurrency={tried}, the module isn't installed." raise ConfigError(msg) from ex if self.concur_id_func and not hasattr(self._trace_class, "concur_id_func"): raise ConfigError( "Can't support concurrency={} with {}, only threads are supported.".format( tried, self.tracer_name(), ), ) if do_threading or not concurrencies: # It's important to import threading only if we need it. If # it's imported early, and the program being measured uses # gevent, then gevent's monkey-patching won't work properly. 
import threading self.threading = threading self.reset() def __repr__(self) -> str: return f"" def use_data(self, covdata: CoverageData, context: str | None) -> None: """Use `covdata` for recording data.""" self.covdata = covdata self.static_context = context self.covdata.set_context(self.static_context) def tracer_name(self) -> str: """Return the class name of the tracer we're using.""" return self._trace_class.__name__ def _clear_data(self) -> None: """Clear out existing data, but stay ready for more collection.""" # We used to use self.data.clear(), but that would remove filename # keys and data values that were still in use higher up the stack # when we are called as part of switch_context. for d in self.data.values(): d.clear() for tracer in self.tracers: tracer.reset_activity() def reset(self) -> None: """Clear collected data, and prepare to collect more.""" # The trace data we are collecting. self.data: TTraceData = {} # A dictionary mapping file names to file tracer plugin names that will # handle them. self.file_tracers: dict[str, str] = {} self.disabled_plugins: set[str] = set() # The .should_trace_cache attribute is a cache from file names to # coverage.FileDisposition objects, or None. When a file is first # considered for tracing, a FileDisposition is obtained from # Coverage.should_trace. Its .trace attribute indicates whether the # file should be traced or not. If it should be, a plugin with dynamic # file names can decide not to trace it based on the dynamic file name # being excluded by the inclusion rules, in which case the # FileDisposition will be replaced by None in the cache. if env.PYPY: import __pypy__ # pylint: disable=import-error # Alex Gaynor said: # should_trace_cache is a strictly growing key: once a key is in # it, it never changes. Further, the keys used to access it are # generally constant, given sufficient context. That is to say, at # any given point _trace() is called, pypy is able to know the key. # This is because the key is determined by the physical source code # line, and that's invariant with the call site. # # This property of a dict with immutable keys, combined with # call-site-constant keys is a match for PyPy's module dict, # which is optimized for such workloads. # # This gives a 20% benefit on the workload described at # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage self.should_trace_cache = __pypy__.newdict("module") else: self.should_trace_cache = {} # Our active Tracers. 
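# The should_trace_cache choice above is a micro-optimization: on PyPy, a
# "module" dict is specialized for grow-only mappings accessed with
# call-site-constant keys.  A sketch of just that decision, using plain
# sys-based detection instead of coverage's env module:

import sys

def make_trace_cache_sketch() -> dict:
    if sys.implementation.name == "pypy":
        import __pypy__                       # only importable on PyPy
        return __pypy__.newdict("module")
    return {}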
self.tracers: list[TracerCore] = [] self._clear_data() def _start_tracer(self) -> TTraceFn | None: """Start a new Tracer object, and store it in self.tracers.""" tracer = self._trace_class(**self._core_kwargs) tracer.data = self.data tracer.trace_arcs = self.branch tracer.should_trace = self.should_trace tracer.should_trace_cache = self.should_trace_cache tracer.warn = self.warn if hasattr(tracer, 'concur_id_func'): tracer.concur_id_func = self.concur_id_func if hasattr(tracer, 'file_tracers'): tracer.file_tracers = self.file_tracers if hasattr(tracer, 'threading'): tracer.threading = self.threading if hasattr(tracer, 'check_include'): tracer.check_include = self.check_include if hasattr(tracer, 'should_start_context'): tracer.should_start_context = self.should_start_context if hasattr(tracer, 'switch_context'): tracer.switch_context = self.switch_context if hasattr(tracer, 'disable_plugin'): tracer.disable_plugin = self.disable_plugin fn = tracer.start() self.tracers.append(tracer) return fn # The trace function has to be set individually on each thread before # execution begins. Ironically, the only support the threading module has # for running code before the thread main is the tracing function. So we # install this as a trace function, and the first time it's called, it does # the real trace installation. # # New in 3.12: threading.settrace_all_threads: https://github.com/python/cpython/pull/96681 def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> TTraceFn | None: """Called on new threads, installs the real tracer.""" # Remove ourselves as the trace function. sys.settrace(None) # Install the real tracer. fn: TTraceFn | None = self._start_tracer() # Invoke the real trace function with the current event, to be sure # not to lose an event. if fn: fn = fn(frame, event, arg) # Return the new trace function to continue tracing in this scope. return fn def start(self) -> None: """Start collecting trace information.""" # We may be a new collector in a forked process. The old process' # collectors will be in self._collectors, but they won't be usable. # Find them and discard them. keep_collectors = [] for c in self._collectors: if c.pid == self.pid: keep_collectors.append(c) else: c.post_fork() self._collectors[:] = keep_collectors if self._collectors: self._collectors[-1].pause() self.tracers = [] try: # Install the tracer on this thread. self._start_tracer() except: if self._collectors: self._collectors[-1].resume() raise # If _start_tracer succeeded, then we add ourselves to the global # stack of collectors. self._collectors.append(self) # Install our installation tracer in threading, to jump-start other # threads. if self.systrace and self.threading: self.threading.settrace(self._installation_trace) def stop(self) -> None: """Stop collecting trace information.""" assert self._collectors if self._collectors[-1] is not self: print("self._collectors:") for c in self._collectors: print(f" {c!r}\n{c.origin}") assert self._collectors[-1] is self, ( f"Expected current collector to be {self!r}, but it's {self._collectors[-1]!r}" ) self.pause() # Remove this Collector from the stack, and resume the one underneath (if any). 
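# The _installation_trace trick above is worth seeing in isolation: the only
# hook threading offers for "run something at thread start" is the trace
# function, so a throwaway trace function is installed that replaces itself
# with the real tracer on its first call.  A generic sketch of the pattern;
# `start_real_tracer` is a hypothetical callable that installs and returns a
# trace function:

import sys
import threading

def make_installation_trace(start_real_tracer):
    def installation_trace(frame, event, arg):
        sys.settrace(None)                  # remove the bootstrap function
        fn = start_real_tracer()            # install the real tracer
        if fn:
            fn = fn(frame, event, arg)      # replay the current event
        return fn
    return installation_trace

# threading.settrace(make_installation_trace(start_real_tracer)) arms every
# thread started afterwards; Python 3.12 adds settrace_all_threads() for
# threads that are already running.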
self._collectors.pop() if self._collectors: self._collectors[-1].resume() def pause(self) -> None: """Pause tracing, but be prepared to `resume`.""" for tracer in self.tracers: tracer.stop() stats = tracer.get_stats() if stats: print("\nCoverage.py tracer stats:") for k, v in human_sorted_items(stats.items()): print(f"{k:>20}: {v}") if self.threading: self.threading.settrace(None) def resume(self) -> None: """Resume tracing after a `pause`.""" for tracer in self.tracers: tracer.start() if self.systrace: if self.threading: self.threading.settrace(self._installation_trace) else: self._start_tracer() def post_fork(self) -> None: """After a fork, tracers might need to adjust.""" for tracer in self.tracers: if hasattr(tracer, "post_fork"): tracer.post_fork() def _activity(self) -> bool: """Has any activity been traced? Returns a boolean, True if any trace function was invoked. """ return any(tracer.activity() for tracer in self.tracers) def switch_context(self, new_context: str | None) -> None: """Switch to a new dynamic context.""" context: str | None self.flush_data() if self.static_context: context = self.static_context if new_context: context += "|" + new_context else: context = new_context self.covdata.set_context(context) def disable_plugin(self, disposition: TFileDisposition) -> None: """Disable the plugin mentioned in `disposition`.""" file_tracer = disposition.file_tracer assert file_tracer is not None plugin = file_tracer._coverage_plugin plugin_name = plugin._coverage_plugin_name self.warn(f"Disabling plug-in {plugin_name!r} due to previous exception") plugin._coverage_enabled = False disposition.trace = False @functools.lru_cache(maxsize=None) # pylint: disable=method-cache-max-size-none def cached_mapped_file(self, filename: str) -> str: """A locally cached version of file names mapped through file_mapper.""" return self.file_mapper(filename) def mapped_file_dict(self, d: Mapping[str, T]) -> dict[str, T]: """Return a dict like d, but with keys modified by file_mapper.""" # The call to list(items()) ensures that the GIL protects the dictionary # iterator against concurrent modifications by tracers running # in other threads. We try three times in case of concurrent # access, hoping to get a clean copy. runtime_err = None for _ in range(3): # pragma: part covered try: items = list(d.items()) except RuntimeError as ex: # pragma: cant happen runtime_err = ex else: break else: # pragma: cant happen assert isinstance(runtime_err, Exception) raise runtime_err return {self.cached_mapped_file(k): v for k, v in items if v} def plugin_was_disabled(self, plugin: CoveragePlugin) -> None: """Record that `plugin` was disabled during the run.""" self.disabled_plugins.add(plugin._coverage_plugin_name) def flush_data(self) -> bool: """Save the collected data to our associated `CoverageData`. Data may have also been saved along the way. This forces the last of the data to be saved. Returns True if there was data to save, False if not. """ if not self._activity(): return False if self.branch: if self.packed_arcs: # Unpack the line number pairs packed into integers. See # tracer.c:CTracer_record_pair for the C code that creates # these packed ints. arc_data: dict[str, list[TArc]] = {} packed_data = cast(Dict[str, Set[int]], self.data) # The list() here and in the inner loop are to get a clean copy # even as tracers are continuing to add data. 
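# mapped_file_dict() above defends against tracers mutating the data dict in
# other threads: list(d.items()) is retried a few times if iteration raises
# RuntimeError.  The pattern on its own:

def snapshot_items_sketch(d: dict, tries: int = 3) -> list:
    """Take a clean copy of a dict that other threads may be mutating."""
    last_err = None
    for _ in range(tries):
        try:
            return list(d.items())          # fails if d changes size mid-iteration
        except RuntimeError as exc:
            last_err = exc
    raise last_err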
for fname, packeds in list(packed_data.items()): tuples = [] for packed in list(packeds): l1 = packed & 0xFFFFF l2 = (packed & (0xFFFFF << 20)) >> 20 if packed & (1 << 40): l1 *= -1 if packed & (1 << 41): l2 *= -1 tuples.append((l1, l2)) arc_data[fname] = tuples else: arc_data = cast(Dict[str, List[TArc]], self.data) self.covdata.add_arcs(self.mapped_file_dict(arc_data)) else: line_data = cast(Dict[str, Set[int]], self.data) self.covdata.add_lines(self.mapped_file_dict(line_data)) file_tracers = { k: v for k, v in self.file_tracers.items() if v not in self.disabled_plugins } self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers)) self._clear_data() return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/config.py0000644000175100001770000005266200000000000017554 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Config file for coverage.py""" from __future__ import annotations import collections import configparser import copy import os import os.path import re from typing import ( Any, Callable, Iterable, Union, ) from coverage.exceptions import ConfigError from coverage.misc import isolate_module, human_sorted_items, substitute_variables from coverage.tomlconfig import TomlConfigParser, TomlDecodeError from coverage.types import ( TConfigurable, TConfigSectionIn, TConfigValueIn, TConfigSectionOut, TConfigValueOut, TPluginConfig, ) os = isolate_module(os) class HandyConfigParser(configparser.ConfigParser): """Our specialization of ConfigParser.""" def __init__(self, our_file: bool) -> None: """Create the HandyConfigParser. `our_file` is True if this config file is specifically for coverage, False if we are examining another config file (tox.ini, setup.cfg) for possible settings. """ super().__init__(interpolation=None) self.section_prefixes = ["coverage:"] if our_file: self.section_prefixes.append("") def read( # type: ignore[override] self, filenames: Iterable[str], encoding_unused: str | None = None, ) -> list[str]: """Read a file name as UTF-8 configuration data.""" return super().read(filenames, encoding="utf-8") def real_section(self, section: str) -> str | None: """Get the actual name of a section.""" for section_prefix in self.section_prefixes: real_section = section_prefix + section has = super().has_section(real_section) if has: return real_section return None def has_option(self, section: str, option: str) -> bool: real_section = self.real_section(section) if real_section is not None: return super().has_option(real_section, option) return False def has_section(self, section: str) -> bool: return bool(self.real_section(section)) def options(self, section: str) -> list[str]: real_section = self.real_section(section) if real_section is not None: return super().options(real_section) raise ConfigError(f"No section: {section!r}") def get_section(self, section: str) -> TConfigSectionOut: """Get the contents of a section, as a dictionary.""" d: dict[str, TConfigValueOut] = {} for opt in self.options(section): d[opt] = self.get(section, opt) return d def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # type: ignore """Get a value, replacing environment variables also. The arguments are the same as `ConfigParser.get`, but in the found value, ``$WORD`` or ``${WORD}`` are replaced by the value of the environment variable ``WORD``. 
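# The unpacking loop above reverses a packing scheme implemented in C
# (tracer.c:CTracer_record_pair): 20 bits for each line number, with bits 40
# and 41 flagging negative values.  A round-trip sketch -- the packer here is
# inferred from the unpacker and may not match the C code exactly:

def pack_arc_sketch(l1: int, l2: int) -> int:
    packed = (abs(l1) & 0xFFFFF) | ((abs(l2) & 0xFFFFF) << 20)
    if l1 < 0:
        packed |= 1 << 40
    if l2 < 0:
        packed |= 1 << 41
    return packed

def unpack_arc_sketch(packed: int) -> tuple[int, int]:
    l1 = packed & 0xFFFFF
    l2 = (packed & (0xFFFFF << 20)) >> 20
    if packed & (1 << 40):
        l1 *= -1
    if packed & (1 << 41):
        l2 *= -1
    return (l1, l2)

assert unpack_arc_sketch(pack_arc_sketch(-1, 7)) == (-1, 7)
assert unpack_arc_sketch(pack_arc_sketch(42, 43)) == (42, 43)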
Returns the finished value. """ for section_prefix in self.section_prefixes: real_section = section_prefix + section if super().has_option(real_section, option): break else: raise ConfigError(f"No option {option!r} in section: {section!r}") v: str = super().get(real_section, option, *args, **kwargs) v = substitute_variables(v, os.environ) return v def getlist(self, section: str, option: str) -> list[str]: """Read a list of strings. The value of `section` and `option` is treated as a comma- and newline- separated list of strings. Each value is stripped of white space. Returns the list of strings. """ value_list = self.get(section, option) values = [] for value_line in value_list.split("\n"): for value in value_line.split(","): value = value.strip() if value: values.append(value) return values def getregexlist(self, section: str, option: str) -> list[str]: """Read a list of full-line regexes. The value of `section` and `option` is treated as a newline-separated list of regexes. Each value is stripped of white space. Returns the list of strings. """ line_list = self.get(section, option) value_list = [] for value in line_list.splitlines(): value = value.strip() try: re.compile(value) except re.error as e: raise ConfigError( f"Invalid [{section}].{option} value {value!r}: {e}", ) from e if value: value_list.append(value) return value_list TConfigParser = Union[HandyConfigParser, TomlConfigParser] # The default line exclusion regexes. DEFAULT_EXCLUDE = [ r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)", ] # The default partial branch regexes, to be modified by the user. DEFAULT_PARTIAL = [ r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)", ] # The default partial branch regexes, based on Python semantics. # These are any Python branching constructs that can't actually execute all # their branches. DEFAULT_PARTIAL_ALWAYS = [ "while (True|1|False|0):", "if (True|1|False|0):", ] class CoverageConfig(TConfigurable, TPluginConfig): """Coverage.py configuration. The attributes of this class are the various settings that control the operation of coverage.py. """ # pylint: disable=too-many-instance-attributes def __init__(self) -> None: """Initialize the configuration attributes to their defaults.""" # Metadata about the config. # We tried to read these config files. self.attempted_config_files: list[str] = [] # We did read these config files, but maybe didn't find any content for us. self.config_files_read: list[str] = [] # The file that gave us our configuration. 
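# getlist() above accepts values separated by commas, newlines, or both, so
# several config spellings are equivalent.  A stand-alone sketch of the
# splitting rule:

def split_config_list_sketch(value: str) -> list[str]:
    values = []
    for line in value.split("\n"):
        for item in line.split(","):
            item = item.strip()
            if item:
                values.append(item)
    return values

assert split_config_list_sketch("alpha, beta\n  gamma,\n") == ["alpha", "beta", "gamma"]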
self.config_file: str | None = None self._config_contents: bytes | None = None # Defaults for [run] and [report] self._include = None self._omit = None # Defaults for [run] self.branch = False self.command_line: str | None = None self.concurrency: list[str] = [] self.context: str | None = None self.cover_pylib = False self.data_file = ".coverage" self.debug: list[str] = [] self.debug_file: str | None = None self.disable_warnings: list[str] = [] self.dynamic_context: str | None = None self.parallel = False self.plugins: list[str] = [] self.relative_files = False self.run_include: list[str] = [] self.run_omit: list[str] = [] self.sigterm = False self.source: list[str] | None = None self.source_pkgs: list[str] = [] self.timid = False self._crash: str | None = None # Defaults for [report] self.exclude_list = DEFAULT_EXCLUDE[:] self.exclude_also: list[str] = [] self.fail_under = 0.0 self.format: str | None = None self.ignore_errors = False self.include_namespace_packages = False self.report_include: list[str] | None = None self.report_omit: list[str] | None = None self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] self.partial_list = DEFAULT_PARTIAL[:] self.precision = 0 self.report_contexts: list[str] | None = None self.show_missing = False self.skip_covered = False self.skip_empty = False self.sort: str | None = None # Defaults for [html] self.extra_css: str | None = None self.html_dir = "htmlcov" self.html_skip_covered: bool | None = None self.html_skip_empty: bool | None = None self.html_title = "Coverage report" self.show_contexts = False # Defaults for [xml] self.xml_output = "coverage.xml" self.xml_package_depth = 99 # Defaults for [json] self.json_output = "coverage.json" self.json_pretty_print = False self.json_show_contexts = False # Defaults for [lcov] self.lcov_output = "coverage.lcov" # Defaults for [paths] self.paths: dict[str, list[str]] = {} # Options for plugins self.plugin_options: dict[str, TConfigSectionOut] = {} MUST_BE_LIST = { "debug", "concurrency", "plugins", "report_omit", "report_include", "run_omit", "run_include", } def from_args(self, **kwargs: TConfigValueIn) -> None: """Read config values from `kwargs`.""" for k, v in kwargs.items(): if v is not None: if k in self.MUST_BE_LIST and isinstance(v, str): v = [v] setattr(self, k, v) def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool) -> bool: """Read configuration from a .rc file. `filename` is a file name to read. `our_file` is True if this config file is specifically for coverage, False if we are examining another config file (tox.ini, setup.cfg) for possible settings. Returns True or False, whether the file could be read, and it had some coverage.py settings in it. """ _, ext = os.path.splitext(filename) cp: TConfigParser if ext == ".toml": cp = TomlConfigParser(our_file) else: cp = HandyConfigParser(our_file) self.attempted_config_files.append(filename) try: files_read = cp.read(filename) except (configparser.Error, TomlDecodeError) as err: raise ConfigError(f"Couldn't read config file {filename}: {err}") from err if not files_read: return False self.config_files_read.extend(map(os.path.abspath, files_read)) any_set = False try: for option_spec in self.CONFIG_FILE_OPTIONS: was_set = self._set_attr_from_config_option(cp, *option_spec) if was_set: any_set = True except ValueError as err: raise ConfigError(f"Couldn't read config file {filename}: {err}") from err # Check that there are no unrecognized options. 
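# from_args() above quietly wraps a bare string in a list for any option in
# MUST_BE_LIST, which is why the Coverage() constructor accepts either
# omit="*/tests/*" or omit=["*/tests/*"].  A sketch of that normalization,
# with a trimmed-down copy of MUST_BE_LIST:

MUST_BE_LIST_SKETCH = {"debug", "concurrency", "plugins", "run_omit", "run_include"}

def normalize_kwargs_sketch(kwargs: dict) -> dict:
    out = {}
    for name, value in kwargs.items():
        if value is None:
            continue                        # None means "not specified": keep the default
        if name in MUST_BE_LIST_SKETCH and isinstance(value, str):
            value = [value]
        out[name] = value
    return out

assert normalize_kwargs_sketch({"concurrency": "gevent", "branch": None}) == {"concurrency": ["gevent"]}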
all_options = collections.defaultdict(set) for option_spec in self.CONFIG_FILE_OPTIONS: section, option = option_spec[1].split(":") all_options[section].add(option) for section, options in all_options.items(): real_section = cp.real_section(section) if real_section: for unknown in set(cp.options(section)) - options: warn( "Unrecognized option '[{}] {}=' in config file {}".format( real_section, unknown, filename, ), ) # [paths] is special if cp.has_section("paths"): for option in cp.options("paths"): self.paths[option] = cp.getlist("paths", option) any_set = True # plugins can have options for plugin in self.plugins: if cp.has_section(plugin): self.plugin_options[plugin] = cp.get_section(plugin) any_set = True # Was this file used as a config file? If it's specifically our file, # then it was used. If we're piggybacking on someone else's file, # then it was only used if we found some settings in it. if our_file: used = True else: used = any_set if used: self.config_file = os.path.abspath(filename) with open(filename, "rb") as f: self._config_contents = f.read() return used def copy(self) -> CoverageConfig: """Return a copy of the configuration.""" return copy.deepcopy(self) CONCURRENCY_CHOICES = {"thread", "gevent", "greenlet", "eventlet", "multiprocessing"} CONFIG_FILE_OPTIONS = [ # These are *args for _set_attr_from_config_option: # (attr, where, type_="") # # attr is the attribute to set on the CoverageConfig object. # where is the section:name to read from the configuration file. # type_ is the optional type to apply, by using .getTYPE to read the # configuration value from the file. # [run] ("branch", "run:branch", "boolean"), ("command_line", "run:command_line"), ("concurrency", "run:concurrency", "list"), ("context", "run:context"), ("cover_pylib", "run:cover_pylib", "boolean"), ("data_file", "run:data_file"), ("debug", "run:debug", "list"), ("debug_file", "run:debug_file"), ("disable_warnings", "run:disable_warnings", "list"), ("dynamic_context", "run:dynamic_context"), ("parallel", "run:parallel", "boolean"), ("plugins", "run:plugins", "list"), ("relative_files", "run:relative_files", "boolean"), ("run_include", "run:include", "list"), ("run_omit", "run:omit", "list"), ("sigterm", "run:sigterm", "boolean"), ("source", "run:source", "list"), ("source_pkgs", "run:source_pkgs", "list"), ("timid", "run:timid", "boolean"), ("_crash", "run:_crash"), # [report] ("exclude_list", "report:exclude_lines", "regexlist"), ("exclude_also", "report:exclude_also", "regexlist"), ("fail_under", "report:fail_under", "float"), ("format", "report:format"), ("ignore_errors", "report:ignore_errors", "boolean"), ("include_namespace_packages", "report:include_namespace_packages", "boolean"), ("partial_always_list", "report:partial_branches_always", "regexlist"), ("partial_list", "report:partial_branches", "regexlist"), ("precision", "report:precision", "int"), ("report_contexts", "report:contexts", "list"), ("report_include", "report:include", "list"), ("report_omit", "report:omit", "list"), ("show_missing", "report:show_missing", "boolean"), ("skip_covered", "report:skip_covered", "boolean"), ("skip_empty", "report:skip_empty", "boolean"), ("sort", "report:sort"), # [html] ("extra_css", "html:extra_css"), ("html_dir", "html:directory"), ("html_skip_covered", "html:skip_covered", "boolean"), ("html_skip_empty", "html:skip_empty", "boolean"), ("html_title", "html:title"), ("show_contexts", "html:show_contexts", "boolean"), # [xml] ("xml_output", "xml:output"), ("xml_package_depth", "xml:package_depth", 
"int"), # [json] ("json_output", "json:output"), ("json_pretty_print", "json:pretty_print", "boolean"), ("json_show_contexts", "json:show_contexts", "boolean"), # [lcov] ("lcov_output", "lcov:output"), ] def _set_attr_from_config_option( self, cp: TConfigParser, attr: str, where: str, type_: str = "", ) -> bool: """Set an attribute on self if it exists in the ConfigParser. Returns True if the attribute was set. """ section, option = where.split(":") if cp.has_option(section, option): method = getattr(cp, "get" + type_) setattr(self, attr, method(section, option)) return True return False def get_plugin_options(self, plugin: str) -> TConfigSectionOut: """Get a dictionary of options for the plugin named `plugin`.""" return self.plugin_options.get(plugin, {}) def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with `"run:branch"`. `value` is the new value for the option. """ # Special-cased options. if option_name == "paths": self.paths = value # type: ignore[assignment] return # Check all the hard-coded options. for option_spec in self.CONFIG_FILE_OPTIONS: attr, where = option_spec[:2] if where == option_name: setattr(self, attr, value) return # See if it's a plugin option. plugin_name, _, key = option_name.partition(":") if key and plugin_name in self.plugins: self.plugin_options.setdefault(plugin_name, {})[key] = value # type: ignore[index] return # If we get here, we didn't find the option. raise ConfigError(f"No such option: {option_name!r}") def get_option(self, option_name: str) -> TConfigValueOut | None: """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with `"run:branch"`. Returns the value of the option. """ # Special-cased options. if option_name == "paths": return self.paths # type: ignore[return-value] # Check all the hard-coded options. for option_spec in self.CONFIG_FILE_OPTIONS: attr, where = option_spec[:2] if where == option_name: return getattr(self, attr) # type: ignore[no-any-return] # See if it's a plugin option. plugin_name, _, key = option_name.partition(":") if key and plugin_name in self.plugins: return self.plugin_options.get(plugin_name, {}).get(key) # If we get here, we didn't find the option. raise ConfigError(f"No such option: {option_name!r}") def post_process_file(self, path: str) -> str: """Make final adjustments to a file path to make it usable.""" return os.path.expanduser(path) def post_process(self) -> None: """Make final adjustments to settings to make them usable.""" self.data_file = self.post_process_file(self.data_file) self.html_dir = self.post_process_file(self.html_dir) self.xml_output = self.post_process_file(self.xml_output) self.paths = { k: [self.post_process_file(f) for f in v] for k, v in self.paths.items() } self.exclude_list += self.exclude_also def debug_info(self) -> list[tuple[str, Any]]: """Make a list of (name, value) pairs for writing debug info.""" return human_sorted_items( (k, v) for k, v in self.__dict__.items() if not k.startswith("_") ) def config_files_to_try(config_file: bool | str) -> list[tuple[str, bool, bool]]: """What config files should we try to read? 
Returns a list of tuples: (filename, is_our_file, was_file_specified) """ # Some API users were specifying ".coveragerc" to mean the same as # True, so make it so. if config_file == ".coveragerc": config_file = True specified_file = (config_file is not True) if not specified_file: # No file was specified. Check COVERAGE_RCFILE. rcfile = os.getenv("COVERAGE_RCFILE") if rcfile: config_file = rcfile specified_file = True if not specified_file: # Still no file specified. Default to .coveragerc config_file = ".coveragerc" assert isinstance(config_file, str) files_to_try = [ (config_file, True, specified_file), ("setup.cfg", False, False), ("tox.ini", False, False), ("pyproject.toml", False, False), ] return files_to_try def read_coverage_config( config_file: bool | str, warn: Callable[[str], None], **kwargs: TConfigValueIn, ) -> CoverageConfig: """Read the coverage.py configuration. Arguments: config_file: a boolean or string, see the `Coverage` class for the tricky details. warn: a function to issue warnings. all others: keyword arguments from the `Coverage` class, used for setting values in the configuration. Returns: config: config is a CoverageConfig object read from the appropriate configuration file. """ # Build the configuration from a number of sources: # 1) defaults: config = CoverageConfig() # 2) from a file: if config_file: files_to_try = config_files_to_try(config_file) for fname, our_file, specified_file in files_to_try: config_read = config.from_file(fname, warn, our_file=our_file) if config_read: break if specified_file: raise ConfigError(f"Couldn't read {fname!r} as a config file") # 3) from environment variables: env_data_file = os.getenv("COVERAGE_FILE") if env_data_file: config.data_file = env_data_file # $set_env.py: COVERAGE_DEBUG - Debug options: https://coverage.rtfd.io/cmd.html#debug debugs = os.getenv("COVERAGE_DEBUG") if debugs: config.debug.extend(d.strip() for d in debugs.split(",")) # 4) from constructor arguments: config.from_args(**kwargs) # Once all the config has been collected, there's a little post-processing # to do. config.post_process() return config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/context.py0000644000175100001770000000462700000000000017771 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Determine contexts for coverage.py""" from __future__ import annotations from types import FrameType from typing import cast, Callable, Sequence def combine_context_switchers( context_switchers: Sequence[Callable[[FrameType], str | None]], ) -> Callable[[FrameType], str | None] | None: """Create a single context switcher from multiple switchers. `context_switchers` is a list of functions that take a frame as an argument and return a string to use as the new context label. Returns a function that composites `context_switchers` functions, or None if `context_switchers` is an empty list. When invoked, the combined switcher calls `context_switchers` one-by-one until a string is returned. The combined switcher returns None if all `context_switchers` return None. 
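# config_files_to_try() above encodes the lookup order: an explicitly named
# file (or COVERAGE_RCFILE) must exist, while the default ".coveragerc" and
# the shared setup.cfg / tox.ini / pyproject.toml fallbacks are all optional.
# A condensed sketch of that precedence:

import os

def files_to_try_sketch(config_file, environ=None):
    environ = os.environ if environ is None else environ
    if config_file == ".coveragerc":            # treated the same as True
        config_file = True
    specified = config_file is not True
    if not specified and environ.get("COVERAGE_RCFILE"):
        config_file, specified = environ["COVERAGE_RCFILE"], True
    if not specified:
        config_file = ".coveragerc"
    return [
        (config_file, True, specified),
        ("setup.cfg", False, False),
        ("tox.ini", False, False),
        ("pyproject.toml", False, False),
    ]

assert files_to_try_sketch(True, environ={})[0] == (".coveragerc", True, False)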
""" if not context_switchers: return None if len(context_switchers) == 1: return context_switchers[0] def should_start_context(frame: FrameType) -> str | None: """The combiner for multiple context switchers.""" for switcher in context_switchers: new_context = switcher(frame) if new_context is not None: return new_context return None return should_start_context def should_start_context_test_function(frame: FrameType) -> str | None: """Is this frame calling a test_* function?""" co_name = frame.f_code.co_name if co_name.startswith("test") or co_name == "runTest": return qualname_from_frame(frame) return None def qualname_from_frame(frame: FrameType) -> str | None: """Get a qualified name for the code running in `frame`.""" co = frame.f_code fname = co.co_name method = None if co.co_argcount and co.co_varnames[0] == "self": self = frame.f_locals.get("self", None) method = getattr(self, fname, None) if method is None: func = frame.f_globals.get(fname) if func is None: return None return cast(str, func.__module__ + "." + fname) func = getattr(method, "__func__", None) if func is None: cls = self.__class__ return cast(str, cls.__module__ + "." + cls.__name__ + "." + fname) return cast(str, func.__module__ + "." + func.__qualname__) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/control.py0000644000175100001770000014477200000000000017773 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Central control stuff for coverage.py.""" from __future__ import annotations import atexit import collections import contextlib import os import os.path import platform import signal import sys import threading import time import warnings from types import FrameType from typing import ( cast, Any, Callable, IO, Iterable, Iterator, List, ) from coverage import env from coverage.annotate import AnnotateReporter from coverage.collector import Collector, HAS_CTRACER from coverage.config import CoverageConfig, read_coverage_config from coverage.context import should_start_context_test_function, combine_context_switchers from coverage.data import CoverageData, combine_parallel_data from coverage.debug import ( DebugControl, NoDebugging, short_stack, write_formatted_info, relevant_environment_display, ) from coverage.disposition import disposition_debug_msg from coverage.exceptions import ConfigError, CoverageException, CoverageWarning, PluginError from coverage.files import PathAliases, abs_file, relative_filename, set_relative_directory from coverage.html import HtmlReporter from coverage.inorout import InOrOut from coverage.jsonreport import JsonReporter from coverage.lcovreport import LcovReporter from coverage.misc import bool_or_none, join_regex from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module from coverage.multiproc import patch_multiprocessing from coverage.plugin import FileReporter from coverage.plugin_support import Plugins from coverage.python import PythonFileReporter from coverage.report import SummaryReporter from coverage.report_core import render_report from coverage.results import Analysis from coverage.types import ( FilePath, TConfigurable, TConfigSectionIn, TConfigValueIn, TConfigValueOut, TFileDisposition, TLineNo, TMorf, ) from coverage.xmlreport import XmlReporter os = isolate_module(os) @contextlib.contextmanager def override_config(cov: Coverage, 
**kwargs: TConfigValueIn) -> Iterator[None]: """Temporarily tweak the configuration of `cov`. The arguments are applied to `cov.config` with the `from_args` method. At the end of the with-statement, the old configuration is restored. """ original_config = cov.config cov.config = cov.config.copy() try: cov.config.from_args(**kwargs) yield finally: cov.config = original_config DEFAULT_DATAFILE = DefaultValue("MISSING") _DEFAULT_DATAFILE = DEFAULT_DATAFILE # Just in case, for backwards compatibility class Coverage(TConfigurable): """Programmatic access to coverage.py. To use:: from coverage import Coverage cov = Coverage() cov.start() #.. call your code .. cov.stop() cov.html_report(directory="covhtml") A context manager is available to do the same thing:: cov = Coverage() with cov.collect(): #.. call your code .. cov.html_report(directory="covhtml") Note: in keeping with Python custom, names starting with underscore are not part of the public API. They might stop working at any point. Please limit yourself to documented methods to avoid problems. Methods can raise any of the exceptions described in :ref:`api_exceptions`. """ # The stack of started Coverage instances. _instances: list[Coverage] = [] @classmethod def current(cls) -> Coverage | None: """Get the latest started `Coverage` instance, if any. Returns: a `Coverage` instance, or None. .. versionadded:: 5.0 """ if cls._instances: return cls._instances[-1] else: return None def __init__( # pylint: disable=too-many-arguments self, data_file: FilePath | DefaultValue | None = DEFAULT_DATAFILE, data_suffix: str | bool | None = None, cover_pylib: bool | None = None, auto_data: bool = False, timid: bool | None = None, branch: bool | None = None, config_file: FilePath | bool = True, source: Iterable[str] | None = None, source_pkgs: Iterable[str] | None = None, omit: str | Iterable[str] | None = None, include: str | Iterable[str] | None = None, debug: Iterable[str] | None = None, concurrency: str | Iterable[str] | None = None, check_preimported: bool = False, context: str | None = None, messages: bool = False, ) -> None: """ Many of these arguments duplicate and override values that can be provided in a configuration file. Parameters that are missing here will use values from the config file. `data_file` is the base name of the data file to use. The config value defaults to ".coverage". None can be provided to prevent writing a data file. `data_suffix` is appended (with a dot) to `data_file` to create the final file name. If `data_suffix` is simply True, then a suffix is created with the machine and process identity included. `cover_pylib` is a boolean determining whether Python code installed with the Python interpreter is measured. This includes the Python standard library and any packages installed with the interpreter. If `auto_data` is true, then any existing data file will be read when coverage measurement starts, and data will be saved automatically when measurement stops. If `timid` is true, then a slower and simpler trace function will be used. This is important for some environments where manipulation of tracing functions breaks the faster trace function. If `branch` is true, then branch coverage will be measured in addition to the usual statement coverage. `config_file` determines what configuration file to read: * If it is ".coveragerc", it is interpreted as if it were True, for backward compatibility. * If it is a string, it is the name of the file to read. If the file can't be read, it is an error. 
* If it is True, then a few standard files names are tried (".coveragerc", "setup.cfg", "tox.ini"). It is not an error for these files to not be found. * If it is False, then no configuration file is read. `source` is a list of file paths or package names. Only code located in the trees indicated by the file paths or package names will be measured. `source_pkgs` is a list of package names. It works the same as `source`, but can be used to name packages where the name can also be interpreted as a file path. `include` and `omit` are lists of file name patterns. Files that match `include` will be measured, files that match `omit` will not. Each will also accept a single string argument. `debug` is a list of strings indicating what debugging information is desired. `concurrency` is a string indicating the concurrency library being used in the measured code. Without this, coverage.py will get incorrect results if these libraries are in use. Valid strings are "greenlet", "eventlet", "gevent", "multiprocessing", or "thread" (the default). This can also be a list of these strings. If `check_preimported` is true, then when coverage is started, the already-imported files will be checked to see if they should be measured by coverage. Importing measured files before coverage is started can mean that code is missed. `context` is a string to use as the :ref:`static context ` label for collected data. If `messages` is true, some messages will be printed to stdout indicating what is happening. .. versionadded:: 4.0 The `concurrency` parameter. .. versionadded:: 4.2 The `concurrency` parameter can now be a list of strings. .. versionadded:: 5.0 The `check_preimported` and `context` parameters. .. versionadded:: 5.3 The `source_pkgs` parameter. .. versionadded:: 6.0 The `messages` parameter. """ # Start self.config as a usable default configuration. It will soon be # replaced with the real configuration. self.config = CoverageConfig() # data_file=None means no disk file at all. data_file missing means # use the value from the config file. self._no_disk = data_file is None if isinstance(data_file, DefaultValue): data_file = None if data_file is not None: data_file = os.fspath(data_file) # This is injectable by tests. self._debug_file: IO[str] | None = None self._auto_load = self._auto_save = auto_data self._data_suffix_specified = data_suffix # Is it ok for no data to be collected? self._warn_no_data = True self._warn_unimported_source = True self._warn_preimported_source = check_preimported self._no_warn_slugs: list[str] = [] self._messages = messages # A record of all the warnings that have been issued. self._warnings: list[str] = [] # Other instance attributes, set with placebos or placeholders. # More useful objects will be created later. self._debug: DebugControl = NoDebugging() self._inorout: InOrOut | None = None self._plugins: Plugins = Plugins() self._data: CoverageData | None = None self._collector: Collector | None = None self._metacov = False self._file_mapper: Callable[[str], str] = abs_file self._data_suffix = self._run_suffix = None self._exclude_re: dict[str, str] = {} self._old_sigterm: Callable[[int, FrameType | None], Any] | None = None # State machine variables: # Have we initialized everything? self._inited = False self._inited_for_start = False # Have we started collecting and not stopped it? self._started = False # Should we write the debug output? self._should_write_debug = True # Build our configuration from a number of sources. 
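# The constructor parameters documented above are the ones most users reach
# for.  A usage sketch; the package name "mypkg" is hypothetical:

from coverage import Coverage

cov = Coverage(
    branch=True,                 # measure branch coverage as well as statements
    source=["mypkg"],            # hypothetical: only measure code in this package
    omit=["*/tests/*"],          # a single string would also be accepted
    concurrency="thread",        # the default, listed here for illustration
    context="unit",              # static context label recorded with the data
    messages=True,               # print progress messages to stdout
)
cov.start()
# ... exercise the code under measurement ...
cov.stop()
cov.html_report(directory="covhtml")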
if not isinstance(config_file, bool): config_file = os.fspath(config_file) self.config = read_coverage_config( config_file=config_file, warn=self._warn, data_file=data_file, cover_pylib=cover_pylib, timid=timid, branch=branch, parallel=bool_or_none(data_suffix), source=source, source_pkgs=source_pkgs, run_omit=omit, run_include=include, debug=debug, report_omit=omit, report_include=include, concurrency=concurrency, context=context, ) # If we have sub-process measurement happening automatically, then we # want any explicit creation of a Coverage object to mean, this process # is already coverage-aware, so don't auto-measure it. By now, the # auto-creation of a Coverage object has already happened. But we can # find it and tell it not to save its data. if not env.METACOV: _prevent_sub_process_measurement() def _init(self) -> None: """Set all the initial state. This is called by the public methods to initialize state. This lets us construct a :class:`Coverage` object, then tweak its state before this function is called. """ if self._inited: return self._inited = True # Create and configure the debugging controller. self._debug = DebugControl(self.config.debug, self._debug_file, self.config.debug_file) if self._debug.should("process"): self._debug.write("Coverage._init") if "multiprocessing" in (self.config.concurrency or ()): # Multi-processing uses parallel for the subprocesses, so also use # it for the main process. self.config.parallel = True # _exclude_re is a dict that maps exclusion list names to compiled regexes. self._exclude_re = {} set_relative_directory() if self.config.relative_files: self._file_mapper = relative_filename # Load plugins self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug) # Run configuring plugins. for plugin in self._plugins.configurers: # We need an object with set_option and get_option. Either self or # self.config will do. Choosing randomly stops people from doing # other things with those objects, against the public API. Yes, # this is a bit childish. :) plugin.configure([self, self.config][int(time.time()) % 2]) def _post_init(self) -> None: """Stuff to do after everything is initialized.""" if self._should_write_debug: self._should_write_debug = False self._write_startup_debug() # "[run] _crash" will raise an exception if the value is close by in # the call stack, for testing error handling. if self.config._crash and self.config._crash in short_stack(): raise RuntimeError(f"Crashing because called by {self.config._crash}") def _write_startup_debug(self) -> None: """Write out debug info at startup if needed.""" wrote_any = False with self._debug.without_callers(): if self._debug.should("config"): config_info = self.config.debug_info() write_formatted_info(self._debug.write, "config", config_info) wrote_any = True if self._debug.should("sys"): write_formatted_info(self._debug.write, "sys", self.sys_info()) for plugin in self._plugins: header = "sys: " + plugin._coverage_plugin_name info = plugin.sys_info() write_formatted_info(self._debug.write, header, info) wrote_any = True if self._debug.should("pybehave"): write_formatted_info(self._debug.write, "pybehave", env.debug_info()) wrote_any = True if wrote_any: write_formatted_info(self._debug.write, "end", ()) def _should_trace(self, filename: str, frame: FrameType) -> TFileDisposition: """Decide whether to trace execution in `filename`. Calls `_should_trace_internal`, and returns the FileDisposition. 
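# _init() above hands each configuring plugin an object exposing
# get_option()/set_option().  A sketch of such a plugin, following the
# documented CoveragePlugin.configure() hook; the extra exclusion regex is
# just an example:

import coverage

class ExtraExcludesPlugin(coverage.CoveragePlugin):
    def configure(self, config):
        exclude_lines = list(config.get_option("report:exclude_lines"))
        exclude_lines.append(r"if TYPE_CHECKING:")
        config.set_option("report:exclude_lines", exclude_lines)

def coverage_init(reg, options):
    reg.add_configurer(ExtraExcludesPlugin())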
""" assert self._inorout is not None disp = self._inorout.should_trace(filename, frame) if self._debug.should("trace"): self._debug.write(disposition_debug_msg(disp)) return disp def _check_include_omit_etc(self, filename: str, frame: FrameType) -> bool: """Check a file name against the include/omit/etc, rules, verbosely. Returns a boolean: True if the file should be traced, False if not. """ assert self._inorout is not None reason = self._inorout.check_include_omit_etc(filename, frame) if self._debug.should("trace"): if not reason: msg = f"Including {filename!r}" else: msg = f"Not including {filename!r}: {reason}" self._debug.write(msg) return not reason def _warn(self, msg: str, slug: str | None = None, once: bool = False) -> None: """Use `msg` as a warning. For warning suppression, use `slug` as the shorthand. If `once` is true, only show this warning once (determined by the slug.) """ if not self._no_warn_slugs: self._no_warn_slugs = list(self.config.disable_warnings) if slug in self._no_warn_slugs: # Don't issue the warning return self._warnings.append(msg) if slug: msg = f"{msg} ({slug})" if self._debug.should("pid"): msg = f"[{os.getpid()}] {msg}" warnings.warn(msg, category=CoverageWarning, stacklevel=2) if once: assert slug is not None self._no_warn_slugs.append(slug) def _message(self, msg: str) -> None: """Write a message to the user, if configured to do so.""" if self._messages: print(msg) def get_option(self, option_name: str) -> TConfigValueOut | None: """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with `"run:branch"`. Returns the value of the option. The type depends on the option selected. As a special case, an `option_name` of ``"paths"`` will return an dictionary with the entire ``[paths]`` section value. .. versionadded:: 4.0 """ return self.config.get_option(option_name) def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: """Set an option in the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with ``"run:branch"``. `value` is the new value for the option. This should be an appropriate Python value. For example, use True for booleans, not the string ``"True"``. As an example, calling: .. code-block:: python cov.set_option("run:branch", True) has the same effect as this configuration file: .. code-block:: ini [run] branch = True As a special case, an `option_name` of ``"paths"`` will replace the entire ``[paths]`` section. The value should be a dictionary. .. versionadded:: 4.0 """ self.config.set_option(option_name, value) def load(self) -> None: """Load previously-collected coverage data from the data file.""" self._init() if self._collector is not None: self._collector.reset() should_skip = self.config.parallel and not os.path.exists(self.config.data_file) if not should_skip: self._init_data(suffix=None) self._post_init() if not should_skip: assert self._data is not None self._data.read() def _init_for_start(self) -> None: """Initialization for start()""" # Construct the collector. 
concurrency: list[str] = self.config.concurrency or [] if "multiprocessing" in concurrency: if self.config.config_file is None: raise ConfigError("multiprocessing requires a configuration file") patch_multiprocessing(rcfile=self.config.config_file) dycon = self.config.dynamic_context if not dycon or dycon == "none": context_switchers = [] elif dycon == "test_function": context_switchers = [should_start_context_test_function] else: raise ConfigError(f"Don't understand dynamic_context setting: {dycon!r}") context_switchers.extend( plugin.dynamic_context for plugin in self._plugins.context_switchers ) should_start_context = combine_context_switchers(context_switchers) self._collector = Collector( should_trace=self._should_trace, check_include=self._check_include_omit_etc, should_start_context=should_start_context, file_mapper=self._file_mapper, timid=self.config.timid, branch=self.config.branch, warn=self._warn, concurrency=concurrency, metacov=self._metacov, ) suffix = self._data_suffix_specified if suffix: if not isinstance(suffix, str): # if data_suffix=True, use .machinename.pid.random suffix = True elif self.config.parallel: if suffix is None: suffix = True elif not isinstance(suffix, str): suffix = bool(suffix) else: suffix = None self._init_data(suffix) assert self._data is not None self._collector.use_data(self._data, self.config.context) # Early warning if we aren't going to be able to support plugins. if self._plugins.file_tracers and not self._collector.supports_plugins: self._warn( "Plugin file tracers ({}) aren't supported with {}".format( ", ".join( plugin._coverage_plugin_name for plugin in self._plugins.file_tracers ), self._collector.tracer_name(), ), ) for plugin in self._plugins.file_tracers: plugin._coverage_enabled = False # Create the file classifying substructure. self._inorout = InOrOut( config=self.config, warn=self._warn, debug=(self._debug if self._debug.should("trace") else None), include_namespace_packages=self.config.include_namespace_packages, ) self._inorout.plugins = self._plugins self._inorout.disp_class = self._collector.file_disposition_class # It's useful to write debug info after initing for start. self._should_write_debug = True # Register our clean-up handlers. atexit.register(self._atexit) if self.config.sigterm: is_main = (threading.current_thread() == threading.main_thread()) if is_main and not env.WINDOWS: # The Python docs seem to imply that SIGTERM works uniformly even # on Windows, but that's not my experience, and this agrees: # https://stackoverflow.com/questions/35772001/x/35792192#35792192 self._old_sigterm = signal.signal( # type: ignore[assignment] signal.SIGTERM, self._on_sigterm, ) def _init_data(self, suffix: str | bool | None) -> None: """Create a data file if we don't have one yet.""" if self._data is None: # Create the data file. We do this at construction time so that the # data file will be written into the directory where the process # started rather than wherever the process eventually chdir'd to. ensure_dir_for_file(self.config.data_file) self._data = CoverageData( basename=self.config.data_file, suffix=suffix, warn=self._warn, debug=self._debug, no_disk=self._no_disk, ) def start(self) -> None: """Start measuring code coverage. Coverage measurement is only collected in functions called after :meth:`start` is invoked. Statements in the same scope as :meth:`start` won't be measured. Once you invoke :meth:`start`, you must also call :meth:`stop` eventually, or your process might not shut down cleanly. 
The :meth:`collect` method is a context manager to handle both starting and stopping collection. """ self._init() if not self._inited_for_start: self._inited_for_start = True self._init_for_start() self._post_init() assert self._collector is not None assert self._inorout is not None # Issue warnings for possible problems. self._inorout.warn_conflicting_settings() # See if we think some code that would eventually be measured has # already been imported. if self._warn_preimported_source: self._inorout.warn_already_imported_files() if self._auto_load: self.load() self._collector.start() self._started = True self._instances.append(self) def stop(self) -> None: """Stop measuring code coverage.""" if self._instances: if self._instances[-1] is self: self._instances.pop() if self._started: assert self._collector is not None self._collector.stop() self._started = False @contextlib.contextmanager def collect(self) -> Iterator[None]: """A context manager to start/stop coverage measurement collection. .. versionadded:: 7.3 """ self.start() try: yield finally: self.stop() def _atexit(self, event: str = "atexit") -> None: """Clean up on process shutdown.""" if self._debug.should("process"): self._debug.write(f"{event}: pid: {os.getpid()}, instance: {self!r}") if self._started: self.stop() if self._auto_save or event == "sigterm": self.save() def _on_sigterm(self, signum_unused: int, frame_unused: FrameType | None) -> None: """A handler for signal.SIGTERM.""" self._atexit("sigterm") # Statements after here won't be seen by metacov because we just wrote # the data, and are about to kill the process. signal.signal(signal.SIGTERM, self._old_sigterm) # pragma: not covered os.kill(os.getpid(), signal.SIGTERM) # pragma: not covered def erase(self) -> None: """Erase previously collected coverage data. This removes the in-memory data collected in this session as well as discarding the data file. """ self._init() self._post_init() if self._collector is not None: self._collector.reset() self._init_data(suffix=None) assert self._data is not None self._data.erase(parallel=self.config.parallel) self._data = None self._inited_for_start = False def switch_context(self, new_context: str) -> None: """Switch to a new dynamic context. `new_context` is a string to use as the :ref:`dynamic context ` label for collected data. If a :ref:`static context ` is in use, the static and dynamic context labels will be joined together with a pipe character. Coverage collection must be started already. .. versionadded:: 5.0 """ if not self._started: # pragma: part started raise CoverageException("Cannot switch context, coverage is not started") assert self._collector is not None if self._collector.should_start_context: self._warn("Conflicting dynamic contexts", slug="dynamic-conflict", once=True) self._collector.switch_context(new_context) def clear_exclude(self, which: str = "exclude") -> None: """Clear the exclude list.""" self._init() setattr(self.config, which + "_list", []) self._exclude_regex_stale() def exclude(self, regex: str, which: str = "exclude") -> None: """Exclude source lines from execution consideration. A number of lists of regular expressions are maintained. Each list selects lines that are treated differently during reporting. `which` determines which list is modified. The "exclude" list selects lines that are not considered executable at all. The "partial" list indicates lines with branches that are not taken. `regex` is a regular expression. The regex is added to the specified list. 
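# ---- Hedged usage sketch (illustration, not part of the original module) ----
# This shows the collect() context manager and switch_context() defined above.
# The context label "integration_tests" and the work() function are
# assumptions for illustration.
import coverage

def work():                                  # stand-in for code being measured
    return sum(range(10))

cov = coverage.Coverage()
with cov.collect():                          # start() on entry, stop() on exit
    cov.switch_context("integration_tests")  # label data collected from here on
    work()
cov.save()
# ---- end of sketch ----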
If any of the regexes in the list is found in a line, the line is marked for special treatment during reporting. """ self._init() excl_list = getattr(self.config, which + "_list") excl_list.append(regex) self._exclude_regex_stale() def _exclude_regex_stale(self) -> None: """Drop all the compiled exclusion regexes, a list was modified.""" self._exclude_re.clear() def _exclude_regex(self, which: str) -> str: """Return a regex string for the given exclusion list.""" if which not in self._exclude_re: excl_list = getattr(self.config, which + "_list") self._exclude_re[which] = join_regex(excl_list) return self._exclude_re[which] def get_exclude_list(self, which: str = "exclude") -> list[str]: """Return a list of excluded regex strings. `which` indicates which list is desired. See :meth:`exclude` for the lists that are available, and their meaning. """ self._init() return cast(List[str], getattr(self.config, which + "_list")) def save(self) -> None: """Save the collected coverage data to the data file.""" data = self.get_data() data.write() def _make_aliases(self) -> PathAliases: """Create a PathAliases from our configuration.""" aliases = PathAliases( debugfn=(self._debug.write if self._debug.should("pathmap") else None), relative=self.config.relative_files, ) for paths in self.config.paths.values(): result = paths[0] for pattern in paths[1:]: aliases.add(pattern, result) return aliases def combine( self, data_paths: Iterable[str] | None = None, strict: bool = False, keep: bool = False, ) -> None: """Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements. `data_paths` is a list of files or directories from which data should be combined. If no list is passed, then the data files from the directory indicated by the current data file (probably the current directory) will be combined. If `strict` is true, then it is an error to attempt to combine when there are no data files to combine. If `keep` is true, then original input data files won't be deleted. .. versionadded:: 4.0 The `data_paths` parameter. .. versionadded:: 4.3 The `strict` parameter. .. versionadded: 5.5 The `keep` parameter. """ self._init() self._init_data(suffix=None) self._post_init() self.get_data() assert self._data is not None combine_parallel_data( self._data, aliases=self._make_aliases(), data_paths=data_paths, strict=strict, keep=keep, message=self._message, ) def get_data(self) -> CoverageData: """Get the collected data. Also warn about various problems collecting data. Returns a :class:`coverage.CoverageData`, the collected coverage data. .. versionadded:: 4.0 """ self._init() self._init_data(suffix=None) self._post_init() if self._collector is not None: for plugin in self._plugins: if not plugin._coverage_enabled: self._collector.plugin_was_disabled(plugin) if self._collector.flush_data(): self._post_save_work() assert self._data is not None return self._data def _post_save_work(self) -> None: """After saving data, look for warnings, post-work, etc. Warn about things that should have happened but didn't. Look for un-executed files. """ assert self._data is not None assert self._inorout is not None # If there are still entries in the source_pkgs_unmatched list, # then we never encountered those packages. if self._warn_unimported_source: self._inorout.warn_unimported_source() # Find out if we got any data. 
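# ---- Hedged usage sketch (illustration, not part of the original module) ----
# The exclude()/get_exclude_list() API above maintains regex lists that shape
# reporting.  The regexes shown are only examples of project-specific markers;
# "pragma: no cover" itself is already in the default exclude list.
import coverage

cov = coverage.Coverage()
cov.exclude(r"#\s*pragma: no debug")             # assumed project-specific marker
cov.exclude(r"while True:", which="partial")     # branch here may never be "not taken"
print(cov.get_exclude_list())                    # the current "exclude" regexes
print(cov.get_exclude_list(which="partial"))     # the partial-branch regexes
# ---- end of sketch ----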
if not self._data and self._warn_no_data: self._warn("No data was collected.", slug="no-data-collected") # Touch all the files that could have executed, so that we can # mark completely un-executed files as 0% covered. file_paths = collections.defaultdict(list) for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files(): file_path = self._file_mapper(file_path) file_paths[plugin_name].append(file_path) for plugin_name, paths in file_paths.items(): self._data.touch_files(paths, plugin_name) # Backward compatibility with version 1. def analysis(self, morf: TMorf) -> tuple[str, list[TLineNo], list[TLineNo], str]: """Like `analysis2` but doesn't return excluded line numbers.""" f, s, _, m, mf = self.analysis2(morf) return f, s, m, mf def analysis2( self, morf: TMorf, ) -> tuple[str, list[TLineNo], list[TLineNo], list[TLineNo], str]: """Analyze a module. `morf` is a module or a file name. It will be analyzed to determine its coverage statistics. The return value is a 5-tuple: * The file name for the module. * A list of line numbers of executable statements. * A list of line numbers of excluded statements. * A list of line numbers of statements not run (missing from execution). * A readable formatted string of the missing line numbers. The analysis uses the source file itself and the current measured coverage data. """ analysis = self._analyze(morf) return ( analysis.filename, sorted(analysis.statements), sorted(analysis.excluded), sorted(analysis.missing), analysis.missing_formatted(), ) def _analyze(self, it: FileReporter | TMorf) -> Analysis: """Analyze a single morf or code unit. Returns an `Analysis` object. """ # All reporting comes through here, so do reporting initialization. self._init() self._post_init() data = self.get_data() if isinstance(it, FileReporter): fr = it else: fr = self._get_file_reporter(it) return Analysis(data, self.config.precision, fr, self._file_mapper) def _get_file_reporter(self, morf: TMorf) -> FileReporter: """Get a FileReporter for a module or file name.""" assert self._data is not None plugin = None file_reporter: str | FileReporter = "python" if isinstance(morf, str): mapped_morf = self._file_mapper(morf) plugin_name = self._data.file_tracer(mapped_morf) if plugin_name: plugin = self._plugins.get(plugin_name) if plugin: file_reporter = plugin.file_reporter(mapped_morf) if file_reporter is None: raise PluginError( "Plugin {!r} did not provide a file reporter for {!r}.".format( plugin._coverage_plugin_name, morf, ), ) if file_reporter == "python": file_reporter = PythonFileReporter(morf, self) assert isinstance(file_reporter, FileReporter) return file_reporter def _get_file_reporters(self, morfs: Iterable[TMorf] | None = None) -> list[FileReporter]: """Get a list of FileReporters for a list of modules or file names. For each module or file name in `morfs`, find a FileReporter. Return the list of FileReporters. If `morfs` is a single module or file name, this returns a list of one FileReporter. If `morfs` is empty or None, then the list of all files measured is used to find the FileReporters. """ assert self._data is not None if not morfs: morfs = self._data.measured_files() # Be sure we have a collection. 
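# ---- Hedged usage sketch (illustration, not part of the original module) ----
# analysis2() above returns a 5-tuple: (filename, executable statements,
# excluded lines, missing lines, formatted missing lines).  "mymodule.py" is
# an assumed file name that was previously measured.
import coverage

cov = coverage.Coverage()
cov.load()    # read previously collected data from the data file
fname, statements, excluded, missing, formatted = cov.analysis2("mymodule.py")
print(f"{fname}: {len(missing)} of {len(statements)} statements missing ({formatted})")
# ---- end of sketch ----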
if not isinstance(morfs, (list, tuple, set)): morfs = [morfs] # type: ignore[list-item] file_reporters = [self._get_file_reporter(morf) for morf in morfs] return file_reporters def _prepare_data_for_reporting(self) -> None: """Re-map data before reporting, to get implicit "combine" behavior.""" if self.config.paths: mapped_data = CoverageData(warn=self._warn, debug=self._debug, no_disk=True) if self._data is not None: mapped_data.update(self._data, aliases=self._make_aliases()) self._data = mapped_data def report( self, morfs: Iterable[TMorf] | None = None, show_missing: bool | None = None, ignore_errors: bool | None = None, file: IO[str] | None = None, omit: str | list[str] | None = None, include: str | list[str] | None = None, skip_covered: bool | None = None, contexts: list[str] | None = None, skip_empty: bool | None = None, precision: int | None = None, sort: str | None = None, output_format: str | None = None, ) -> float: """Write a textual summary report to `file`. Each module in `morfs` is listed, with counts of statements, executed statements, missing statements, and a list of lines missed. If `show_missing` is true, then details of which lines or branches are missing will be included in the report. If `ignore_errors` is true, then a failure while reporting a single file will not stop the entire report. `file` is a file-like object, suitable for writing. `output_format` determines the format, either "text" (the default), "markdown", or "total". `include` is a list of file name patterns. Files that match will be included in the report. Files matching `omit` will not be included in the report. If `skip_covered` is true, don't report on files with 100% coverage. If `skip_empty` is true, don't report on empty files (those that have no statements). `contexts` is a list of regular expression strings. Only data from :ref:`dynamic contexts ` that match one of those expressions (using :func:`re.search `) will be included in the report. `precision` is the number of digits to display after the decimal point for percentages. All of the arguments default to the settings read from the :ref:`configuration file `. Returns a float, the total percentage covered. .. versionadded:: 4.0 The `skip_covered` parameter. .. versionadded:: 5.0 The `contexts` and `skip_empty` parameters. .. versionadded:: 5.2 The `precision` parameter. .. versionadded:: 7.0 The `format` parameter. """ self._prepare_data_for_reporting() with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, show_missing=show_missing, skip_covered=skip_covered, report_contexts=contexts, skip_empty=skip_empty, precision=precision, sort=sort, format=output_format, ): reporter = SummaryReporter(self) return reporter.report(morfs, outfile=file) def annotate( self, morfs: Iterable[TMorf] | None = None, directory: str | None = None, ignore_errors: bool | None = None, omit: str | list[str] | None = None, include: str | list[str] | None = None, contexts: list[str] | None = None, ) -> None: """Annotate a list of modules. Each module in `morfs` is annotated. The source is written to a new file, named with a ",cover" suffix, with each line prefixed with a marker to indicate the coverage of the line. Covered lines have ">", excluded lines have "-", and missing lines have "!". See :meth:`report` for other arguments. 
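# ---- Hedged usage sketch (illustration, not part of the original module) ----
# report() above writes a textual summary and returns the total percentage
# covered.  The output file name, format, and option values are assumptions
# for illustration.
import coverage

cov = coverage.Coverage()
cov.load()
with open("coverage.md", "w") as out:
    total = cov.report(
        file=out,
        output_format="markdown",   # "text" (default), "markdown", or "total"
        show_missing=True,
        skip_covered=True,
        precision=1,
    )
print(f"total coverage: {total:.1f}%")
# ---- end of sketch ----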
""" self._prepare_data_for_reporting() with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, report_contexts=contexts, ): reporter = AnnotateReporter(self) reporter.report(morfs, directory=directory) def html_report( self, morfs: Iterable[TMorf] | None = None, directory: str | None = None, ignore_errors: bool | None = None, omit: str | list[str] | None = None, include: str | list[str] | None = None, extra_css: str | None = None, title: str | None = None, skip_covered: bool | None = None, show_contexts: bool | None = None, contexts: list[str] | None = None, skip_empty: bool | None = None, precision: int | None = None, ) -> float: """Generate an HTML report. The HTML is written to `directory`. The file "index.html" is the overview starting point, with links to more detailed pages for individual modules. `extra_css` is a path to a file of other CSS to apply on the page. It will be copied into the HTML directory. `title` is a text string (not HTML) to use as the title of the HTML report. See :meth:`report` for other arguments. Returns a float, the total percentage covered. .. note:: The HTML report files are generated incrementally based on the source files and coverage results. If you modify the report files, the changes will not be considered. You should be careful about changing the files in the report folder. """ self._prepare_data_for_reporting() with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, html_dir=directory, extra_css=extra_css, html_title=title, html_skip_covered=skip_covered, show_contexts=show_contexts, report_contexts=contexts, html_skip_empty=skip_empty, precision=precision, ): reporter = HtmlReporter(self) ret = reporter.report(morfs) return ret def xml_report( self, morfs: Iterable[TMorf] | None = None, outfile: str | None = None, ignore_errors: bool | None = None, omit: str | list[str] | None = None, include: str | list[str] | None = None, contexts: list[str] | None = None, skip_empty: bool | None = None, ) -> float: """Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See :meth:`report` for other arguments. Returns a float, the total percentage covered. """ self._prepare_data_for_reporting() with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, xml_output=outfile, report_contexts=contexts, skip_empty=skip_empty, ): return render_report(self.config.xml_output, XmlReporter(self), morfs, self._message) def json_report( self, morfs: Iterable[TMorf] | None = None, outfile: str | None = None, ignore_errors: bool | None = None, omit: str | list[str] | None = None, include: str | list[str] | None = None, contexts: list[str] | None = None, pretty_print: bool | None = None, show_contexts: bool | None = None, ) -> float: """Generate a JSON report of coverage results. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. `pretty_print` is a boolean, whether to pretty-print the JSON output or not. See :meth:`report` for other arguments. Returns a float, the total percentage covered. .. 
versionadded:: 5.0 """ self._prepare_data_for_reporting() with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, json_output=outfile, report_contexts=contexts, json_pretty_print=pretty_print, json_show_contexts=show_contexts, ): return render_report(self.config.json_output, JsonReporter(self), morfs, self._message) def lcov_report( self, morfs: Iterable[TMorf] | None = None, outfile: str | None = None, ignore_errors: bool | None = None, omit: str | list[str] | None = None, include: str | list[str] | None = None, contexts: list[str] | None = None, ) -> float: """Generate an LCOV report of coverage results. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See :meth:`report` for other arguments. .. versionadded:: 6.3 """ self._prepare_data_for_reporting() with override_config( self, ignore_errors=ignore_errors, report_omit=omit, report_include=include, lcov_output=outfile, report_contexts=contexts, ): return render_report(self.config.lcov_output, LcovReporter(self), morfs, self._message) def sys_info(self) -> Iterable[tuple[str, Any]]: """Return a list of (key, value) pairs showing internal information.""" import coverage as covmod self._init() self._post_init() def plugin_info(plugins: list[Any]) -> list[str]: """Make an entry for the sys_info from a list of plug-ins.""" entries = [] for plugin in plugins: entry = plugin._coverage_plugin_name if not plugin._coverage_enabled: entry += " (disabled)" entries.append(entry) return entries info = [ ("coverage_version", covmod.__version__), ("coverage_module", covmod.__file__), ("core", self._collector.tracer_name() if self._collector is not None else "-none-"), ("CTracer", "available" if HAS_CTRACER else "unavailable"), ("plugins.file_tracers", plugin_info(self._plugins.file_tracers)), ("plugins.configurers", plugin_info(self._plugins.configurers)), ("plugins.context_switchers", plugin_info(self._plugins.context_switchers)), ("configs_attempted", self.config.attempted_config_files), ("configs_read", self.config.config_files_read), ("config_file", self.config.config_file), ("config_contents", repr(self.config._config_contents) if self.config._config_contents else "-none-", ), ("data_file", self._data.data_filename() if self._data is not None else "-none-"), ("python", sys.version.replace("\n", "")), ("platform", platform.platform()), ("implementation", platform.python_implementation()), ("executable", sys.executable), ("def_encoding", sys.getdefaultencoding()), ("fs_encoding", sys.getfilesystemencoding()), ("pid", os.getpid()), ("cwd", os.getcwd()), ("path", sys.path), ("environment", [f"{k} = {v}" for k, v in relevant_environment_display(os.environ)]), ("command_line", " ".join(getattr(sys, "argv", ["-none-"]))), ] if self._inorout is not None: info.extend(self._inorout.sys_info()) info.extend(CoverageData.sys_info()) return info # Mega debugging... # $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage. if int(os.getenv("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging from coverage.debug import decorate_methods, show_calls Coverage = decorate_methods( # type: ignore[misc] show_calls(show_args=True), butnot=["get_data"], )(Coverage) def process_startup() -> Coverage | None: """Call this at Python start-up to perhaps measure coverage. If the environment variable COVERAGE_PROCESS_START is defined, coverage measurement is started. The value of the variable is the config file to use. 
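# ---- Hedged usage sketch (illustration, not part of the original module) ----
# The report writers above (xml_report, json_report, lcov_report) all follow
# the same pattern, and sys_info() exposes diagnostic (key, value) pairs.  The
# output file names are assumptions; passing "-" would write to stdout instead.
import coverage

cov = coverage.Coverage()
cov.load()
cov.xml_report(outfile="coverage.xml")                        # Cobertura-compatible XML
cov.json_report(outfile="coverage.json", pretty_print=True)
cov.lcov_report(outfile="coverage.lcov")
for key, value in cov.sys_info():                             # internal diagnostic info
    print(f"{key}: {value}")
# ---- end of sketch ----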
There are two ways to configure your Python installation to invoke this function when Python starts: #. Create or append to sitecustomize.py to add these lines:: import coverage coverage.process_startup() #. Create a .pth file in your Python installation containing:: import coverage; coverage.process_startup() Returns the :class:`Coverage` instance that was started, or None if it was not started by this call. """ cps = os.getenv("COVERAGE_PROCESS_START") if not cps: # No request for coverage, nothing to do. return None # This function can be called more than once in a process. This happens # because some virtualenv configurations make the same directory visible # twice in sys.path. This means that the .pth file will be found twice, # and executed twice, executing this function twice. We set a global # flag (an attribute on this function) to indicate that coverage.py has # already been started, so we can avoid doing it twice. # # https://github.com/nedbat/coveragepy/issues/340 has more details. if hasattr(process_startup, "coverage"): # We've annotated this function before, so we must have already # started coverage.py in this process. Nothing to do. return None cov = Coverage(config_file=cps) process_startup.coverage = cov # type: ignore[attr-defined] cov._warn_no_data = False cov._warn_unimported_source = False cov._warn_preimported_source = False cov._auto_save = True cov.start() return cov def _prevent_sub_process_measurement() -> None: """Stop any subprocess auto-measurement from writing data.""" auto_created_coverage = getattr(process_startup, "coverage", None) if auto_created_coverage is not None: auto_created_coverage._auto_save = False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1178148 coverage-7.4.4/coverage/ctracer/0000755000175100001770000000000000000000000017345 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/ctracer/datastack.c0000644000175100001770000000262700000000000021457 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ #include "util.h" #include "datastack.h" #define STACK_DELTA 20 int DataStack_init(Stats *pstats, DataStack *pdata_stack) { pdata_stack->depth = -1; pdata_stack->stack = NULL; pdata_stack->alloc = 0; return RET_OK; } void DataStack_dealloc(Stats *pstats, DataStack *pdata_stack) { int i; for (i = 0; i < pdata_stack->alloc; i++) { Py_XDECREF(pdata_stack->stack[i].file_data); } PyMem_Free(pdata_stack->stack); } int DataStack_grow(Stats *pstats, DataStack *pdata_stack) { pdata_stack->depth++; if (pdata_stack->depth >= pdata_stack->alloc) { /* We've outgrown our data_stack array: make it bigger. */ int bigger = pdata_stack->alloc + STACK_DELTA; DataStackEntry * bigger_data_stack = PyMem_Realloc(pdata_stack->stack, bigger * sizeof(DataStackEntry)); STATS( pstats->stack_reallocs++; ) if (bigger_data_stack == NULL) { PyErr_NoMemory(); pdata_stack->depth--; return RET_ERROR; } /* Zero the new entries. 
*/ memset(bigger_data_stack + pdata_stack->alloc, 0, STACK_DELTA * sizeof(DataStackEntry)); pdata_stack->stack = bigger_data_stack; pdata_stack->alloc = bigger; } return RET_OK; } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/ctracer/datastack.h0000644000175100001770000000300500000000000021453 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ #ifndef _COVERAGE_DATASTACK_H #define _COVERAGE_DATASTACK_H #include "util.h" #include "stats.h" /* An entry on the data stack. For each call frame, we need to record all * the information needed for CTracer_handle_line to operate as quickly as * possible. */ typedef struct DataStackEntry { /* The current file_data set. Owned. */ PyObject * file_data; /* The disposition object for this frame. A borrowed instance of CFileDisposition. */ PyObject * disposition; /* The FileTracer handling this frame, or None if it's Python. Borrowed. */ PyObject * file_tracer; /* The line number of the last line recorded, for tracing arcs. -1 means there was no previous line, as when entering a code object. */ int last_line; BOOL started_context; } DataStackEntry; /* A data stack is a dynamically allocated vector of DataStackEntry's. */ typedef struct DataStack { int depth; /* The index of the last-used entry in stack. */ int alloc; /* number of entries allocated at stack. */ /* The file data at each level, or NULL if not recording. */ DataStackEntry * stack; } DataStack; int DataStack_init(Stats * pstats, DataStack *pdata_stack); void DataStack_dealloc(Stats * pstats, DataStack *pdata_stack); int DataStack_grow(Stats * pstats, DataStack *pdata_stack); #endif /* _COVERAGE_DATASTACK_H */ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/ctracer/filedisp.c0000644000175100001770000000630000000000000021307 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ #include "util.h" #include "filedisp.h" void CFileDisposition_dealloc(CFileDisposition *self) { Py_XDECREF(self->original_filename); Py_XDECREF(self->canonical_filename); Py_XDECREF(self->source_filename); Py_XDECREF(self->trace); Py_XDECREF(self->reason); Py_XDECREF(self->file_tracer); Py_XDECREF(self->has_dynamic_filename); } static PyMemberDef CFileDisposition_members[] = { { "original_filename", T_OBJECT, offsetof(CFileDisposition, original_filename), 0, PyDoc_STR("") }, { "canonical_filename", T_OBJECT, offsetof(CFileDisposition, canonical_filename), 0, PyDoc_STR("") }, { "source_filename", T_OBJECT, offsetof(CFileDisposition, source_filename), 0, PyDoc_STR("") }, { "trace", T_OBJECT, offsetof(CFileDisposition, trace), 0, PyDoc_STR("") }, { "reason", T_OBJECT, offsetof(CFileDisposition, reason), 0, PyDoc_STR("") }, { "file_tracer", T_OBJECT, offsetof(CFileDisposition, file_tracer), 0, PyDoc_STR("") }, { "has_dynamic_filename", T_OBJECT, offsetof(CFileDisposition, has_dynamic_filename), 0, PyDoc_STR("") }, { NULL } }; PyTypeObject CFileDispositionType = { PyVarObject_HEAD_INIT(NULL, 0) "coverage.CFileDispositionType", /*tp_name*/ sizeof(CFileDisposition), /*tp_basicsize*/ 0, /*tp_itemsize*/ (destructor)CFileDisposition_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, 
/*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash */ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ "CFileDisposition objects", /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ CFileDisposition_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ }; ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/ctracer/filedisp.h0000644000175100001770000000125600000000000021321 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ #ifndef _COVERAGE_FILEDISP_H #define _COVERAGE_FILEDISP_H #include "util.h" #include "structmember.h" typedef struct CFileDisposition { PyObject_HEAD PyObject * original_filename; PyObject * canonical_filename; PyObject * source_filename; PyObject * trace; PyObject * reason; PyObject * file_tracer; PyObject * has_dynamic_filename; } CFileDisposition; void CFileDisposition_dealloc(CFileDisposition *self); extern PyTypeObject CFileDispositionType; #endif /* _COVERAGE_FILEDISP_H */ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/ctracer/module.c0000644000175100001770000000306400000000000021001 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ #include "util.h" #include "tracer.h" #include "filedisp.h" /* Module definition */ #define MODULE_DOC PyDoc_STR("Fast coverage tracer.") static PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "coverage.tracer", MODULE_DOC, -1, NULL, /* methods */ NULL, NULL, /* traverse */ NULL, /* clear */ NULL }; PyObject * PyInit_tracer(void) { PyObject * mod = PyModule_Create(&moduledef); if (mod == NULL) { return NULL; } if (CTracer_intern_strings() < 0) { return NULL; } /* Initialize CTracer */ CTracerType.tp_new = PyType_GenericNew; if (PyType_Ready(&CTracerType) < 0) { Py_DECREF(mod); return NULL; } Py_INCREF(&CTracerType); if (PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType) < 0) { Py_DECREF(mod); Py_DECREF(&CTracerType); return NULL; } /* Initialize CFileDisposition */ CFileDispositionType.tp_new = PyType_GenericNew; if (PyType_Ready(&CFileDispositionType) < 0) { Py_DECREF(mod); Py_DECREF(&CTracerType); return NULL; } Py_INCREF(&CFileDispositionType); if (PyModule_AddObject(mod, "CFileDisposition", (PyObject *)&CFileDispositionType) < 0) { Py_DECREF(mod); Py_DECREF(&CTracerType); Py_DECREF(&CFileDispositionType); return NULL; } return mod; } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/ctracer/stats.h0000644000175100001770000000130600000000000020654 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ #ifndef 
_COVERAGE_STATS_H #define _COVERAGE_STATS_H #include "util.h" #if COLLECT_STATS #define STATS(x) x #else #define STATS(x) #endif typedef struct Stats { unsigned int calls; /* Need at least one member, but the rest only if needed. */ #if COLLECT_STATS unsigned int lines; unsigned int returns; unsigned int others; unsigned int files; unsigned int stack_reallocs; unsigned int errors; unsigned int pycalls; unsigned int start_context_calls; #endif } Stats; #endif /* _COVERAGE_STATS_H */ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/ctracer/tracer.c0000644000175100001770000010315400000000000020775 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ /* C-based Tracer for coverage.py. */ #include "util.h" #include "datastack.h" #include "filedisp.h" #include "tracer.h" /* Python C API helpers. */ static int pyint_as_int(PyObject * pyint, int *pint) { int the_int = (int)PyLong_AsLong(pyint); if (the_int == -1 && PyErr_Occurred()) { return RET_ERROR; } *pint = the_int; return RET_OK; } /* Interned strings to speed GetAttr etc. */ static PyObject *str__coverage_plugin; static PyObject *str__coverage_plugin_name; static PyObject *str_dynamic_source_filename; static PyObject *str_line_number_range; int CTracer_intern_strings(void) { int ret = RET_ERROR; #define INTERN_STRING(v, s) \ v = PyUnicode_InternFromString(s); \ if (v == NULL) { \ goto error; \ } INTERN_STRING(str__coverage_plugin, "_coverage_plugin") INTERN_STRING(str__coverage_plugin_name, "_coverage_plugin_name") INTERN_STRING(str_dynamic_source_filename, "dynamic_source_filename") INTERN_STRING(str_line_number_range, "line_number_range") ret = RET_OK; error: return ret; } static void CTracer_disable_plugin(CTracer *self, PyObject * disposition); static int CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused) { int ret = RET_ERROR; if (DataStack_init(&self->stats, &self->data_stack) < 0) { goto error; } self->pdata_stack = &self->data_stack; self->context = Py_None; Py_INCREF(self->context); ret = RET_OK; goto ok; error: STATS( self->stats.errors++; ) ok: return ret; } static void CTracer_dealloc(CTracer *self) { int i; if (self->started) { PyEval_SetTrace(NULL, NULL); } Py_XDECREF(self->should_trace); Py_XDECREF(self->check_include); Py_XDECREF(self->warn); Py_XDECREF(self->concur_id_func); Py_XDECREF(self->data); Py_XDECREF(self->file_tracers); Py_XDECREF(self->should_trace_cache); Py_XDECREF(self->should_start_context); Py_XDECREF(self->switch_context); Py_XDECREF(self->context); Py_XDECREF(self->disable_plugin); DataStack_dealloc(&self->stats, &self->data_stack); if (self->data_stacks) { for (i = 0; i < self->data_stacks_used; i++) { DataStack_dealloc(&self->stats, self->data_stacks + i); } PyMem_Free(self->data_stacks); } Py_XDECREF(self->data_stack_index); Py_TYPE(self)->tp_free((PyObject*)self); } #if TRACE_LOG /* Set debugging constants: a file substring and line number to start logging. 
*/ static const char * start_file = "badasync.py"; static int start_line = 1; static const char * indent(int n) { static const char * spaces = " " " " " " " " ; return spaces + strlen(spaces) - n*2; } static BOOL logging = FALSE; static void CTracer_showlog(CTracer * self, int lineno, PyObject * filename, const char * msg) { if (logging) { int depth = self->pdata_stack->depth; printf("%x: %s%3d ", (int)self, indent(depth), depth); if (lineno) { printf("%4d", lineno); } else { printf(" "); } if (filename) { PyObject *ascii = PyUnicode_AsASCIIString(filename); printf(" %s", PyBytes_AS_STRING(ascii)); Py_DECREF(ascii); } if (msg) { printf(" %s", msg); } printf("\n"); } } #define SHOWLOG(l,f,m) CTracer_showlog(self,l,f,m) #else #define SHOWLOG(l,f,m) #endif /* TRACE_LOG */ #if WHAT_LOG static const char * what_sym[] = {"CALL", "EXC ", "LINE", "RET "}; #endif /* Record a pair of integers in self->pcur_entry->file_data. */ static int CTracer_record_pair(CTracer *self, int l1, int l2) { int ret = RET_ERROR; PyObject * packed_obj = NULL; uint64 packed = 0; // Conceptually, data is a set of tuples (l1, l2), but that literally // making a set of tuples would require us to construct a tuple just to // see if we'd already recorded an arc. On many-times-executed code, // that would mean we construct a tuple, find the tuple is already in the // set, then discard the tuple. We can avoid that overhead by packing // the two line numbers into one integer instead. // See collector.py:flush_data for the Python code that unpacks this. if (l1 < 0) { packed |= (1LL << 40); l1 = -l1; } if (l2 < 0) { packed |= (1LL << 41); l2 = -l2; } packed |= (((uint64)l2) << 20) + (uint64)l1; packed_obj = PyLong_FromUnsignedLongLong(packed); if (packed_obj == NULL) { goto error; } if (PySet_Add(self->pcur_entry->file_data, packed_obj) < 0) { goto error; } ret = RET_OK; error: Py_XDECREF(packed_obj); return ret; } /* Set self->pdata_stack to the proper data_stack to use. */ static int CTracer_set_pdata_stack(CTracer *self) { int ret = RET_ERROR; PyObject * co_obj = NULL; PyObject * stack_index = NULL; if (self->concur_id_func != Py_None) { int the_index = 0; if (self->data_stack_index == NULL) { PyObject * weakref = NULL; weakref = PyImport_ImportModule("weakref"); if (weakref == NULL) { goto error; } STATS( self->stats.pycalls++; ) self->data_stack_index = PyObject_CallMethod(weakref, "WeakKeyDictionary", NULL); Py_XDECREF(weakref); if (self->data_stack_index == NULL) { goto error; } } STATS( self->stats.pycalls++; ) co_obj = PyObject_CallObject(self->concur_id_func, NULL); if (co_obj == NULL) { goto error; } stack_index = PyObject_GetItem(self->data_stack_index, co_obj); if (stack_index == NULL) { /* PyObject_GetItem sets an exception if it didn't find the thing. */ PyErr_Clear(); /* A new concurrency object. Make a new data stack. 
*/ the_index = self->data_stacks_used; stack_index = PyLong_FromLong((long)the_index); if (stack_index == NULL) { goto error; } if (PyObject_SetItem(self->data_stack_index, co_obj, stack_index) < 0) { goto error; } self->data_stacks_used++; if (self->data_stacks_used >= self->data_stacks_alloc) { int bigger = self->data_stacks_alloc + 10; DataStack * bigger_stacks = PyMem_Realloc(self->data_stacks, bigger * sizeof(DataStack)); if (bigger_stacks == NULL) { PyErr_NoMemory(); goto error; } self->data_stacks = bigger_stacks; self->data_stacks_alloc = bigger; } DataStack_init(&self->stats, &self->data_stacks[the_index]); } else { if (pyint_as_int(stack_index, &the_index) < 0) { goto error; } } self->pdata_stack = &self->data_stacks[the_index]; } else { self->pdata_stack = &self->data_stack; } ret = RET_OK; error: Py_XDECREF(co_obj); Py_XDECREF(stack_index); return ret; } /* * Parts of the trace function. */ static int CTracer_handle_call(CTracer *self, PyFrameObject *frame) { int ret = RET_ERROR; int ret2; /* Owned references that we clean up at the very end of the function. */ PyObject * disposition = NULL; PyObject * plugin = NULL; PyObject * plugin_name = NULL; PyObject * next_tracename = NULL; #ifdef RESUME PyObject * pCode = NULL; #endif /* Borrowed references. */ PyObject * filename = NULL; PyObject * disp_trace = NULL; PyObject * tracename = NULL; PyObject * file_tracer = NULL; PyObject * has_dynamic_filename = NULL; CFileDisposition * pdisp = NULL; STATS( self->stats.calls++; ) /* Grow the stack. */ if (CTracer_set_pdata_stack(self) < 0) { goto error; } if (DataStack_grow(&self->stats, self->pdata_stack) < 0) { goto error; } self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth]; /* See if this frame begins a new context. */ if (self->should_start_context != Py_None && self->context == Py_None) { PyObject * context; /* We're looking for our context, ask should_start_context if this is the start. */ STATS( self->stats.start_context_calls++; ) STATS( self->stats.pycalls++; ) context = PyObject_CallFunctionObjArgs(self->should_start_context, frame, NULL); if (context == NULL) { goto error; } if (context != Py_None) { PyObject * val; Py_DECREF(self->context); self->context = context; self->pcur_entry->started_context = TRUE; STATS( self->stats.pycalls++; ) val = PyObject_CallFunctionObjArgs(self->switch_context, context, NULL); if (val == NULL) { goto error; } Py_DECREF(val); } else { Py_DECREF(context); self->pcur_entry->started_context = FALSE; } } else { self->pcur_entry->started_context = FALSE; } /* Check if we should trace this line. */ filename = MyFrame_GetCode(frame)->co_filename; disposition = PyDict_GetItem(self->should_trace_cache, filename); if (disposition == NULL) { if (PyErr_Occurred()) { goto error; } STATS( self->stats.files++; ) /* We've never considered this file before. */ /* Ask should_trace about it. */ STATS( self->stats.pycalls++; ) disposition = PyObject_CallFunctionObjArgs(self->should_trace, filename, frame, NULL); if (disposition == NULL) { /* An error occurred inside should_trace. */ goto error; } if (PyDict_SetItem(self->should_trace_cache, filename, disposition) < 0) { goto error; } } else { Py_INCREF(disposition); } if (disposition == Py_None) { /* A later check_include returned false, so don't trace it. */ disp_trace = Py_False; } else { /* The object we got is a CFileDisposition, use it efficiently. 
*/ pdisp = (CFileDisposition *) disposition; disp_trace = pdisp->trace; if (disp_trace == NULL) { goto error; } } if (disp_trace == Py_True) { /* If tracename is a string, then we're supposed to trace. */ tracename = pdisp->source_filename; if (tracename == NULL) { goto error; } file_tracer = pdisp->file_tracer; if (file_tracer == NULL) { goto error; } if (file_tracer != Py_None) { plugin = PyObject_GetAttr(file_tracer, str__coverage_plugin); if (plugin == NULL) { goto error; } plugin_name = PyObject_GetAttr(plugin, str__coverage_plugin_name); if (plugin_name == NULL) { goto error; } } has_dynamic_filename = pdisp->has_dynamic_filename; if (has_dynamic_filename == NULL) { goto error; } if (has_dynamic_filename == Py_True) { STATS( self->stats.pycalls++; ) next_tracename = PyObject_CallMethodObjArgs( file_tracer, str_dynamic_source_filename, tracename, frame, NULL ); if (next_tracename == NULL) { /* An exception from the function. Alert the user with a * warning and a traceback. */ CTracer_disable_plugin(self, disposition); /* Because we handled the error, goto ok. */ goto ok; } tracename = next_tracename; if (tracename != Py_None) { /* Check the dynamic source filename against the include rules. */ PyObject * included = NULL; int should_include; included = PyDict_GetItem(self->should_trace_cache, tracename); if (included == NULL) { PyObject * should_include_bool; if (PyErr_Occurred()) { goto error; } STATS( self->stats.files++; ) STATS( self->stats.pycalls++; ) should_include_bool = PyObject_CallFunctionObjArgs(self->check_include, tracename, frame, NULL); if (should_include_bool == NULL) { goto error; } should_include = (should_include_bool == Py_True); Py_DECREF(should_include_bool); if (PyDict_SetItem(self->should_trace_cache, tracename, should_include ? disposition : Py_None) < 0) { goto error; } } else { should_include = (included != Py_None); } if (!should_include) { tracename = Py_None; } } } } else { tracename = Py_None; } if (tracename != Py_None) { PyObject * file_data = PyDict_GetItem(self->data, tracename); if (file_data == NULL) { if (PyErr_Occurred()) { goto error; } file_data = PySet_New(NULL); if (file_data == NULL) { goto error; } ret2 = PyDict_SetItem(self->data, tracename, file_data); if (ret2 < 0) { goto error; } /* If the disposition mentions a plugin, record that. */ if (file_tracer != Py_None) { ret2 = PyDict_SetItem(self->file_tracers, tracename, plugin_name); if (ret2 < 0) { goto error; } } } else { /* PyDict_GetItem gives a borrowed reference. Own it. */ Py_INCREF(file_data); } Py_XDECREF(self->pcur_entry->file_data); self->pcur_entry->file_data = file_data; self->pcur_entry->file_tracer = file_tracer; SHOWLOG(PyFrame_GetLineNumber(frame), filename, "traced"); } else { Py_XDECREF(self->pcur_entry->file_data); self->pcur_entry->file_data = NULL; self->pcur_entry->file_tracer = Py_None; MyFrame_NoTraceLines(frame); SHOWLOG(PyFrame_GetLineNumber(frame), filename, "skipped"); } self->pcur_entry->disposition = disposition; /* Make the frame right in case settrace(gettrace()) happens. */ MyFrame_SetTrace(frame, self); /* A call event is really a "start frame" event, and can happen for * re-entering a generator also. How we tell the difference depends on * the version of Python. */ BOOL real_call = FALSE; #ifdef RESUME /* * The current opcode is guaranteed to be RESUME. The argument * determines what kind of resume it is. 
*/ pCode = MyCode_GetCode(MyFrame_GetCode(frame)); real_call = (PyBytes_AS_STRING(pCode)[MyFrame_GetLasti(frame) + 1] == 0); #else // f_lasti is -1 for a true call, and a real byte offset for a generator re-entry. real_call = (MyFrame_GetLasti(frame) < 0); #endif if (real_call) { self->pcur_entry->last_line = -MyFrame_GetCode(frame)->co_firstlineno; } else { self->pcur_entry->last_line = PyFrame_GetLineNumber(frame); } ok: ret = RET_OK; error: #ifdef RESUME MyCode_FreeCode(pCode); #endif Py_XDECREF(next_tracename); Py_XDECREF(disposition); Py_XDECREF(plugin); Py_XDECREF(plugin_name); return ret; } static void CTracer_disable_plugin(CTracer *self, PyObject * disposition) { PyObject * ret; PyErr_Print(); STATS( self->stats.pycalls++; ) ret = PyObject_CallFunctionObjArgs(self->disable_plugin, disposition, NULL); if (ret == NULL) { goto error; } Py_DECREF(ret); return; error: /* This function doesn't return a status, so if an error happens, print it, * but don't interrupt the flow. */ /* PySys_WriteStderr is nicer, but is not in the public API. */ fprintf(stderr, "Error occurred while disabling plug-in:\n"); PyErr_Print(); } static int CTracer_unpack_pair(CTracer *self, PyObject *pair, int *p_one, int *p_two) { int ret = RET_ERROR; int the_int; PyObject * pyint = NULL; int index; if (!PyTuple_Check(pair) || PyTuple_Size(pair) != 2) { PyErr_SetString( PyExc_TypeError, "line_number_range must return 2-tuple" ); goto error; } for (index = 0; index < 2; index++) { pyint = PyTuple_GetItem(pair, index); if (pyint == NULL) { goto error; } if (pyint_as_int(pyint, &the_int) < 0) { goto error; } *(index == 0 ? p_one : p_two) = the_int; } ret = RET_OK; error: return ret; } static int CTracer_handle_line(CTracer *self, PyFrameObject *frame) { int ret = RET_ERROR; int ret2; STATS( self->stats.lines++; ) if (self->pdata_stack->depth >= 0) { SHOWLOG(PyFrame_GetLineNumber(frame), MyFrame_GetCode(frame)->co_filename, "line"); if (self->pcur_entry->file_data) { int lineno_from = -1; int lineno_to = -1; /* We're tracing in this frame: record something. */ if (self->pcur_entry->file_tracer != Py_None) { PyObject * from_to = NULL; STATS( self->stats.pycalls++; ) from_to = PyObject_CallMethodObjArgs(self->pcur_entry->file_tracer, str_line_number_range, frame, NULL); if (from_to == NULL) { CTracer_disable_plugin(self, self->pcur_entry->disposition); goto ok; } ret2 = CTracer_unpack_pair(self, from_to, &lineno_from, &lineno_to); Py_DECREF(from_to); if (ret2 < 0) { CTracer_disable_plugin(self, self->pcur_entry->disposition); goto ok; } } else { lineno_from = lineno_to = PyFrame_GetLineNumber(frame); } if (lineno_from != -1) { for (; lineno_from <= lineno_to; lineno_from++) { if (self->tracing_arcs) { /* Tracing arcs: key is (last_line,this_line). */ if (CTracer_record_pair(self, self->pcur_entry->last_line, lineno_from) < 0) { goto error; } } else { /* Tracing lines: key is simply this_line. */ PyObject * this_line = PyLong_FromLong((long)lineno_from); if (this_line == NULL) { goto error; } ret2 = PySet_Add(self->pcur_entry->file_data, this_line); Py_DECREF(this_line); if (ret2 < 0) { goto error; } } self->pcur_entry->last_line = lineno_from; } } } } ok: ret = RET_OK; error: return ret; } static int CTracer_handle_return(CTracer *self, PyFrameObject *frame) { int ret = RET_ERROR; PyObject * pCode = NULL; STATS( self->stats.returns++; ) /* A near-copy of this code is above in the missing-return handler. 
*/ if (CTracer_set_pdata_stack(self) < 0) { goto error; } self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth]; if (self->pdata_stack->depth >= 0) { if (self->tracing_arcs && self->pcur_entry->file_data) { BOOL real_return = FALSE; pCode = MyCode_GetCode(MyFrame_GetCode(frame)); int lasti = MyFrame_GetLasti(frame); Py_ssize_t code_size = PyBytes_GET_SIZE(pCode); unsigned char * code_bytes = (unsigned char *)PyBytes_AS_STRING(pCode); #ifdef RESUME if (lasti == code_size - 2) { real_return = TRUE; } else { #if ENV_LASTI_IS_YIELD lasti += 2; #endif real_return = (code_bytes[lasti] != RESUME); } #else /* Need to distinguish between RETURN_VALUE and YIELD_VALUE. Read * the current bytecode to see what it is. In unusual circumstances * (Cython code), co_code can be the empty string, so range-check * f_lasti before reading the byte. */ BOOL is_yield = FALSE; BOOL is_yield_from = FALSE; if (lasti < code_size) { is_yield = (code_bytes[lasti] == YIELD_VALUE); if (lasti + 2 < code_size) { is_yield_from = (code_bytes[lasti + 2] == YIELD_FROM); } } real_return = !(is_yield || is_yield_from); #endif if (real_return) { int first = MyFrame_GetCode(frame)->co_firstlineno; if (CTracer_record_pair(self, self->pcur_entry->last_line, -first) < 0) { goto error; } } } /* If this frame started a context, then returning from it ends the context. */ if (self->pcur_entry->started_context) { PyObject * val; Py_DECREF(self->context); self->context = Py_None; Py_INCREF(self->context); STATS( self->stats.pycalls++; ) val = PyObject_CallFunctionObjArgs(self->switch_context, self->context, NULL); if (val == NULL) { goto error; } Py_DECREF(val); } /* Pop the stack. */ SHOWLOG(PyFrame_GetLineNumber(frame), MyFrame_GetCode(frame)->co_filename, "return"); self->pdata_stack->depth--; self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth]; } ret = RET_OK; error: MyCode_FreeCode(pCode); return ret; } /* * The Trace Function */ static int CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unused) { int ret = RET_ERROR; #if DO_NOTHING return RET_OK; #endif if (!self->started) { /* If CTracer.stop() has been called from another thread, the tracer is still active in the current thread. Let's deactivate ourselves now. 
*/ PyEval_SetTrace(NULL, NULL); return RET_OK; } #if WHAT_LOG || TRACE_LOG PyObject * ascii = NULL; #endif #if WHAT_LOG const char * w = "XXX "; if (what <= (int)(sizeof(what_sym)/sizeof(const char *))) { w = what_sym[what]; } ascii = PyUnicode_AsASCIIString(MyFrame_GetCode(frame)->co_filename); printf("%x trace: f:%x %s @ %s %d\n", (int)self, (int)frame, what_sym[what], PyBytes_AS_STRING(ascii), PyFrame_GetLineNumber(frame)); Py_DECREF(ascii); #endif #if TRACE_LOG ascii = PyUnicode_AsASCIIString(MyFrame_GetCode(frame)->co_filename); if (strstr(PyBytes_AS_STRING(ascii), start_file) && PyFrame_GetLineNumber(frame) == start_line) { logging = TRUE; } Py_DECREF(ascii); #endif self->activity = TRUE; switch (what) { case PyTrace_CALL: if (CTracer_handle_call(self, frame) < 0) { goto error; } break; case PyTrace_RETURN: if (CTracer_handle_return(self, frame) < 0) { goto error; } break; case PyTrace_LINE: if (CTracer_handle_line(self, frame) < 0) { goto error; } break; default: STATS( self->stats.others++; ) break; } ret = RET_OK; goto cleanup; error: STATS( self->stats.errors++; ) cleanup: return ret; } /* * Python has two ways to set the trace function: sys.settrace(fn), which * takes a Python callable, and PyEval_SetTrace(func, obj), which takes * a C function and a Python object. The way these work together is that * sys.settrace(pyfn) calls PyEval_SetTrace(builtin_func, pyfn), using the * Python callable as the object in PyEval_SetTrace. So sys.gettrace() * simply returns the Python object used as the second argument to * PyEval_SetTrace. So sys.gettrace() will return our self parameter, which * means it must be callable to be used in sys.settrace(). * * So we make ourself callable, equivalent to invoking our trace function. */ static PyObject * CTracer_call(CTracer *self, PyObject *args, PyObject *kwds) { PyFrameObject *frame; PyObject *what_str; PyObject *arg; int what; PyObject *ret = NULL; PyObject * ascii = NULL; #if DO_NOTHING CRASH #endif static char *what_names[] = { "call", "exception", "line", "return", "c_call", "c_exception", "c_return", NULL }; static char *kwlist[] = {"frame", "event", "arg", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!O|i:Tracer_call", kwlist, &PyFrame_Type, &frame, &PyUnicode_Type, &what_str, &arg)) { goto done; } /* In Python, the what argument is a string, we need to find an int for the C function. */ for (what = 0; what_names[what]; what++) { int should_break; ascii = PyUnicode_AsASCIIString(what_str); should_break = !strcmp(PyBytes_AS_STRING(ascii), what_names[what]); Py_DECREF(ascii); if (should_break) { break; } } #if WHAT_LOG ascii = PyUnicode_AsASCIIString(MyFrame_GetCode(frame)->co_filename); printf("pytrace: %s @ %s %d\n", what_sym[what], PyBytes_AS_STRING(ascii), PyFrame_GetLineNumber(frame)); Py_DECREF(ascii); #endif /* Invoke the C function, and return ourselves. */ if (CTracer_trace(self, frame, what, arg) == RET_OK) { Py_INCREF(self); ret = (PyObject *)self; } /* For better speed, install ourselves the C way so that future calls go directly to CTracer_trace, without this intermediate function. Only do this if this is a CALL event, since new trace functions only take effect then. If we don't condition it on CALL, then we'll clobber the new trace function before it has a chance to get called. To understand why, there are three internal values to track: frame.f_trace, c_tracefunc, and c_traceobj. 
They are explained here: https://nedbatchelder.com/text/trace-function.html Without the conditional on PyTrace_CALL, this is what happens: def func(): # f_trace c_tracefunc c_traceobj # -------------- -------------- -------------- # CTracer CTracer.trace CTracer sys.settrace(my_func) # CTracer trampoline my_func # Now Python calls trampoline(CTracer), which calls this function # which calls PyEval_SetTrace below, setting us as the tracer again: # CTracer CTracer.trace CTracer # and it's as if the settrace never happened. */ if (what == PyTrace_CALL) { PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self); } done: return ret; } static PyObject * CTracer_start(CTracer *self, PyObject *args_unused) { PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self); self->started = TRUE; self->tracing_arcs = self->trace_arcs && PyObject_IsTrue(self->trace_arcs); /* start() returns a trace function usable with sys.settrace() */ Py_INCREF(self); return (PyObject *)self; } static PyObject * CTracer_stop(CTracer *self, PyObject *args_unused) { if (self->started) { /* Set the started flag only. The actual call to PyEval_SetTrace(NULL, NULL) is delegated to the callback itself to ensure that it called from the right thread. */ self->started = FALSE; } Py_RETURN_NONE; } static PyObject * CTracer_activity(CTracer *self, PyObject *args_unused) { if (self->activity) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } static PyObject * CTracer_reset_activity(CTracer *self, PyObject *args_unused) { self->activity = FALSE; Py_RETURN_NONE; } static PyObject * CTracer_get_stats(CTracer *self, PyObject *args_unused) { #if COLLECT_STATS return Py_BuildValue( "{sI,sI,sI,sI,sI,sI,si,sI,sI,sI}", "calls", self->stats.calls, "lines", self->stats.lines, "returns", self->stats.returns, "others", self->stats.others, "files", self->stats.files, "stack_reallocs", self->stats.stack_reallocs, "stack_alloc", self->pdata_stack->alloc, "errors", self->stats.errors, "pycalls", self->stats.pycalls, "start_context_calls", self->stats.start_context_calls ); #else Py_RETURN_NONE; #endif /* COLLECT_STATS */ } static PyMemberDef CTracer_members[] = { { "should_trace", T_OBJECT, offsetof(CTracer, should_trace), 0, PyDoc_STR("Function indicating whether to trace a file.") }, { "check_include", T_OBJECT, offsetof(CTracer, check_include), 0, PyDoc_STR("Function indicating whether to include a file.") }, { "warn", T_OBJECT, offsetof(CTracer, warn), 0, PyDoc_STR("Function for issuing warnings.") }, { "concur_id_func", T_OBJECT, offsetof(CTracer, concur_id_func), 0, PyDoc_STR("Function for determining concurrency context") }, { "data", T_OBJECT, offsetof(CTracer, data), 0, PyDoc_STR("The raw dictionary of trace data.") }, { "file_tracers", T_OBJECT, offsetof(CTracer, file_tracers), 0, PyDoc_STR("Mapping from file name to plugin name.") }, { "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0, PyDoc_STR("Dictionary caching should_trace results.") }, { "trace_arcs", T_OBJECT, offsetof(CTracer, trace_arcs), 0, PyDoc_STR("Should we trace arcs, or just lines?") }, { "should_start_context", T_OBJECT, offsetof(CTracer, should_start_context), 0, PyDoc_STR("Function for starting contexts.") }, { "switch_context", T_OBJECT, offsetof(CTracer, switch_context), 0, PyDoc_STR("Function for switching to a new context.") }, { "disable_plugin", T_OBJECT, offsetof(CTracer, disable_plugin), 0, PyDoc_STR("Function for disabling a plugin.") }, { NULL } }; static PyMethodDef CTracer_methods[] = { { "start", (PyCFunction) CTracer_start, 
METH_VARARGS, PyDoc_STR("Start the tracer") }, { "stop", (PyCFunction) CTracer_stop, METH_VARARGS, PyDoc_STR("Stop the tracer") }, { "get_stats", (PyCFunction) CTracer_get_stats, METH_VARARGS, PyDoc_STR("Get statistics about the tracing") }, { "activity", (PyCFunction) CTracer_activity, METH_VARARGS, PyDoc_STR("Has there been any activity?") }, { "reset_activity", (PyCFunction) CTracer_reset_activity, METH_VARARGS, PyDoc_STR("Reset the activity flag") }, { NULL } }; PyTypeObject CTracerType = { PyVarObject_HEAD_INIT(NULL, 0) "coverage.CTracer", /*tp_name*/ sizeof(CTracer), /*tp_basicsize*/ 0, /*tp_itemsize*/ (destructor)CTracer_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash */ (ternaryfunc)CTracer_call, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ "CTracer objects", /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ CTracer_methods, /* tp_methods */ CTracer_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)CTracer_init, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ }; ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/ctracer/tracer.h0000644000175100001770000000370200000000000021000 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ #ifndef _COVERAGE_TRACER_H #define _COVERAGE_TRACER_H #include "util.h" #include "structmember.h" #include "frameobject.h" #include "opcode.h" #include "datastack.h" /* The CTracer type. */ typedef struct CTracer { PyObject_HEAD /* Python objects manipulated directly by the Collector class. */ PyObject * should_trace; PyObject * check_include; PyObject * warn; PyObject * concur_id_func; PyObject * data; PyObject * file_tracers; PyObject * should_trace_cache; PyObject * trace_arcs; PyObject * should_start_context; PyObject * switch_context; PyObject * disable_plugin; /* Has the tracer been started? */ BOOL started; /* Are we tracing arcs, or just lines? */ BOOL tracing_arcs; /* Have we had any activity? */ BOOL activity; /* The current dynamic context. */ PyObject * context; /* The data stack is a stack of sets. Each set collects data for a single source file. The data stack parallels the call stack: each call pushes the new frame's file data onto the data stack, and each return pops file data off. The file data is a set whose form depends on the tracing options. If tracing arcs, the values are line number pairs. If not tracing arcs, the values are line numbers. */ DataStack data_stack; /* Used if we aren't doing concurrency. */ PyObject * data_stack_index; /* Used if we are doing concurrency. */ DataStack * data_stacks; int data_stacks_alloc; int data_stacks_used; DataStack * pdata_stack; /* The current file's data stack entry. 
*/ DataStackEntry * pcur_entry; Stats stats; } CTracer; int CTracer_intern_strings(void); extern PyTypeObject CTracerType; #endif /* _COVERAGE_TRACER_H */ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/ctracer/util.h0000644000175100001770000000566300000000000020505 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ #ifndef _COVERAGE_UTIL_H #define _COVERAGE_UTIL_H #include /* Compile-time debugging helpers */ #undef WHAT_LOG /* Define to log the WHAT params in the trace function. */ #undef TRACE_LOG /* Define to log our bookkeeping. */ #undef COLLECT_STATS /* Collect counters: stats are printed when tracer is stopped. */ #undef DO_NOTHING /* Define this to make the tracer do nothing. */ #if PY_VERSION_HEX >= 0x030B00A0 // 3.11 moved f_lasti into an internal structure. This is totally the wrong way // to make this work, but it's all I've got until https://bugs.python.org/issue40421 // is resolved. #if PY_VERSION_HEX < 0x030D0000 #include #endif #if PY_VERSION_HEX >= 0x030B00A7 #define MyFrame_GetLasti(f) (PyFrame_GetLasti(f)) #else #define MyFrame_GetLasti(f) ((f)->f_frame->f_lasti * 2) #endif #elif PY_VERSION_HEX >= 0x030A00A7 // The f_lasti field changed meaning in 3.10.0a7. It had been bytes, but // now is instructions, so we need to adjust it to use it as a byte index. #define MyFrame_GetLasti(f) ((f)->f_lasti * 2) #else #define MyFrame_GetLasti(f) ((f)->f_lasti) #endif #if PY_VERSION_HEX >= 0x030D0000 #define MyFrame_NoTraceLines(f) (PyObject_SetAttrString((PyObject*)(f), "f_trace_lines", Py_False)) #define MyFrame_SetTrace(f, obj) (PyObject_SetAttrString((PyObject*)(f), "f_trace", (PyObject*)(obj))) #else #define MyFrame_NoTraceLines(f) ((f)->f_trace_lines = 0) #define MyFrame_SetTrace(f, obj) {Py_INCREF(obj); Py_XSETREF((f)->f_trace, (PyObject*)(obj));} #endif // Access f_code should be done through a helper starting in 3.9. #if PY_VERSION_HEX >= 0x03090000 #define MyFrame_GetCode(f) (PyFrame_GetCode(f)) #else #define MyFrame_GetCode(f) ((f)->f_code) #endif #if PY_VERSION_HEX >= 0x030B00B1 #define MyCode_GetCode(co) (PyCode_GetCode(co)) #define MyCode_FreeCode(code) Py_XDECREF(code) #elif PY_VERSION_HEX >= 0x030B00A7 #define MyCode_GetCode(co) (PyObject_GetAttrString((PyObject *)(co), "co_code")) #define MyCode_FreeCode(code) Py_XDECREF(code) #else #define MyCode_GetCode(co) ((co)->co_code) #define MyCode_FreeCode(code) #endif // Where does frame.f_lasti point when yielding from a generator? // It used to point at the YIELD, now it points at the RESUME. // https://github.com/python/cpython/issues/113728 #define ENV_LASTI_IS_YIELD (PY_VERSION_HEX < 0x030D0000) /* The values returned to indicate ok or error. */ #define RET_OK 0 #define RET_ERROR -1 /* Nicer booleans */ typedef int BOOL; #define FALSE 0 #define TRUE 1 #if SIZEOF_LONG_LONG < 8 #error long long too small! #endif typedef unsigned long long uint64; /* Only for extreme machete-mode debugging! */ #define CRASH { printf("*** CRASH! 
***\n"); *((int*)1) = 1; } #endif /* _COVERAGE_UTIL_H */ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/data.py0000644000175100001770000001732500000000000017215 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Coverage data for coverage.py. This file had the 4.x JSON data support, which is now gone. This file still has storage-agnostic helpers, and is kept to avoid changing too many imports. CoverageData is now defined in sqldata.py, and imported here to keep the imports working. """ from __future__ import annotations import glob import hashlib import os.path from typing import Callable, Iterable from coverage.exceptions import CoverageException, NoDataError from coverage.files import PathAliases from coverage.misc import Hasher, file_be_gone, human_sorted, plural from coverage.sqldata import CoverageData def line_counts(data: CoverageData, fullpath: bool = False) -> dict[str, int]: """Return a dict summarizing the line coverage data. Keys are based on the file names, and values are the number of executed lines. If `fullpath` is true, then the keys are the full pathnames of the files, otherwise they are the basenames of the files. Returns a dict mapping file names to counts of lines. """ summ = {} filename_fn: Callable[[str], str] if fullpath: # pylint: disable=unnecessary-lambda-assignment filename_fn = lambda f: f else: filename_fn = os.path.basename for filename in data.measured_files(): lines = data.lines(filename) assert lines is not None summ[filename_fn(filename)] = len(lines) return summ def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None: """Contribute `filename`'s data to the `hasher`. `hasher` is a `coverage.misc.Hasher` instance to be updated with the file's data. It should only get the results data, not the run data. """ if data.has_arcs(): hasher.update(sorted(data.arcs(filename) or [])) else: hasher.update(sorted_lines(data, filename)) hasher.update(data.file_tracer(filename)) def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) -> list[str]: """Make a list of data files to be combined. `data_file` is a path to a data file. `data_paths` is a list of files or directories of files. Returns a list of absolute file paths. """ data_dir, local = os.path.split(os.path.abspath(data_file)) data_paths = data_paths or [data_dir] files_to_combine = [] for p in data_paths: if os.path.isfile(p): files_to_combine.append(os.path.abspath(p)) elif os.path.isdir(p): pattern = glob.escape(os.path.join(os.path.abspath(p), local)) +".*" files_to_combine.extend(glob.glob(pattern)) else: raise NoDataError(f"Couldn't combine from non-existent path '{p}'") # SQLite might have made journal files alongside our database files. # We never want to combine those. files_to_combine = [fnm for fnm in files_to_combine if not fnm.endswith("-journal")] # Sorting isn't usually needed, since it shouldn't matter what order files # are combined, but sorting makes tests more predictable, and makes # debugging more understandable when things go wrong. 
return sorted(files_to_combine) def combine_parallel_data( data: CoverageData, aliases: PathAliases | None = None, data_paths: Iterable[str] | None = None, strict: bool = False, keep: bool = False, message: Callable[[str], None] | None = None, ) -> None: """Combine a number of data files together. `data` is a CoverageData. Treat `data.filename` as a file prefix, and combine the data from all of the data files starting with that prefix plus a dot. If `aliases` is provided, it's a `PathAliases` object that is used to re-map paths to match the local machine's. If `data_paths` is provided, it is a list of directories or files to combine. Directories are searched for files that start with `data.filename` plus dot as a prefix, and those files are combined. If `data_paths` is not provided, then the directory portion of `data.filename` is used as the directory to search for data files. Unless `keep` is True every data file found and combined is then deleted from disk. If a file cannot be read, a warning will be issued, and the file will not be deleted. If `strict` is true, and no files are found to combine, an error is raised. `message` is a function to use for printing messages to the user. """ files_to_combine = combinable_files(data.base_filename(), data_paths) if strict and not files_to_combine: raise NoDataError("No data to combine") file_hashes = set() combined_any = False for f in files_to_combine: if f == data.data_filename(): # Sometimes we are combining into a file which is one of the # parallel files. Skip that file. if data._debug.should("dataio"): data._debug.write(f"Skipping combining ourself: {f!r}") continue try: rel_file_name = os.path.relpath(f) except ValueError: # ValueError can be raised under Windows when os.getcwd() returns a # folder from a different drive than the drive of f, in which case # we print the original value of f instead of its relative path rel_file_name = f with open(f, "rb") as fobj: hasher = hashlib.new("sha3_256") hasher.update(fobj.read()) sha = hasher.digest() combine_this_one = sha not in file_hashes delete_this_one = not keep if combine_this_one: if data._debug.should("dataio"): data._debug.write(f"Combining data file {f!r}") file_hashes.add(sha) try: new_data = CoverageData(f, debug=data._debug) new_data.read() except CoverageException as exc: if data._warn: # The CoverageException has the file name in it, so just # use the message as the warning. 
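                # Warning rather than raising lets the remaining data files
                # still be combined, and the unreadable file is kept on disk
                # (delete_this_one is cleared just below).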
data._warn(str(exc)) if message: message(f"Couldn't combine data file {rel_file_name}: {exc}") delete_this_one = False else: data.update(new_data, aliases=aliases) combined_any = True if message: message(f"Combined data file {rel_file_name}") else: if message: message(f"Skipping duplicate data {rel_file_name}") if delete_this_one: if data._debug.should("dataio"): data._debug.write(f"Deleting data file {f!r}") file_be_gone(f) if strict and not combined_any: raise NoDataError("No usable data files") def debug_data_file(filename: str) -> None: """Implementation of 'coverage debug data'.""" data = CoverageData(filename) filename = data.data_filename() print(f"path: {filename}") if not os.path.exists(filename): print("No data collected: file doesn't exist") return data.read() print(f"has_arcs: {data.has_arcs()!r}") summary = line_counts(data, fullpath=True) filenames = human_sorted(summary.keys()) nfiles = len(filenames) print(f"{nfiles} file{plural(nfiles)}:") for f in filenames: line = f"{f}: {summary[f]} line{plural(summary[f])}" plugin = data.file_tracer(f) if plugin: line += f" [{plugin}]" print(line) def sorted_lines(data: CoverageData, filename: str) -> list[int]: """Get the sorted lines for a file, for tests.""" lines = data.lines(filename) return sorted(lines or []) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/debug.py0000644000175100001770000005035600000000000017373 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Control of and utilities for debugging.""" from __future__ import annotations import atexit import contextlib import functools import inspect import itertools import os import pprint import re import reprlib import sys import traceback import types import _thread from typing import ( overload, Any, Callable, IO, Iterable, Iterator, Mapping, ) from coverage.misc import human_sorted_items, isolate_module from coverage.types import AnyCallable, TWritable os = isolate_module(os) # When debugging, it can be helpful to force some options, especially when # debugging the configuration mechanisms you usually use to control debugging! # This is a list of forced debugging options. 
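# An illustrative sketch (values are hypothetical, not the shipped defaults): a
# developer chasing a configuration problem might temporarily hard-code
# something like
#
#     FORCED_DEBUG = ["config", "callers"]
#     FORCED_DEBUG_FILE = "/tmp/coverage-debug.txt"
#
# DebugControl below appends FORCED_DEBUG to whatever options were requested,
# and DebugOutputFile uses FORCED_DEBUG_FILE as the fallback when the
# COVERAGE_DEBUG_FILE environment variable isn't set. The shipped values
# (normally empty) follow: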
FORCED_DEBUG: list[str] = [] FORCED_DEBUG_FILE = None class DebugControl: """Control and output for debugging.""" show_repr_attr = False # For auto_repr def __init__( self, options: Iterable[str], output: IO[str] | None, file_name: str | None = None, ) -> None: """Configure the options and output file for debugging.""" self.options = list(options) + FORCED_DEBUG self.suppress_callers = False filters = [] if self.should("process"): filters.append(CwdTracker().filter) filters.append(ProcessTracker().filter) if self.should("pytest"): filters.append(PytestTracker().filter) if self.should("pid"): filters.append(add_pid_and_tid) self.output = DebugOutputFile.get_one( output, file_name=file_name, filters=filters, ) self.raw_output = self.output.outfile def __repr__(self) -> str: return f"" def should(self, option: str) -> bool: """Decide whether to output debug information in category `option`.""" if option == "callers" and self.suppress_callers: return False return (option in self.options) @contextlib.contextmanager def without_callers(self) -> Iterator[None]: """A context manager to prevent call stacks from being logged.""" old = self.suppress_callers self.suppress_callers = True try: yield finally: self.suppress_callers = old def write(self, msg: str, *, exc: BaseException | None = None) -> None: """Write a line of debug output. `msg` is the line to write. A newline will be appended. If `exc` is provided, a stack trace of the exception will be written after the message. """ self.output.write(msg + "\n") if exc is not None: self.output.write("".join(traceback.format_exception(None, exc, exc.__traceback__))) if self.should("self"): caller_self = inspect.stack()[1][0].f_locals.get("self") if caller_self is not None: self.output.write(f"self: {caller_self!r}\n") if self.should("callers"): dump_stack_frames(out=self.output, skip=1) self.output.flush() class NoDebugging(DebugControl): """A replacement for DebugControl that will never try to do anything.""" def __init__(self) -> None: # pylint: disable=super-init-not-called ... def should(self, option: str) -> bool: """Should we write debug messages? Never.""" return False def write(self, msg: str, *, exc: BaseException | None = None) -> None: """This will never be called.""" raise AssertionError("NoDebugging.write should never be called.") def info_header(label: str) -> str: """Make a nice header string.""" return "--{:-<60s}".format(" "+label+" ") def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterator[str]: """Produce a sequence of formatted lines from info. `info` is a sequence of pairs (label, data). The produced lines are nicely formatted, ready to print. """ info = list(info) if not info: return label_len = 30 assert all(len(l) < label_len for l, _ in info) for label, data in info: if data == []: data = "-none-" if isinstance(data, tuple) and len(repr(tuple(data))) < 30: # Convert to tuple to scrub namedtuples. yield "%*s: %r" % (label_len, label, tuple(data)) elif isinstance(data, (list, set, tuple)): prefix = "%*s:" % (label_len, label) for e in data: yield "%*s %s" % (label_len+1, prefix, e) prefix = "" else: yield "%*s: %s" % (label_len, label, data) def write_formatted_info( write: Callable[[str], None], header: str, info: Iterable[tuple[str, Any]], ) -> None: """Write a sequence of (label,data) pairs nicely. `write` is a function write(str) that accepts each line of output. `header` is a string to start the section. 
`info` is a sequence of (label, data) pairs, where label is a str, and data can be a single value, or a list/set/tuple. """ write(info_header(header)) for line in info_formatter(info): write(f" {line}") def exc_one_line(exc: Exception) -> str: """Get a one-line summary of an exception, including class name and message.""" lines = traceback.format_exception_only(type(exc), exc) return "|".join(l.rstrip() for l in lines) _FILENAME_REGEXES: list[tuple[str, str]] = [ (r".*[/\\]pytest-of-.*[/\\]pytest-\d+([/\\]popen-gw\d+)?", "tmp:"), ] _FILENAME_SUBS: list[tuple[str, str]] = [] @overload def short_filename(filename: str) -> str: pass @overload def short_filename(filename: None) -> None: pass def short_filename(filename: str | None) -> str | None: """Shorten a file name. Directories are replaced by prefixes like 'syspath:'""" if not _FILENAME_SUBS: for pathdir in sys.path: _FILENAME_SUBS.append((pathdir, "syspath:")) import coverage _FILENAME_SUBS.append((os.path.dirname(coverage.__file__), "cov:")) _FILENAME_SUBS.sort(key=(lambda pair: len(pair[0])), reverse=True) if filename is not None: for pat, sub in _FILENAME_REGEXES: filename = re.sub(pat, sub, filename) for before, after in _FILENAME_SUBS: filename = filename.replace(before, after) return filename def short_stack( skip: int = 0, full: bool = False, frame_ids: bool = False, short_filenames: bool = False, ) -> str: """Return a string summarizing the call stack. The string is multi-line, with one line per stack frame. Each line shows the function name, the file name, and the line number: ... start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py:95 import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py:81 import_local_file : /Users/ned/coverage/trunk/coverage/backward.py:159 ... `skip` is the number of closest immediate frames to skip, so that debugging functions can call this and not be included in the result. If `full` is true, then include all frames. Otherwise, initial "boring" frames (ones in site-packages and earlier) are omitted. `short_filenames` will shorten filenames using `short_filename`, to reduce the amount of repetitive noise in stack traces. """ # Regexes in initial frames that we don't care about. BORING_PRELUDE = [ "", # pytest-xdist has string execution. r"\bigor.py$", # Our test runner. r"\bsite-packages\b", # pytest etc getting to our tests. ] stack: Iterable[inspect.FrameInfo] = inspect.stack()[:skip:-1] if not full: for pat in BORING_PRELUDE: stack = itertools.dropwhile( (lambda fi, pat=pat: re.search(pat, fi.filename)), # type: ignore[misc] stack, ) lines = [] for frame_info in stack: line = f"{frame_info.function:>30s} : " if frame_ids: line += f"{id(frame_info.frame):#x} " filename = frame_info.filename if short_filenames: filename = short_filename(filename) line += f"{filename}:{frame_info.lineno}" lines.append(line) return "\n".join(lines) def dump_stack_frames(out: TWritable, skip: int = 0) -> None: """Print a summary of the stack to `out`.""" out.write(short_stack(skip=skip+1) + "\n") def clipped_repr(text: str, numchars: int = 50) -> str: """`repr(text)`, but limited to `numchars`.""" r = reprlib.Repr() r.maxstring = numchars return r.repr(text) def short_id(id64: int) -> int: """Given a 64-bit id, make a shorter 16-bit one.""" id16 = 0 for offset in range(0, 64, 16): id16 ^= id64 >> offset return id16 & 0xFFFF def add_pid_and_tid(text: str) -> str: """A filter to add pid and tid to debug messages.""" # Thread ids are useful, but too long. Make a shorter one. 
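    # The filtered line looks like "12345.9f3a: original message": a
    # five-column pid, a dot, the 16-bit thread id in hex, then the text
    # (illustrative values).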
tid = f"{short_id(_thread.get_ident()):04x}" text = f"{os.getpid():5d}.{tid}: {text}" return text AUTO_REPR_IGNORE = {"$coverage.object_id"} def auto_repr(self: Any) -> str: """A function implementing an automatic __repr__ for debugging.""" show_attrs = ( (k, v) for k, v in self.__dict__.items() if getattr(v, "show_repr_attr", True) and not inspect.ismethod(v) and k not in AUTO_REPR_IGNORE ) return "<{klass} @{id:#x}{attrs}>".format( klass=self.__class__.__name__, id=id(self), attrs="".join(f" {k}={v!r}" for k, v in show_attrs), ) def simplify(v: Any) -> Any: # pragma: debugging """Turn things which are nearly dict/list/etc into dict/list/etc.""" if isinstance(v, dict): return {k:simplify(vv) for k, vv in v.items()} elif isinstance(v, (list, tuple)): return type(v)(simplify(vv) for vv in v) elif hasattr(v, "__dict__"): return simplify({"."+k: v for k, v in v.__dict__.items()}) else: return v def pp(v: Any) -> None: # pragma: debugging """Debug helper to pretty-print data, including SimpleNamespace objects.""" # Might not be needed in 3.9+ pprint.pprint(simplify(v)) def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str: """Run `text` through a series of filters. `filters` is a list of functions. Each takes a string and returns a string. Each is run in turn. After each filter, the text is split into lines, and each line is passed through the next filter. Returns: the final string that results after all of the filters have run. """ clean_text = text.rstrip() ending = text[len(clean_text):] text = clean_text for filter_fn in filters: lines = [] for line in text.splitlines(): lines.extend(filter_fn(line).splitlines()) text = "\n".join(lines) return text + ending class CwdTracker: """A class to add cwd info to debug messages.""" def __init__(self) -> None: self.cwd: str | None = None def filter(self, text: str) -> str: """Add a cwd message for each new cwd.""" cwd = os.getcwd() if cwd != self.cwd: text = f"cwd is now {cwd!r}\n" + text self.cwd = cwd return text class ProcessTracker: """Track process creation for debug logging.""" def __init__(self) -> None: self.pid: int = os.getpid() self.did_welcome = False def filter(self, text: str) -> str: """Add a message about how new processes came to be.""" welcome = "" pid = os.getpid() if self.pid != pid: welcome = f"New process: forked {self.pid} -> {pid}\n" self.pid = pid elif not self.did_welcome: argv = getattr(sys, "argv", None) welcome = ( f"New process: {pid=}, executable: {sys.executable!r}\n" + f"New process: cmd: {argv!r}\n" ) if hasattr(os, "getppid"): welcome += f"New process parent pid: {os.getppid()!r}\n" if welcome: self.did_welcome = True return welcome + text else: return text class PytestTracker: """Track the current pytest test name to add to debug messages.""" def __init__(self) -> None: self.test_name: str | None = None def filter(self, text: str) -> str: """Add a message when the pytest test changes.""" test_name = os.getenv("PYTEST_CURRENT_TEST") if test_name != self.test_name: text = f"Pytest context: {test_name}\n" + text self.test_name = test_name return text class DebugOutputFile: """A file-like object that includes pid and cwd information.""" def __init__( self, outfile: IO[str] | None, filters: Iterable[Callable[[str], str]], ): self.outfile = outfile self.filters = list(filters) self.pid = os.getpid() @classmethod def get_one( cls, fileobj: IO[str] | None = None, file_name: str | None = None, filters: Iterable[Callable[[str], str]] = (), interim: bool = False, ) -> DebugOutputFile: """Get a 
DebugOutputFile. If `fileobj` is provided, then a new DebugOutputFile is made with it. If `fileobj` isn't provided, then a file is chosen (`file_name` if provided, or COVERAGE_DEBUG_FILE, or stderr), and a process-wide singleton DebugOutputFile is made. `filters` are the text filters to apply to the stream to annotate with pids, etc. If `interim` is true, then a future `get_one` can replace this one. """ if fileobj is not None: # Make DebugOutputFile around the fileobj passed. return cls(fileobj, filters) the_one, is_interim = cls._get_singleton_data() if the_one is None or is_interim: if file_name is not None: fileobj = open(file_name, "a", encoding="utf-8") else: # $set_env.py: COVERAGE_DEBUG_FILE - Where to write debug output file_name = os.getenv("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE) if file_name in ("stdout", "stderr"): fileobj = getattr(sys, file_name) elif file_name: fileobj = open(file_name, "a", encoding="utf-8") atexit.register(fileobj.close) else: fileobj = sys.stderr the_one = cls(fileobj, filters) cls._set_singleton_data(the_one, interim) if not(the_one.filters): the_one.filters = list(filters) return the_one # Because of the way igor.py deletes and re-imports modules, # this class can be defined more than once. But we really want # a process-wide singleton. So stash it in sys.modules instead of # on a class attribute. Yes, this is aggressively gross. SYS_MOD_NAME = "$coverage.debug.DebugOutputFile.the_one" SINGLETON_ATTR = "the_one_and_is_interim" @classmethod def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None: """Set the one DebugOutputFile to rule them all.""" singleton_module = types.ModuleType(cls.SYS_MOD_NAME) setattr(singleton_module, cls.SINGLETON_ATTR, (the_one, interim)) sys.modules[cls.SYS_MOD_NAME] = singleton_module @classmethod def _get_singleton_data(cls) -> tuple[DebugOutputFile | None, bool]: """Get the one DebugOutputFile.""" singleton_module = sys.modules.get(cls.SYS_MOD_NAME) return getattr(singleton_module, cls.SINGLETON_ATTR, (None, True)) @classmethod def _del_singleton_data(cls) -> None: """Delete the one DebugOutputFile, just for tests to use.""" if cls.SYS_MOD_NAME in sys.modules: del sys.modules[cls.SYS_MOD_NAME] def write(self, text: str) -> None: """Just like file.write, but filter through all our filters.""" assert self.outfile is not None self.outfile.write(filter_text(text, self.filters)) self.outfile.flush() def flush(self) -> None: """Flush our file.""" assert self.outfile is not None self.outfile.flush() def log(msg: str, stack: bool = False) -> None: # pragma: debugging """Write a log message as forcefully as possible.""" out = DebugOutputFile.get_one(interim=True) out.write(msg+"\n") if stack: dump_stack_frames(out=out, skip=1) def decorate_methods( decorator: Callable[..., Any], butnot: Iterable[str] = (), private: bool = False, ) -> Callable[..., Any]: # pragma: debugging """A class decorator to apply a decorator to methods.""" def _decorator(cls): # type: ignore[no-untyped-def] for name, meth in inspect.getmembers(cls, inspect.isroutine): if name not in cls.__dict__: continue if name != "__init__": if not private and name.startswith("_"): continue if name in butnot: continue setattr(cls, name, decorator(meth)) return cls return _decorator def break_in_pudb(func: AnyCallable) -> AnyCallable: # pragma: debugging """A function decorator to stop in the debugger for each call.""" @functools.wraps(func) def _wrapper(*args: Any, **kwargs: Any) -> Any: import pudb sys.stdout = sys.__stdout__ pudb.set_trace() return 
func(*args, **kwargs) return _wrapper OBJ_IDS = itertools.count() CALLS = itertools.count() OBJ_ID_ATTR = "$coverage.object_id" def show_calls( show_args: bool = True, show_stack: bool = False, show_return: bool = False, ) -> Callable[..., Any]: # pragma: debugging """A method decorator to debug-log each call to the function.""" def _decorator(func: AnyCallable) -> AnyCallable: @functools.wraps(func) def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: oid = getattr(self, OBJ_ID_ATTR, None) if oid is None: oid = f"{os.getpid():08d} {next(OBJ_IDS):04d}" setattr(self, OBJ_ID_ATTR, oid) extra = "" if show_args: eargs = ", ".join(map(repr, args)) ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items()) extra += "(" extra += eargs if eargs and ekwargs: extra += ", " extra += ekwargs extra += ")" if show_stack: extra += " @ " extra += "; ".join(short_stack(short_filenames=True).splitlines()) callid = next(CALLS) msg = f"{oid} {callid:04d} {func.__name__}{extra}\n" DebugOutputFile.get_one(interim=True).write(msg) ret = func(self, *args, **kwargs) if show_return: msg = f"{oid} {callid:04d} {func.__name__} return {ret!r}\n" DebugOutputFile.get_one(interim=True).write(msg) return ret return _wrapper return _decorator def relevant_environment_display(env: Mapping[str, str]) -> list[tuple[str, str]]: """Filter environment variables for a debug display. Select variables to display (with COV or PY in the name, or HOME, TEMP, or TMP), and also cloak sensitive values with asterisks. Arguments: env: a dict of environment variable names and values. Returns: A list of pairs (name, value) to show. """ slugs = {"COV", "PY"} include = {"HOME", "TEMP", "TMP"} cloak = {"API", "TOKEN", "KEY", "SECRET", "PASS", "SIGNATURE"} to_show = [] for name, val in env.items(): keep = False if name in include: keep = True elif any(slug in name for slug in slugs): keep = True if keep: if any(slug in name for slug in cloak): val = re.sub(r"\w", "*", val) to_show.append((name, val)) return human_sorted_items(to_show) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/disposition.py0000644000175100001770000000354600000000000020650 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Simple value objects for tracking what to do with files.""" from __future__ import annotations from typing import TYPE_CHECKING from coverage.types import TFileDisposition if TYPE_CHECKING: from coverage.plugin import FileTracer class FileDisposition: """A simple value type for recording what to do with a file.""" original_filename: str canonical_filename: str source_filename: str | None trace: bool reason: str file_tracer: FileTracer | None has_dynamic_filename: bool def __repr__(self) -> str: return f"" # FileDisposition "methods": FileDisposition is a pure value object, so it can # be implemented in either C or Python. Acting on them is done with these # functions. 
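# A minimal illustrative sketch (the helper below is hypothetical, not part of
# the original module): a disposition is built with disposition_init and then
# summarized with disposition_debug_msg, both defined next.
def _disposition_example() -> str:
    """Sketch only: build a traced disposition and describe it."""
    disp = disposition_init(FileDisposition, "src/app.py")
    disp.trace = True
    disp.source_filename = "src/app.py"
    return disposition_debug_msg(disp)  # -> "Tracing 'src/app.py'"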
def disposition_init(cls: type[TFileDisposition], original_filename: str) -> TFileDisposition: """Construct and initialize a new FileDisposition object.""" disp = cls() disp.original_filename = original_filename disp.canonical_filename = original_filename disp.source_filename = None disp.trace = False disp.reason = "" disp.file_tracer = None disp.has_dynamic_filename = False return disp def disposition_debug_msg(disp: TFileDisposition) -> str: """Make a nice debug message of what the FileDisposition is doing.""" if disp.trace: msg = f"Tracing {disp.original_filename!r}" if disp.original_filename != disp.source_filename: msg += f" as {disp.source_filename!r}" if disp.file_tracer: msg += f": will be traced by {disp.file_tracer!r}" else: msg = f"Not tracing {disp.original_filename!r}: {disp.reason}" return msg ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/env.py0000644000175100001770000001246600000000000017075 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Determine facts about the environment.""" from __future__ import annotations import os import platform import sys from typing import Any, Iterable # debug_info() at the bottom wants to show all the globals, but not imports. # Grab the global names here to know which names to not show. Nothing defined # above this line will be in the output. _UNINTERESTING_GLOBALS = list(globals()) # These names also shouldn't be shown. _UNINTERESTING_GLOBALS += ["PYBEHAVIOR", "debug_info"] # Operating systems. WINDOWS = sys.platform == "win32" LINUX = sys.platform.startswith("linux") OSX = sys.platform == "darwin" # Python implementations. CPYTHON = (platform.python_implementation() == "CPython") PYPY = (platform.python_implementation() == "PyPy") # Python versions. We amend version_info with one more value, a zero if an # official version, or 1 if built from source beyond an official version. # Only use sys.version_info directly where tools like mypy need it to understand # version-specfic code, otherwise use PYVERSION. PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),) if PYPY: PYPYVERSION = sys.pypy_version_info # type: ignore[attr-defined] # Python behavior. class PYBEHAVIOR: """Flags indicating this Python's behavior.""" # Does Python conform to PEP626, Precise line numbers for debugging and other tools. # https://www.python.org/dev/peps/pep-0626 pep626 = (PYVERSION > (3, 10, 0, "alpha", 4)) # Is "if __debug__" optimized away? optimize_if_debug = not pep626 # Is "if not __debug__" optimized away? The exact details have changed # across versions. if pep626: optimize_if_not_debug = 1 elif PYPY: if PYVERSION >= (3, 9): optimize_if_not_debug = 2 else: optimize_if_not_debug = 3 else: optimize_if_not_debug = 2 # 3.7 changed how functions with only docstrings are numbered. docstring_only_function = (not PYPY) and (PYVERSION <= (3, 10)) # When a break/continue/return statement in a try block jumps to a finally # block, does the finally jump back to the break/continue/return (pre-3.10) # to do the work? finally_jumps_back = (PYVERSION < (3, 10)) # CPython 3.11 now jumps to the decorator line again while executing # the decorator. trace_decorator_line_again = (CPYTHON and PYVERSION > (3, 11, 0, "alpha", 3, 0)) # CPython 3.9a1 made sys.argv[0] and other reported files absolute paths. 
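    # PyPy gained the same absolute-path behavior in 7.3.10, which is why the
    # expression below checks the PyPy version as well.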
report_absolute_files = ( (CPYTHON or (PYPY and PYPYVERSION >= (7, 3, 10))) and PYVERSION >= (3, 9) ) # Lines after break/continue/return/raise are no longer compiled into the # bytecode. They used to be marked as missing, now they aren't executable. omit_after_jump = ( pep626 or (PYPY and PYVERSION >= (3, 9) and PYPYVERSION >= (7, 3, 12)) ) # PyPy has always omitted statements after return. omit_after_return = omit_after_jump or PYPY # Optimize away unreachable try-else clauses. optimize_unreachable_try_else = pep626 # Modules used to have firstlineno equal to the line number of the first # real line of code. Now they always start at 1. module_firstline_1 = pep626 # Are "if 0:" lines (and similar) kept in the compiled code? keep_constant_test = pep626 # When leaving a with-block, do we visit the with-line again for the exit? exit_through_with = (PYVERSION >= (3, 10, 0, "beta")) # Match-case construct. match_case = (PYVERSION >= (3, 10)) # Some words are keywords in some places, identifiers in other places. soft_keywords = (PYVERSION >= (3, 10)) # Modules start with a line numbered zero. This means empty modules have # only a 0-number line, which is ignored, giving a truly empty module. empty_is_empty = (PYVERSION >= (3, 11, 0, "beta", 4)) # Are comprehensions inlined (new) or compiled as called functions (old)? # Changed in https://github.com/python/cpython/pull/101441 comprehensions_are_functions = (PYVERSION <= (3, 12, 0, "alpha", 7, 0)) # PEP669 Low Impact Monitoring: https://peps.python.org/pep-0669/ pep669 = bool(getattr(sys, "monitoring", None)) # Where does frame.f_lasti point when yielding from a generator? # It used to point at the YIELD, now it points at the RESUME. # https://github.com/python/cpython/issues/113728 lasti_is_yield = (PYVERSION < (3, 13)) # Coverage.py specifics, about testing scenarios. See tests/testenv.py also. # Are we coverage-measuring ourselves? METACOV = os.getenv("COVERAGE_COVERAGE") is not None # Are we running our test suite? # Even when running tests, you can use COVERAGE_TESTING=0 to disable the # test-specific behavior like AST checking. 
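# For example (illustrative values): with COVERAGE_COVERAGE=1 set and
# COVERAGE_TESTING unset, METACOV is True and the TESTING flag defined on the
# next line is False; only COVERAGE_TESTING=True turns TESTING on.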
TESTING = os.getenv("COVERAGE_TESTING") == "True" def debug_info() -> Iterable[tuple[str, Any]]: """Return a list of (name, value) pairs for printing debug information.""" info = [ (name, value) for name, value in globals().items() if not name.startswith("_") and name not in _UNINTERESTING_GLOBALS ] info += [ (name, value) for name, value in PYBEHAVIOR.__dict__.items() if not name.startswith("_") ] return sorted(info) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/exceptions.py0000644000175100001770000000256500000000000020465 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Exceptions coverage.py can raise.""" from __future__ import annotations class _BaseCoverageException(Exception): """The base-base of all Coverage exceptions.""" pass class CoverageException(_BaseCoverageException): """The base class of all exceptions raised by Coverage.py.""" pass class ConfigError(_BaseCoverageException): """A problem with a config file, or a value in one.""" pass class DataError(CoverageException): """An error in using a data file.""" pass class NoDataError(CoverageException): """We didn't have data to work with.""" pass class NoSource(CoverageException): """We couldn't find the source for a module.""" pass class NoCode(NoSource): """We couldn't find any code at all.""" pass class NotPython(CoverageException): """A source file turned out not to be parsable Python.""" pass class PluginError(CoverageException): """A plugin misbehaved.""" pass class _ExceptionDuringRun(CoverageException): """An exception happened while running customer code. Construct it with three arguments, the values from `sys.exc_info`. """ pass class CoverageWarning(Warning): """A warning from Coverage.py.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/execfile.py0000644000175100001770000002747200000000000020074 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Execute files of Python code.""" from __future__ import annotations import importlib.machinery import importlib.util import inspect import marshal import os import struct import sys from importlib.machinery import ModuleSpec from types import CodeType, ModuleType from typing import Any from coverage import env from coverage.exceptions import CoverageException, _ExceptionDuringRun, NoCode, NoSource from coverage.files import canonical_filename, python_reported_file from coverage.misc import isolate_module from coverage.python import get_python_source os = isolate_module(os) PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER class DummyLoader: """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader. Currently only implements the .fullname attribute """ def __init__(self, fullname: str, *_args: Any) -> None: self.fullname = fullname def find_module( modulename: str, ) -> tuple[str | None, str, ModuleSpec]: """Find the module named `modulename`. Returns the file path of the module, the name of the enclosing package, and the spec. 
""" try: spec = importlib.util.find_spec(modulename) except ImportError as err: raise NoSource(str(err)) from err if not spec: raise NoSource(f"No module named {modulename!r}") pathname = spec.origin packagename = spec.name if spec.submodule_search_locations: mod_main = modulename + ".__main__" spec = importlib.util.find_spec(mod_main) if not spec: raise NoSource( f"No module named {mod_main}; " + f"{modulename!r} is a package and cannot be directly executed", ) pathname = spec.origin packagename = spec.name packagename = packagename.rpartition(".")[0] return pathname, packagename, spec class PyRunner: """Multi-stage execution of Python code. This is meant to emulate real Python execution as closely as possible. """ def __init__(self, args: list[str], as_module: bool = False) -> None: self.args = args self.as_module = as_module self.arg0 = args[0] self.package: str | None = None self.modulename: str | None = None self.pathname: str | None = None self.loader: DummyLoader | None = None self.spec: ModuleSpec | None = None def prepare(self) -> None: """Set sys.path properly. This needs to happen before any importing, and without importing anything. """ path0: str | None if self.as_module: path0 = os.getcwd() elif os.path.isdir(self.arg0): # Running a directory means running the __main__.py file in that # directory. path0 = self.arg0 else: path0 = os.path.abspath(os.path.dirname(self.arg0)) if os.path.isdir(sys.path[0]): # sys.path fakery. If we are being run as a command, then sys.path[0] # is the directory of the "coverage" script. If this is so, replace # sys.path[0] with the directory of the file we're running, or the # current directory when running modules. If it isn't so, then we # don't know what's going on, and just leave it alone. top_file = inspect.stack()[-1][0].f_code.co_filename sys_path_0_abs = os.path.abspath(sys.path[0]) top_file_dir_abs = os.path.abspath(os.path.dirname(top_file)) sys_path_0_abs = canonical_filename(sys_path_0_abs) top_file_dir_abs = canonical_filename(top_file_dir_abs) if sys_path_0_abs != top_file_dir_abs: path0 = None else: # sys.path[0] is a file. Is the next entry the directory containing # that file? if sys.path[1] == os.path.dirname(sys.path[0]): # Can it be right to always remove that? del sys.path[1] if path0 is not None: sys.path[0] = python_reported_file(path0) def _prepare2(self) -> None: """Do more preparation to run Python code. Includes finding the module to run and adjusting sys.argv[0]. This method is allowed to import code. """ if self.as_module: self.modulename = self.arg0 pathname, self.package, self.spec = find_module(self.modulename) if self.spec is not None: self.modulename = self.spec.name self.loader = DummyLoader(self.modulename) assert pathname is not None self.pathname = os.path.abspath(pathname) self.args[0] = self.arg0 = self.pathname elif os.path.isdir(self.arg0): # Running a directory means running the __main__.py file in that # directory. for ext in [".py", ".pyc", ".pyo"]: try_filename = os.path.join(self.arg0, "__main__" + ext) # 3.8.10 changed how files are reported when running a # directory. But I'm not sure how far this change is going to # spread, so I'll just hard-code it here for now. if env.PYVERSION >= (3, 8, 10): try_filename = os.path.abspath(try_filename) if os.path.exists(try_filename): self.arg0 = try_filename break else: raise NoSource(f"Can't find '__main__' module in '{self.arg0}'") # Make a spec. I don't know if this is the right way to do it. 
try_filename = python_reported_file(try_filename) self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename) self.spec.has_location = True self.package = "" self.loader = DummyLoader("__main__") else: self.loader = DummyLoader("__main__") self.arg0 = python_reported_file(self.arg0) def run(self) -> None: """Run the Python code!""" self._prepare2() # Create a module to serve as __main__ main_mod = ModuleType("__main__") from_pyc = self.arg0.endswith((".pyc", ".pyo")) main_mod.__file__ = self.arg0 if from_pyc: main_mod.__file__ = main_mod.__file__[:-1] if self.package is not None: main_mod.__package__ = self.package main_mod.__loader__ = self.loader # type: ignore[assignment] if self.spec is not None: main_mod.__spec__ = self.spec main_mod.__builtins__ = sys.modules["builtins"] # type: ignore[attr-defined] sys.modules["__main__"] = main_mod # Set sys.argv properly. sys.argv = self.args try: # Make a code object somehow. if from_pyc: code = make_code_from_pyc(self.arg0) else: code = make_code_from_py(self.arg0) except CoverageException: raise except Exception as exc: msg = f"Couldn't run '{self.arg0}' as Python code: {exc.__class__.__name__}: {exc}" raise CoverageException(msg) from exc # Execute the code object. # Return to the original directory in case the test code exits in # a non-existent directory. cwd = os.getcwd() try: exec(code, main_mod.__dict__) except SystemExit: # pylint: disable=try-except-raise # The user called sys.exit(). Just pass it along to the upper # layers, where it will be handled. raise except Exception: # Something went wrong while executing the user code. # Get the exc_info, and pack them into an exception that we can # throw up to the outer loop. We peel one layer off the traceback # so that the coverage.py code doesn't appear in the final printed # traceback. typ, err, tb = sys.exc_info() assert typ is not None assert err is not None assert tb is not None # PyPy3 weirdness. If I don't access __context__, then somehow it # is non-None when the exception is reported at the upper layer, # and a nested exception is shown to the user. This getattr fixes # it somehow? https://bitbucket.org/pypy/pypy/issue/1903 getattr(err, "__context__", None) # Call the excepthook. try: assert err.__traceback__ is not None err.__traceback__ = err.__traceback__.tb_next sys.excepthook(typ, err, tb.tb_next) except SystemExit: # pylint: disable=try-except-raise raise except Exception as exc: # Getting the output right in the case of excepthook # shenanigans is kind of involved. sys.stderr.write("Error in sys.excepthook:\n") typ2, err2, tb2 = sys.exc_info() assert typ2 is not None assert err2 is not None assert tb2 is not None err2.__suppress_context__ = True assert err2.__traceback__ is not None err2.__traceback__ = err2.__traceback__.tb_next sys.__excepthook__(typ2, err2, tb2.tb_next) sys.stderr.write("\nOriginal exception was:\n") raise _ExceptionDuringRun(typ, err, tb.tb_next) from exc else: sys.exit(1) finally: os.chdir(cwd) def run_python_module(args: list[str]) -> None: """Run a Python module, as though with ``python -m name args...``. `args` is the argument array to present as sys.argv, including the first element naming the module being executed. This is a helper for tests, to encapsulate how to use PyRunner. """ runner = PyRunner(args, as_module=True) runner.prepare() runner.run() def run_python_file(args: list[str]) -> None: """Run a Python file as if it were the main program on the command line. 
`args` is the argument array to present as sys.argv, including the first element naming the file being executed. `package` is the name of the enclosing package, if any. This is a helper for tests, to encapsulate how to use PyRunner. """ runner = PyRunner(args, as_module=False) runner.prepare() runner.run() def make_code_from_py(filename: str) -> CodeType: """Get source from `filename` and make a code object of it.""" # Open the source file. try: source = get_python_source(filename) except (OSError, NoSource) as exc: raise NoSource(f"No file to run: '{filename}'") from exc return compile(source, filename, "exec", dont_inherit=True) def make_code_from_pyc(filename: str) -> CodeType: """Get a code object from a .pyc file.""" try: fpyc = open(filename, "rb") except OSError as exc: raise NoCode(f"No file to run: '{filename}'") from exc with fpyc: # First four bytes are a version-specific magic number. It has to # match or we won't run the file. magic = fpyc.read(4) if magic != PYC_MAGIC_NUMBER: raise NoCode(f"Bad magic number in .pyc file: {magic!r} != {PYC_MAGIC_NUMBER!r}") flags = struct.unpack(" None: """Set the directory that `relative_filename` will be relative to.""" global RELATIVE_DIR, CANONICAL_FILENAME_CACHE # The current directory abs_curdir = abs_file(os.curdir) if not abs_curdir.endswith(os.sep): # Suffix with separator only if not at the system root abs_curdir = abs_curdir + os.sep # The absolute path to our current directory. RELATIVE_DIR = os.path.normcase(abs_curdir) # Cache of results of calling the canonical_filename() method, to # avoid duplicating work. CANONICAL_FILENAME_CACHE = {} def relative_directory() -> str: """Return the directory that `relative_filename` is relative to.""" return RELATIVE_DIR def relative_filename(filename: str) -> str: """Return the relative form of `filename`. The file name will be relative to the current directory when the `set_relative_directory` was called. """ fnorm = os.path.normcase(filename) if fnorm.startswith(RELATIVE_DIR): filename = filename[len(RELATIVE_DIR):] return filename def canonical_filename(filename: str) -> str: """Return a canonical file name for `filename`. An absolute path with no redundant components and normalized case. """ if filename not in CANONICAL_FILENAME_CACHE: cf = filename if not os.path.isabs(filename): for path in [os.curdir] + sys.path: if path is None: continue # type: ignore[unreachable] f = os.path.join(path, filename) try: exists = os.path.exists(f) except UnicodeError: exists = False if exists: cf = f break cf = abs_file(cf) CANONICAL_FILENAME_CACHE[filename] = cf return CANONICAL_FILENAME_CACHE[filename] MAX_FLAT = 100 def flat_rootname(filename: str) -> str: """A base for a flat file name to correspond to this file. Useful for writing files about the code where you want all the files in the same directory, but need to differentiate same-named files from different directories. 
For example, the file a/b/c.py will return 'd_86bbcbe134d28fd2_c_py' """ dirname, basename = ntpath.split(filename) if dirname: fp = hashlib.new("sha3_256", dirname.encode("UTF-8")).hexdigest()[:16] prefix = f"d_{fp}_" else: prefix = "" return prefix + basename.replace(".", "_") if env.WINDOWS: _ACTUAL_PATH_CACHE: dict[str, str] = {} _ACTUAL_PATH_LIST_CACHE: dict[str, list[str]] = {} def actual_path(path: str) -> str: """Get the actual path of `path`, including the correct case.""" if path in _ACTUAL_PATH_CACHE: return _ACTUAL_PATH_CACHE[path] head, tail = os.path.split(path) if not tail: # This means head is the drive spec: normalize it. actpath = head.upper() elif not head: actpath = tail else: head = actual_path(head) if head in _ACTUAL_PATH_LIST_CACHE: files = _ACTUAL_PATH_LIST_CACHE[head] else: try: files = os.listdir(head) except Exception: # This will raise OSError, or this bizarre TypeError: # https://bugs.python.org/issue1776160 files = [] _ACTUAL_PATH_LIST_CACHE[head] = files normtail = os.path.normcase(tail) for f in files: if os.path.normcase(f) == normtail: tail = f break actpath = os.path.join(head, tail) _ACTUAL_PATH_CACHE[path] = actpath return actpath else: def actual_path(path: str) -> str: """The actual path for non-Windows platforms.""" return path def abs_file(path: str) -> str: """Return the absolute normalized form of `path`.""" return actual_path(os.path.abspath(os.path.realpath(path))) def zip_location(filename: str) -> tuple[str, str] | None: """Split a filename into a zipfile / inner name pair. Only return a pair if the zipfile exists. No check is made if the inner name is in the zipfile. """ for ext in [".zip", ".whl", ".egg", ".pex"]: zipbase, extension, inner = filename.partition(ext + sep(filename)) if extension: zipfile = zipbase + ext if os.path.exists(zipfile): return zipfile, inner return None def source_exists(path: str) -> bool: """Determine if a source file path exists.""" if os.path.exists(path): return True if zip_location(path): # If zip_location returns anything, then it's a zipfile that # exists. That's good enough for us. return True return False def python_reported_file(filename: str) -> str: """Return the string as Python would describe this file name.""" if env.PYBEHAVIOR.report_absolute_files: filename = os.path.abspath(filename) return filename def isabs_anywhere(filename: str) -> bool: """Is `filename` an absolute path on any OS?""" return ntpath.isabs(filename) or posixpath.isabs(filename) def prep_patterns(patterns: Iterable[str]) -> list[str]: """Prepare the file patterns for use in a `GlobMatcher`. If a pattern starts with a wildcard, it is used as a pattern as-is. If it does not start with a wildcard, then it is made absolute with the current directory. If `patterns` is None, an empty list is returned. """ prepped = [] for p in patterns or []: prepped.append(p) if not p.startswith(("*", "?")): prepped.append(abs_file(p)) return prepped class TreeMatcher: """A matcher for files in a tree. Construct with a list of paths, either files or directories. Paths match with the `match` method if they are one of the files, or if they are somewhere in a subtree rooted at one of the directories. 
""" def __init__(self, paths: Iterable[str], name: str = "unknown") -> None: self.original_paths: list[str] = human_sorted(paths) #self.paths = list(map(os.path.normcase, paths)) self.paths = [os.path.normcase(p) for p in paths] self.name = name def __repr__(self) -> str: return f"" def info(self) -> list[str]: """A list of strings for displaying when dumping state.""" return self.original_paths def match(self, fpath: str) -> bool: """Does `fpath` indicate a file in one of our trees?""" fpath = os.path.normcase(fpath) for p in self.paths: if fpath.startswith(p): if fpath == p: # This is the same file! return True if fpath[len(p)] == os.sep: # This is a file in the directory return True return False class ModuleMatcher: """A matcher for modules in a tree.""" def __init__(self, module_names: Iterable[str], name:str = "unknown") -> None: self.modules = list(module_names) self.name = name def __repr__(self) -> str: return f"" def info(self) -> list[str]: """A list of strings for displaying when dumping state.""" return self.modules def match(self, module_name: str) -> bool: """Does `module_name` indicate a module in one of our packages?""" if not module_name: return False for m in self.modules: if module_name.startswith(m): if module_name == m: return True if module_name[len(m)] == ".": # This is a module in the package return True return False class GlobMatcher: """A matcher for files by file name pattern.""" def __init__(self, pats: Iterable[str], name: str = "unknown") -> None: self.pats = list(pats) self.re = globs_to_regex(self.pats, case_insensitive=env.WINDOWS) self.name = name def __repr__(self) -> str: return f"" def info(self) -> list[str]: """A list of strings for displaying when dumping state.""" return self.pats def match(self, fpath: str) -> bool: """Does `fpath` match one of our file name patterns?""" return self.re.match(fpath) is not None def sep(s: str) -> str: """Find the path separator used in this string, or os.sep if none.""" if sep_match := re.search(r"[\\/]", s): the_sep = sep_match[0] else: the_sep = os.sep return the_sep # Tokenizer for _glob_to_regex. # None as a sub means disallowed. G2RX_TOKENS = [(re.compile(rx), sub) for rx, sub in [ (r"\*\*\*+", None), # Can't have *** (r"[^/]+\*\*+", None), # Can't have x** (r"\*\*+[^/]+", None), # Can't have **x (r"\*\*/\*\*", None), # Can't have **/** (r"^\*+/", r"(.*[/\\\\])?"), # ^*/ matches any prefix-slash, or nothing. (r"/\*+$", r"[/\\\\].*"), # /*$ matches any slash-suffix. (r"\*\*/", r"(.*[/\\\\])?"), # **/ matches any subdirs, including none (r"/", r"[/\\\\]"), # / matches either slash or backslash (r"\*", r"[^/\\\\]*"), # * matches any number of non slash-likes (r"\?", r"[^/\\\\]"), # ? matches one non slash-like (r"\[.*?\]", r"\g<0>"), # [a-f] matches [a-f] (r"[a-zA-Z0-9_-]+", r"\g<0>"), # word chars match themselves (r"[\[\]]", None), # Can't have single square brackets (r".", r"\\\g<0>"), # Anything else is escaped to be safe ]] def _glob_to_regex(pattern: str) -> str: """Convert a file-path glob pattern into a regex.""" # Turn all backslashes into slashes to simplify the tokenizer. 
pattern = pattern.replace("\\", "/") if "/" not in pattern: pattern = "**/" + pattern path_rx = [] pos = 0 while pos < len(pattern): for rx, sub in G2RX_TOKENS: # pragma: always breaks if m := rx.match(pattern, pos=pos): if sub is None: raise ConfigError(f"File pattern can't include {m[0]!r}") path_rx.append(m.expand(sub)) pos = m.end() break return "".join(path_rx) def globs_to_regex( patterns: Iterable[str], case_insensitive: bool = False, partial: bool = False, ) -> re.Pattern[str]: """Convert glob patterns to a compiled regex that matches any of them. Slashes are always converted to match either slash or backslash, for Windows support, even when running elsewhere. If the pattern has no slash or backslash, then it is interpreted as matching a file name anywhere it appears in the tree. Otherwise, the glob pattern must match the whole file path. If `partial` is true, then the pattern will match if the target string starts with the pattern. Otherwise, it must match the entire string. Returns: a compiled regex object. Use the .match method to compare target strings. """ flags = 0 if case_insensitive: flags |= re.IGNORECASE rx = join_regex(map(_glob_to_regex, patterns)) if not partial: rx = fr"(?:{rx})\Z" compiled = re.compile(rx, flags=flags) return compiled class PathAliases: """A collection of aliases for paths. When combining data files from remote machines, often the paths to source code are different, for example, due to OS differences, or because of serialized checkouts on continuous integration machines. A `PathAliases` object tracks a list of pattern/result pairs, and can map a path through those aliases to produce a unified path. """ def __init__( self, debugfn: Callable[[str], None] | None = None, relative: bool = False, ) -> None: # A list of (original_pattern, regex, result) self.aliases: list[tuple[str, re.Pattern[str], str]] = [] self.debugfn = debugfn or (lambda msg: 0) self.relative = relative self.pprinted = False def pprint(self) -> None: """Dump the important parts of the PathAliases, for debugging.""" self.debugfn(f"Aliases (relative={self.relative}):") for original_pattern, regex, result in self.aliases: self.debugfn(f" Rule: {original_pattern!r} -> {result!r} using regex {regex.pattern!r}") def add(self, pattern: str, result: str) -> None: """Add the `pattern`/`result` pair to the list of aliases. `pattern` is an `glob`-style pattern. `result` is a simple string. When mapping paths, if a path starts with a match against `pattern`, then that match is replaced with `result`. This models isomorphic source trees being rooted at different places on two different machines. `pattern` can't end with a wildcard component, since that would match an entire tree, and not just its root. """ original_pattern = pattern pattern_sep = sep(pattern) if len(pattern) > 1: pattern = pattern.rstrip(r"\/") # The pattern can't end with a wildcard component. if pattern.endswith("*"): raise ConfigError("Pattern must not end with wildcards.") # The pattern is meant to match a file path. Let's make it absolute # unless it already is, or is meant to match any prefix. if not self.relative: if not pattern.startswith("*") and not isabs_anywhere(pattern + pattern_sep): pattern = abs_file(pattern) if not pattern.endswith(pattern_sep): pattern += pattern_sep # Make a regex from the pattern. regex = globs_to_regex([pattern], case_insensitive=True, partial=True) # Normalize the result: it must end with a path separator. 
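        # For example (illustrative): add("/jenkins/build/*/src", "src") stores
        # a rule whose result is normalized to end with a separator ("src/" on
        # POSIX), so map() below can rewrite any matching remote path onto the
        # local "src" tree.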
result_sep = sep(result) result = result.rstrip(r"\/") + result_sep self.aliases.append((original_pattern, regex, result)) def map(self, path: str, exists:Callable[[str], bool] = source_exists) -> str: """Map `path` through the aliases. `path` is checked against all of the patterns. The first pattern to match is used to replace the root of the path with the result root. Only one pattern is ever used. If no patterns match, `path` is returned unchanged. The separator style in the result is made to match that of the result in the alias. `exists` is a function to determine if the resulting path actually exists. Returns the mapped path. If a mapping has happened, this is a canonical path. If no mapping has happened, it is the original value of `path` unchanged. """ if not self.pprinted: self.pprint() self.pprinted = True for original_pattern, regex, result in self.aliases: if m := regex.match(path): new = path.replace(m[0], result) new = new.replace(sep(path), sep(result)) if not self.relative: new = canonical_filename(new) dot_start = result.startswith(("./", ".\\")) and len(result) > 2 if new.startswith(("./", ".\\")) and not dot_start: new = new[2:] if not exists(new): self.debugfn( f"Rule {original_pattern!r} changed {path!r} to {new!r} " + "which doesn't exist, continuing", ) continue self.debugfn( f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, " + f"producing {new!r}", ) return new # If we get here, no pattern matched. if self.relative: path = relative_filename(path) if self.relative and not isabs_anywhere(path): # Auto-generate a pattern to implicitly match relative files parts = re.split(r"[/\\]", path) if len(parts) > 1: dir1 = parts[0] pattern = f"*/{dir1}" regex_pat = fr"^(.*[\\/])?{re.escape(dir1)}[\\/]" result = f"{dir1}{os.sep}" # Only add a new pattern if we don't already have this pattern. if not any(p == pattern for p, _, _ in self.aliases): self.debugfn( f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}", ) self.aliases.append((pattern, re.compile(regex_pat), result)) return self.map(path, exists=exists) self.debugfn(f"No rules match, path {path!r} is unchanged") return path def find_python_files(dirname: str, include_namespace_packages: bool) -> Iterable[str]: """Yield all of the importable Python files in `dirname`, recursively. To be importable, the files have to be in a directory with a __init__.py, except for `dirname` itself, which isn't required to have one. The assumption is that `dirname` was specified directly, so the user knows best, but sub-directories are checked for a __init__.py to be sure we only find the importable files. If `include_namespace_packages` is True, then the check for __init__.py files is skipped. Files with strange characters are skipped, since they couldn't have been imported, and are probably editor side-files. """ for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): if not include_namespace_packages: if i > 0 and "__init__.py" not in filenames: # If a directory doesn't have __init__.py, then it isn't # importable and neither are its files del dirnames[:] continue for filename in filenames: # We're only interested in files that look like reasonable Python # files: Must end with .py or .pyw, and must not have certain funny # characters that probably mean they are editor junk. if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): yield os.path.join(dirpath, filename) # Globally set the relative directory. 
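# A minimal, hedged usage sketch of the matchers and aliases defined above.
# The paths and expected values are assumptions chosen for illustration (they
# are not taken from coverage.py's own tests), and the block is guarded so it
# never runs when this module is imported normally.
if __name__ == "__main__":  # pragma: no cover
    _gm = GlobMatcher(["*/tests/*.py"], "example")
    print(_gm.match("/repo/tests/test_files.py"))  # expected: True
    print(_gm.match("/repo/src/files.py"))         # expected: False

    _aliases = PathAliases(relative=True)
    _aliases.add("/ci/build/*/src", "./src")
    # A permissive `exists` keeps the sketch independent of the local disk.
    print(_aliases.map("/ci/build/77/src/mod.py", exists=lambda p: True))
    # expected: ./src/mod.py
# The module-level call below performs the global setup described above.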
set_relative_directory() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/html.py0000644000175100001770000005552500000000000017254 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """HTML reporting for coverage.py.""" from __future__ import annotations import collections import datetime import functools import json import os import re import shutil import string from dataclasses import dataclass from typing import Any, Iterable, TYPE_CHECKING, cast import coverage from coverage.data import CoverageData, add_data_to_hash from coverage.exceptions import NoDataError from coverage.files import flat_rootname from coverage.misc import ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime from coverage.misc import human_sorted, plural, stdout_link from coverage.report_core import get_analysis_to_report from coverage.results import Analysis, Numbers from coverage.templite import Templite from coverage.types import TLineNo, TMorf from coverage.version import __url__ if TYPE_CHECKING: # To avoid circular imports: from coverage import Coverage from coverage.plugins import FileReporter # To be able to use 3.8 typing features, and still run on 3.7: from typing import TypedDict class IndexInfoDict(TypedDict): """Information for each file, to render the index file.""" nums: Numbers html_filename: str relative_filename: str class FileInfoDict(TypedDict): """Summary of the information from last rendering, to avoid duplicate work.""" hash: str index: IndexInfoDict os = isolate_module(os) def data_filename(fname: str) -> str: """Return the path to an "htmlfiles" data file of ours. 
""" static_dir = os.path.join(os.path.dirname(__file__), "htmlfiles") static_filename = os.path.join(static_dir, fname) return static_filename def read_data(fname: str) -> str: """Return the contents of a data file of ours.""" with open(data_filename(fname)) as data_file: return data_file.read() def write_html(fname: str, html: str) -> None: """Write `html` to `fname`, properly encoded.""" html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n" with open(fname, "wb") as fout: fout.write(html.encode("ascii", "xmlcharrefreplace")) @dataclass class LineData: """The data for each source line of HTML output.""" tokens: list[tuple[str, str]] number: TLineNo category: str statement: bool contexts: list[str] contexts_label: str context_list: list[str] short_annotations: list[str] long_annotations: list[str] html: str = "" context_str: str | None = None annotate: str | None = None annotate_long: str | None = None css_class: str = "" @dataclass class FileData: """The data for each source file of HTML output.""" relative_filename: str nums: Numbers lines: list[LineData] class HtmlDataGeneration: """Generate structured data to be turned into HTML reports.""" EMPTY = "(empty)" def __init__(self, cov: Coverage) -> None: self.coverage = cov self.config = self.coverage.config data = self.coverage.get_data() self.has_arcs = data.has_arcs() if self.config.show_contexts: if data.measured_contexts() == {""}: self.coverage._warn("No contexts were measured") data.set_query_contexts(self.config.report_contexts) def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData: """Produce the data needed for one file's report.""" if self.has_arcs: missing_branch_arcs = analysis.missing_branch_arcs() arcs_executed = analysis.arcs_executed() if self.config.show_contexts: contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename) lines = [] for lineno, tokens in enumerate(fr.source_token_lines(), start=1): # Figure out how to mark this line. 
category = "" short_annotations = [] long_annotations = [] if lineno in analysis.excluded: category = "exc" elif lineno in analysis.missing: category = "mis" elif self.has_arcs and lineno in missing_branch_arcs: category = "par" for b in missing_branch_arcs[lineno]: if b < 0: short_annotations.append("exit") else: short_annotations.append(str(b)) long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed)) elif lineno in analysis.statements: category = "run" contexts = [] contexts_label = "" context_list = [] if category and self.config.show_contexts: contexts = human_sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ())) if contexts == [self.EMPTY]: contexts_label = self.EMPTY else: contexts_label = f"{len(contexts)} ctx" context_list = contexts lines.append(LineData( tokens=tokens, number=lineno, category=category, statement=(lineno in analysis.statements), contexts=contexts, contexts_label=contexts_label, context_list=context_list, short_annotations=short_annotations, long_annotations=long_annotations, )) file_data = FileData( relative_filename=fr.relative_filename(), nums=analysis.numbers, lines=lines, ) return file_data class FileToReport: """A file we're considering reporting.""" def __init__(self, fr: FileReporter, analysis: Analysis) -> None: self.fr = fr self.analysis = analysis self.rootname = flat_rootname(fr.relative_filename()) self.html_filename = self.rootname + ".html" HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~" @functools.lru_cache(maxsize=None) def encode_int(n: int) -> str: """Create a short HTML-safe string from an integer, using HTML_SAFE.""" if n == 0: return HTML_SAFE[0] r = [] while n: n, t = divmod(n, len(HTML_SAFE)) r.append(HTML_SAFE[t]) return "".join(r) class HtmlReporter: """HTML reporting.""" # These files will be copied from the htmlfiles directory to the output # directory. STATIC_FILES = [ "style.css", "coverage_html.js", "keybd_closed.png", "keybd_open.png", "favicon_32.png", ] def __init__(self, cov: Coverage) -> None: self.coverage = cov self.config = self.coverage.config self.directory = self.config.html_dir self.skip_covered = self.config.html_skip_covered if self.skip_covered is None: self.skip_covered = self.config.skip_covered self.skip_empty = self.config.html_skip_empty if self.skip_empty is None: self.skip_empty = self.config.skip_empty self.skipped_covered_count = 0 self.skipped_empty_count = 0 title = self.config.html_title self.extra_css: str | None if self.config.extra_css: self.extra_css = os.path.basename(self.config.extra_css) else: self.extra_css = None self.data = self.coverage.get_data() self.has_arcs = self.data.has_arcs() self.file_summaries: list[IndexInfoDict] = [] self.all_files_nums: list[Numbers] = [] self.incr = IncrementalChecker(self.directory) self.datagen = HtmlDataGeneration(self.coverage) self.totals = Numbers(precision=self.config.precision) self.directory_was_empty = False self.first_fr = None self.final_fr = None self.template_globals = { # Functions available in the templates. "escape": escape, "pair": pair, "len": len, # Constants for this report. "__url__": __url__, "__version__": coverage.__version__, "title": title, "time_stamp": format_local_datetime(datetime.datetime.now()), "extra_css": self.extra_css, "has_arcs": self.has_arcs, "show_contexts": self.config.show_contexts, # Constants for all reports. # These css classes determine which lines are highlighted by default. 
"category": { "exc": "exc show_exc", "mis": "mis show_mis", "par": "par run show_par", "run": "run", }, } self.pyfile_html_source = read_data("pyfile.html") self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals) def report(self, morfs: Iterable[TMorf] | None) -> float: """Generate an HTML report for `morfs`. `morfs` is a list of modules or file names. """ # Read the status data and check that this run used the same # global data as the last run. self.incr.read() self.incr.check_global_data(self.config, self.pyfile_html_source) # Process all the files. For each page we need to supply a link # to the next and previous page. files_to_report = [] for fr, analysis in get_analysis_to_report(self.coverage, morfs): ftr = FileToReport(fr, analysis) should = self.should_report_file(ftr) if should: files_to_report.append(ftr) else: file_be_gone(os.path.join(self.directory, ftr.html_filename)) for i, ftr in enumerate(files_to_report): if i == 0: prev_html = "index.html" else: prev_html = files_to_report[i - 1].html_filename if i == len(files_to_report) - 1: next_html = "index.html" else: next_html = files_to_report[i + 1].html_filename self.write_html_file(ftr, prev_html, next_html) if not self.all_files_nums: raise NoDataError("No data to report.") self.totals = cast(Numbers, sum(self.all_files_nums)) # Write the index file. if files_to_report: first_html = files_to_report[0].html_filename final_html = files_to_report[-1].html_filename else: first_html = final_html = "index.html" self.index_file(first_html, final_html) self.make_local_static_report_files() return self.totals.n_statements and self.totals.pc_covered def make_directory(self) -> None: """Make sure our htmlcov directory exists.""" ensure_dir(self.directory) if not os.listdir(self.directory): self.directory_was_empty = True def make_local_static_report_files(self) -> None: """Make local instances of static files for HTML report.""" # The files we provide must always be copied. for static in self.STATIC_FILES: shutil.copyfile(data_filename(static), os.path.join(self.directory, static)) # Only write the .gitignore file if the directory was originally empty. # .gitignore can't be copied from the source tree because it would # prevent the static files from being checked in. if self.directory_was_empty: with open(os.path.join(self.directory, ".gitignore"), "w") as fgi: fgi.write("# Created by coverage.py\n*\n") # The user may have extra CSS they want copied. if self.extra_css: assert self.config.extra_css is not None shutil.copyfile(self.config.extra_css, os.path.join(self.directory, self.extra_css)) def should_report_file(self, ftr: FileToReport) -> bool: """Determine if we'll report this file.""" # Get the numbers for this file. nums = ftr.analysis.numbers self.all_files_nums.append(nums) if self.skip_covered: # Don't report on 100% files. no_missing_lines = (nums.n_missing == 0) no_missing_branches = (nums.n_partial_branches == 0) if no_missing_lines and no_missing_branches: # If there's an existing file, remove it. self.skipped_covered_count += 1 return False if self.skip_empty: # Don't report on empty files. if nums.n_statements == 0: self.skipped_empty_count += 1 return False return True def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) -> None: """Generate an HTML file for one source file.""" self.make_directory() # Find out if the file on disk is already correct. 
if self.incr.can_skip_file(self.data, ftr.fr, ftr.rootname): self.file_summaries.append(self.incr.index_info(ftr.rootname)) return # Write the HTML page for this file. file_data = self.datagen.data_for_file(ftr.fr, ftr.analysis) contexts = collections.Counter(c for cline in file_data.lines for c in cline.contexts) context_codes = {y: i for (i, y) in enumerate(x[0] for x in contexts.most_common())} if context_codes: contexts_json = json.dumps( {encode_int(v): k for (k, v) in context_codes.items()}, indent=2, ) else: contexts_json = None for ldata in file_data.lines: # Build the HTML for the line. html_parts = [] for tok_type, tok_text in ldata.tokens: if tok_type == "ws": html_parts.append(escape(tok_text)) else: tok_html = escape(tok_text) or " " html_parts.append(f'{tok_html}') ldata.html = "".join(html_parts) if ldata.context_list: encoded_contexts = [ encode_int(context_codes[c_context]) for c_context in ldata.context_list ] code_width = max(len(ec) for ec in encoded_contexts) ldata.context_str = ( str(code_width) + "".join(ec.ljust(code_width) for ec in encoded_contexts) ) else: ldata.context_str = "" if ldata.short_annotations: # 202F is NARROW NO-BREAK SPACE. # 219B is RIGHTWARDS ARROW WITH STROKE. ldata.annotate = ",   ".join( f"{ldata.number} ↛ {d}" for d in ldata.short_annotations ) else: ldata.annotate = None if ldata.long_annotations: longs = ldata.long_annotations if len(longs) == 1: ldata.annotate_long = longs[0] else: ldata.annotate_long = "{:d} missed branches: {}".format( len(longs), ", ".join( f"{num:d}) {ann_long}" for num, ann_long in enumerate(longs, start=1) ), ) else: ldata.annotate_long = None css_classes = [] if ldata.category: css_classes.append( self.template_globals["category"][ldata.category], # type: ignore[index] ) ldata.css_class = " ".join(css_classes) or "pln" html_path = os.path.join(self.directory, ftr.html_filename) html = self.source_tmpl.render({ **file_data.__dict__, "contexts_json": contexts_json, "prev_html": prev_html, "next_html": next_html, }) write_html(html_path, html) # Save this file's information for the index file. index_info: IndexInfoDict = { "nums": ftr.analysis.numbers, "html_filename": ftr.html_filename, "relative_filename": ftr.fr.relative_filename(), } self.file_summaries.append(index_info) self.incr.set_index_info(ftr.rootname, index_info) def index_file(self, first_html: str, final_html: str) -> None: """Write the index.html file for this report.""" self.make_directory() index_tmpl = Templite(read_data("index.html"), self.template_globals) skipped_covered_msg = skipped_empty_msg = "" if self.skipped_covered_count: n = self.skipped_covered_count skipped_covered_msg = f"{n} file{plural(n)} skipped due to complete coverage." if self.skipped_empty_count: n = self.skipped_empty_count skipped_empty_msg = f"{n} empty file{plural(n)} skipped." html = index_tmpl.render({ "files": self.file_summaries, "totals": self.totals, "skipped_covered_msg": skipped_covered_msg, "skipped_empty_msg": skipped_empty_msg, "first_html": first_html, "final_html": final_html, }) index_file = os.path.join(self.directory, "index.html") write_html(index_file, html) print_href = stdout_link(index_file, f"file://{os.path.abspath(index_file)}") self.coverage._message(f"Wrote HTML report to {print_href}") # Write the latest hashes for next time. 
self.incr.write() class IncrementalChecker: """Logic and data to support incremental reporting.""" STATUS_FILE = "status.json" STATUS_FORMAT = 2 NOTE = ( "This file is an internal implementation detail to speed up HTML report" + " generation. Its format can change at any time. You might be looking" + " for the JSON report: https://coverage.rtfd.io/cmd.html#cmd-json" ) # The data looks like: # # { # "format": 2, # "globals": "540ee119c15d52a68a53fe6f0897346d", # "version": "4.0a1", # "files": { # "cogapp___init__": { # "hash": "e45581a5b48f879f301c0f30bf77a50c", # "index": { # "html_filename": "cogapp___init__.html", # "relative_filename": "cogapp/__init__", # "nums": [ 1, 14, 0, 0, 0, 0, 0 ] # } # }, # ... # "cogapp_whiteutils": { # "hash": "8504bb427fc488c4176809ded0277d51", # "index": { # "html_filename": "cogapp_whiteutils.html", # "relative_filename": "cogapp/whiteutils", # "nums": [ 1, 59, 0, 1, 28, 2, 2 ] # } # } # } # } def __init__(self, directory: str) -> None: self.directory = directory self.reset() def reset(self) -> None: """Initialize to empty. Causes all files to be reported.""" self.globals = "" self.files: dict[str, FileInfoDict] = {} def read(self) -> None: """Read the information we stored last time.""" usable = False try: status_file = os.path.join(self.directory, self.STATUS_FILE) with open(status_file) as fstatus: status = json.load(fstatus) except (OSError, ValueError): usable = False else: usable = True if status["format"] != self.STATUS_FORMAT: usable = False elif status["version"] != coverage.__version__: usable = False if usable: self.files = {} for filename, fileinfo in status["files"].items(): fileinfo["index"]["nums"] = Numbers(*fileinfo["index"]["nums"]) self.files[filename] = fileinfo self.globals = status["globals"] else: self.reset() def write(self) -> None: """Write the current status.""" status_file = os.path.join(self.directory, self.STATUS_FILE) files = {} for filename, fileinfo in self.files.items(): index = fileinfo["index"] index["nums"] = index["nums"].init_args() # type: ignore[typeddict-item] files[filename] = fileinfo status = { "note": self.NOTE, "format": self.STATUS_FORMAT, "version": coverage.__version__, "globals": self.globals, "files": files, } with open(status_file, "w") as fout: json.dump(status, fout, separators=(",", ":")) def check_global_data(self, *data: Any) -> None: """Check the global data that can affect incremental reporting.""" m = Hasher() for d in data: m.update(d) these_globals = m.hexdigest() if self.globals != these_globals: self.reset() self.globals = these_globals def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) -> bool: """Can we skip reporting this file? `data` is a CoverageData object, `fr` is a `FileReporter`, and `rootname` is the name being used for the file. """ m = Hasher() m.update(fr.source().encode("utf-8")) add_data_to_hash(data, fr.filename, m) this_hash = m.hexdigest() that_hash = self.file_hash(rootname) if this_hash == that_hash: # Nothing has changed to require the file to be reported again. 
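        # Illustrative note (assumed scenario): this match means neither the
        # source text nor the recorded lines/arcs changed since the last run;
        # editing the file or collecting different data would change the
        # digest and force the page to be regenerated.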
return True else: self.set_file_hash(rootname, this_hash) return False def file_hash(self, fname: str) -> str: """Get the hash of `fname`'s contents.""" return self.files.get(fname, {}).get("hash", "") # type: ignore[call-overload] def set_file_hash(self, fname: str, val: str) -> None: """Set the hash of `fname`'s contents.""" self.files.setdefault(fname, {})["hash"] = val # type: ignore[typeddict-item] def index_info(self, fname: str) -> IndexInfoDict: """Get the information for index.html for `fname`.""" return self.files.get(fname, {}).get("index", {}) # type: ignore def set_index_info(self, fname: str, info: IndexInfoDict) -> None: """Set the information for index.html for `fname`.""" self.files.setdefault(fname, {})["index"] = info # type: ignore[typeddict-item] # Helpers for templates and generating HTML def escape(t: str) -> str: """HTML-escape the text in `t`. This is only suitable for HTML text, not attributes. """ # Convert HTML special chars into HTML entities. return t.replace("&", "&").replace("<", "<") def pair(ratio: tuple[int, int]) -> str: """Format a pair of numbers so JavaScript can read them in an attribute.""" return "{} {}".format(*ratio) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1178148 coverage-7.4.4/coverage/htmlfiles/0000755000175100001770000000000000000000000017711 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/htmlfiles/coverage_html.js0000644000175100001770000005255100000000000023076 0ustar00runnerdocker00000000000000// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 // For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt // Coverage.py HTML report browser code. 
/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */ /*global coverage: true, document, window, $ */ coverage = {}; // General helpers function debounce(callback, wait) { let timeoutId = null; return function(...args) { clearTimeout(timeoutId); timeoutId = setTimeout(() => { callback.apply(this, args); }, wait); }; }; function checkVisible(element) { const rect = element.getBoundingClientRect(); const viewBottom = Math.max(document.documentElement.clientHeight, window.innerHeight); const viewTop = 30; return !(rect.bottom < viewTop || rect.top >= viewBottom); } function on_click(sel, fn) { const elt = document.querySelector(sel); if (elt) { elt.addEventListener("click", fn); } } // Helpers for table sorting function getCellValue(row, column = 0) { const cell = row.cells[column] // nosemgrep: eslint.detect-object-injection if (cell.childElementCount == 1) { const child = cell.firstElementChild if (child instanceof HTMLTimeElement && child.dateTime) { return child.dateTime } else if (child instanceof HTMLDataElement && child.value) { return child.value } } return cell.innerText || cell.textContent; } function rowComparator(rowA, rowB, column = 0) { let valueA = getCellValue(rowA, column); let valueB = getCellValue(rowB, column); if (!isNaN(valueA) && !isNaN(valueB)) { return valueA - valueB } return valueA.localeCompare(valueB, undefined, {numeric: true}); } function sortColumn(th) { // Get the current sorting direction of the selected header, // clear state on other headers and then set the new sorting direction const currentSortOrder = th.getAttribute("aria-sort"); [...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none")); if (currentSortOrder === "none") { th.setAttribute("aria-sort", th.dataset.defaultSortOrder || "ascending"); } else { th.setAttribute("aria-sort", currentSortOrder === "ascending" ? "descending" : "ascending"); } const column = [...th.parentElement.cells].indexOf(th) // Sort all rows and afterwards append them in order to move them in the DOM Array.from(th.closest("table").querySelectorAll("tbody tr")) .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (th.getAttribute("aria-sort") === "ascending" ? 1 : -1)) .forEach(tr => tr.parentElement.appendChild(tr) ); } // Find all the elements with data-shortcut attribute, and use them to assign a shortcut key. coverage.assign_shortkeys = function () { document.querySelectorAll("[data-shortcut]").forEach(element => { document.addEventListener("keypress", event => { if (event.target.tagName.toLowerCase() === "input") { return; // ignore keypress from search filter } if (event.key === element.dataset.shortcut) { element.click(); } }); }); }; // Create the events for the filter box. coverage.wire_up_filter = function () { // Cache elements. const table = document.querySelector("table.index"); const table_body_rows = table.querySelectorAll("tbody tr"); const no_rows = document.getElementById("no_rows"); // Observe filter keyevents. document.getElementById("filter").addEventListener("input", debounce(event => { // Keep running total of each metric, first index contains number of shown rows const totals = new Array(table.rows[0].cells.length).fill(0); // Accumulate the percentage as fraction totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection // Hide / show elements. 
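    // Hedged example of the shape built below (values are invented): with the
    // default index columns (statements, missing, excluded, coverage),
    // `totals` might end up as [12, 340, 25, 0, {numer: 315, denom: 340}] --
    // 12 visible rows, summed counts per column, and the coverage ratio kept
    // as a fraction so the footer percentage can be recomputed exactly.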
table_body_rows.forEach(row => { if (!row.cells[0].textContent.includes(event.target.value)) { // hide row.classList.add("hidden"); return; } // show row.classList.remove("hidden"); totals[0]++; for (let column = 1; column < totals.length; column++) { // Accumulate dynamic totals cell = row.cells[column] // nosemgrep: eslint.detect-object-injection if (column === totals.length - 1) { // Last column contains percentage const [numer, denom] = cell.dataset.ratio.split(" "); totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection } else { totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection } } }); // Show placeholder if no rows will be displayed. if (!totals[0]) { // Show placeholder, hide table. no_rows.style.display = "block"; table.style.display = "none"; return; } // Hide placeholder, show table. no_rows.style.display = null; table.style.display = null; const footer = table.tFoot.rows[0]; // Calculate new dynamic sum values based on visible rows. for (let column = 1; column < totals.length; column++) { // Get footer cell element. const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection // Set value into dynamic footer cell element. if (column === totals.length - 1) { // Percentage column uses the numerator and denominator, // and adapts to the number of decimal places. const match = /\.([0-9]+)/.exec(cell.textContent); const places = match ? match[1].length : 0; const { numer, denom } = totals[column]; // nosemgrep: eslint.detect-object-injection cell.dataset.ratio = `${numer} ${denom}`; // Check denom to prevent NaN if filtered files contain no statements cell.textContent = denom ? `${(numer * 100 / denom).toFixed(places)}%` : `${(100).toFixed(places)}%`; } else { cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection } } })); // Trigger change event on setup, to force filter on page refresh // (filter value may still be present). document.getElementById("filter").dispatchEvent(new Event("input")); }; coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2"; // Loaded on index.html coverage.index_ready = function () { coverage.assign_shortkeys(); coverage.wire_up_filter(); document.querySelectorAll("[data-sortable] th[aria-sort]").forEach( th => th.addEventListener("click", e => sortColumn(e.target)) ); // Look for a localStorage item containing previous sort settings: const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); if (stored_list) { const {column, direction} = JSON.parse(stored_list); const th = document.querySelector("[data-sortable]").tHead.rows[0].cells[column]; // nosemgrep: eslint.detect-object-injection th.setAttribute("aria-sort", direction === "ascending" ? 
"descending" : "ascending"); th.click() } // Watch for page unload events so we can save the final sort settings: window.addEventListener("unload", function () { const th = document.querySelector('[data-sortable] th[aria-sort="ascending"], [data-sortable] [aria-sort="descending"]'); if (!th) { return; } localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({ column: [...th.parentElement.cells].indexOf(th), direction: th.getAttribute("aria-sort"), })); }); on_click(".button_prev_file", coverage.to_prev_file); on_click(".button_next_file", coverage.to_next_file); on_click(".button_show_hide_help", coverage.show_hide_help); }; // -- pyfile stuff -- coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS"; coverage.pyfile_ready = function () { // If we're directed to a particular line number, highlight the line. var frag = location.hash; if (frag.length > 2 && frag[1] === "t") { document.querySelector(frag).closest(".n").classList.add("highlight"); coverage.set_sel(parseInt(frag.substr(2), 10)); } else { coverage.set_sel(0); } on_click(".button_toggle_run", coverage.toggle_lines); on_click(".button_toggle_mis", coverage.toggle_lines); on_click(".button_toggle_exc", coverage.toggle_lines); on_click(".button_toggle_par", coverage.toggle_lines); on_click(".button_next_chunk", coverage.to_next_chunk_nicely); on_click(".button_prev_chunk", coverage.to_prev_chunk_nicely); on_click(".button_top_of_page", coverage.to_top); on_click(".button_first_chunk", coverage.to_first_chunk); on_click(".button_prev_file", coverage.to_prev_file); on_click(".button_next_file", coverage.to_next_file); on_click(".button_to_index", coverage.to_index); on_click(".button_show_hide_help", coverage.show_hide_help); coverage.filters = undefined; try { coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE); } catch(err) {} if (coverage.filters) { coverage.filters = JSON.parse(coverage.filters); } else { coverage.filters = {run: false, exc: true, mis: true, par: true}; } for (cls in coverage.filters) { coverage.set_line_visibilty(cls, coverage.filters[cls]); // nosemgrep: eslint.detect-object-injection } coverage.assign_shortkeys(); coverage.init_scroll_markers(); coverage.wire_up_sticky_header(); document.querySelectorAll("[id^=ctxs]").forEach( cbox => cbox.addEventListener("click", coverage.expand_contexts) ); // Rebuild scroll markers when the window height changes. window.addEventListener("resize", coverage.build_scroll_markers); }; coverage.toggle_lines = function (event) { const btn = event.target.closest("button"); const category = btn.value const show = !btn.classList.contains("show_" + category); coverage.set_line_visibilty(category, show); coverage.build_scroll_markers(); coverage.filters[category] = show; try { localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters)); } catch(err) {} }; coverage.set_line_visibilty = function (category, should_show) { const cls = "show_" + category; const btn = document.querySelector(".button_toggle_" + category); if (btn) { if (should_show) { document.querySelectorAll("#source ." + category).forEach(e => e.classList.add(cls)); btn.classList.add(cls); } else { document.querySelectorAll("#source ." + category).forEach(e => e.classList.remove(cls)); btn.classList.remove(cls); } } }; // Return the nth line div. coverage.line_elt = function (n) { return document.getElementById("t" + n)?.closest("p"); }; // Set the selection. b and e are line numbers. coverage.set_sel = function (b, e) { // The first line selected. 
coverage.sel_begin = b; // The next line not selected. coverage.sel_end = (e === undefined) ? b+1 : e; }; coverage.to_top = function () { coverage.set_sel(0, 1); coverage.scroll_window(0); }; coverage.to_first_chunk = function () { coverage.set_sel(0, 1); coverage.to_next_chunk(); }; coverage.to_prev_file = function () { window.location = document.getElementById("prevFileLink").href; } coverage.to_next_file = function () { window.location = document.getElementById("nextFileLink").href; } coverage.to_index = function () { location.href = document.getElementById("indexLink").href; } coverage.show_hide_help = function () { const helpCheck = document.getElementById("help_panel_state") helpCheck.checked = !helpCheck.checked; } // Return a string indicating what kind of chunk this line belongs to, // or null if not a chunk. coverage.chunk_indicator = function (line_elt) { const classes = line_elt?.className; if (!classes) { return null; } const match = classes.match(/\bshow_\w+\b/); if (!match) { return null; } return match[0]; }; coverage.to_next_chunk = function () { const c = coverage; // Find the start of the next colored chunk. var probe = c.sel_end; var chunk_indicator, probe_line; while (true) { probe_line = c.line_elt(probe); if (!probe_line) { return; } chunk_indicator = c.chunk_indicator(probe_line); if (chunk_indicator) { break; } probe++; } // There's a next chunk, `probe` points to it. var begin = probe; // Find the end of this chunk. var next_indicator = chunk_indicator; while (next_indicator === chunk_indicator) { probe++; probe_line = c.line_elt(probe); next_indicator = c.chunk_indicator(probe_line); } c.set_sel(begin, probe); c.show_selection(); }; coverage.to_prev_chunk = function () { const c = coverage; // Find the end of the prev colored chunk. var probe = c.sel_begin-1; var probe_line = c.line_elt(probe); if (!probe_line) { return; } var chunk_indicator = c.chunk_indicator(probe_line); while (probe > 1 && !chunk_indicator) { probe--; probe_line = c.line_elt(probe); if (!probe_line) { return; } chunk_indicator = c.chunk_indicator(probe_line); } // There's a prev chunk, `probe` points to its last line. var end = probe+1; // Find the beginning of this chunk. var prev_indicator = chunk_indicator; while (prev_indicator === chunk_indicator) { probe--; if (probe <= 0) { return; } probe_line = c.line_elt(probe); prev_indicator = c.chunk_indicator(probe_line); } c.set_sel(probe+1, end); c.show_selection(); }; // Returns 0, 1, or 2: how many of the two ends of the selection are on // the screen right now? coverage.selection_ends_on_screen = function () { if (coverage.sel_begin === 0) { return 0; } const begin = coverage.line_elt(coverage.sel_begin); const end = coverage.line_elt(coverage.sel_end-1); return ( (checkVisible(begin) ? 1 : 0) + (checkVisible(end) ? 1 : 0) ); }; coverage.to_next_chunk_nicely = function () { if (coverage.selection_ends_on_screen() === 0) { // The selection is entirely off the screen: // Set the top line on the screen as selection. 
// This will select the top-left of the viewport // As this is most likely the span with the line number we take the parent const line = document.elementFromPoint(0, 0).parentElement; if (line.parentElement !== document.getElementById("source")) { // The element is not a source line but the header or similar coverage.select_line_or_chunk(1); } else { // We extract the line number from the id coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); } } coverage.to_next_chunk(); }; coverage.to_prev_chunk_nicely = function () { if (coverage.selection_ends_on_screen() === 0) { // The selection is entirely off the screen: // Set the lowest line on the screen as selection. // This will select the bottom-left of the viewport // As this is most likely the span with the line number we take the parent const line = document.elementFromPoint(document.documentElement.clientHeight-1, 0).parentElement; if (line.parentElement !== document.getElementById("source")) { // The element is not a source line but the header or similar coverage.select_line_or_chunk(coverage.lines_len); } else { // We extract the line number from the id coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); } } coverage.to_prev_chunk(); }; // Select line number lineno, or if it is in a colored chunk, select the // entire chunk coverage.select_line_or_chunk = function (lineno) { var c = coverage; var probe_line = c.line_elt(lineno); if (!probe_line) { return; } var the_indicator = c.chunk_indicator(probe_line); if (the_indicator) { // The line is in a highlighted chunk. // Search backward for the first line. var probe = lineno; var indicator = the_indicator; while (probe > 0 && indicator === the_indicator) { probe--; probe_line = c.line_elt(probe); if (!probe_line) { break; } indicator = c.chunk_indicator(probe_line); } var begin = probe + 1; // Search forward for the last line. probe = lineno; indicator = the_indicator; while (indicator === the_indicator) { probe++; probe_line = c.line_elt(probe); indicator = c.chunk_indicator(probe_line); } coverage.set_sel(begin, probe); } else { coverage.set_sel(lineno); } }; coverage.show_selection = function () { // Highlight the lines in the chunk document.querySelectorAll("#source .highlight").forEach(e => e.classList.remove("highlight")); for (let probe = coverage.sel_begin; probe < coverage.sel_end; probe++) { coverage.line_elt(probe).querySelector(".n").classList.add("highlight"); } coverage.scroll_to_selection(); }; coverage.scroll_to_selection = function () { // Scroll the page if the chunk isn't fully visible. if (coverage.selection_ends_on_screen() < 2) { const element = coverage.line_elt(coverage.sel_begin); coverage.scroll_window(element.offsetTop - 60); } }; coverage.scroll_window = function (to_pos) { window.scroll({top: to_pos, behavior: "smooth"}); }; coverage.init_scroll_markers = function () { // Init some variables coverage.lines_len = document.querySelectorAll("#source > p").length; // Build html coverage.build_scroll_markers(); }; coverage.build_scroll_markers = function () { const temp_scroll_marker = document.getElementById("scroll_marker") if (temp_scroll_marker) temp_scroll_marker.remove(); // Don't build markers if the window has no scroll bar. 
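    // Hedged arithmetic sketch (invented numbers): a 900px viewport over an
    // 18000px document gives marker_scale = 0.05 below, so a line whose
    // offsetTop is 4000px gets a marker near the 200px position, with its
    // height clamped to the 3px..10px range.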
if (document.body.scrollHeight <= window.innerHeight) { return; } const marker_scale = window.innerHeight / document.body.scrollHeight; const line_height = Math.min(Math.max(3, window.innerHeight / coverage.lines_len), 10); let previous_line = -99, last_mark, last_top; const scroll_marker = document.createElement("div"); scroll_marker.id = "scroll_marker"; document.getElementById("source").querySelectorAll( "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par" ).forEach(element => { const line_top = Math.floor(element.offsetTop * marker_scale); const line_number = parseInt(element.querySelector(".n a").id.substr(1)); if (line_number === previous_line + 1) { // If this solid missed block just make previous mark higher. last_mark.style.height = `${line_top + line_height - last_top}px`; } else { // Add colored line in scroll_marker block. last_mark = document.createElement("div"); last_mark.id = `m${line_number}`; last_mark.classList.add("marker"); last_mark.style.height = `${line_height}px`; last_mark.style.top = `${line_top}px`; scroll_marker.append(last_mark); last_top = line_top; } previous_line = line_number; }); // Append last to prevent layout calculation document.body.append(scroll_marker); }; coverage.wire_up_sticky_header = function () { const header = document.querySelector("header"); const header_bottom = ( header.querySelector(".content h2").getBoundingClientRect().top - header.getBoundingClientRect().top ); function updateHeader() { if (window.scrollY > header_bottom) { header.classList.add("sticky"); } else { header.classList.remove("sticky"); } } window.addEventListener("scroll", updateHeader); updateHeader(); }; coverage.expand_contexts = function (e) { var ctxs = e.target.parentNode.querySelector(".ctxs"); if (!ctxs.classList.contains("expanded")) { var ctxs_text = ctxs.textContent; var width = Number(ctxs_text[0]); ctxs.textContent = ""; for (var i = 1; i < ctxs_text.length; i += width) { key = ctxs_text.substring(i, i + width).trim(); ctxs.appendChild(document.createTextNode(contexts[key])); ctxs.appendChild(document.createElement("br")); } ctxs.classList.add("expanded"); } }; document.addEventListener("DOMContentLoaded", () => { if (document.body.classList.contains("indexfile")) { coverage.index_ready(); } else { coverage.pyfile_ready(); } }); ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/htmlfiles/favicon_32.png0000644000175100001770000000330400000000000022350 0ustar00runnerdocker00000000000000‰PNG  IHDR DคŠฦPLTE250SRS;A6œž  ___^^_CJ>OXH#cm[fef05,[[\HPCFL?JIK<:< tjep\[gT_iX7V_N]\](<;=&#$็ิ๊ึเXใYXซพแX๛๛๛ํืไัQŸฒพัญีํฤ๗๗๗๊๋๋๔สฯๆฟSฃทโY แW๙๘๘็ๆๆ฿๗ฬุ๐ฦฤฤฤฤฺตฆธ˜ขด•Ÿฒ’šฌข‡฿Z๛YV€J;zG:vD7rC5\5*๕๓๓โ๚ฯา๊ยหโปM™ฌดศฅ€“ฐ˜ค‰‹›€ฺUฺUฏษGˆO?l>0X2'ืืืฮฮฮษ฿ธฟิฐชฌฌปฯซM—ฉทหงขคฆฒลขGŒŸD‡—@‘>v„<‰7jw…v{}f&šXึ๓UัQฬP[cPรMมK™<4q:L+"๐๐๑฿฿฿ัััVฉผWงบขชฌ”คฉฉฌฆIกญภŸชฝœ–—™v” ณ‹œฉ‡Œ“‡‘œfz|3WญภถทนฒทนŸฃฃงฐ TœE‹œ˜ญ™EŽ–a‰”›ค“AŽK€ŽP€ŒHz‡}ry{Lqzjvyžx’x†กv u^jo3’mˆ€iq†hmyc/Wbc”`i{`PŸ[*’[ิ๘Zฒ๓Yœ๏Y”๎YfๆY+แXฺ๘Wˆ™SW{SีRCกRewRueRฮQPPQ+ะPวโO!ฌN›N หMI…LพูK{‹KญฮJ\[JjYH ฒGจภE–ซE7E/ฑCŒRBfRBr@|ฐ>›IDAT8หb€&nNC%Nn&  ฬ.ร›๋ccใ9นฝ]฿˜M ”ฏ“)์อŽŽ/!ศƒ,ฏย\เi 6ณณขณ"#ณ๒ย๒]piงูแYQแ™ู|ยp๒Pูœค)S๎ส — TมตŸช?'i๗{7๛๒S?ฟฌ ˆŠˆVT+*K;•์/ฒฒฒJจ(/J(ซT!ศ๒@R`๗yํ›ๅfeีSVnoฟ๓Xรํ‘‘™™:@K˜ุ}ม๒G+€ฺ&$ุWี5งYป7mชyตึˆ‰[ไ˜ ๙n{ทm็š=,-,,,oด•๚คผ็ใbเไฐฑ(฿c๏V๕ ฬายคbษRsWM\s€ฺท”U-ถดถkด ณถpoM5ทuเgPp*(ฉ\Wดฝ/ฉฒฮ2=:lีŠๅ-i mฎๆŽฌ ,1@ฮLุ็e3s™ๅฝทึiหVYพ๚wรสIถ>b ข6ฆฆพ{vmน$๘‘วผ–ึ๎๎i--ึญAfม8'อ™ี ๖๊Y+๎N‡8าาruฒร๓5 ,ž๋ซซKภaนตู๚๒๙Œo ,A*ฌำ$ฮจc0pvž Žk฿›CC2nฝYb 
[binary PNG image data omitted] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/htmlfiles/index.html0000644000175100001770000001243000000000000021706 0ustar00runnerdocker00000000000000{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #} {# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #} {{ title|escape }} {% if extra_css %} {% endif %}

{{ title|escape }}: {{totals.pc_covered_str}}%

coverage.py v{{__version__}}, created at {{ time_stamp }}

{# The title="" attr doesn"t work in Safari. #} {% if has_arcs %} {% endif %} {% for file in files %} {% if has_arcs %} {% endif %} {% endfor %} {% if has_arcs %} {% endif %}
Module statements missing excluded branches partial coverage
{{file.relative_filename}} {{file.nums.n_statements}} {{file.nums.n_missing}} {{file.nums.n_excluded}} {{file.nums.n_branches}} {{file.nums.n_partial_branches}} {{file.nums.pc_covered_str}}%
Total {{totals.n_statements}} {{totals.n_missing}} {{totals.n_excluded}} {{totals.n_branches}} {{totals.n_partial_branches}} {{totals.pc_covered_str}}%

No items found using the specified filter.

{% if skipped_covered_msg %}

{{ skipped_covered_msg }}

{% endif %} {% if skipped_empty_msg %}

{{ skipped_empty_msg }}

{% endif %}
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/htmlfiles/keybd_closed.png0000644000175100001770000002145400000000000023054 0ustar00runnerdocker00000000000000[binary PNG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/htmlfiles/keybd_open.png0000644000175100001770000002145300000000000022543 0ustar00runnerdocker00000000000000[binary PNG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/htmlfiles/pyfile.html0000644000175100001770000001444200000000000022074 0ustar00runnerdocker00000000000000{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #} {# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #} Coverage for {{relative_filename|escape}}: {{nums.pc_covered_str}}% {% if extra_css %} {% endif %} {% if contexts_json %} {% endif %}
{% for line in lines -%} {% joined %}

{{line.number}} {{line.html}}  {% if line.context_list %} {% endif %} {# Things that should float right in the line. #} {% if line.annotate %} {{line.annotate}} {{line.annotate_long}} {% endif %} {% if line.contexts %} {% endif %} {# Things that should appear below the line. #} {% if line.context_str %} {{ line.context_str }} {% endif %}

{% endjoined %} {% endfor %}
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/htmlfiles/style.css0000644000175100001770000003015200000000000021564 0ustar00runnerdocker00000000000000@charset "UTF-8"; /* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ /* Don't edit this .css file. Edit the .scss file instead! */ html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; } body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; } @media (prefers-color-scheme: dark) { body { background: #1e1e1e; } } @media (prefers-color-scheme: dark) { body { color: #eee; } } html > body { font-size: 16px; } a:active, a:focus { outline: 2px dashed #007acc; } p { font-size: .875em; line-height: 1.4em; } table { border-collapse: collapse; } td { vertical-align: top; } table tr.hidden { display: none !important; } p#no_rows { display: none; font-size: 1.2em; } a.nav { text-decoration: none; color: inherit; } a.nav:hover { text-decoration: underline; color: inherit; } .hidden { display: none; } header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; } @media (prefers-color-scheme: dark) { header { background: black; } } @media (prefers-color-scheme: dark) { header { border-color: #333; } } header .content { padding: 1rem 3.5rem; } header h2 { margin-top: .5em; font-size: 1em; } header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; } @media (prefers-color-scheme: dark) { header p.text { color: #aaa; } } header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; } header.sticky .text { display: none; } header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; } header.sticky .content { padding: 0.5rem 3.5rem; } header.sticky .content p { font-size: 1em; } header.sticky ~ #source { padding-top: 6.5em; } main { position: relative; z-index: 1; } footer { margin: 1rem 3.5rem; } footer .content { padding: 0; color: #666; font-style: italic; } @media (prefers-color-scheme: dark) { footer .content { color: #aaa; } } #index { margin: 1rem 0 0 3.5rem; } h1 { font-size: 1.25em; display: inline-block; } #filter_container { float: right; margin: 0 2em 0 0; } #filter_container input { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; } @media (prefers-color-scheme: dark) { #filter_container input { border-color: #444; } } @media (prefers-color-scheme: dark) { #filter_container input { background: #1e1e1e; } } @media (prefers-color-scheme: dark) { #filter_container input { color: #eee; } } #filter_container input:focus { border-color: #007acc; } header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } @media (prefers-color-scheme: dark) { header button { border-color: #444; } } header button:active, header button:focus { outline: 2px dashed #007acc; } header button.run { background: #eeffee; } @media (prefers-color-scheme: dark) { header button.run { background: #373d29; } } header button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 
.1em; } @media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } } header button.mis { background: #ffeeee; } @media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } } header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } } header button.exc { background: #f7f7f7; } @media (prefers-color-scheme: dark) { header button.exc { background: #333; } } header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } } header button.par { background: #ffffd5; } @media (prefers-color-scheme: dark) { header button.par { background: #650; } } header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } } #help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; } #source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; } #help_panel_wrapper { float: right; position: relative; } #keyboard_icon { margin: 5px; } #help_panel_state { display: none; } #help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; } #help_panel .keyhelp p { margin-top: .75em; } #help_panel .legend { font-style: italic; margin-bottom: 1em; } .indexfile #help_panel { width: 25em; } .pyfile #help_panel { width: 18em; } #help_panel_state:checked ~ #help_panel { display: block; } kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; } #source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; } #source p { position: relative; white-space: pre; } #source p * { box-sizing: border-box; } #source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; } @media (prefers-color-scheme: dark) { #source p .n { color: #777; } } #source p .n.highlight { background: #ffdd00; } #source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; } @media (prefers-color-scheme: dark) { #source p .n a { color: #777; } } #source p .n a:hover { text-decoration: underline; color: #999; } @media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } } #source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; } @media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } } #source p .t:hover { background: #f2f2f2; } @media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } } #source p .t:hover ~ .r .annotate.long { display: block; } #source p .t .com { color: #008000; font-style: italic; line-height: 1px; } @media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } } #source p .t .key { font-weight: bold; line-height: 1px; } #source p .t .str { color: #0451a5; } @media (prefers-color-scheme: dark) { #source p .t .str { color: #9cdcfe; } } #source p.mis .t { border-left: 0.2em solid 
#ff0000; } #source p.mis.show_mis .t { background: #fdd; } @media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } } #source p.mis.show_mis .t:hover { background: #f2d2d2; } @media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } } #source p.run .t { border-left: 0.2em solid #00dd00; } #source p.run.show_run .t { background: #dfd; } @media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } } #source p.run.show_run .t:hover { background: #d2f2d2; } @media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } } #source p.exc .t { border-left: 0.2em solid #808080; } #source p.exc.show_exc .t { background: #eee; } @media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } } #source p.exc.show_exc .t:hover { background: #e2e2e2; } @media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } } #source p.par .t { border-left: 0.2em solid #bbbb00; } #source p.par.show_par .t { background: #ffa; } @media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } } #source p.par.show_par .t:hover { background: #f2f2a2; } @media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } } #source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } #source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; } @media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } } #source p .annotate.short:hover ~ .long { display: block; } #source p .annotate.long { width: 30em; right: 2.5em; } #source p input { display: none; } #source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; } #source p input ~ .r label.ctx::before { content: "โ–ถ "; } #source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; } @media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } } @media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } } #source p input:checked ~ .r label.ctx { background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; } @media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } } @media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } } #source p input:checked ~ .r label.ctx::before { content: "โ–ผ "; } #source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; } #source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; } @media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } } #source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; } @media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } } #index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; } #index table.index { margin-left: -.5em; } 
#index td, #index th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid #eee; } @media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } } #index td.name, #index th.name { text-align: left; width: auto; } #index th { font-style: italic; color: #333; cursor: pointer; } @media (prefers-color-scheme: dark) { #index th { color: #ddd; } } #index th:hover { background: #eee; } @media (prefers-color-scheme: dark) { #index th:hover { background: #333; } } #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; } @media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } } #index th[aria-sort="ascending"]::after { font-family: sans-serif; content: " โ†‘"; } #index th[aria-sort="descending"]::after { font-family: sans-serif; content: " โ†“"; } #index td.name a { text-decoration: none; color: inherit; } #index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } #index tr.file:hover { background: #eee; } @media (prefers-color-scheme: dark) { #index tr.file:hover { background: #333; } } #index tr.file:hover td.name { text-decoration: underline; color: inherit; } #scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; } @media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } } @media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } } #scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; } @media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/htmlfiles/style.scss0000644000175100001770000004167600000000000021764 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ // CSS styles for coverage.py HTML reports. // When you edit this file, you need to run "make css" to get the CSS file // generated, and then check in both the .scss and the .css files. // When working on the file, this command is useful: // sass --watch --style=compact --sourcemap=none --no-cache coverage/htmlfiles/style.scss:htmlcov/style.css // // OR you can process sass purely in python with `pip install pysass`, then: // pysassc --style=compact coverage/htmlfiles/style.scss coverage/htmlfiles/style.css // Ignore this comment, it's for the CSS output file: /* Don't edit this .css file. Edit the .scss file instead! 
*/ // Dimensions $left-gutter: 3.5rem; // // Declare colors and variables // $font-normal: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; $font-code: SFMono-Regular, Menlo, Monaco, Consolas, monospace; $off-button-lighten: 50%; $hover-dark-amt: 95%; $focus-color: #007acc; $mis-color: #ff0000; $run-color: #00dd00; $exc-color: #808080; $par-color: #bbbb00; $light-bg: #fff; $light-fg: #000; $light-gray1: #f8f8f8; $light-gray2: #eee; $light-gray3: #ccc; $light-gray4: #999; $light-gray5: #666; $light-gray6: #333; $light-pln-bg: $light-bg; $light-mis-bg: #fdd; $light-run-bg: #dfd; $light-exc-bg: $light-gray2; $light-par-bg: #ffa; $light-token-com: #008000; $light-token-str: #0451a5; $light-context-bg-color: #d0e8ff; $dark-bg: #1e1e1e; $dark-fg: #eee; $dark-gray1: #222; $dark-gray2: #333; $dark-gray3: #444; $dark-gray4: #777; $dark-gray5: #aaa; $dark-gray6: #ddd; $dark-pln-bg: $dark-bg; $dark-mis-bg: #4b1818; $dark-run-bg: #373d29; $dark-exc-bg: $dark-gray2; $dark-par-bg: #650; $dark-token-com: #6a9955; $dark-token-str: #9cdcfe; $dark-context-bg-color: #056; // // Mixins and utilities // @mixin background-dark($color) { @media (prefers-color-scheme: dark) { background: $color; } } @mixin color-dark($color) { @media (prefers-color-scheme: dark) { color: $color; } } @mixin border-color-dark($color) { @media (prefers-color-scheme: dark) { border-color: $color; } } // Add visual outline to navigable elements on focus improve accessibility. @mixin focus-border { &:active, &:focus { outline: 2px dashed $focus-color; } } // Page-wide styles html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; } // Set baseline grid to 16 pt. 
body { font-family: $font-normal; font-size: 1em; background: $light-bg; color: $light-fg; @include background-dark($dark-bg); @include color-dark($dark-fg); } html>body { font-size: 16px; } a { @include focus-border; } p { font-size: .875em; line-height: 1.4em; } table { border-collapse: collapse; } td { vertical-align: top; } table tr.hidden { display: none !important; } p#no_rows { display: none; font-size: 1.2em; } a.nav { text-decoration: none; color: inherit; &:hover { text-decoration: underline; color: inherit; } } .hidden { display: none; } // Page structure header { background: $light-gray1; @include background-dark(black); width: 100%; z-index: 2; border-bottom: 1px solid $light-gray3; @include border-color-dark($dark-gray2); .content { padding: 1rem $left-gutter; } h2 { margin-top: .5em; font-size: 1em; } p.text { margin: .5em 0 -.5em; color: $light-gray5; @include color-dark($dark-gray5); font-style: italic; } &.sticky { position: fixed; left: 0; right: 0; height: 2.5em; .text { display: none; } h1, h2 { font-size: 1em; margin-top: 0; display: inline-block; } .content { padding: .5rem $left-gutter; p { font-size: 1em; } } & ~ #source { padding-top: 6.5em; } } } main { position: relative; z-index: 1; } footer { margin: 1rem $left-gutter; .content { padding: 0; color: $light-gray5; @include color-dark($dark-gray5); font-style: italic; } } #index { margin: 1rem 0 0 $left-gutter; } // Header styles h1 { font-size: 1.25em; display: inline-block; } #filter_container { float: right; margin: 0 2em 0 0; input { width: 10em; padding: 0.2em 0.5em; border: 2px solid $light-gray3; background: $light-bg; color: $light-fg; @include border-color-dark($dark-gray3); @include background-dark($dark-bg); @include color-dark($dark-fg); &:focus { border-color: $focus-color; } } } header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: $light-gray3; @include border-color-dark($dark-gray3); @include focus-border; &.run { background: mix($light-run-bg, $light-bg, $off-button-lighten); @include background-dark($dark-run-bg); &.show_run { background: $light-run-bg; @include background-dark($dark-run-bg); border: 2px solid $run-color; margin: 0 .1em; } } &.mis { background: mix($light-mis-bg, $light-bg, $off-button-lighten); @include background-dark($dark-mis-bg); &.show_mis { background: $light-mis-bg; @include background-dark($dark-mis-bg); border: 2px solid $mis-color; margin: 0 .1em; } } &.exc { background: mix($light-exc-bg, $light-bg, $off-button-lighten); @include background-dark($dark-exc-bg); &.show_exc { background: $light-exc-bg; @include background-dark($dark-exc-bg); border: 2px solid $exc-color; margin: 0 .1em; } } &.par { background: mix($light-par-bg, $light-bg, $off-button-lighten); @include background-dark($dark-par-bg); &.show_par { background: $light-par-bg; @include background-dark($dark-par-bg); border: 2px solid $par-color; margin: 0 .1em; } } } // Yellow post-it things. %popup { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; } // Yellow post-it's in the text listings. 
%in-text-popup { @extend %popup; white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; } // Help panel #help_panel_wrapper { float: right; position: relative; } #keyboard_icon { margin: 5px; } #help_panel_state { display: none; } #help_panel { @extend %popup; top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; .keyhelp p { margin-top: .75em; } .legend { font-style: italic; margin-bottom: 1em; } .indexfile & { width: 25em; } .pyfile & { width: 18em; } #help_panel_state:checked ~ & { display: block; } } kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: $font-code; font-weight: bold; background: #eee; border-radius: 3px; } // Source file styles // The slim bar at the left edge of the source lines, colored by coverage. $border-indicator-width: .2em; #source { padding: 1em 0 1em $left-gutter; font-family: $font-code; p { // position relative makes position:absolute pop-ups appear in the right place. position: relative; white-space: pre; * { box-sizing: border-box; } .n { float: left; text-align: right; width: $left-gutter; box-sizing: border-box; margin-left: -$left-gutter; padding-right: 1em; color: $light-gray4; user-select: none; @include color-dark($dark-gray4); &.highlight { background: #ffdd00; } a { // Make anchors to the line scroll the line to be // visible beneath the fixed-position header. scroll-margin-top: 6em; text-decoration: none; color: $light-gray4; @include color-dark($dark-gray4); &:hover { text-decoration: underline; color: $light-gray4; @include color-dark($dark-gray4); } } } .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: .5em - $border-indicator-width; border-left: $border-indicator-width solid $light-bg; @include border-color-dark($dark-bg); &:hover { background: mix($light-pln-bg, $light-fg, $hover-dark-amt); @include background-dark(mix($dark-pln-bg, $dark-fg, $hover-dark-amt)); & ~ .r .annotate.long { display: block; } } // Syntax coloring .com { color: $light-token-com; @include color-dark($dark-token-com); font-style: italic; line-height: 1px; } .key { font-weight: bold; line-height: 1px; } .str { color: $light-token-str; @include color-dark($dark-token-str); } } &.mis { .t { border-left: $border-indicator-width solid $mis-color; } &.show_mis .t { background: $light-mis-bg; @include background-dark($dark-mis-bg); &:hover { background: mix($light-mis-bg, $light-fg, $hover-dark-amt); @include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt)); } } } &.run { .t { border-left: $border-indicator-width solid $run-color; } &.show_run .t { background: $light-run-bg; @include background-dark($dark-run-bg); &:hover { background: mix($light-run-bg, $light-fg, $hover-dark-amt); @include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt)); } } } &.exc { .t { border-left: $border-indicator-width solid $exc-color; } &.show_exc .t { background: $light-exc-bg; @include background-dark($dark-exc-bg); &:hover { background: mix($light-exc-bg, $light-fg, $hover-dark-amt); @include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt)); } } } &.par { .t { border-left: $border-indicator-width solid $par-color; } &.show_par .t { background: $light-par-bg; @include background-dark($dark-par-bg); &:hover { background: mix($light-par-bg, $light-fg, $hover-dark-amt); @include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt)); } } } .r { position: absolute; top: 0; right: 2.5em; font-family: $font-normal; } 
.annotate { font-family: $font-normal; color: $light-gray5; @include color-dark($dark-gray6); padding-right: .5em; &.short:hover ~ .long { display: block; } &.long { @extend %in-text-popup; width: 30em; right: 2.5em; } } input { display: none; & ~ .r label.ctx { cursor: pointer; border-radius: .25em; &::before { content: "โ–ถ "; } &:hover { background: mix($light-context-bg-color, $light-bg, $off-button-lighten); @include background-dark(mix($dark-context-bg-color, $dark-bg, $off-button-lighten)); color: $light-gray5; @include color-dark($dark-gray5); } } &:checked ~ .r label.ctx { background: $light-context-bg-color; @include background-dark($dark-context-bg-color); color: $light-gray5; @include color-dark($dark-gray5); border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; &::before { content: "โ–ผ "; } } &:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; } } label.ctx { color: $light-gray4; @include color-dark($dark-gray4); display: inline-block; padding: 0 .5em; font-size: .8333em; // 10/12 } .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: $font-normal; white-space: nowrap; background: $light-context-bg-color; @include background-dark($dark-context-bg-color); border-radius: .25em; margin-right: 1.75em; text-align: right; } } } // index styles #index { font-family: $font-code; font-size: 0.875em; table.index { margin-left: -.5em; } td, th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid $light-gray2; @include border-color-dark($dark-gray2); &.name { text-align: left; width: auto; } } th { font-style: italic; color: $light-gray6; @include color-dark($dark-gray6); cursor: pointer; &:hover { background: $light-gray2; @include background-dark($dark-gray2); } &[aria-sort="ascending"], &[aria-sort="descending"] { white-space: nowrap; background: $light-gray2; @include background-dark($dark-gray2); padding-left: .5em; } &[aria-sort="ascending"]::after { font-family: sans-serif; content: " โ†‘"; } &[aria-sort="descending"]::after { font-family: sans-serif; content: " โ†“"; } } td.name a { text-decoration: none; color: inherit; } tr.total td, tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } tr.file:hover { background: $light-gray2; @include background-dark($dark-gray2); td.name { text-decoration: underline; color: inherit; } } } // scroll marker styles #scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: $light-bg; border-left: 1px solid $light-gray2; @include background-dark($dark-bg); @include border-color-dark($dark-gray2); will-change: transform; // for faster scrolling of fixed element in Chrome .marker { background: $light-gray3; @include background-dark($dark-gray3); position: absolute; min-height: 3px; width: 100%; } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/inorout.py0000644000175100001770000005621200000000000020001 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Determining whether files are being measured/reported or not.""" from __future__ import annotations import importlib.util import inspect import itertools import os import platform import re import sys import sysconfig import traceback from types import FrameType, ModuleType 
from typing import ( cast, Any, Iterable, TYPE_CHECKING, ) from coverage import env from coverage.disposition import FileDisposition, disposition_init from coverage.exceptions import CoverageException, PluginError from coverage.files import TreeMatcher, GlobMatcher, ModuleMatcher from coverage.files import prep_patterns, find_python_files, canonical_filename from coverage.misc import sys_modules_saved from coverage.python import source_for_file, source_for_morf from coverage.types import TFileDisposition, TMorf, TWarnFn, TDebugCtl if TYPE_CHECKING: from coverage.config import CoverageConfig from coverage.plugin_support import Plugins # Pypy has some unusual stuff in the "stdlib". Consider those locations # when deciding where the stdlib is. These modules are not used for anything, # they are modules importable from the pypy lib directories, so that we can # find those directories. modules_we_happen_to_have: list[ModuleType] = [ inspect, itertools, os, platform, re, sysconfig, traceback, ] if env.PYPY: try: import _structseq modules_we_happen_to_have.append(_structseq) except ImportError: pass try: import _pypy_irc_topic modules_we_happen_to_have.append(_pypy_irc_topic) except ImportError: pass def canonical_path(morf: TMorf, directory: bool = False) -> str: """Return the canonical path of the module or file `morf`. If the module is a package, then return its directory. If it is a module, then return its file, unless `directory` is True, in which case return its enclosing directory. """ morf_path = canonical_filename(source_for_morf(morf)) if morf_path.endswith("__init__.py") or directory: morf_path = os.path.split(morf_path)[0] return morf_path def name_for_module(filename: str, frame: FrameType | None) -> str: """Get the name of the module for a filename and frame. For configurability's sake, we allow __main__ modules to be matched by their importable name. If loaded via runpy (aka -m), we can usually recover the "original" full dotted module name, otherwise, we resort to interpreting the file name to get the module's name. In the case that the module name can't be determined, None is returned. """ module_globals = frame.f_globals if frame is not None else {} dunder_name: str = module_globals.get("__name__", None) if isinstance(dunder_name, str) and dunder_name != "__main__": # This is the usual case: an imported module. return dunder_name spec = module_globals.get("__spec__", None) if spec: fullname = spec.name if isinstance(fullname, str) and fullname != "__main__": # Module loaded via: runpy -m return fullname # Script as first argument to Python command line. inspectedname = inspect.getmodulename(filename) if inspectedname is not None: return inspectedname else: return dunder_name def module_is_namespace(mod: ModuleType) -> bool: """Is the module object `mod` a PEP420 namespace module?""" return hasattr(mod, "__path__") and getattr(mod, "__file__", None) is None def module_has_file(mod: ModuleType) -> bool: """Does the module object `mod` have an existing __file__ ?""" mod__file__ = getattr(mod, "__file__", None) if mod__file__ is None: return False return os.path.exists(mod__file__) def file_and_path_for_module(modulename: str) -> tuple[str | None, list[str]]: """Find the file and search path for `modulename`. Returns: filename: The filename of the module, or None. path: A list (possibly empty) of directories to find submodules in. 
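    For a regular package, `filename` is the package's ``__init__.py`` and
    `path` contains the package directory. For a module that can't be found
    at all, both results come back empty::

        >>> file_and_path_for_module("no_such_module")
        (None, [])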
""" filename = None path = [] try: spec = importlib.util.find_spec(modulename) except Exception: pass else: if spec is not None: filename = spec.origin path = list(spec.submodule_search_locations or ()) return filename, path def add_stdlib_paths(paths: set[str]) -> None: """Add paths where the stdlib can be found to the set `paths`.""" # Look at where some standard modules are located. That's the # indication for "installed with the interpreter". In some # environments (virtualenv, for example), these modules may be # spread across a few locations. Look at all the candidate modules # we've imported, and take all the different ones. for m in modules_we_happen_to_have: if hasattr(m, "__file__"): paths.add(canonical_path(m, directory=True)) def add_third_party_paths(paths: set[str]) -> None: """Add locations for third-party packages to the set `paths`.""" # Get the paths that sysconfig knows about. scheme_names = set(sysconfig.get_scheme_names()) for scheme in scheme_names: # https://foss.heptapod.net/pypy/pypy/-/issues/3433 better_scheme = "pypy_posix" if scheme == "pypy" else scheme if os.name in better_scheme.split("_"): config_paths = sysconfig.get_paths(scheme) for path_name in ["platlib", "purelib", "scripts"]: paths.add(config_paths[path_name]) def add_coverage_paths(paths: set[str]) -> None: """Add paths where coverage.py code can be found to the set `paths`.""" cover_path = canonical_path(__file__, directory=True) paths.add(cover_path) if env.TESTING: # Don't include our own test code. paths.add(os.path.join(cover_path, "tests")) class InOrOut: """Machinery for determining what files to measure.""" def __init__( self, config: CoverageConfig, warn: TWarnFn, debug: TDebugCtl | None, include_namespace_packages: bool, ) -> None: self.warn = warn self.debug = debug self.include_namespace_packages = include_namespace_packages self.source: list[str] = [] self.source_pkgs: list[str] = [] self.source_pkgs.extend(config.source_pkgs) for src in config.source or []: if os.path.isdir(src): self.source.append(canonical_filename(src)) else: self.source_pkgs.append(src) self.source_pkgs_unmatched = self.source_pkgs[:] self.include = prep_patterns(config.run_include) self.omit = prep_patterns(config.run_omit) # The directories for files considered "installed with the interpreter". self.pylib_paths: set[str] = set() if not config.cover_pylib: add_stdlib_paths(self.pylib_paths) # To avoid tracing the coverage.py code itself, we skip anything # located where we are. self.cover_paths: set[str] = set() add_coverage_paths(self.cover_paths) # Find where third-party packages are installed. self.third_paths: set[str] = set() add_third_party_paths(self.third_paths) def _debug(msg: str) -> None: if self.debug: self.debug.write(msg) # The matchers for should_trace. 
# Generally useful information _debug("sys.path:" + "".join(f"\n {p}" for p in sys.path)) # Create the matchers we need for should_trace self.source_match = None self.source_pkgs_match = None self.pylib_match = None self.include_match = self.omit_match = None if self.source or self.source_pkgs: against = [] if self.source: self.source_match = TreeMatcher(self.source, "source") against.append(f"trees {self.source_match!r}") if self.source_pkgs: self.source_pkgs_match = ModuleMatcher(self.source_pkgs, "source_pkgs") against.append(f"modules {self.source_pkgs_match!r}") _debug("Source matching against " + " and ".join(against)) else: if self.pylib_paths: self.pylib_match = TreeMatcher(self.pylib_paths, "pylib") _debug(f"Python stdlib matching: {self.pylib_match!r}") if self.include: self.include_match = GlobMatcher(self.include, "include") _debug(f"Include matching: {self.include_match!r}") if self.omit: self.omit_match = GlobMatcher(self.omit, "omit") _debug(f"Omit matching: {self.omit_match!r}") self.cover_match = TreeMatcher(self.cover_paths, "coverage") _debug(f"Coverage code matching: {self.cover_match!r}") self.third_match = TreeMatcher(self.third_paths, "third") _debug(f"Third-party lib matching: {self.third_match!r}") # Check if the source we want to measure has been installed as a # third-party package. # Is the source inside a third-party area? self.source_in_third_paths = set() with sys_modules_saved(): for pkg in self.source_pkgs: try: modfile, path = file_and_path_for_module(pkg) _debug(f"Imported source package {pkg!r} as {modfile!r}") except CoverageException as exc: _debug(f"Couldn't import source package {pkg!r}: {exc}") continue if modfile: if self.third_match.match(modfile): _debug( f"Source in third-party: source_pkg {pkg!r} at {modfile!r}", ) self.source_in_third_paths.add(canonical_path(source_for_file(modfile))) else: for pathdir in path: if self.third_match.match(pathdir): _debug( f"Source in third-party: {pkg!r} path directory at {pathdir!r}", ) self.source_in_third_paths.add(pathdir) for src in self.source: if self.third_match.match(src): _debug(f"Source in third-party: source directory {src!r}") self.source_in_third_paths.add(src) self.source_in_third_match = TreeMatcher(self.source_in_third_paths, "source_in_third") _debug(f"Source in third-party matching: {self.source_in_third_match}") self.plugins: Plugins self.disp_class: type[TFileDisposition] = FileDisposition def should_trace(self, filename: str, frame: FrameType | None = None) -> TFileDisposition: """Decide whether to trace execution in `filename`, with a reason. This function is called from the trace function. As each new file name is encountered, this function determines whether it is traced or not. Returns a FileDisposition object. """ original_filename = filename disp = disposition_init(self.disp_class, filename) def nope(disp: TFileDisposition, reason: str) -> TFileDisposition: """Simple helper to make it easy to return NO.""" disp.trace = False disp.reason = reason return disp if original_filename.startswith("<"): return nope(disp, "original file name is not real") if frame is not None: # Compiled Python files have two file names: frame.f_code.co_filename is # the file name at the time the .pyc was compiled. The second name is # __file__, which is where the .pyc was actually loaded from. Since # .pyc files can be moved after compilation (for example, by being # installed), we look for __file__ in the frame and prefer it to the # co_filename value. 
dunder_file = frame.f_globals and frame.f_globals.get("__file__") if dunder_file: filename = source_for_file(dunder_file) if original_filename and not original_filename.startswith("<"): orig = os.path.basename(original_filename) if orig != os.path.basename(filename): # Files shouldn't be renamed when moved. This happens when # exec'ing code. If it seems like something is wrong with # the frame's file name, then just use the original. filename = original_filename if not filename: # Empty string is pretty useless. return nope(disp, "empty string isn't a file name") if filename.startswith("memory:"): return nope(disp, "memory isn't traceable") if filename.startswith("<"): # Lots of non-file execution is represented with artificial # file names like "", "", or # "". Don't ever trace these executions, since we # can't do anything with the data later anyway. return nope(disp, "file name is not real") canonical = canonical_filename(filename) disp.canonical_filename = canonical # Try the plugins, see if they have an opinion about the file. plugin = None for plugin in self.plugins.file_tracers: if not plugin._coverage_enabled: continue try: file_tracer = plugin.file_tracer(canonical) if file_tracer is not None: file_tracer._coverage_plugin = plugin disp.trace = True disp.file_tracer = file_tracer if file_tracer.has_dynamic_source_filename(): disp.has_dynamic_filename = True else: disp.source_filename = canonical_filename( file_tracer.source_filename(), ) break except Exception: plugin_name = plugin._coverage_plugin_name tb = traceback.format_exc() self.warn(f"Disabling plug-in {plugin_name!r} due to an exception:\n{tb}") plugin._coverage_enabled = False continue else: # No plugin wanted it: it's Python. disp.trace = True disp.source_filename = canonical if not disp.has_dynamic_filename: if not disp.source_filename: raise PluginError( f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'", ) reason = self.check_include_omit_etc(disp.source_filename, frame) if reason: nope(disp, reason) return disp def check_include_omit_etc(self, filename: str, frame: FrameType | None) -> str | None: """Check a file name against the include, omit, etc, rules. Returns a string or None. String means, don't trace, and is the reason why. None means no reason found to not trace. """ modulename = name_for_module(filename, frame) # If the user specified source or include, then that's authoritative # about the outer bound of what to measure and we don't have to apply # any canned exclusions. If they didn't, then we have to exclude the # stdlib and coverage.py directories. if self.source_match or self.source_pkgs_match: extra = "" ok = False if self.source_pkgs_match: if self.source_pkgs_match.match(modulename): ok = True if modulename in self.source_pkgs_unmatched: self.source_pkgs_unmatched.remove(modulename) else: extra = f"module {modulename!r} " if not ok and self.source_match: if self.source_match.match(filename): ok = True if not ok: return extra + "falls outside the --source spec" if self.third_match.match(filename) and not self.source_in_third_match.match(filename): return "inside --source, but is third-party" elif self.include_match: if not self.include_match.match(filename): return "falls outside the --include trees" else: # We exclude the coverage.py code itself, since a little of it # will be measured otherwise. 
if self.cover_match.match(filename): return "is part of coverage.py" # If we aren't supposed to trace installed code, then check if this # is near the Python standard library and skip it if so. if self.pylib_match and self.pylib_match.match(filename): return "is in the stdlib" # Exclude anything in the third-party installation areas. if self.third_match.match(filename): return "is a third-party module" # Check the file against the omit pattern. if self.omit_match and self.omit_match.match(filename): return "is inside an --omit pattern" # No point tracing a file we can't later write to SQLite. try: filename.encode("utf-8") except UnicodeEncodeError: return "non-encodable filename" # No reason found to skip this file. return None def warn_conflicting_settings(self) -> None: """Warn if there are settings that conflict.""" if self.include: if self.source or self.source_pkgs: self.warn("--include is ignored because --source is set", slug="include-ignored") def warn_already_imported_files(self) -> None: """Warn if files have already been imported that we will be measuring.""" if self.include or self.source or self.source_pkgs: warned = set() for mod in list(sys.modules.values()): filename = getattr(mod, "__file__", None) if filename is None: continue if filename in warned: continue if len(getattr(mod, "__path__", ())) > 1: # A namespace package, which confuses this code, so ignore it. continue disp = self.should_trace(filename) if disp.has_dynamic_filename: # A plugin with dynamic filenames: the Python file # shouldn't cause a warning, since it won't be the subject # of tracing anyway. continue if disp.trace: msg = f"Already imported a file that will be measured: {filename}" self.warn(msg, slug="already-imported") warned.add(filename) elif self.debug and self.debug.should("trace"): self.debug.write( "Didn't trace already imported file {!r}: {}".format( disp.original_filename, disp.reason, ), ) def warn_unimported_source(self) -> None: """Warn about source packages that were of interest, but never traced.""" for pkg in self.source_pkgs_unmatched: self._warn_about_unmeasured_code(pkg) def _warn_about_unmeasured_code(self, pkg: str) -> None: """Warn about a package or module that we never traced. `pkg` is a string, the name of the package or module. """ mod = sys.modules.get(pkg) if mod is None: self.warn(f"Module {pkg} was never imported.", slug="module-not-imported") return if module_is_namespace(mod): # A namespace package. It's OK for this not to have been traced, # since there is no code directly in it. return if not module_has_file(mod): self.warn(f"Module {pkg} has no Python source.", slug="module-not-python") return # The module was in sys.modules, and seems like a module with code, but # we never measured it. I guess that means it was imported before # coverage even started. msg = f"Module {pkg} was previously imported, but not measured" self.warn(msg, slug="module-not-measured") def find_possibly_unexecuted_files(self) -> Iterable[tuple[str, str | None]]: """Find files in the areas of interest that might be untraced. Yields pairs: file path, and responsible plug-in name. 
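    For example, when measuring with `source` set to a project directory,
    this yields each importable .py file found under that directory paired
    with None, plus any files reported by file-tracer plugins paired with
    the plugin's name. Files matching an `omit` pattern are skipped.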
""" for pkg in self.source_pkgs: if (pkg not in sys.modules or not module_has_file(sys.modules[pkg])): continue pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__)) yield from self._find_executable_files(canonical_path(pkg_file)) for src in self.source: yield from self._find_executable_files(src) def _find_plugin_files(self, src_dir: str) -> Iterable[tuple[str, str]]: """Get executable files from the plugins.""" for plugin in self.plugins.file_tracers: for x_file in plugin.find_executable_files(src_dir): yield x_file, plugin._coverage_plugin_name def _find_executable_files(self, src_dir: str) -> Iterable[tuple[str, str | None]]: """Find executable files in `src_dir`. Search for files in `src_dir` that can be executed because they are probably importable. Don't include ones that have been omitted by the configuration. Yield the file path, and the plugin name that handles the file. """ py_files = ( (py_file, None) for py_file in find_python_files(src_dir, self.include_namespace_packages) ) plugin_files = self._find_plugin_files(src_dir) for file_path, plugin_name in itertools.chain(py_files, plugin_files): file_path = canonical_filename(file_path) if self.omit_match and self.omit_match.match(file_path): # Turns out this file was omitted, so don't pull it back # in as un-executed. continue yield file_path, plugin_name def sys_info(self) -> Iterable[tuple[str, Any]]: """Our information for Coverage.sys_info. Returns a list of (key, value) pairs. """ info = [ ("coverage_paths", self.cover_paths), ("stdlib_paths", self.pylib_paths), ("third_party_paths", self.third_paths), ("source_in_third_party_paths", self.source_in_third_paths), ] matcher_names = [ "source_match", "source_pkgs_match", "include_match", "omit_match", "cover_match", "pylib_match", "third_match", "source_in_third_match", ] for matcher_name in matcher_names: matcher = getattr(self, matcher_name) if matcher: matcher_info = matcher.info() else: matcher_info = "-none-" info.append((matcher_name, matcher_info)) return info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/jsonreport.py0000644000175100001770000001137200000000000020505 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Json reporting for coverage.py""" from __future__ import annotations import datetime import json import sys from typing import Any, IO, Iterable, TYPE_CHECKING from coverage import __version__ from coverage.report_core import get_analysis_to_report from coverage.results import Analysis, Numbers from coverage.types import TMorf, TLineNo if TYPE_CHECKING: from coverage import Coverage from coverage.data import CoverageData # "Version 1" had no format number at all. # 2: add the meta.format field. FORMAT_VERSION = 2 class JsonReporter: """A reporter for writing JSON coverage results.""" report_type = "JSON report" def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.config = self.coverage.config self.total = Numbers(self.config.precision) self.report_data: dict[str, Any] = {} def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: """Generate a json report for `morfs`. `morfs` is a list of modules or file names. `outfile` is a file object to write the json to. 
""" outfile = outfile or sys.stdout coverage_data = self.coverage.get_data() coverage_data.set_query_contexts(self.config.report_contexts) self.report_data["meta"] = { "format": FORMAT_VERSION, "version": __version__, "timestamp": datetime.datetime.now().isoformat(), "branch_coverage": coverage_data.has_arcs(), "show_contexts": self.config.json_show_contexts, } measured_files = {} for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs): measured_files[file_reporter.relative_filename()] = self.report_one_file( coverage_data, analysis, ) self.report_data["files"] = measured_files self.report_data["totals"] = { "covered_lines": self.total.n_executed, "num_statements": self.total.n_statements, "percent_covered": self.total.pc_covered, "percent_covered_display": self.total.pc_covered_str, "missing_lines": self.total.n_missing, "excluded_lines": self.total.n_excluded, } if coverage_data.has_arcs(): self.report_data["totals"].update({ "num_branches": self.total.n_branches, "num_partial_branches": self.total.n_partial_branches, "covered_branches": self.total.n_executed_branches, "missing_branches": self.total.n_missing_branches, }) json.dump( self.report_data, outfile, indent=(4 if self.config.json_pretty_print else None), ) return self.total.n_statements and self.total.pc_covered def report_one_file(self, coverage_data: CoverageData, analysis: Analysis) -> dict[str, Any]: """Extract the relevant report data for a single file.""" nums = analysis.numbers self.total += nums summary = { "covered_lines": nums.n_executed, "num_statements": nums.n_statements, "percent_covered": nums.pc_covered, "percent_covered_display": nums.pc_covered_str, "missing_lines": nums.n_missing, "excluded_lines": nums.n_excluded, } reported_file = { "executed_lines": sorted(analysis.executed), "summary": summary, "missing_lines": sorted(analysis.missing), "excluded_lines": sorted(analysis.excluded), } if self.config.json_show_contexts: reported_file["contexts"] = analysis.data.contexts_by_lineno(analysis.filename) if coverage_data.has_arcs(): summary.update({ "num_branches": nums.n_branches, "num_partial_branches": nums.n_partial_branches, "covered_branches": nums.n_executed_branches, "missing_branches": nums.n_missing_branches, }) reported_file["executed_branches"] = list( _convert_branch_arcs(analysis.executed_branch_arcs()), ) reported_file["missing_branches"] = list( _convert_branch_arcs(analysis.missing_branch_arcs()), ) return reported_file def _convert_branch_arcs( branch_arcs: dict[TLineNo, list[TLineNo]], ) -> Iterable[tuple[TLineNo, TLineNo]]: """Convert branch arcs to a list of two-element tuples.""" for source, targets in branch_arcs.items(): for target in targets: yield source, target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/lcovreport.py0000644000175100001770000001205600000000000020477 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """LCOV reporting for coverage.py.""" from __future__ import annotations import base64 import hashlib import sys from typing import IO, Iterable, TYPE_CHECKING from coverage.plugin import FileReporter from coverage.report_core import get_analysis_to_report from coverage.results import Analysis, Numbers from coverage.types import TMorf if TYPE_CHECKING: from coverage import Coverage def line_hash(line: str) -> str: """Produce a hash of 
a source line for use in the LCOV file.""" hashed = hashlib.md5(line.encode("utf-8")).digest() return base64.b64encode(hashed).decode("ascii").rstrip("=") class LcovReporter: """A reporter for writing LCOV coverage reports.""" report_type = "LCOV report" def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.total = Numbers(self.coverage.config.precision) def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: """Renders the full lcov report. `morfs` is a list of modules or filenames outfile is the file object to write the file into. """ self.coverage.get_data() outfile = outfile or sys.stdout for fr, analysis in get_analysis_to_report(self.coverage, morfs): self.get_lcov(fr, analysis, outfile) return self.total.n_statements and self.total.pc_covered def get_lcov(self, fr: FileReporter, analysis: Analysis, outfile: IO[str]) -> None: """Produces the lcov data for a single file. This currently supports both line and branch coverage, however function coverage is not supported. """ self.total += analysis.numbers outfile.write("TN:\n") outfile.write(f"SF:{fr.relative_filename()}\n") source_lines = fr.source().splitlines() for covered in sorted(analysis.executed): if covered in analysis.excluded: # Do not report excluded as executed continue # Note: Coverage.py currently only supports checking *if* a line # has been executed, not how many times, so we set this to 1 for # nice output even if it's technically incorrect. # The lines below calculate a 64-bit encoded md5 hash of the line # corresponding to the DA lines in the lcov file, for either case # of the line being covered or missed in coverage.py. The final two # characters of the encoding ("==") are removed from the hash to # allow genhtml to run on the resulting lcov file. if source_lines: if covered-1 >= len(source_lines): break line = source_lines[covered-1] else: line = "" outfile.write(f"DA:{covered},1,{line_hash(line)}\n") for missed in sorted(analysis.missing): # We don't have to skip excluded lines here, because `missing` # already doesn't have them. assert source_lines line = source_lines[missed-1] outfile.write(f"DA:{missed},0,{line_hash(line)}\n") outfile.write(f"LF:{analysis.numbers.n_statements}\n") outfile.write(f"LH:{analysis.numbers.n_executed}\n") # More information dense branch coverage data. missing_arcs = analysis.missing_branch_arcs() executed_arcs = analysis.executed_branch_arcs() for block_number, block_line_number in enumerate( sorted(analysis.branch_stats().keys()), ): for branch_number, line_number in enumerate( sorted(missing_arcs[block_line_number]), ): # The exit branches have a negative line number, # this will not produce valid lcov. Setting # the line number of the exit branch to 0 will allow # for valid lcov, while preserving the data. line_number = max(line_number, 0) outfile.write(f"BRDA:{line_number},{block_number},{branch_number},-\n") # The start value below allows for the block number to be # preserved between these two for loops (stopping the loop from # resetting the value of the block number to 0). for branch_number, line_number in enumerate( sorted(executed_arcs[block_line_number]), start=len(missing_arcs[block_line_number]), ): line_number = max(line_number, 0) outfile.write(f"BRDA:{line_number},{block_number},{branch_number},1\n") # Summary of the branch coverage. 
if analysis.has_arcs(): branch_stats = analysis.branch_stats() brf = sum(t for t, k in branch_stats.values()) brh = brf - sum(t - k for t, k in branch_stats.values()) outfile.write(f"BRF:{brf}\n") outfile.write(f"BRH:{brh}\n") outfile.write("end_of_record\n") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/misc.py0000644000175100001770000003000200000000000017222 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Miscellaneous stuff for coverage.py.""" from __future__ import annotations import contextlib import datetime import errno import hashlib import importlib import importlib.util import inspect import locale import os import os.path import re import sys import types from types import ModuleType from typing import ( Any, Callable, IO, Iterable, Iterator, Mapping, NoReturn, Sequence, TypeVar, ) from coverage import env from coverage.exceptions import CoverageException from coverage.types import TArc # In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of # other packages were importing the exceptions from misc, so import them here. # pylint: disable=unused-wildcard-import from coverage.exceptions import * # pylint: disable=wildcard-import ISOLATED_MODULES: dict[ModuleType, ModuleType] = {} def isolate_module(mod: ModuleType) -> ModuleType: """Copy a module so that we are isolated from aggressive mocking. If a test suite mocks os.path.exists (for example), and then we need to use it during the test, everything will get tangled up if we use their mock. Making a copy of the module when we import it will isolate coverage.py from those complications. """ if mod not in ISOLATED_MODULES: new_mod = types.ModuleType(mod.__name__) ISOLATED_MODULES[mod] = new_mod for name in dir(mod): value = getattr(mod, name) if isinstance(value, types.ModuleType): value = isolate_module(value) setattr(new_mod, name, value) return ISOLATED_MODULES[mod] os = isolate_module(os) class SysModuleSaver: """Saves the contents of sys.modules, and removes new modules later.""" def __init__(self) -> None: self.old_modules = set(sys.modules) def restore(self) -> None: """Remove any modules imported since this object started.""" new_modules = set(sys.modules) - self.old_modules for m in new_modules: del sys.modules[m] @contextlib.contextmanager def sys_modules_saved() -> Iterator[None]: """A context manager to remove any modules imported during a block.""" saver = SysModuleSaver() try: yield finally: saver.restore() def import_third_party(modname: str) -> tuple[ModuleType, bool]: """Import a third-party module we need, but might not be installed. This also cleans out the module after the import, so that coverage won't appear to have imported it. This lets the third party use coverage for their own tests. Arguments: modname (str): the name of the module to import. Returns: The imported module, and a boolean indicating if the module could be imported. If the boolean is False, the module returned is not the one you want: don't use it. """ with sys_modules_saved(): try: return importlib.import_module(modname), True except ImportError: return sys, False def nice_pair(pair: TArc) -> str: """Make a nice string representation of a pair of numbers. If the numbers are equal, just return the number, otherwise return the pair with a dash between them, indicating the range. 
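    For example::

        >>> nice_pair((17, 17))
        '17'
        >>> nice_pair((5, 9))
        '5-9'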
""" start, end = pair if start == end: return "%d" % start else: return "%d-%d" % (start, end) TSelf = TypeVar("TSelf") TRetVal = TypeVar("TRetVal") def expensive(fn: Callable[[TSelf], TRetVal]) -> Callable[[TSelf], TRetVal]: """A decorator to indicate that a method shouldn't be called more than once. Normally, this does nothing. During testing, this raises an exception if called more than once. """ if env.TESTING: attr = "_once_" + fn.__name__ def _wrapper(self: TSelf) -> TRetVal: if hasattr(self, attr): raise AssertionError(f"Shouldn't have called {fn.__name__} more than once") setattr(self, attr, True) return fn(self) return _wrapper else: return fn # pragma: not testing def bool_or_none(b: Any) -> bool | None: """Return bool(b), but preserve None.""" if b is None: return None else: return bool(b) def join_regex(regexes: Iterable[str]) -> str: """Combine a series of regex strings into one that matches any of them.""" regexes = list(regexes) if len(regexes) == 1: return regexes[0] else: return "|".join(f"(?:{r})" for r in regexes) def file_be_gone(path: str) -> None: """Remove a file, and don't get annoyed if it doesn't exist.""" try: os.remove(path) except OSError as e: if e.errno != errno.ENOENT: raise def ensure_dir(directory: str) -> None: """Make sure the directory exists. If `directory` is None or empty, do nothing. """ if directory: os.makedirs(directory, exist_ok=True) def ensure_dir_for_file(path: str) -> None: """Make sure the directory for the path exists.""" ensure_dir(os.path.dirname(path)) def output_encoding(outfile: IO[str] | None = None) -> str: """Determine the encoding to use for output written to `outfile` or stdout.""" if outfile is None: outfile = sys.stdout encoding = ( getattr(outfile, "encoding", None) or getattr(sys.__stdout__, "encoding", None) or locale.getpreferredencoding() ) return encoding class Hasher: """Hashes Python data for fingerprinting.""" def __init__(self) -> None: self.hash = hashlib.new("sha3_256") def update(self, v: Any) -> None: """Add `v` to the hash, recursively if needed.""" self.hash.update(str(type(v)).encode("utf-8")) if isinstance(v, str): self.hash.update(v.encode("utf-8")) elif isinstance(v, bytes): self.hash.update(v) elif v is None: pass elif isinstance(v, (int, float)): self.hash.update(str(v).encode("utf-8")) elif isinstance(v, (tuple, list)): for e in v: self.update(e) elif isinstance(v, dict): keys = v.keys() for k in sorted(keys): self.update(k) self.update(v[k]) else: for k in dir(v): if k.startswith("__"): continue a = getattr(v, k) if inspect.isroutine(a): continue self.update(k) self.update(a) self.hash.update(b".") def hexdigest(self) -> str: """Retrieve the hex digest of the hash.""" return self.hash.hexdigest()[:32] def _needs_to_implement(that: Any, func_name: str) -> NoReturn: """Helper to raise NotImplementedError in interface stubs.""" if hasattr(that, "_coverage_plugin_name"): thing = "Plugin" name = that._coverage_plugin_name else: thing = "Class" klass = that.__class__ name = f"{klass.__module__}.{klass.__name__}" raise NotImplementedError( f"{thing} {name!r} needs to implement {func_name}()", ) class DefaultValue: """A sentinel object to use for unusual default-value needs. Construct with a string that will be used as the repr, for display in help and Sphinx output. 
""" def __init__(self, display_as: str) -> None: self.display_as = display_as def __repr__(self) -> str: return self.display_as def substitute_variables(text: str, variables: Mapping[str, str]) -> str: """Substitute ``${VAR}`` variables in `text` with their values. Variables in the text can take a number of shell-inspired forms:: $VAR ${VAR} ${VAR?} strict: an error if VAR isn't defined. ${VAR-missing} defaulted: "missing" if VAR isn't defined. $$ just a dollar sign. `variables` is a dictionary of variable values. Returns the resulting text with values substituted. """ dollar_pattern = r"""(?x) # Use extended regex syntax \$ # A dollar sign, (?: # then (?P\$) | # a dollar sign, or (?P\w+) | # a plain word, or { # a {-wrapped (?P\w+) # word, (?: (?P\?) | # with a strict marker -(?P[^}]*) # or a default value )? # maybe. } ) """ dollar_groups = ("dollar", "word1", "word2") def dollar_replace(match: re.Match[str]) -> str: """Called for each $replacement.""" # Only one of the dollar_groups will have matched, just get its text. word = next(g for g in match.group(*dollar_groups) if g) # pragma: always breaks if word == "$": return "$" elif word in variables: return variables[word] elif match["strict"]: msg = f"Variable {word} is undefined: {text!r}" raise CoverageException(msg) else: return match["defval"] text = re.sub(dollar_pattern, dollar_replace, text) return text def format_local_datetime(dt: datetime.datetime) -> str: """Return a string with local timezone representing the date. """ return dt.astimezone().strftime("%Y-%m-%d %H:%M %z") def import_local_file(modname: str, modfile: str | None = None) -> ModuleType: """Import a local file as a module. Opens a file in the current directory named `modname`.py, imports it as `modname`, and returns the module object. `modfile` is the file to import if it isn't in the current directory. """ if modfile is None: modfile = modname + ".py" spec = importlib.util.spec_from_file_location(modname, modfile) assert spec is not None mod = importlib.util.module_from_spec(spec) sys.modules[modname] = mod assert spec.loader is not None spec.loader.exec_module(mod) return mod def _human_key(s: str) -> tuple[list[str | int], str]: """Turn a string into a list of string and number chunks. "z23a" -> (["z", 23, "a"], "z23a") The original string is appended as a last value to ensure the key is unique enough so that "x1y" and "x001y" can be distinguished. """ def tryint(s: str) -> str | int: """If `s` is a number, return an int, else `s` unchanged.""" try: return int(s) except ValueError: return s return ([tryint(c) for c in re.split(r"(\d+)", s)], s) def human_sorted(strings: Iterable[str]) -> list[str]: """Sort the given iterable of strings the way that humans expect. Numeric components in the strings are sorted as numbers. Returns the sorted list. """ return sorted(strings, key=_human_key) SortableItem = TypeVar("SortableItem", bound=Sequence[Any]) def human_sorted_items( items: Iterable[SortableItem], reverse: bool = False, ) -> list[SortableItem]: """Sort (string, ...) items the way humans expect. The elements of `items` can be any tuple/list. They'll be sorted by the first element (a string), with ties broken by the remaining elements. Returns the sorted list of items. """ return sorted(items, key=lambda item: (_human_key(item[0]), *item[1:]), reverse=reverse) def plural(n: int, thing: str = "", things: str = "") -> str: """Pluralize a word. If n is 1, return thing. Otherwise return things, or thing+s. 
""" if n == 1: return thing else: return things or (thing + "s") def stdout_link(text: str, url: str) -> str: """Format text+url as a clickable link for stdout. If attached to a terminal, use escape sequences. Otherwise, just return the text. """ if hasattr(sys.stdout, "isatty") and sys.stdout.isatty(): return f"\033]8;;{url}\a{text}\033]8;;\a" else: return text ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/multiproc.py0000644000175100001770000001014300000000000020311 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Monkey-patching to add multiprocessing support for coverage.py""" from __future__ import annotations import multiprocessing import multiprocessing.process import os import os.path import sys import traceback from typing import Any from coverage.debug import DebugControl # An attribute that will be set on the module to indicate that it has been # monkey-patched. PATCHED_MARKER = "_coverage$patched" OriginalProcess = multiprocessing.process.BaseProcess original_bootstrap = OriginalProcess._bootstrap # type: ignore[attr-defined] class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method """A replacement for multiprocess.Process that starts coverage.""" def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def] """Wrapper around _bootstrap to start coverage.""" debug: DebugControl | None = None try: from coverage import Coverage # avoid circular import cov = Coverage(data_suffix=True, auto_data=True) cov._warn_preimported_source = False cov.start() _debug = cov._debug assert _debug is not None if _debug.should("multiproc"): debug = _debug if debug: debug.write("Calling multiprocessing bootstrap") except Exception: print("Exception during multiprocessing bootstrap init:", file=sys.stderr) traceback.print_exc(file=sys.stderr) sys.stderr.flush() raise try: return original_bootstrap(self, *args, **kwargs) finally: if debug: debug.write("Finished multiprocessing bootstrap") try: cov.stop() cov.save() except Exception as exc: if debug: debug.write("Exception during multiprocessing bootstrap cleanup", exc=exc) raise if debug: debug.write("Saved multiprocessing data") class Stowaway: """An object to pickle, so when it is unpickled, it can apply the monkey-patch.""" def __init__(self, rcfile: str) -> None: self.rcfile = rcfile def __getstate__(self) -> dict[str, str]: return {"rcfile": self.rcfile} def __setstate__(self, state: dict[str, str]) -> None: patch_multiprocessing(state["rcfile"]) def patch_multiprocessing(rcfile: str) -> None: """Monkey-patch the multiprocessing module. This enables coverage measurement of processes started by multiprocessing. This involves aggressive monkey-patching. `rcfile` is the path to the rcfile being used. """ if hasattr(multiprocessing, PATCHED_MARKER): return OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap # type: ignore[attr-defined] # Set the value in ProcessWithCoverage that will be pickled into the child # process. os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile) # When spawning processes rather than forking them, we have no state in the # new process. We sneak in there with a Stowaway: we stuff one of our own # objects into the data that gets pickled and sent to the sub-process. 
When # the Stowaway is unpickled, its __setstate__ method is called, which # re-applies the monkey-patch. # Windows only spawns, so this is needed to keep Windows working. try: from multiprocessing import spawn original_get_preparation_data = spawn.get_preparation_data except (ImportError, AttributeError): pass else: def get_preparation_data_with_stowaway(name: str) -> dict[str, Any]: """Get the original preparation data, and also insert our stowaway.""" d = original_get_preparation_data(name) d["stowaway"] = Stowaway(rcfile) return d spawn.get_preparation_data = get_preparation_data_with_stowaway setattr(multiprocessing, PATCHED_MARKER, True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/numbits.py0000644000175100001770000001106700000000000017762 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Functions to manipulate packed binary representations of number sets. To save space, coverage stores sets of line numbers in SQLite using a packed binary representation called a numbits. A numbits is a set of positive integers. A numbits is stored as a blob in the database. The exact meaning of the bytes in the blobs should be considered an implementation detail that might change in the future. Use these functions to work with those binary blobs of data. """ from __future__ import annotations import json import sqlite3 from itertools import zip_longest from typing import Iterable def nums_to_numbits(nums: Iterable[int]) -> bytes: """Convert `nums` into a numbits. Arguments: nums: a reusable iterable of integers, the line numbers to store. Returns: A binary blob. """ try: nbytes = max(nums) // 8 + 1 except ValueError: # nums was empty. return b"" b = bytearray(nbytes) for num in nums: b[num//8] |= 1 << num % 8 return bytes(b) def numbits_to_nums(numbits: bytes) -> list[int]: """Convert a numbits into a list of numbers. Arguments: numbits: a binary blob, the packed number set. Returns: A list of ints. When registered as a SQLite function by :func:`register_sqlite_functions`, this returns a string, a JSON-encoded list of ints. """ nums = [] for byte_i, byte in enumerate(numbits): for bit_i in range(8): if (byte & (1 << bit_i)): nums.append(byte_i * 8 + bit_i) return nums def numbits_union(numbits1: bytes, numbits2: bytes) -> bytes: """Compute the union of two numbits. Returns: A new numbits, the union of `numbits1` and `numbits2`. """ byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) return bytes(b1 | b2 for b1, b2 in byte_pairs) def numbits_intersection(numbits1: bytes, numbits2: bytes) -> bytes: """Compute the intersection of two numbits. Returns: A new numbits, the intersection `numbits1` and `numbits2`. """ byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs) return intersection_bytes.rstrip(b"\0") def numbits_any_intersection(numbits1: bytes, numbits2: bytes) -> bool: """Is there any number that appears in both numbits? Determine whether two number sets have a non-empty intersection. This is faster than computing the intersection. Returns: A bool, True if there is any number in both `numbits1` and `numbits2`. 
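    For example (an illustrative sketch using only functions from this module)::

        n1 = nums_to_numbits([1, 2, 3])
        n2 = nums_to_numbits([3, 4, 5])
        numbits_any_intersection(n1, n2)                      # True: both contain 3
        numbits_any_intersection(n1, nums_to_numbits([9]))    # False: nothing shared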
""" byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) return any(b1 & b2 for b1, b2 in byte_pairs) def num_in_numbits(num: int, numbits: bytes) -> bool: """Does the integer `num` appear in `numbits`? Returns: A bool, True if `num` is a member of `numbits`. """ nbyte, nbit = divmod(num, 8) if nbyte >= len(numbits): return False return bool(numbits[nbyte] & (1 << nbit)) def register_sqlite_functions(connection: sqlite3.Connection) -> None: """ Define numbits functions in a SQLite connection. This defines these functions for use in SQLite statements: * :func:`numbits_union` * :func:`numbits_intersection` * :func:`numbits_any_intersection` * :func:`num_in_numbits` * :func:`numbits_to_nums` `connection` is a :class:`sqlite3.Connection ` object. After creating the connection, pass it to this function to register the numbits functions. Then you can use numbits functions in your queries:: import sqlite3 from coverage.numbits import register_sqlite_functions conn = sqlite3.connect("example.db") register_sqlite_functions(conn) c = conn.cursor() # Kind of a nonsense query: # Find all the files and contexts that executed line 47 in any file: c.execute( "select file_id, context_id from line_bits where num_in_numbits(?, numbits)", (47,) ) """ connection.create_function("numbits_union", 2, numbits_union) connection.create_function("numbits_intersection", 2, numbits_intersection) connection.create_function("numbits_any_intersection", 2, numbits_any_intersection) connection.create_function("num_in_numbits", 2, num_in_numbits) connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/parser.py0000644000175100001770000015246700000000000017607 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Code parsing for coverage.py.""" from __future__ import annotations import ast import collections import os import re import sys import token import tokenize from dataclasses import dataclass from types import CodeType from typing import ( cast, Any, Callable, Dict, Iterable, List, Optional, Protocol, Sequence, Set, Tuple, ) from coverage import env from coverage.bytecode import code_objects from coverage.debug import short_stack from coverage.exceptions import NoSource, NotPython from coverage.misc import join_regex, nice_pair from coverage.phystokens import generate_tokens from coverage.types import TArc, TLineNo class PythonParser: """Parse code to find executable lines, excluded lines, etc. This information is all based on static analysis: no code execution is involved. """ def __init__( self, text: str | None = None, filename: str | None = None, exclude: str | None = None, ) -> None: """ Source can be provided as `text`, the text itself, or `filename`, from which the text will be read. Excluded lines are those that match `exclude`, a regex string. """ assert text or filename, "PythonParser needs either text or filename" self.filename = filename or "" if text is not None: self.text: str = text else: from coverage.python import get_python_source try: self.text = get_python_source(self.filename) except OSError as err: raise NoSource(f"No source for code: '{self.filename}': {err}") from err self.exclude = exclude # The text lines of the parsed code. 
self.lines: list[str] = self.text.split("\n") # The normalized line numbers of the statements in the code. Exclusions # are taken into account, and statements are adjusted to their first # lines. self.statements: set[TLineNo] = set() # The normalized line numbers of the excluded lines in the code, # adjusted to their first lines. self.excluded: set[TLineNo] = set() # The raw_* attributes are only used in this class, and in # lab/parser.py to show how this class is working. # The line numbers that start statements, as reported by the line # number table in the bytecode. self.raw_statements: set[TLineNo] = set() # The raw line numbers of excluded lines of code, as marked by pragmas. self.raw_excluded: set[TLineNo] = set() # The line numbers of class definitions. self.raw_classdefs: set[TLineNo] = set() # The line numbers of docstring lines. self.raw_docstrings: set[TLineNo] = set() # Internal detail, used by lab/parser.py. self.show_tokens = False # A dict mapping line numbers to lexical statement starts for # multi-line statements. self._multiline: dict[TLineNo, TLineNo] = {} # Lazily-created arc data, and missing arc descriptions. self._all_arcs: set[TArc] | None = None self._missing_arc_fragments: TArcFragments | None = None def lines_matching(self, *regexes: str) -> set[TLineNo]: """Find the lines matching one of a list of regexes. Returns a set of line numbers, the lines that contain a match for one of the regexes in `regexes`. The entire line needn't match, just a part of it. """ combined = join_regex(regexes) regex_c = re.compile(combined) matches = set() for i, ltext in enumerate(self.lines, start=1): if regex_c.search(ltext): matches.add(i) return matches def _raw_parse(self) -> None: """Parse the source to find the interesting facts about its lines. A handful of attributes are updated. """ # Find lines which match an exclusion pattern. if self.exclude: self.raw_excluded = self.lines_matching(self.exclude) # Tokenize, to find excluded suites, to find docstrings, and to find # multi-line statements. # The last token seen. Start with INDENT to get module docstrings prev_toktype: int = token.INDENT # The current number of indents. indent: int = 0 # An exclusion comment will exclude an entire clause at this indent. exclude_indent: int = 0 # Are we currently excluding lines? excluding: bool = False # Are we excluding decorators now? excluding_decorators: bool = False # The line number of the first line in a multi-line statement. first_line: int = 0 # Is the file empty? empty: bool = True # Is this the first token on a line? first_on_line: bool = True # Parenthesis (and bracket) nesting level. nesting: int = 0 assert self.text is not None tokgen = generate_tokens(self.text) for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: if self.show_tokens: # pragma: debugging print("%10s %5s %-20r %r" % ( tokenize.tok_name.get(toktype, toktype), nice_pair((slineno, elineno)), ttext, ltext, )) if toktype == token.INDENT: indent += 1 elif toktype == token.DEDENT: indent -= 1 elif toktype == token.NAME: if ttext == "class": # Class definitions look like branches in the bytecode, so # we need to exclude them. The simplest way is to note the # lines with the "class" keyword. self.raw_classdefs.add(slineno) elif toktype == token.OP: if ttext == ":" and nesting == 0: should_exclude = ( self.raw_excluded.intersection(range(first_line, elineno + 1)) or excluding_decorators ) if not excluding and should_exclude: # Start excluding a suite. 
We trigger off of the colon # token so that the #pragma comment will be recognized on # the same line as the colon. self.raw_excluded.add(elineno) exclude_indent = indent excluding = True excluding_decorators = False elif ttext == "@" and first_on_line: # A decorator. if elineno in self.raw_excluded: excluding_decorators = True if excluding_decorators: self.raw_excluded.add(elineno) elif ttext in "([{": nesting += 1 elif ttext in ")]}": nesting -= 1 elif toktype == token.STRING: if prev_toktype == token.INDENT: # Strings that are first on an indented line are docstrings. # (a trick from trace.py in the stdlib.) This works for # 99.9999% of cases. self.raw_docstrings.update(range(slineno, elineno+1)) elif toktype == token.NEWLINE: if first_line and elineno != first_line: # We're at the end of a line, and we've ended on a # different line than the first line of the statement, # so record a multi-line range. for l in range(first_line, elineno+1): self._multiline[l] = first_line first_line = 0 first_on_line = True if ttext.strip() and toktype != tokenize.COMMENT: # A non-white-space token. empty = False if not first_line: # The token is not white space, and is the first in a statement. first_line = slineno # Check whether to end an excluded suite. if excluding and indent <= exclude_indent: excluding = False if excluding: self.raw_excluded.add(elineno) first_on_line = False prev_toktype = toktype # Find the starts of the executable statements. if not empty: byte_parser = ByteParser(self.text, filename=self.filename) self.raw_statements.update(byte_parser._find_statements()) # The first line of modules can lie and say 1 always, even if the first # line of code is later. If so, map 1 to the actual first line of the # module. if env.PYBEHAVIOR.module_firstline_1 and self._multiline: self._multiline[1] = min(self.raw_statements) def first_line(self, lineno: TLineNo) -> TLineNo: """Return the first line number of the statement including `lineno`.""" if lineno < 0: lineno = -self._multiline.get(-lineno, -lineno) else: lineno = self._multiline.get(lineno, lineno) return lineno def first_lines(self, linenos: Iterable[TLineNo]) -> set[TLineNo]: """Map the line numbers in `linenos` to the correct first line of the statement. Returns a set of the first lines. """ return {self.first_line(l) for l in linenos} def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: """Implement `FileReporter.translate_lines`.""" return self.first_lines(lines) def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: """Implement `FileReporter.translate_arcs`.""" return {(self.first_line(a), self.first_line(b)) for (a, b) in arcs} def parse_source(self) -> None: """Parse source text to find executable lines, excluded lines, etc. Sets the .excluded and .statements attributes, normalized to the first line of multi-line statements. """ try: self._raw_parse() except (tokenize.TokenError, IndentationError, SyntaxError) as err: if hasattr(err, "lineno"): lineno = err.lineno # IndentationError else: lineno = err.args[1][0] # TokenError raise NotPython( f"Couldn't parse '{self.filename}' as Python source: " + f"{err.args[0]!r} at line {lineno}", ) from err self.excluded = self.first_lines(self.raw_excluded) ignore = self.excluded | self.raw_docstrings starts = self.raw_statements - ignore self.statements = self.first_lines(starts) - ignore def arcs(self) -> set[TArc]: """Get information about the arcs available in the code. Returns a set of line number pairs. 
Line numbers have been normalized to the first line of multi-line statements. """ if self._all_arcs is None: self._analyze_ast() assert self._all_arcs is not None return self._all_arcs def _analyze_ast(self) -> None: """Run the AstArcAnalyzer and save its results. `_all_arcs` is the set of arcs in the code. """ aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline) aaa.analyze() self._all_arcs = set() for l1, l2 in aaa.arcs: fl1 = self.first_line(l1) fl2 = self.first_line(l2) if fl1 != fl2: self._all_arcs.add((fl1, fl2)) self._missing_arc_fragments = aaa.missing_arc_fragments def exit_counts(self) -> dict[TLineNo, int]: """Get a count of exits from that each line. Excluded lines are excluded. """ exit_counts: dict[TLineNo, int] = collections.defaultdict(int) for l1, l2 in self.arcs(): if l1 < 0: # Don't ever report -1 as a line number continue if l1 in self.excluded: # Don't report excluded lines as line numbers. continue if l2 in self.excluded: # Arcs to excluded lines shouldn't count. continue exit_counts[l1] += 1 # Class definitions have one extra exit, so remove one for each: for l in self.raw_classdefs: # Ensure key is there: class definitions can include excluded lines. if l in exit_counts: exit_counts[l] -= 1 return exit_counts def missing_arc_description( self, start: TLineNo, end: TLineNo, executed_arcs: Iterable[TArc] | None = None, ) -> str: """Provide an English sentence describing a missing arc.""" if self._missing_arc_fragments is None: self._analyze_ast() assert self._missing_arc_fragments is not None actual_start = start if ( executed_arcs and end < 0 and end == -start and (end, start) not in executed_arcs and (end, start) in self._missing_arc_fragments ): # It's a one-line callable, and we never even started it, # and we have a message about not starting it. start, end = end, start fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)]) msgs = [] for smsg, emsg in fragment_pairs: if emsg is None: if end < 0: # Hmm, maybe we have a one-line callable, let's check. if (-end, end) in self._missing_arc_fragments: return self.missing_arc_description(-end, end) emsg = "didn't jump to the function exit" else: emsg = "didn't jump to line {lineno}" emsg = emsg.format(lineno=end) msg = f"line {actual_start} {emsg}" if smsg is not None: msg += f", because {smsg.format(lineno=actual_start)}" msgs.append(msg) return " or ".join(msgs) class ByteParser: """Parse bytecode to understand the structure of code.""" def __init__( self, text: str, code: CodeType | None = None, filename: str | None = None, ) -> None: self.text = text if code is not None: self.code = code else: assert filename is not None try: self.code = compile(text, filename, "exec", dont_inherit=True) except SyntaxError as synerr: raise NotPython( "Couldn't parse '%s' as Python source: '%s' at line %d" % ( filename, synerr.msg, synerr.lineno or 0, ), ) from synerr def child_parsers(self) -> Iterable[ByteParser]: """Iterate over all the code objects nested within this one. The iteration includes `self` as its first value. """ return (ByteParser(self.text, code=c) for c in code_objects(self.code)) def _line_numbers(self) -> Iterable[TLineNo]: """Yield the line numbers possible in this code object. Uses co_lnotab described in Python/compile.c to find the line numbers. Produces a sequence: l0, l1, ... """ if hasattr(self.code, "co_lines"): # PYVERSIONS: new in 3.10 for _, _, line in self.code.co_lines(): if line: yield line else: # Adapted from dis.py in the standard library. 
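            # A worked example (illustrative) of the decoding below: with
            # co_firstlineno=10 and co_lnotab=b"\x06\x01\x08\x02", the
            # (byte, line) increment pairs are (6, 1) and (8, 2), so this
            # loop yields lines 10, 11, and 13.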
byte_increments = self.code.co_lnotab[0::2] line_increments = self.code.co_lnotab[1::2] last_line_num = None line_num = self.code.co_firstlineno byte_num = 0 for byte_incr, line_incr in zip(byte_increments, line_increments): if byte_incr: if line_num != last_line_num: yield line_num last_line_num = line_num byte_num += byte_incr if line_incr >= 0x80: line_incr -= 0x100 line_num += line_incr if line_num != last_line_num: yield line_num def _find_statements(self) -> Iterable[TLineNo]: """Find the statements in `self.code`. Produce a sequence of line numbers that start statements. Recurses into all code objects reachable from `self.code`. """ for bp in self.child_parsers(): # Get all of the lineno information from this code. yield from bp._line_numbers() # # AST analysis # @dataclass(frozen=True, order=True) class ArcStart: """The information needed to start an arc. `lineno` is the line number the arc starts from. `cause` is an English text fragment used as the `startmsg` for AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an arc wasn't executed, so should fit well into a sentence of the form, "Line 17 didn't run because {cause}." The fragment can include "{lineno}" to have `lineno` interpolated into it. """ lineno: TLineNo cause: str = "" class TAddArcFn(Protocol): """The type for AstArcAnalyzer.add_arc().""" def __call__( self, start: TLineNo, end: TLineNo, smsg: str | None = None, emsg: str | None = None, ) -> None: ... TArcFragments = Dict[TArc, List[Tuple[Optional[str], Optional[str]]]] class Block: """ Blocks need to handle various exiting statements in their own ways. All of these methods take a list of exits, and a callable `add_arc` function that they can use to add arcs if needed. They return True if the exits are handled, or False if the search should continue up the block stack. """ # pylint: disable=unused-argument def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: """Process break exits.""" # Because break can only appear in loops, and most subclasses # implement process_break_exits, this function is never reached. raise AssertionError def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: """Process continue exits.""" # Because continue can only appear in loops, and most subclasses # implement process_continue_exits, this function is never reached. raise AssertionError def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: """Process raise exits.""" return False def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: """Process return exits.""" return False class LoopBlock(Block): """A block on the block stack representing a `for` or `while` loop.""" def __init__(self, start: TLineNo) -> None: # The line number where the loop starts. self.start = start # A set of ArcStarts, the arcs from break statements exiting this loop. self.break_exits: set[ArcStart] = set() def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: self.break_exits.update(exits) return True def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc(xit.lineno, self.start, xit.cause) return True class FunctionBlock(Block): """A block on the block stack representing a function definition.""" def __init__(self, start: TLineNo, name: str) -> None: # The line number where the function starts. self.start = start # The name of the function. 
self.name = name def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc( xit.lineno, -self.start, xit.cause, f"didn't except from function {self.name!r}", ) return True def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: for xit in exits: add_arc( xit.lineno, -self.start, xit.cause, f"didn't return from function {self.name!r}", ) return True class TryBlock(Block): """A block on the block stack representing a `try` block.""" def __init__(self, handler_start: TLineNo | None, final_start: TLineNo | None) -> None: # The line number of the first "except" handler, if any. self.handler_start = handler_start # The line number of the "finally:" clause, if any. self.final_start = final_start # The ArcStarts for breaks/continues/returns/raises inside the "try:" # that need to route through the "finally:" clause. self.break_from: set[ArcStart] = set() self.continue_from: set[ArcStart] = set() self.raise_from: set[ArcStart] = set() self.return_from: set[ArcStart] = set() def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: if self.final_start is not None: self.break_from.update(exits) return True return False def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: if self.final_start is not None: self.continue_from.update(exits) return True return False def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: if self.handler_start is not None: for xit in exits: add_arc(xit.lineno, self.handler_start, xit.cause) else: assert self.final_start is not None self.raise_from.update(exits) return True def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: if self.final_start is not None: self.return_from.update(exits) return True return False class WithBlock(Block): """A block on the block stack representing a `with` block.""" def __init__(self, start: TLineNo) -> None: # We only ever use this block if it is needed, so that we don't have to # check this setting in all the methods. assert env.PYBEHAVIOR.exit_through_with # The line number of the with statement. self.start = start # The ArcStarts for breaks/continues/returns/raises inside the "with:" # that need to go through the with-statement while exiting. self.break_from: set[ArcStart] = set() self.continue_from: set[ArcStart] = set() self.return_from: set[ArcStart] = set() def _process_exits( self, exits: set[ArcStart], add_arc: TAddArcFn, from_set: set[ArcStart] | None = None, ) -> bool: """Helper to process the four kinds of exits.""" for xit in exits: add_arc(xit.lineno, self.start, xit.cause) if from_set is not None: from_set.update(exits) return True def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc, self.break_from) def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc, self.continue_from) def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc) def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: return self._process_exits(exits, add_arc, self.return_from) class NodeList(ast.AST): """A synthetic fictitious node, containing a sequence of nodes. This is used when collapsing optimized if-statements, to represent the unconditional execution of one of the clauses. 
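    For example (illustrative), the compiler can fold away the dead branch of::

        if 0:
            f()
        else:
            g()

    leaving only the ``else`` clause in the bytecode; a NodeList wraps that
    surviving clause so it can be analyzed as one unconditionally executed node.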
""" def __init__(self, body: Sequence[ast.AST]) -> None: self.body = body self.lineno = body[0].lineno # TODO: some add_arcs methods here don't add arcs, they return them. Rename them. # TODO: the cause messages have too many commas. # TODO: Shouldn't the cause messages join with "and" instead of "or"? def _make_expression_code_method(noun: str) -> Callable[[AstArcAnalyzer, ast.AST], None]: """A function to make methods for expression-based callable _code_object__ methods.""" def _code_object__expression_callable(self: AstArcAnalyzer, node: ast.AST) -> None: start = self.line_for_node(node) self.add_arc(-start, start, None, f"didn't run the {noun} on line {start}") self.add_arc(start, -start, None, f"didn't finish the {noun} on line {start}") return _code_object__expression_callable class AstArcAnalyzer: """Analyze source text with an AST to find executable code paths.""" def __init__( self, text: str, statements: set[TLineNo], multiline: dict[TLineNo, TLineNo], ) -> None: self.root_node = ast.parse(text) # TODO: I think this is happening in too many places. self.statements = {multiline.get(l, l) for l in statements} self.multiline = multiline # Turn on AST dumps with an environment variable. # $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code. dump_ast = bool(int(os.getenv("COVERAGE_AST_DUMP", "0"))) if dump_ast: # pragma: debugging # Dump the AST so that failing tests have helpful output. print(f"Statements: {self.statements}") print(f"Multiline map: {self.multiline}") dumpkw: dict[str, Any] = {} if sys.version_info >= (3, 9): dumpkw["indent"] = 4 print(ast.dump(self.root_node, include_attributes=True, **dumpkw)) self.arcs: set[TArc] = set() # A map from arc pairs to a list of pairs of sentence fragments: # { (start, end): [(startmsg, endmsg), ...], } # # For an arc from line 17, they should be usable like: # "Line 17 {endmsg}, because {startmsg}" self.missing_arc_fragments: TArcFragments = collections.defaultdict(list) self.block_stack: list[Block] = [] # $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code. self.debug = bool(int(os.getenv("COVERAGE_TRACK_ARCS", "0"))) def analyze(self) -> None: """Examine the AST tree from `root_node` to determine possible arcs. This sets the `arcs` attribute to be a set of (from, to) line number pairs. """ for node in ast.walk(self.root_node): node_name = node.__class__.__name__ code_object_handler = getattr(self, "_code_object__" + node_name, None) if code_object_handler is not None: code_object_handler(node) def add_arc( self, start: TLineNo, end: TLineNo, smsg: str | None = None, emsg: str | None = None, ) -> None: """Add an arc, including message fragments to use if it is missing.""" if self.debug: # pragma: debugging print(f"\nAdding possible arc: ({start}, {end}): {smsg!r}, {emsg!r}") print(short_stack()) self.arcs.add((start, end)) if smsg is not None or emsg is not None: self.missing_arc_fragments[(start, end)].append((smsg, emsg)) def nearest_blocks(self) -> Iterable[Block]: """Yield the blocks in nearest-to-farthest order.""" return reversed(self.block_stack) def line_for_node(self, node: ast.AST) -> TLineNo: """What is the right line number to use for this node? This dispatches to _line__Node functions where needed. 
""" node_name = node.__class__.__name__ handler = cast( Optional[Callable[[ast.AST], TLineNo]], getattr(self, "_line__" + node_name, None), ) if handler is not None: return handler(node) else: return node.lineno def _line_decorated(self, node: ast.FunctionDef) -> TLineNo: """Compute first line number for things that can be decorated (classes and functions).""" if node.decorator_list: lineno = node.decorator_list[0].lineno else: lineno = node.lineno return lineno def _line__Assign(self, node: ast.Assign) -> TLineNo: return self.line_for_node(node.value) _line__ClassDef = _line_decorated def _line__Dict(self, node: ast.Dict) -> TLineNo: if node.keys: if node.keys[0] is not None: return node.keys[0].lineno else: # Unpacked dict literals `{**{"a":1}}` have None as the key, # use the value in that case. return node.values[0].lineno else: return node.lineno _line__FunctionDef = _line_decorated _line__AsyncFunctionDef = _line_decorated def _line__List(self, node: ast.List) -> TLineNo: if node.elts: return self.line_for_node(node.elts[0]) else: return node.lineno def _line__Module(self, node: ast.Module) -> TLineNo: if env.PYBEHAVIOR.module_firstline_1: return 1 elif node.body: return self.line_for_node(node.body[0]) else: # Empty modules have no line number, they always start at 1. return 1 # The node types that just flow to the next node with no complications. OK_TO_DEFAULT = { "AnnAssign", "Assign", "Assert", "AugAssign", "Delete", "Expr", "Global", "Import", "ImportFrom", "Nonlocal", "Pass", } def add_arcs(self, node: ast.AST) -> set[ArcStart]: """Add the arcs for `node`. Return a set of ArcStarts, exits from this node to the next. Because a node represents an entire sub-tree (including its children), the exits from a node can be arbitrarily complex:: if something(1): if other(2): doit(3) else: doit(5) There are two exits from line 1: they start at line 3 and line 5. """ node_name = node.__class__.__name__ handler = cast( Optional[Callable[[ast.AST], Set[ArcStart]]], getattr(self, "_handle__" + node_name, None), ) if handler is not None: return handler(node) else: # No handler: either it's something that's ok to default (a simple # statement), or it's something we overlooked. if env.TESTING: if node_name not in self.OK_TO_DEFAULT: raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure # Default for simple statements: one exit from this node. return {ArcStart(self.line_for_node(node))} def add_body_arcs( self, body: Sequence[ast.AST], from_start: ArcStart | None = None, prev_starts: set[ArcStart] | None = None, ) -> set[ArcStart]: """Add arcs for the body of a compound statement. `body` is the body node. `from_start` is a single `ArcStart` that can be the previous line in flow before this body. `prev_starts` is a set of ArcStarts that can be the previous line. Only one of them should be given. Returns a set of ArcStarts, the exits from this body. 
""" if prev_starts is None: assert from_start is not None prev_starts = {from_start} for body_node in body: lineno = self.line_for_node(body_node) first_line = self.multiline.get(lineno, lineno) if first_line not in self.statements: maybe_body_node = self.find_non_missing_node(body_node) if maybe_body_node is None: continue body_node = maybe_body_node lineno = self.line_for_node(body_node) for prev_start in prev_starts: self.add_arc(prev_start.lineno, lineno, prev_start.cause) prev_starts = self.add_arcs(body_node) return prev_starts def find_non_missing_node(self, node: ast.AST) -> ast.AST | None: """Search `node` looking for a child that has not been optimized away. This might return the node you started with, or it will work recursively to find a child node in self.statements. Returns a node, or None if none of the node remains. """ # This repeats work just done in add_body_arcs, but this duplication # means we can avoid a function call in the 99.9999% case of not # optimizing away statements. lineno = self.line_for_node(node) first_line = self.multiline.get(lineno, lineno) if first_line in self.statements: return node missing_fn = cast( Optional[Callable[[ast.AST], Optional[ast.AST]]], getattr(self, "_missing__" + node.__class__.__name__, None), ) if missing_fn is not None: ret_node = missing_fn(node) else: ret_node = None return ret_node # Missing nodes: _missing__* # # Entire statements can be optimized away by Python. They will appear in # the AST, but not the bytecode. These functions are called (by # find_non_missing_node) to find a node to use instead of the missing # node. They can return None if the node should truly be gone. def _missing__If(self, node: ast.If) -> ast.AST | None: # If the if-node is missing, then one of its children might still be # here, but not both. So return the first of the two that isn't missing. # Use a NodeList to hold the clauses as a single node. non_missing = self.find_non_missing_node(NodeList(node.body)) if non_missing: return non_missing if node.orelse: return self.find_non_missing_node(NodeList(node.orelse)) return None def _missing__NodeList(self, node: NodeList) -> ast.AST | None: # A NodeList might be a mixture of missing and present nodes. Find the # ones that are present. non_missing_children = [] for child in node.body: maybe_child = self.find_non_missing_node(child) if maybe_child is not None: non_missing_children.append(maybe_child) # Return the simplest representation of the present children. if not non_missing_children: return None if len(non_missing_children) == 1: return non_missing_children[0] return NodeList(non_missing_children) def _missing__While(self, node: ast.While) -> ast.AST | None: body_nodes = self.find_non_missing_node(NodeList(node.body)) if not body_nodes: return None # Make a synthetic While-true node. 
new_while = ast.While() new_while.lineno = body_nodes.lineno new_while.test = ast.Name() new_while.test.lineno = body_nodes.lineno new_while.test.id = "True" assert hasattr(body_nodes, "body") new_while.body = body_nodes.body new_while.orelse = [] return new_while def is_constant_expr(self, node: ast.AST) -> str | None: """Is this a compile-time constant?""" node_name = node.__class__.__name__ if node_name in ["Constant", "NameConstant", "Num"]: return "Num" elif isinstance(node, ast.Name): if node.id in ["True", "False", "None", "__debug__"]: return "Name" return None # In the fullness of time, these might be good tests to write: # while EXPR: # while False: # listcomps hidden deep in other expressions # listcomps hidden in lists: x = [[i for i in range(10)]] # nested function definitions # Exit processing: process_*_exits # # These functions process the four kinds of jump exits: break, continue, # raise, and return. To figure out where an exit goes, we have to look at # the block stack context. For example, a break will jump to the nearest # enclosing loop block, or the nearest enclosing finally block, whichever # is nearer. def process_break_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being breaks.""" for block in self.nearest_blocks(): # pragma: always breaks if block.process_break_exits(exits, self.add_arc): break def process_continue_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being continues.""" for block in self.nearest_blocks(): # pragma: always breaks if block.process_continue_exits(exits, self.add_arc): break def process_raise_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being raises.""" for block in self.nearest_blocks(): if block.process_raise_exits(exits, self.add_arc): break def process_return_exits(self, exits: set[ArcStart]) -> None: """Add arcs due to jumps from `exits` being returns.""" for block in self.nearest_blocks(): # pragma: always breaks if block.process_return_exits(exits, self.add_arc): break # Handlers: _handle__* # # Each handler deals with a specific AST node type, dispatched from # add_arcs. Handlers return the set of exits from that node, and can # also call self.add_arc to record arcs they find. These functions mirror # the Python semantics of each syntactic construct. See the docstring # for add_arcs to understand the concept of exits from a node. # # Every node type that represents a statement should have a handler, or it # should be listed in OK_TO_DEFAULT. 
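    # An illustrative sketch of that protocol, for this two-line loop:
    #
    #     1  while cond():
    #     2      break
    #
    # _handle__Break returns an empty set, because execution never falls
    # through a break. Instead it hands ArcStart(2, ...) to
    # process_break_exits; the nearest LoopBlock stashes it in break_exits,
    # and the loop handler later reports line 2 as an exit of the whole
    # `while` statement.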
def _handle__Break(self, node: ast.Break) -> set[ArcStart]: here = self.line_for_node(node) break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed") self.process_break_exits({break_start}) return set() def _handle_decorated(self, node: ast.FunctionDef) -> set[ArcStart]: """Add arcs for things that can be decorated (classes and functions).""" main_line: TLineNo = node.lineno last: TLineNo | None = node.lineno decs = node.decorator_list if decs: last = None for dec_node in decs: dec_start = self.line_for_node(dec_node) if last is not None and dec_start != last: # type: ignore[unreachable] self.add_arc(last, dec_start) # type: ignore[unreachable] last = dec_start assert last is not None self.add_arc(last, main_line) last = main_line if env.PYBEHAVIOR.trace_decorator_line_again: for top, bot in zip(decs, decs[1:]): self.add_arc(self.line_for_node(bot), self.line_for_node(top)) self.add_arc(self.line_for_node(decs[0]), main_line) self.add_arc(main_line, self.line_for_node(decs[-1])) # The definition line may have been missed, but we should have it # in `self.statements`. For some constructs, `line_for_node` is # not what we'd think of as the first line in the statement, so map # it to the first one. if node.body: body_start = self.line_for_node(node.body[0]) body_start = self.multiline.get(body_start, body_start) # The body is handled in collect_arcs. assert last is not None return {ArcStart(last)} _handle__ClassDef = _handle_decorated def _handle__Continue(self, node: ast.Continue) -> set[ArcStart]: here = self.line_for_node(node) continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed") self.process_continue_exits({continue_start}) return set() def _handle__For(self, node: ast.For) -> set[ArcStart]: start = self.line_for_node(node.iter) self.block_stack.append(LoopBlock(start=start)) from_start = ArcStart(start, cause="the loop on line {lineno} never started") exits = self.add_body_arcs(node.body, from_start=from_start) # Any exit from the body will go back to the top of the loop. for xit in exits: self.add_arc(xit.lineno, start, xit.cause) my_block = self.block_stack.pop() assert isinstance(my_block, LoopBlock) exits = my_block.break_exits from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete") if node.orelse: else_exits = self.add_body_arcs(node.orelse, from_start=from_start) exits |= else_exits else: # No else clause: exit from the for line. 
exits.add(from_start) return exits _handle__AsyncFor = _handle__For _handle__FunctionDef = _handle_decorated _handle__AsyncFunctionDef = _handle_decorated def _handle__If(self, node: ast.If) -> set[ArcStart]: start = self.line_for_node(node.test) from_start = ArcStart(start, cause="the condition on line {lineno} was never true") exits = self.add_body_arcs(node.body, from_start=from_start) from_start = ArcStart(start, cause="the condition on line {lineno} was never false") exits |= self.add_body_arcs(node.orelse, from_start=from_start) return exits if sys.version_info >= (3, 10): def _handle__Match(self, node: ast.Match) -> set[ArcStart]: start = self.line_for_node(node) last_start = start exits = set() had_wildcard = False for case in node.cases: case_start = self.line_for_node(case.pattern) pattern = case.pattern while isinstance(pattern, ast.MatchOr): pattern = pattern.patterns[-1] if isinstance(pattern, ast.MatchAs): had_wildcard = True self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched") from_start = ArcStart( case_start, cause="the pattern on line {lineno} never matched", ) exits |= self.add_body_arcs(case.body, from_start=from_start) last_start = case_start if not had_wildcard: exits.add( ArcStart(case_start, cause="the pattern on line {lineno} always matched"), ) return exits def _handle__NodeList(self, node: NodeList) -> set[ArcStart]: start = self.line_for_node(node) exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) return exits def _handle__Raise(self, node: ast.Raise) -> set[ArcStart]: here = self.line_for_node(node) raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed") self.process_raise_exits({raise_start}) # `raise` statement jumps away, no exits from here. return set() def _handle__Return(self, node: ast.Return) -> set[ArcStart]: here = self.line_for_node(node) return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed") self.process_return_exits({return_start}) # `return` statement jumps away, no exits from here. return set() def _handle__Try(self, node: ast.Try) -> set[ArcStart]: if node.handlers: handler_start = self.line_for_node(node.handlers[0]) else: handler_start = None if node.finalbody: final_start = self.line_for_node(node.finalbody[0]) else: final_start = None # This is true by virtue of Python syntax: have to have either except # or finally, or both. assert handler_start is not None or final_start is not None try_block = TryBlock(handler_start, final_start) self.block_stack.append(try_block) start = self.line_for_node(node) exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) # We're done with the `try` body, so this block no longer handles # exceptions. We keep the block so the `finally` clause can pick up # flows from the handlers and `else` clause. if node.finalbody: try_block.handler_start = None if node.handlers: # If there are `except` clauses, then raises in the try body # will already jump to them. Start this set over for raises in # `except` and `else`. 
try_block.raise_from = set() else: self.block_stack.pop() handler_exits: set[ArcStart] = set() if node.handlers: last_handler_start: TLineNo | None = None for handler_node in node.handlers: handler_start = self.line_for_node(handler_node) if last_handler_start is not None: self.add_arc(last_handler_start, handler_start) last_handler_start = handler_start from_cause = "the exception caught by line {lineno} didn't happen" from_start = ArcStart(handler_start, cause=from_cause) handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start) if node.orelse: exits = self.add_body_arcs(node.orelse, prev_starts=exits) exits |= handler_exits if node.finalbody: self.block_stack.pop() final_from = ( # You can get to the `finally` clause from: exits | # the exits of the body or `else` clause, try_block.break_from | # or a `break`, try_block.continue_from | # or a `continue`, try_block.raise_from | # or a `raise`, try_block.return_from # or a `return`. ) final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from) if try_block.break_from: if env.PYBEHAVIOR.finally_jumps_back: for break_line in try_block.break_from: lineno = break_line.lineno cause = break_line.cause.format(lineno=lineno) for final_exit in final_exits: self.add_arc(final_exit.lineno, lineno, cause) breaks = try_block.break_from else: breaks = self._combine_finally_starts(try_block.break_from, final_exits) self.process_break_exits(breaks) if try_block.continue_from: if env.PYBEHAVIOR.finally_jumps_back: for continue_line in try_block.continue_from: lineno = continue_line.lineno cause = continue_line.cause.format(lineno=lineno) for final_exit in final_exits: self.add_arc(final_exit.lineno, lineno, cause) continues = try_block.continue_from else: continues = self._combine_finally_starts(try_block.continue_from, final_exits) self.process_continue_exits(continues) if try_block.raise_from: self.process_raise_exits( self._combine_finally_starts(try_block.raise_from, final_exits), ) if try_block.return_from: if env.PYBEHAVIOR.finally_jumps_back: for return_line in try_block.return_from: lineno = return_line.lineno cause = return_line.cause.format(lineno=lineno) for final_exit in final_exits: self.add_arc(final_exit.lineno, lineno, cause) returns = try_block.return_from else: returns = self._combine_finally_starts(try_block.return_from, final_exits) self.process_return_exits(returns) if exits: # The finally clause's exits are only exits for the try block # as a whole if the try block had some exits to begin with. exits = final_exits return exits def _combine_finally_starts(self, starts: set[ArcStart], exits: set[ArcStart]) -> set[ArcStart]: """Helper for building the cause of `finally` branches. "finally" clauses might not execute their exits, and the causes could be due to a failure to execute any of the exits in the try block. So we use the causes from `starts` as the causes for `exits`. 
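        For example (illustrative): if the `try` body stashed
        ArcStart(5, "the return on line {lineno} wasn't executed"), that cause
        (formatted with line 5) is attached to every exit of the `finally`
        clause; several causes are joined with " or ".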
""" causes = [] for start in sorted(starts): if start.cause: causes.append(start.cause.format(lineno=start.lineno)) cause = " or ".join(causes) exits = {ArcStart(xit.lineno, cause) for xit in exits} return exits def _handle__While(self, node: ast.While) -> set[ArcStart]: start = to_top = self.line_for_node(node.test) constant_test = self.is_constant_expr(node.test) top_is_body0 = False if constant_test: top_is_body0 = True if env.PYBEHAVIOR.keep_constant_test: top_is_body0 = False if top_is_body0: to_top = self.line_for_node(node.body[0]) self.block_stack.append(LoopBlock(start=to_top)) from_start = ArcStart(start, cause="the condition on line {lineno} was never true") exits = self.add_body_arcs(node.body, from_start=from_start) for xit in exits: self.add_arc(xit.lineno, to_top, xit.cause) exits = set() my_block = self.block_stack.pop() assert isinstance(my_block, LoopBlock) exits.update(my_block.break_exits) from_start = ArcStart(start, cause="the condition on line {lineno} was never false") if node.orelse: else_exits = self.add_body_arcs(node.orelse, from_start=from_start) exits |= else_exits else: # No `else` clause: you can exit from the start. if not constant_test: exits.add(from_start) return exits def _handle__With(self, node: ast.With) -> set[ArcStart]: start = self.line_for_node(node) if env.PYBEHAVIOR.exit_through_with: self.block_stack.append(WithBlock(start=start)) exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) if env.PYBEHAVIOR.exit_through_with: with_block = self.block_stack.pop() assert isinstance(with_block, WithBlock) with_exit = {ArcStart(start)} if exits: for xit in exits: self.add_arc(xit.lineno, start) exits = with_exit if with_block.break_from: self.process_break_exits( self._combine_finally_starts(with_block.break_from, with_exit), ) if with_block.continue_from: self.process_continue_exits( self._combine_finally_starts(with_block.continue_from, with_exit), ) if with_block.return_from: self.process_return_exits( self._combine_finally_starts(with_block.return_from, with_exit), ) return exits _handle__AsyncWith = _handle__With # Code object dispatchers: _code_object__* # # These methods are used by analyze() as the start of the analysis. # There is one for each construct with a code object. def _code_object__Module(self, node: ast.Module) -> None: start = self.line_for_node(node) if node.body: exits = self.add_body_arcs(node.body, from_start=ArcStart(-start)) for xit in exits: self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module") else: # Empty module. 
self.add_arc(-start, start) self.add_arc(start, -start) def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None: start = self.line_for_node(node) self.block_stack.append(FunctionBlock(start=start, name=node.name)) exits = self.add_body_arcs(node.body, from_start=ArcStart(-start)) self.process_return_exits(exits) self.block_stack.pop() _code_object__AsyncFunctionDef = _code_object__FunctionDef def _code_object__ClassDef(self, node: ast.ClassDef) -> None: start = self.line_for_node(node) self.add_arc(-start, start) exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) for xit in exits: self.add_arc( xit.lineno, -start, xit.cause, f"didn't exit the body of class {node.name!r}", ) _code_object__Lambda = _make_expression_code_method("lambda") _code_object__GeneratorExp = _make_expression_code_method("generator expression") if env.PYBEHAVIOR.comprehensions_are_functions: _code_object__DictComp = _make_expression_code_method("dictionary comprehension") _code_object__SetComp = _make_expression_code_method("set comprehension") _code_object__ListComp = _make_expression_code_method("list comprehension") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/phystokens.py0000644000175100001770000001753400000000000020515 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Better tokenizing for coverage.py.""" from __future__ import annotations import ast import io import keyword import re import sys import token import tokenize from typing import Iterable from coverage import env from coverage.types import TLineNo, TSourceTokenLines TokenInfos = Iterable[tokenize.TokenInfo] def _phys_tokens(toks: TokenInfos) -> TokenInfos: """Return all physical tokens, even line continuations. tokenize.generate_tokens() doesn't return a token for the backslash that continues lines. This wrapper provides those tokens so that we can re-create a faithful representation of the original source. Returns the same values as generate_tokens() """ last_line: str | None = None last_lineno = -1 last_ttext: str = "" for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: if last_lineno != elineno: if last_line and last_line.endswith("\\\n"): # We are at the beginning of a new line, and the last line # ended with a backslash. We probably have to inject a # backslash token into the stream. Unfortunately, there's more # to figure out. This code:: # # usage = """\ # HEY THERE # """ # # triggers this condition, but the token text is:: # # '"""\\\nHEY THERE\n"""' # # so we need to figure out if the backslash is already in the # string token or not. inject_backslash = True if last_ttext.endswith("\\"): inject_backslash = False elif ttype == token.STRING: if "\n" in ttext and ttext.split("\n", 1)[0][-1] == "\\": # It's a multi-line string and the first line ends with # a backslash, so we don't need to inject another. inject_backslash = False if inject_backslash: # Figure out what column the backslash is in. ccol = len(last_line.split("\n")[-2]) - 1 # Yield the token, with a fake token type. 
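                # (99999 is a made-up token type: tokenize.tok_name has no
                # entry for it, so source_token_lines renders this synthetic
                # backslash token with the catch-all "xx" class.)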
yield tokenize.TokenInfo( 99999, "\\\n", (slineno, ccol), (slineno, ccol+2), last_line, ) last_line = ltext if ttype not in (tokenize.NEWLINE, tokenize.NL): last_ttext = ttext yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext) last_lineno = elineno class SoftKeywordFinder(ast.NodeVisitor): """Helper for finding lines with soft keywords, like match/case lines.""" def __init__(self, source: str) -> None: # This will be the set of line numbers that start with a soft keyword. self.soft_key_lines: set[TLineNo] = set() self.visit(ast.parse(source)) if sys.version_info >= (3, 10): def visit_Match(self, node: ast.Match) -> None: """Invoked by ast.NodeVisitor.visit""" self.soft_key_lines.add(node.lineno) for case in node.cases: self.soft_key_lines.add(case.pattern.lineno) self.generic_visit(node) if sys.version_info >= (3, 12): def visit_TypeAlias(self, node: ast.TypeAlias) -> None: """Invoked by ast.NodeVisitor.visit""" self.soft_key_lines.add(node.lineno) self.generic_visit(node) def source_token_lines(source: str) -> TSourceTokenLines: """Generate a series of lines, one for each line in `source`. Each line is a list of pairs, each pair is a token:: [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] Each pair has a token class, and the token text. If you concatenate all the token texts, and then join them with newlines, you should have your original `source` back, with two differences: trailing white space is not preserved, and a final line with no newline is indistinguishable from a final line with a newline. """ ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL} line: list[tuple[str, str]] = [] col = 0 source = source.expandtabs(8).replace("\r\n", "\n") tokgen = generate_tokens(source) if env.PYBEHAVIOR.soft_keywords: soft_key_lines = SoftKeywordFinder(source).soft_key_lines for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen): mark_start = True for part in re.split("(\n)", ttext): if part == "\n": yield line line = [] col = 0 mark_end = False elif part == "": mark_end = False elif ttype in ws_tokens: mark_end = False else: if mark_start and scol > col: line.append(("ws", " " * (scol - col))) mark_start = False tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3] if ttype == token.NAME: if keyword.iskeyword(ttext): # Hard keywords are always keywords. tok_class = "key" elif sys.version_info >= (3, 10): # PYVERSIONS # Need the version_info check to keep mypy from borking # on issoftkeyword here. if env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext): # Soft keywords appear at the start of the line, # on lines that start match or case statements. if len(line) == 0: is_start_of_line = True elif (len(line) == 1) and line[0][0] == "ws": is_start_of_line = True else: is_start_of_line = False if is_start_of_line and sline in soft_key_lines: tok_class = "key" line.append((tok_class, part)) mark_end = True scol = 0 if mark_end: col = ecol if line: yield line class CachedTokenizer: """A one-element cache around tokenize.generate_tokens. When reporting, coverage.py tokenizes files twice, once to find the structure of the file, and once to syntax-color it. Tokenizing is expensive, and easily cached. This is a one-element cache so that our twice-in-a-row tokenizing doesn't actually tokenize twice. 
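    For example (illustrative), using the module-level ``generate_tokens``
    defined below::

        toks1 = generate_tokens(source)    # tokenizes the text
        toks2 = generate_tokens(source)    # same text: returns the cached list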
""" def __init__(self) -> None: self.last_text: str | None = None self.last_tokens: list[tokenize.TokenInfo] = [] def generate_tokens(self, text: str) -> TokenInfos: """A stand-in for `tokenize.generate_tokens`.""" if text != self.last_text: self.last_text = text readline = io.StringIO(text).readline try: self.last_tokens = list(tokenize.generate_tokens(readline)) except: self.last_text = None raise return self.last_tokens # Create our generate_tokens cache as a callable replacement function. generate_tokens = CachedTokenizer().generate_tokens def source_encoding(source: bytes) -> str: """Determine the encoding for `source`, according to PEP 263. `source` is a byte string: the text of the program. Returns a string, the name of the encoding. """ readline = iter(source.splitlines(True)).__next__ return tokenize.detect_encoding(readline)[0] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/plugin.py0000644000175100001770000004565300000000000017607 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ .. versionadded:: 4.0 Plug-in interfaces for coverage.py. Coverage.py supports a few different kinds of plug-ins that change its behavior: * File tracers implement tracing of non-Python file types. * Configurers add custom configuration, using Python code to change the configuration. * Dynamic context switchers decide when the dynamic context has changed, for example, to record what test function produced the coverage. To write a coverage.py plug-in, create a module with a subclass of :class:`~coverage.CoveragePlugin`. You will override methods in your class to participate in various aspects of coverage.py's processing. Different types of plug-ins have to override different methods. Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info` to provide debugging information about their operation. Your module must also contain a ``coverage_init`` function that registers an instance of your plug-in class:: import coverage class MyPlugin(coverage.CoveragePlugin): ... def coverage_init(reg, options): reg.add_file_tracer(MyPlugin()) You use the `reg` parameter passed to your ``coverage_init`` function to register your plug-in object. The registration method you call depends on what kind of plug-in it is. If your plug-in takes options, the `options` parameter is a dictionary of your plug-in's options from the coverage.py configuration file. Use them however you want to configure your object before registering it. Coverage.py will store its own information on your plug-in object, using attributes whose names start with ``_coverage_``. Don't be startled. .. warning:: Plug-ins are imported by coverage.py before it begins measuring code. If you write a plugin in your own project, it might import your product code before coverage.py can start measuring. This can result in your own code being reported as missing. One solution is to put your plugins in your project tree, but not in your importable Python package. .. _file_tracer_plugins: File Tracers ============ File tracers implement measurement support for non-Python files. File tracers implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report on those files. 
In your ``coverage_init`` function, use the ``add_file_tracer`` method to register your file tracer. .. _configurer_plugins: Configurers =========== .. versionadded:: 4.5 Configurers modify the configuration of coverage.py during start-up. Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to change the configuration. In your ``coverage_init`` function, use the ``add_configurer`` method to register your configurer. .. _dynamic_context_plugins: Dynamic Context Switchers ========================= .. versionadded:: 5.0 Dynamic context switcher plugins implement the :meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute the context label for each measured frame. Computed context labels are useful when you want to group measured data without modifying the source code. For example, you could write a plugin that checks `frame.f_code` to inspect the currently executed method, and set the context label to a fully qualified method name if it's an instance method of `unittest.TestCase` and the method name starts with 'test'. Such a plugin would provide basic coverage grouping by test and could be used with test runners that have no built-in coveragepy support. In your ``coverage_init`` function, use the ``add_dynamic_context`` method to register your dynamic context switcher. """ from __future__ import annotations import functools from types import FrameType from typing import Any, Iterable from coverage import files from coverage.misc import _needs_to_implement from coverage.types import TArc, TConfigurable, TLineNo, TSourceTokenLines class CoveragePlugin: """Base class for coverage.py plug-ins.""" _coverage_plugin_name: str _coverage_enabled: bool def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unused-argument """Get a :class:`FileTracer` object for a file. Plug-in type: file tracer. Every Python source file is offered to your plug-in to give it a chance to take responsibility for tracing the file. If your plug-in can handle the file, it should return a :class:`FileTracer` object. Otherwise return None. There is no way to register your plug-in for particular files. Instead, this method is invoked for all files as they are executed, and the plug-in decides whether it can trace the file or not. Be prepared for `filename` to refer to all kinds of files that have nothing to do with your plug-in. The file name will be a Python file being executed. There are two broad categories of behavior for a plug-in, depending on the kind of files your plug-in supports: * Static file names: each of your original source files has been converted into a distinct Python file. Your plug-in is invoked with the Python file name, and it maps it back to its original source file. * Dynamic file names: all of your source files are executed by the same Python file. In this case, your plug-in implements :meth:`FileTracer.dynamic_source_filename` to provide the actual source file for each execution frame. `filename` is a string, the path to the file being considered. This is the absolute real path to the file. If you are comparing to other paths, be sure to take this into account. Returns a :class:`FileTracer` object to use to trace `filename`, or None if this plug-in cannot trace this file. """ return None def file_reporter( self, filename: str, # pylint: disable=unused-argument ) -> FileReporter | str: # str should be Literal["python"] """Get the :class:`FileReporter` class to use for a file. Plug-in type: file tracer. 
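Returning to the dynamic context switchers described earlier, the unittest example sketched above might look roughly like this (hedged: the class name is hypothetical, and looking up ``self`` in the frame locals is one possible heuristic, not the only one)::

    from __future__ import annotations

    import unittest
    from types import FrameType

    from coverage.plugin import CoveragePlugin

    class TestMethodContexts(CoveragePlugin):
        def dynamic_context(self, frame: FrameType) -> str | None:
            if frame.f_code.co_name.startswith("test"):
                instance = frame.f_locals.get("self")
                if isinstance(instance, unittest.TestCase):
                    cls = type(instance)
                    return f"{cls.__module__}.{cls.__qualname__}.{frame.f_code.co_name}"
            return None   # no new context; keep whatever is current

    def coverage_init(reg, options):
        reg.add_dynamic_context(TestMethodContexts())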
This will only be invoked if `filename` returns non-None from :meth:`file_tracer`. It's an error to return None from this method. Returns a :class:`FileReporter` object to use to report on `filename`, or the string `"python"` to have coverage.py treat the file as Python. """ _needs_to_implement(self, "file_reporter") def dynamic_context( self, frame: FrameType, # pylint: disable=unused-argument ) -> str | None: """Get the dynamically computed context label for `frame`. Plug-in type: dynamic context. This method is invoked for each frame when outside of a dynamic context, to see if a new dynamic context should be started. If it returns a string, a new context label is set for this and deeper frames. The dynamic context ends when this frame returns. Returns a string to start a new dynamic context, or None if no new context should be started. """ return None def find_executable_files( self, src_dir: str, # pylint: disable=unused-argument ) -> Iterable[str]: """Yield all of the executable files in `src_dir`, recursively. Plug-in type: file tracer. Executability is a plug-in-specific property, but generally means files which would have been considered for coverage analysis, had they been included automatically. Returns or yields a sequence of strings, the paths to files that could have been executed, including files that had been executed. """ return [] def configure(self, config: TConfigurable) -> None: """Modify the configuration of coverage.py. Plug-in type: configurer. This method is called during coverage.py start-up, to give your plug-in a chance to change the configuration. The `config` parameter is an object with :meth:`~coverage.Coverage.get_option` and :meth:`~coverage.Coverage.set_option` methods. Do not call any other methods on the `config` object. """ pass def sys_info(self) -> Iterable[tuple[str, Any]]: """Get a list of information useful for debugging. Plug-in type: any. This method will be invoked for ``--debug=sys``. Your plug-in can return any information it wants to be displayed. Returns a list of pairs: `[(name, value), ...]`. """ return [] class CoveragePluginBase: """Plugins produce specialized objects, which point back to the original plugin.""" _coverage_plugin: CoveragePlugin class FileTracer(CoveragePluginBase): """Support needed for files during the execution phase. File tracer plug-ins implement subclasses of FileTracer to return from their :meth:`~CoveragePlugin.file_tracer` method. You may construct this object from :meth:`CoveragePlugin.file_tracer` any way you like. A natural choice would be to pass the file name given to `file_tracer`. `FileTracer` objects should only be created in the :meth:`CoveragePlugin.file_tracer` method. See :ref:`howitworks` for details of the different coverage.py phases. """ def source_filename(self) -> str: """The source file name for this file. This may be any file name you like. A key responsibility of a plug-in is to own the mapping from Python execution back to whatever source file name was originally the source of the code. See :meth:`CoveragePlugin.file_tracer` for details about static and dynamic file names. Returns the file name to credit with this execution. """ _needs_to_implement(self, "source_filename") def has_dynamic_source_filename(self) -> bool: """Does this FileTracer have dynamic source file names? FileTracers can provide dynamically determined file names by implementing :meth:`dynamic_source_filename`. Invoking that function is expensive. 
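Stepping back to the ``configure`` hook defined above, a minimal configurer might look like this (hedged: the class name is hypothetical, and ``"report:exclude_lines"`` is assumed to be the usual ``section:option`` spelling of that setting)::

    from coverage.plugin import CoveragePlugin

    class AddMyExclusions(CoveragePlugin):
        def configure(self, config) -> None:
            # Only get_option()/set_option() may be used on `config`.
            opt_name = "report:exclude_lines"
            exclude_lines = config.get_option(opt_name)
            exclude_lines.append(r"pragma: my-no-cover")
            config.set_option(opt_name, exclude_lines)

    def coverage_init(reg, options):
        reg.add_configurer(AddMyExclusions())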
To determine whether to invoke it, coverage.py uses the result of this function to know if it needs to bother invoking :meth:`dynamic_source_filename`. See :meth:`CoveragePlugin.file_tracer` for details about static and dynamic file names. Returns True if :meth:`dynamic_source_filename` should be called to get dynamic source file names. """ return False def dynamic_source_filename( self, filename: str, # pylint: disable=unused-argument frame: FrameType, # pylint: disable=unused-argument ) -> str | None: """Get a dynamically computed source file name. Some plug-ins need to compute the source file name dynamically for each frame. This function will not be invoked if :meth:`has_dynamic_source_filename` returns False. Returns the source file name for this frame, or None if this frame shouldn't be measured. """ return None def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: """Get the range of source line numbers for a given a call frame. The call frame is examined, and the source line number in the original file is returned. The return value is a pair of numbers, the starting line number and the ending line number, both inclusive. For example, returning (5, 7) means that lines 5, 6, and 7 should be considered executed. This function might decide that the frame doesn't indicate any lines from the source file were executed. Return (-1, -1) in this case to tell coverage.py that no lines should be recorded for this frame. """ lineno = frame.f_lineno return lineno, lineno @functools.total_ordering class FileReporter(CoveragePluginBase): """Support needed for files during the analysis and reporting phases. File tracer plug-ins implement a subclass of `FileReporter`, and return instances from their :meth:`CoveragePlugin.file_reporter` method. There are many methods here, but only :meth:`lines` is required, to provide the set of executable lines in the file. See :ref:`howitworks` for details of the different coverage.py phases. """ def __init__(self, filename: str) -> None: """Simple initialization of a `FileReporter`. The `filename` argument is the path to the file being reported. This will be available as the `.filename` attribute on the object. Other method implementations on this base class rely on this attribute. """ self.filename = filename def __repr__(self) -> str: return f"<{self.__class__.__name__} filename={self.filename!r}>" def relative_filename(self) -> str: """Get the relative file name for this file. This file path will be displayed in reports. The default implementation will supply the actual project-relative file path. You only need to supply this method if you have an unusual syntax for file paths. """ return files.relative_filename(self.filename) def source(self) -> str: """Get the source for the file. Returns a Unicode string. The base implementation simply reads the `self.filename` file and decodes it as UTF-8. Override this method if your file isn't readable as a text file, or if you need other encoding support. """ with open(self.filename, encoding="utf-8") as f: return f.read() def lines(self) -> set[TLineNo]: """Get the executable lines in this file. Your plug-in must determine which lines in the file were possibly executable. This method returns a set of those line numbers. Returns a set of line numbers. """ _needs_to_implement(self, "lines") def excluded_lines(self) -> set[TLineNo]: """Get the excluded executable lines in this file. Your plug-in can use any method it likes to allow the user to exclude executable lines from consideration. 
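Since ``lines()`` is the only method a ``FileReporter`` must implement, a bare-bones reporter can be quite small (hedged: the class is hypothetical, and "every non-blank line is executable" is only a stand-in for real parsing of your file format)::

    from __future__ import annotations

    from coverage.plugin import FileReporter
    from coverage.types import TLineNo

    class EveryLineReporter(FileReporter):
        def lines(self) -> set[TLineNo]:
            # self.filename is set by FileReporter.__init__.
            with open(self.filename, encoding="utf-8") as f:
                return {
                    lineno
                    for lineno, text in enumerate(f, start=1)
                    if text.strip()
                }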
Returns a set of line numbers. The base implementation returns the empty set. """ return set() def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: """Translate recorded lines into reported lines. Some file formats will want to report lines slightly differently than they are recorded. For example, Python records the last line of a multi-line statement, but reports are nicer if they mention the first line. Your plug-in can optionally define this method to perform these kinds of adjustment. `lines` is a sequence of integers, the recorded line numbers. Returns a set of integers, the adjusted line numbers. The base implementation returns the numbers unchanged. """ return set(lines) def arcs(self) -> set[TArc]: """Get the executable arcs in this file. To support branch coverage, your plug-in needs to be able to indicate possible execution paths, as a set of line number pairs. Each pair is a `(prev, next)` pair indicating that execution can transition from the `prev` line number to the `next` line number. Returns a set of pairs of line numbers. The default implementation returns an empty set. """ return set() def no_branch_lines(self) -> set[TLineNo]: """Get the lines excused from branch coverage in this file. Your plug-in can use any method it likes to allow the user to exclude lines from consideration of branch coverage. Returns a set of line numbers. The base implementation returns the empty set. """ return set() def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: """Translate recorded arcs into reported arcs. Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of line number pairs. Returns a set of line number pairs. The default implementation returns `arcs` unchanged. """ return set(arcs) def exit_counts(self) -> dict[TLineNo, int]: """Get a count of exits from that each line. To determine which lines are branches, coverage.py looks for lines that have more than one exit. This function creates a dict mapping each executable line number to a count of how many exits it has. To be honest, this feels wrong, and should be refactored. Let me know if you attempt to implement this method in your plug-in... """ return {} def missing_arc_description( self, start: TLineNo, end: TLineNo, executed_arcs: Iterable[TArc] | None = None, # pylint: disable=unused-argument ) -> str: """Provide an English sentence describing a missing arc. The `start` and `end` arguments are the line numbers of the missing arc. Negative numbers indicate entering or exiting code objects. The `executed_arcs` argument is a set of line number pairs, the arcs that were executed in this file. By default, this simply returns the string "Line {start} didn't jump to {end}". """ return f"Line {start} didn't jump to line {end}" def source_token_lines(self) -> TSourceTokenLines: """Generate a series of tokenized lines, one for each line in `source`. These tokens are used for syntax-colored reports. Each line is a list of pairs, each pair is a token:: [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ... ] Each pair has a token class, and the token text. The token classes are: * ``"com"``: a comment * ``"key"``: a keyword * ``"nam"``: a name, or identifier * ``"num"``: a number * ``"op"``: an operator * ``"str"``: a string literal * ``"ws"``: some white space * ``"txt"``: some other kind of text If you concatenate all the token texts, and then join them with newlines, you should have your original source back. The default implementation simply returns each line tagged as ``"txt"``. 
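To relate the branch-oriented methods above, consider a file whose line 3 can jump to either line 4 or line 6 (hedged: the line numbers and values are made up for illustration)::

    # Possible return values for such a file:
    example_arcs = {(2, 3), (3, 4), (3, 6), (4, 5), (6, 7)}    # from arcs()
    example_exit_counts = {2: 1, 3: 2, 4: 1, 6: 1}             # from exit_counts()

    # Lines with more than one exit are the branch lines.
    branch_lines = [line for line, n in example_exit_counts.items() if n > 1]
    assert branch_lines == [3]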
""" for line in self.source().splitlines(): yield [("txt", line)] def __eq__(self, other: Any) -> bool: return isinstance(other, FileReporter) and self.filename == other.filename def __lt__(self, other: Any) -> bool: return isinstance(other, FileReporter) and self.filename < other.filename # This object doesn't need to be hashed. __hash__ = None # type: ignore[assignment] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/plugin_support.py0000644000175100001770000002405400000000000021373 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Support for plugins.""" from __future__ import annotations import os import os.path import sys from types import FrameType from typing import Any, Iterable, Iterator from coverage.exceptions import PluginError from coverage.misc import isolate_module from coverage.plugin import CoveragePlugin, FileTracer, FileReporter from coverage.types import ( TArc, TConfigurable, TDebugCtl, TLineNo, TPluginConfig, TSourceTokenLines, ) os = isolate_module(os) class Plugins: """The currently loaded collection of coverage.py plugins.""" def __init__(self) -> None: self.order: list[CoveragePlugin] = [] self.names: dict[str, CoveragePlugin] = {} self.file_tracers: list[CoveragePlugin] = [] self.configurers: list[CoveragePlugin] = [] self.context_switchers: list[CoveragePlugin] = [] self.current_module: str | None = None self.debug: TDebugCtl | None @classmethod def load_plugins( cls, modules: Iterable[str], config: TPluginConfig, debug: TDebugCtl | None = None, ) -> Plugins: """Load plugins from `modules`. Returns a Plugins object with the loaded and configured plugins. """ plugins = cls() plugins.debug = debug for module in modules: plugins.current_module = module __import__(module) mod = sys.modules[module] coverage_init = getattr(mod, "coverage_init", None) if not coverage_init: raise PluginError( f"Plugin module {module!r} didn't define a coverage_init function", ) options = config.get_plugin_options(module) coverage_init(plugins, options) plugins.current_module = None return plugins def add_file_tracer(self, plugin: CoveragePlugin) -> None: """Add a file tracer plugin. `plugin` is an instance of a third-party plugin class. It must implement the :meth:`CoveragePlugin.file_tracer` method. """ self._add_plugin(plugin, self.file_tracers) def add_configurer(self, plugin: CoveragePlugin) -> None: """Add a configuring plugin. `plugin` is an instance of a third-party plugin class. It must implement the :meth:`CoveragePlugin.configure` method. """ self._add_plugin(plugin, self.configurers) def add_dynamic_context(self, plugin: CoveragePlugin) -> None: """Add a dynamic context plugin. `plugin` is an instance of a third-party plugin class. It must implement the :meth:`CoveragePlugin.dynamic_context` method. """ self._add_plugin(plugin, self.context_switchers) def add_noop(self, plugin: CoveragePlugin) -> None: """Add a plugin that does nothing. This is only useful for testing the plugin support. """ self._add_plugin(plugin, None) def _add_plugin( self, plugin: CoveragePlugin, specialized: list[CoveragePlugin] | None, ) -> None: """Add a plugin object. `plugin` is a :class:`CoveragePlugin` instance to add. `specialized` is a list to append the plugin to. 
""" plugin_name = f"{self.current_module}.{plugin.__class__.__name__}" if self.debug and self.debug.should("plugin"): self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}") labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug) plugin = DebugPluginWrapper(plugin, labelled) plugin._coverage_plugin_name = plugin_name plugin._coverage_enabled = True self.order.append(plugin) self.names[plugin_name] = plugin if specialized is not None: specialized.append(plugin) def __bool__(self) -> bool: return bool(self.order) def __iter__(self) -> Iterator[CoveragePlugin]: return iter(self.order) def get(self, plugin_name: str) -> CoveragePlugin: """Return a plugin by name.""" return self.names[plugin_name] class LabelledDebug: """A Debug writer, but with labels for prepending to the messages.""" def __init__(self, label: str, debug: TDebugCtl, prev_labels: Iterable[str] = ()): self.labels = list(prev_labels) + [label] self.debug = debug def add_label(self, label: str) -> LabelledDebug: """Add a label to the writer, and return a new `LabelledDebug`.""" return LabelledDebug(label, self.debug, self.labels) def message_prefix(self) -> str: """The prefix to use on messages, combining the labels.""" prefixes = self.labels + [""] return ":\n".join(" "*i+label for i, label in enumerate(prefixes)) def write(self, message: str) -> None: """Write `message`, but with the labels prepended.""" self.debug.write(f"{self.message_prefix()}{message}") class DebugPluginWrapper(CoveragePlugin): """Wrap a plugin, and use debug to report on what it's doing.""" def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None: super().__init__() self.plugin = plugin self.debug = debug def file_tracer(self, filename: str) -> FileTracer | None: tracer = self.plugin.file_tracer(filename) self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}") if tracer: debug = self.debug.add_label(f"file {filename!r}") tracer = DebugFileTracerWrapper(tracer, debug) return tracer def file_reporter(self, filename: str) -> FileReporter | str: reporter = self.plugin.file_reporter(filename) assert isinstance(reporter, FileReporter) self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}") if reporter: debug = self.debug.add_label(f"file {filename!r}") reporter = DebugFileReporterWrapper(filename, reporter, debug) return reporter def dynamic_context(self, frame: FrameType) -> str | None: context = self.plugin.dynamic_context(frame) self.debug.write(f"dynamic_context({frame!r}) --> {context!r}") return context def find_executable_files(self, src_dir: str) -> Iterable[str]: executable_files = self.plugin.find_executable_files(src_dir) self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}") return executable_files def configure(self, config: TConfigurable) -> None: self.debug.write(f"configure({config!r})") self.plugin.configure(config) def sys_info(self) -> Iterable[tuple[str, Any]]: return self.plugin.sys_info() class DebugFileTracerWrapper(FileTracer): """A debugging `FileTracer`.""" def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None: self.tracer = tracer self.debug = debug def _show_frame(self, frame: FrameType) -> str: """A short string identifying a frame, for debug messages.""" return "%s@%d" % ( os.path.basename(frame.f_code.co_filename), frame.f_lineno, ) def source_filename(self) -> str: sfilename = self.tracer.source_filename() self.debug.write(f"source_filename() --> {sfilename!r}") return sfilename def has_dynamic_source_filename(self) -> bool: has 
= self.tracer.has_dynamic_source_filename() self.debug.write(f"has_dynamic_source_filename() --> {has!r}") return has def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None: dyn = self.tracer.dynamic_source_filename(filename, frame) self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format( filename, self._show_frame(frame), dyn, )) return dyn def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: pair = self.tracer.line_number_range(frame) self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}") return pair class DebugFileReporterWrapper(FileReporter): """A debugging `FileReporter`.""" def __init__(self, filename: str, reporter: FileReporter, debug: LabelledDebug) -> None: super().__init__(filename) self.reporter = reporter self.debug = debug def relative_filename(self) -> str: ret = self.reporter.relative_filename() self.debug.write(f"relative_filename() --> {ret!r}") return ret def lines(self) -> set[TLineNo]: ret = self.reporter.lines() self.debug.write(f"lines() --> {ret!r}") return ret def excluded_lines(self) -> set[TLineNo]: ret = self.reporter.excluded_lines() self.debug.write(f"excluded_lines() --> {ret!r}") return ret def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: ret = self.reporter.translate_lines(lines) self.debug.write(f"translate_lines({lines!r}) --> {ret!r}") return ret def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: ret = self.reporter.translate_arcs(arcs) self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}") return ret def no_branch_lines(self) -> set[TLineNo]: ret = self.reporter.no_branch_lines() self.debug.write(f"no_branch_lines() --> {ret!r}") return ret def exit_counts(self) -> dict[TLineNo, int]: ret = self.reporter.exit_counts() self.debug.write(f"exit_counts() --> {ret!r}") return ret def arcs(self) -> set[TArc]: ret = self.reporter.arcs() self.debug.write(f"arcs() --> {ret!r}") return ret def source(self) -> str: ret = self.reporter.source() self.debug.write("source() --> %d chars" % (len(ret),)) return ret def source_token_lines(self) -> TSourceTokenLines: ret = list(self.reporter.source_token_lines()) self.debug.write("source_token_lines() --> %d tokens" % (len(ret),)) return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/py.typed0000644000175100001770000000011000000000000017411 0ustar00runnerdocker00000000000000# Marker file for PEP 561 to indicate that this package has type hints. 
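As an aside on the debug plumbing in plugin_support.py above: ``LabelledDebug`` nests its labels by indenting each one a little more, which is what keeps the wrapped-plugin traces readable. A tiny sketch (hedged: ``PrintDebug`` is a hypothetical stand-in for the real debug controller; only its ``write`` method is used here)::

    from coverage.plugin_support import LabelledDebug

    class PrintDebug:
        def write(self, msg: str) -> None:
            print(msg)

    debug = LabelledDebug("plugin 'my_plugin'", PrintDebug())
    filedebug = debug.add_label("file 'sample.py'")
    filedebug.write("lines() --> {1, 2, 3}")
    # Prints:
    # plugin 'my_plugin':
    #  file 'sample.py':
    #   lines() --> {1, 2, 3}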
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/python.py0000644000175100001770000001753300000000000017626 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Python source expertise for coverage.py""" from __future__ import annotations import os.path import types import zipimport from typing import Iterable, TYPE_CHECKING from coverage import env from coverage.exceptions import CoverageException, NoSource from coverage.files import canonical_filename, relative_filename, zip_location from coverage.misc import expensive, isolate_module, join_regex from coverage.parser import PythonParser from coverage.phystokens import source_token_lines, source_encoding from coverage.plugin import FileReporter from coverage.types import TArc, TLineNo, TMorf, TSourceTokenLines if TYPE_CHECKING: from coverage import Coverage os = isolate_module(os) def read_python_source(filename: str) -> bytes: """Read the Python source text from `filename`. Returns bytes. """ with open(filename, "rb") as f: source = f.read() return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n") def get_python_source(filename: str) -> str: """Return the source code, as unicode.""" base, ext = os.path.splitext(filename) if ext == ".py" and env.WINDOWS: exts = [".py", ".pyw"] else: exts = [ext] source_bytes: bytes | None for ext in exts: try_filename = base + ext if os.path.exists(try_filename): # A regular text file: open it. source_bytes = read_python_source(try_filename) break # Maybe it's in a zip file? source_bytes = get_zip_bytes(try_filename) if source_bytes is not None: break else: # Couldn't find source. raise NoSource(f"No source for code: '{filename}'.") # Replace \f because of http://bugs.python.org/issue19035 source_bytes = source_bytes.replace(b"\f", b" ") source = source_bytes.decode(source_encoding(source_bytes), "replace") # Python code should always end with a line with a newline. if source and source[-1] != "\n": source += "\n" return source def get_zip_bytes(filename: str) -> bytes | None: """Get data from `filename` if it is a zip file path. Returns the bytestring data read from the zip file, or None if no zip file could be found or `filename` isn't in it. The data returned will be an empty string if the file is empty. """ zipfile_inner = zip_location(filename) if zipfile_inner is not None: zipfile, inner = zipfile_inner try: zi = zipimport.zipimporter(zipfile) except zipimport.ZipImportError: return None try: data = zi.get_data(inner) except OSError: return None return data return None def source_for_file(filename: str) -> str: """Return the source filename for `filename`. Given a file name being traced, return the best guess as to the source file to attribute it to. """ if filename.endswith(".py"): # .py files are themselves source files. return filename elif filename.endswith((".pyc", ".pyo")): # Bytecode files probably have source files near them. py_filename = filename[:-1] if os.path.exists(py_filename): # Found a .py file, use that. return py_filename if env.WINDOWS: # On Windows, it could be a .pyw file. pyw_filename = py_filename + "w" if os.path.exists(pyw_filename): return pyw_filename # Didn't find source, but it's probably the .py file we want. return py_filename # No idea, just use the file name as-is. 
return filename def source_for_morf(morf: TMorf) -> str: """Get the source filename for the module-or-file `morf`.""" if hasattr(morf, "__file__") and morf.__file__: filename = morf.__file__ elif isinstance(morf, types.ModuleType): # A module should have had .__file__, otherwise we can't use it. # This could be a PEP-420 namespace package. raise CoverageException(f"Module {morf} has no file") else: filename = morf filename = source_for_file(filename) return filename class PythonFileReporter(FileReporter): """Report support for a Python file.""" def __init__(self, morf: TMorf, coverage: Coverage | None = None) -> None: self.coverage = coverage filename = source_for_morf(morf) fname = filename canonicalize = True if self.coverage is not None: if self.coverage.config.relative_files: canonicalize = False if canonicalize: fname = canonical_filename(filename) super().__init__(fname) if hasattr(morf, "__name__"): name = morf.__name__.replace(".", os.sep) if os.path.basename(filename).startswith("__init__."): name += os.sep + "__init__" name += ".py" else: name = relative_filename(filename) self.relname = name self._source: str | None = None self._parser: PythonParser | None = None self._excluded = None def __repr__(self) -> str: return f"" def relative_filename(self) -> str: return self.relname @property def parser(self) -> PythonParser: """Lazily create a :class:`PythonParser`.""" assert self.coverage is not None if self._parser is None: self._parser = PythonParser( filename=self.filename, exclude=self.coverage._exclude_regex("exclude"), ) self._parser.parse_source() return self._parser def lines(self) -> set[TLineNo]: """Return the line numbers of statements in the file.""" return self.parser.statements def excluded_lines(self) -> set[TLineNo]: """Return the line numbers of statements in the file.""" return self.parser.excluded def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: return self.parser.translate_lines(lines) def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: return self.parser.translate_arcs(arcs) @expensive def no_branch_lines(self) -> set[TLineNo]: assert self.coverage is not None no_branch = self.parser.lines_matching( join_regex(self.coverage.config.partial_list), join_regex(self.coverage.config.partial_always_list), ) return no_branch @expensive def arcs(self) -> set[TArc]: return self.parser.arcs() @expensive def exit_counts(self) -> dict[TLineNo, int]: return self.parser.exit_counts() def missing_arc_description( self, start: TLineNo, end: TLineNo, executed_arcs: Iterable[TArc] | None = None, ) -> str: return self.parser.missing_arc_description(start, end, executed_arcs) def source(self) -> str: if self._source is None: self._source = get_python_source(self.filename) return self._source def should_be_python(self) -> bool: """Does it seem like this file should contain Python? This is used to decide if a file reported as part of the execution of a program was really likely to have contained Python in the first place. """ # Get the file extension. _, ext = os.path.splitext(self.filename) # Anything named *.py* should be Python. if ext.startswith(".py"): return True # A file with no extension should be Python. if not ext: return True # Everything else is probably not Python. 
return False def source_token_lines(self) -> TSourceTokenLines: return source_token_lines(self.source()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/pytracer.py0000644000175100001770000003453000000000000020132 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Raw data collector for coverage.py.""" from __future__ import annotations import atexit import dis import itertools import sys import threading from types import FrameType, ModuleType from typing import Any, Callable, Set, cast from coverage import env from coverage.types import ( TArc, TFileDisposition, TLineNo, TTraceData, TTraceFileData, TTraceFn, TracerCore, TWarnFn, ) # We need the YIELD_VALUE opcode below, in a comparison-friendly form. # PYVERSIONS: RESUME is new in Python3.11 RESUME = dis.opmap.get("RESUME") RETURN_VALUE = dis.opmap["RETURN_VALUE"] if RESUME is None: YIELD_VALUE = dis.opmap["YIELD_VALUE"] YIELD_FROM = dis.opmap["YIELD_FROM"] YIELD_FROM_OFFSET = 0 if env.PYPY else 2 # When running meta-coverage, this file can try to trace itself, which confuses # everything. Don't trace ourselves. THIS_FILE = __file__.rstrip("co") class PyTracer(TracerCore): """Python implementation of the raw data tracer.""" # Because of poor implementations of trace-function-manipulating tools, # the Python trace function must be kept very simple. In particular, there # must be only one function ever set as the trace function, both through # sys.settrace, and as the return value from the trace function. Put # another way, the trace function must always return itself. It cannot # swap in other functions, or return None to avoid tracing a particular # frame. # # The trace manipulator that introduced this restriction is DecoratorTools, # which sets a trace function, and then later restores the pre-existing one # by calling sys.settrace with a function it found in the current frame. # # Systems that use DecoratorTools (or similar trace manipulations) must use # PyTracer to get accurate results. The command-line --timid argument is # used to force the use of this tracer. tracer_ids = itertools.count() def __init__(self) -> None: # Which tracer are we? self.id = next(self.tracer_ids) # Attributes set from the collector: self.data: TTraceData self.trace_arcs = False self.should_trace: Callable[[str, FrameType], TFileDisposition] self.should_trace_cache: dict[str, TFileDisposition | None] self.should_start_context: Callable[[FrameType], str | None] | None = None self.switch_context: Callable[[str | None], None] | None = None self.warn: TWarnFn # The threading module to use, if any. self.threading: ModuleType | None = None self.cur_file_data: TTraceFileData | None = None self.last_line: TLineNo = 0 self.cur_file_name: str | None = None self.context: str | None = None self.started_context = False # The data_stack parallels the Python call stack. Each entry is # information about an active frame, a four-element tuple: # [0] The TTraceData for this frame's file. Could be None if we # aren't tracing this frame. # [1] The current file name for the frame. None if we aren't tracing # this frame. # [2] The last line number executed in this frame. # [3] Boolean: did this frame start a new context? 
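        # For example (hypothetical values), while tracing a function in
        # "b.py" that was called from line 17 of "a.py", the stack could hold
        # one entry for the suspended outer frame:
        #     [({(16, 17)}, "a.py", 17, False)]
        # while cur_file_data / cur_file_name / last_line / started_context
        # describe the innermost frame currently executing in "b.py".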
self.data_stack: list[tuple[TTraceFileData | None, str | None, TLineNo, bool]] = [] self.thread: threading.Thread | None = None self.stopped = False self._activity = False self.in_atexit = False # On exit, self.in_atexit = True atexit.register(setattr, self, "in_atexit", True) # Cache a bound method on the instance, so that we don't have to # re-create a bound method object all the time. self._cached_bound_method_trace: TTraceFn = self._trace def __repr__(self) -> str: points = sum(len(v) for v in self.data.values()) files = len(self.data) return f"" def log(self, marker: str, *args: Any) -> None: """For hard-core logging of what this tracer is doing.""" with open("/tmp/debug_trace.txt", "a") as f: f.write(f"{marker} {self.id}[{len(self.data_stack)}]") if 0: # if you want thread ids.. f.write(".{:x}.{:x}".format( # type: ignore[unreachable] self.thread.ident, self.threading.current_thread().ident, )) f.write(" {}".format(" ".join(map(str, args)))) if 0: # if you want callers.. f.write(" | ") # type: ignore[unreachable] stack = " / ".join( (fname or "???").rpartition("/")[-1] for _, fname, _, _ in self.data_stack ) f.write(stack) f.write("\n") def _trace( self, frame: FrameType, event: str, arg: Any, # pylint: disable=unused-argument lineno: TLineNo | None = None, # pylint: disable=unused-argument ) -> TTraceFn | None: """The trace function passed to sys.settrace.""" if THIS_FILE in frame.f_code.co_filename: return None # f = frame; code = f.f_code # self.log(":", f"{code.co_filename} {f.f_lineno} {code.co_name}()", event) if (self.stopped and sys.gettrace() == self._cached_bound_method_trace): # pylint: disable=comparison-with-callable # The PyTrace.stop() method has been called, possibly by another # thread, let's deactivate ourselves now. if 0: f = frame # type: ignore[unreachable] self.log("---\nX", f.f_code.co_filename, f.f_lineno) while f: self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace) f = f.f_back sys.settrace(None) try: self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = ( self.data_stack.pop() ) except IndexError: self.log( "Empty stack!", frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name, ) return None # if event != "call" and frame.f_code.co_filename != self.cur_file_name: # self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno) if event == "call": # Should we start a new context? if self.should_start_context and self.context is None: context_maybe = self.should_start_context(frame) if context_maybe is not None: self.context = context_maybe started_context = True assert self.switch_context is not None self.switch_context(self.context) else: started_context = False else: started_context = False self.started_context = started_context # Entering a new frame. Decide if we should trace in this file. self._activity = True self.data_stack.append( ( self.cur_file_data, self.cur_file_name, self.last_line, started_context, ), ) # Improve tracing performance: when calling a function, both caller # and callee are often within the same file. if that's the case, we # don't have to re-check whether to trace the corresponding # function (which is a little bit expensive since it involves # dictionary lookups). This optimization is only correct if we # didn't start a context. 
filename = frame.f_code.co_filename if filename != self.cur_file_name or started_context: self.cur_file_name = filename disp = self.should_trace_cache.get(filename) if disp is None: disp = self.should_trace(filename, frame) self.should_trace_cache[filename] = disp self.cur_file_data = None if disp.trace: tracename = disp.source_filename assert tracename is not None if tracename not in self.data: self.data[tracename] = set() self.cur_file_data = self.data[tracename] else: frame.f_trace_lines = False elif not self.cur_file_data: frame.f_trace_lines = False # The call event is really a "start frame" event, and happens for # function calls and re-entering generators. The f_lasti field is # -1 for calls, and a real offset for generators. Use <0 as the # line number for calls, and the real line number for generators. if RESUME is not None: # The current opcode is guaranteed to be RESUME. The argument # determines what kind of resume it is. oparg = frame.f_code.co_code[frame.f_lasti + 1] real_call = (oparg == 0) else: real_call = (getattr(frame, "f_lasti", -1) < 0) if real_call: self.last_line = -frame.f_code.co_firstlineno else: self.last_line = frame.f_lineno elif event == "line": # Record an executed line. if self.cur_file_data is not None: flineno: TLineNo = frame.f_lineno if self.trace_arcs: cast(Set[TArc], self.cur_file_data).add((self.last_line, flineno)) else: cast(Set[TLineNo], self.cur_file_data).add(flineno) self.last_line = flineno elif event == "return": if self.trace_arcs and self.cur_file_data: # Record an arc leaving the function, but beware that a # "return" event might just mean yielding from a generator. code = frame.f_code.co_code lasti = frame.f_lasti if RESUME is not None: if len(code) == lasti + 2: # A return from the end of a code object is a real return. real_return = True else: # It is a real return if we aren't going to resume next. if env.PYBEHAVIOR.lasti_is_yield: lasti += 2 real_return = (code[lasti] != RESUME) else: if code[lasti] == RETURN_VALUE: real_return = True elif code[lasti] == YIELD_VALUE: real_return = False elif len(code) <= lasti + YIELD_FROM_OFFSET: real_return = True elif code[lasti + YIELD_FROM_OFFSET] == YIELD_FROM: real_return = False else: real_return = True if real_return: first = frame.f_code.co_firstlineno cast(Set[TArc], self.cur_file_data).add((self.last_line, -first)) # Leaving this function, pop the filename stack. self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = ( self.data_stack.pop() ) # Leaving a context? if self.started_context: assert self.switch_context is not None self.context = None self.switch_context(None) return self._cached_bound_method_trace def start(self) -> TTraceFn: """Start this Tracer. Return a Python function suitable for use with sys.settrace(). """ self.stopped = False if self.threading: if self.thread is None: self.thread = self.threading.current_thread() sys.settrace(self._cached_bound_method_trace) return self._cached_bound_method_trace def stop(self) -> None: """Stop this Tracer.""" # Get the active tracer callback before setting the stop flag to be # able to detect if the tracer was changed prior to stopping it. tf = sys.gettrace() # Set the stop flag. The actual call to sys.settrace(None) will happen # in the self._trace callback itself to make sure to call it from the # right thread. 
self.stopped = True if self.threading: assert self.thread is not None if self.thread.ident != self.threading.current_thread().ident: # Called on a different thread than started us: we can't unhook # ourselves, but we've set the flag that we should stop, so we # won't do any more tracing. #self.log("~", "stopping on different threads") return # PyPy clears the trace function before running atexit functions, # so don't warn if we are in atexit on PyPy and the trace function # has changed to None. Metacoverage also messes this up, so don't # warn if we are measuring ourselves. suppress_warning = ( (env.PYPY and self.in_atexit and tf is None) or env.METACOV ) if self.warn and not suppress_warning: if tf != self._cached_bound_method_trace: # pylint: disable=comparison-with-callable self.warn( "Trace function changed, data is likely wrong: " + f"{tf!r} != {self._cached_bound_method_trace!r}", slug="trace-changed", ) def activity(self) -> bool: """Has there been any activity?""" return self._activity def reset_activity(self) -> None: """Reset the activity() flag.""" self._activity = False def get_stats(self) -> dict[str, int] | None: """Return a dictionary of statistics, or None.""" return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/report.py0000644000175100001770000002450700000000000017617 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Summary reporting""" from __future__ import annotations import sys from typing import Any, IO, Iterable, TYPE_CHECKING from coverage.exceptions import ConfigError, NoDataError from coverage.misc import human_sorted_items from coverage.plugin import FileReporter from coverage.report_core import get_analysis_to_report from coverage.results import Analysis, Numbers from coverage.types import TMorf if TYPE_CHECKING: from coverage import Coverage class SummaryReporter: """A reporter for writing the summary report.""" def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.config = self.coverage.config self.branches = coverage.get_data().has_arcs() self.outfile: IO[str] | None = None self.output_format = self.config.format or "text" if self.output_format not in {"text", "markdown", "total"}: raise ConfigError(f"Unknown report format choice: {self.output_format!r}") self.fr_analysis: list[tuple[FileReporter, Analysis]] = [] self.skipped_count = 0 self.empty_count = 0 self.total = Numbers(precision=self.config.precision) def write(self, line: str) -> None: """Write a line to the output, adding a newline.""" assert self.outfile is not None self.outfile.write(line.rstrip()) self.outfile.write("\n") def write_items(self, items: Iterable[str]) -> None: """Write a list of strings, joined together.""" self.write("".join(items)) def _report_text( self, header: list[str], lines_values: list[list[Any]], total_line: list[Any], end_lines: list[str], ) -> None: """Internal method that prints report data in text format. `header` is a list with captions. `lines_values` is list of lists of sortable values. `total_line` is a list with values of the total line. `end_lines` is a list of ending lines with information about skipped files. """ # Prepare the formatting strings, header, and column sorting. 
max_name = max([len(line[0]) for line in lines_values] + [5]) + 1 max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1 max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values]) formats = dict( Name="{:{name_len}}", Stmts="{:>7}", Miss="{:>7}", Branch="{:>7}", BrPart="{:>7}", Cover="{:>{n}}", Missing="{:>10}", ) header_items = [ formats[item].format(item, name_len=max_name, n=max_n) for item in header ] header_str = "".join(header_items) rule = "-" * len(header_str) # Write the header self.write(header_str) self.write(rule) formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}") for values in lines_values: # build string with line values line_items = [ formats[item].format(str(value), name_len=max_name, n=max_n-1) for item, value in zip(header, values) ] self.write_items(line_items) # Write a TOTAL line if lines_values: self.write(rule) line_items = [ formats[item].format(str(value), name_len=max_name, n=max_n-1) for item, value in zip(header, total_line) ] self.write_items(line_items) for end_line in end_lines: self.write(end_line) def _report_markdown( self, header: list[str], lines_values: list[list[Any]], total_line: list[Any], end_lines: list[str], ) -> None: """Internal method that prints report data in markdown format. `header` is a list with captions. `lines_values` is a sorted list of lists containing coverage information. `total_line` is a list with values of the total line. `end_lines` is a list of ending lines with information about skipped files. """ # Prepare the formatting strings, header, and column sorting. max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0) max_name = max(max_name, len("**TOTAL**")) + 1 formats = dict( Name="| {:{name_len}}|", Stmts="{:>9} |", Miss="{:>9} |", Branch="{:>9} |", BrPart="{:>9} |", Cover="{:>{n}} |", Missing="{:>10} |", ) max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover ")) header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header] header_str = "".join(header_items) rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] + ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]], ) # Write the header self.write(header_str) self.write(rule_str) for values in lines_values: # build string with line values formats.update(dict(Cover="{:>{n}}% |")) line_items = [ formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1) for item, value in zip(header, values) ] self.write_items(line_items) # Write the TOTAL line formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |")) total_line_items: list[str] = [] for item, value in zip(header, total_line): if value == "": insert = value elif item == "Cover": insert = f" **{value}%**" else: insert = f" **{value}**" total_line_items += formats[item].format(insert, name_len=max_name, n=max_n) self.write_items(total_line_items) for end_line in end_lines: self.write(end_line) def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float: """Writes a report summarizing coverage statistics per module. `outfile` is a text-mode file object to write the summary to. 
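To make the text format concrete, the output written by ``_report_text`` looks roughly like this (hedged: file names and numbers are invented, and exact column widths depend on the data)::

    Name          Stmts   Miss  Cover
    ---------------------------------
    mypkg/a.py       20      4    80%
    mypkg/b.py       10      0   100%
    ---------------------------------
    TOTAL            30      4    87%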
""" self.outfile = outfile or sys.stdout self.coverage.get_data().set_query_contexts(self.config.report_contexts) for fr, analysis in get_analysis_to_report(self.coverage, morfs): self.report_one_file(fr, analysis) if not self.total.n_files and not self.skipped_count: raise NoDataError("No data to report.") if self.output_format == "total": self.write(self.total.pc_covered_str) else: self.tabular_report() return self.total.pc_covered def tabular_report(self) -> None: """Writes tabular report formats.""" # Prepare the header line and column sorting. header = ["Name", "Stmts", "Miss"] if self.branches: header += ["Branch", "BrPart"] header += ["Cover"] if self.config.show_missing: header += ["Missing"] column_order = dict(name=0, stmts=1, miss=2, cover=-1) if self.branches: column_order.update(dict(branch=3, brpart=4)) # `lines_values` is list of lists of sortable values. lines_values = [] for (fr, analysis) in self.fr_analysis: nums = analysis.numbers args = [fr.relative_filename(), nums.n_statements, nums.n_missing] if self.branches: args += [nums.n_branches, nums.n_partial_branches] args += [nums.pc_covered_str] if self.config.show_missing: args += [analysis.missing_formatted(branches=True)] args += [nums.pc_covered] lines_values.append(args) # Line sorting. sort_option = (self.config.sort or "name").lower() reverse = False if sort_option[0] == "-": reverse = True sort_option = sort_option[1:] elif sort_option[0] == "+": sort_option = sort_option[1:] sort_idx = column_order.get(sort_option) if sort_idx is None: raise ConfigError(f"Invalid sorting option: {self.config.sort!r}") if sort_option == "name": lines_values = human_sorted_items(lines_values, reverse=reverse) else: lines_values.sort( key=lambda line: (line[sort_idx], line[0]), reverse=reverse, ) # Calculate total if we had at least one file. total_line = ["TOTAL", self.total.n_statements, self.total.n_missing] if self.branches: total_line += [self.total.n_branches, self.total.n_partial_branches] total_line += [self.total.pc_covered_str] if self.config.show_missing: total_line += [""] # Create other final lines. end_lines = [] if self.config.skip_covered and self.skipped_count: file_suffix = "s" if self.skipped_count>1 else "" end_lines.append( f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage.", ) if self.config.skip_empty and self.empty_count: file_suffix = "s" if self.empty_count > 1 else "" end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.") if self.output_format == "markdown": formatter = self._report_markdown else: formatter = self._report_text formatter(header, lines_values, total_line, end_lines) def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None: """Report on just one file, the callback from report().""" nums = analysis.numbers self.total += nums no_missing_lines = (nums.n_missing == 0) no_missing_branches = (nums.n_partial_branches == 0) if self.config.skip_covered and no_missing_lines and no_missing_branches: # Don't report on 100% files. self.skipped_count += 1 elif self.config.skip_empty and nums.n_statements == 0: # Don't report on empty files. 
self.empty_count += 1 else: self.fr_analysis.append((fr, analysis)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/report_core.py0000644000175100001770000000772300000000000020630 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Reporter foundation for coverage.py.""" from __future__ import annotations import sys from typing import ( Callable, Iterable, Iterator, IO, Protocol, TYPE_CHECKING, ) from coverage.exceptions import NoDataError, NotPython from coverage.files import prep_patterns, GlobMatcher from coverage.misc import ensure_dir_for_file, file_be_gone from coverage.plugin import FileReporter from coverage.results import Analysis from coverage.types import TMorf if TYPE_CHECKING: from coverage import Coverage class Reporter(Protocol): """What we expect of reporters.""" report_type: str def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: """Generate a report of `morfs`, written to `outfile`.""" def render_report( output_path: str, reporter: Reporter, morfs: Iterable[TMorf] | None, msgfn: Callable[[str], None], ) -> float: """Run a one-file report generator, managing the output file. This function ensures the output file is ready to be written to. Then writes the report to it. Then closes the file and cleans up. """ file_to_close = None delete_file = False if output_path == "-": outfile = sys.stdout else: # Ensure that the output directory is created; done here because this # report pre-opens the output file. HtmlReporter does this on its own # because its task is more complex, being multiple files. ensure_dir_for_file(output_path) outfile = open(output_path, "w", encoding="utf-8") file_to_close = outfile delete_file = True try: ret = reporter.report(morfs, outfile=outfile) if file_to_close is not None: msgfn(f"Wrote {reporter.report_type} to {output_path}") delete_file = False return ret finally: if file_to_close is not None: file_to_close.close() if delete_file: file_be_gone(output_path) # pragma: part covered (doesn't return) def get_analysis_to_report( coverage: Coverage, morfs: Iterable[TMorf] | None, ) -> Iterator[tuple[FileReporter, Analysis]]: """Get the files to report on. For each morf in `morfs`, if it should be reported on (based on the omit and include configuration options), yield a pair, the `FileReporter` and `Analysis` for the morf. """ file_reporters = coverage._get_file_reporters(morfs) config = coverage.config if config.report_include: matcher = GlobMatcher(prep_patterns(config.report_include), "report_include") file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)] if config.report_omit: matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit") file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)] if not file_reporters: raise NoDataError("No data to report.") for fr in sorted(file_reporters): try: analysis = coverage._analyze(fr) except NotPython: # Only report errors for .py files, and only if we didn't # explicitly suppress those errors. # NotPython is only raised by PythonFileReporter, which has a # should_be_python() method. 
if fr.should_be_python(): # type: ignore[attr-defined] if config.ignore_errors: msg = f"Couldn't parse Python file '{fr.filename}'" coverage._warn(msg, slug="couldnt-parse") else: raise except Exception as exc: if config.ignore_errors: msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip() coverage._warn(msg, slug="couldnt-parse") else: raise else: yield (fr, analysis) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/results.py0000644000175100001770000003205700000000000020004 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Results of coverage measurement.""" from __future__ import annotations import collections from typing import Callable, Iterable, TYPE_CHECKING from coverage.debug import auto_repr from coverage.exceptions import ConfigError from coverage.misc import nice_pair from coverage.types import TArc, TLineNo if TYPE_CHECKING: from coverage.data import CoverageData from coverage.plugin import FileReporter class Analysis: """The results of analyzing a FileReporter.""" def __init__( self, data: CoverageData, precision: int, file_reporter: FileReporter, file_mapper: Callable[[str], str], ) -> None: self.data = data self.file_reporter = file_reporter self.filename = file_mapper(self.file_reporter.filename) self.statements = self.file_reporter.lines() self.excluded = self.file_reporter.excluded_lines() # Identify missing statements. executed: Iterable[TLineNo] executed = self.data.lines(self.filename) or [] executed = self.file_reporter.translate_lines(executed) self.executed = executed self.missing = self.statements - self.executed if self.data.has_arcs(): self._arc_possibilities = sorted(self.file_reporter.arcs()) self.exit_counts = self.file_reporter.exit_counts() self.no_branch = self.file_reporter.no_branch_lines() n_branches = self._total_branches() mba = self.missing_branch_arcs() n_partial_branches = sum(len(v) for k,v in mba.items() if k not in self.missing) n_missing_branches = sum(len(v) for k,v in mba.items()) else: self._arc_possibilities = [] self.exit_counts = {} self.no_branch = set() n_branches = n_partial_branches = n_missing_branches = 0 self.numbers = Numbers( precision=precision, n_files=1, n_statements=len(self.statements), n_excluded=len(self.excluded), n_missing=len(self.missing), n_branches=n_branches, n_partial_branches=n_partial_branches, n_missing_branches=n_missing_branches, ) def missing_formatted(self, branches: bool = False) -> str: """The missing line numbers, formatted nicely. Returns a string like "1-2, 5-11, 13-14". If `branches` is true, includes the missing branch arcs also. 
""" if branches and self.has_arcs(): arcs = self.missing_branch_arcs().items() else: arcs = None return format_lines(self.statements, self.missing, arcs=arcs) def has_arcs(self) -> bool: """Were arcs measured in this result?""" return self.data.has_arcs() def arc_possibilities(self) -> list[TArc]: """Returns a sorted list of the arcs in the code.""" return self._arc_possibilities def arcs_executed(self) -> list[TArc]: """Returns a sorted list of the arcs actually executed in the code.""" executed: Iterable[TArc] executed = self.data.arcs(self.filename) or [] executed = self.file_reporter.translate_arcs(executed) return sorted(executed) def arcs_missing(self) -> list[TArc]: """Returns a sorted list of the un-executed arcs in the code.""" possible = self.arc_possibilities() executed = self.arcs_executed() missing = ( p for p in possible if p not in executed and p[0] not in self.no_branch and p[1] not in self.excluded ) return sorted(missing) def arcs_unpredicted(self) -> list[TArc]: """Returns a sorted list of the executed arcs missing from the code.""" possible = self.arc_possibilities() executed = self.arcs_executed() # Exclude arcs here which connect a line to itself. They can occur # in executed data in some cases. This is where they can cause # trouble, and here is where it's the least burden to remove them. # Also, generators can somehow cause arcs from "enter" to "exit", so # make sure we have at least one positive value. unpredicted = ( e for e in executed if e not in possible and e[0] != e[1] and (e[0] > 0 or e[1] > 0) ) return sorted(unpredicted) def _branch_lines(self) -> list[TLineNo]: """Returns a list of line numbers that have more than one exit.""" return [l1 for l1,count in self.exit_counts.items() if count > 1] def _total_branches(self) -> int: """How many total branches are there?""" return sum(count for count in self.exit_counts.values() if count > 1) def missing_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]: """Return arcs that weren't executed from branch lines. Returns {l1:[l2a,l2b,...], ...} """ missing = self.arcs_missing() branch_lines = set(self._branch_lines()) mba = collections.defaultdict(list) for l1, l2 in missing: if l1 in branch_lines: mba[l1].append(l2) return mba def executed_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]: """Return arcs that were executed from branch lines. Returns {l1:[l2a,l2b,...], ...} """ executed = self.arcs_executed() branch_lines = set(self._branch_lines()) eba = collections.defaultdict(list) for l1, l2 in executed: if l1 in branch_lines: eba[l1].append(l2) return eba def branch_stats(self) -> dict[TLineNo, tuple[int, int]]: """Get stats about branches. Returns a dict mapping line numbers to a tuple: (total_exits, taken_exits). """ missing_arcs = self.missing_branch_arcs() stats = {} for lnum in self._branch_lines(): exits = self.exit_counts[lnum] missing = len(missing_arcs[lnum]) stats[lnum] = (exits, exits - missing) return stats class Numbers: """The numerical results of measuring coverage. This holds the basic statistics from `Analysis`, and is used to roll up statistics across files. 
""" def __init__( self, precision: int = 0, n_files: int = 0, n_statements: int = 0, n_excluded: int = 0, n_missing: int = 0, n_branches: int = 0, n_partial_branches: int = 0, n_missing_branches: int = 0, ) -> None: assert 0 <= precision < 10 self._precision = precision self._near0 = 1.0 / 10**precision self._near100 = 100.0 - self._near0 self.n_files = n_files self.n_statements = n_statements self.n_excluded = n_excluded self.n_missing = n_missing self.n_branches = n_branches self.n_partial_branches = n_partial_branches self.n_missing_branches = n_missing_branches __repr__ = auto_repr def init_args(self) -> list[int]: """Return a list for __init__(*args) to recreate this object.""" return [ self._precision, self.n_files, self.n_statements, self.n_excluded, self.n_missing, self.n_branches, self.n_partial_branches, self.n_missing_branches, ] @property def n_executed(self) -> int: """Returns the number of executed statements.""" return self.n_statements - self.n_missing @property def n_executed_branches(self) -> int: """Returns the number of executed branches.""" return self.n_branches - self.n_missing_branches @property def pc_covered(self) -> float: """Returns a single percentage value for coverage.""" if self.n_statements > 0: numerator, denominator = self.ratio_covered pc_cov = (100.0 * numerator) / denominator else: pc_cov = 100.0 return pc_cov @property def pc_covered_str(self) -> str: """Returns the percent covered, as a string, without a percent sign. Note that "0" is only returned when the value is truly zero, and "100" is only returned when the value is truly 100. Rounding can never result in either "0" or "100". """ return self.display_covered(self.pc_covered) def display_covered(self, pc: float) -> str: """Return a displayable total percentage, as a string. Note that "0" is only returned when the value is truly zero, and "100" is only returned when the value is truly 100. Rounding can never result in either "0" or "100". """ if 0 < pc < self._near0: pc = self._near0 elif self._near100 < pc < 100: pc = self._near100 else: pc = round(pc, self._precision) return "%.*f" % (self._precision, pc) def pc_str_width(self) -> int: """How many characters wide can pc_covered_str be?""" width = 3 # "100" if self._precision > 0: width += 1 + self._precision return width @property def ratio_covered(self) -> tuple[int, int]: """Return a numerator and denominator for the coverage ratio.""" numerator = self.n_executed + self.n_executed_branches denominator = self.n_statements + self.n_branches return numerator, denominator def __add__(self, other: Numbers) -> Numbers: nums = Numbers(precision=self._precision) nums.n_files = self.n_files + other.n_files nums.n_statements = self.n_statements + other.n_statements nums.n_excluded = self.n_excluded + other.n_excluded nums.n_missing = self.n_missing + other.n_missing nums.n_branches = self.n_branches + other.n_branches nums.n_partial_branches = ( self.n_partial_branches + other.n_partial_branches ) nums.n_missing_branches = ( self.n_missing_branches + other.n_missing_branches ) return nums def __radd__(self, other: int) -> Numbers: # Implementing 0+Numbers allows us to sum() a list of Numbers. assert other == 0 # we only ever call it this way. 
return self def _line_ranges( statements: Iterable[TLineNo], lines: Iterable[TLineNo], ) -> list[tuple[TLineNo, TLineNo]]: """Produce a list of ranges for `format_lines`.""" statements = sorted(statements) lines = sorted(lines) pairs = [] start = None lidx = 0 for stmt in statements: if lidx >= len(lines): break if stmt == lines[lidx]: lidx += 1 if not start: start = stmt end = stmt elif start: pairs.append((start, end)) start = None if start: pairs.append((start, end)) return pairs def format_lines( statements: Iterable[TLineNo], lines: Iterable[TLineNo], arcs: Iterable[tuple[TLineNo, list[TLineNo]]] | None = None, ) -> str: """Nicely format a list of line numbers. Format a list of line numbers for printing by coalescing groups of lines as long as the lines represent consecutive statements. This will coalesce even if there are gaps between statements. For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14". Both `lines` and `statements` can be any iterable. All of the elements of `lines` must be in `statements`, and all of the values must be positive integers. If `arcs` is provided, they are (start,[end,end,end]) pairs that will be included in the output as long as start isn't in `lines`. """ line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)] if arcs is not None: line_exits = sorted(arcs) for line, exits in line_exits: for ex in sorted(exits): if line not in lines and ex not in lines: dest = (ex if ex > 0 else "exit") line_items.append((line, f"{line}->{dest}")) ret = ", ".join(t[-1] for t in sorted(line_items)) return ret def should_fail_under(total: float, fail_under: float, precision: int) -> bool: """Determine if a total should fail due to fail-under. `total` is a float, the coverage measurement total. `fail_under` is the fail_under setting to compare with. `precision` is the number of digits to consider after the decimal point. Returns True if the total should fail. """ # We can never achieve higher than 100% coverage, or less than zero. if not (0 <= fail_under <= 100.0): msg = f"fail_under={fail_under} is invalid. Must be between 0 and 100." raise ConfigError(msg) # Special case for fail_under=100, it must really be 100. 
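    # Worked example of the rule below: with fail_under=100, even a total of
    # 99.999 fails.  Otherwise the rounded total decides: round(79.99, 0) is
    # 80.0, so should_fail_under(79.99, 80, 0) passes, while with precision=2
    # the same total stays 79.99 and fails.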
if fail_under == 100.0 and total != 100.0: return True return round(total, precision) < fail_under ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/sqldata.py0000644000175100001770000012516400000000000017736 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """SQLite coverage data.""" from __future__ import annotations import collections import datetime import functools import glob import itertools import os import random import socket import sqlite3 import string import sys import textwrap import threading import zlib from typing import ( cast, Any, Collection, Mapping, Sequence, ) from coverage.debug import NoDebugging, auto_repr from coverage.exceptions import CoverageException, DataError from coverage.files import PathAliases from coverage.misc import file_be_gone, isolate_module from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits from coverage.sqlitedb import SqliteDb from coverage.types import AnyCallable, FilePath, TArc, TDebugCtl, TLineNo, TWarnFn from coverage.version import __version__ os = isolate_module(os) # If you change the schema: increment the SCHEMA_VERSION and update the # docs in docs/dbschema.rst by running "make cogdoc". SCHEMA_VERSION = 7 # Schema versions: # 1: Released in 5.0a2 # 2: Added contexts in 5.0a3. # 3: Replaced line table with line_map table. # 4: Changed line_map.bitmap to line_map.numbits. # 5: Added foreign key declarations. # 6: Key-value in meta. # 7: line_map -> line_bits SCHEMA = """\ CREATE TABLE coverage_schema ( -- One row, to record the version of the schema in this db. version integer ); CREATE TABLE meta ( -- Key-value pairs, to record metadata about the data key text, value text, unique (key) -- Possible keys: -- 'has_arcs' boolean -- Is this data recording branches? -- 'sys_argv' text -- The coverage command line that recorded the data. -- 'version' text -- The version of coverage.py that made the file. -- 'when' text -- Datetime when the file was created. ); CREATE TABLE file ( -- A row per file measured. id integer primary key, path text, unique (path) ); CREATE TABLE context ( -- A row per context measured. id integer primary key, context text, unique (context) ); CREATE TABLE line_bits ( -- If recording lines, a row per context per file executed. -- All of the line numbers for that file/context are in one numbits. file_id integer, -- foreign key to `file`. context_id integer, -- foreign key to `context`. numbits blob, -- see the numbits functions in coverage.numbits foreign key (file_id) references file (id), foreign key (context_id) references context (id), unique (file_id, context_id) ); CREATE TABLE arc ( -- If recording branches, a row per context per from/to line transition executed. file_id integer, -- foreign key to `file`. context_id integer, -- foreign key to `context`. fromno integer, -- line number jumped from. tono integer, -- line number jumped to. foreign key (file_id) references file (id), foreign key (context_id) references context (id), unique (file_id, context_id, fromno, tono) ); CREATE TABLE tracer ( -- A row per file indicating the tracer used for that file. 
file_id integer primary key, tracer text, foreign key (file_id) references file (id) ); """ def _locked(method: AnyCallable) -> AnyCallable: """A decorator for methods that should hold self._lock.""" @functools.wraps(method) def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any: if self._debug.should("lock"): self._debug.write(f"Locking {self._lock!r} for {method.__name__}") with self._lock: if self._debug.should("lock"): self._debug.write(f"Locked {self._lock!r} for {method.__name__}") return method(self, *args, **kwargs) return _wrapped class CoverageData: """Manages collected coverage data, including file storage. This class is the public supported API to the data that coverage.py collects during program execution. It includes information about what code was executed. It does not include information from the analysis phase, to determine what lines could have been executed, or what lines were not executed. .. note:: The data file is currently a SQLite database file, with a :ref:`documented schema `. The schema is subject to change though, so be careful about querying it directly. Use this API if you can to isolate yourself from changes. There are a number of kinds of data that can be collected: * **lines**: the line numbers of source lines that were executed. These are always available. * **arcs**: pairs of source and destination line numbers for transitions between source lines. These are only available if branch coverage was used. * **file tracer names**: the module names of the file tracer plugins that handled each file in the data. Lines, arcs, and file tracer names are stored for each source file. File names in this API are case-sensitive, even on platforms with case-insensitive file systems. A data file either stores lines, or arcs, but not both. A data file is associated with the data when the :class:`CoverageData` is created, using the parameters `basename`, `suffix`, and `no_disk`. The base name can be queried with :meth:`base_filename`, and the actual file name being used is available from :meth:`data_filename`. To read an existing coverage.py data file, use :meth:`read`. You can then access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`, or :meth:`file_tracer`. The :meth:`has_arcs` method indicates whether arc data is available. You can get a set of the files in the data with :meth:`measured_files`. As with most Python containers, you can determine if there is any data at all by using this object as a boolean value. The contexts for each line in a file can be read with :meth:`contexts_by_lineno`. To limit querying to certain contexts, use :meth:`set_query_context` or :meth:`set_query_contexts`. These will narrow the focus of subsequent :meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set of all measured context names can be retrieved with :meth:`measured_contexts`. Most data files will be created by coverage.py itself, but you can use methods here to create data files if you like. The :meth:`add_lines`, :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways that are convenient for coverage.py. To record data for contexts, use :meth:`set_context` to set a context to be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls. To add a source file without any measured data, use :meth:`touch_file`, or :meth:`touch_files` for a list of such files. Write the data to its file with :meth:`write`. You can clear the data in memory with :meth:`erase`. 
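    For example, a minimal round trip through this API might look like the
    following (a sketch only; the file name and line numbers are
    hypothetical)::

        data = CoverageData(basename="example.coverage", suffix=False)
        data.add_lines({"prog.py": {1, 2, 3}})
        data.write()

        data2 = CoverageData(basename="example.coverage", suffix=False)
        data2.read()
        assert sorted(data2.lines("prog.py") or []) == [1, 2, 3]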
Data for specific files can be removed from the database with :meth:`purge_files`. Two data collections can be combined by using :meth:`update` on one :class:`CoverageData`, passing it the other. Data in a :class:`CoverageData` can be serialized and deserialized with :meth:`dumps` and :meth:`loads`. The methods used during the coverage.py collection phase (:meth:`add_lines`, :meth:`add_arcs`, :meth:`set_context`, and :meth:`add_file_tracers`) are thread-safe. Other methods may not be. """ def __init__( self, basename: FilePath | None = None, suffix: str | bool | None = None, no_disk: bool = False, warn: TWarnFn | None = None, debug: TDebugCtl | None = None, ) -> None: """Create a :class:`CoverageData` object to hold coverage-measured data. Arguments: basename (str): the base name of the data file, defaulting to ".coverage". This can be a path to a file in another directory. suffix (str or bool): has the same meaning as the `data_suffix` argument to :class:`coverage.Coverage`. no_disk (bool): if True, keep all data in memory, and don't write any disk file. warn: a warning callback function, accepting a warning message argument. debug: a `DebugControl` object (optional) """ self._no_disk = no_disk self._basename = os.path.abspath(basename or ".coverage") self._suffix = suffix self._warn = warn self._debug = debug or NoDebugging() self._choose_filename() # Maps filenames to row ids. self._file_map: dict[str, int] = {} # Maps thread ids to SqliteDb objects. self._dbs: dict[int, SqliteDb] = {} self._pid = os.getpid() # Synchronize the operations used during collection. self._lock = threading.RLock() # Are we in sync with the data file? self._have_used = False self._has_lines = False self._has_arcs = False self._current_context: str | None = None self._current_context_id: int | None = None self._query_context_ids: list[int] | None = None __repr__ = auto_repr def _choose_filename(self) -> None: """Set self._filename based on inited attributes.""" if self._no_disk: self._filename = ":memory:" else: self._filename = self._basename suffix = filename_suffix(self._suffix) if suffix: self._filename += "." 
+ suffix def _reset(self) -> None: """Reset our attributes.""" if not self._no_disk: for db in self._dbs.values(): db.close() self._dbs = {} self._file_map = {} self._have_used = False self._current_context_id = None def _open_db(self) -> None: """Open an existing db file, and read its metadata.""" if self._debug.should("dataio"): self._debug.write(f"Opening data file {self._filename!r}") self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug) self._read_db() def _read_db(self) -> None: """Read the metadata from a database so that we are ready to use it.""" with self._dbs[threading.get_ident()] as db: try: row = db.execute_one("select version from coverage_schema") assert row is not None except Exception as exc: if "no such table: coverage_schema" in str(exc): self._init_db(db) else: raise DataError( "Data file {!r} doesn't seem to be a coverage data file: {}".format( self._filename, exc, ), ) from exc else: schema_version = row[0] if schema_version != SCHEMA_VERSION: raise DataError( "Couldn't use data file {!r}: wrong schema: {} instead of {}".format( self._filename, schema_version, SCHEMA_VERSION, ), ) row = db.execute_one("select value from meta where key = 'has_arcs'") if row is not None: self._has_arcs = bool(int(row[0])) self._has_lines = not self._has_arcs with db.execute("select id, path from file") as cur: for file_id, path in cur: self._file_map[path] = file_id def _init_db(self, db: SqliteDb) -> None: """Write the initial contents of the database.""" if self._debug.should("dataio"): self._debug.write(f"Initing data file {self._filename!r}") db.executescript(SCHEMA) db.execute_void("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,)) # When writing metadata, avoid information that will needlessly change # the hash of the data file, unless we're debugging processes. meta_data = [ ("version", __version__), ] if self._debug.should("process"): meta_data.extend([ ("sys_argv", str(getattr(sys, "argv", None))), ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), ]) db.executemany_void("insert or ignore into meta (key, value) values (?, ?)", meta_data) def _connect(self) -> SqliteDb: """Get the SqliteDb object to use.""" if threading.get_ident() not in self._dbs: self._open_db() return self._dbs[threading.get_ident()] def __bool__(self) -> bool: if (threading.get_ident() not in self._dbs and not os.path.exists(self._filename)): return False try: with self._connect() as con: with con.execute("select * from file limit 1") as cur: return bool(list(cur)) except CoverageException: return False def dumps(self) -> bytes: """Serialize the current data to a byte string. The format of the serialized data is not documented. It is only suitable for use with :meth:`loads` in the same version of coverage.py. Note that this serialization is not what gets stored in coverage data files. This method is meant to produce bytes that can be transmitted elsewhere and then deserialized with :meth:`loads`. Returns: A byte string of serialized data. .. versionadded:: 5.0 """ if self._debug.should("dataio"): self._debug.write(f"Dumping data from data file {self._filename!r}") with self._connect() as con: script = con.dump() return b"z" + zlib.compress(script.encode("utf-8")) def loads(self, data: bytes) -> None: """Deserialize data from :meth:`dumps`. Use with a newly-created empty :class:`CoverageData` object. It's undefined what happens if the object already has data in it. Note that this is not for reading data from a coverage data file. 
It is only for use on data you produced with :meth:`dumps`. Arguments: data: A byte string of serialized data produced by :meth:`dumps`. .. versionadded:: 5.0 """ if self._debug.should("dataio"): self._debug.write(f"Loading data into data file {self._filename!r}") if data[:1] != b"z": raise DataError( f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)", ) script = zlib.decompress(data[1:]).decode("utf-8") self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug) with db: db.executescript(script) self._read_db() self._have_used = True def _file_id(self, filename: str, add: bool = False) -> int | None: """Get the file id for `filename`. If filename is not in the database yet, add it if `add` is True. If `add` is not True, return None. """ if filename not in self._file_map: if add: with self._connect() as con: self._file_map[filename] = con.execute_for_rowid( "insert or replace into file (path) values (?)", (filename,), ) return self._file_map.get(filename) def _context_id(self, context: str) -> int | None: """Get the id for a context.""" assert context is not None self._start_using() with self._connect() as con: row = con.execute_one("select id from context where context = ?", (context,)) if row is not None: return cast(int, row[0]) else: return None @_locked def set_context(self, context: str | None) -> None: """Set the current context for future :meth:`add_lines` etc. `context` is a str, the name of the context to use for the next data additions. The context persists until the next :meth:`set_context`. .. versionadded:: 5.0 """ if self._debug.should("dataop"): self._debug.write(f"Setting coverage context: {context!r}") self._current_context = context self._current_context_id = None def _set_context_id(self) -> None: """Use the _current_context to set _current_context_id.""" context = self._current_context or "" context_id = self._context_id(context) if context_id is not None: self._current_context_id = context_id else: with self._connect() as con: self._current_context_id = con.execute_for_rowid( "insert into context (context) values (?)", (context,), ) def base_filename(self) -> str: """The base filename for storing data. .. versionadded:: 5.0 """ return self._basename def data_filename(self) -> str: """Where is the data stored? .. versionadded:: 5.0 """ return self._filename @_locked def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None: """Add measured line data. `line_data` is a dictionary mapping file names to iterables of ints:: { filename: { line1, line2, ... }, ...} """ if self._debug.should("dataop"): self._debug.write("Adding lines: %d files, %d lines total" % ( len(line_data), sum(len(lines) for lines in line_data.values()), )) if self._debug.should("dataop2"): for filename, linenos in sorted(line_data.items()): self._debug.write(f" {filename}: {linenos}") self._start_using() self._choose_lines_or_arcs(lines=True) if not line_data: return with self._connect() as con: self._set_context_id() for filename, linenos in line_data.items(): linemap = nums_to_numbits(linenos) file_id = self._file_id(filename, add=True) query = "select numbits from line_bits where file_id = ? and context_id = ?" 
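                # Any numbits already stored for this file/context are read back
                # and unioned with the new line bitmap below, so add_lines()
                # accumulates line data rather than overwriting it.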
with con.execute(query, (file_id, self._current_context_id)) as cur: existing = list(cur) if existing: linemap = numbits_union(linemap, existing[0][0]) con.execute_void( "insert or replace into line_bits " + " (file_id, context_id, numbits) values (?, ?, ?)", (file_id, self._current_context_id, linemap), ) @_locked def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None: """Add measured arc data. `arc_data` is a dictionary mapping file names to iterables of pairs of ints:: { filename: { (l1,l2), (l1,l2), ... }, ...} """ if self._debug.should("dataop"): self._debug.write("Adding arcs: %d files, %d arcs total" % ( len(arc_data), sum(len(arcs) for arcs in arc_data.values()), )) if self._debug.should("dataop2"): for filename, arcs in sorted(arc_data.items()): self._debug.write(f" {filename}: {arcs}") self._start_using() self._choose_lines_or_arcs(arcs=True) if not arc_data: return with self._connect() as con: self._set_context_id() for filename, arcs in arc_data.items(): if not arcs: continue file_id = self._file_id(filename, add=True) data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs] con.executemany_void( "insert or ignore into arc " + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", data, ) def _choose_lines_or_arcs(self, lines: bool = False, arcs: bool = False) -> None: """Force the data file to choose between lines and arcs.""" assert lines or arcs assert not (lines and arcs) if lines and self._has_arcs: if self._debug.should("dataop"): self._debug.write("Error: Can't add line measurements to existing branch data") raise DataError("Can't add line measurements to existing branch data") if arcs and self._has_lines: if self._debug.should("dataop"): self._debug.write("Error: Can't add branch measurements to existing line data") raise DataError("Can't add branch measurements to existing line data") if not self._has_arcs and not self._has_lines: self._has_lines = lines self._has_arcs = arcs with self._connect() as con: con.execute_void( "insert or ignore into meta (key, value) values (?, ?)", ("has_arcs", str(int(arcs))), ) @_locked def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None: """Add per-file plugin information. `file_tracers` is { filename: plugin_name, ... } """ if self._debug.should("dataop"): self._debug.write("Adding file tracers: %d files" % (len(file_tracers),)) if not file_tracers: return self._start_using() with self._connect() as con: for filename, plugin_name in file_tracers.items(): file_id = self._file_id(filename, add=True) existing_plugin = self.file_tracer(filename) if existing_plugin: if existing_plugin != plugin_name: raise DataError( "Conflicting file tracer name for '{}': {!r} vs {!r}".format( filename, existing_plugin, plugin_name, ), ) elif plugin_name: con.execute_void( "insert into tracer (file_id, tracer) values (?, ?)", (file_id, plugin_name), ) def touch_file(self, filename: str, plugin_name: str = "") -> None: """Ensure that `filename` appears in the data, empty if needed. `plugin_name` is the name of the plugin responsible for this file. It is used to associate the right filereporter, etc. """ self.touch_files([filename], plugin_name) def touch_files(self, filenames: Collection[str], plugin_name: str | None = None) -> None: """Ensure that `filenames` appear in the data, empty if needed. `plugin_name` is the name of the plugin responsible for these files. It is used to associate the right filereporter, etc. 
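        For example (a sketch; the file name is hypothetical, and the data must
        already contain line or arc measurements)::

            data.touch_files(["never_imported.py"])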
""" if self._debug.should("dataop"): self._debug.write(f"Touching {filenames!r}") self._start_using() with self._connect(): # Use this to get one transaction. if not self._has_arcs and not self._has_lines: raise DataError("Can't touch files in an empty CoverageData") for filename in filenames: self._file_id(filename, add=True) if plugin_name: # Set the tracer for this file self.add_file_tracers({filename: plugin_name}) def purge_files(self, filenames: Collection[str]) -> None: """Purge any existing coverage data for the given `filenames`. .. versionadded:: 7.2 """ if self._debug.should("dataop"): self._debug.write(f"Purging data for {filenames!r}") self._start_using() with self._connect() as con: if self._has_lines: sql = "delete from line_bits where file_id=?" elif self._has_arcs: sql = "delete from arc where file_id=?" else: raise DataError("Can't purge files in an empty CoverageData") for filename in filenames: file_id = self._file_id(filename, add=False) if file_id is None: continue con.execute_void(sql, (file_id,)) def update(self, other_data: CoverageData, aliases: PathAliases | None = None) -> None: """Update this data with data from several other :class:`CoverageData` instances. If `aliases` is provided, it's a `PathAliases` object that is used to re-map paths to match the local machine's. Note: `aliases` is None only when called directly from the test suite. """ if self._debug.should("dataop"): self._debug.write("Updating with data from {!r}".format( getattr(other_data, "_filename", "???"), )) if self._has_lines and other_data._has_arcs: raise DataError("Can't combine arc data with line data") if self._has_arcs and other_data._has_lines: raise DataError("Can't combine line data with arc data") aliases = aliases or PathAliases() # Force the database we're writing to to exist before we start nesting contexts. self._start_using() # Collector for all arcs, lines and tracers other_data.read() with other_data._connect() as con: # Get files data. with con.execute("select path from file") as cur: files = {path: aliases.map(path) for (path,) in cur} # Get contexts data. with con.execute("select context from context") as cur: contexts = [context for (context,) in cur] # Get arc data. with con.execute( "select file.path, context.context, arc.fromno, arc.tono " + "from arc " + "inner join file on file.id = arc.file_id " + "inner join context on context.id = arc.context_id", ) as cur: arcs = [ (files[path], context, fromno, tono) for (path, context, fromno, tono) in cur ] # Get line data. with con.execute( "select file.path, context.context, line_bits.numbits " + "from line_bits " + "inner join file on file.id = line_bits.file_id " + "inner join context on context.id = line_bits.context_id", ) as cur: lines: dict[tuple[str, str], bytes] = {} for path, context, numbits in cur: key = (files[path], context) if key in lines: numbits = numbits_union(lines[key], numbits) lines[key] = numbits # Get tracer data. with con.execute( "select file.path, tracer " + "from tracer " + "inner join file on file.id = tracer.file_id", ) as cur: tracers = {files[path]: tracer for (path, tracer) in cur} with self._connect() as con: assert con.con is not None con.con.isolation_level = "IMMEDIATE" # Get all tracers in the DB. Files not in the tracers are assumed # to have an empty string tracer. Since Sqlite does not support # full outer joins, we have to make two queries to fill the # dictionary. 
with con.execute("select path from file") as cur: this_tracers = {path: "" for path, in cur} with con.execute( "select file.path, tracer from tracer " + "inner join file on file.id = tracer.file_id", ) as cur: this_tracers.update({ aliases.map(path): tracer for path, tracer in cur }) # Create all file and context rows in the DB. con.executemany_void( "insert or ignore into file (path) values (?)", ((file,) for file in files.values()), ) with con.execute("select id, path from file") as cur: file_ids = {path: id for id, path in cur} self._file_map.update(file_ids) con.executemany_void( "insert or ignore into context (context) values (?)", ((context,) for context in contexts), ) with con.execute("select id, context from context") as cur: context_ids = {context: id for id, context in cur} # Prepare tracers and fail, if a conflict is found. # tracer_paths is used to ensure consistency over the tracer data # and tracer_map tracks the tracers to be inserted. tracer_map = {} for path in files.values(): this_tracer = this_tracers.get(path) other_tracer = tracers.get(path, "") # If there is no tracer, there is always the None tracer. if this_tracer is not None and this_tracer != other_tracer: raise DataError( "Conflicting file tracer name for '{}': {!r} vs {!r}".format( path, this_tracer, other_tracer, ), ) tracer_map[path] = other_tracer # Prepare arc and line rows to be inserted by converting the file # and context strings with integer ids. Then use the efficient # `executemany()` to insert all rows at once. arc_rows = ( (file_ids[file], context_ids[context], fromno, tono) for file, context, fromno, tono in arcs ) # Get line data. with con.execute( "select file.path, context.context, line_bits.numbits " + "from line_bits " + "inner join file on file.id = line_bits.file_id " + "inner join context on context.id = line_bits.context_id", ) as cur: for path, context, numbits in cur: key = (aliases.map(path), context) if key in lines: numbits = numbits_union(lines[key], numbits) lines[key] = numbits if arcs: self._choose_lines_or_arcs(arcs=True) # Write the combined data. con.executemany_void( "insert or ignore into arc " + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", arc_rows, ) if lines: self._choose_lines_or_arcs(lines=True) con.execute_void("delete from line_bits") con.executemany_void( "insert into line_bits " + "(file_id, context_id, numbits) values (?, ?, ?)", [ (file_ids[file], context_ids[context], numbits) for (file, context), numbits in lines.items() ], ) con.executemany_void( "insert or ignore into tracer (file_id, tracer) values (?, ?)", ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()), ) if not self._no_disk: # Update all internal cache data. self._reset() self.read() def erase(self, parallel: bool = False) -> None: """Erase the data in this object. If `parallel` is true, then also deletes data files created from the basename by parallel-mode. 
""" self._reset() if self._no_disk: return if self._debug.should("dataio"): self._debug.write(f"Erasing data file {self._filename!r}") file_be_gone(self._filename) if parallel: data_dir, local = os.path.split(self._filename) local_abs_path = os.path.join(os.path.abspath(data_dir), local) pattern = glob.escape(local_abs_path) + ".*" for filename in glob.glob(pattern): if self._debug.should("dataio"): self._debug.write(f"Erasing parallel data file {filename!r}") file_be_gone(filename) def read(self) -> None: """Start using an existing data file.""" if os.path.exists(self._filename): with self._connect(): self._have_used = True def write(self) -> None: """Ensure the data is written to the data file.""" pass def _start_using(self) -> None: """Call this before using the database at all.""" if self._pid != os.getpid(): # Looks like we forked! Have to start a new data file. self._reset() self._choose_filename() self._pid = os.getpid() if not self._have_used: self.erase() self._have_used = True def has_arcs(self) -> bool: """Does the database have arcs (True) or lines (False).""" return bool(self._has_arcs) def measured_files(self) -> set[str]: """A set of all files that have been measured. Note that a file may be mentioned as measured even though no lines or arcs for that file are present in the data. """ return set(self._file_map) def measured_contexts(self) -> set[str]: """A set of all contexts that have been measured. .. versionadded:: 5.0 """ self._start_using() with self._connect() as con: with con.execute("select distinct(context) from context") as cur: contexts = {row[0] for row in cur} return contexts def file_tracer(self, filename: str) -> str | None: """Get the plugin name of the file tracer for a file. Returns the name of the plugin that handles this file. If the file was measured, but didn't use a plugin, then "" is returned. If the file was not measured, then None is returned. """ self._start_using() with self._connect() as con: file_id = self._file_id(filename) if file_id is None: return None row = con.execute_one("select tracer from tracer where file_id = ?", (file_id,)) if row is not None: return row[0] or "" return "" # File was measured, but no tracer associated. def set_query_context(self, context: str) -> None: """Set a context for subsequent querying. The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` calls will be limited to only one context. `context` is a string which must match a context exactly. If it does not, no exception is raised, but queries will return no data. .. versionadded:: 5.0 """ self._start_using() with self._connect() as con: with con.execute("select id from context where context = ?", (context,)) as cur: self._query_context_ids = [row[0] for row in cur.fetchall()] def set_query_contexts(self, contexts: Sequence[str] | None) -> None: """Set a number of contexts for subsequent querying. The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` calls will be limited to the specified contexts. `contexts` is a list of Python regular expressions. Contexts will be matched using :func:`re.search `. Data will be included in query results if they are part of any of the contexts matched. .. 
versionadded:: 5.0 """ self._start_using() if contexts: with self._connect() as con: context_clause = " or ".join(["context regexp ?"] * len(contexts)) with con.execute("select id from context where " + context_clause, contexts) as cur: self._query_context_ids = [row[0] for row in cur.fetchall()] else: self._query_context_ids = None def lines(self, filename: str) -> list[TLineNo] | None: """Get the list of lines executed for a source file. If the file was not measured, returns None. A file might be measured, and have no lines executed, in which case an empty list is returned. If the file was executed, returns a list of integers, the line numbers executed in the file. The list is in no particular order. """ self._start_using() if self.has_arcs(): arcs = self.arcs(filename) if arcs is not None: all_lines = itertools.chain.from_iterable(arcs) return list({l for l in all_lines if l > 0}) with self._connect() as con: file_id = self._file_id(filename) if file_id is None: return None else: query = "select numbits from line_bits where file_id = ?" data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and context_id in (" + ids_array + ")" data += self._query_context_ids with con.execute(query, data) as cur: bitmaps = list(cur) nums = set() for row in bitmaps: nums.update(numbits_to_nums(row[0])) return list(nums) def arcs(self, filename: str) -> list[TArc] | None: """Get the list of arcs executed for a file. If the file was not measured, returns None. A file might be measured, and have no arcs executed, in which case an empty list is returned. If the file was executed, returns a list of 2-tuples of integers. Each pair is a starting line number and an ending line number for a transition from one line to another. The list is in no particular order. Negative numbers have special meaning. If the starting line number is -N, it represents an entry to the code object that starts at line N. If the ending ling number is -N, it's an exit from the code object that starts at line N. """ self._start_using() with self._connect() as con: file_id = self._file_id(filename) if file_id is None: return None else: query = "select distinct fromno, tono from arc where file_id = ?" data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and context_id in (" + ids_array + ")" data += self._query_context_ids with con.execute(query, data) as cur: return list(cur) def contexts_by_lineno(self, filename: str) -> dict[TLineNo, list[str]]: """Get the contexts for each line in a file. Returns: A dict mapping line numbers to a list of context names. .. versionadded:: 5.0 """ self._start_using() with self._connect() as con: file_id = self._file_id(filename) if file_id is None: return {} lineno_contexts_map = collections.defaultdict(set) if self.has_arcs(): query = ( "select arc.fromno, arc.tono, context.context " + "from arc, context " + "where arc.file_id = ? and arc.context_id = context.id" ) data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and arc.context_id in (" + ids_array + ")" data += self._query_context_ids with con.execute(query, data) as cur: for fromno, tono, context in cur: if fromno > 0: lineno_contexts_map[fromno].add(context) if tono > 0: lineno_contexts_map[tono].add(context) else: query = ( "select l.numbits, c.context from line_bits l, context c " + "where l.context_id = c.id " + "and file_id = ?" 
) data = [file_id] if self._query_context_ids is not None: ids_array = ", ".join("?" * len(self._query_context_ids)) query += " and l.context_id in (" + ids_array + ")" data += self._query_context_ids with con.execute(query, data) as cur: for numbits, context in cur: for lineno in numbits_to_nums(numbits): lineno_contexts_map[lineno].add(context) return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()} @classmethod def sys_info(cls) -> list[tuple[str, Any]]: """Our information for `Coverage.sys_info`. Returns a list of (key, value) pairs. """ with SqliteDb(":memory:", debug=NoDebugging()) as db: with db.execute("pragma temp_store") as cur: temp_store = [row[0] for row in cur] with db.execute("pragma compile_options") as cur: copts = [row[0] for row in cur] copts = textwrap.wrap(", ".join(copts), width=75) return [ ("sqlite3_sqlite_version", sqlite3.sqlite_version), ("sqlite3_temp_store", temp_store), ("sqlite3_compile_options", copts), ] def filename_suffix(suffix: str | bool | None) -> str | None: """Compute a filename suffix for a data file. If `suffix` is a string or None, simply return it. If `suffix` is True, then build a suffix incorporating the hostname, process id, and a random number. Returns a string or None. """ if suffix is True: # If data_suffix was a simple true value, then make a suffix with # plenty of distinguishing information. We do this here in # `save()` at the last minute so that the pid will be correct even # if the process forks. die = random.Random(os.urandom(8)) letters = string.ascii_uppercase + string.ascii_lowercase rolls = "".join(die.choice(letters) for _ in range(6)) suffix = f"{socket.gethostname()}.{os.getpid()}.X{rolls}x" elif suffix is False: suffix = None return suffix ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/sqlitedb.py0000644000175100001770000002272000000000000020106 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """SQLite abstraction for coverage.py""" from __future__ import annotations import contextlib import re import sqlite3 from typing import cast, Any, Iterable, Iterator, Tuple from coverage.debug import auto_repr, clipped_repr, exc_one_line from coverage.exceptions import DataError from coverage.types import TDebugCtl class SqliteDb: """A simple abstraction over a SQLite database. Use as a context manager, then you can use it like a :class:`python:sqlite3.Connection` object:: with SqliteDb(filename, debug_control) as db: with db.execute("select a, b from some_table") as cur: for a, b in cur: etc(a, b) """ def __init__(self, filename: str, debug: TDebugCtl) -> None: self.debug = debug self.filename = filename self.nest = 0 self.con: sqlite3.Connection | None = None __repr__ = auto_repr def _connect(self) -> None: """Connect to the db and do universal initialization.""" if self.con is not None: return # It can happen that Python switches threads while the tracer writes # data. The second thread will also try to write to the data, # effectively causing a nested context. However, given the idempotent # nature of the tracer operations, sharing a connection among threads # is not a problem. 
if self.debug.should("sql"): self.debug.write(f"Connecting to {self.filename!r}") try: self.con = sqlite3.connect(self.filename, check_same_thread=False) except sqlite3.Error as exc: raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc if self.debug.should("sql"): self.debug.write(f"Connected to {self.filename!r} as {self.con!r}") self.con.create_function("REGEXP", 2, lambda txt, pat: re.search(txt, pat) is not None) # Turning off journal_mode can speed up writing. It can't always be # disabled, so we have to be prepared for *-journal files elsewhere. # In Python 3.12+, we can change the config to allow journal_mode=off. if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"): # Turn off defensive mode, so that journal_mode=off can succeed. self.con.setconfig( # type: ignore[attr-defined, unused-ignore] sqlite3.SQLITE_DBCONFIG_DEFENSIVE, False, ) # This pragma makes writing faster. It disables rollbacks, but we never need them. self.execute_void("pragma journal_mode=off") # This pragma makes writing faster. It can fail in unusual situations # (https://github.com/nedbat/coveragepy/issues/1646), so use fail_ok=True # to keep things going. self.execute_void("pragma synchronous=off", fail_ok=True) def close(self) -> None: """If needed, close the connection.""" if self.con is not None and self.filename != ":memory:": if self.debug.should("sql"): self.debug.write(f"Closing {self.con!r} on {self.filename!r}") self.con.close() self.con = None def __enter__(self) -> SqliteDb: if self.nest == 0: self._connect() assert self.con is not None self.con.__enter__() self.nest += 1 return self def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def] self.nest -= 1 if self.nest == 0: try: assert self.con is not None self.con.__exit__(exc_type, exc_value, traceback) self.close() except Exception as exc: if self.debug.should("sql"): self.debug.write(f"EXCEPTION from __exit__: {exc_one_line(exc)}") raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor: """Same as :meth:`python:sqlite3.Connection.execute`.""" if self.debug.should("sql"): tail = f" with {parameters!r}" if parameters else "" self.debug.write(f"Executing {sql!r}{tail}") try: assert self.con is not None try: return self.con.execute(sql, parameters) # type: ignore[arg-type] except Exception: # In some cases, an error might happen that isn't really an # error. Try again immediately. # https://github.com/nedbat/coveragepy/issues/1010 return self.con.execute(sql, parameters) # type: ignore[arg-type] except sqlite3.Error as exc: msg = str(exc) if self.filename != ":memory:": try: # `execute` is the first thing we do with the database, so try # hard to provide useful hints if something goes wrong now. with open(self.filename, "rb") as bad_file: cov4_sig = b"!coverage.py: This is a private format" if bad_file.read(len(cov4_sig)) == cov4_sig: msg = ( "Looks like a coverage 4.x data file. " + "Are you mixing versions of coverage?" ) except Exception: pass if self.debug.should("sql"): self.debug.write(f"EXCEPTION from execute: {exc_one_line(exc)}") raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc @contextlib.contextmanager def execute( self, sql: str, parameters: Iterable[Any] = (), ) -> Iterator[sqlite3.Cursor]: """Context managed :meth:`python:sqlite3.Connection.execute`. Use with a ``with`` statement to auto-close the returned cursor. 
""" cur = self._execute(sql, parameters) try: yield cur finally: cur.close() def execute_void(self, sql: str, parameters: Iterable[Any] = (), fail_ok: bool = False) -> None: """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor. If `fail_ok` is True, then SQLite errors are ignored. """ try: # PyPy needs the .close() calls here, or sqlite gets twisted up: # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on self._execute(sql, parameters).close() except DataError: if not fail_ok: raise def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int: """Like execute, but returns the lastrowid.""" with self.execute(sql, parameters) as cur: assert cur.lastrowid is not None rowid: int = cur.lastrowid if self.debug.should("sqldata"): self.debug.write(f"Row id result: {rowid!r}") return rowid def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> tuple[Any, ...] | None: """Execute a statement and return the one row that results. This is like execute(sql, parameters).fetchone(), except it is correct in reading the entire result set. This will raise an exception if more than one row results. Returns a row, or None if there were no rows. """ with self.execute(sql, parameters) as cur: rows = list(cur) if len(rows) == 0: return None elif len(rows) == 1: return cast(Tuple[Any, ...], rows[0]) else: raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows") def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor: """Same as :meth:`python:sqlite3.Connection.executemany`.""" if self.debug.should("sql"): final = ":" if self.debug.should("sqldata") else "" self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}") if self.debug.should("sqldata"): for i, row in enumerate(data): self.debug.write(f"{i:4d}: {row!r}") assert self.con is not None try: return self.con.executemany(sql, data) except Exception: # In some cases, an error might happen that isn't really an # error. Try again immediately. 
# https://github.com/nedbat/coveragepy/issues/1010 return self.con.executemany(sql, data) def executemany_void(self, sql: str, data: Iterable[Any]) -> None: """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor.""" data = list(data) if data: self._executemany(sql, data).close() def executescript(self, script: str) -> None: """Same as :meth:`python:sqlite3.Connection.executescript`.""" if self.debug.should("sql"): self.debug.write("Executing script with {} chars: {}".format( len(script), clipped_repr(script, 100), )) assert self.con is not None self.con.executescript(script).close() def dump(self) -> str: """Return a multi-line string, the SQL dump of the database.""" assert self.con is not None return "\n".join(self.con.iterdump()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/sysmon.py0000644000175100001770000003611400000000000017631 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Callback functions and support for sys.monitoring data collection.""" from __future__ import annotations import functools import inspect import os import os.path import sys import threading import traceback from dataclasses import dataclass from types import CodeType, FrameType from typing import ( Any, Callable, Set, TYPE_CHECKING, cast, ) from coverage.debug import short_filename, short_stack from coverage.types import ( AnyCallable, TArc, TFileDisposition, TLineNo, TTraceData, TTraceFileData, TracerCore, TWarnFn, ) # pylint: disable=unused-argument LOG = False # This module will be imported in all versions of Python, but only used in 3.12+ # It will be type-checked for 3.12, but not for earlier versions. sys_monitoring = getattr(sys, "monitoring", None) if TYPE_CHECKING: assert sys_monitoring is not None # I want to say this but it's not allowed: # MonitorReturn = Literal[sys.monitoring.DISABLE] | None MonitorReturn = Any if LOG: # pragma: debugging class LoggingWrapper: """Wrap a namespace to log all its functions.""" def __init__(self, wrapped: Any, namespace: str) -> None: self.wrapped = wrapped self.namespace = namespace def __getattr__(self, name: str) -> Callable[..., Any]: def _wrapped(*args: Any, **kwargs: Any) -> Any: log(f"{self.namespace}.{name}{args}{kwargs}") return getattr(self.wrapped, name)(*args, **kwargs) return _wrapped sys_monitoring = LoggingWrapper(sys_monitoring, "sys.monitoring") assert sys_monitoring is not None short_stack = functools.partial( short_stack, full=True, short_filenames=True, frame_ids=True, ) seen_threads: set[int] = set() def log(msg: str) -> None: """Write a message to our detailed debugging log(s).""" # Thread ids are reused across processes? # Make a shorter number more likely to be unique. 
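    # The slug below folds the pid and thread id into a single 7-digit value
    # (pid * tid, modulo a large constant) so each process/thread pair gets a
    # compact, mostly-unique tag in the log lines.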
pid = os.getpid() tid = cast(int, threading.current_thread().ident) tslug = f"{(pid * tid) % 9_999_991:07d}" if tid not in seen_threads: seen_threads.add(tid) log(f"New thread {tid} {tslug}:\n{short_stack()}") # log_seq = int(os.getenv("PANSEQ", "0")) # root = f"/tmp/pan.{log_seq:03d}" for filename in [ "/tmp/foo.out", # f"{root}.out", # f"{root}-{pid}.out", # f"{root}-{pid}-{tslug}.out", ]: with open(filename, "a") as f: print(f"{pid}:{tslug}: {msg}", file=f, flush=True) def arg_repr(arg: Any) -> str: """Make a customized repr for logged values.""" if isinstance(arg, CodeType): return ( f"" ) return repr(arg) def panopticon(*names: str | None) -> AnyCallable: """Decorate a function to log its calls.""" def _decorator(method: AnyCallable) -> AnyCallable: @functools.wraps(method) def _wrapped(self: Any, *args: Any) -> Any: try: # log(f"{method.__name__}() stack:\n{short_stack()}") args_reprs = [] for name, arg in zip(names, args): if name is None: continue args_reprs.append(f"{name}={arg_repr(arg)}") log(f"{id(self):#x}:{method.__name__}({', '.join(args_reprs)})") ret = method(self, *args) # log(f" end {id(self):#x}:{method.__name__}({', '.join(args_reprs)})") return ret except Exception as exc: log(f"!!{exc.__class__.__name__}: {exc}") log("".join(traceback.format_exception(exc))) # pylint: disable=[no-value-for-parameter] try: assert sys_monitoring is not None sys_monitoring.set_events(sys.monitoring.COVERAGE_ID, 0) except ValueError: # We might have already shut off monitoring. log("oops, shutting off events with disabled tool id") raise return _wrapped return _decorator else: def log(msg: str) -> None: """Write a message to our detailed debugging log(s), but not really.""" def panopticon(*names: str | None) -> AnyCallable: """Decorate a function to log its calls, but not really.""" def _decorator(meth: AnyCallable) -> AnyCallable: return meth return _decorator @dataclass class CodeInfo: """The information we want about each code object.""" tracing: bool file_data: TTraceFileData | None # TODO: what is byte_to_line for? byte_to_line: dict[int, int] | None def bytes_to_lines(code: CodeType) -> dict[int, int]: """Make a dict mapping byte code offsets to line numbers.""" b2l = {} for bstart, bend, lineno in code.co_lines(): if lineno is not None: for boffset in range(bstart, bend, 2): b2l[boffset] = lineno return b2l class SysMonitor(TracerCore): """Python implementation of the raw data tracer for PEP669 implementations.""" # One of these will be used across threads. Be careful. def __init__(self, tool_id: int) -> None: # Attributes set from the collector: self.data: TTraceData self.trace_arcs = False self.should_trace: Callable[[str, FrameType], TFileDisposition] self.should_trace_cache: dict[str, TFileDisposition | None] # TODO: should_start_context and switch_context are unused! # Change tests/testenv.py:DYN_CONTEXTS when this is updated. self.should_start_context: Callable[[FrameType], str | None] | None = None self.switch_context: Callable[[str | None], None] | None = None # TODO: warn is unused. self.warn: TWarnFn self.myid = tool_id # Map id(code_object) -> CodeInfo self.code_infos: dict[int, CodeInfo] = {} # A list of code_objects, just to keep them alive so that id's are # useful as identity. 
self.code_objects: list[CodeType] = [] self.last_lines: dict[FrameType, int] = {} # Map id(code_object) -> code_object self.local_event_codes: dict[int, CodeType] = {} self.sysmon_on = False self.stats = { "starts": 0, } self.stopped = False self._activity = False def __repr__(self) -> str: points = sum(len(v) for v in self.data.values()) files = len(self.data) return f"" @panopticon() def start(self) -> None: """Start this Tracer.""" self.stopped = False assert sys_monitoring is not None sys_monitoring.use_tool_id(self.myid, "coverage.py") register = functools.partial(sys_monitoring.register_callback, self.myid) events = sys_monitoring.events if self.trace_arcs: sys_monitoring.set_events( self.myid, events.PY_START | events.PY_UNWIND, ) register(events.PY_START, self.sysmon_py_start) register(events.PY_RESUME, self.sysmon_py_resume_arcs) register(events.PY_RETURN, self.sysmon_py_return_arcs) register(events.PY_UNWIND, self.sysmon_py_unwind_arcs) register(events.LINE, self.sysmon_line_arcs) else: sys_monitoring.set_events(self.myid, events.PY_START) register(events.PY_START, self.sysmon_py_start) register(events.LINE, self.sysmon_line_lines) sys_monitoring.restart_events() self.sysmon_on = True @panopticon() def stop(self) -> None: """Stop this Tracer.""" if not self.sysmon_on: # In forking situations, we might try to stop when we are not # started. Do nothing in that case. return assert sys_monitoring is not None sys_monitoring.set_events(self.myid, 0) self.sysmon_on = False for code in self.local_event_codes.values(): sys_monitoring.set_local_events(self.myid, code, 0) self.local_event_codes = {} sys_monitoring.free_tool_id(self.myid) @panopticon() def post_fork(self) -> None: """The process has forked, clean up as needed.""" self.stop() def activity(self) -> bool: """Has there been any activity?""" return self._activity def reset_activity(self) -> None: """Reset the activity() flag.""" self._activity = False def get_stats(self) -> dict[str, int] | None: """Return a dictionary of statistics, or None.""" return None # The number of frames in callers_frame takes @panopticon into account. if LOG: def callers_frame(self) -> FrameType: """Get the frame of the Python code we're monitoring.""" return ( inspect.currentframe().f_back.f_back.f_back # type: ignore[union-attr,return-value] ) else: def callers_frame(self) -> FrameType: """Get the frame of the Python code we're monitoring.""" return inspect.currentframe().f_back.f_back # type: ignore[union-attr,return-value] @panopticon("code", "@") def sysmon_py_start(self, code: CodeType, instruction_offset: int) -> MonitorReturn: """Handle sys.monitoring.events.PY_START events.""" # Entering a new frame. Decide if we should trace in this file. self._activity = True self.stats["starts"] += 1 code_info = self.code_infos.get(id(code)) tracing_code: bool | None = None file_data: TTraceFileData | None = None if code_info is not None: tracing_code = code_info.tracing file_data = code_info.file_data if tracing_code is None: filename = code.co_filename disp = self.should_trace_cache.get(filename) if disp is None: frame = inspect.currentframe().f_back # type: ignore[union-attr] if LOG: # @panopticon adds a frame. 
frame = frame.f_back # type: ignore[union-attr] disp = self.should_trace(filename, frame) # type: ignore[arg-type] self.should_trace_cache[filename] = disp tracing_code = disp.trace if tracing_code: tracename = disp.source_filename assert tracename is not None if tracename not in self.data: self.data[tracename] = set() file_data = self.data[tracename] b2l = bytes_to_lines(code) else: file_data = None b2l = None self.code_infos[id(code)] = CodeInfo( tracing=tracing_code, file_data=file_data, byte_to_line=b2l, ) self.code_objects.append(code) if tracing_code: events = sys.monitoring.events if self.sysmon_on: assert sys_monitoring is not None sys_monitoring.set_local_events( self.myid, code, events.PY_RETURN # | events.PY_RESUME # | events.PY_YIELD | events.LINE, # | events.BRANCH # | events.JUMP ) self.local_event_codes[id(code)] = code if tracing_code and self.trace_arcs: frame = self.callers_frame() self.last_lines[frame] = -code.co_firstlineno return None else: return sys.monitoring.DISABLE @panopticon("code", "@") def sysmon_py_resume_arcs( self, code: CodeType, instruction_offset: int, ) -> MonitorReturn: """Handle sys.monitoring.events.PY_RESUME events for branch coverage.""" frame = self.callers_frame() self.last_lines[frame] = frame.f_lineno @panopticon("code", "@", None) def sysmon_py_return_arcs( self, code: CodeType, instruction_offset: int, retval: object, ) -> MonitorReturn: """Handle sys.monitoring.events.PY_RETURN events for branch coverage.""" frame = self.callers_frame() code_info = self.code_infos.get(id(code)) if code_info is not None and code_info.file_data is not None: last_line = self.last_lines.get(frame) if last_line is not None: arc = (last_line, -code.co_firstlineno) # log(f"adding {arc=}") cast(Set[TArc], code_info.file_data).add(arc) # Leaving this function, no need for the frame any more. self.last_lines.pop(frame, None) @panopticon("code", "@", "exc") def sysmon_py_unwind_arcs( self, code: CodeType, instruction_offset: int, exception: BaseException, ) -> MonitorReturn: """Handle sys.monitoring.events.PY_UNWIND events for branch coverage.""" frame = self.callers_frame() # Leaving this function. last_line = self.last_lines.pop(frame, None) if isinstance(exception, GeneratorExit): # We don't want to count generator exits as arcs. 
return code_info = self.code_infos.get(id(code)) if code_info is not None and code_info.file_data is not None: if last_line is not None: arc = (last_line, -code.co_firstlineno) # log(f"adding {arc=}") cast(Set[TArc], code_info.file_data).add(arc) @panopticon("code", "line") def sysmon_line_lines(self, code: CodeType, line_number: int) -> MonitorReturn: """Handle sys.monitoring.events.LINE events for line coverage.""" code_info = self.code_infos[id(code)] if code_info.file_data is not None: cast(Set[TLineNo], code_info.file_data).add(line_number) # log(f"adding {line_number=}") return sys.monitoring.DISABLE @panopticon("code", "line") def sysmon_line_arcs(self, code: CodeType, line_number: int) -> MonitorReturn: """Handle sys.monitoring.events.LINE events for branch coverage.""" code_info = self.code_infos[id(code)] ret = None if code_info.file_data is not None: frame = self.callers_frame() last_line = self.last_lines.get(frame) if last_line is not None: arc = (last_line, line_number) cast(Set[TArc], code_info.file_data).add(arc) # log(f"adding {arc=}") self.last_lines[frame] = line_number return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/templite.py0000644000175100001770000002507600000000000020131 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """A simple Python template renderer, for a nano-subset of Django syntax. For a detailed discussion of this code, see this chapter from 500 Lines: http://aosabook.org/en/500L/a-template-engine.html """ # Coincidentally named the same as http://code.activestate.com/recipes/496702/ from __future__ import annotations import re from typing import ( Any, Callable, Dict, NoReturn, cast, ) class TempliteSyntaxError(ValueError): """Raised when a template has a syntax error.""" pass class TempliteValueError(ValueError): """Raised when an expression won't evaluate in a template.""" pass class CodeBuilder: """Build source code conveniently.""" def __init__(self, indent: int = 0) -> None: self.code: list[str | CodeBuilder] = [] self.indent_level = indent def __str__(self) -> str: return "".join(str(c) for c in self.code) def add_line(self, line: str) -> None: """Add a line of source to the code. Indentation and newline will be added for you, don't provide them. """ self.code.extend([" " * self.indent_level, line, "\n"]) def add_section(self) -> CodeBuilder: """Add a section, a sub-CodeBuilder.""" section = CodeBuilder(self.indent_level) self.code.append(section) return section INDENT_STEP = 4 # PEP8 says so! def indent(self) -> None: """Increase the current indent for following lines.""" self.indent_level += self.INDENT_STEP def dedent(self) -> None: """Decrease the current indent for following lines.""" self.indent_level -= self.INDENT_STEP def get_globals(self) -> dict[str, Any]: """Execute the code, and return a dict of globals it defines.""" # A check that the caller really finished all the blocks they started. assert self.indent_level == 0 # Get the Python source as a single string. python_source = str(self) # Execute the source, defining globals, and return them. global_namespace: dict[str, Any] = {} exec(python_source, global_namespace) return global_namespace class Templite: """A simple template renderer, for a nano-subset of Django syntax. 
Supported constructs are extended variable access:: {{var.modifier.modifier|filter|filter}} loops:: {% for var in list %}...{% endfor %} and ifs:: {% if var %}...{% endif %} Comments are within curly-hash markers:: {# This will be ignored #} Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped and joined. Be careful, this could join words together! Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`), which will collapse the white space following the tag. Construct a Templite with the template text, then use `render` against a dictionary context to create a finished string:: templite = Templite('''

<h1>Hello {{name|upper}}!</h1>
{% for topic in topics %}
<p>You are interested in {{topic}}.</p>

{% endif %} ''', {"upper": str.upper}, ) text = templite.render({ "name": "Ned", "topics": ["Python", "Geometry", "Juggling"], }) """ def __init__(self, text: str, *contexts: dict[str, Any]) -> None: """Construct a Templite with the given `text`. `contexts` are dictionaries of values to use for future renderings. These are good for filters and global values. """ self.context = {} for context in contexts: self.context.update(context) self.all_vars: set[str] = set() self.loop_vars: set[str] = set() # We construct a function in source form, then compile it and hold onto # it, and execute it to render the template. code = CodeBuilder() code.add_line("def render_function(context, do_dots):") code.indent() vars_code = code.add_section() code.add_line("result = []") code.add_line("append_result = result.append") code.add_line("extend_result = result.extend") code.add_line("to_str = str") buffered: list[str] = [] def flush_output() -> None: """Force `buffered` to the code builder.""" if len(buffered) == 1: code.add_line("append_result(%s)" % buffered[0]) elif len(buffered) > 1: code.add_line("extend_result([%s])" % ", ".join(buffered)) del buffered[:] ops_stack = [] # Split the text to form a list of tokens. tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text) squash = in_joined = False for token in tokens: if token.startswith("{"): start, end = 2, -2 squash = (token[-3] == "-") if squash: end = -3 if token.startswith("{#"): # Comment: ignore it and move on. continue elif token.startswith("{{"): # An expression to evaluate. expr = self._expr_code(token[start:end].strip()) buffered.append("to_str(%s)" % expr) else: # token.startswith("{%") # Action tag: split into words and parse further. flush_output() words = token[start:end].strip().split() if words[0] == "if": # An if statement: evaluate the expression to determine if. if len(words) != 2: self._syntax_error("Don't understand if", token) ops_stack.append("if") code.add_line("if %s:" % self._expr_code(words[1])) code.indent() elif words[0] == "for": # A loop: iterate over expression result. if len(words) != 4 or words[2] != "in": self._syntax_error("Don't understand for", token) ops_stack.append("for") self._variable(words[1], self.loop_vars) code.add_line( f"for c_{words[1]} in {self._expr_code(words[3])}:", ) code.indent() elif words[0] == "joined": ops_stack.append("joined") in_joined = True elif words[0].startswith("end"): # Endsomething. Pop the ops stack. if len(words) != 1: self._syntax_error("Don't understand end", token) end_what = words[0][3:] if not ops_stack: self._syntax_error("Too many ends", token) start_what = ops_stack.pop() if start_what != end_what: self._syntax_error("Mismatched end tag", end_what) if end_what == "joined": in_joined = False else: code.dedent() else: self._syntax_error("Don't understand tag", words[0]) else: # Literal content. If it isn't empty, output it. 
if in_joined: token = re.sub(r"\s*\n\s*", "", token.strip()) elif squash: token = token.lstrip() if token: buffered.append(repr(token)) if ops_stack: self._syntax_error("Unmatched action tag", ops_stack[-1]) flush_output() for var_name in self.all_vars - self.loop_vars: vars_code.add_line(f"c_{var_name} = context[{var_name!r}]") code.add_line("return ''.join(result)") code.dedent() self._render_function = cast( Callable[ [Dict[str, Any], Callable[..., Any]], str, ], code.get_globals()["render_function"], ) def _expr_code(self, expr: str) -> str: """Generate a Python expression for `expr`.""" if "|" in expr: pipes = expr.split("|") code = self._expr_code(pipes[0]) for func in pipes[1:]: self._variable(func, self.all_vars) code = f"c_{func}({code})" elif "." in expr: dots = expr.split(".") code = self._expr_code(dots[0]) args = ", ".join(repr(d) for d in dots[1:]) code = f"do_dots({code}, {args})" else: self._variable(expr, self.all_vars) code = "c_%s" % expr return code def _syntax_error(self, msg: str, thing: Any) -> NoReturn: """Raise a syntax error using `msg`, and showing `thing`.""" raise TempliteSyntaxError(f"{msg}: {thing!r}") def _variable(self, name: str, vars_set: set[str]) -> None: """Track that `name` is used as a variable. Adds the name to `vars_set`, a set of variable names. Raises an syntax error if `name` is not a valid name. """ if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name): self._syntax_error("Not a valid name", name) vars_set.add(name) def render(self, context: dict[str, Any] | None = None) -> str: """Render this template by applying it to `context`. `context` is a dictionary of values to use in this rendering. """ # Make the complete context we'll use. render_context = dict(self.context) if context: render_context.update(context) return self._render_function(render_context, self._do_dots) def _do_dots(self, value: Any, *dots: str) -> Any: """Evaluate dotted expressions at run-time.""" for dot in dots: try: value = getattr(value, dot) except AttributeError: try: value = value[dot] except (TypeError, KeyError) as exc: raise TempliteValueError( f"Couldn't evaluate {value!r}.{dot}", ) from exc if callable(value): value = value() return value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/tomlconfig.py0000644000175100001770000001654400000000000020447 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """TOML configuration support for coverage.py""" from __future__ import annotations import os import re from typing import Any, Callable, Iterable, TypeVar from coverage import env from coverage.exceptions import ConfigError from coverage.misc import import_third_party, substitute_variables from coverage.types import TConfigSectionOut, TConfigValueOut if env.PYVERSION >= (3, 11, 0, "alpha", 7): import tomllib # pylint: disable=import-error has_tomllib = True else: # TOML support on Python 3.10 and below is an install-time extra option. tomllib, has_tomllib = import_third_party("tomli") class TomlDecodeError(Exception): """An exception class that exists even when toml isn't installed.""" pass TWant = TypeVar("TWant") class TomlConfigParser: """TOML file reading with the interface of HandyConfigParser.""" # This class has the same interface as config.HandyConfigParser, no # need for docstrings. 
# pylint: disable=missing-function-docstring def __init__(self, our_file: bool) -> None: self.our_file = our_file self.data: dict[str, Any] = {} def read(self, filenames: Iterable[str]) -> list[str]: # RawConfigParser takes a filename or list of filenames, but we only # ever call this with a single filename. assert isinstance(filenames, (bytes, str, os.PathLike)) filename = os.fspath(filenames) try: with open(filename, encoding='utf-8') as fp: toml_text = fp.read() except OSError: return [] if has_tomllib: try: self.data = tomllib.loads(toml_text) except tomllib.TOMLDecodeError as err: raise TomlDecodeError(str(err)) from err return [filename] else: has_toml = re.search(r"^\[tool\.coverage(\.|])", toml_text, flags=re.MULTILINE) if self.our_file or has_toml: # Looks like they meant to read TOML, but we can't read it. msg = "Can't read {!r} without TOML support. Install with [toml] extra" raise ConfigError(msg.format(filename)) return [] def _get_section(self, section: str) -> tuple[str | None, TConfigSectionOut | None]: """Get a section from the data. Arguments: section (str): A section name, which can be dotted. Returns: name (str): the actual name of the section that was found, if any, or None. data (str): the dict of data in the section, or None if not found. """ prefixes = ["tool.coverage."] for prefix in prefixes: real_section = prefix + section parts = real_section.split(".") try: data = self.data[parts[0]] for part in parts[1:]: data = data[part] except KeyError: continue break else: return None, None return real_section, data def _get(self, section: str, option: str) -> tuple[str, TConfigValueOut]: """Like .get, but returns the real section name and the value.""" name, data = self._get_section(section) if data is None: raise ConfigError(f"No section: {section!r}") assert name is not None try: value = data[option] except KeyError: raise ConfigError(f"No option {option!r} in section: {name!r}") from None return name, value def _get_single(self, section: str, option: str) -> Any: """Get a single-valued option. Performs environment substitution if the value is a string. Other types will be converted later as needed. """ name, value = self._get(section, option) if isinstance(value, str): value = substitute_variables(value, os.environ) return name, value def has_option(self, section: str, option: str) -> bool: _, data = self._get_section(section) if data is None: return False return option in data def real_section(self, section: str) -> str | None: name, _ = self._get_section(section) return name def has_section(self, section: str) -> bool: name, _ = self._get_section(section) return bool(name) def options(self, section: str) -> list[str]: _, data = self._get_section(section) if data is None: raise ConfigError(f"No section: {section!r}") return list(data.keys()) def get_section(self, section: str) -> TConfigSectionOut: _, data = self._get_section(section) return data or {} def get(self, section: str, option: str) -> Any: _, value = self._get_single(section, option) return value def _check_type( self, section: str, option: str, value: Any, type_: type[TWant], converter: Callable[[Any], TWant] | None, type_desc: str, ) -> TWant: """Check that `value` has the type we want, converting if needed. Returns the resulting value of the desired type. 
""" if isinstance(value, type_): return value if isinstance(value, str) and converter is not None: try: return converter(value) except Exception as e: raise ValueError( f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}", ) from e raise ValueError( f"Option [{section}]{option} is not {type_desc}: {value!r}", ) def getboolean(self, section: str, option: str) -> bool: name, value = self._get_single(section, option) bool_strings = {"true": True, "false": False} return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean") def _get_list(self, section: str, option: str) -> tuple[str, list[str]]: """Get a list of strings, substituting environment variables in the elements.""" name, values = self._get(section, option) values = self._check_type(name, option, values, list, None, "a list") values = [substitute_variables(value, os.environ) for value in values] return name, values def getlist(self, section: str, option: str) -> list[str]: _, values = self._get_list(section, option) return values def getregexlist(self, section: str, option: str) -> list[str]: name, values = self._get_list(section, option) for value in values: value = value.strip() try: re.compile(value) except re.error as e: raise ConfigError(f"Invalid [{name}].{option} value {value!r}: {e}") from e return values def getint(self, section: str, option: str) -> int: name, value = self._get_single(section, option) return self._check_type(name, option, value, int, int, "an integer") def getfloat(self, section: str, option: str) -> float: name, value = self._get_single(section, option) if isinstance(value, int): value = float(value) return self._check_type(name, option, value, float, float, "a float") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/tracer.pyi0000644000175100001770000000222300000000000017724 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Typing information for the constructs from our .c files.""" from typing import Any, Dict from coverage.types import TFileDisposition, TTraceData, TTraceFn, TracerCore class CFileDisposition(TFileDisposition): """CFileDisposition is in ctracer/filedisp.c""" canonical_filename: Any file_tracer: Any has_dynamic_filename: Any original_filename: Any reason: Any source_filename: Any trace: Any def __init__(self) -> None: ... class CTracer(TracerCore): """CTracer is in ctracer/tracer.c""" check_include: Any concur_id_func: Any data: TTraceData disable_plugin: Any file_tracers: Any should_start_context: Any should_trace: Any should_trace_cache: Any switch_context: Any trace_arcs: Any warn: Any def __init__(self) -> None: ... def activity(self) -> bool: ... def get_stats(self) -> Dict[str, int]: ... def reset_activity(self) -> Any: ... def start(self) -> TTraceFn: ... def stop(self) -> None: ... ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/types.py0000644000175100001770000001244400000000000017445 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Types for use throughout coverage.py. 
""" from __future__ import annotations import os import pathlib from types import FrameType, ModuleType from typing import ( Any, Callable, Dict, Iterable, List, Mapping, Optional, Protocol, Set, Tuple, Type, Union, TYPE_CHECKING, ) if TYPE_CHECKING: from coverage.plugin import FileTracer AnyCallable = Callable[..., Any] ## File paths # For arguments that are file paths: if TYPE_CHECKING: FilePath = Union[str, os.PathLike[str]] else: # PathLike < python3.9 doesn't support subscription FilePath = Union[str, os.PathLike] # For testing FilePath arguments FilePathClasses = [str, pathlib.Path] FilePathType = Union[Type[str], Type[pathlib.Path]] ## Python tracing class TTraceFn(Protocol): """A Python trace function.""" def __call__( self, frame: FrameType, event: str, arg: Any, lineno: TLineNo | None = None, # Our own twist, see collector.py ) -> TTraceFn | None: ... ## Coverage.py tracing # Line numbers are pervasive enough that they deserve their own type. TLineNo = int TArc = Tuple[TLineNo, TLineNo] class TFileDisposition(Protocol): """A simple value type for recording what to do with a file.""" original_filename: str canonical_filename: str source_filename: str | None trace: bool reason: str file_tracer: FileTracer | None has_dynamic_filename: bool # When collecting data, we use a dictionary with a few possible shapes. The # keys are always file names. # - If measuring line coverage, the values are sets of line numbers. # - If measuring arcs in the Python tracer, the values are sets of arcs (pairs # of line numbers). # - If measuring arcs in the C tracer, the values are sets of packed arcs (two # line numbers combined into one integer). TTraceFileData = Union[Set[TLineNo], Set[TArc], Set[int]] TTraceData = Dict[str, TTraceFileData] class TracerCore(Protocol): """Anything that can report on Python execution.""" data: TTraceData trace_arcs: bool should_trace: Callable[[str, FrameType], TFileDisposition] should_trace_cache: Mapping[str, TFileDisposition | None] should_start_context: Callable[[FrameType], str | None] | None switch_context: Callable[[str | None], None] | None warn: TWarnFn def __init__(self) -> None: ... def start(self) -> TTraceFn | None: """Start this tracer, return a trace function if based on sys.settrace.""" def stop(self) -> None: """Stop this tracer.""" def activity(self) -> bool: """Has there been any activity?""" def reset_activity(self) -> None: """Reset the activity() flag.""" def get_stats(self) -> dict[str, int] | None: """Return a dictionary of statistics, or None.""" ## Coverage # Many places use kwargs as Coverage kwargs. TCovKwargs = Any ## Configuration # One value read from a config file. TConfigValueIn = Optional[Union[bool, int, float, str, Iterable[str]]] TConfigValueOut = Optional[Union[bool, int, float, str, List[str]]] # An entire config section, mapping option names to values. TConfigSectionIn = Mapping[str, TConfigValueIn] TConfigSectionOut = Mapping[str, TConfigValueOut] class TConfigurable(Protocol): """Something that can proxy to the coverage configuration settings.""" def get_option(self, option_name: str) -> TConfigValueOut | None: """Get an option from the configuration. `option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with `"run:branch"`. Returns the value of the option. """ def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: """Set an option in the configuration. 
`option_name` is a colon-separated string indicating the section and option name. For example, the ``branch`` option in the ``[run]`` section of the config file would be indicated with `"run:branch"`. `value` is the new value for the option. """ class TPluginConfig(Protocol): """Something that can provide options to a plugin.""" def get_plugin_options(self, plugin: str) -> TConfigSectionOut: """Get the options for a plugin.""" ## Parsing TMorf = Union[ModuleType, str] TSourceTokenLines = Iterable[List[Tuple[str, str]]] ## Plugins class TPlugin(Protocol): """What all plugins have in common.""" _coverage_plugin_name: str _coverage_enabled: bool ## Debugging class TWarnFn(Protocol): """A callable warn() function.""" def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None: ... class TDebugCtl(Protocol): """A DebugControl object, or something like it.""" def should(self, option: str) -> bool: """Decide whether to output debug information in category `option`.""" def write(self, msg: str) -> None: """Write a line of debug output.""" class TWritable(Protocol): """Anything that can be written to.""" def write(self, msg: str) -> None: """Write a message.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/version.py0000644000175100001770000000262700000000000017770 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """The version and URL for coverage.py""" # This file is exec'ed in setup.py, don't import anything! from __future__ import annotations # version_info: same semantics as sys.version_info. # _dev: the .devN suffix if any. 
version_info = (7, 4, 4, "final", 0) _dev = 0 def _make_version( major: int, minor: int, micro: int, releaselevel: str = "final", serial: int = 0, dev: int = 0, ) -> str: """Create a readable version string from version_info tuple components.""" assert releaselevel in ["alpha", "beta", "candidate", "final"] version = "%d.%d.%d" % (major, minor, micro) if releaselevel != "final": short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel] version += f"{short}{serial}" if dev != 0: version += f".dev{dev}" return version def _make_url( major: int, minor: int, micro: int, releaselevel: str, serial: int = 0, dev: int = 0, ) -> str: """Make the URL people should start at for this version of coverage.py.""" return ( "https://coverage.readthedocs.io/en/" + _make_version(major, minor, micro, releaselevel, serial, dev) ) __version__ = _make_version(*version_info, _dev) __url__ = _make_url(*version_info, _dev) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/coverage/xmlreport.py0000644000175100001770000002305700000000000020337 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """XML reporting for coverage.py""" from __future__ import annotations import os import os.path import sys import time import xml.dom.minidom from dataclasses import dataclass from typing import Any, IO, Iterable, TYPE_CHECKING from coverage import __version__, files from coverage.misc import isolate_module, human_sorted, human_sorted_items from coverage.plugin import FileReporter from coverage.report_core import get_analysis_to_report from coverage.results import Analysis from coverage.types import TMorf from coverage.version import __url__ if TYPE_CHECKING: from coverage import Coverage os = isolate_module(os) DTD_URL = "https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd" def rate(hit: int, num: int) -> str: """Return the fraction of `hit`/`num`, as a string.""" if num == 0: return "1" else: return "%.4g" % (hit / num) @dataclass class PackageData: """Data we keep about each "package" (in Java terms).""" elements: dict[str, xml.dom.minidom.Element] hits: int lines: int br_hits: int branches: int def appendChild(parent: Any, child: Any) -> None: """Append a child to a parent, in a way mypy will shut up about.""" parent.appendChild(child) class XmlReporter: """A reporter for writing Cobertura-style XML coverage results.""" report_type = "XML report" def __init__(self, coverage: Coverage) -> None: self.coverage = coverage self.config = self.coverage.config self.source_paths = set() if self.config.source: for src in self.config.source: if os.path.exists(src): if self.config.relative_files: src = src.rstrip(r"\/") else: src = files.canonical_filename(src) self.source_paths.add(src) self.packages: dict[str, PackageData] = {} self.xml_out: xml.dom.minidom.Document def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float: """Generate a Cobertura-compatible XML report for `morfs`. `morfs` is a list of modules or file names. `outfile` is a file object to write the XML to. """ # Initial setup. outfile = outfile or sys.stdout has_arcs = self.coverage.get_data().has_arcs() # Create the DOM that will store the data. 
impl = xml.dom.minidom.getDOMImplementation() assert impl is not None self.xml_out = impl.createDocument(None, "coverage", None) # Write header stuff. xcoverage = self.xml_out.documentElement xcoverage.setAttribute("version", __version__) xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) xcoverage.appendChild(self.xml_out.createComment( f" Generated by coverage.py: {__url__} ", )) xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} ")) # Call xml_file for each file in the data. for fr, analysis in get_analysis_to_report(self.coverage, morfs): self.xml_file(fr, analysis, has_arcs) xsources = self.xml_out.createElement("sources") xcoverage.appendChild(xsources) # Populate the XML DOM with the source info. for path in human_sorted(self.source_paths): xsource = self.xml_out.createElement("source") appendChild(xsources, xsource) txt = self.xml_out.createTextNode(path) appendChild(xsource, txt) lnum_tot, lhits_tot = 0, 0 bnum_tot, bhits_tot = 0, 0 xpackages = self.xml_out.createElement("packages") xcoverage.appendChild(xpackages) # Populate the XML DOM with the package info. for pkg_name, pkg_data in human_sorted_items(self.packages.items()): xpackage = self.xml_out.createElement("package") appendChild(xpackages, xpackage) xclasses = self.xml_out.createElement("classes") appendChild(xpackage, xclasses) for _, class_elt in human_sorted_items(pkg_data.elements.items()): appendChild(xclasses, class_elt) xpackage.setAttribute("name", pkg_name.replace(os.sep, ".")) xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines)) if has_arcs: branch_rate = rate(pkg_data.br_hits, pkg_data.branches) else: branch_rate = "0" xpackage.setAttribute("branch-rate", branch_rate) xpackage.setAttribute("complexity", "0") lhits_tot += pkg_data.hits lnum_tot += pkg_data.lines bhits_tot += pkg_data.br_hits bnum_tot += pkg_data.branches xcoverage.setAttribute("lines-valid", str(lnum_tot)) xcoverage.setAttribute("lines-covered", str(lhits_tot)) xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot)) if has_arcs: xcoverage.setAttribute("branches-valid", str(bnum_tot)) xcoverage.setAttribute("branches-covered", str(bhits_tot)) xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot)) else: xcoverage.setAttribute("branches-covered", "0") xcoverage.setAttribute("branches-valid", "0") xcoverage.setAttribute("branch-rate", "0") xcoverage.setAttribute("complexity", "0") # Write the output file. outfile.write(serialize_xml(self.xml_out)) # Return the total percentage. denom = lnum_tot + bnum_tot if denom == 0: pct = 0.0 else: pct = 100.0 * (lhits_tot + bhits_tot) / denom return pct def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None: """Add to the XML report for a single file.""" if self.config.skip_empty: if analysis.numbers.n_statements == 0: return # Create the "lines" and "package" XML elements, which # are populated later. Note that a package == a directory. filename = fr.filename.replace("\\", "/") for source_path in self.source_paths: if not self.config.relative_files: source_path = files.canonical_filename(source_path) if filename.startswith(source_path.replace("\\", "/") + "/"): rel_name = filename[len(source_path)+1:] break else: rel_name = fr.relative_filename().replace("\\", "/") self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/")) dirname = os.path.dirname(rel_name) or "." 
dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth]) package_name = dirname.replace("/", ".") package = self.packages.setdefault(package_name, PackageData({}, 0, 0, 0, 0)) xclass: xml.dom.minidom.Element = self.xml_out.createElement("class") appendChild(xclass, self.xml_out.createElement("methods")) xlines = self.xml_out.createElement("lines") appendChild(xclass, xlines) xclass.setAttribute("name", os.path.relpath(rel_name, dirname)) xclass.setAttribute("filename", rel_name.replace("\\", "/")) xclass.setAttribute("complexity", "0") branch_stats = analysis.branch_stats() missing_branch_arcs = analysis.missing_branch_arcs() # For each statement, create an XML "line" element. for line in sorted(analysis.statements): xline = self.xml_out.createElement("line") xline.setAttribute("number", str(line)) # Q: can we get info about the number of times a statement is # executed? If so, that should be recorded here. xline.setAttribute("hits", str(int(line not in analysis.missing))) if has_arcs: if line in branch_stats: total, taken = branch_stats[line] xline.setAttribute("branch", "true") xline.setAttribute( "condition-coverage", "%d%% (%d/%d)" % (100*taken//total, taken, total), ) if line in missing_branch_arcs: annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]] xline.setAttribute("missing-branches", ",".join(annlines)) appendChild(xlines, xline) class_lines = len(analysis.statements) class_hits = class_lines - len(analysis.missing) if has_arcs: class_branches = sum(t for t, k in branch_stats.values()) missing_branches = sum(t - k for t, k in branch_stats.values()) class_br_hits = class_branches - missing_branches else: class_branches = 0 class_br_hits = 0 # Finalize the statistics that are collected in the XML DOM. xclass.setAttribute("line-rate", rate(class_hits, class_lines)) if has_arcs: branch_rate = rate(class_br_hits, class_branches) else: branch_rate = "0" xclass.setAttribute("branch-rate", branch_rate) package.elements[rel_name] = xclass package.hits += class_hits package.lines += class_lines package.br_hits += class_br_hits package.branches += class_branches def serialize_xml(dom: xml.dom.minidom.Document) -> str: """Serialize a minidom node to XML.""" return dom.toprettyxml() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1658149 coverage-7.4.4/coverage.egg-info/0000755000175100001770000000000000000000000017414 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442639.0 coverage-7.4.4/coverage.egg-info/PKG-INFO0000644000175100001770000001776700000000000020533 0ustar00runnerdocker00000000000000Metadata-Version: 2.1 Name: coverage Version: 7.4.4 Summary: Code coverage measurement for Python Home-page: https://github.com/nedbat/coveragepy Author: Ned Batchelder and 224 others Author-email: ned@nedbatchelder.com License: Apache-2.0 Project-URL: Documentation, https://coverage.readthedocs.io/en/7.4.4 Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=pypi Project-URL: Issues, https://github.com/nedbat/coveragepy/issues Project-URL: Mastodon, https://hachyderm.io/@coveragepy Project-URL: Mastodon (nedbat), https://hachyderm.io/@nedbat Keywords: code coverage testing Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: 
OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Software Development :: Quality Assurance Classifier: Topic :: Software Development :: Testing Classifier: Development Status :: 5 - Production/Stable Requires-Python: >=3.8 Description-Content-Type: text/x-rst License-File: LICENSE.txt Provides-Extra: toml Requires-Dist: tomli; python_full_version <= "3.11.0a6" and extra == "toml" .. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt =========== Coverage.py =========== Code coverage testing for Python. .. image:: https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner2-direct.svg :target: https://vshymanskyy.github.io/StandWithUkraine :alt: Stand with Ukraine ------------- | |kit| |license| |versions| | |test-status| |quality-status| |docs| |metacov| | |tidelift| |sponsor| |stars| |mastodon-coveragepy| |mastodon-nedbat| Coverage.py measures code coverage, typically during test execution. It uses the code analysis tools and tracing hooks provided in the Python standard library to determine which lines are executable, and which have been executed. Coverage.py runs on these versions of Python: .. PYVERSIONS * Python 3.8 through 3.12, and 3.13.0a3 and up. * PyPy3 versions 3.8 through 3.10. Documentation is on `Read the Docs`_. Code repository and issue tracker are on `GitHub`_. .. _Read the Docs: https://coverage.readthedocs.io/en/7.4.4/ .. _GitHub: https://github.com/nedbat/coveragepy **New in 7.x:** experimental support for sys.monitoring; dropped support for Python 3.7; added ``Coverage.collect()`` context manager; improved data combining; ``[run] exclude_also`` setting; ``report --format=``; type annotations. **New in 6.x:** dropped support for Python 2.7, 3.5, and 3.6; write data on SIGTERM; added support for 3.10 match/case statements. For Enterprise -------------- .. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png :alt: Tidelift :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme .. list-table:: :widths: 10 100 * - |tideliftlogo| - `Available as part of the Tidelift Subscription. `_ Coverage and thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. If you want the flexibility of open source and the confidence of commercial-grade software, this is for you. `Learn more. `_ Getting Started --------------- Looking to run ``coverage`` on your test suite? See the `Quick Start section`_ of the docs. .. _Quick Start section: https://coverage.readthedocs.io/en/7.4.4/#quick-start Change history -------------- The complete history of changes is on the `change history page`_. .. 
_change history page: https://coverage.readthedocs.io/en/7.4.4/changes.html Code of Conduct --------------- Everyone participating in the coverage.py project is expected to treat other people with respect and to follow the guidelines articulated in the `Python Community Code of Conduct`_. .. _Python Community Code of Conduct: https://www.python.org/psf/codeofconduct/ Contributing ------------ Found a bug? Want to help improve the code or documentation? See the `Contributing section`_ of the docs. .. _Contributing section: https://coverage.readthedocs.io/en/7.4.4/contributing.html Security -------- To report a security vulnerability, please use the `Tidelift security contact`_. Tidelift will coordinate the fix and disclosure. .. _Tidelift security contact: https://tidelift.com/security License ------- Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_. .. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 .. _NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. |test-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=master&event=push :target: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml :alt: Test suite status .. |quality-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml/badge.svg?branch=master&event=push :target: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml :alt: Quality check status .. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat :target: https://coverage.readthedocs.io/en/7.4.4/ :alt: Documentation .. |kit| image:: https://img.shields.io/pypi/v/coverage :target: https://pypi.org/project/coverage/ :alt: PyPI status .. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072 :target: https://pypi.org/project/coverage/ :alt: Python versions supported .. |license| image:: https://img.shields.io/pypi/l/coverage.svg :target: https://pypi.org/project/coverage/ :alt: License .. |metacov| image:: https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/nedbat/8c6980f77988a327348f9b02bbaf67f5/raw/metacov.json :target: https://nedbat.github.io/coverage-reports/latest.html :alt: Coverage reports .. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme :alt: Tidelift .. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github :target: https://github.com/nedbat/coveragepy/stargazers :alt: GitHub stars .. |mastodon-nedbat| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&link=https%3A%2F%2Fhachyderm.io%2F%40nedbat&url=https%3A%2F%2Fhachyderm.io%2Fusers%2Fnedbat%2Ffollowers.json&query=totalItems&label=@nedbat :target: https://hachyderm.io/@nedbat :alt: nedbat on Mastodon .. |mastodon-coveragepy| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&link=https%3A%2F%2Fhachyderm.io%2F%40coveragepy&url=https%3A%2F%2Fhachyderm.io%2Fusers%2Fcoveragepy%2Ffollowers.json&query=totalItems&label=@coveragepy :target: https://hachyderm.io/@coveragepy :alt: coveragepy on Mastodon .. 
|sponsor| image:: https://img.shields.io/badge/%E2%9D%A4-Sponsor%20me-brightgreen?style=flat&logo=GitHub :target: https://github.com/sponsors/nedbat :alt: Sponsor me on GitHub ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442639.0 coverage-7.4.4/coverage.egg-info/SOURCES.txt0000644000175100001770000002230700000000000021304 0ustar00runnerdocker00000000000000.editorconfig .git-blame-ignore-revs .readthedocs.yaml CHANGES.rst CITATION.cff CONTRIBUTORS.txt LICENSE.txt MANIFEST.in Makefile NOTICE.txt README.rst __main__.py howto.txt igor.py metacov.ini pyproject.toml setup.py tox.ini .github/CODE_OF_CONDUCT.md .github/FUNDING.yml .github/SECURITY.md .github/dependabot.yml .github/ISSUE_TEMPLATE/bug_report.md .github/ISSUE_TEMPLATE/config.yml .github/ISSUE_TEMPLATE/feature_request.md .github/ISSUE_TEMPLATE/support.md .github/workflows/codeql-analysis.yml .github/workflows/coverage.yml .github/workflows/dependency-review.yml .github/workflows/kit.yml .github/workflows/python-nightly.yml .github/workflows/quality.yml .github/workflows/testsuite.yml ci/README.txt ci/comment_on_fixes.py ci/download_gha_artifacts.py ci/ghrel_template.md.j2 ci/parse_relnotes.py ci/session.py ci/trigger_build_kits.py coverage/__init__.py coverage/__main__.py coverage/annotate.py coverage/bytecode.py coverage/cmdline.py coverage/collector.py coverage/config.py coverage/context.py coverage/control.py coverage/data.py coverage/debug.py coverage/disposition.py coverage/env.py coverage/exceptions.py coverage/execfile.py coverage/files.py coverage/html.py coverage/inorout.py coverage/jsonreport.py coverage/lcovreport.py coverage/misc.py coverage/multiproc.py coverage/numbits.py coverage/parser.py coverage/phystokens.py coverage/plugin.py coverage/plugin_support.py coverage/py.typed coverage/python.py coverage/pytracer.py coverage/report.py coverage/report_core.py coverage/results.py coverage/sqldata.py coverage/sqlitedb.py coverage/sysmon.py coverage/templite.py coverage/tomlconfig.py coverage/tracer.pyi coverage/types.py coverage/version.py coverage/xmlreport.py coverage.egg-info/PKG-INFO coverage.egg-info/SOURCES.txt coverage.egg-info/dependency_links.txt coverage.egg-info/entry_points.txt coverage.egg-info/not-zip-safe coverage.egg-info/requires.txt coverage.egg-info/top_level.txt coverage/ctracer/datastack.c coverage/ctracer/datastack.h coverage/ctracer/filedisp.c coverage/ctracer/filedisp.h coverage/ctracer/module.c coverage/ctracer/stats.h coverage/ctracer/tracer.c coverage/ctracer/tracer.h coverage/ctracer/util.h coverage/htmlfiles/coverage_html.js coverage/htmlfiles/favicon_32.png coverage/htmlfiles/index.html coverage/htmlfiles/keybd_closed.png coverage/htmlfiles/keybd_open.png coverage/htmlfiles/pyfile.html coverage/htmlfiles/style.css coverage/htmlfiles/style.scss doc/api.rst doc/api_coverage.rst doc/api_coveragedata.rst doc/api_exceptions.rst doc/api_module.rst doc/api_plugin.rst doc/branch.rst doc/changes.rst doc/cmd.rst doc/cog_helpers.py doc/conf.py doc/config.rst doc/contexts.rst doc/contributing.rst doc/dbschema.rst doc/dict.txt doc/excluding.rst doc/faq.rst doc/howitworks.rst doc/index.rst doc/install.rst doc/migrating.rst doc/other.rst doc/plugins.rst doc/python-coverage.1.txt doc/requirements.in doc/requirements.pip doc/sleepy.rst doc/source.rst doc/subprocess.rst doc/trouble.rst doc/whatsnew5x.rst doc/_static/coverage.css doc/media/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White.png doc/media/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White_small.png 
doc/media/sleepy-snake-600.png doc/media/sleepy-snake-circle-150.png doc/sample_html/favicon_32.png doc/sample_html/keybd_closed.png doc/sample_html/keybd_open.png lab/README.txt lab/bpo_prelude.py lab/branch_trace.py lab/branches.py lab/compare_times.sh lab/coverage-03.dtd lab/coverage-04.dtd lab/extract_code.py lab/find_class.py lab/genpy.py lab/goals.py lab/hack_pyc.py lab/new-data.js lab/parse_all.py lab/parser.py lab/pick.py lab/platform_info.py lab/run_sysmon.py lab/run_trace.py lab/select_contexts.py lab/show_platform.py lab/show_pyc.py lab/treetopy.sh lab/benchmark/benchmark.py lab/benchmark/empty.py lab/benchmark/run.py lab/notes/bug1303.txt lab/notes/pypy-738-decorated-functions.txt requirements/dev.in requirements/dev.pip requirements/kit.in requirements/kit.pip requirements/light-threads.in requirements/light-threads.pip requirements/mypy.in requirements/mypy.pip requirements/pins.pip requirements/pip-tools.in requirements/pip-tools.pip requirements/pip.in requirements/pip.pip requirements/pytest.in requirements/pytest.pip requirements/tox.in requirements/tox.pip tests/__init__.py tests/balance_xdist_plugin.py tests/conftest.py tests/coveragetest.py tests/covmodzip1.py tests/goldtest.py tests/helpers.py tests/mixins.py tests/osinfo.py tests/plugin1.py tests/plugin2.py tests/plugin_config.py tests/select_plugin.py tests/stress_phystoken.tok tests/stress_phystoken_dos.tok tests/test_annotate.py tests/test_api.py tests/test_arcs.py tests/test_cmdline.py tests/test_collector.py tests/test_concurrency.py tests/test_config.py tests/test_context.py tests/test_coverage.py tests/test_data.py tests/test_debug.py tests/test_execfile.py tests/test_filereporter.py tests/test_files.py tests/test_goldtest.py tests/test_html.py tests/test_json.py tests/test_lcov.py tests/test_misc.py tests/test_mixins.py tests/test_numbits.py tests/test_oddball.py tests/test_parser.py tests/test_phystokens.py tests/test_plugins.py tests/test_process.py tests/test_python.py tests/test_report.py tests/test_report_common.py tests/test_report_core.py tests/test_results.py tests/test_setup.py tests/test_sqlitedb.py tests/test_templite.py tests/test_testing.py tests/test_venv.py tests/test_version.py tests/test_xml.py tests/testenv.py tests/gold/README.rst tests/gold/annotate/anno_dir/d_80084bf2fba02475___init__.py,cover tests/gold/annotate/anno_dir/d_80084bf2fba02475_a.py,cover tests/gold/annotate/anno_dir/d_b039179a8a4ce2c2___init__.py,cover tests/gold/annotate/anno_dir/d_b039179a8a4ce2c2_b.py,cover tests/gold/annotate/anno_dir/multi.py,cover tests/gold/annotate/encodings/utf8.py,cover tests/gold/annotate/mae/mae.py,cover tests/gold/annotate/multi/multi.py,cover tests/gold/annotate/multi/a/__init__.py,cover tests/gold/annotate/multi/a/a.py,cover tests/gold/annotate/multi/b/__init__.py,cover tests/gold/annotate/multi/b/b.py,cover tests/gold/annotate/white/white.py,cover tests/gold/html/Makefile tests/gold/html/a/a_py.html tests/gold/html/a/index.html tests/gold/html/b_branch/b_py.html tests/gold/html/b_branch/index.html tests/gold/html/bom/bom_py.html tests/gold/html/bom/index.html tests/gold/html/contexts/index.html tests/gold/html/contexts/two_tests_py.html tests/gold/html/isolatin1/index.html tests/gold/html/isolatin1/isolatin1_py.html tests/gold/html/omit_1/index.html tests/gold/html/omit_1/m1_py.html tests/gold/html/omit_1/m2_py.html tests/gold/html/omit_1/m3_py.html tests/gold/html/omit_1/main_py.html tests/gold/html/omit_2/index.html tests/gold/html/omit_2/m2_py.html tests/gold/html/omit_2/m3_py.html 
tests/gold/html/omit_2/main_py.html tests/gold/html/omit_3/index.html tests/gold/html/omit_3/m3_py.html tests/gold/html/omit_3/main_py.html tests/gold/html/omit_4/index.html tests/gold/html/omit_4/m1_py.html tests/gold/html/omit_4/m3_py.html tests/gold/html/omit_4/main_py.html tests/gold/html/omit_5/index.html tests/gold/html/omit_5/m1_py.html tests/gold/html/omit_5/main_py.html tests/gold/html/other/blah_blah_other_py.html tests/gold/html/other/here_py.html tests/gold/html/other/index.html tests/gold/html/partial/index.html tests/gold/html/partial/partial_py.html tests/gold/html/partial_626/index.html tests/gold/html/partial_626/partial_py.html tests/gold/html/styled/a_py.html tests/gold/html/styled/extra.css tests/gold/html/styled/index.html tests/gold/html/styled/style.css tests/gold/html/support/coverage_html.js tests/gold/html/support/favicon_32.png tests/gold/html/support/keybd_closed.png tests/gold/html/support/keybd_open.png tests/gold/html/support/style.css tests/gold/html/unicode/index.html tests/gold/html/unicode/unicode_py.html tests/gold/testing/getty/gettysburg.txt tests/gold/testing/xml/output.xml tests/gold/xml/x_xml/coverage.xml tests/gold/xml/y_xml_branch/coverage.xml tests/js/index.html tests/js/tests.js tests/modules/covmod1.py tests/modules/runmod1.py tests/modules/usepkgs.py tests/modules/aa/__init__.py tests/modules/aa/afile.odd.py tests/modules/aa/afile.py tests/modules/aa/zfile.py tests/modules/aa/bb/__init__.py tests/modules/aa/bb/bfile.odd.py tests/modules/aa/bb/bfile.py tests/modules/aa/bb.odd/bfile.py tests/modules/aa/bb/cc/__init__.py tests/modules/aa/bb/cc/cfile.py tests/modules/ambiguous/__init__.py tests/modules/ambiguous/pkg1/__init__.py tests/modules/ambiguous/pkg1/ambiguous.py tests/modules/namespace_420/sub1/__init__.py tests/modules/pkg1/__init__.py tests/modules/pkg1/__main__.py tests/modules/pkg1/p1a.py tests/modules/pkg1/p1b.py tests/modules/pkg1/p1c.py tests/modules/pkg1/runmod2.py tests/modules/pkg1/sub/__init__.py tests/modules/pkg1/sub/__main__.py tests/modules/pkg1/sub/ps1a.py tests/modules/pkg1/sub/runmod3.py tests/modules/pkg2/__init__.py tests/modules/pkg2/p2a.py tests/modules/pkg2/p2b.py tests/modules/plugins/__init__.py tests/modules/plugins/a_plugin.py tests/modules/plugins/another.py tests/modules/process_test/__init__.py tests/modules/process_test/try_execfile.py tests/moremodules/namespace_420/sub2/__init__.py tests/moremodules/othermods/__init__.py tests/moremodules/othermods/othera.py tests/moremodules/othermods/otherb.py tests/moremodules/othermods/sub/__init__.py tests/moremodules/othermods/sub/osa.py tests/moremodules/othermods/sub/osb.py tests/qunit/jquery.tmpl.min.js tests/zipsrc/zip1/__init__.py tests/zipsrc/zip1/zip1.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442639.0 coverage-7.4.4/coverage.egg-info/dependency_links.txt0000644000175100001770000000000100000000000023462 0ustar00runnerdocker00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442639.0 coverage-7.4.4/coverage.egg-info/entry_points.txt0000644000175100001770000000017200000000000022712 0ustar00runnerdocker00000000000000[console_scripts] coverage = coverage.cmdline:main coverage-3.8 = coverage.cmdline:main coverage3 = coverage.cmdline:main ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442638.0 coverage-7.4.4/coverage.egg-info/not-zip-safe0000644000175100001770000000000100000000000021642 
0ustar00runnerdocker00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442639.0 coverage-7.4.4/coverage.egg-info/requires.txt0000644000175100001770000000007000000000000022011 0ustar00runnerdocker00000000000000 [toml] [toml:python_full_version <= "3.11.0a6"] tomli ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442639.0 coverage-7.4.4/coverage.egg-info/top_level.txt0000644000175100001770000000001100000000000022136 0ustar00runnerdocker00000000000000coverage ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.125815 coverage-7.4.4/doc/0000755000175100001770000000000000000000000014674 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.125815 coverage-7.4.4/doc/_static/0000755000175100001770000000000000000000000016322 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/_static/coverage.css0000644000175100001770000000525200000000000020633 0ustar00runnerdocker00000000000000body { font-family: Georgia; } h1, h2, h3, h4, h5, h6 { font-family: Helvetica; } a:hover { text-decoration: underline; } img.tideliftlogo { border: 1px solid #888; margin-top: .5em !important; } .rst-content ol.arabic li { margin-bottom: 12px; } .rst-content h3, .rst-content h4, .rst-content h5, .rst-content h6 { /* This makes config.rst look a little better, but the paras are still too * spaced out. */ margin-bottom: 12px; } .ui.menu { font-family: Helvetica; min-height: 0; } .ui.tabular.menu .item { padding: .25em 1em; } .ui.menu .item { padding: 0; } .sig { font-family: Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace; } .sig-name, .sig-prename { font-size: 1.1em; font-weight: bold; color: black; } .rst-content dl dt.sig { font-weight: inherit; } /* .. parsed-literal:: isn't styled like other
<pre> blocks!? */

.rst-content pre.literal-block {
    white-space: pre;
    padding: 12px 12px !important;
    font-family: Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;
    font-size: 12px;
    display: block;
    overflow: auto;
    color: #404040;
    background: #efc;
}

/* I'm not sure why I had to make this so specific to get it to take effect... */
div.rst-content div.document div.wy-table-responsive table.docutils.align-default tbody tr td {
    vertical-align: top !important;
}

/* And this doesn't work, and I guess I just have to live with it. */
div.rst-content div.document div.wy-table-responsive table.docutils.align-default tbody tr td .line-block {
    margin-bottom: 0 !important;
}

/* sphinx-code-tabs */

/* Some selectors here are extra-specific (.container) because this file comes
 * before code-tabs.css, so we need the specificity to override it.
 */

div.tabs.container > ul.tabbar > li.tabbutton {
    color: #666;
    background-color: #ddd;
    border-color: #aaa;
}

div.tabs.container > ul.tabbar > li.tabbutton:hover {
	background-color: #eee;
}

div.tabs.container > ul.tabbar > li.tabbutton.selected {
    color: black;
    background-color: #fff;
    border-color: #aaa;
    border-bottom-color: #fff;
}

div.tabs.container > ul.tabbar > li.tabbutton.selected:hover {
    background-color: #fff;
}

div.tabs.container {
    margin-bottom: 1em;
}

div.tab.selected {
    border: 1px solid #ccc;
    border-radius: 0 .5em .5em .5em;
}

div.tab.codetab.selected {
    border: none;
}
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0
coverage-7.4.4/doc/api.rst0000644000175100001770000000322500000000000016201 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

.. _api:

===============
Coverage.py API
===============

There are a few different ways to use coverage.py programmatically.

The API to coverage.py is in a module called :mod:`coverage`.  Most of the
interface is in the :class:`coverage.Coverage` class.  Methods on the Coverage
object correspond roughly to operations available in the command line
interface. For example, a simple use would be::

    import coverage

    cov = coverage.Coverage()
    cov.start()

    # .. call your code ..

    cov.stop()
    cov.save()

    cov.html_report()
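
Coverage.py 7.x also provides a ``Coverage.collect()`` context manager that
starts and stops measurement for you; a minimal sketch of the same run using
it::

    import coverage

    cov = coverage.Coverage()
    with cov.collect():
        ...  # .. call your code ..

    cov.save()
    cov.html_report()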

Any of the methods can raise specialized exceptions described in
:ref:`api_exceptions`.

Coverage.py supports plugins that can change its behavior, to collect
information from non-Python files, or to perform complex configuration.  See
:ref:`api_plugin` for details.

If you want to access the data that coverage.py has collected, the
:class:`coverage.CoverageData` class provides an API to read coverage.py data
files.
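
For example, after a run you can read the default data file and list the
measured files with their line numbers (a minimal sketch)::

    from coverage import CoverageData

    data = CoverageData()   # uses the default ".coverage" data file
    data.read()
    for filename in data.measured_files():
        print(filename, data.lines(filename))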

.. note::

    Only the documented portions of the API are supported. Other names you may
    find in modules or objects can change their behavior at any time. Please
    limit yourself to documented methods to avoid problems.

For more intensive data use, you might want to access the coverage.py database
file directly.  The schema is subject to change, so this is for advanced uses
only.  :ref:`dbschema` explains more.
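
For example, an existing data file is a SQLite database that can be opened
directly; this sketch assumes the ``file`` table of the current schema and may
break when the schema changes::

    import sqlite3

    con = sqlite3.connect(".coverage")
    for (path,) in con.execute("select path from file"):
        print(path)     # one row per measured file
    con.close()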

.. toctree::
    :maxdepth: 1

    api_coverage
    api_exceptions
    api_module
    api_plugin
    api_coveragedata
    dbschema
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0
coverage-7.4.4/doc/api_coverage.rst0000644000175100001770000000054000000000000020051 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

.. _api_coverage:

The Coverage class
------------------

.. module:: coverage
    :noindex:

.. autoclass:: Coverage
    :members:
    :exclude-members: sys_info
    :special-members: __init__
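
Constructing the object is often the only configuration a programmatic run
needs; a minimal sketch, with illustrative argument values::

    import coverage

    cov = coverage.Coverage(
        branch=True,            # also measure branch coverage
        source=["myproject"],   # limit measurement to this package
        omit=["*/tests/*"],     # leave test files out of reports
    )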
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0
coverage-7.4.4/doc/api_coveragedata.rst0000644000175100001770000000055000000000000020704 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

.. _api_coveragedata:

The CoverageData class
----------------------

.. versionadded:: 4.0

.. module:: coverage
    :noindex:

.. autoclass:: CoverageData
    :members:
    :special-members: __init__
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0
coverage-7.4.4/doc/api_exceptions.rst0000644000175100001770000000060700000000000020443 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

.. _api_exceptions:

Coverage exceptions
-------------------

.. module:: coverage.exceptions

.. autoclass:: CoverageException

.. automodule:: coverage.exceptions
    :noindex:
    :members:
    :exclude-members: CoverageException
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0
coverage-7.4.4/doc/api_module.rst0000644000175100001770000000217200000000000017546 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

.. _api_module:

coverage module
---------------

.. module:: coverage

The most important thing in the coverage module is the
:class:`coverage.Coverage` class, described in :ref:`api_coverage`, but there
are a few other things also.


.. data:: version_info

    A tuple of five elements, similar to :data:`sys.version_info
    <sys.version_info>`: *major*, *minor*, *micro*, *releaselevel*, and
    *serial*.  All values except *releaselevel* are integers; the release
    level is ``'alpha'``, ``'beta'``, ``'candidate'``, or ``'final'``.
    Unlike :data:`sys.version_info <sys.version_info>`, the elements are not
    available by name.

.. data:: __version__

    A string with the version of coverage.py, for example, ``"5.0b2"``.
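
As a small illustrative sketch, a program could use these values to check the
installed version at run time::

    import coverage

    print("coverage.py", coverage.__version__)
    major, minor, micro, releaselevel, serial = coverage.version_info
    if (major, minor) < (5, 0):
        print("this version stores data as JSON, not SQLite")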

.. autoclass:: CoverageException


Starting coverage.py automatically
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This function is used to start coverage measurement automatically when Python
starts.  See :ref:`subprocess` for details.

.. autofunction:: process_startup
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0
coverage-7.4.4/doc/api_plugin.rst0000644000175100001770000000120600000000000017554 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

.. _api_plugin:

===============
Plug-in classes
===============

.. automodule:: coverage.plugin

.. module:: coverage
    :noindex:
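
As a brief, hypothetical sketch (the module name ``my_plugin`` and the omit
pattern are invented for illustration), a configurer plugin module might look
like this::

    # my_plugin.py -- a hypothetical configurer plugin.
    import coverage

    class ExtraOmit(coverage.CoveragePlugin):
        def configure(self, config):
            # Add one more pattern to the run-time omit list.
            omit = config.get_option("run:omit") or []
            omit.append("*/generated/*")
            config.set_option("run:omit", omit)

    def coverage_init(reg, options):
        reg.add_configurer(ExtraOmit())

The plugin module is then named in the ``[run] plugins`` configuration setting
so that coverage.py imports it at start-up.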

The CoveragePlugin class
------------------------

.. autoclass:: CoveragePlugin
    :members:
    :member-order: bysource

The FileTracer class
--------------------

.. autoclass:: FileTracer
    :members:
    :member-order: bysource

The FileReporter class
----------------------

.. autoclass:: FileReporter
    :members:
    :member-order: bysource
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0
coverage-7.4.4/doc/branch.rst0000644000175100001770000000770200000000000016671 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

.. _branch:

===========================
Branch coverage measurement
===========================

.. highlight:: python
   :linenothreshold: 5

In addition to the usual statement coverage, coverage.py also supports branch
coverage measurement. Where a line in your program could jump to more than one
next line, coverage.py tracks which of those destinations are actually visited,
and flags lines that haven't visited all of their possible destinations.

For example::

    def my_partial_fn(x):
        if x:
            y = 10
        return y

    my_partial_fn(1)

In this code, line 2 is an ``if`` statement which can go next to either line 3
or line 4. Statement coverage would show all lines of the function as executed.
But the if was never evaluated as false, so line 2 never jumps to line 4.

Branch coverage will flag this code as not fully covered because of the missing
jump from line 2 to line 4.  This is known as a partial branch.


How to measure branch coverage
------------------------------

To measure branch coverage, run coverage.py with the ``--branch`` flag::

    coverage run --branch myprog.py

When you report on the results with ``coverage report`` or ``coverage html``,
the percentage of branch possibilities taken will be included in the percentage
covered total for each file.  The coverage percentage for a file is the actual
executions divided by the execution opportunities.  Each line in the file is an
execution opportunity, as is each branch destination.
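
For example, a hypothetical file with 10 statements and 4 branch destinations,
of which 9 statements and 3 branches were executed, would be reported as
(9 + 3) / (10 + 4), or about 86% covered.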

The HTML report gives information about which lines had missing branches. Lines
that were missing some branches are shown in yellow, with an annotation at the
far right showing branch destination line numbers that were not exercised.

The XML and JSON reports produced by ``coverage xml`` and ``coverage json``
also include branch information, including separate statement and branch
coverage percentages.


How it works
------------

When measuring branches, coverage.py collects pairs of line numbers, a source
and destination for each transition from one line to another.  Static analysis
of the source provides a list of possible transitions.  Comparing the measured
to the possible indicates missing branches.
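
As an illustrative sketch (not the actual implementation), the comparison for
the ``my_partial_fn`` example above amounts to a set difference of line-number
pairs::

    # Transitions that static analysis says are possible.
    possible = {(2, 3), (2, 4), (3, 4)}
    # Transitions that were actually measured at run time.
    measured = {(2, 3), (3, 4)}
    print(possible - measured)   # {(2, 4)}: the if never jumped to the return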

The idea of tracking how lines follow each other was from `Titus Brown`__.
Thanks, Titus!

__ http://ivory.idyll.org/blog


Excluding code
--------------

If you have :ref:`excluded code <excluding>`, a conditional will not be counted
as a branch if one of its choices is excluded::

    def only_one_choice(x):
        if x:
            blah1()
            blah2()
        else:  # pragma: no cover
            # x is always true.
            blah3()

Because the ``else`` clause is excluded, the ``if`` only has one possible next
line, so it isn't considered a branch at all.


Structurally partial branches
-----------------------------

Sometimes branching constructs are used in unusual ways that don't actually
branch.  For example::

    while True:
        if cond:
            break
        do_something()

Here the while loop will never exit normally, so it doesn't take both of its
"possible" branches.  For some of these constructs, such as "while True:" and
"if 0:", coverage.py understands what is going on.  In these cases, the line
will not be marked as a partial branch.

But there are many ways in your own code to write intentionally partial
branches, and you don't want coverage.py pestering you about them.  You can
tell coverage.py that you don't want them flagged by marking them with a
pragma::

    i = 0
    while i < 999999999:  # pragma: no branch
        if eventually():
            break

Here the while loop will never complete because the break will always be taken
at some point.  Coverage.py can't work that out on its own, but the "no branch"
pragma indicates that the branch is known to be partial, and the line is not
flagged.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0
coverage-7.4.4/doc/changes.rst0000644000175100001770000033373600000000000017055 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

.. _changes:

.. The recent changes from the top-level file:

.. include:: ../CHANGES.rst
    :end-before: scriv-end-here

.. Older changes here:

.. _changes_521:

Version 5.2.1 โ€” 2020-07-23
--------------------------

- The dark mode HTML report still used light colors for the context listing,
  making them unreadable (`issue 1009`_).  This is now fixed.

- The time stamp on the HTML report now includes the time zone. Thanks, Xie
  Yanbo (`pull request 960`_).

.. _pull request 960: https://github.com/nedbat/coveragepy/pull/960
.. _issue 1009: https://github.com/nedbat/coveragepy/issues/1009


.. _changes_52:

Version 5.2 โ€” 2020-07-05
------------------------

- The HTML report has been redesigned by Vince Salvino.  There is now a dark
  mode, the code text is larger, and system sans serif fonts are used, in
  addition to other small changes (`issue 858`_ and `pull request 931`_).

- The ``coverage report`` and ``coverage html`` commands now accept a
  ``--precision`` option to control the number of decimal points displayed.
  Thanks, Teake Nutma (`pull request 982`_).

- The ``coverage report`` and ``coverage html`` commands now accept a
  ``--no-skip-covered`` option to negate ``--skip-covered``.  Thanks, Anthony
  Sottile (`issue 779`_ and `pull request 932`_).

- The ``--skip-empty`` option is now available for the XML report, closing
  `issue 976`_.

- The ``coverage report`` command now accepts a ``--sort`` option to specify
  how to sort the results.  Thanks, Jerin Peter George (`pull request 1005`_).

- If coverage fails due to the coverage total not reaching the ``--fail-under``
  value, it will now print a message making the condition clear.  Thanks,
  Naveen Yadav (`pull request 977`_).

- TOML configuration files with non-ASCII characters would cause errors on
  Windows (`issue 990`_).  This is now fixed.

- The output of ``--debug=trace`` now includes information about how the
  ``--source`` option is being interpreted, and the module names being
  considered.

.. _pull request 931: https://github.com/nedbat/coveragepy/pull/931
.. _pull request 932: https://github.com/nedbat/coveragepy/pull/932
.. _pull request 977: https://github.com/nedbat/coveragepy/pull/977
.. _pull request 982: https://github.com/nedbat/coveragepy/pull/982
.. _pull request 1005: https://github.com/nedbat/coveragepy/pull/1005
.. _issue 779: https://github.com/nedbat/coveragepy/issues/779
.. _issue 858: https://github.com/nedbat/coveragepy/issues/858
.. _issue 976: https://github.com/nedbat/coveragepy/issues/976
.. _issue 990: https://github.com/nedbat/coveragepy/issues/990


.. _changes_51:

Version 5.1 โ€” 2020-04-12
------------------------

- The JSON report now includes counts of covered and missing branches. Thanks,
  Salvatore Zagaria.

- On Python 3.8, try-finally-return reported wrong branch coverage with
  decorated async functions (`issue 964`_).  This is now fixed. Thanks, Kjell
  Braden.

- The :meth:`~coverage.Coverage.get_option` and
  :meth:`~coverage.Coverage.set_option` methods can now manipulate the
  ``[paths]`` configuration setting.  Thanks to Bernรกt Gรกbor for the fix for
  `issue 967`_.

.. _issue 964: https://github.com/nedbat/coveragepy/issues/964
.. _issue 967: https://github.com/nedbat/coveragepy/issues/967


.. _changes_504:

Version 5.0.4 โ€” 2020-03-16
--------------------------

- If using the ``[run] relative_files`` setting, the XML report will use
  relative files in the ``<source>`` elements indicating the location of source
  code.  Closes `issue 948`_.

- The textual summary report could report missing lines with negative line
  numbers on PyPy3 7.1 (`issue 943`_).  This is now fixed.

- Windows wheels for Python 3.8 were incorrectly built, but are now fixed.
  (`issue 949`_)

- Updated Python 3.9 support to 3.9a4.

- HTML reports couldn't be sorted if localStorage wasn't available. This is now
  fixed: sorting works even though the sorting setting isn't retained. (`issue
  944`_ and `pull request 945`_). Thanks, Abdeali Kothari.

.. _issue 943: https://github.com/nedbat/coveragepy/issues/943
.. _issue 944: https://github.com/nedbat/coveragepy/issues/944
.. _pull request 945: https://github.com/nedbat/coveragepy/pull/945
.. _issue 948: https://github.com/nedbat/coveragepy/issues/948
.. _issue 949: https://github.com/nedbat/coveragepy/issues/949


.. _changes_503:

Version 5.0.3 โ€” 2020-01-12
--------------------------

- A performance improvement in 5.0.2 didn't work for test suites that changed
  directory before combining data, causing "Couldn't use data file: no such
  table: meta" errors (`issue 916`_).  This is now fixed.

- Coverage could fail to run your program with some form of "ModuleNotFound" or
  "ImportError" trying to import from the current directory. This would happen
  if coverage had been packaged into a zip file (for example, on Windows), or
  was found indirectly (for example, by pyenv-virtualenv).  A number of
  different scenarios were described in `issue 862`_ which is now fixed.  Huge
  thanks to Agbonze O. Jeremiah for reporting it, and Alexander Waters and
  George-Cristian Bรฎrzan for protracted debugging sessions.

- Added the "premain" debug option.

- Added SQLite compile-time options to the "debug sys" output.

.. _issue 862: https://github.com/nedbat/coveragepy/issues/862
.. _issue 916: https://github.com/nedbat/coveragepy/issues/916


.. _changes_502:

Version 5.0.2 โ€” 2020-01-05
--------------------------

- Programs that used multiprocessing and changed directories would fail under
  coverage.  This is now fixed (`issue 890`_).  A side effect is that debug
  information about the config files read now shows absolute paths to the
  files.

- When running programs as modules (``coverage run -m``) with ``--source``,
  some measured modules were imported before coverage starts.  This resulted in
  unwanted warnings ("Already imported a file that will be measured") and a
  reduction in coverage totals (`issue 909`_).  This is now fixed.

- If no data was collected, an exception about "No data to report" could happen
  instead of a 0% report being created (`issue 884`_).  This is now fixed.

- The handling of source files with non-encodable file names has changed.
  Previously, if a file name could not be encoded as UTF-8, an error occurred,
  as described in `issue 891`_.  Now, those files will not be measured, since
  their data would not be recordable.

- A new warning ("dynamic-conflict") is issued if two mechanisms are trying to
  change the dynamic context.  Closes `issue 901`_.

- ``coverage run --debug=sys`` would fail with an AttributeError. This is now
  fixed (`issue 907`_).

.. _issue 884: https://github.com/nedbat/coveragepy/issues/884
.. _issue 890: https://github.com/nedbat/coveragepy/issues/890
.. _issue 891: https://github.com/nedbat/coveragepy/issues/891
.. _issue 901: https://github.com/nedbat/coveragepy/issues/901
.. _issue 907: https://github.com/nedbat/coveragepy/issues/907
.. _issue 909: https://github.com/nedbat/coveragepy/issues/909


.. _changes_501:

Version 5.0.1 โ€” 2019-12-22
--------------------------

- If a 4.x data file is the cause of a "file is not a database" error, then use
  a more specific error message, "Looks like a coverage 4.x data file, are you
  mixing versions of coverage?"  Helps diagnose the problems described in
  `issue 886`_.

- Measurement contexts and relative file names didn't work together, as
  reported in `issue 899`_ and `issue 900`_.  This is now fixed, thanks to
  David Szotten.

- When using ``coverage run --concurrency=multiprocessing``, all data files
  should be named with parallel-ready suffixes.  5.0 mistakenly named the main
  process' file with no suffix when using ``--append``.  This is now fixed,
  closing `issue 880`_.

- Fixed a problem on Windows when the current directory is changed to a
  different drive (`issue 895`_).  Thanks, Olivier Grisel.

- Updated Python 3.9 support to 3.9a2.

.. _issue 880: https://github.com/nedbat/coveragepy/issues/880
.. _issue 886: https://github.com/nedbat/coveragepy/issues/886
.. _issue 895: https://github.com/nedbat/coveragepy/issues/895
.. _issue 899: https://github.com/nedbat/coveragepy/issues/899
.. _issue 900: https://github.com/nedbat/coveragepy/issues/900


.. _changes_50:

Version 5.0 โ€” 2019-12-14
------------------------

Nothing new beyond 5.0b2.

A summary of major changes in 5.0 since 4.5.x is in :ref:`whatsnew5x`.



.. _changes_50b2:

Version 5.0b2 โ€” 2019-12-08
--------------------------

- An experimental ``[run] relative_files`` setting tells coverage to store
  relative file names in the data file. This makes it easier to run tests in
  one (or many) environments, and then report in another.  It has not had much
  real-world testing, so it may change in incompatible ways in the future.

- When constructing a :class:`coverage.Coverage` object, `data_file` can be
  specified as None to prevent writing any data file at all.  In previous
  versions, an explicit `data_file=None` argument would use the default of
  ".coverage". Fixes `issue 871`_.

- Python files run with ``-m`` now have ``__spec__`` defined properly.  This
  fixes `issue 745`_ (about not being able to run unittest tests that spawn
  subprocesses), and `issue 838`_, which described the problem directly.

- The ``[paths]`` configuration section is now ordered. If you specify more
  than one list of patterns, the first one that matches will be used.  Fixes
  `issue 649`_.

- The :func:`.coverage.numbits.register_sqlite_functions` function now also
  registers `numbits_to_nums` for use in SQLite queries.  Thanks, Simon
  Willison.

- Python 3.9a1 is supported.

- Coverage.py has a mascot: :ref:`Sleepy Snake <sleepy>`.

.. _issue 649: https://github.com/nedbat/coveragepy/issues/649
.. _issue 745: https://github.com/nedbat/coveragepy/issues/745
.. _issue 838: https://github.com/nedbat/coveragepy/issues/838
.. _issue 871: https://github.com/nedbat/coveragepy/issues/871


.. _changes_50b1:

Version 5.0b1 โ€” 2019-11-11
--------------------------

- The HTML and textual reports now have a ``--skip-empty`` option that skips
  files with no statements, notably ``__init__.py`` files.  Thanks, Reya B.

- Configuration can now be read from `TOML`_ files.  This requires installing
  coverage.py with the ``[toml]`` extra.  The standard "pyproject.toml" file
  will be read automatically if no other configuration file is found, with
  settings in the ``[tool.coverage.]`` namespace.  Thanks to Frazer McLean for
  implementation and persistence.  Finishes `issue 664`_.

- The ``[run] note`` setting has been deprecated. Using it will result in a
  warning, and the note will not be written to the data file.  The
  corresponding :class:`.CoverageData` methods have been removed.

- The HTML report has been reimplemented (no more table around the source
  code). This allowed for a better presentation of the context information,
  hopefully resolving `issue 855`_.

- Added sqlite3 module version information to ``coverage debug sys`` output.

- Asking the HTML report to show contexts (``[html] show_contexts=True`` or
  ``coverage html --show-contexts``) will issue a warning if there were no
  contexts measured (`issue 851`_).

.. _TOML: https://toml.io/
.. _issue 664: https://github.com/nedbat/coveragepy/issues/664
.. _issue 851: https://github.com/nedbat/coveragepy/issues/851
.. _issue 855: https://github.com/nedbat/coveragepy/issues/855


.. _changes_50a8:

Version 5.0a8 โ€” 2019-10-02
--------------------------

- The :class:`.CoverageData` API has changed how queries are limited to
  specific contexts.  Now you use :meth:`.CoverageData.set_query_context` to
  set a single exact-match string, or :meth:`.CoverageData.set_query_contexts`
  to set a list of regular expressions to match contexts.  This changes the
  command-line ``--contexts`` option to use regular expressions instead of
  filename-style wildcards.


.. _changes_50a7:

Version 5.0a7 โ€” 2019-09-21
--------------------------

- Data can now be "reported" in JSON format, for programmatic use, as requested
  in `issue 720`_.  The new ``coverage json`` command writes raw and summarized
  data to a JSON file.  Thanks, Matt Bachmann.

- Dynamic contexts are now supported in the Python tracer, which is important
  for PyPy users.  Closes `issue 846`_.

- The compact line number representation introduced in 5.0a6 is called a
  "numbits."  The :mod:`coverage.numbits` module provides functions for working
  with them.

- The reporting methods used to permanently apply their arguments to the
  configuration of the Coverage object.  Now they no longer do.  The arguments
  affect the operation of the method, but do not persist.

- A class named "test_something" no longer confuses the ``test_function``
  dynamic context setting.  Fixes `issue 829`_.

- Fixed an unusual tokenizing issue with backslashes in comments.  Fixes
  `issue 822`_.

- ``debug=plugin`` didn't properly support configuration or dynamic context
  plugins, but now it does, closing `issue 834`_.

.. _issue 720: https://github.com/nedbat/coveragepy/issues/720
.. _issue 822: https://github.com/nedbat/coveragepy/issues/822
.. _issue 834: https://github.com/nedbat/coveragepy/issues/834
.. _issue 829: https://github.com/nedbat/coveragepy/issues/829
.. _issue 846: https://github.com/nedbat/coveragepy/issues/846


.. _changes_50a6:

Version 5.0a6 โ€” 2019-07-16
--------------------------

- Reporting on contexts. Big thanks to Stephan Richter and Albertas Agejevas
  for the contribution.

  - The ``--contexts`` option is available on the ``report`` and ``html``
    commands.  It's a comma-separated list of shell-style wildcards, selecting
    the contexts to report on.  Only contexts matching one of the wildcards
    will be included in the report.

  - The ``--show-contexts`` option for the ``html`` command adds context
    information to each covered line.  Hovering over the "ctx" marker at the
    end of the line reveals a list of the contexts that covered the line.

- Database changes:

  - Line numbers are now stored in a much more compact way.  For each file and
    context, a single binary string is stored with a bit per line number.  This
    greatly improves memory use, but makes ad-hoc use difficult.

  - Dynamic contexts with no data are no longer written to the database.

  - SQLite data storage is now faster.  There's no longer a reason to keep the
    JSON data file code, so it has been removed.

- Changes to the :class:`.CoverageData` interface:

  - The new :meth:`.CoverageData.dumps` method serializes the data to a string,
    and a corresponding :meth:`.CoverageData.loads` method reconstitutes this
    data.  The format of the data string is subject to change at any time, and
    so should only be used between two installations of the same version of
    coverage.py.

  - The :class:`CoverageData constructor<.CoverageData>` has a new
    argument, `no_disk` (default: False).  Setting it to True prevents writing
    any data to the disk.  This is useful for transient data objects.

- Added the class method :meth:`.Coverage.current` to get the latest started
  Coverage instance.

- Multiprocessing support in Python 3.8 was broken, but is now fixed.  Closes
  `issue 828`_.

- Error handling during reporting has changed slightly.  All reporting methods
  now behave the same.  The ``--ignore-errors`` option keeps errors from
  stopping the reporting, but files that couldn't parse as Python will always
  be reported as warnings.  As with other warnings, you can suppress them with
  the ``[run] disable_warnings`` configuration setting.

- Coverage.py no longer fails if the user program deletes its current
  directory. Fixes `issue 806`_.  Thanks, Dan Hemberger.

- The scrollbar markers in the HTML report now accurately show the highlighted
  lines, regardless of what categories of line are highlighted.

- The hack to accommodate ShiningPanda_ looking for an obsolete internal data
  file has been removed, since ShiningPanda 0.22 fixed it four years ago.

- The deprecated `Reporter.file_reporters` property has been removed.

.. _ShiningPanda: https://plugins.jenkins.io/shiningpanda/
.. _issue 806: https://github.com/nedbat/coveragepy/pull/806
.. _issue 828: https://github.com/nedbat/coveragepy/issues/828


.. _changes_50a5:

Version 5.0a5 โ€” 2019-05-07
--------------------------

- Drop support for Python 3.4

- Dynamic contexts can now be set two new ways, both thanks to Justas
  Sadzeviฤius.

  - A plugin can implement a ``dynamic_context`` method to check frames for
    whether a new context should be started.  See
    :ref:`dynamic_context_plugins` for more details.

  - Another tool (such as a test runner) can use the new
    :meth:`.Coverage.switch_context` method to explicitly change the context.

- The ``dynamic_context = test_function`` setting now works with Python 2
  old-style classes, though it only reports the method name, not the class it
  was defined on.  Closes `issue 797`_.

- ``fail_under`` values more than 100 are reported as errors.  Thanks to Mike
  Fiedler for closing `issue 746`_.

- The "missing" values in the text output are now sorted by line number, so
  that missing branches are reported near the other lines they affect. The
  values used to show all missing lines, and then all missing branches.

- Access to the SQLite database used for data storage is now thread-safe.
  Thanks, Stephan Richter. This closes `issue 702`_.

- Combining data stored in SQLite is now about twice as fast, fixing `issue
  761`_.  Thanks, Stephan Richter.

- The ``filename`` attribute on :class:`.CoverageData` objects has been made
  private.  You can use the ``data_filename`` method to get the actual file
  name being used to store data, and the ``base_filename`` method to get the
  original filename before parallelizing suffixes were added.  This is part of
  fixing `issue 708`_.

- Line numbers in the HTML report now align properly with source lines, even
  when Chrome's minimum font size is set, fixing `issue 748`_.  Thanks Wen Ye.

.. _issue 702: https://github.com/nedbat/coveragepy/issues/702
.. _issue 708: https://github.com/nedbat/coveragepy/issues/708
.. _issue 746: https://github.com/nedbat/coveragepy/issues/746
.. _issue 748: https://github.com/nedbat/coveragepy/issues/748
.. _issue 761: https://github.com/nedbat/coveragepy/issues/761
.. _issue 797: https://github.com/nedbat/coveragepy/issues/797


.. _changes_50a4:

Version 5.0a4 โ€” 2018-11-25
--------------------------

- You can specify the command line to run your program with the ``[run]
  command_line`` configuration setting, as requested in `issue 695`_.

- Coverage will create directories as needed for the data file if they don't
  exist, closing `issue 721`_.

- The ``coverage run`` command has always adjusted the first entry in sys.path,
  to properly emulate how Python runs your program.  Now this adjustment is
  skipped if sys.path[0] is already different than Python's default.  This
  fixes `issue 715`_.

- Improvements to context support:

  - The "no such table: meta" error is fixed.: `issue 716`_.

  - Combining data files is now much faster.

- Python 3.8 (as of today!) passes all tests.

.. _issue 695: https://github.com/nedbat/coveragepy/issues/695
.. _issue 715: https://github.com/nedbat/coveragepy/issues/715
.. _issue 716: https://github.com/nedbat/coveragepy/issues/716
.. _issue 721: https://github.com/nedbat/coveragepy/issues/721


.. _changes_50a3:

Version 5.0a3 โ€” 2018-10-06
--------------------------

- Context support: static contexts let you specify a label for a coverage run,
  which is recorded in the data, and retained when you combine files.  See
  :ref:`contexts` for more information.

- Dynamic contexts: specifying ``[run] dynamic_context = test_function`` in the
  config file will record the test function name as a dynamic context during
  execution.  This is the core of "Who Tests What" (`issue 170`_).  Things to
  note:

  - There is no reporting support yet.  Use SQLite to query the .coverage file
    for information.  Ideas are welcome about how reporting could be extended
    to use this data.

  - There's a noticeable slow-down before any test is run.

  - Data files will now be roughly N times larger, where N is the number of
    tests you have.  Combining data files is therefore also N times slower.

  - No other values for ``dynamic_context`` are recognized yet.  Let me know
    what else would be useful.  I'd like to use a pytest plugin to get better
    information directly from pytest, for example.

.. _issue 170: https://github.com/nedbat/coveragepy/issues/170

- Environment variable substitution in configuration files now supports two
  syntaxes for controlling the behavior of undefined variables: if ``VARNAME``
  is not defined, ``${VARNAME?}`` will raise an error, and ``${VARNAME-default
  value}`` will use "default value".

- Partial support for Python 3.8, which has not yet released an alpha. Fixes
  `issue 707`_ and `issue 714`_.

.. _issue 707: https://github.com/nedbat/coveragepy/issues/707
.. _issue 714: https://github.com/nedbat/coveragepy/issues/714


.. _changes_50a2:

Version 5.0a2 โ€” 2018-09-03
--------------------------

- Coverage's data storage has changed.  In version 4.x, .coverage files were
  basically JSON.  Now, they are SQLite databases.  This means the data file
  can be created earlier than it used to.  A large amount of code was
  refactored to support this change.

  - Because the data file is created differently than previous releases, you
    may need ``parallel=true`` where you didn't before.

  - The old data format is still available (for now) by setting the environment
    variable ``COVERAGE_STORAGE=json``. Please tell me if you think you need to
    keep the JSON format.

  - The database schema is guaranteed to change in the future, to support new
    features.  I'm looking for opinions about making the schema part of the
    public API to coverage.py or not.

- Development moved from `Bitbucket`_ to `GitHub`_.

- HTML files no longer have trailing and extra white space.

- The sort order in the HTML report is stored in local storage rather than
  cookies, closing `issue 611`_.  Thanks, Federico Bond.

- pickle2json, for converting v3 data files to v4 data files, has been removed.

.. _Bitbucket: https://bitbucket.org
.. _GitHub: https://github.com/nedbat/coveragepy

.. _issue 611: https://github.com/nedbat/coveragepy/issues/611


.. _changes_50a1:

Version 5.0a1 โ€” 2018-06-05
--------------------------

- Coverage.py no longer supports Python 2.6 or 3.3.

- The location of the configuration file can now be specified with a
  ``COVERAGE_RCFILE`` environment variable, as requested in `issue 650`_.

- Namespace packages are supported on Python 3.7, where they used to cause
  TypeErrors about path being None. Fixes `issue 700`_.

- A new warning (``already-imported``) is issued if measurable files have
  already been imported before coverage.py started measurement.  See
  :ref:`cmd_warnings` for more information.

- Running coverage many times for small runs in a single process should be
  faster, closing `issue 625`_.  Thanks, David MacIver.

- Large HTML report pages load faster.  Thanks, Pankaj Pandey.

.. _issue 625: https://github.com/nedbat/coveragepy/issues/625
.. _issue 650: https://github.com/nedbat/coveragepy/issues/650
.. _issue 700: https://github.com/nedbat/coveragepy/issues/700


.. _changes_454:

Version 4.5.4 โ€” 2019-07-29
--------------------------

- Multiprocessing support in Python 3.8 was broken, but is now fixed.  Closes
  `issue 828`_.

.. _issue 828: https://github.com/nedbat/coveragepy/issues/828


.. _changes_453:

Version 4.5.3 โ€” 2019-03-09
--------------------------

- Only packaging metadata changes.


.. _changes_452:

Version 4.5.2 โ€” 2018-11-12
--------------------------

- Namespace packages are supported on Python 3.7, where they used to cause
  TypeErrors about path being None. Fixes `issue 700`_.

- Python 3.8 (as of today!) passes all tests.  Fixes `issue 707`_ and
  `issue 714`_.

- Development moved from `Bitbucket`_ to `GitHub`_.

.. _issue 700: https://github.com/nedbat/coveragepy/issues/700
.. _issue 707: https://github.com/nedbat/coveragepy/issues/707
.. _issue 714: https://github.com/nedbat/coveragepy/issues/714

.. _Bitbucket: https://bitbucket.org
.. _GitHub: https://github.com/nedbat/coveragepy


.. _changes_451:

Version 4.5.1 โ€” 2018-02-10
--------------------------

- Now that 4.5 properly separated the ``[run] omit`` and ``[report] omit``
  settings, an old bug has become apparent.  If you specified a package name
  for ``[run] source``, then omit patterns weren't matched inside that package.
  This bug (`issue 638`_) is now fixed.

- On Python 3.7, reporting about a decorated function with no body other than a
  docstring would crash coverage.py with an IndexError (`issue 640`_).  This is
  now fixed.

- Configurer plugins are now reported in the output of ``--debug=sys``.

.. _issue 638: https://github.com/nedbat/coveragepy/issues/638
.. _issue 640: https://github.com/nedbat/coveragepy/issues/640


.. _changes_45:

Version 4.5 โ€” 2018-02-03
------------------------

- A new kind of plugin is supported: configurers are invoked at start-up to
  allow more complex configuration than the .coveragerc file can easily do.
  See :ref:`api_plugin` for details.  This solves the complex configuration
  problem described in `issue 563`_.

- The ``fail_under`` option can now be a float.  Note that you must specify the
  ``[report] precision`` configuration option for the fractional part to be
  used.  Thanks to Lars Hupfeldt Nielsen for help with the implementation.
  Fixes `issue 631`_.

- The ``include`` and ``omit`` options can be specified for both the ``[run]``
  and ``[report]`` phases of execution.  4.4.2 introduced some incorrect
  interactions between those phases, where the options for one were confused
  for the other.  This is now corrected, fixing `issue 621`_ and `issue 622`_.
  Thanks to Daniel Hahler for seeing more clearly than I could.

- The ``coverage combine`` command used to always overwrite the data file, even
  when no data had been read from apparently combinable files.  Now, an error
  is raised if we thought there were files to combine, but in fact none of them
  could be used.  Fixes `issue 629`_.

- The ``coverage combine`` command could get confused about path separators
  when combining data collected on Windows with data collected on Linux, as
  described in `issue 618`_.  This is now fixed: the result path always uses
  the path separator specified in the ``[paths]`` result.

- On Windows, the HTML report could fail when source trees are deeply nested,
  due to attempting to create HTML filenames longer than the 250-character
  maximum.  Now filenames will never get much larger than 200 characters,
  fixing `issue 627`_.  Thanks to Alex Sandro for helping with the fix.

.. _issue 563: https://github.com/nedbat/coveragepy/issues/563
.. _issue 618: https://github.com/nedbat/coveragepy/issues/618
.. _issue 621: https://github.com/nedbat/coveragepy/issues/621
.. _issue 622: https://github.com/nedbat/coveragepy/issues/622
.. _issue 627: https://github.com/nedbat/coveragepy/issues/627
.. _issue 629: https://github.com/nedbat/coveragepy/issues/629
.. _issue 631: https://github.com/nedbat/coveragepy/issues/631


.. _changes_442:

Version 4.4.2 โ€” 2017-11-05
--------------------------

- Support for Python 3.7.  In some cases, class and module docstrings are no
  longer counted in statement totals, which could slightly change your total
  results.

- Specifying both ``--source`` and ``--include`` no longer silently ignores the
  include setting, instead it displays a warning. Thanks, Loรฏc Dachary.  Closes
  `issue 265`_ and `issue 101`_.

- Fixed a race condition when saving data and multiple threads are tracing
  (`issue 581`_). It could produce a "dictionary changed size during iteration"
  RuntimeError.  I believe this mostly but not entirely fixes the race
  condition.  A true fix would likely be too expensive.  Thanks, Peter Baughman
  for the debugging, and Olivier Grisel for the fix with tests.

- Configuration values which are file paths will now apply tilde-expansion,
  closing `issue 589`_.

- Now secondary config files like tox.ini and setup.cfg can be specified
  explicitly, and prefixed sections like `[coverage:run]` will be read. Fixes
  `issue 588`_.

- Be more flexible about the command name displayed by help, fixing
  `issue 600`_. Thanks, Ben Finney.

.. _issue 101: https://github.com/nedbat/coveragepy/issues/101
.. _issue 581: https://github.com/nedbat/coveragepy/issues/581
.. _issue 588: https://github.com/nedbat/coveragepy/issues/588
.. _issue 589: https://github.com/nedbat/coveragepy/issues/589
.. _issue 600: https://github.com/nedbat/coveragepy/issues/600


.. _changes_441:

Version 4.4.1 โ€” 2017-05-14
--------------------------

- No code changes: just corrected packaging for Python 2.7 Linux wheels.


.. _changes_44:

Version 4.4 โ€” 2017-05-07
------------------------

- Reports could produce the wrong file names for packages, reporting ``pkg.py``
  instead of the correct ``pkg/__init__.py``.  This is now fixed.  Thanks, Dirk
  Thomas.

- XML reports could produce ``<source>`` and ``<class>`` lines that together
  didn't specify a valid source file path.  This is now fixed. (`issue 526`_)

- Namespace packages are no longer warned as having no code. (`issue 572`_)

- Code that uses ``sys.settrace(sys.gettrace())`` in a file that wasn't being
  coverage-measured would prevent correct coverage measurement in following
  code. An example of this was running doctests programmatically. This is now
  fixed. (`issue 575`_)

- Errors printed by the ``coverage`` command now go to stderr instead of
  stdout.

- Running ``coverage xml`` in a directory named with non-ASCII characters would
  fail under Python 2. This is now fixed. (`issue 573`_)

.. _issue 526: https://github.com/nedbat/coveragepy/issues/526
.. _issue 572: https://github.com/nedbat/coveragepy/issues/572
.. _issue 573: https://github.com/nedbat/coveragepy/issues/573
.. _issue 575: https://github.com/nedbat/coveragepy/issues/575


Version 4.4b1 โ€” 2017-04-04
--------------------------

- Some warnings can now be individually disabled.  Warnings that can be
  disabled have a short name appended.  The ``[run] disable_warnings`` setting
  takes a list of these warning names to disable. Closes both `issue 96`_ and
  `issue 355`_.

- The XML report now includes attributes from version 4 of the Cobertura XML
  format, fixing `issue 570`_.

- In previous versions, calling a method that used collected data would prevent
  further collection.  For example, `save()`, `report()`, `html_report()`, and
  others would all stop collection.  An explicit `start()` was needed to get it
  going again.  This is no longer true.  Now you can use the collected data and
  also continue measurement. Both `issue 79`_ and `issue 448`_ described this
  problem, and have been fixed.

- Plugins can now find un-executed files if they choose, by implementing the
  `find_executable_files` method.  Thanks, Emil Madsen.

- Minimal IronPython support. You should be able to run IronPython programs
  under ``coverage run``, though you will still have to do the reporting phase
  with CPython.

- Coverage.py has long had a special hack to support CPython's need to measure
  the coverage of the standard library tests. This code was not installed by
  kitted versions of coverage.py.  Now it is.

.. _issue 79: https://github.com/nedbat/coveragepy/issues/79
.. _issue 96: https://github.com/nedbat/coveragepy/issues/96
.. _issue 355: https://github.com/nedbat/coveragepy/issues/355
.. _issue 448: https://github.com/nedbat/coveragepy/issues/448
.. _issue 570: https://github.com/nedbat/coveragepy/issues/570


.. _changes_434:

Version 4.3.4 โ€” 2017-01-17
--------------------------

- Fixing 2.6 in version 4.3.3 broke other things, because the too-tricky
  exception wasn't properly derived from Exception, described in `issue 556`_.
  A newb mistake; it hasn't been a good few days.

.. _issue 556: https://github.com/nedbat/coveragepy/issues/556


.. _changes_433:

Version 4.3.3 โ€” 2017-01-17
--------------------------

- Python 2.6 support was broken due to a testing exception imported for the
  benefit of the coverage.py test suite.  Properly conditionalizing it fixed
  `issue 554`_ so that Python 2.6 works again.

.. _issue 554: https://github.com/nedbat/coveragepy/issues/554


.. _changes_432:

Version 4.3.2 โ€” 2017-01-16
--------------------------

- Using the ``--skip-covered`` option on an HTML report with 100% coverage
  would cause a "No data to report" error, as reported in `issue 549`_. This is
  now fixed; thanks, Loรฏc Dachary.

- If-statements can be optimized away during compilation, for example, `if 0:`
  or `if __debug__:`.  Coverage.py had problems properly understanding these
  statements which existed in the source, but not in the compiled bytecode.
  This problem, reported in `issue 522`_, is now fixed.

- If you specified ``--source`` as a directory, then coverage.py would look for
  importable Python files in that directory, and could identify ones that had
  never been executed at all.  But if you specified it as a package name, that
  detection wasn't performed.  Now it is, closing `issue 426`_. Thanks to Loรฏc
  Dachary for the fix.

- If you started and stopped coverage measurement thousands of times in your
  process, you could crash Python with a "Fatal Python error: deallocating
  None" error.  This is now fixed.  Thanks to Alex Groce for the bug report.

- On PyPy, measuring coverage in subprocesses could produce a warning: "Trace
  function changed, measurement is likely wrong: None".  This was spurious, and
  has been suppressed.

- Previously, coverage.py couldn't start on Jython, due to that implementation
  missing the multiprocessing module (`issue 551`_). This problem has now been
  fixed. Also, `issue 322`_ about not being able to invoke coverage
  conveniently, seems much better: ``jython -m coverage run myprog.py`` works
  properly.

- Let's say you ran the HTML report over and over again in the same output
  directory, with ``--skip-covered``. And imagine due to your heroic
  test-writing efforts, a file just achieved the goal of 100% coverage. With
  coverage.py 4.3, the old HTML file with the less-than-100% coverage would be
  left behind.  This file is now properly deleted.

.. _issue 322: https://github.com/nedbat/coveragepy/issues/322
.. _issue 426: https://github.com/nedbat/coveragepy/issues/426
.. _issue 522: https://github.com/nedbat/coveragepy/issues/522
.. _issue 549: https://github.com/nedbat/coveragepy/issues/549
.. _issue 551: https://github.com/nedbat/coveragepy/issues/551


.. _changes_431:

Version 4.3.1 โ€” 2016-12-28
--------------------------

- Some environments couldn't install 4.3, as described in `issue 540`_. This is
  now fixed.

- The check for conflicting ``--source`` and ``--include`` was too simple in a
  few different ways, breaking a few perfectly reasonable use cases, described
  in `issue 541`_.  The check has been reverted while we re-think the fix for
  `issue 265`_.

.. _issue 540: https://github.com/nedbat/coveragepy/issues/540
.. _issue 541: https://github.com/nedbat/coveragepy/issues/541


.. _changes_43:

Version 4.3 โ€” 2016-12-27
------------------------

Special thanks to **Loรฏc Dachary**, who took an extraordinary interest in
coverage.py and contributed a number of improvements in this release.

- Subprocesses that are measured with `automatic subprocess measurement`_ used
  to read in any pre-existing data file.  This meant data would be incorrectly
  carried forward from run to run.  Now those files are not read, so each
  subprocess only writes its own data. Fixes `issue 510`_.

- The ``coverage combine`` command will now fail if there are no data files to
  combine. The combine changes in 4.2 meant that multiple combines could lose
  data, leaving you with an empty .coverage data file. Fixes
  `issue 525`_, `issue 412`_, `issue 516`_, and probably `issue 511`_.

- Coverage.py wouldn't execute `sys.excepthook`_ when an exception happened in
  your program.  Now it does, thanks to Andrew Hoos.  Closes `issue 535`_.

- Branch coverage fixes:

  - Branch coverage could misunderstand a finally clause on a try block that
    never continued on to the following statement, as described in `issue
    493`_.  This is now fixed. Thanks to Joe Doherty for the report and Loรฏc
    Dachary for the fix.

  - A while loop with a constant condition (while True) and a continue
    statement would be mis-analyzed, as described in `issue 496`_. This is now
    fixed, thanks to a bug report by Eli Skeggs and a fix by Loรฏc Dachary.

  - While loops with constant conditions that were never executed could result
    in a non-zero coverage report.  Artem Dayneko reported this in `issue
    502`_, and Loรฏc Dachary provided the fix.

- The HTML report now supports a ``--skip-covered`` option like the other
  reporting commands.  Thanks, Loรฏc Dachary for the implementation, closing
  `issue 433`_.

- Options can now be read from a tox.ini file, if any. Like setup.cfg, sections
  are prefixed with "coverage:", so ``[run]`` options will be read from the
  ``[coverage:run]`` section of tox.ini. Implements part of `issue 519`_.
  Thanks, Stephen Finucane.

- Specifying both ``--source`` and ``--include`` no longer silently ignores the
  include setting, instead it fails with a message. Thanks, Nathan Land and
  Loรฏc Dachary. Closes `issue 265`_.

- The ``Coverage.combine`` method has a new parameter, ``strict=False``, to
  support failing if there are no data files to combine.

- When forking subprocesses, the coverage data files would have the same random
  number appended to the file name. This didn't cause problems, because the
  file names had the process id also, making collisions (nearly) impossible.
  But it was disconcerting.  This is now fixed.

- The text report now properly sizes headers when skipping some files, fixing
  `issue 524`_. Thanks, Anthony Sottile and Loรฏc Dachary.

- Coverage.py can now search .pex files for source, just as it can .zip and
  .egg.  Thanks, Peter Ebden.

- Data files are now about 15% smaller.

- Improvements in the ``[run] debug`` setting:

  - The "dataio" debug setting now also logs when data files are deleted during
    combining or erasing.

  - A new debug option, "multiproc", for logging the behavior of
    ``concurrency=multiprocessing``.

  - If you used the debug options "config" and "callers" together, you'd get a
    call stack printed for every line in the multi-line config output. This is
    now fixed.

- Fixed an unusual bug involving multiple coding declarations affecting code
  containing code in multi-line strings: `issue 529`_.

- Coverage.py will no longer be misled into thinking that a plain file is a
  package when interpreting ``--source`` options.  Thanks, Cosimo Lupo.

- If you try to run a non-Python file with coverage.py, you will now get a more
  useful error message. `Issue 514`_.

- The default pragma regex changed slightly, but this will only matter to you
  if you are deranged and use mixed-case pragmas.

- Deal properly with non-ASCII file names in an ASCII-only world, `issue 533`_.

- Programs that set Unicode configuration values could cause UnicodeErrors when
  generating HTML reports.  Pytest-cov is one example.  This is now fixed.

- Prevented deprecation warnings from configparser that happened in some
  circumstances, closing `issue 530`_.

- Corrected the name of the jquery.ba-throttle-debounce.js library. Thanks,
  Ben Finney.  Closes `issue 505`_.

- Testing against PyPy 5.6 and PyPy3 5.5.

- Switched to pytest from nose for running the coverage.py tests.

- Renamed AUTHORS.txt to CONTRIBUTORS.txt, since there are other ways to
  contribute than by writing code. Also put the count of contributors into the
  author string in setup.py, though this might be too cute.

.. _sys.excepthook: https://docs.python.org/3/library/sys.html#sys.excepthook
.. _issue 265: https://github.com/nedbat/coveragepy/issues/265
.. _issue 412: https://github.com/nedbat/coveragepy/issues/412
.. _issue 433: https://github.com/nedbat/coveragepy/issues/433
.. _issue 493: https://github.com/nedbat/coveragepy/issues/493
.. _issue 496: https://github.com/nedbat/coveragepy/issues/496
.. _issue 502: https://github.com/nedbat/coveragepy/issues/502
.. _issue 505: https://github.com/nedbat/coveragepy/issues/505
.. _issue 514: https://github.com/nedbat/coveragepy/issues/514
.. _issue 510: https://github.com/nedbat/coveragepy/issues/510
.. _issue 511: https://github.com/nedbat/coveragepy/issues/511
.. _issue 516: https://github.com/nedbat/coveragepy/issues/516
.. _issue 519: https://github.com/nedbat/coveragepy/issues/519
.. _issue 524: https://github.com/nedbat/coveragepy/issues/524
.. _issue 525: https://github.com/nedbat/coveragepy/issues/525
.. _issue 529: https://github.com/nedbat/coveragepy/issues/529
.. _issue 530: https://github.com/nedbat/coveragepy/issues/530
.. _issue 533: https://github.com/nedbat/coveragepy/issues/533
.. _issue 535: https://github.com/nedbat/coveragepy/issues/535


.. _changes_42:

Version 4.2 โ€” 2016-07-26
------------------------

- Since ``concurrency=multiprocessing`` uses subprocesses, options specified on
  the coverage.py command line will not be communicated down to them.  Only
  options in the configuration file will apply to the subprocesses.
  Previously, the options didn't apply to the subprocesses, but there was no
  indication.  Now it is an error to use ``--concurrency=multiprocessing`` and
  other run-affecting options on the command line.  This prevents
  failures like those reported in `issue 495`_.

- Filtering the HTML report is now faster, thanks to Ville Skyttรค.

.. _issue 495: https://github.com/nedbat/coveragepy/issues/495


Version 4.2b1 โ€” 2016-07-04
--------------------------

Work from the PyCon 2016 Sprints!

- BACKWARD INCOMPATIBILITY: the ``coverage combine`` command now ignores an
  existing ``.coverage`` data file.  It used to include that file in its
  combining.  This caused confusing results, and extra tox "clean" steps.  If
  you want the old behavior, use the new ``coverage combine --append`` option.

- The ``concurrency`` option can now take multiple values, to support programs
  using multiprocessing and another library such as eventlet.  This is only
  possible in the configuration file, not from the command line. The
  configuration file is the only way for sub-processes to all run with the same
  options.  Fixes `issue 484`_.  Thanks to Josh Williams for prototyping.

- Using a ``concurrency`` setting of ``multiprocessing`` now implies
  ``--parallel`` so that the main program is measured similarly to the
  sub-processes.

- When using `automatic subprocess measurement`_, running coverage commands
  would create spurious data files.  This is now fixed, thanks to diagnosis and
  testing by Dan Riti.  Closes `issue 492`_.

- A new configuration option, ``report:sort``, controls what column of the
  text report is used to sort the rows.  Thanks to Dan Wandschneider, this
  closes `issue 199`_.

- The HTML report has a more-visible indicator for which column is being
  sorted.  Closes `issue 298`_, thanks to Josh Williams.

- If the HTML report cannot find the source for a file, the message now
  suggests using the ``-i`` flag to allow the report to continue. Closes
  `issue 231`_, thanks, Nathan Land.

- When reports are ignoring errors, there's now a warning if a file cannot be
  parsed, rather than being silently ignored.  Closes `issue 396`_. Thanks,
  Matthew Boehm.

- A new option for ``coverage debug`` is available: ``coverage debug config``
  shows the current configuration.  Closes `issue 454`_, thanks to Matthew
  Boehm.

- Running coverage as a module (``python -m coverage``) no longer shows the
  program name as ``__main__.py``.  Fixes `issue 478`_.  Thanks, Scott Belden.

- The `test_helpers` module has been moved into a separate pip-installable
  package: `unittest-mixins`_.

.. _automatic subprocess measurement: https://coverage.readthedocs.io/en/latest/subprocess.html
.. _issue 199: https://github.com/nedbat/coveragepy/issues/199
.. _issue 231: https://github.com/nedbat/coveragepy/issues/231
.. _issue 298: https://github.com/nedbat/coveragepy/issues/298
.. _issue 396: https://github.com/nedbat/coveragepy/issues/396
.. _issue 454: https://github.com/nedbat/coveragepy/issues/454
.. _issue 478: https://github.com/nedbat/coveragepy/issues/478
.. _issue 484: https://github.com/nedbat/coveragepy/issues/484
.. _issue 492: https://github.com/nedbat/coveragepy/issues/492
.. _unittest-mixins: https://pypi.org/project/unittest-mixins/


.. _changes_41:

Version 4.1 โ€” 2016-05-21
------------------------

- The internal attribute `Reporter.file_reporters` was removed in 4.1b3.  It
  should have come as no surprise that there were third-party tools out there
  using that attribute.  It has been restored, but with a deprecation warning.


Version 4.1b3 โ€” 2016-05-10
--------------------------

- When running your program, execution can jump from an ``except X:`` line to
  some other line when an exception other than ``X`` happens.  This jump is no
  longer considered a branch when measuring branch coverage.

- When measuring branch coverage, ``yield`` statements that were never resumed
  were incorrectly marked as missing, as reported in `issue 440`_.  This is now
  fixed.

- During branch coverage of single-line callables like lambdas and generator
  expressions, coverage.py can now distinguish between them never being called,
  or being called but not completed.  Fixes `issue 90`_, `issue 460`_ and
  `issue 475`_.

- The HTML report now has a map of the file along the rightmost edge of the
  page, giving an overview of where the missed lines are.  Thanks, Dmitry
  Shishov.

- The HTML report now uses different monospaced fonts, favoring Consolas over
  Courier.  Along the way, `issue 472`_ about not properly handling one-space
  indents was fixed.  The index page also has slightly different styling, to
  try to make the clickable detail pages more apparent.

- Missing branches reported with ``coverage report -m`` will now say ``->exit``
  for missed branches to the exit of a function, rather than a negative number.
  Fixes `issue 469`_.

- ``coverage --help`` and ``coverage --version`` now mention which tracer is
  installed, to help diagnose problems. The docs mention which features need
  the C extension. (`issue 479`_)

- Officially support PyPy 5.1, which required no changes, just updates to the
  docs.

- The `Coverage.report` function had two parameters with non-None defaults,
  which have been changed.  `show_missing` used to default to True, but now
  defaults to None.  If you had been calling `Coverage.report` without
  specifying `show_missing`, you'll need to explicitly set it to True to keep
  the same behavior.  `skip_covered` used to default to False. It is now None,
  which doesn't change the behavior.  This fixes `issue 485`_.

- It's never been possible to pass a namespace module to one of the analysis
  functions, but now at least we raise a more specific error message, rather
  than getting confused. (`issue 456`_)

- The `coverage.process_startup` function now returns the `Coverage` instance
  it creates, as suggested in `issue 481`_.

- Make a small tweak to how we compare threads, to avoid buggy custom
  comparison code in thread classes. (`issue 245`_)

.. _issue 90: https://github.com/nedbat/coveragepy/issues/90
.. _issue 245: https://github.com/nedbat/coveragepy/issues/245
.. _issue 440: https://github.com/nedbat/coveragepy/issues/440
.. _issue 456: https://github.com/nedbat/coveragepy/issues/456
.. _issue 460: https://github.com/nedbat/coveragepy/issues/460
.. _issue 469: https://github.com/nedbat/coveragepy/issues/469
.. _issue 472: https://github.com/nedbat/coveragepy/issues/472
.. _issue 475: https://github.com/nedbat/coveragepy/issues/475
.. _issue 479: https://github.com/nedbat/coveragepy/issues/479
.. _issue 481: https://github.com/nedbat/coveragepy/issues/481
.. _issue 485: https://github.com/nedbat/coveragepy/issues/485


Version 4.1b2 โ€” 2016-01-23
--------------------------

- Problems with the new branch measurement in 4.1 beta 1 were fixed:

  - Class docstrings were considered executable.  Now they no longer are.

  - ``yield from`` and ``await`` were considered returns from functions, since
    they could transfer control to the caller.  This produced unhelpful
    "missing branch" reports in a number of circumstances.  Now they no longer
    are considered returns.

  - In unusual situations, a missing branch to a negative number was reported.
    This has been fixed, closing `issue 466`_.

- The XML report now produces correct package names for modules found in
  directories specified with ``source=``.  Fixes `issue 465`_.

- ``coverage report`` won't produce trailing white space.

.. _issue 465: https://github.com/nedbat/coveragepy/issues/465
.. _issue 466: https://github.com/nedbat/coveragepy/issues/466


Version 4.1b1 — 2016-01-10
--------------------------

- Branch analysis has been rewritten: it used to be based on bytecode, but now
  uses AST analysis.  This has changed a number of things:

  - More code paths are now considered runnable, especially in
    ``try``/``except`` structures.  This may mean that coverage.py will
    identify more code paths as uncovered.  This could either raise or lower
    your overall coverage number.

  - Python 3.5's ``async`` and ``await`` keywords are properly supported,
    fixing `issue 434`_.

  - Some long-standing branch coverage bugs were fixed:

    - `issue 129`_: functions with only a docstring for a body would
      incorrectly report a missing branch on the ``def`` line.

    - `issue 212`_: code in an ``except`` block could be incorrectly marked as
      a missing branch.

    - `issue 146`_: context managers (``with`` statements) in a loop or ``try``
      block could confuse the branch measurement, reporting incorrect partial
      branches.

    - `issue 422`_: in Python 3.5, an actual partial branch could be marked as
      complete.

- Pragmas to disable coverage measurement can now be used on decorator lines,
  and they will apply to the entire function or class being decorated (see
  the example after this list).  This implements the feature requested in
  `issue 131`_.

- Multiprocessing support is now available on Windows.  Thanks, Rodrigue
  Cloutier.

- Files with two encoding declarations are properly supported, fixing
  `issue 453`_. Thanks, Max Linke.

- Non-ascii characters in regexes in the configuration file worked in 3.7, but
  stopped working in 4.0.  Now they work again, closing `issue 455`_.

- Form-feed characters would prevent accurate determination of the beginning of
  statements in the rest of the file.  This is now fixed, closing `issue 461`_.
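
A small illustration of the decorator-line pragma; the ``noisy`` decorator and
the function name are made up for the example::

    import functools

    def noisy(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print("calling", func.__name__)
            return func(*args, **kwargs)
        return wrapper

    @noisy      # pragma: no cover
    def debug_helper():
        # The pragma on the decorator line excludes this whole function.
        print("only used while debugging")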

.. _issue 129: https://github.com/nedbat/coveragepy/issues/129
.. _issue 131: https://github.com/nedbat/coveragepy/issues/131
.. _issue 146: https://github.com/nedbat/coveragepy/issues/146
.. _issue 212: https://github.com/nedbat/coveragepy/issues/212
.. _issue 422: https://github.com/nedbat/coveragepy/issues/422
.. _issue 434: https://github.com/nedbat/coveragepy/issues/434
.. _issue 453: https://github.com/nedbat/coveragepy/issues/453
.. _issue 455: https://github.com/nedbat/coveragepy/issues/455
.. _issue 461: https://github.com/nedbat/coveragepy/issues/461


.. _changes_403:

Version 4.0.3 — 2015-11-24
--------------------------

- Fixed a mysterious problem that manifested in different ways: sometimes
  hanging the process (`issue 420`_), sometimes making database connections
  fail (`issue 445`_).

- The XML report now has correct ``<source>`` elements when using a
  ``--source=`` option somewhere besides the current directory.  This fixes
  `issue 439`_. Thanks, Arcadiy Ivanov.

- Fixed an unusual edge case of detecting source encodings, described in
  `issue 443`_.

- Help messages that mention the command to use now properly use the actual
  command name, which might be different than "coverage".  Thanks to Ben
  Finney, this closes `issue 438`_.

.. _issue 420: https://github.com/nedbat/coveragepy/issues/420
.. _issue 438: https://github.com/nedbat/coveragepy/issues/438
.. _issue 439: https://github.com/nedbat/coveragepy/issues/439
.. _issue 443: https://github.com/nedbat/coveragepy/issues/443
.. _issue 445: https://github.com/nedbat/coveragepy/issues/445


.. _changes_402:

Version 4.0.2 — 2015-11-04
--------------------------

- More work on supporting unusually encoded source. Fixed `issue 431`_.

- Files or directories with non-ASCII characters are now handled properly,
  fixing `issue 432`_.

- Setting a trace function with sys.settrace was broken by a change in 4.0.1,
  as reported in `issue 436`_.  This is now fixed.

- Officially support PyPy 4.0, which required no changes, just updates to the
  docs.

.. _issue 431: https://github.com/nedbat/coveragepy/issues/431
.. _issue 432: https://github.com/nedbat/coveragepy/issues/432
.. _issue 436: https://github.com/nedbat/coveragepy/issues/436


.. _changes_401:

Version 4.0.1 — 2015-10-13
--------------------------

- When combining data files, unreadable files will now generate a warning
  instead of failing the command.  This is more in line with the older
  coverage.py v3.7.1 behavior, which silently ignored unreadable files.
  Prompted by `issue 418`_.

- The ``--skip-covered`` option would skip reporting on 100% covered files,
  but also skipped them when calculating total coverage.  This was wrong: it
  should only remove lines from the report, not change the final answer.  This
  is now fixed, closing `issue 423`_.

- In 4.0, the data file recorded a summary of the system on which it was run.
  Combined data files would keep all of those summaries.  This could lead to
  enormous data files consisting of mostly repetitive useless information. That
  summary is now gone, fixing `issue 415`_.  If you want summary information,
  get in touch, and we'll figure out a better way to do it.

- Test suites that mocked os.path.exists would experience strange failures, due
  to coverage.py using their mock inadvertently.  This is now fixed, closing
  `issue 416`_.

- Importing a ``__init__`` module explicitly would lead to an error:
  ``AttributeError: 'module' object has no attribute '__path__'``, as reported
  in `issue 410`_.  This is now fixed.

- Code that uses ``sys.settrace(sys.gettrace())`` used to incur a more than 2x
  speed penalty.  Now there's no penalty at all. Fixes `issue 397`_.

- Pyexpat C code will no longer be recorded as a source file, fixing
  `issue 419`_.

- The source kit now contains all of the files needed to have a complete source
  tree, re-fixing `issue 137`_ and closing `issue 281`_.

.. _issue 281: https://github.com/nedbat/coveragepy/issues/281
.. _issue 397: https://github.com/nedbat/coveragepy/issues/397
.. _issue 410: https://github.com/nedbat/coveragepy/issues/410
.. _issue 415: https://github.com/nedbat/coveragepy/issues/415
.. _issue 416: https://github.com/nedbat/coveragepy/issues/416
.. _issue 418: https://github.com/nedbat/coveragepy/issues/418
.. _issue 419: https://github.com/nedbat/coveragepy/issues/419
.. _issue 423: https://github.com/nedbat/coveragepy/issues/423


.. _changes_40:

Version 4.0 — 2015-09-20
------------------------

No changes from 4.0b3


Version 4.0b3 — 2015-09-07
--------------------------

- Reporting on an unmeasured file would fail with a traceback.  This is now
  fixed, closing `issue 403`_.

- The Jenkins ShiningPanda_ plugin looks for an obsolete file name to find the
  HTML reports to publish, so it was failing under coverage.py 4.0.  Now we
  create that file if we are running under Jenkins, to keep things working
  smoothly. `issue 404`_.

- Kits used to include tests and docs, but didn't install them anywhere, or
  provide all of the supporting tools to make them useful.  Kits no longer
  include tests and docs.  If you were using them from the older packages, get
  in touch and help me understand how.

.. _issue 403: https://github.com/nedbat/coveragepy/issues/403
.. _issue 404: https://github.com/nedbat/coveragepy/issues/404


Version 4.0b2 — 2015-08-22
--------------------------

- 4.0b1 broke ``--append`` creating new data files.  This is now fixed, closing
  `issue 392`_.

- ``py.test --cov`` can write empty data, then touch files due to ``--source``,
  which made coverage.py mistakenly force the data file to record lines instead
  of arcs.  This would lead to a "Can't combine line data with arc data" error
  message.  This is now fixed, though the fix changed some method names in the
  `CoverageData` interface.  Fixes `issue 399`_.

- `CoverageData.read_fileobj` and `CoverageData.write_fileobj` replace the
  `.read` and `.write` methods, and are now properly inverses of each other.

- When using ``report --skip-covered``, a message will now be included in the
  report output indicating how many files were skipped, and if all files are
  skipped, coverage.py won't accidentally scold you for having no data to
  report.  Thanks, Krystian Kichewko.

- A new conversion utility has been added:  ``python -m coverage.pickle2json``
  will convert v3.x pickle data files to v4.x JSON data files.  Thanks,
  Alexander Todorov.  Closes `issue 395`_.

- A new version identifier is available, `coverage.version_info`, a plain
  tuple of values similar to `sys.version_info`_ (see the sketch after this
  list).
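
A quick sketch of how the new identifier can be used; the version threshold
shown is only an example::

    import coverage

    # A plain tuple, compared the same way as sys.version_info.
    if coverage.version_info >= (4, 0):
        print("coverage.py 4.0 or later:", coverage.__version__)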

.. _issue 392: https://github.com/nedbat/coveragepy/issues/392
.. _issue 395: https://github.com/nedbat/coveragepy/issues/395
.. _issue 399: https://github.com/nedbat/coveragepy/issues/399
.. _sys.version_info: https://docs.python.org/3/library/sys.html#sys.version_info


Version 4.0b1 — 2015-08-02
--------------------------

- Coverage.py is now licensed under the Apache 2.0 license.  See NOTICE.txt for
  details.  Closes `issue 313`_.

- The data storage has been completely revamped.  The data file is now
  JSON-based instead of a pickle, closing `issue 236`_.  The `CoverageData`
  class is now a public supported documented API to the data file.

- A new configuration option, ``[run] note``, lets you set a note that will be
  stored in the `runs` section of the data file.  You can use this to annotate
  the data file with any information you like.

- Unrecognized configuration options will now print an error message and stop
  coverage.py.  This should help prevent configuration mistakes from passing
  silently.  Finishes `issue 386`_.

- In parallel mode, ``coverage erase`` will now delete all of the data files,
  fixing `issue 262`_.

- Coverage.py now accepts a directory name for ``coverage run`` and will run a
  ``__main__.py`` found there, just like Python will.  Fixes `issue 252`_.
  Thanks, Dmitry Trofimov.

- The XML report now includes a ``missing-branches`` attribute.  Thanks, Steve
  Peak.  This is not a part of the Cobertura DTD, so the XML report no longer
  references the DTD.

- Missing branches in the HTML report now have a bit more information in the
  right-hand annotations.  Hopefully this will make their meaning clearer.

- All the reporting functions now behave the same when no data has been
  collected: they exit with a status code of 1.  ``fail_under`` is now applied
  even when the report is empty.  Thanks, Ionel Cristian Mărieș.

- Plugins are now initialized differently.  Instead of looking for a class
  called ``Plugin``, coverage.py looks for a function called ``coverage_init``.

- A file-tracing plugin can now ask to have built-in Python reporting by
  returning `"python"` from its `file_reporter()` method.

- Code that was executed with `exec` would be mis-attributed to the file that
  called it.  This is now fixed, closing `issue 380`_.

- The ability to use item access on `Coverage.config` (introduced in 4.0a2) has
  been changed to a more explicit `Coverage.get_option` and
  `Coverage.set_option` API (see the sketch after this list).

- The ``Coverage.use_cache`` method is no longer supported.

- The private method ``Coverage._harvest_data`` is now called
  ``Coverage.get_data``, and returns the ``CoverageData`` containing the
  collected data.

- The project is consistently referred to as "coverage.py" throughout the code
  and the documentation, closing `issue 275`_.

- Combining data files with an explicit configuration file was broken in 4.0a6,
  but now works again, closing `issue 385`_.

- ``coverage combine`` now accepts files as well as directories.

- The speed is back to 3.7.1 levels, after having slowed down due to plugin
  support, finishing up `issue 387`_.
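
A rough sketch of the new option API; the ``run:branch`` option is used here
only as an example::

    import coverage

    cov = coverage.Coverage()

    # Options are addressed as "section:name" strings.  These two calls
    # replace the old item access on Coverage.config.
    cov.set_option("run:branch", True)
    print(cov.get_option("run:branch"))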

.. _issue 236: https://github.com/nedbat/coveragepy/issues/236
.. _issue 252: https://github.com/nedbat/coveragepy/issues/252
.. _issue 262: https://github.com/nedbat/coveragepy/issues/262
.. _issue 275: https://github.com/nedbat/coveragepy/issues/275
.. _issue 313: https://github.com/nedbat/coveragepy/issues/313
.. _issue 380: https://github.com/nedbat/coveragepy/issues/380
.. _issue 385: https://github.com/nedbat/coveragepy/issues/385
.. _issue 386: https://github.com/nedbat/coveragepy/issues/386
.. _issue 387: https://github.com/nedbat/coveragepy/issues/387

.. 40 issues closed in 4.0 below here


Version 4.0a6 — 2015-06-21
--------------------------

- Python 3.5b2 and PyPy 2.6.0 are supported.

- The original module-level function interface to coverage.py is no longer
  supported.  You must now create a ``coverage.Coverage`` object, and use
  methods on it.

- The ``coverage combine`` command now accepts any number of directories as
  arguments, and will combine all the data files from those directories.  This
  means you don't have to copy the files to one directory before combining.
  Thanks, Christine Lytwynec.  Finishes `issue 354`_.

- Branch coverage couldn't properly handle certain extremely long files. This
  is now fixed (`issue 359`_).

- Branch coverage didn't understand yield statements properly.  Mickie Betz
  persisted in pursuing this despite Ned's pessimism.  Fixes `issue 308`_ and
  `issue 324`_.

- The ``COVERAGE_DEBUG`` environment variable can be used to set the
  ``[run] debug`` configuration option to control what internal operations are
  logged.

- HTML reports were truncated at formfeed characters.  This is now fixed
  (`issue 360`_).  It's always fun when the problem is due to a bug in the
  Python standard library.

- Files with incorrect encoding declaration comments are no longer ignored by
  the reporting commands, fixing `issue 351`_.

- HTML reports now include a time stamp in the footer, closing `issue 299`_.
  Thanks, Conrad Ho.

- HTML reports now begrudgingly use double-quotes rather than single quotes,
  because there are "software engineers" out there writing tools that read HTML
  and somehow have no idea that single quotes exist.  Capitulates to the absurd
  `issue 361`_.  Thanks, Jon Chappell.

- The ``coverage annotate`` command now handles non-ASCII characters properly,
  closing `issue 363`_.  Thanks, Leonardo Pistone.

- Drive letters on Windows were not normalized correctly; now they are.
  Thanks, Ionel Cristian Mărieș.

- Plugin support had some bugs fixed, closing `issue 374`_ and `issue 375`_.
  Thanks, Stefan Behnel.

.. _issue 299: https://github.com/nedbat/coveragepy/issues/299
.. _issue 308: https://github.com/nedbat/coveragepy/issues/308
.. _issue 324: https://github.com/nedbat/coveragepy/issues/324
.. _issue 351: https://github.com/nedbat/coveragepy/issues/351
.. _issue 354: https://github.com/nedbat/coveragepy/issues/354
.. _issue 359: https://github.com/nedbat/coveragepy/issues/359
.. _issue 360: https://github.com/nedbat/coveragepy/issues/360
.. _issue 361: https://github.com/nedbat/coveragepy/issues/361
.. _issue 363: https://github.com/nedbat/coveragepy/issues/363
.. _issue 374: https://github.com/nedbat/coveragepy/issues/374
.. _issue 375: https://github.com/nedbat/coveragepy/issues/375


Version 4.0a5 — 2015-02-16
--------------------------

- Plugin support is now implemented in the C tracer instead of the Python
  tracer. This greatly improves the speed of tracing projects using plugins.

- Coverage.py now always adds the current directory to sys.path, so that
  plugins can import files in the current directory (`issue 358`_).

- If the `config_file` argument to the Coverage constructor is specified as
  ".coveragerc", it is treated as if it were True.  This means setup.cfg is
  also examined, and a missing file is not considered an error (`issue 357`_).
  See the sketch after this list.

- Wildly experimental: support for measuring processes started by the
  multiprocessing module.  To use, set ``--concurrency=multiprocessing``,
  either on the command line or in the .coveragerc file (`issue 117`_). Thanks,
  Eduardo Schettino.  Currently, this does not work on Windows.

- A new warning is possible, if a desired file isn't measured because it was
  imported before coverage.py was started (`issue 353`_).

- The `coverage.process_startup` function now will start coverage measurement
  only once, no matter how many times it is called.  This fixes problems due
  to unusual virtualenv configurations (`issue 340`_).

- Added 3.5.0a1 to the list of supported CPython versions.
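
A minimal sketch of the constructor call in question; nothing beyond the one
argument is implied::

    import coverage

    # Passing the default file name is now the same as passing True:
    # setup.cfg is also consulted, and a missing .coveragerc is not an error.
    cov = coverage.Coverage(config_file=".coveragerc")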

.. _issue 117: https://github.com/nedbat/coveragepy/issues/117
.. _issue 340: https://github.com/nedbat/coveragepy/issues/340
.. _issue 353: https://github.com/nedbat/coveragepy/issues/353
.. _issue 357: https://github.com/nedbat/coveragepy/issues/357
.. _issue 358: https://github.com/nedbat/coveragepy/issues/358


Version 4.0a4 — 2015-01-25
--------------------------

- Plugins can now provide sys_info for debugging output.

- Started plugins documentation.

- Prepared to move the docs to readthedocs.org.


Version 4.0a3 — 2015-01-20
--------------------------

- Reports now use file names with extensions.  Previously, a report would
  describe a/b/c.py as "a/b/c".  Now it is shown as "a/b/c.py".  This allows
  for better support of non-Python files, and also fixed `issue 69`_.

- The XML report now reports each directory as a package again.  This was a bad
  regression, I apologize.  This was reported in `issue 235`_, which is now
  fixed.

- A new configuration option for the XML report: ``[xml] package_depth``
  controls which directories are identified as packages in the report.
  Directories deeper than this depth are not reported as packages.
  The default is that all directories are reported as packages.
  Thanks, Lex Berezhny.

- When looking for the source for a frame, check if the file exists. On
  Windows, .pyw files are no longer recorded as .py files. Along the way, this
  fixed `issue 290`_.

- Empty files are now reported as 100% covered in the XML report, not 0%
  covered (`issue 345`_).

- Regexes in the configuration file are now compiled as soon as they are read,
  to provide error messages earlier (`issue 349`_).

.. _issue 69: https://github.com/nedbat/coveragepy/issues/69
.. _issue 235: https://github.com/nedbat/coveragepy/issues/235
.. _issue 290: https://github.com/nedbat/coveragepy/issues/290
.. _issue 345: https://github.com/nedbat/coveragepy/issues/345
.. _issue 349: https://github.com/nedbat/coveragepy/issues/349


Version 4.0a2 — 2015-01-14
--------------------------

- Officially support PyPy 2.4, and PyPy3 2.4.  Drop support for
  CPython 3.2 and older versions of PyPy.  The code won't work on CPython 3.2.
  It will probably still work on older versions of PyPy, but I'm not testing
  against them.

- Plugins!

- The original command line switches (`-x` to run a program, etc) are no
  longer supported.

- A new option: `coverage report --skip-covered` will reduce the number of
  files reported by skipping files with 100% coverage.  Thanks, Krystian
  Kichewko.  This means that empty `__init__.py` files will be skipped, since
  they are 100% covered, closing `issue 315`_.

- You can now specify the ``--fail-under`` option in the ``.coveragerc`` file
  as the ``[report] fail_under`` option.  This closes `issue 314`_.

- The ``COVERAGE_OPTIONS`` environment variable is no longer supported.  It was
  a hack for ``--timid`` before configuration files were available.

- The HTML report now has filtering.  Type text into the Filter box on the
  index page, and only modules with that text in the name will be shown.
  Thanks, Danny Allen.

- The textual report and the HTML report used to report partial branches
  differently for no good reason.  Now the text report's "missing branches"
  column is a "partial branches" column so that both reports show the same
  numbers.  This closes `issue 342`_.

- If you specify a ``--rcfile`` that cannot be read, you will get an error
  message.  Fixes `issue 343`_.

- The ``--debug`` switch can now be used on any command.

- You can now programmatically adjust the configuration of coverage.py by
  setting items on `Coverage.config` after construction.

- A module run with ``-m`` can be used as the argument to ``--source``, fixing
  `issue 328`_.  Thanks, Buck Evan.

- The regex for matching exclusion pragmas has been fixed to allow more kinds
  of white space, fixing `issue 334`_.

- Made some PyPy-specific tweaks to improve speed under PyPy.  Thanks, Alex
  Gaynor.

- In some cases, with a source file missing a final newline, coverage.py would
  count statements incorrectly.  This is now fixed, closing `issue 293`_.

- The status.dat file that HTML reports use to avoid re-creating files that
  haven't changed is now a JSON file instead of a pickle file.  This obviates
  `issue 287`_ and `issue 237`_.

.. _issue 237: https://github.com/nedbat/coveragepy/issues/237
.. _issue 287: https://github.com/nedbat/coveragepy/issues/287
.. _issue 293: https://github.com/nedbat/coveragepy/issues/293
.. _issue 314: https://github.com/nedbat/coveragepy/issues/314
.. _issue 315: https://github.com/nedbat/coveragepy/issues/315
.. _issue 328: https://github.com/nedbat/coveragepy/issues/328
.. _issue 334: https://github.com/nedbat/coveragepy/issues/334
.. _issue 342: https://github.com/nedbat/coveragepy/issues/342
.. _issue 343: https://github.com/nedbat/coveragepy/issues/343


Version 4.0a1 — 2014-09-27
--------------------------

- Python versions supported are now CPython 2.6, 2.7, 3.2, 3.3, and 3.4, and
  PyPy 2.2.

- Gevent, eventlet, and greenlet are now supported, closing `issue 149`_.
  The ``concurrency`` setting specifies the concurrency library in use.  Huge
  thanks to Peter Portante for initial implementation, and to Joe Jevnik for
  the final insight that completed the work.

- Options are now also read from a setup.cfg file, if any.  Sections are
  prefixed with "coverage:", so the ``[run]`` options will be read from the
  ``[coverage:run]`` section of setup.cfg.  Finishes `issue 304`_.

- The ``report -m`` command can now show missing branches when reporting on
  branch coverage.  Thanks, Steve Leonard. Closes `issue 230`_.

- The XML report now contains a ``<source>`` element, fixing `issue 94`_.
  Thanks, Stan Hu.

- The class defined in the coverage module is now called ``Coverage`` instead
  of ``coverage``, though the old name still works, for backward compatibility
  (see the sketch after this list).

- The ``fail-under`` value is now rounded the same as reported results,
  preventing paradoxical results, fixing `issue 284`_.

- The XML report will now create the output directory if need be, fixing
  `issue 285`_.  Thanks, Chris Rose.

- HTML reports no longer raise UnicodeDecodeError if a Python file has
  un-decodable characters, fixing `issue 303`_ and `issue 331`_.

- The annotate command will now annotate all files, not just ones relative to
  the current directory, fixing `issue 57`_.

- The coverage module no longer causes deprecation warnings on Python 3.4 by
  importing the imp module, fixing `issue 305`_.

- Encoding declarations in source files are only considered if they are truly
  comments.  Thanks, Anthony Sottile.
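
A minimal sketch showing both spellings; the variable names are only
illustrative::

    import coverage

    # The new, preferred name...
    cov = coverage.Coverage()

    # ...and the old lowercase name, which still works.
    legacy = coverage.coverage()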

.. _issue 57: https://github.com/nedbat/coveragepy/issues/57
.. _issue 94: https://github.com/nedbat/coveragepy/issues/94
.. _issue 149: https://github.com/nedbat/coveragepy/issues/149
.. _issue 230: https://github.com/nedbat/coveragepy/issues/230
.. _issue 284: https://github.com/nedbat/coveragepy/issues/284
.. _issue 285: https://github.com/nedbat/coveragepy/issues/285
.. _issue 303: https://github.com/nedbat/coveragepy/issues/303
.. _issue 304: https://github.com/nedbat/coveragepy/issues/304
.. _issue 305: https://github.com/nedbat/coveragepy/issues/305
.. _issue 331: https://github.com/nedbat/coveragepy/issues/331


.. _changes_371:

Version 3.7.1 — 2013-12-13
--------------------------

- Improved the speed of HTML report generation by about 20%.

- Fixed the mechanism for finding OS-installed static files for the HTML report
  so that it will actually find OS-installed static files.


.. _changes_37:

Version 3.7 — 2013-10-06
------------------------

- Added the ``--debug`` switch to ``coverage run``.  It accepts a list of
  options indicating the type of internal activity to log to stderr.

- Improved the branch coverage facility, fixing `issue 92`_ and `issue 175`_.

- Running code with ``coverage run -m`` now behaves more like Python does,
  setting sys.path properly, which fixes `issue 207`_ and `issue 242`_.

- Coverage.py can now run .pyc files directly, closing `issue 264`_.

- Coverage.py properly supports .pyw files, fixing `issue 261`_.

- Omitting files within a tree specified with the ``source`` option would
  cause them to be incorrectly marked as un-executed, as described in
  `issue 218`_.  This is now fixed.

- When specifying paths to alias together during data combining, you can now
  specify relative paths, fixing `issue 267`_.

- Most file paths can now be specified with username expansion (``~/src``, or
  ``~build/src``, for example), and with environment variable expansion
  (``build/$BUILDNUM/src``).

- Trying to create an XML report with no files to report on would cause a
  ZeroDivisionError, but no longer does, fixing `issue 250`_.

- When running a threaded program under the Python tracer, coverage.py no
  longer issues a spurious warning about the trace function changing: "Trace
  function changed, measurement is likely wrong: None."  This fixes `issue
  164`_.

- Static files necessary for HTML reports are found in system-installed places,
  to ease OS-level packaging of coverage.py.  Closes `issue 259`_.

- Source files with encoding declarations, but a blank first line, were not
  decoded properly.  Now they are.  Thanks, Roger Hu.

- The source kit now includes the ``__main__.py`` file in the root coverage
  directory, fixing `issue 255`_.

.. _issue 92: https://github.com/nedbat/coveragepy/issues/92
.. _issue 164: https://github.com/nedbat/coveragepy/issues/164
.. _issue 175: https://github.com/nedbat/coveragepy/issues/175
.. _issue 207: https://github.com/nedbat/coveragepy/issues/207
.. _issue 242: https://github.com/nedbat/coveragepy/issues/242
.. _issue 218: https://github.com/nedbat/coveragepy/issues/218
.. _issue 250: https://github.com/nedbat/coveragepy/issues/250
.. _issue 255: https://github.com/nedbat/coveragepy/issues/255
.. _issue 259: https://github.com/nedbat/coveragepy/issues/259
.. _issue 261: https://github.com/nedbat/coveragepy/issues/261
.. _issue 264: https://github.com/nedbat/coveragepy/issues/264
.. _issue 267: https://github.com/nedbat/coveragepy/issues/267


.. _changes_36:

Version 3.6 — 2013-01-05
------------------------

- Added a page to the docs about troublesome situations, closing `issue 226`_,
  and added some info to the TODO file, closing `issue 227`_.

.. _issue 226: https://github.com/nedbat/coveragepy/issues/226
.. _issue 227: https://github.com/nedbat/coveragepy/issues/227


Version 3.6b3 — 2012-12-29
--------------------------

- Beta 2 broke the nose plugin. It's fixed again, closing `issue 224`_.

.. _issue 224: https://github.com/nedbat/coveragepy/issues/224


Version 3.6b2 — 2012-12-23
--------------------------

- Coverage.py runs on Python 2.3 and 2.4 again. It was broken in 3.6b1.

- The C extension is optionally compiled using a different, more widely-used
  technique, taking another stab at fixing `issue 80`_ once and for all.

- Combining data files would create entries for phantom files if used with
  ``source`` and path aliases.  It no longer does.

- ``debug sys`` now shows the configuration file path that was read.

- If an oddly-behaved package claims that code came from an empty-string
  file name, coverage.py no longer associates it with the directory name,
  fixing `issue 221`_.

.. _issue 221: https://github.com/nedbat/coveragepy/issues/221


Version 3.6b1 — 2012-11-28
--------------------------

- Wildcards in ``include=`` and ``omit=`` arguments were not handled properly
  in reporting functions, though they were when running.  Now they are handled
  uniformly, closing `issue 143`_ and `issue 163`_.  **NOTE**: it is possible
  that your configurations may now be incorrect.  If you use ``include`` or
  ``omit`` during reporting, whether on the command line, through the API, or
  in a configuration file, please check carefully that you were not relying on
  the old broken behavior.

- The **report**, **html**, and **xml** commands now accept a ``--fail-under``
  switch that indicates in the exit status whether the coverage percentage was
  less than a particular value.  Closes `issue 139`_.

- The reporting functions coverage.report(), coverage.html_report(), and
  coverage.xml_report() now all return a float, the total percentage covered
  measurement (see the sketch after this list).

- The HTML report's title can now be set in the configuration file, with the
  ``--title`` switch on the command line, or via the API.

- Configuration files now support substitution of environment variables, using
  syntax like ``${WORD}``.  Closes `issue 97`_.

- Embarrassingly, the ``[xml] output=`` setting in the .coveragerc file simply
  didn't work.  Now it does.

- The XML report now consistently uses file names for the file name attribute,
  rather than sometimes using module names.  Fixes `issue 67`_.
  Thanks, Marcus Cobden.

- Coverage percentage metrics are now computed slightly differently under
  branch coverage.  This means that completely un-executed files will now
  correctly have 0% coverage, fixing `issue 156`_.  This also means that your
  total coverage numbers will generally now be lower if you are measuring
  branch coverage.

- When installing, now in addition to creating a "coverage" command, two new
  aliases are also installed.  A "coverage2" or "coverage3" command will be
  created, depending on whether you are installing in Python 2.x or 3.x.
  A "coverage-X.Y" command will also be created corresponding to your specific
  version of Python.  Closes `issue 111`_.

- The coverage.py installer no longer tries to bootstrap setuptools or
  Distribute.  You must have one of them installed first, as `issue 202`_
  recommended.

- The coverage.py kit now includes docs (closing `issue 137`_) and tests.

- On Windows, files are now reported in their correct case, fixing `issue 89`_
  and `issue 203`_.

- If a file is missing during reporting, the path shown in the error message
  is now correct, rather than an incorrect path in the current directory.
  Fixes `issue 60`_.

- Running an HTML report in Python 3 in the same directory as an old Python 2
  HTML report would fail with a UnicodeDecodeError. This issue (`issue 193`_)
  is now fixed.

- Fixed yet another error trying to parse non-Python files as Python, this
  time an IndentationError, closing `issue 82`_ for the fourth time...

- If `coverage xml` fails because there is no data to report, it used to
  create a zero-length XML file.  Now it doesn't, fixing `issue 210`_.

- Jython files now work with the ``--source`` option, fixing `issue 100`_.

- Running coverage.py under a debugger is unlikely to work, but it shouldn't
  fail with "TypeError: 'NoneType' object is not iterable".  Fixes `issue
  201`_.

- On some Linux distributions, when installed with the OS package manager,
  coverage.py would report its own code as part of the results.  Now it won't,
  fixing `issue 214`_, though this will take some time to be repackaged by the
  operating systems.

- Docstrings for the legacy singleton methods are more helpful.  Thanks Marius
  Gedminas.  Closes `issue 205`_.

- The pydoc tool can now show documentation for the class `coverage.coverage`.
  Closes `issue 206`_.

- Added a page to the docs about contributing to coverage.py, closing
  `issue 171`_.

- When coverage.py ended unsuccessfully, it may have reported odd errors like
  ``'NoneType' object has no attribute 'isabs'``.  It no longer does,
  so kiss `issue 153`_ goodbye.
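
A sketch using the module-level interface as it existed then; the measured
code and the 80% threshold are only illustrative::

    import coverage

    coverage.start()
    total = sum(range(10))      # stand-in for the code being measured
    coverage.stop()

    # report() now returns the total percentage covered.
    pct = coverage.report()
    if pct < 80:
        print("coverage is below 80%%: %.1f%%" % pct)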

.. _issue 60: https://github.com/nedbat/coveragepy/issues/60
.. _issue 67: https://github.com/nedbat/coveragepy/issues/67
.. _issue 89: https://github.com/nedbat/coveragepy/issues/89
.. _issue 97: https://github.com/nedbat/coveragepy/issues/97
.. _issue 100: https://github.com/nedbat/coveragepy/issues/100
.. _issue 111: https://github.com/nedbat/coveragepy/issues/111
.. _issue 137: https://github.com/nedbat/coveragepy/issues/137
.. _issue 139: https://github.com/nedbat/coveragepy/issues/139
.. _issue 143: https://github.com/nedbat/coveragepy/issues/143
.. _issue 153: https://github.com/nedbat/coveragepy/issues/153
.. _issue 156: https://github.com/nedbat/coveragepy/issues/156
.. _issue 163: https://github.com/nedbat/coveragepy/issues/163
.. _issue 171: https://github.com/nedbat/coveragepy/issues/171
.. _issue 193: https://github.com/nedbat/coveragepy/issues/193
.. _issue 201: https://github.com/nedbat/coveragepy/issues/201
.. _issue 202: https://github.com/nedbat/coveragepy/issues/202
.. _issue 203: https://github.com/nedbat/coveragepy/issues/203
.. _issue 205: https://github.com/nedbat/coveragepy/issues/205
.. _issue 206: https://github.com/nedbat/coveragepy/issues/206
.. _issue 210: https://github.com/nedbat/coveragepy/issues/210
.. _issue 214: https://github.com/nedbat/coveragepy/issues/214


.. _changes_353:

Version 3.5.3 — 2012-09-29
--------------------------

- Line numbers in the HTML report line up better with the source lines, fixing
  `issue 197`_, thanks Marius Gedminas.

- When specifying a directory as the source= option, the directory itself no
  longer needs to have a ``__init__.py`` file, though its sub-directories do,
  to be considered as source files.

- Files encoded as UTF-8 with a BOM are now properly handled, fixing
  `issue 179`_.  Thanks, Pablo Carballo.

- Fixed more cases of non-Python files being reported as Python source, and
  then not being able to parse them as Python.  Closes `issue 82`_ (again).
  Thanks, Julian Berman.

- Fixed memory leaks under Python 3, thanks, Brett Cannon. Closes `issue 147`_.

- Optimized .pyo files may not have been handled correctly, `issue 195`_.
  Thanks, Marius Gedminas.

- Certain unusually named file paths could have been mangled during reporting,
  `issue 194`_.  Thanks, Marius Gedminas.

- Try to do a better job of the impossible task of detecting when we can't
  build the C extension, fixing `issue 183`_.

- Testing is now done with `tox`_, thanks, Marc Abramowitz.

.. _issue 147: https://github.com/nedbat/coveragepy/issues/147
.. _issue 179: https://github.com/nedbat/coveragepy/issues/179
.. _issue 183: https://github.com/nedbat/coveragepy/issues/183
.. _issue 194: https://github.com/nedbat/coveragepy/issues/194
.. _issue 195: https://github.com/nedbat/coveragepy/issues/195
.. _issue 197: https://github.com/nedbat/coveragepy/issues/197
.. _tox: https://tox.readthedocs.io/


.. _changes_352:

Version 3.5.2 — 2012-05-04
--------------------------

No changes since 3.5.2b1


Version 3.5.2b1 — 2012-04-29
----------------------------

- The HTML report has slightly tweaked controls: the buttons at the top of
  the page are color-coded to the source lines they affect.

- Custom CSS can be applied to the HTML report by specifying a CSS file as
  the ``extra_css`` configuration value in the ``[html]`` section.

- Source files with custom encodings declared in a comment at the top are now
  properly handled during reporting on Python 2.  Python 3 always handled them
  properly.  This fixes `issue 157`_.

- Backup files left behind by editors are no longer collected by the source=
  option, fixing `issue 168`_.

- If a file doesn't parse properly as Python, we don't report it as an error
  if the file name seems like maybe it wasn't meant to be Python.  This is a
  pragmatic fix for `issue 82`_.

- The ``-m`` switch on ``coverage report``, which includes missing line numbers
  in the summary report, can now be specified as ``show_missing`` in the
  config file.  Closes `issue 173`_.

- When running a module with ``coverage run -m <modulename>``, certain details
  of the execution environment weren't the same as for
  ``python -m <modulename>``.  This had the unfortunate side-effect of making
  ``coverage run -m unittest discover`` not work if you had tests in a
  directory named "test".  This fixes `issue 155`_ and `issue 142`_.

- Now the exit status of your product code is properly used as the process
  status when running ``python -m coverage run ...``.  Thanks, JT Olds.

- When installing into PyPy, we no longer attempt (and fail) to compile
  the C tracer function, closing `issue 166`_.

.. _issue 142: https://github.com/nedbat/coveragepy/issues/142
.. _issue 155: https://github.com/nedbat/coveragepy/issues/155
.. _issue 157: https://github.com/nedbat/coveragepy/issues/157
.. _issue 166: https://github.com/nedbat/coveragepy/issues/166
.. _issue 168: https://github.com/nedbat/coveragepy/issues/168
.. _issue 173: https://github.com/nedbat/coveragepy/issues/173


.. _changes_351:

Version 3.5.1 — 2011-09-23
--------------------------

- The ``[paths]`` feature unfortunately didn't work in real world situations
  where you wanted to, you know, report on the combined data.  Now all paths
  stored in the combined file are canonicalized properly.


Version 3.5.1b1 — 2011-08-28
----------------------------

- When combining data files from parallel runs, you can now instruct
  coverage.py about which directories are equivalent on different machines.  A
  ``[paths]`` section in the configuration file lists paths that are to be
  considered equivalent.  Finishes `issue 17`_.

- for-else constructs are understood better, and don't cause erroneous partial
  branch warnings.  Fixes `issue 122`_.

- Branch coverage for ``with`` statements is improved, fixing `issue 128`_.

- The number of partial branches reported on the HTML summary page was
  different than the number reported on the individual file pages.  This is
  now fixed.

- An explicit include directive to measure files in the Python installation
  wouldn't work because of the standard library exclusion.  Now the include
  directive takes precedence, and the files will be measured.  Fixes
  `issue 138`_.

- The HTML report now handles Unicode characters in Python source files
  properly.  This fixes `issue 124`_ and `issue 144`_. Thanks, Devin
  Jeanpierre.

- In order to help the core developers measure the test coverage of the
  standard library, Brandon Rhodes devised an aggressive hack to trick Python
  into running some coverage.py code before anything else in the process.
  See the coverage/fullcoverage directory if you are interested.

.. _issue 17: https://github.com/nedbat/coveragepy/issues/17
.. _issue 122: https://github.com/nedbat/coveragepy/issues/122
.. _issue 124: https://github.com/nedbat/coveragepy/issues/124
.. _issue 128: https://github.com/nedbat/coveragepy/issues/128
.. _issue 138: https://github.com/nedbat/coveragepy/issues/138
.. _issue 144: https://github.com/nedbat/coveragepy/issues/144


.. _changes_35:

Version 3.5 — 2011-06-29
------------------------

- The HTML report hotkeys now behave slightly differently when the current
  chunk isn't visible at all:  a chunk on the screen will be selected,
  instead of the old behavior of jumping to the literal next chunk.
  The hotkeys now work in Google Chrome.  Thanks, Guido van Rossum.


Version 3.5b1 — 2011-06-05
--------------------------

- The HTML report now has hotkeys.  Try ``n``, ``s``, ``m``, ``x``, ``b``,
  ``p``, and ``c`` on the overview page to change the column sorting.
  On a file page, ``r``, ``m``, ``x``, and ``p`` toggle the run, missing,
  excluded, and partial line markings.  You can navigate the highlighted
  sections of code by using the ``j`` and ``k`` keys for next and previous.
  The ``1`` (one) key jumps to the first highlighted section in the file,
  and ``0`` (zero) scrolls to the top of the file.

- The ``--omit`` and ``--include`` switches now interpret their values more
  usefully.  If the value starts with a wildcard character, it is used as-is.
  If it does not, it is interpreted relative to the current directory.
  Closes `issue 121`_.

- Partial branch warnings can now be pragma'd away.  The configuration option
  ``partial_branches`` is a list of regular expressions.  Lines matching any of
  those expressions will never be marked as a partial branch.  In addition,
  there's a built-in list of regular expressions marking statements which
  should never be marked as partial.  This list includes ``while True:``,
  ``while 1:``, ``if 1:``, and ``if 0:``.

- The ``coverage()`` constructor accepts single strings for the ``omit=`` and
  ``include=`` arguments, adapting to a common error in programmatic use (see
  the sketch after this list).

- Modules can now be run directly using ``coverage run -m modulename``, to
  mirror Python's ``-m`` flag.  Closes `issue 95`_, thanks, Brandon Rhodes.

- ``coverage run`` didn't emulate Python accurately in one small detail: the
  current directory inserted into ``sys.path`` was relative rather than
  absolute. This is now fixed.

- HTML reporting is now incremental: a record is kept of the data that
  produced the HTML reports, and only files whose data has changed will
  be generated.  This should make most HTML reporting faster.

- Pathological code execution could disable the trace function behind our
  backs, leading to incorrect code measurement.  Now if this happens,
  coverage.py will issue a warning, at least alerting you to the problem.
  Closes `issue 93`_.  Thanks to Marius Gedminas for the idea.

- The C-based trace function now behaves properly when saved and restored
  with ``sys.gettrace()`` and ``sys.settrace()``.  This fixes `issue 125`_
  and `issue 123`_.  Thanks, Devin Jeanpierre.

- Source files are now opened with Python 3.2's ``tokenize.open()`` where
  possible, to get the best handling of Python source files with encodings.
  Closes `issue 107`_, thanks, Brett Cannon.

- Syntax errors in supposed Python files can now be ignored during reporting
  with the ``-i`` switch just like other source errors.  Closes `issue 115`_.

- Installation from source now succeeds on machines without a C compiler,
  closing `issue 80`_.

- Coverage.py can now be run directly from a working tree by specifying
  the directory name to python:  ``python coverage_py_working_dir run ...``.
  Thanks, Brett Cannon.

- A little bit of Jython support: `coverage run` can now measure Jython
  execution by adapting when $py.class files are traced. Thanks, Adi Roiban.
  Jython still doesn't provide the Python libraries needed to make
  coverage reporting work, unfortunately.

- Internally, files are now closed explicitly, fixing `issue 104`_.  Thanks,
  Brett Cannon.
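
A tiny sketch; the pattern shown is only an example::

    import coverage

    # A single pattern no longer needs to be wrapped in a list...
    cov = coverage.coverage(omit="*/tests/*")

    # ...which is equivalent to the previous spelling.
    cov = coverage.coverage(omit=["*/tests/*"])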

.. _issue 80: https://github.com/nedbat/coveragepy/issues/80
.. _issue 93: https://github.com/nedbat/coveragepy/issues/93
.. _issue 95: https://github.com/nedbat/coveragepy/issues/95
.. _issue 104: https://github.com/nedbat/coveragepy/issues/104
.. _issue 107: https://github.com/nedbat/coveragepy/issues/107
.. _issue 115: https://github.com/nedbat/coveragepy/issues/115
.. _issue 121: https://github.com/nedbat/coveragepy/issues/121
.. _issue 123: https://github.com/nedbat/coveragepy/issues/123
.. _issue 125: https://github.com/nedbat/coveragepy/issues/125


.. _changes_34:

Version 3.4 — 2010-09-19
------------------------

- The XML report is now sorted by package name, fixing `issue 88`_.

- Programs that exited with ``sys.exit()`` with no argument weren't handled
  properly, producing a coverage.py stack trace.  That is now fixed.

.. _issue 88: https://github.com/nedbat/coveragepy/issues/88


Version 3.4b2 — 2010-09-06
--------------------------

- Completely un-executed files can now be included in coverage results,
  reported as 0% covered.  This only happens if the --source option is
  specified, since coverage.py needs guidance about where to look for source
  files.

- The XML report output now properly includes a percentage for branch coverage,
  fixing `issue 65`_ and `issue 81`_.

- Coverage percentages are now displayed uniformly across reporting methods.
  Previously, different reports could round percentages differently.  Also,
  percentages are only reported as 0% or 100% if they are truly 0 or 100, and
  are rounded otherwise.  Fixes `issue 41`_ and `issue 70`_.

- The precision of reported coverage percentages can be set with the
  ``[report] precision`` config file setting.  Completes `issue 16`_.

- Threads derived from ``threading.Thread`` with an overridden `run` method
  would report no coverage for the `run` method.  This is now fixed, closing
  `issue 85`_.

.. _issue 16: https://github.com/nedbat/coveragepy/issues/16
.. _issue 41: https://github.com/nedbat/coveragepy/issues/41
.. _issue 65: https://github.com/nedbat/coveragepy/issues/65
.. _issue 70: https://github.com/nedbat/coveragepy/issues/70
.. _issue 81: https://github.com/nedbat/coveragepy/issues/81
.. _issue 85: https://github.com/nedbat/coveragepy/issues/85


Version 3.4b1 — 2010-08-21
--------------------------

- BACKWARD INCOMPATIBILITY: the ``--omit`` and ``--include`` switches now take
  file patterns rather than file prefixes, closing `issue 34`_ and `issue 36`_.

- BACKWARD INCOMPATIBILITY: the `omit_prefixes` argument is gone throughout
  coverage.py, replaced with `omit`, a list of file name patterns suitable for
  `fnmatch`.  A parallel argument `include` controls what files are included.

- The run command now has a ``--source`` switch, a list of directories or
  module names.  If provided, coverage.py will only measure execution in those
  source files.

- Various warnings are printed to stderr for problems encountered during data
  measurement: if a ``--source`` module has no Python source to measure, or is
  never encountered at all, or if no data is collected.

- The reporting commands (report, annotate, html, and xml) now have an
  ``--include`` switch to restrict reporting to modules matching those file
  patterns, similar to the existing ``--omit`` switch. Thanks, Zooko.

- The run command now supports ``--include`` and ``--omit`` to control what
  modules it measures. This can speed execution and reduce the amount of data
  during reporting. Thanks Zooko.

- Since coverage.py 3.1, using the Python trace function has been slower than
  it needs to be.  A cache of tracing decisions was broken, but has now been
  fixed.

- Python 2.7 and 3.2 have introduced new opcodes that are now supported.

- Python files with no statements, for example, empty ``__init__.py`` files,
  are now reported as having zero statements instead of one.  Fixes `issue 1`_.

- Reports now have a column of missed line counts rather than executed line
  counts, since developers should focus on reducing the missed lines to zero,
  rather than increasing the executed lines to varying targets.  Once
  suggested, this seemed blindingly obvious.

- Line numbers in HTML source pages are clickable, linking directly to that
  line, which is highlighted on arrival.  Added a link back to the index page
  at the bottom of each HTML page.

- Programs that call ``os.fork`` will properly collect data from both the child
  and parent processes.  Use ``coverage run -p`` to get two data files that can
  be combined with ``coverage combine``.  Fixes `issue 56`_.

- Coverage.py is now runnable as a module: ``python -m coverage``.  Thanks,
  Brett Cannon.

- When measuring code running in a virtualenv, most of the system library was
  being measured when it shouldn't have been.  This is now fixed.

- Doctest text files are no longer recorded in the coverage data, since they
  can't be reported anyway.  Fixes `issue 52`_ and `issue 61`_.

- Jinja HTML templates compile into Python code using the HTML file name,
  which confused coverage.py.  Now these files are no longer traced, fixing
  `issue 82`_.

- Source files can have more than one dot in them (foo.test.py), and will be
  treated properly while reporting.  Fixes `issue 46`_.

- Source files with DOS line endings are now properly tokenized for syntax
  coloring on non-DOS machines.  Fixes `issue 53`_.

- Unusual code structure that confused exits from methods with exits from
  classes is now properly analyzed.  See `issue 62`_.

- Asking for an HTML report with no files now shows a nice error message rather
  than a cryptic failure ('int' object is unsubscriptable). Fixes `issue 59`_.

.. _issue 1:  https://github.com/nedbat/coveragepy/issues/1
.. _issue 34: https://github.com/nedbat/coveragepy/issues/34
.. _issue 36: https://github.com/nedbat/coveragepy/issues/36
.. _issue 46: https://github.com/nedbat/coveragepy/issues/46
.. _issue 53: https://github.com/nedbat/coveragepy/issues/53
.. _issue 52: https://github.com/nedbat/coveragepy/issues/52
.. _issue 56: https://github.com/nedbat/coveragepy/issues/56
.. _issue 61: https://github.com/nedbat/coveragepy/issues/61
.. _issue 62: https://github.com/nedbat/coveragepy/issues/62
.. _issue 59: https://github.com/nedbat/coveragepy/issues/59
.. _issue 82: https://github.com/nedbat/coveragepy/issues/82


.. _changes_331:

Version 3.3.1 — 2010-03-06
--------------------------

- Using `parallel=True` in .coveragerc file prevented reporting, but now does
  not, fixing `issue 49`_.

- When running your code with "coverage run", if you call `sys.exit()`,
  coverage.py will exit with that status code, fixing `issue 50`_.

.. _issue 49: https://github.com/nedbat/coveragepy/issues/49
.. _issue 50: https://github.com/nedbat/coveragepy/issues/50


.. _changes_33:

Version 3.3 — 2010-02-24
------------------------

- Settings are now read from a .coveragerc file.  A specific file can be
  specified on the command line with --rcfile=FILE.  The name of the file can
  be programmatically set with the `config_file` argument to the coverage()
  constructor, or reading a config file can be disabled with
  `config_file=False`.

- Fixed a problem with nested loops having their branch possibilities
  mis-characterized: `issue 39`_.

- Added coverage.process_startup to enable coverage measurement when Python
  starts.

- Parallel data file names now have a random number appended to them in
  addition to the machine name and process id.

- Parallel data files combined with "coverage combine" are deleted after
  they're combined, to clean up unneeded files.  Fixes `issue 40`_.

- Exceptions thrown from product code run with "coverage run" are now displayed
  without internal coverage.py frames, so the output is the same as when the
  code is run without coverage.py.

- The `data_suffix` argument to the coverage constructor is now appended with
  an added dot rather than simply appended, so that .coveragerc files will not
  be confused for data files.

- Python source files that don't end with a newline can now be executed, fixing
  `issue 47`_.

- Added an AUTHORS.txt file.

.. _issue 39: https://github.com/nedbat/coveragepy/issues/39
.. _issue 40: https://github.com/nedbat/coveragepy/issues/40
.. _issue 47: https://github.com/nedbat/coveragepy/issues/47


.. _changes_32:

Version 3.2 — 2009-12-05
------------------------

- Added a ``--version`` option on the command line.


Version 3.2b4 — 2009-12-01
--------------------------

- Branch coverage improvements:

  - The XML report now includes branch information.

- Click-to-sort HTML report columns are now persisted in a cookie.  Viewing
  a report will sort it first the way you last had a coverage report sorted.
  Thanks, `Chris Adams`_.

- On Python 3.x, setuptools has been replaced by `Distribute`_.

.. _Distribute: https://pypi.org/project/distribute/


Version 3.2b3 — 2009-11-23
--------------------------

- Fixed a memory leak in the C tracer that was introduced in 3.2b1.

- Branch coverage improvements:

  - Branches to excluded code are ignored.

- The table of contents in the HTML report is now sortable: click the headers
  on any column.  Thanks, `Chris Adams`_.

.. _Chris Adams: http://chris.improbable.org


Version 3.2b2 — 2009-11-19
--------------------------

- Branch coverage improvements:

  - Classes are no longer incorrectly marked as branches: `issue 32`_.

  - "except" clauses with types are no longer incorrectly marked as branches:
    `issue 35`_.

- Fixed some problems syntax coloring sources with line continuations and
  source with tabs: `issue 30`_ and `issue 31`_.

- The --omit option now works much better than before, fixing `issue 14`_ and
  `issue 33`_.  Thanks, Danek Duvall.

.. _issue 14: https://github.com/nedbat/coveragepy/issues/14
.. _issue 30: https://github.com/nedbat/coveragepy/issues/30
.. _issue 31: https://github.com/nedbat/coveragepy/issues/31
.. _issue 32: https://github.com/nedbat/coveragepy/issues/32
.. _issue 33: https://github.com/nedbat/coveragepy/issues/33
.. _issue 35: https://github.com/nedbat/coveragepy/issues/35


Version 3.2b1 — 2009-11-10
--------------------------

- Branch coverage!

- XML reporting has file paths that let Cobertura find the source code.

- The tracer code has changed; it's a few percent faster.

- Some exceptions reported by the command line interface have been cleaned up
  so that tracebacks inside coverage.py aren't shown.  Fixes `issue 23`_.

.. _issue 23: https://github.com/nedbat/coveragepy/issues/23


.. _changes_31:

Version 3.1 — 2009-10-04
------------------------

- Source code can now be read from eggs.  Thanks, Ross Lawley.  Fixes
  `issue 25`_.

.. _issue 25: https://github.com/nedbat/coveragepy/issues/25


Version 3.1b1 — 2009-09-27
--------------------------

- Python 3.1 is now supported.

- Coverage.py has a new command line syntax with sub-commands.  This expands
  the possibilities for adding features and options in the future.  The old
  syntax is still supported.  Try "coverage help" to see the new commands.
  Thanks to Ben Finney for early help.

- Added an experimental "coverage xml" command for producing coverage reports
  in a Cobertura-compatible XML format.  Thanks, Bill Hart.

- Added the --timid option to enable a simpler slower trace function that works
  for DecoratorTools projects, including TurboGears.  Fixed `issue 12`_ and
  `issue 13`_.

- HTML reports show modules from other directories.  Fixed `issue 11`_.

- HTML reports now display syntax-colored Python source.

- Programs that change directory will still write .coverage files in the
  directory where execution started.  Fixed `issue 24`_.

- Added a "coverage debug" command for getting diagnostic information about the
  coverage.py installation.

.. _issue 11: https://github.com/nedbat/coveragepy/issues/11
.. _issue 12: https://github.com/nedbat/coveragepy/issues/12
.. _issue 13: https://github.com/nedbat/coveragepy/issues/13
.. _issue 24: https://github.com/nedbat/coveragepy/issues/24


.. _changes_301:

Version 3.0.1 — 2009-07-07
--------------------------

- Removed the recursion limit in the tracer function.  Previously, code that
  ran more than 500 frames deep would crash. Fixed `issue 9`_.

- Fixed a bizarre problem involving pyexpat, whereby lines following XML parser
  invocations could be overlooked.  Fixed `issue 10`_.

- On Python 2.3, coverage.py could mis-measure code with exceptions being
  raised.  This is now fixed.

- The coverage.py code itself will now not be measured by coverage.py, and no
  coverage.py modules will be mentioned in the nose --with-cover plug-in.
  Fixed `issue 8`_.

- When running source files, coverage.py now opens them in universal newline
  mode just like Python does.  This lets it run Windows files on Mac, for
  example.

.. _issue 9: https://github.com/nedbat/coveragepy/issues/9
.. _issue 10: https://github.com/nedbat/coveragepy/issues/10
.. _issue 8: https://github.com/nedbat/coveragepy/issues/8


.. _changes_30:

Version 3.0 — 2009-06-13
------------------------

- Fixed the way the Python library was ignored.  Too much code was being
  excluded the old way.

- Tabs are now properly converted in HTML reports.  Previously indentation was
  lost.  Fixed `issue 6`_.

- Nested modules now get a proper flat_rootname.  Thanks, Christian Heimes.

.. _issue 6: https://github.com/nedbat/coveragepy/issues/6


Version 3.0b3 — 2009-05-16
--------------------------

- Added parameters to coverage.__init__ for options that had been set on the
  coverage object itself.

- Added clear_exclude() and get_exclude_list() methods for programmatic
  manipulation of the exclude regexes.

- Added coverage.load() to read previously-saved data from the data file.

- Improved the finding of code files.  For example, .pyc files that have been
  installed after compiling are now located correctly.  Thanks, Detlev
  Offenbach.

- When using the object API (that is, constructing a coverage() object), data
  is no longer saved automatically on process exit.  You can re-enable it with
  the auto_data=True parameter on the coverage() constructor.  The module-level
  interface still uses automatic saving.  See the sketch after this list.


Version 3.0b โ€” 2009-04-30
-------------------------

HTML reporting, and continued refactoring.

- HTML reports and annotation of source files: use the new -b (browser) switch.
  Thanks to George Song for code, inspiration and guidance.

- Code in the Python standard library is not measured by default.  If you need
  to measure standard library code, use the -L command-line switch during
  execution, or the cover_pylib=True argument to the coverage() constructor.

- Source annotation into a directory (-a -d) behaves differently.  The
  annotated files are named with their hierarchy flattened so that same-named
  files from different directories no longer collide.  Also, only files in the
  current tree are included.

- coverage.annotate_file is no longer available.

- Programs executed with -x now behave more as they should, for example,
  __file__ has the correct value.

- .coverage data files have a new pickle-based format designed for better
  extensibility.

- Removed the undocumented cache_file argument to coverage.usecache().


Version 3.0b1 โ€” 2009-03-07
--------------------------

Major overhaul.

- Coverage.py is now a package rather than a module.  Functionality has been
  split into classes.

- The trace function is implemented in C for speed.  Coverage.py runs are now
  much faster.  Thanks to David Christian for productive micro-sprints and
  other encouragement.

- Executable lines are identified by reading the line number tables in the
  compiled code, removing a great deal of complicated analysis code.

- Precisely which lines are considered executable has changed in some cases.
  Therefore, your coverage stats may also change slightly.

- The singleton coverage object is only created if the module-level functions
  are used.  This maintains the old interface while allowing better
  programmatic use of coverage.py.

- The minimum supported Python version is 2.3.


Version 2.85 โ€” 2008-09-14
-------------------------

- Add support for finding source files in eggs. Don't check for
  morfs being instances of ModuleType; instead, use duck typing so that
  pseudo-modules can participate. Thanks, Imri Goldberg.

- Use os.realpath as part of the fixing of file names so that symlinks won't
  confuse things. Thanks, Patrick Mezard.


Version 2.80 โ€” 2008-05-25
-------------------------

- Open files in rU mode to avoid line ending craziness. Thanks, Edward Loper.


Version 2.78 โ€” 2007-09-30
-------------------------

- Don't try to predict whether a file is Python source based on the extension.
  Extension-less files are often Python scripts. Instead, simply parse the
  file and catch the syntax errors. Hat tip to Ben Finney.


Version 2.77 โ€” 2007-07-29
-------------------------

- Better packaging.


Version 2.76 โ€” 2007-07-23
-------------------------

- Now Python 2.5 is *really* fully supported: the body of the new with
  statement is counted as executable.


Version 2.75 โ€” 2007-07-22
-------------------------

- Python 2.5 now fully supported. The method of dealing with multi-line
  statements is now less sensitive to the exact line that Python reports during
  execution. Pass statements are handled specially so that their disappearance
  during execution won't throw off the measurement.


Version 2.7 โ€” 2007-07-21
------------------------

- "#pragma: nocover" is excluded by default.

- Properly ignore docstrings and other constant expressions that appear in the
  middle of a function, a problem reported by Tim Leslie.

- coverage.erase() shouldn't clobber the exclude regex. Change how parallel
  mode is invoked, and fix erase() so that it erases the cache when called
  programmatically.

- In reports, ignore code executed from strings, since we can't do anything
  useful with it anyway.

- Better file handling on Linux, thanks Guillaume Chazarain.

- Better shell support on Windows, thanks Noel O'Boyle.

- Python 2.2 support maintained, thanks Catherine Proulx.

- Minor changes to avoid lint warnings.


Version 2.6 โ€” 2006-08-23
------------------------

- Applied Joseph Tate's patch for function decorators.

- Applied Sigve Tjora and Mark van der Wal's fixes for argument handling.

- Applied Geoff Bache's parallel mode patch.

- Refactorings to improve testability. Fixes to command-line logic for parallel
  mode and collect.


Version 2.5 โ€” 2005-12-04
------------------------

- Call threading.settrace so that all threads are measured. Thanks Martin
  Fuzzey.

- Add a file argument to report so that reports can be captured to a different
  destination.

- Coverage.py can now measure itself.

- Adapted Greg Rogers' patch for using relative file names, and sorting and
  omitting files to report on.


Version 2.2 โ€” 2004-12-31
------------------------

- Allow for keyword arguments in the module global functions. Thanks, Allen.


Version 2.1 โ€” 2004-12-14
------------------------

- Return 'analysis' to its original behavior and add 'analysis2'. Add a global
  for 'annotate', and factor it, adding 'annotate_file'.


Version 2.0 โ€” 2004-12-12
------------------------

Significant code changes.

- Finding executable statements has been rewritten so that docstrings and
  other quirks of Python execution aren't mistakenly identified as missing
  lines.

- Lines can be excluded from consideration, even entire suites of lines.

- The file system cache of covered lines can be disabled programmatically.

- Modernized the code.


Earlier History
---------------

2001-12-04 GDR Created.

2001-12-06 GDR Added command-line interface and source code annotation.

2001-12-09 GDR Moved design and interface to separate documents.

2001-12-10 GDR Open cache file as binary on Windows. Allow simultaneous -e and
-x, or -a and -r.

2001-12-12 GDR Added command-line help. Cache analysis so that it only needs to
be done once when you specify -a and -r.

2001-12-13 GDR Improved speed while recording. Portable between Python 1.5.2
and 2.1.1.

2002-01-03 GDR Module-level functions work correctly.

2002-01-07 GDR Update sys.path when running a file with the -x option, so that
it matches the value the program would get if it were run on its own.

coverage-7.4.4/doc/cmd.rst

.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

.. This file is processed with cog to insert the latest command
   help into the docs. If it's out of date, the quality checks will fail.
   Running "make prebuild" will bring it up to date.

.. [[[cog
    from cog_helpers import show_configs, show_help
.. ]]]
.. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e)

.. _cmd:

==================
Command line usage
==================

.. highlight:: console

When you install coverage.py, a command-line script called ``coverage`` is
placed on your path.  To help with multi-version installs, it will also create
a ``coverage3`` alias, and a ``coverage-X.Y`` alias, depending on the version
of Python you're using.  For example, when installing on Python 3.10, you will
be able to use ``coverage``, ``coverage3``, or ``coverage-3.10`` on the command
line.

Coverage.py has a number of commands:

* **run** -- :ref:`Run a Python program and collect execution data <cmd_run>`.

* **combine** -- :ref:`Combine together a number of data files <cmd_combine>`.

* **erase** -- :ref:`Erase previously collected coverage data <cmd_erase>`.

* **report** -- :ref:`Report coverage results <cmd_report>`.

* **html** --
  :ref:`Produce annotated HTML listings with coverage results <cmd_html>`.

* **xml** -- :ref:`Produce an XML report with coverage results <cmd_xml>`.

* **json** -- :ref:`Produce a JSON report with coverage results <cmd_json>`.

* **lcov** -- :ref:`Produce an LCOV report with coverage results <cmd_lcov>`.

* **annotate** --
  :ref:`Annotate source files with coverage results <cmd_annotate>`.

* **debug** -- :ref:`Get diagnostic information <cmd_debug>`.

Help is available with the **help** command, or with the ``--help`` switch on
any other command::

    $ coverage help
    $ coverage help run
    $ coverage run --help

Version information for coverage.py can be displayed with
``coverage --version``:

.. parsed-literal::

    $ coverage --version
    Coverage.py, version |release| with C extension
    Documentation at |doc-url|

Any command can use a configuration file by specifying it with the
``--rcfile=FILE`` command-line switch.  Any option you can set on the command
line can also be set in the configuration file.  This can be a better way to
control coverage.py since the configuration file can be checked into source
control, and can provide options that other invocation techniques (like test
runner plugins) may not offer. See :ref:`config` for more details.
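
For example, a CI job might keep its coverage settings in a dedicated file and
point every command at it.  This is only a sketch: the file name and the use of
pytest here are illustrative, not anything coverage.py requires::

    $ coverage run --rcfile=ci/coveragerc -m pytest
    $ coverage report --rcfile=ci/coveragerc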


.. _cmd_run:

Execution: ``coverage run``
---------------------------

You collect execution data by running your Python program with the **run**
command::

    $ coverage run my_program.py arg1 arg2
    blah blah ..your program's output.. blah blah

Your program runs just as if it had been invoked with the Python command line.
Arguments after your file name are passed to your program as usual in
``sys.argv``.  Rather than providing a file name, you can use the ``-m`` switch
and specify an importable module name instead, just as you can with the
Python ``-m`` switch::

    $ coverage run -m packagename.modulename arg1 arg2
    blah blah ..your program's output.. blah blah

.. note::

    In most cases, the program to use here is a test runner, not your program
    you are trying to measure. The test runner will run your tests and coverage
    will measure the coverage of your code along the way.
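
For example, a common invocation is to run a test suite under coverage.py with
the ``-m`` form.  This assumes a pytest-based suite; any runner that can be
invoked with ``-m`` works the same way::

    $ coverage run -m pytest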

There are many options:

.. [[[cog show_help("run") ]]]
.. code::

    $ coverage run --help
    Usage: coverage run [options] <pyfile> [program options]

    Run a Python program, measuring code execution.

    Options:
      -a, --append          Append coverage data to .coverage, otherwise it starts
                            clean each time.
      --branch              Measure branch coverage in addition to statement
                            coverage.
      --concurrency=LIBS    Properly measure code using a concurrency library.
                            Valid values are: eventlet, gevent, greenlet,
                            multiprocessing, thread, or a comma-list of them.
      --context=LABEL       The context label to record for this coverage run.
      --data-file=OUTFILE   Write the recorded coverage data to this file.
                            Defaults to '.coverage'. [env: COVERAGE_FILE]
      --include=PAT1,PAT2,...
                            Include only files whose paths match one of these
                            patterns. Accepts shell-style wildcards, which must be
                            quoted.
      -m, --module          <pyfile> is an importable Python module, not a script
                            path, to be run as 'python -m' would run it.
      --omit=PAT1,PAT2,...  Omit files whose paths match one of these patterns.
                            Accepts shell-style wildcards, which must be quoted.
      -L, --pylib           Measure coverage even inside the Python installed
                            library, which isn't done by default.
      -p, --parallel-mode   Append the machine name, process id and random number
                            to the data file name to simplify collecting data from
                            many processes.
      --source=SRC1,SRC2,...
                            A list of directories or importable names of code to
                            measure.
      --timid               Use the slower Python trace function core.
      --debug=OPTS          Debug options, separated by commas. [env:
                            COVERAGE_DEBUG]
      -h, --help            Get help on this command.
      --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                            'setup.cfg', 'tox.ini', and 'pyproject.toml' are
                            tried. [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: b1a0fffe2768fc142f1d97ae556b621d)

If you want :ref:`branch coverage <branch>` measurement, use the ``--branch``
flag.  Otherwise only statement coverage is measured.

You can specify the code to measure with the ``--source``, ``--include``, and
``--omit`` switches.  See :ref:`Specifying source files <source>` for
details of their interpretation.  Remember to put options for run after "run",
but before the program invocation::

    $ coverage run --source=dir1,dir2 my_program.py arg1 arg2
    $ coverage run --source=dir1,dir2 -m packagename.modulename arg1 arg2

.. note::

    Specifying ``--source`` on the ``coverage run`` command line won't affect
    subsequent reporting commands like ``coverage xml``.  Use the :ref:`source
    <config_run_source>` setting in the configuration file to apply the setting
    uniformly to all commands.


Coverage.py can measure multi-threaded programs by default. If you are using
other concurrency support, with the `multiprocessing`_, `greenlet`_,
`eventlet`_, or `gevent`_ libraries, then coverage.py can get confused. Use the
``--concurrency`` switch to properly measure programs using these libraries.
Give it a value of ``multiprocessing``, ``thread``, ``greenlet``, ``eventlet``,
or ``gevent``.  Values other than ``thread`` require the :ref:`C extension
<install_extension>`.

You can combine multiple values for ``--concurrency``, separated with commas.
You can specify ``thread`` and also one of ``eventlet``, ``gevent``, or
``greenlet``.

If you are using ``--concurrency=multiprocessing``, you must set other options
in the configuration file.  Options on the command line will not be passed to
the processes that multiprocessing creates.  Best practice is to use the
configuration file for all options.

.. _multiprocessing: https://docs.python.org/3/library/multiprocessing.html
.. _greenlet: https://greenlet.readthedocs.io/
.. _gevent: https://www.gevent.org/
.. _eventlet: https://eventlet.readthedocs.io/

If you are measuring coverage in a multi-process program, or across a number of
machines, you'll want the ``--parallel-mode`` switch to keep the data separate
during measurement.  See :ref:`cmd_combine` below.
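
For example, a typical multi-process workflow runs each piece in parallel mode
and then combines the results (the program name and its options here are purely
illustrative)::

    $ coverage run --parallel-mode my_program.py --shard=1
    $ coverage run --parallel-mode my_program.py --shard=2
    $ coverage combine
    $ coverage report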

You can specify a :ref:`static context <static_contexts>` for a coverage run
with ``--context``.  This can be any label you want, and will be recorded with
the data.  See :ref:`contexts` for more information.
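
For example, you might label each run with the environment it represents.  The
label itself is arbitrary, and the use of pytest here is only illustrative::

    $ coverage run --context=py311-unit -m pytest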

By default, coverage.py does not measure code installed with the Python
interpreter, for example, the standard library. If you want to measure that
code as well as your own, add the ``-L`` (or ``--pylib``) flag.

If your coverage results seem to be overlooking code that you know has been
executed, try running coverage.py again with the ``--timid`` flag.  This uses a
simpler but slower trace method, and might be needed in rare cases.

In Python 3.12 and above, you can try an experimental core based on the new
:mod:`sys.monitoring <sys.monitoring>` module by defining a
``COVERAGE_CORE=sysmon`` environment variable.  This should be faster, though
plugins and dynamic contexts are not yet supported with it.
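
For example, to try the experimental core for a single run, set the environment
variable just for that command (the program name is illustrative)::

    $ COVERAGE_CORE=sysmon coverage run my_program.py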

Coverage.py sets an environment variable, ``COVERAGE_RUN``, to indicate that
your code is running under coverage measurement.  The value is not relevant,
and may change in the future.

These options can also be set in the :ref:`config_run` section of your
.coveragerc file.


.. _cmd_warnings:

Warnings
........

During execution, coverage.py may warn you about conditions it detects that
could affect the measurement process.  The possible warnings include:

Couldn't parse Python file XXX (couldnt-parse)
  During reporting, a file was thought to be Python, but it couldn't be parsed
  as Python.

Trace function changed, data is likely wrong: XXX (trace-changed)
  Coverage measurement depends on a Python setting called the trace function.
  Other Python code in your product might change that function, which will
  disrupt coverage.py's measurement.  This warning indicates that has happened.
  The XXX in the message is the new trace function value, which might provide
  a clue to the cause.

Module XXX has no Python source (module-not-python)
  You asked coverage.py to measure module XXX, but once it was imported, it
  turned out not to have a corresponding .py file.  Without a .py file,
  coverage.py can't report on missing lines.

Module XXX was never imported (module-not-imported)
  You asked coverage.py to measure module XXX, but it was never imported by
  your program.

No data was collected (no-data-collected)
  Coverage.py ran your program, but didn't measure any lines as executed.
  This could be because you asked to measure only modules that never ran,
  or for other reasons.

  To debug this problem, try using ``run --debug=trace`` to see the tracing
  decision made for each file.

Module XXX was previously imported, but not measured (module-not-measured)
  You asked coverage.py to measure module XXX, but it had already been imported
  when coverage started.  This meant coverage.py couldn't monitor its
  execution.

Already imported a file that will be measured: XXX (already-imported)
  File XXX had already been imported when coverage.py started measurement. Your
  setting for ``--source`` or ``--include`` indicates that you wanted to
  measure that file.  Lines will be missing from the coverage report since the
  execution during import hadn't been measured.

\-\-include is ignored because \-\-source is set (include-ignored)
  Both ``--include`` and ``--source`` were specified while running code.  Both
  are meant to focus measurement on a particular part of your source code, so
  ``--include`` is ignored in favor of ``--source``.

Conflicting dynamic contexts (dynamic-conflict)
  The ``[run] dynamic_context`` option is set in the configuration file, but
  something (probably a test runner plugin) is also calling the
  :meth:`.Coverage.switch_context` function to change the context. Only one of
  these mechanisms should be in use at a time.

sys.monitoring isn't available, using default core (no-sysmon)
  You requested to use the sys.monitoring measurement core, but are running on
  Python 3.11 or lower where it isn't available.  A default core will be used
  instead.

Individual warnings can be disabled with the :ref:`disable_warnings
<config_run_disable_warnings>` configuration setting.  To silence "No data was
collected," add this to your configuration file:

.. [[[cog
    show_configs(
        ini=r"""
            [run]
            disable_warnings = no-data-collected
            """,
        toml=r"""
            [tool.coverage.run]
            disable_warnings = ["no-data-collected"]
            """,
        )
.. ]]]

.. tabs::

    .. code-tab:: ini
        :caption: .coveragerc

        [run]
        disable_warnings = no-data-collected

    .. code-tab:: toml
        :caption: pyproject.toml

        [tool.coverage.run]
        disable_warnings = ["no-data-collected"]

    .. code-tab:: ini
        :caption: setup.cfg, tox.ini

        [coverage:run]
        disable_warnings = no-data-collected

.. [[[end]]] (checksum: 66c0c28e863c2a44218190a8a6a3f707)


.. _cmd_datafile:

Data file
.........

Coverage.py collects execution data in a file called ".coverage".  If need be,
you can set a new file name with the ``COVERAGE_FILE`` environment variable.
This can include a path to another directory.

By default, each run of your program starts with an empty data set. If you need
to run your program multiple times to get complete data (for example, because
you need to supply different options), you can accumulate data across runs with
the ``--append`` flag on the **run** command.
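
For example, if complete data requires two runs with different options, append
the second run's data to the first (the program and its options are only
illustrative)::

    $ coverage run my_program.py --mode=fast
    $ coverage run --append my_program.py --mode=slow
    $ coverage report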


.. _cmd_combine:

Combining data files: ``coverage combine``
------------------------------------------

Often test suites are run under different conditions, for example, with
different versions of Python, or dependencies, or on different operating
systems.  In these cases, you can collect coverage data for each test run, and
then combine all the separate data files into one combined file for reporting.

The **combine** command reads a number of separate data files, matches the data
by source file name, and writes a combined data file with all of the data.

Coverage normally writes data to a file named ".coverage".  The ``run
--parallel-mode`` switch (or ``[run] parallel=True`` configuration option)
tells coverage to expand the file name to include machine name, process id, and
a random number so that every data file is distinct::

    .coverage.Neds-MacBook-Pro.local.88335.316857
    .coverage.Geometer.8044.799674

You can also define a new data file name with the ``[run] data_file`` option.

Once you have created a number of these files, you can copy them all to a
single directory, and use the **combine** command to combine them into one
.coverage data file::

    $ coverage combine

You can also name directories or files to be combined on the command line::

    $ coverage combine data1.dat windows_data_files/

Coverage.py will collect the data from those places and combine them.  The
current directory isn't searched if you use command-line arguments.  If you
also want data from the current directory, name it explicitly on the command
line.

When coverage.py combines data files, it looks for files named the same as the
data file (defaulting to ".coverage"), with a dotted suffix.  Here are some
examples of data files that can be combined::

    .coverage.machine1
    .coverage.20120807T212300
    .coverage.last_good_run.ok

An existing combined data file is ignored and re-written. If you want to use
**combine** to accumulate results into the .coverage data file over a number of
runs, use the ``--append`` switch on the **combine** command.  This behavior
was the default before version 4.2.

If any of the data files can't be read, coverage.py will print a warning
indicating the file and the problem.

The original input data files are deleted once they've been combined. If you
want to keep those files, use the ``--keep`` command-line option.

.. [[[cog show_help("combine") ]]]
.. code::

    $ coverage combine --help
    Usage: coverage combine [options] <path1> <path2> ... <pathN>

    Combine data from multiple coverage files. The combined results are written to
    a single file representing the union of the data. The positional arguments are
    data files or directories containing data files. If no paths are provided,
    data files in the default data file's directory are combined.

    Options:
      -a, --append          Append coverage data to .coverage, otherwise it starts
                            clean each time.
      --data-file=DATAFILE  Base name of the data files to operate on. Defaults to
                            '.coverage'. [env: COVERAGE_FILE]
      --keep                Keep original coverage files, otherwise they are
                            deleted.
      -q, --quiet           Don't print messages about what is happening.
      --debug=OPTS          Debug options, separated by commas. [env:
                            COVERAGE_DEBUG]
      -h, --help            Get help on this command.
      --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                            'setup.cfg', 'tox.ini', and 'pyproject.toml' are
                            tried. [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: 0bdd83f647ee76363c955bedd9ddf749)


.. _cmd_combine_remapping:

Re-mapping paths
................

To combine data for a source file, coverage has to find its data in each of the
data files.  Different test runs may run the same source file from different
locations. For example, different operating systems will use different paths
for the same file, or perhaps each Python version is run from a different
subdirectory.  Coverage needs to know that different file paths are actually
the same source file for reporting purposes.

You can tell coverage.py how different source locations relate with a
``[paths]`` section in your configuration file (see :ref:`config_paths`).
It might be more convenient to use the ``[run] relative_files``
setting to store relative file paths (see :ref:`relative_files
<config_run_relative_files>`).

If data isn't combining properly, you can see details about the inner workings
with ``--debug=pathmap``.
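
For example, to see each remapping decision while combining previously
collected data files in the current directory::

    $ coverage combine --debug=pathmap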


.. _cmd_erase:

Erase data: ``coverage erase``
------------------------------

To erase the collected data, use the **erase** command:

.. [[[cog show_help("erase") ]]]
.. code::

    $ coverage erase --help
    Usage: coverage erase [options]

    Erase previously collected coverage data.

    Options:
      --data-file=DATAFILE  Base name of the data files to operate on. Defaults to
                            '.coverage'. [env: COVERAGE_FILE]
      --debug=OPTS          Debug options, separated by commas. [env:
                            COVERAGE_DEBUG]
      -h, --help            Get help on this command.
      --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                            'setup.cfg', 'tox.ini', and 'pyproject.toml' are
                            tried. [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: cfeaef66ce8d5154dc6914831030b46b)

If your configuration file indicates parallel data collection, **erase** will
remove all of the data files.


.. _cmd_reporting:

Reporting
---------

Coverage.py provides a few styles of reporting, with the **report**, **html**,
**annotate**, **json**, **lcov**, and **xml** commands.  They share a number
of common options.

The command-line arguments are module or file names to report on, if you'd like
to report on a subset of the data collected.

The ``--include`` and ``--omit`` flags specify lists of file name patterns.
They control which files to report on, and are described in more detail in
:ref:`source`.

The ``-i`` or ``--ignore-errors`` switch tells coverage.py to ignore problems
encountered trying to find source files to report on.  This can be useful if
some files are missing, or if your Python execution is tricky enough that file
names are synthesized without real source files.

If you provide a ``--fail-under`` value, the total percentage covered will be
compared to that value.  If it is less, the command will exit with a status
code of 2, indicating that the total coverage was less than your target.  This
can be used as part of a pass/fail condition, for example in a continuous
integration server.  This option isn't available for **annotate**.
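
For example, a continuous integration step can enforce a minimum total
coverage.  The threshold here is only an illustration; if the total is below
it, the command exits with status 2::

    $ coverage report --fail-under=85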

These options can also be set in your .coveragerc file. See
:ref:`Configuration: [report] <config_report>`.


.. _cmd_report:

Coverage summary: ``coverage report``
-------------------------------------

The simplest reporting is a textual summary produced with **report**::

    $ coverage report
    Name                      Stmts   Miss  Cover
    ---------------------------------------------
    my_program.py                20      4    80%
    my_module.py                 15      2    86%
    my_other_module.py           56      6    89%
    ---------------------------------------------
    TOTAL                        91     12    87%

For each module executed, the report shows the count of executable statements,
the number of those statements missed, and the resulting coverage, expressed
as a percentage.

.. [[[cog show_help("report") ]]]
.. code::

    $ coverage report --help
    Usage: coverage report [options] [modules]

    Report coverage statistics on modules.

    Options:
      --contexts=REGEX1,REGEX2,...
                            Only display data from lines covered in the given
                            contexts. Accepts Python regexes, which must be
                            quoted.
      --data-file=INFILE    Read coverage data for report generation from this
                            file. Defaults to '.coverage'. [env: COVERAGE_FILE]
      --fail-under=MIN      Exit with a status of 2 if the total coverage is less
                            than MIN.
      --format=FORMAT       Output format, either text (default), markdown, or
                            total.
      -i, --ignore-errors   Ignore errors while reading source files.
      --include=PAT1,PAT2,...
                            Include only files whose paths match one of these
                            patterns. Accepts shell-style wildcards, which must be
                            quoted.
      --omit=PAT1,PAT2,...  Omit files whose paths match one of these patterns.
                            Accepts shell-style wildcards, which must be quoted.
      --precision=N         Number of digits after the decimal point to display
                            for reported coverage percentages.
      --sort=COLUMN         Sort the report by the named column: name, stmts,
                            miss, branch, brpart, or cover. Default is name.
      -m, --show-missing    Show line numbers of statements in each module that
                            weren't executed.
      --skip-covered        Skip files with 100% coverage.
      --no-skip-covered     Disable --skip-covered.
      --skip-empty          Skip files with no code.
      --debug=OPTS          Debug options, separated by commas. [env:
                            COVERAGE_DEBUG]
      -h, --help            Get help on this command.
      --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                            'setup.cfg', 'tox.ini', and 'pyproject.toml' are
                            tried. [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: 167272a29d9e7eb017a592a0e0747a06)

The ``-m`` flag also shows the line numbers of missing statements::

    $ coverage report -m
    Name                      Stmts   Miss  Cover   Missing
    -------------------------------------------------------
    my_program.py                20      4    80%   33-35, 39
    my_module.py                 15      2    86%   8, 12
    my_other_module.py           56      6    89%   17-23
    -------------------------------------------------------
    TOTAL                        91     12    87%

If you are using branch coverage, then branch statistics will be reported in
the Branch and BrPart (for Partial Branch) columns, and the Missing column will
detail the missed branches::

    $ coverage report -m
    Name                      Stmts   Miss Branch BrPart  Cover   Missing
    ---------------------------------------------------------------------
    my_program.py                20      4     10      2    80%   33-35, 36->38, 39
    my_module.py                 15      2      3      0    86%   8, 12
    my_other_module.py           56      6      5      1    89%   17-23, 40->45
    ---------------------------------------------------------------------
    TOTAL                        91     12     18      3    87%

You can restrict the report to only certain files by naming them on the
command line::

    $ coverage report -m my_program.py my_other_module.py
    Name                      Stmts   Miss  Cover   Missing
    -------------------------------------------------------
    my_program.py                20      4    80%   33-35, 39
    my_other_module.py           56      6    89%   17-23
    -------------------------------------------------------
    TOTAL                        76     10    87%

The ``--skip-covered`` switch will skip any file with 100% coverage, letting
you focus on the files that still need attention. The ``--no-skip-covered``
option can be used if needed to see all the files.  The ``--skip-empty`` switch
will skip any file with no executable statements.

If you have :ref:`recorded contexts <contexts>`, the ``--contexts`` option lets
you choose which contexts to report on.  See :ref:`context_reporting` for
details.

The ``--precision`` option controls the number of digits displayed after the
decimal point in coverage percentages, defaulting to none.

The ``--sort`` option is the name of a column to sort the report by.

The ``--format`` option controls the style of the report.  ``--format=text``
creates plain text tables as shown above.  ``--format=markdown`` creates
Markdown tables.  ``--format=total`` writes out a single number, the total
coverage percentage as shown at the end of the tables, but without a percent
sign.
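
For example, with the same data as the tables above, the ``total`` format is
convenient for scripting because it prints just one number (the trailing
``87`` is the sample output)::

    $ coverage report --format=total
    87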

Other common reporting options are described above in :ref:`cmd_reporting`.
These options can also be set in your .coveragerc file. See
:ref:`Configuration: [report] <config_report>`.


.. _cmd_html:

HTML reporting: ``coverage html``
---------------------------------

Coverage.py can annotate your source code to show which lines were executed
and which were not.  The **html** command creates an HTML report similar to the
**report** summary, but as an HTML file.  Each module name links to the source
file decorated to show the status of each line.

Here's a `sample report`__.

__ https://nedbatchelder.com/files/sample_coverage_html/index.html

Lines are highlighted: green for executed, red for missing, and gray for
excluded.  If you've used branch coverage, partial branches are yellow.  The
colored counts at the top of the file are buttons to turn on and off the
highlighting.

A number of keyboard shortcuts are available for navigating the report.
Click the keyboard icon in the upper right to see the complete list.

.. [[[cog show_help("html") ]]]
.. code::

    $ coverage html --help
    Usage: coverage html [options] [modules]

    Create an HTML report of the coverage of the files.  Each file gets its own
    page, with the source decorated to show executed, excluded, and missed lines.

    Options:
      --contexts=REGEX1,REGEX2,...
                            Only display data from lines covered in the given
                            contexts. Accepts Python regexes, which must be
                            quoted.
      -d DIR, --directory=DIR
                            Write the output files to DIR.
      --data-file=INFILE    Read coverage data for report generation from this
                            file. Defaults to '.coverage'. [env: COVERAGE_FILE]
      --fail-under=MIN      Exit with a status of 2 if the total coverage is less
                            than MIN.
      -i, --ignore-errors   Ignore errors while reading source files.
      --include=PAT1,PAT2,...
                            Include only files whose paths match one of these
                            patterns. Accepts shell-style wildcards, which must be
                            quoted.
      --omit=PAT1,PAT2,...  Omit files whose paths match one of these patterns.
                            Accepts shell-style wildcards, which must be quoted.
      --precision=N         Number of digits after the decimal point to display
                            for reported coverage percentages.
      -q, --quiet           Don't print messages about what is happening.
      --show-contexts       Show contexts for covered lines.
      --skip-covered        Skip files with 100% coverage.
      --no-skip-covered     Disable --skip-covered.
      --skip-empty          Skip files with no code.
      --title=TITLE         A text string to use as the title on the HTML.
      --debug=OPTS          Debug options, separated by commas. [env:
                            COVERAGE_DEBUG]
      -h, --help            Get help on this command.
      --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                            'setup.cfg', 'tox.ini', and 'pyproject.toml' are
                            tried. [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: e3a1a6e24ad9b303ba06d42880ed0219)

The title of the report can be set with the ``title`` setting in the
``[html]`` section of the configuration file, or the ``--title`` switch on
the command line.

If you prefer a different style for your HTML report, you can provide your
own CSS file to apply, by specifying a CSS file in the ``[html]`` section of
the configuration file.  See :ref:`config_html_extra_css` for details.

The ``-d`` argument specifies an output directory, defaulting to "htmlcov"::

    $ coverage html -d coverage_html

Other common reporting options are described above in :ref:`cmd_reporting`.

Generating the HTML report can be time-consuming.  Stored with the HTML report
is a data file that is used to speed up reporting the next time.  If you
generate a new report into the same directory, coverage.py will skip
generating unchanged pages, making the process faster.

The ``--skip-covered`` switch will skip any file with 100% coverage, letting
you focus on the files that still need attention.  The ``--skip-empty`` switch
will skip any file with no executable statements.

The ``--precision`` option controls the number of digits displayed after the
decimal point in coverage percentages, defaulting to none.

If you have :ref:`recorded contexts <contexts>`, the ``--contexts`` option lets
you choose which contexts to report on, and the ``--show-contexts`` option will
annotate lines with the contexts that ran them.  See :ref:`context_reporting`
for details.

These options can also be set in your .coveragerc file. See
:ref:`Configuration: [html] <config_html>`.


.. _cmd_xml:

XML reporting: ``coverage xml``
-------------------------------

The **xml** command writes coverage data to a "coverage.xml" file in a format
compatible with `Cobertura`_.

.. _Cobertura: http://cobertura.github.io/cobertura/

.. [[[cog show_help("xml") ]]]
.. code::

    $ coverage xml --help
    Usage: coverage xml [options] [modules]

    Generate an XML report of coverage results.

    Options:
      --data-file=INFILE    Read coverage data for report generation from this
                            file. Defaults to '.coverage'. [env: COVERAGE_FILE]
      --fail-under=MIN      Exit with a status of 2 if the total coverage is less
                            than MIN.
      -i, --ignore-errors   Ignore errors while reading source files.
      --include=PAT1,PAT2,...
                            Include only files whose paths match one of these
                            patterns. Accepts shell-style wildcards, which must be
                            quoted.
      --omit=PAT1,PAT2,...  Omit files whose paths match one of these patterns.
                            Accepts shell-style wildcards, which must be quoted.
      -o OUTFILE            Write the XML report to this file. Defaults to
                            'coverage.xml'
      -q, --quiet           Don't print messages about what is happening.
      --skip-empty          Skip files with no code.
      --debug=OPTS          Debug options, separated by commas. [env:
                            COVERAGE_DEBUG]
      -h, --help            Get help on this command.
      --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                            'setup.cfg', 'tox.ini', and 'pyproject.toml' are
                            tried. [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: 8b239d89534be0b2c69489e10b1352a9)

You can specify the name of the output file with the ``-o`` switch.

Other common reporting options are described above in :ref:`cmd_reporting`.

To include complete file paths in the output file, rather than just
the file name, use [include] vs [source] in your ".coveragerc" file.

For example, use this:

.. code:: ini

    [run]
    include =
        foo/*
        bar/*


which will result in

.. code:: xml

    <class filename="bar/hello.py">
    <class filename="bar/baz/hello.py">
    <class filename="foo/hello.py">

in place of this:

.. code:: ini

    [run]
    source =
        foo
        bar

which may result in

.. code:: xml

    <class filename="hello.py">
    <class filename="baz/hello.py">

These options can also be set in your .coveragerc file. See
:ref:`Configuration: [xml] <config_xml>`.


.. _cmd_json:

JSON reporting: ``coverage json``
---------------------------------

The **json** command writes coverage data to a "coverage.json" file.

.. [[[cog show_help("json") ]]]
.. code::

    $ coverage json --help
    Usage: coverage json [options] [modules]

    Generate a JSON report of coverage results.

    Options:
      --contexts=REGEX1,REGEX2,...
                            Only display data from lines covered in the given
                            contexts. Accepts Python regexes, which must be
                            quoted.
      --data-file=INFILE    Read coverage data for report generation from this
                            file. Defaults to '.coverage'. [env: COVERAGE_FILE]
      --fail-under=MIN      Exit with a status of 2 if the total coverage is less
                            than MIN.
      -i, --ignore-errors   Ignore errors while reading source files.
      --include=PAT1,PAT2,...
                            Include only files whose paths match one of these
                            patterns. Accepts shell-style wildcards, which must be
                            quoted.
      --omit=PAT1,PAT2,...  Omit files whose paths match one of these patterns.
                            Accepts shell-style wildcards, which must be quoted.
      -o OUTFILE            Write the JSON report to this file. Defaults to
                            'coverage.json'
      --pretty-print        Format the JSON for human readers.
      -q, --quiet           Don't print messages about what is happening.
      --show-contexts       Show contexts for covered lines.
      --debug=OPTS          Debug options, separated by commas. [env:
                            COVERAGE_DEBUG]
      -h, --help            Get help on this command.
      --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                            'setup.cfg', 'tox.ini', and 'pyproject.toml' are
                            tried. [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: e53e60cb65d971c35d1db1c08324b72e)

You can specify the name of the output file with the ``-o`` switch.  The JSON
can be nicely formatted by specifying the ``--pretty-print`` switch.
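
For example, to write a human-readable JSON report to a build directory (the
output path is only an illustration)::

    $ coverage json -o build/coverage.json --pretty-print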

Other common reporting options are described above in :ref:`cmd_reporting`.
These options can also be set in your .coveragerc file. See
:ref:`Configuration: [json] <config_json>`.


.. _cmd_lcov:

LCOV reporting: ``coverage lcov``
---------------------------------

The **lcov** command writes coverage data to a "coverage.lcov" file.

.. [[[cog show_help("lcov") ]]]
.. code::

    $ coverage lcov --help
    Usage: coverage lcov [options] [modules]

    Generate an LCOV report of coverage results.

    Options:
      --data-file=INFILE    Read coverage data for report generation from this
                            file. Defaults to '.coverage'. [env: COVERAGE_FILE]
      --fail-under=MIN      Exit with a status of 2 if the total coverage is less
                            than MIN.
      -i, --ignore-errors   Ignore errors while reading source files.
      --include=PAT1,PAT2,...
                            Include only files whose paths match one of these
                            patterns. Accepts shell-style wildcards, which must be
                            quoted.
      -o OUTFILE            Write the LCOV report to this file. Defaults to
                            'coverage.lcov'
      --omit=PAT1,PAT2,...  Omit files whose paths match one of these patterns.
                            Accepts shell-style wildcards, which must be quoted.
      -q, --quiet           Don't print messages about what is happening.
      --debug=OPTS          Debug options, separated by commas. [env:
                            COVERAGE_DEBUG]
      -h, --help            Get help on this command.
      --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                            'setup.cfg', 'tox.ini', and 'pyproject.toml' are
                            tried. [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: 16acfbae8011d2e3b620695c5fe13746)

Common reporting options are described above in :ref:`cmd_reporting`.
Also see :ref:`Configuration: [lcov] <config_lcov>`.

.. versionadded:: 6.3


.. _cmd_annotate:

Text annotation: ``coverage annotate``
--------------------------------------

.. note::

    The **annotate** command has been obsoleted by more modern reporting tools,
    including the **html** command.

The **annotate** command produces a text annotation of your source code.  With
a ``-d`` argument specifying an output directory, each Python file becomes a
text file in that directory.  Without ``-d``, the files are written into the
same directories as the original Python files.
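
For example, to write the annotated copies of two modules into a separate
directory (the directory and file names are illustrative)::

    $ coverage annotate -d annotate_out my_program.py my_module.py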

Coverage status for each line of source is indicated with a character prefix::

    > executed
    ! missing (not executed)
    - excluded

For example::

      # A simple function, never called with x==1

    > def h(x):
          """Silly function."""
    -     if 0:  # pragma: no cover
    -         pass
    >     if x == 1:
    !         a = 1
    >     else:
    >         a = 2

.. [[[cog show_help("annotate") ]]]
.. code::

    $ coverage annotate --help
    Usage: coverage annotate [options] [modules]

    Make annotated copies of the given files, marking statements that are executed
    with > and statements that are missed with !.

    Options:
      -d DIR, --directory=DIR
                            Write the output files to DIR.
      --data-file=INFILE    Read coverage data for report generation from this
                            file. Defaults to '.coverage'. [env: COVERAGE_FILE]
      -i, --ignore-errors   Ignore errors while reading source files.
      --include=PAT1,PAT2,...
                            Include only files whose paths match one of these
                            patterns. Accepts shell-style wildcards, which must be
                            quoted.
      --omit=PAT1,PAT2,...  Omit files whose paths match one of these patterns.
                            Accepts shell-style wildcards, which must be quoted.
      --debug=OPTS          Debug options, separated by commas. [env:
                            COVERAGE_DEBUG]
      -h, --help            Get help on this command.
      --rcfile=RCFILE       Specify configuration file. By default '.coveragerc',
                            'setup.cfg', 'tox.ini', and 'pyproject.toml' are
                            tried. [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: fd7d8fbd2dd6e24d37f868b389c2ad6d)

Other common reporting options are described above in :ref:`cmd_reporting`.


.. _cmd_debug:

Diagnostics: ``coverage debug``
-------------------------------

The **debug** command shows internal information to help diagnose problems.
If you are reporting a bug about coverage.py, including the output of this
command can often help::

    $ coverage debug sys > please_attach_to_bug_report.txt

A few types of information are available:

* ``config``: show coverage's configuration
* ``sys``: show system configuration
* ``data``: show a summary of the collected coverage data
* ``premain``: show the call stack invoking coverage
* ``pybehave``: show internal flags describing Python behavior

.. [[[cog show_help("debug") ]]]
.. code::

    $ coverage debug --help
    Usage: coverage debug <topic>

    Display information about the internals of coverage.py, for diagnosing
    problems. Topics are: 'data' to show a summary of the collected data; 'sys' to
    show installation information; 'config' to show the configuration; 'premain'
    to show what is calling coverage; 'pybehave' to show internal flags describing
    Python behavior.

    Options:
      --debug=OPTS     Debug options, separated by commas. [env: COVERAGE_DEBUG]
      -h, --help       Get help on this command.
      --rcfile=RCFILE  Specify configuration file. By default '.coveragerc',
                       'setup.cfg', 'tox.ini', and 'pyproject.toml' are tried.
                       [env: COVERAGE_RCFILE]
.. [[[end]]] (checksum: c9b8dfb644da3448830b1c99bffa6880)

.. _cmd_run_debug:

``--debug``
...........

The ``--debug`` option is also available on all commands.  It instructs
coverage.py to log internal details of its operation, to help with diagnosing
problems.  It takes a comma-separated list of options, each indicating a facet
of operation to log:

* ``callers``: annotate each debug message with a stack trace of the callers
  to that point.

* ``config``: before starting, dump all the :ref:`configuration `
  values.

* ``dataio``: log when reading or writing any data file.

* ``dataop``: log a summary of data being added to CoverageData objects.

* ``dataop2``: when used with ``debug=dataop``, log the actual data being added
  to CoverageData objects.

* ``lock``: log operations acquiring locks in the data layer.

* ``multiproc``: log the start and stop of multiprocessing processes.

* ``pathmap``: log the remapping of paths that happens during ``coverage
  combine``. See :ref:`config_paths`.

* ``pid``: annotate all warnings and debug output with the process and thread
  ids.

* ``plugin``: print information about plugin operations.

* ``process``: show process creation information, and changes in the current
  directory.  This also writes a time stamp and command arguments into the data
  file.

* ``pybehave``: show the values of `internal flags <env.py_>`_ describing the
  behavior of the current version of Python.

* ``pytest``: indicate the name of the current pytest test when it changes.

* ``self``: annotate each debug message with the object printing the message.

* ``sql``: log the SQL statements used for recording data.

* ``sqldata``: when used with ``debug=sql``, also log the full data being used
  in SQL statements.

* ``sys``: before starting, dump all the system and environment information,
  as with :ref:`coverage debug sys <cmd_debug>`.

* ``trace``: print every decision about whether to trace a file or not. For
  files not being traced, the reason is also given.

.. _env.py: https://github.com/nedbat/coveragepy/blob/master/coverage/env.py

Debug options can also be set with the ``COVERAGE_DEBUG`` environment variable,
a comma-separated list of these options, or in the :ref:`config_run_debug`
section of the .coveragerc file.
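
For example, either of these runs records tracing decisions and data file
activity (the program name is illustrative)::

    $ coverage run --debug=trace,dataio my_program.py
    $ COVERAGE_DEBUG=trace,dataio coverage run my_program.py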

The debug output goes to stderr, unless the :ref:`config_run_debug_file`
setting or the ``COVERAGE_DEBUG_FILE`` environment variable names a different
file, which will be appended to.  This can be useful because many test runners
capture output, which could hide important details.  ``COVERAGE_DEBUG_FILE``
accepts the special names ``stdout`` and ``stderr`` to write to those
destinations.
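
For example, to keep debug output away from a test runner's captured output,
send it to a file (the file name and the use of pytest are illustrative)::

    $ COVERAGE_DEBUG=sql COVERAGE_DEBUG_FILE=debug.txt coverage run -m pytest
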

coverage-7.4.4/doc/cog_helpers.py

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""
Functions for use with cog in the documentation.
"""

# For help text in doc/cmd.rst:
# optparse wraps help to the COLUMNS value.  Set it here to be sure it's
# consistent regardless of the environment.  Has to be set before we
# import cmdline.py, which creates the optparse objects.

# pylint: disable=wrong-import-position
import os
os.environ["COLUMNS"] = "80"

import contextlib
import io
import re
import textwrap

import cog              # pylint: disable=import-error

from coverage.cmdline import CoverageScript
from coverage.config import read_coverage_config


def show_help(cmd):
    """
    Insert the help output from a command.
    """
    with contextlib.redirect_stdout(io.StringIO()) as stdout:
        CoverageScript().command_line([cmd, "--help"])
    help_text = stdout.getvalue()
    help_text = help_text.replace("__main__.py", "coverage")
    help_text = re.sub(r"(?m)^Full doc.*$", "", help_text)
    help_text = help_text.rstrip()

    print(".. code::\n")
    print(f"    $ coverage {cmd} --help")
    print(textwrap.indent(help_text, "    "))


def _read_config(text, fname):
    """
    Prep and read configuration text.

    Returns the prepared text, and a dict of the settings.
    """
    # Text will be triple-quoted with an initial ignored newline.
    assert text[0] == "\n"
    text = textwrap.dedent(text[1:])

    os.makedirs("tmp", exist_ok=True)
    with open(f"tmp/{fname}", "w") as f:
        f.write(text)

    config = read_coverage_config(f"tmp/{fname}", warn=cog.error)

    values = {}
    for name, val in vars(config).items():
        if name.startswith("_"):
            continue
        if "config_file" in name:
            continue
        values[name] = val
    return text, values


def show_configs(ini, toml):
    """
    Show configuration text in a tabbed box.

    `ini` is the ini-file syntax, `toml` is the equivalent TOML syntax.
    The equivalence is checked for accuracy, and the process fails if there's
    a mismatch.

    A three-tabbed box will be produced.
    """
    ini, ini_vals = _read_config(ini, "covrc")
    toml, toml_vals = _read_config(toml, "covrc.toml")
    for key, val in ini_vals.items():
        if val != toml_vals[key]:
            cog.error(f"Mismatch! {key}: {val!r} vs {toml_vals[key]!r}")

    ini2 = re.sub(r"(?m)^\[", "[coverage:", ini)
    print()
    print(".. tabs::\n")
    for name, syntax, text in [
        (".coveragerc", "ini", ini),
        ("pyproject.toml", "toml", toml),
        ("setup.cfg, tox.ini", "ini", ini2),
    ]:
        print(f"    .. code-tab:: {syntax}")
        print(f"        :caption: {name}")
        print()
        print(textwrap.indent(text, " " * 8))

coverage-7.4.4/doc/conf.py

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Sphinx configuration."""

# coverage.py documentation build configuration file, created by
# sphinx-quickstart on Wed May 13 22:18:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import atexit
import os
import re
import sys
import tempfile

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))

# on_rtd is whether we are on readthedocs.org
on_rtd = os.getenv('READTHEDOCS') == 'True'

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.ifconfig',
    'sphinx.ext.intersphinx',
    'sphinxcontrib.restbuilder',
    'sphinx.ext.napoleon',
    'sphinx_code_tabs',
    'sphinx_rtd_theme',
]

autodoc_typehints = "description"

# Add any paths that contain templates here, relative to this directory.
templates_path = []

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Coverage.py'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.

# @@@ editable
copyright = "2009โ€“2024, Ned Batchelder" # pylint: disable=redefined-builtin
# The short X.Y.Z version.
version = "7.4.4"
# The full version, including alpha/beta/rc tags.
release = "7.4.4"
# The date of release, in "monthname day, year" format.
release_date = "March 14, 2024"
# @@@ end

rst_epilog = f"""
.. |release_date| replace:: {release_date}
.. |coverage-equals-release| replace:: coverage=={release}
.. |doc-url| replace:: https://coverage.readthedocs.io/en/{release}
.. |br| raw:: html

  <br>
"""

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_patterns = ["_build", "help/*"]

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
}

nitpick_ignore = [
    ("py:class", "frame"),
    ("py:class", "module"),
    ("py:class", "DefaultValue"),
    ("py:class", "FilePath"),
    ("py:class", "TWarnFn"),
    ("py:class", "TDebugCtl"),
]

nitpick_ignore_regex = [
    (r"py:class", r"coverage\..*\..*"),
]

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

#html_style = "neds.css"
#html_add_permalinks = ""

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_templates']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'media/sleepy-snake-circle-150.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_use_modindex = False

# If false, no index is generated.
html_use_index = False

# If true, the index is split into individual pages for each letter.
#html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '.htm' # Output file base name for HTML help builder. htmlhelp_basename = 'coveragepydoc' # -- Spelling --- if any("spell" in arg for arg in sys.argv): # sphinxcontrib.spelling needs the native "enchant" library, which often is # missing, so only use the extension if we are specifically spell-checking. extensions += ['sphinxcontrib.spelling'] names_file = tempfile.NamedTemporaryFile(mode='w', prefix="coverage_names_", suffix=".txt") with open("../CONTRIBUTORS.txt") as contributors: names = set(re.split(r"[^\w']", contributors.read())) names = [n for n in names if len(n) >= 2 and n[0].isupper()] names_file.write("\n".join(names)) names_file.flush() atexit.register(os.remove, names_file.name) spelling_word_list_filename = ['dict.txt', names_file.name] spelling_show_suggestions = False # Regexes for URLs that linkcheck should skip. linkcheck_ignore = [ # We have lots of links to GitHub, and they start refusing to serve them to linkcheck, # so don't bother checking them. r"https://github.com/nedbat/coveragepy/(issues|pull)/\d+", # When publishing a new version, the docs will refer to the version before # the docs have been published. So don't check those links. fr"https://coverage.readthedocs.io/en/{release}$", ] # https://github.com/executablebooks/sphinx-tabs/pull/54 sphinx_tabs_valid_builders = ['linkcheck'] # When auto-doc'ing a class, only write the class' docstring into the class docs, # don't automatically include the __init__ docstring. autoclass_content = "class" prerelease = bool(max(release).isalpha()) def setup(app): """Configure Sphinx""" app.add_css_file('coverage.css') app.add_config_value('prerelease', False, 'env') print("** Prerelease = %r" % prerelease) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/config.rst0000644000175100001770000005655000000000000016706 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. This file is processed with cog to create the tabbed multi-syntax configuration examples. If those are wrong, the quality checks will fail. Running "make prebuild" checks them and produces the output. .. [[[cog from cog_helpers import show_configs .. ]]] .. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e) .. _config: ======================= Configuration reference ======================= .. highlight:: ini Coverage.py options can be specified in a configuration file. This makes it easier to re-run coverage.py with consistent settings, and also allows for specification of options that are otherwise only available in the :ref:`API `. Configuration files also make it easier to get coverage testing of spawned sub-processes. See :ref:`subprocess` for more details. The default name for the configuration file is ``.coveragerc``, in the same directory coverage.py is being run in. 
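If you drive coverage.py from the API instead of the command line, the same
settings can either be read from a configuration file or passed directly to
the ``Coverage`` constructor.  Here is a minimal sketch of that equivalence;
the file name and package name are only hypothetical examples:

.. code-block:: python

    import coverage

    # Read settings from an explicit configuration file, and also set one
    # option directly; both values are hypothetical, for illustration only.
    cov = coverage.Coverage(config_file=".coveragerc", source=["myproj"])
    cov.start()
    # ... import and exercise the code being measured ...
    cov.stop()
    cov.save()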
Most of the settings in the configuration file are tied to your source code and how it should be measured, so it should be stored with your source, and checked into source control, rather than put in your home directory. A different location for the configuration file can be specified with the ``--rcfile=FILE`` command line option or with the ``COVERAGE_RCFILE`` environment variable. Coverage.py will read settings from other usual configuration files if no other configuration file is used. It will automatically read from "setup.cfg" or "tox.ini" if they exist. In this case, the section names have "coverage:" prefixed, so the ``[run]`` options described below will be found in the ``[coverage:run]`` section of the file. Coverage.py will read from "pyproject.toml" if TOML support is available, either because you are running on Python 3.11 or later, or because you installed with the ``toml`` extra (``pip install coverage[toml]``). Syntax ------ The specific syntax of a configuration file depends on what type it is. All configuration files are assumed to be in INI format, unless their file extension is .toml, which are TOML. INI Syntax .......... A coverage.py configuration file is in classic .ini file format: sections are introduced by a ``[section]`` header, and contain ``name = value`` entries. Lines beginning with ``#`` or ``;`` are ignored as comments. Strings don't need quotes. Multi-valued strings can be created by indenting values on multiple lines. Boolean values can be specified as ``on``, ``off``, ``true``, ``false``, ``1``, or ``0`` and are case-insensitive. TOML Syntax ........... `TOML syntax`_ uses explicit lists with brackets, and strings with quotes. Booleans are in ``true`` or ``false``. Configuration must be within the ``[tool.coverage]`` section, for example, ``[tool.coverage.run]``. Environment variable expansion in values is available, but only within quoted strings, even for non-string values. .. _TOML syntax: https://toml.io Environment variables ..................... Environment variables can be substituted in by using dollar signs: ``$WORD`` or ``${WORD}`` will be replaced with the value of ``WORD`` in the environment. A dollar sign can be inserted with ``$$``. Special forms can be used to control what happens if the variable isn't defined in the environment: - If you want to raise an error if an environment variable is undefined, use a question mark suffix: ``${WORD?}``. - If you want to provide a default for missing variables, use a dash with a default value: ``${WORD-default value}``. - Otherwise, missing environment variables will result in empty strings with no error. Sample file ........... Here's a sample configuration file, in each syntax: .. 
[[[cog show_configs( ini=r""" [run] branch = True [report] ; Regexes for lines to exclude from consideration exclude_also = ; Don't complain about missing debug-only code: def __repr__ if self\.debug ; Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError ; Don't complain if non-runnable code isn't run: if 0: if __name__ == .__main__.: ; Don't complain about abstract methods, they aren't run: @(abc\.)?abstractmethod ignore_errors = True [html] directory = coverage_html_report """, toml=r""" [tool.coverage.run] branch = true [tool.coverage.report] # Regexes for lines to exclude from consideration exclude_also = [ # Don't complain about missing debug-only code: "def __repr__", "if self\\.debug", # Don't complain if tests don't hit defensive assertion code: "raise AssertionError", "raise NotImplementedError", # Don't complain if non-runnable code isn't run: "if 0:", "if __name__ == .__main__.:", # Don't complain about abstract methods, they aren't run: "@(abc\\.)?abstractmethod", ] ignore_errors = true [tool.coverage.html] directory = "coverage_html_report" """, ) .. ]]] .. tabs:: .. code-tab:: ini :caption: .coveragerc [run] branch = True [report] ; Regexes for lines to exclude from consideration exclude_also = ; Don't complain about missing debug-only code: def __repr__ if self\.debug ; Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError ; Don't complain if non-runnable code isn't run: if 0: if __name__ == .__main__.: ; Don't complain about abstract methods, they aren't run: @(abc\.)?abstractmethod ignore_errors = True [html] directory = coverage_html_report .. code-tab:: toml :caption: pyproject.toml [tool.coverage.run] branch = true [tool.coverage.report] # Regexes for lines to exclude from consideration exclude_also = [ # Don't complain about missing debug-only code: "def __repr__", "if self\\.debug", # Don't complain if tests don't hit defensive assertion code: "raise AssertionError", "raise NotImplementedError", # Don't complain if non-runnable code isn't run: "if 0:", "if __name__ == .__main__.:", # Don't complain about abstract methods, they aren't run: "@(abc\\.)?abstractmethod", ] ignore_errors = true [tool.coverage.html] directory = "coverage_html_report" .. code-tab:: ini :caption: setup.cfg, tox.ini [coverage:run] branch = True [coverage:report] ; Regexes for lines to exclude from consideration exclude_also = ; Don't complain about missing debug-only code: def __repr__ if self\.debug ; Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError ; Don't complain if non-runnable code isn't run: if 0: if __name__ == .__main__.: ; Don't complain about abstract methods, they aren't run: @(abc\.)?abstractmethod ignore_errors = True [coverage:html] directory = coverage_html_report .. [[[end]]] (checksum: 75c6c0c2ee170424cc1c18710e2b4919) The specific configuration settings are described below. Many sections and settings correspond roughly to commands and options in the :ref:`command-line interface `. .. _config_run: [run] ----- These settings are generally used when running product code, though some apply to more than one command. .. _config_run_branch: [run] branch ............ (boolean, default False) Whether to measure :ref:`branch coverage ` in addition to statement coverage. .. _config_run_command_line: [run] command_line .................. (string) The command-line to run your program. 
This will be used if you run ``coverage run`` with no further arguments. Coverage.py options cannot be specified here, other than ``-m`` to indicate the module to run. .. versionadded:: 5.0 .. _config_run_concurrency: [run] concurrency ................. (multi-string, default "thread") The concurrency libraries in use by the product code. If your program uses `multiprocessing`_, `gevent`_, `greenlet`_, or `eventlet`_, you must name that library in this option, or coverage.py will produce very wrong results. .. _multiprocessing: https://docs.python.org/3/library/multiprocessing.html .. _greenlet: https://greenlet.readthedocs.io/ .. _gevent: https://www.gevent.org/ .. _eventlet: https://eventlet.readthedocs.io/ See :ref:`subprocess` for details of multi-process measurement. Before version 4.2, this option only accepted a single string. .. versionadded:: 4.0 .. _config_run_context: [run] context ............. (string) The static context to record for this coverage run. See :ref:`contexts` for more information .. versionadded:: 5.0 .. _config_run_cover_pylib: [run] cover_pylib ................. (boolean, default False) Whether to measure the Python standard library. .. _config_run_data_file: [run] data_file ............... (string, default ".coverage") The name of the data file to use for storing or reporting coverage. This value can include a path to another directory. .. _config_run_disable_warnings: [run] disable_warnings ...................... (multi-string) A list of warnings to disable. Warnings that can be disabled include a short string at the end, the name of the warning. See :ref:`cmd_warnings` for specific warnings. .. _config_run_debug: [run] debug ........... (multi-string) A list of debug options. See :ref:`the run --debug option ` for details. .. _config_run_debug_file: [run] debug_file ................ (string) A file name to write debug output to. See :ref:`the run --debug option ` for details. .. _config_run_dynamic_context: [run] dynamic_context ..................... (string) The name of a strategy for setting the dynamic context during execution. See :ref:`dynamic_contexts` for details. .. _config_run_include: [run] include ............. (multi-string) A list of file name patterns, the files to include in measurement or reporting. Ignored if ``source`` is set. See :ref:`source` for details. .. _config_run_omit: [run] omit .......... (multi-string) A list of file name patterns, the files to leave out of measurement or reporting. See :ref:`source` for details. .. _config_run_parallel: [run] parallel .............. (boolean, default False) Append the machine name, process id and random number to the data file name to simplify collecting data from many processes. See :ref:`cmd_combine` for more information. .. _config_run_plugins: [run] plugins ............. (multi-string) A list of plugin package names. See :ref:`plugins` for more information. .. _config_run_relative_files: [run] relative_files .................... (boolean, default False) store relative file paths in the data file. This makes it easier to measure code in one (or multiple) environments, and then report in another. See :ref:`cmd_combine` for details. Note that setting ``source`` has to be done in the configuration file rather than the command line for this option to work, since the reporting commands need to know the source origin. .. versionadded:: 5.0 .. _config_run_sigterm: [run] sigterm ............. 
(boolean, default False) if true, register a SIGTERM signal handler to capture data when the process ends due to a SIGTERM signal. This includes :meth:`Process.terminate `, and other ways to terminate a process. This can help when collecting data in usual situations, but can also introduce problems (see `issue 1310`_). Only on Linux and Mac. .. _issue 1310: https://github.com/nedbat/coveragepy/issues/1310 .. versionadded:: 6.4 (in 6.3 this was always enabled) .. _config_run_source: [run] source ............ (multi-string) A list of packages or directories, the source to measure during execution. If set, ``include`` is ignored. See :ref:`source` for details. .. _config_run_source_pkgs: [run] source_pkgs ................. (multi-string) A list of packages, the source to measure during execution. Operates the same as ``source``, but only names packages, for resolving ambiguities between packages and directories. .. versionadded:: 5.3 .. _config_run_timid: [run] timid ........... (boolean, default False) Use a simpler but slower trace method. This uses the PyTracer trace function core instead of CTracer, and is only needed in very unusual circumstances. .. _config_paths: [paths] ------- The entries in this section are lists of file paths that should be considered equivalent when combining data from different machines: .. [[[cog show_configs( ini=r""" [paths] source = src/ /jenkins/build/*/src c:\myproj\src """, toml=r""" [tool.coverage.paths] source = [ "src/", "/jenkins/build/*/src", "c:\\myproj\\src", ] """, ) .. ]]] .. tabs:: .. code-tab:: ini :caption: .coveragerc [paths] source = src/ /jenkins/build/*/src c:\myproj\src .. code-tab:: toml :caption: pyproject.toml [tool.coverage.paths] source = [ "src/", "/jenkins/build/*/src", "c:\\myproj\\src", ] .. code-tab:: ini :caption: setup.cfg, tox.ini [coverage:paths] source = src/ /jenkins/build/*/src c:\myproj\src .. [[[end]]] (checksum: cf06ac36436db0c87be15a85223900d0) The names of the entries ("source" in this example) are ignored, you may choose any name that you like. The value is a list of strings. When combining data with the ``combine`` command, two file paths will be combined if they start with paths from the same list. The first value must be an actual file path on the machine where the reporting will happen, so that source code can be found. The other values can be file patterns to match against the paths of collected data, or they can be absolute or relative file paths on the current machine. In this example, data collected for "/jenkins/build/1234/src/module.py" will be combined with data for "c:\\myproj\\src\\module.py", and will be reported against the source file found at "src/module.py". If you specify more than one list of paths, they will be considered in order. A file path will only be remapped if the result exists. If a path matches a list, but the result doesn't exist, the next list will be tried. The first list that has an existing result will be used. Remapping will also be done during reporting, but only within the single data file being reported. Combining multiple files requires the ``combine`` command. The ``--debug=pathmap`` option can be used to log details of the re-mapping of paths. See :ref:`the --debug option `. See :ref:`cmd_combine_remapping` and :ref:`source_glob` for more information. .. _config_report: [report] -------- Settings common to many kinds of reporting. .. _config_report_exclude_also: [report] exclude_also ..................... (multi-string) A list of regular expressions. 
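The ``exclude_also`` patterns described here can also be added from the API;
a minimal sketch (the pattern is just an illustration):

.. code-block:: python

    import coverage

    cov = coverage.Coverage()
    # Add one more regex to the existing list of exclusion patterns,
    # similar in spirit to exclude_also in the configuration file.
    cov.exclude(r"def __repr__")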
This setting is similar to :ref:`config_report_exclude_lines`: it specifies patterns for lines to exclude from reporting. This setting is preferred, because it will preserve the default exclude patterns instead of overwriting them. .. versionadded:: 7.2.0 .. _config_report_exclude_lines: [report] exclude_lines ...................... (multi-string) A list of regular expressions. Any line of your source code containing a match for one of these regexes is excluded from being reported as missing. More details are in :ref:`excluding`. If you use this option, you are replacing all the exclude regexes, so you'll need to also supply the "pragma: no cover" regex if you still want to use it. The :ref:`config_report_exclude_also` setting can be used to specify patterns without overwriting the default set. You can exclude lines introducing blocks, and the entire block is excluded. If you exclude a ``def`` line or decorator line, the entire function is excluded. Be careful when writing this setting: the values are regular expressions that only have to match a portion of the line. For example, if you write ``...``, you'll exclude any line with three or more of any character. If you write ``pass``, you'll also exclude the line ``my_pass="foo"``, and so on. .. _config_report_fail_under: [report] fail_under ................... (float) A target coverage percentage. If the total coverage measurement is under this value, then exit with a status code of 2. If you specify a non-integral value, you must also set ``[report] precision`` properly to make use of the decimal places. A setting of 100 will fail any value under 100, regardless of the number of decimal places of precision. .. _config_report_format: [report] format ............... (string, default "text") The format to use for the textual report. The default is "text" which produces a simple textual table. You can use "markdown" to produce a Markdown table, or "total" to output only the total coverage percentage. .. versionadded:: 7.0 .. _config_report_ignore_errors: [report] ignore_errors ...................... (boolean, default False) Ignore source code that can't be found, emitting a warning instead of an exception. .. _config_report_include: [report] include ................ (multi-string) A list of file name patterns, the files to include in reporting. See :ref:`source` for details. .. _config_include_namespace_packages: [report] include_namespace_packages ................................... (boolean, default False) When searching for completely un-executed files, include directories without ``__init__.py`` files. These are `implicit namespace packages`_, and are usually skipped. .. _implicit namespace packages: https://peps.python.org/pep-0420/ .. versionadded:: 7.0 .. _config_report_omit: [report] omit ............. (multi-string) A list of file name patterns, the files to leave out of reporting. See :ref:`source` for details. .. _config_report_partial_branches: [report] partial_branches ......................... (multi-string) A list of regular expressions. Any line of code that matches one of these regexes is excused from being reported as a partial branch. More details are in :ref:`branch`. If you use this option, you are replacing all the partial branch regexes so you'll need to also supply the "pragma: no branch" regex if you still want to use it. .. _config_report_precision: [report] precision .................. (integer) The number of digits after the decimal point to display for reported coverage percentages. 
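Both ``fail_under`` and ``precision`` are applied for you by the command
line.  If you work from the API instead, the report methods return the total
as a float and you can apply your own threshold; a rough sketch (the
threshold value is only an example):

.. code-block:: python

    import coverage

    cov = coverage.Coverage()
    cov.load()
    # report() writes the text report and returns the total percentage
    # as a float.
    total = cov.report()
    if total < 79.5:           # hypothetical fail_under value
        raise SystemExit(2)    # the exit status the command line uses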
The default is 0, displaying for example "87%". A value of 2 will display percentages like "87.32%". This setting also affects the interpretation of the ``fail_under`` setting. .. _config_report_show_missing: [report] show_missing ..................... (boolean, default False) When running a summary report, show missing lines. See :ref:`cmd_report` for more information. .. _config_report_skip_covered: [report] skip_covered ..................... (boolean, default False) Don't report files that are 100% covered. This helps you focus on files that need attention. .. _config_report_skip_empty: [report] skip_empty ................... (boolean, default False) Don't report files that have no executable code (such as ``__init__.py`` files). .. _config_report_sort: [report] sort ............. (string, default "Name") Sort the text report by the named column. Allowed values are "Name", "Stmts", "Miss", "Branch", "BrPart", or "Cover". Prefix with ``-`` for descending sort (for example, "-cover"). .. _config_html: [html] ------ Settings particular to HTML reporting. The settings in the ``[report]`` section also apply to HTML output, where appropriate. .. _config_html_directory: [html] directory ................ (string, default "htmlcov") Where to write the HTML report files. .. _config_html_extra_css: [html] extra_css ................ (string) The path to a file of CSS to apply to the HTML report. The file will be copied into the HTML output directory. Don't name it "style.css". This CSS is in addition to the CSS normally used, though you can overwrite as many of the rules as you like. .. _config_html_show_context: [html] show_contexts .................... (boolean) Should the HTML report include an indication on each line of which contexts executed the line. See :ref:`dynamic_contexts` for details. .. _config_html_skip_covered: [html] skip_covered ................... (boolean, defaulted from ``[report] skip_covered``) Don't include files in the report that are 100% covered files. See :ref:`cmd_report` for more information. .. versionadded:: 5.4 .. _config_html_skip_empty: [html] skip_empty ................. (boolean, defaulted from ``[report] skip_empty``) Don't include empty files (those that have 0 statements) in the report. See :ref:`cmd_report` for more information. .. versionadded:: 5.4 .. _config_html_title: [html] title ............ (string, default "Coverage report") The title to use for the report. Note this is text, not HTML. .. _config_xml: [xml] ----- Settings particular to XML reporting. The settings in the ``[report]`` section also apply to XML output, where appropriate. .. _config_xml_output: [xml] output ............ (string, default "coverage.xml") Where to write the XML report. .. _config_xml_package_depth: [xml] package_depth ................... (integer, default 99) Controls which directories are identified as packages in the report. Directories deeper than this depth are not reported as packages. The default is that all directories are reported as packages. .. _config_json: [json] ------ Settings particular to JSON reporting. The settings in the ``[report]`` section also apply to JSON output, where appropriate. .. versionadded:: 5.0 .. _config_json_output: [json] output ............. (string, default "coverage.json") Where to write the JSON file. .. _config_json_pretty_print: [json] pretty_print ................... (boolean, default false) Controls if the JSON is outputted with white space formatted for human consumption (True) or for minimum file size (False). .. 
_config_json_show_contexts: [json] show_contexts .................... (boolean, default false) Should the JSON report include an indication of which contexts executed each line. See :ref:`dynamic_contexts` for details. .. _config_lcov: [lcov] ------ Settings particular to LCOV reporting (see :ref:`cmd_lcov`). .. versionadded:: 6.3 [lcov] output ............. (string, default "coverage.lcov") Where to write the LCOV file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/contexts.rst0000644000175100001770000001134600000000000017302 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. This file is processed with cog to create the tabbed multi-syntax configuration examples. If those are wrong, the quality checks will fail. Running "make prebuild" checks them and produces the output. .. [[[cog from cog_helpers import show_configs .. ]]] .. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e) .. _contexts: ==================== Measurement contexts ==================== .. versionadded:: 5.0 Coverage.py measures whether code was run, but it can also record the context in which it was run. This can provide more information to help you understand the behavior of your tests. There are two kinds of context: static and dynamic. Static contexts are fixed for an entire run, and are set explicitly with an option. Dynamic contexts change over the course of a single run. .. _static_contexts: Static contexts --------------- A static context is set by an option when you run coverage.py. The value is fixed for the duration of a run. They can be any text you like, for example, "python3" or "with_numpy". The context is recorded with the data. When you :ref:`combine multiple data files ` together, they can have differing contexts. All of the information is retained, so that the different contexts are correctly recorded in the combined file. A static context is specified with the ``--context=CONTEXT`` option to :ref:`the coverage run command `, or the ``[run] context`` setting in the configuration file. .. _dynamic_contexts: Dynamic contexts ---------------- Dynamic contexts are found during execution. They are most commonly used to answer the question "what test ran this line?," but have been generalized to allow any kind of context tracking. As execution proceeds, the dynamic context changes to record the context of execution. Separate data is recorded for each context, so that it can be analyzed later. There are three ways to enable dynamic contexts: * you can set the ``[run] dynamic_context`` option in your .coveragerc file, or * you can enable a :ref:`dynamic context switcher ` plugin, or * another tool (such as a test runner) can call the :meth:`.Coverage.switch_context` method to set the context explicitly. The pytest plugin `pytest-cov`_ has a ``--cov-context`` option that uses this to set the dynamic context for each test. .. _pytest-cov: https://pypi.org/project/pytest-cov/ .. highlight:: ini The ``[run] dynamic_context`` setting has only one option now. Set it to ``test_function`` to start a new dynamic context for every test function: .. [[[cog show_configs( ini=r""" [run] dynamic_context = test_function """, toml=r""" [tool.coverage.run] dynamic_context = "test_function" """, ) .. ]]] .. tabs:: .. code-tab:: ini :caption: .coveragerc [run] dynamic_context = test_function .. 
code-tab:: toml :caption: pyproject.toml [tool.coverage.run] dynamic_context = "test_function" .. code-tab:: ini :caption: setup.cfg, tox.ini [coverage:run] dynamic_context = test_function .. [[[end]]] (checksum: 5c5d120ee876e5fe26e573e1a5e8551d) Each test function you run will be considered a separate dynamic context, and coverage data will be segregated for each. A test function is any function whose name starts with "test". If you have both a static context and a dynamic context, they are joined with a pipe symbol to be recorded as a single string. Initially, when your program starts running, the dynamic context is an empty string. Any code measured before a dynamic context is set will be recorded in this empty context. For example, if you are recording test names as contexts, then the code run by the test runner before (and between) tests will be in the empty context. Dynamic contexts can be explicitly disabled by setting ``dynamic_context`` to ``none``. .. _context_reporting: Context reporting ----------------- The ``coverage report`` and ``coverage html`` commands both accept ``--contexts`` option, a comma-separated list of regular expressions. The report will be limited to the contexts that match one of those patterns. The ``coverage html`` command also has ``--show-contexts``. If set, the HTML report will include an annotation on each covered line indicating the number of contexts that executed the line. Clicking the annotation displays a list of the contexts. Raw data -------- For more advanced reporting or analysis, the .coverage data file is a SQLite database. See :ref:`dbschema` for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/contributing.rst0000644000175100001770000003467400000000000020153 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. Command samples here were made with a 100-column terminal. .. _contributing: =========================== Contributing to coverage.py =========================== .. highlight:: console I welcome contributions to coverage.py. Over the years, hundreds of people have provided contributions of various sizes to add features, fix bugs, or just help diagnose thorny issues. This page should have all the information you need to make a contribution. One source of history or ideas are the `bug reports`_ against coverage.py. There you can find ideas for requested features, or the remains of rejected ideas. .. _bug reports: https://github.com/nedbat/coveragepy/issues Before you begin ---------------- If you have an idea for coverage.py, run it by me before you begin writing code. This way, I can get you going in the right direction, or point you to previous work in the area. Things are not always as straightforward as they seem, and having the benefit of lessons learned by those before you can save you frustration. Getting the code ---------------- .. PYVERSIONS (mention of lowest version in the "create virtualenv" step). The coverage.py code is hosted on a GitHub repository at https://github.com/nedbat/coveragepy. To get a working environment, follow these steps: #. `Fork the repo`_ into your own GitHub account. The coverage.py code will then be copied into a GitHub repository at ``https://github.com/GITHUB_USER/coveragepy`` where GITHUB_USER is your GitHub username. #. 
(Optional) Create a virtualenv to work in, and activate it. There are a number of ways to do this. Use the method you are comfortable with. Ideally, use Python3.8 (the lowest version coverage.py supports). #. Clone the repository:: $ git clone https://github.com/GITHUB_USER/coveragepy $ cd coveragepy #. Install the requirements:: $ python3 -m pip install -r requirements/dev.pip Note: You may need to upgrade pip to install the requirements. Running the tests ----------------- The tests are written mostly as standard unittest-style tests, and are run with pytest running under `tox`_:: $ python3 -m tox -e py38 ROOT: tox-gh won't override envlist because tox is not running in GitHub Actions py38: wheel-0.40.0-py3-none-any.whl already present in /Users/nedbatchelder/Library/Application Support/virtualenv/wheel/3.8/embed/3/wheel.json py38: pip-23.1.2-py3-none-any.whl already present in /Users/nedbatchelder/Library/Application Support/virtualenv/wheel/3.8/embed/3/pip.json py38: setuptools-67.8.0-py3-none-any.whl already present in /Users/nedbatchelder/Library/Application Support/virtualenv/wheel/3.8/embed/3/setuptools.json py38: install_deps> python -m pip install -U -r requirements/pip.pip -r requirements/pytest.pip -r requirements/light-threads.pip .pkg: install_requires> python -I -m pip install setuptools .pkg: _optional_hooks> python /usr/local/virtualenvs/coverage/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta .pkg: get_requires_for_build_editable> python /usr/local/virtualenvs/coverage/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta .pkg: install_requires_for_build_editable> python -I -m pip install wheel .pkg: build_editable> python /usr/local/virtualenvs/coverage/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta py38: install_package_deps> python -m pip install -U 'tomli; python_full_version <= "3.11.0a6"' py38: install_package> python -m pip install -U --force-reinstall --no-deps .tox/.tmp/package/1/coverage-7.2.8a0.dev1-0.editable-cp38-cp38-macosx_13_0_x86_64.whl py38: commands[0]> python igor.py zip_mods py38: commands[1]> python setup.py --quiet build_ext --inplace py38: commands[2]> python -m pip install -q -e . py38: commands[3]> python igor.py test_with_tracer c === CPython 3.8.17 with C tracer (.tox/py38/bin/python) === bringing up nodes... ............................................................................................ [ 6%] ...................................x.....x...............s..s..s.s.......................... [ 13%] ............................................................................................ [ 20%] ............................................................................................ [ 27%] ............................................................................................ [ 34%] ............................................................................................ [ 41%] ............................................................................................ [ 47%] ............................................................................................ [ 54%] ........................................................s...........s....................... [ 61%] ............................................................................................ [ 68%] ..........s...........................s...........s......................................... 
[ 75%] ..................s...................s..............................................s...... [ 82%] ...............................s............................................................ [ 88%] ............................................................................................ [ 95%] .............................s............................. [100%] 1332 passed, 14 skipped, 2 xfailed in 60.54s (0:01:00) py38: commands[4]> python igor.py remove_extension py38: commands[5]> python igor.py test_with_tracer py === CPython 3.8.17 with Python tracer (.tox/py38/bin/python) === bringing up nodes... ............................................................................................ [ 6%] .............................x.............................................................. [ 13%] ..ss...................................x.....................................ss............. [ 20%] ..........s.....................................ss...................s.................sss.. [ 27%] .ss.....s................................................................................... [ 34%] ............................................................................................ [ 41%] ....................................................................s....................... [ 47%] .....................................................................s..s.ss................ [ 54%] ...ss.sss.......................................................s........s........sss....... [ 61%] .ss...............s.s..................s.................s.s................................ [ 68%] ...........................................................................................s [ 75%] ........................................................s.......................s........... [ 82%] ....................ss.s........................ssss........................................ [ 88%] ............................................................................................ [ 95%] ................................s...............ss......... [100%] 1297 passed, 49 skipped, 2 xfailed in 44.59s .pkg: _exit> python /usr/local/virtualenvs/coverage/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta py38: OK (143.82=setup[23.23]+cmd[0.29,1.60,8.43,61.64,0.34,48.28] seconds) congratulations :) (144.93 seconds) Tox runs the complete test suite twice for each version of Python you have installed. The first run uses the C implementation of the trace function, the second uses the Python implementation. To limit tox to just a few versions of Python, use the ``-e`` switch:: $ python3 -m tox -e py38,py39 On the tox command line, options after ``--`` are passed to pytest. To run just a few tests, you can use `pytest test selectors`_:: $ python3 -m tox -- tests/test_misc.py $ python3 -m tox -- tests/test_misc.py::HasherTest $ python3 -m tox -- tests/test_misc.py::HasherTest::test_string_hashing These commands run the tests in one file, one class, and just one test, respectively. The pytest ``-k`` option selects tests based on a word in their name, which can be very convenient for ad-hoc test selection. 
Of course you can combine tox and pytest options:: $ python3 -m tox -q -e py310 -- -n 0 -vv -k hash === CPython 3.10.12 with C tracer (.tox/py310/bin/python) === ======================================= test session starts ======================================== platform darwin -- Python 3.10.12, pytest-7.3.2, pluggy-1.0.0 -- /Users/nedbatchelder/coverage/trunk/.tox/py310/bin/python cachedir: .tox/py310/.pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/Users/nedbatchelder/coverage/trunk/.hypothesis/examples') rootdir: /Users/nedbatchelder/coverage/trunk configfile: pyproject.toml plugins: hypothesis-6.78.3, flaky-3.7.0, xdist-3.3.1 collected 1348 items / 1338 deselected / 10 selected run-last-failure: no previously failed tests, not deselecting items. tests/test_data.py::CoverageDataTest::test_add_to_hash_with_lines PASSED [ 10%] tests/test_data.py::CoverageDataTest::test_add_to_hash_with_arcs PASSED [ 20%] tests/test_data.py::CoverageDataTest::test_add_to_lines_hash_with_missing_file PASSED [ 30%] tests/test_data.py::CoverageDataTest::test_add_to_arcs_hash_with_missing_file PASSED [ 40%] tests/test_execfile.py::RunPycFileTest::test_running_hashed_pyc PASSED [ 50%] tests/test_misc.py::HasherTest::test_string_hashing PASSED [ 60%] tests/test_misc.py::HasherTest::test_bytes_hashing PASSED [ 70%] tests/test_misc.py::HasherTest::test_unicode_hashing PASSED [ 80%] tests/test_misc.py::HasherTest::test_dict_hashing PASSED [ 90%] tests/test_misc.py::HasherTest::test_dict_collision PASSED [100%] =============================== 10 passed, 1338 deselected in 2.24s ================================ Skipping tests with Python tracer: Only one tracer: no Python tracer for CPython py310: OK (17.99 seconds) congratulations :) (19.09 seconds) TODO: Update this for CORE instead of TRACER You can also affect the test runs with environment variables: - ``COVERAGE_ONE_CORE=1`` will use only one tracing core for each Python version. This isn't about CPU cores, it's about the central code that tracks execution. This will use the preferred core for the Python version and implementation being tested. - ``COVERAGE_TEST_CORES=...`` defines the cores to run tests on. Three cores are available, specify them as a comma-separated string: - ``ctrace`` is a sys.settrace function implemented in C. - ``pytrace`` is a sys.settrace function implemented in Python. - ``sysmon`` is a sys.monitoring implementation. - ``COVERAGE_AST_DUMP=1`` will dump the AST tree as it is being used during code parsing. There are other environment variables that affect tests. I use `set_env.py`_ as a simple terminal interface to see and set them. Of course, run all the tests on every version of Python you have before submitting a change. .. _pytest test selectors: https://doc.pytest.org/en/stable/usage.html#specifying-which-tests-to-run Lint, etc --------- I try to keep the coverage.py source as clean as possible. I use pylint to alert me to possible problems:: $ make lint The source is pylint-clean, even if it's because there are pragmas quieting some warnings. Please try to keep it that way, but don't let pylint warnings keep you from sending patches. I can clean them up. Lines should be kept to a 100-character maximum length. I recommend an `editorconfig.org`_ plugin for your editor of choice, which will also help with indentation, line endings and so on. Other style questions are best answered by looking at the existing code. 
Formatting of docstrings, comments, long lines, and so on, should match the code that already exists. Many people love `black`_, but I would prefer not to run it on coverage.py. Continuous integration ---------------------- When you make a pull request, `GitHub actions`__ will run all of the tests and quality checks on your changes. If any fail, either fix them or ask for help. __ https://github.com/nedbat/coveragepy/actions Dependencies ------------ Coverage.py has no direct runtime dependencies, and I would like to keep it that way. It has many development dependencies. These are specified generically in the ``requirements/*.in`` files. The .in files should have no versions specified in them. The specific versions to use are pinned in ``requirements/*.pip`` files. These are created by running ``make upgrade``. .. minimum of PYVERSIONS: It's important to use Python 3.8 to run ``make upgrade`` so that the pinned versions will work on all of the Python versions currently supported by coverage.py. If for some reason we need to constrain a version of a dependency, the constraint should be specified in the ``requirements/pins.pip`` file, with a detailed reason for the pin. Coverage testing coverage.py ---------------------------- Coverage.py can measure itself, but it's complicated. The process has been packaged up to make it easier:: $ make metacov metahtml Then look at htmlcov/index.html. Note that due to the recursive nature of coverage.py measuring itself, there are some parts of the code that will never appear as covered, even though they are executed. Contributing ------------ When you are ready to contribute a change, any way you can get it to me is probably fine. A pull request on GitHub is great, but a simple diff or patch works too. All contributions are expected to include tests for new functionality and fixes. If you need help writing tests, please ask. .. _fork the repo: https://docs.github.com/en/get-started/quickstart/fork-a-repo .. _editorconfig.org: http://editorconfig.org .. _tox: https://tox.readthedocs.io/ .. _black: https://pypi.org/project/black/ .. _set_env.py: https://nedbatchelder.com/blog/201907/set_envpy.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/dbschema.rst0000644000175100001770000001000400000000000017167 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. This file is meant to be processed with cog to insert the latest database schema into the docs. If it's out of date, the quality checks will fail. Running "make prebuild" will bring it up to date. .. _dbschema: =========================== Coverage.py database schema =========================== .. versionadded:: 5.0 Coverage.py stores data in a SQLite database, by default called ``.coverage``. For most needs, the :class:`.CoverageData` API will be sufficient, and should be preferred to accessing the database directly. Only advanced uses will need to use the database. The schema can change without changing the major version of coverage.py, so be careful when accessing the database directly. The ``coverage_schema`` table has the schema number of the database. The schema described here corresponds to: .. [[[cog from coverage.sqldata import SCHEMA_VERSION print(".. code::") print() print(f" SCHEMA_VERSION = {SCHEMA_VERSION}") print() .. ]]] .. code:: SCHEMA_VERSION = 7 .. 
[[[end]]] (checksum: 95a75340df33237e7e9c93b02dd1814c) You can use SQLite tools such as the :mod:`sqlite3 ` module in the Python standard library to access the data. Some data is stored in a packed format that will need custom functions to access. See :func:`.register_sqlite_functions`. Database schema --------------- This is the database schema: .. [[[cog import textwrap from coverage.sqldata import SCHEMA print(".. code-block:: sql") print() print(textwrap.indent(SCHEMA, " ")) .. ]]] .. code-block:: sql CREATE TABLE coverage_schema ( -- One row, to record the version of the schema in this db. version integer ); CREATE TABLE meta ( -- Key-value pairs, to record metadata about the data key text, value text, unique (key) -- Possible keys: -- 'has_arcs' boolean -- Is this data recording branches? -- 'sys_argv' text -- The coverage command line that recorded the data. -- 'version' text -- The version of coverage.py that made the file. -- 'when' text -- Datetime when the file was created. ); CREATE TABLE file ( -- A row per file measured. id integer primary key, path text, unique (path) ); CREATE TABLE context ( -- A row per context measured. id integer primary key, context text, unique (context) ); CREATE TABLE line_bits ( -- If recording lines, a row per context per file executed. -- All of the line numbers for that file/context are in one numbits. file_id integer, -- foreign key to `file`. context_id integer, -- foreign key to `context`. numbits blob, -- see the numbits functions in coverage.numbits foreign key (file_id) references file (id), foreign key (context_id) references context (id), unique (file_id, context_id) ); CREATE TABLE arc ( -- If recording branches, a row per context per from/to line transition executed. file_id integer, -- foreign key to `file`. context_id integer, -- foreign key to `context`. fromno integer, -- line number jumped from. tono integer, -- line number jumped to. foreign key (file_id) references file (id), foreign key (context_id) references context (id), unique (file_id, context_id, fromno, tono) ); CREATE TABLE tracer ( -- A row per file indicating the tracer used for that file. file_id integer primary key, tracer text, foreign key (file_id) references file (id) ); .. [[[end]]] (checksum: 6a04d14b07f08f86cccf43056328dcb7) .. _numbits: Numbits ------- .. 
automodule:: coverage.numbits :members: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/dict.txt0000644000175100001770000000460700000000000016367 0ustar00runnerdocker00000000000000API BOM BTW CPython CTracer Cobertura Consolas Cython DOCTYPE DOM HTML Jinja Mako OK PYTHONPATH TODO Tidelift URL UTF XML activestate apache api args argv ascii async basename basenames bitbucket bom boolean booleans btw builtin builtins bytecode bytecodes bytestring callable callables canonicalize canonicalized canonicalizes chdir'd clickable cmdline codecs colorsys combinable conditionalizing config configparser configurability configurability's configurer configurers cov coveragepy coveragerc covhtml css dataio datetime deallocating debounce decodable dedent defaultdict deserialize deserialized dict dict's dicts dirname django docstring docstrings doctest doctests encodable encodings endfor endif eventlet exe exec'd exec'ing execfile executability executable's execv expr extensibility favicon filename filenames filepath filereporter fname fnmatch fooey formfeed fpath fullcoverage gauge getattr gevent gevent's github gitignore globals greenlet hintedness hotkey hotkeys html htmlcov http https importlib installable instancemethod int ints invariants iterable iterables jQuery jquery json jython kwargs lcov localStorage manylinux matcher matchers merchantability metadata meth mischaracterize mischaracterized mixin modulename monkeypatch monkeypatching monospaced morf morfs multi multiproc mumbo mycode mypy namespace namespaces nano nbsp ned nedbat nedbatchelder newb nocover nosetests nullary num numbits numpy ok opcode opcodes optparse os outfile overridable parallelizing parsable parsers pathlib pathnames plugin plugins pragma pragma'd pragmas pre premain prepended prepending programmability programmatically py py's pyc pyenv pyexpat pylib pylint pyproject pypy pytest pythonpath pyw rcfile readme readthedocs realpath recordable refactored refactoring refactorings regex regexes reimplemented renderer rootname runnable runtime scrollbar septatrix serializable settrace setuptools sigterm sitecustomize sortable src stackoverflow stderr stdlib stdout str subclasses subdirectory subprocess subprocesses symlink symlinks syntaxes sys templating templite testability todo tokenization tokenize tokenized tokenizer tokenizes tokenizing toml tomllib tox traceback tracebacks tuple tuples txt ubuntu undecodable unexecutable unicode uninstall unittest unparsable unrunnable unsubscriptable untokenizable usecache username utf vendored versionadded virtualenv wikipedia wildcard wildcards www xdist xml xrange xyzzy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/excluding.rst0000644000175100001770000001545100000000000017416 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. This file is processed with cog to create the tabbed multi-syntax configuration examples. If those are wrong, the quality checks will fail. Running "make prebuild" checks them and produces the output. .. [[[cog from cog_helpers import show_configs .. ]]] .. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e) .. _excluding: =============================== Excluding code from coverage.py =============================== .. 
highlight:: python You may have code in your project that you know won't be executed, and you want to tell coverage.py to ignore it. For example, you may have debugging-only code that won't be executed during your unit tests. You can tell coverage.py to exclude this code during reporting so that it doesn't clutter your reports with noise about code that you don't need to hear about. Coverage.py will look for comments marking clauses for exclusion. In this code, the "if debug" clause is excluded from reporting:: a = my_function1() if debug: # pragma: no cover msg = "blah blah" log_message(msg, a) b = my_function2() Any line with a comment of "pragma: no cover" is excluded. If that line introduces a clause, for example, an if clause, or a function or class definition, then the entire clause is also excluded. Here the __repr__ function is not reported as missing:: class MyObject(object): def __init__(self): blah1() blah2() def __repr__(self): # pragma: no cover return "" Excluded code is executed as usual, and its execution is recorded in the coverage data as usual. When producing reports though, coverage.py excludes it from the list of missing code. Branch coverage --------------- When measuring :ref:`branch coverage `, a conditional will not be counted as a branch if one of its choices is excluded:: def only_one_choice(x): if x: blah1() blah2() else: # pragma: no cover # x is always true. blah3() Because the ``else`` clause is excluded, the ``if`` only has one possible next line, so it isn't considered a branch at all. Advanced exclusion ------------------ Coverage.py identifies exclusions by matching lines against a list of regular expressions. Using :ref:`configuration files ` or the coverage :ref:`API `, you can add to that list. This is useful if you have often-used constructs to exclude that can be matched with a regex. You can exclude them all at once without littering your code with exclusion pragmas. If the matched line introduces a block, the entire block is excluded from reporting. Matching a ``def`` line or decorator line will exclude an entire function. .. highlight:: ini For example, you might decide that __repr__ functions are usually only used in debugging code, and are uninteresting to test themselves. You could exclude all of them by adding a regex to the exclusion list: .. [[[cog show_configs( ini=r""" [report] exclude_also = def __repr__ """, toml=r""" [tool.coverage.report] exclude_also = [ "def __repr__", ] """, ) .. ]]] .. tabs:: .. code-tab:: ini :caption: .coveragerc [report] exclude_also = def __repr__ .. code-tab:: toml :caption: pyproject.toml [tool.coverage.report] exclude_also = [ "def __repr__", ] .. code-tab:: ini :caption: setup.cfg, tox.ini [coverage:report] exclude_also = def __repr__ .. [[[end]]] (checksum: adc6406467518c89a5a6fe2c4b999416) For example, here's a list of exclusions I've used: .. [[[cog show_configs( ini=r""" [report] exclude_also = def __repr__ if self.debug: if settings.DEBUG raise AssertionError raise NotImplementedError if 0: if __name__ == .__main__.: if TYPE_CHECKING: class .*\bProtocol\): @(abc\.)?abstractmethod """, toml=r""" [tool.coverage.report] exclude_also = [ "def __repr__", "if self.debug:", "if settings.DEBUG", "raise AssertionError", "raise NotImplementedError", "if 0:", "if __name__ == .__main__.:", "if TYPE_CHECKING:", "class .*\\bProtocol\\):", "@(abc\\.)?abstractmethod", ] """, ) .. ]]] .. tabs:: .. 
code-tab:: ini :caption: .coveragerc [report] exclude_also = def __repr__ if self.debug: if settings.DEBUG raise AssertionError raise NotImplementedError if 0: if __name__ == .__main__.: if TYPE_CHECKING: class .*\bProtocol\): @(abc\.)?abstractmethod .. code-tab:: toml :caption: pyproject.toml [tool.coverage.report] exclude_also = [ "def __repr__", "if self.debug:", "if settings.DEBUG", "raise AssertionError", "raise NotImplementedError", "if 0:", "if __name__ == .__main__.:", "if TYPE_CHECKING:", "class .*\\bProtocol\\):", "@(abc\\.)?abstractmethod", ] .. code-tab:: ini :caption: setup.cfg, tox.ini [coverage:report] exclude_also = def __repr__ if self.debug: if settings.DEBUG raise AssertionError raise NotImplementedError if 0: if __name__ == .__main__.: if TYPE_CHECKING: class .*\bProtocol\): @(abc\.)?abstractmethod .. [[[end]]] (checksum: ef1947821b8224c4f02d27f9514e5c5e) The :ref:`config_report_exclude_also` option adds regexes to the built-in default list so that you can add your own exclusions. The older :ref:`config_report_exclude_lines` option completely overwrites the list of regexes. The regexes only have to match part of a line. Be careful not to over-match. A value of ``...`` will match any line with more than three characters in it. A similar pragma, "no branch", can be used to tailor branch coverage measurement. See :ref:`branch` for details. Excluding source files ---------------------- See :ref:`source` for ways to limit what files coverage.py measures or reports on. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/faq.rst0000644000175100001770000001541400000000000016202 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. _faq: ================== FAQ and other help ================== Frequently asked questions -------------------------- Q: Why are some of my files not measured? ......................................... Coverage.py has a number of mechanisms for deciding which files to measure and which to skip. If your files aren't being measured, use the ``--debug=trace`` :ref:`option `, also settable as ``[run] debug=trace`` in the :ref:`settings file `, or as ``COVERAGE_DEBUG=trace`` in an environment variable. This will write a line for each file considered, indicating whether it is traced or not, and if not, why not. Be careful though: the output might be swallowed by your test runner. If so, a ``COVERAGE_DEBUG_FILE=/tmp/cov.out`` environment variable can direct the output to a file instead to ensure you see everything. Q: Why do unexecutable lines show up as executed? ................................................. Usually this is because you've updated your code and run coverage.py on it again without erasing the old data. Coverage.py records line numbers executed, so the old data may have recorded a line number which has since moved, causing coverage.py to claim a line has been executed which cannot be. If old data is persisting, you can use an explicit ``coverage erase`` command to clean out the old data. Q: Why are my function definitions marked as run when I haven't tested them? ............................................................................ The ``def`` and ``class`` lines in your Python file are executed when the file is imported. Those are the lines that define your functions and classes. 
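For instance, in a small module like this (a made-up example), importing the
module executes the ``def`` line, while the body remains unexecuted until the
function is actually called:

.. code-block:: python

    # mymod.py (a made-up example)

    def greet(name):                    # executed at import time
        return f"Hello, {name}!"        # executed only when greet() is called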
They run even if you never call the functions. It's the body of the functions that will be marked as not executed if you don't test them, not the ``def`` lines. This can mean that your code has a moderate coverage total even if no tests have been written or run. This might seem surprising, but it is accurate: the ``def`` lines have actually been run. Q: Why do the bodies of functions show as executed, but the def lines do not? ............................................................................. If this happens, it's because coverage.py has started after the functions are defined. The definition lines are executed without coverage measurement, then coverage.py is started, then the function is called. This means the body is measured, but the definition of the function itself is not. The same thing can happen with the bodies of classes. To fix this, start coverage.py earlier. If you use the :ref:`command line ` to run your program with coverage.py, then your entire program will be monitored. If you are using the :ref:`API `, you need to call coverage.start() before importing the modules that define your functions. Q: My decorator lines are marked as covered, but the "def" line is not. Why? ............................................................................. Different versions of Python report execution on different lines. Coverage.py adapts its behavior to the version of Python being used. In Python 3.7 and earlier, a decorated function definition only reported the decorator as executed. In Python 3.8 and later, both the decorator and the "def" are reported. If you collect execution data on Python 3.7, and then run coverage reports on Python 3.8, there will be a discrepancy. Q: Can I find out which tests ran which lines? .............................................. Yes! Coverage.py has a feature called :ref:`dynamic_contexts` which can collect this information. Add this to your .coveragerc file: .. code-block:: ini [run] dynamic_context = test_function and then use the ``--contexts`` option when generating an HTML report. Q: How is the total percentage calculated? .......................................... Coverage.py counts the total number of possible executions. This is the number of executable statements minus the number of excluded statements. It then counts the number of those possibilities that were actually executed. The total percentage is the actual executions divided by the possible executions. As an example, a coverage report with 1514 statements and 901 missed statements would calculate a total percentage of (1514-901)/1514, or 40.49%. :ref:`Branch coverage ` extends the calculation to include the total number of possible branch exits, and the number of those taken. In this case the specific numbers shown in coverage reports don't calculate out to the percentage shown, because the number of missing branch exits isn't reported explicitly. A branch line that wasn't executed at all is counted once as a missing statement in the report, instead of as two missing branches. Reports show the number of partial branches, which is the lines that were executed but did not execute all of their exits. Q: Coverage.py is much slower than I remember, what's going on? ............................................................... Make sure you are using the C trace function. Coverage.py provides two implementations of the trace function. The C implementation runs much faster. To see what you are running, use ``coverage debug sys``. 
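If you would rather check from Python, the snippet below asks the same question; note that it leans on an assumption about coverage.py's internal layout (the compiled extension living in the ``coverage.tracer`` module), which is not a public, guaranteed interface::

    try:
        from coverage.tracer import CTracer  # the C extension, if it was built
    except ImportError:
        print("C tracer unavailable -- the slower pure-Python tracer is in use")
    else:
        print("C tracer available")

Either way, ``coverage debug sys`` is the authoritative check.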
The output contains details of the environment, including a line that says either ``CTracer: available`` or ``CTracer: unavailable``. If it says unavailable, then you are using the slow Python implementation. Try re-installing coverage.py to see what happened and if you get the CTracer as you should. Q: Isn't coverage testing the best thing ever? .............................................. It's good, but `it isn't perfect`__. __ https://nedbatchelder.com/blog/200710/flaws_in_coverage_measurement.html Q: Where can I get more help with coverage.py? .............................................. You can discuss coverage.py or get help using it on the `Python discussion forums`_. If you ping me (``@nedbat``), there's a higher chance I'll see the post. .. _Python discussion forums: https://discuss.python.org/ Bug reports are gladly accepted at the `GitHub issue tracker`_. .. _GitHub issue tracker: https://github.com/nedbat/coveragepy/issues `I can be reached`__ in a number of ways. I'm happy to answer questions about using coverage.py. __ https://nedbatchelder.com/site/aboutned.html History ------- Coverage.py was originally written by `Gareth Rees`_. Since 2004, `Ned Batchelder`_ has extended and maintained it with the help of `many others`_. The :ref:`change history ` has all the details. .. _Gareth Rees: http://garethrees.org/ .. _Ned Batchelder: https://nedbatchelder.com .. _many others: https://github.com/nedbat/coveragepy/blob/master/CONTRIBUTORS.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/howitworks.rst0000644000175100001770000001132200000000000017645 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. _howitworks: ===================== How coverage.py works ===================== For advanced use of coverage.py, or just because you are curious, it helps to understand what's happening behind the scenes. Coverage.py works in three phases: * **Execution**: Coverage.py runs your code, and monitors it to see what lines were executed. * **Analysis**: Coverage.py examines your code to determine what lines could have run. * **Reporting**: Coverage.py combines the results of execution and analysis to produce a coverage number and an indication of missing execution. The execution phase is handled by the ``coverage run`` command. The analysis and reporting phases are handled by the reporting commands like ``coverage report`` or ``coverage html``. As a short-hand, I say that coverage.py measures what lines were executed. But it collects more information than that. It can measure what branches were taken, and if you have contexts enabled, for each line or branch, it will also measure what contexts they were executed in. Let's look at each phase in more detail. Execution --------- At the heart of the execution phase is a trace function. This is a function that the Python interpreter invokes for each line executed in a program. Coverage.py implements a trace function that records each file and line number as it is executed. For more details of trace functions, see the Python docs for `sys.settrace`_, or if you are really brave, `How C trace functions really work`_. Executing a function for every line in your program can make execution very slow. Coverage.py's trace function is implemented in C to reduce that overhead. 
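To make that concrete, here is a deliberately tiny sketch of a line-recording trace function built on `sys.settrace`_; it only illustrates the idea and is not coverage.py's actual implementation::

    import sys
    from collections import defaultdict

    executed = defaultdict(set)      # filename -> set of executed line numbers

    def toy_tracer(frame, event, arg):
        # The interpreter reports a "line" event for each line it executes.
        if event == "line":
            executed[frame.f_code.co_filename].add(frame.f_lineno)
        return toy_tracer            # keep tracing inside called functions

    sys.settrace(toy_tracer)
    # ... run the code being measured ...
    sys.settrace(None)

Coverage.py's real trace function gathers the same kind of data, with much more machinery around it.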
It also takes care to not trace code that you aren't interested in. When measuring branch coverage, the same trace function is used, but instead of recording line numbers, coverage.py records pairs of line numbers. Each invocation of the trace function remembers the line number, then the next invocation records the pair `(prev, this)` to indicate that execution transitioned from the previous line to this line. Internally, these are called arcs. As the data is being collected, coverage.py writes the data to a file, usually named ``.coverage``. This is a :ref:`SQLite database ` containing all of the measured data. .. _sys.settrace: https://docs.python.org/3/library/sys.html#sys.settrace .. _How C trace functions really work: https://nedbatchelder.com/text/trace-function.html Plugins ....... Of course coverage.py mostly measures execution of Python files. But it can also be used to analyze other kinds of execution. :ref:`File tracer plugins ` provide support for non-Python files. For example, Django HTML templates result in Python code being executed somewhere, but as a developer, you want that execution mapped back to your .html template file. During execution, each new Python file encountered is provided to the plugins to consider. A plugin can claim the file and then convert the runtime Python execution into source-level data to be recorded. Dynamic contexts ................ When using :ref:`dynamic contexts `, there is a current dynamic context that changes over the course of execution. It starts as empty. While it is empty, every time a new function is entered, a check is made to see if the dynamic context should change. While a non-empty dynamic context is current, the check is skipped until the function that started the context returns. Analysis -------- After your program has been executed and the line numbers recorded, coverage.py needs to determine what lines could have been executed. Luckily, compiled Python files (.pyc files) have a table of line numbers in them. Coverage.py reads this table to get the set of executable lines, with a little more source analysis to leave out things like docstrings. The data file is read to get the set of lines that were executed. The difference between the executable lines and the executed lines are the lines that were not executed. The same principle applies for branch measurement, though the process for determining possible branches is more involved. Coverage.py uses the abstract syntax tree of the Python source file to determine the set of possible branches. Reporting --------- Once we have the set of executed lines and missing lines, reporting is just a matter of formatting that information in a useful way. Each reporting method (text, HTML, JSON, annotated source, XML) has a different output format, but the process is the same: write out the information in the particular format, possibly including the source code itself. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/index.rst0000644000175100001770000001713600000000000016545 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt =========== Coverage.py =========== Coverage.py is a tool for measuring code coverage of Python programs. 
It monitors your program, noting which parts of the code have been executed, then analyzes the source to identify code that could have been executed but was not. Coverage measurement is typically used to gauge the effectiveness of tests. It can show which parts of your code are being exercised by tests, and which are not. The latest version is coverage.py |release|, released |release_date|. It is supported on: .. PYVERSIONS * Python 3.8 through 3.12, and 3.13.0a3 and up. * PyPy3 versions 3.8 through 3.10. .. ifconfig:: prerelease **This is a pre-release build. The usual warnings about possible bugs apply.** The latest stable version is coverage.py 6.5.0, `described here`_. .. _described here: http://coverage.readthedocs.io/ For Enterprise -------------- .. image:: media/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White.png :width: 75 :alt: Tidelift :align: left :class: tideliftlogo :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme `Available as part of the Tidelift Subscription. `_ |br| Coverage and thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. If you want the flexibility of open source and the confidence of commercial-grade software, this is for you. `Learn more. `_ .. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=docs Quick start ----------- Getting started is easy: #. Install coverage.py:: $ python3 -m pip install coverage For more details, see :ref:`install`. #. Use ``coverage run`` to run your test suite and gather data. However you normally run your test suite, you can use your test runner under coverage. .. tip:: If your test runner command starts with "python", just replace the initial "python" with "coverage run". ``python something.py`` becomes ``coverage run something.py`` ``python -m amodule`` becomes ``coverage run -m amodule`` Other instructions for specific test runners: .. tabs:: .. tab:: pytest If you usually use:: $ pytest arg1 arg2 arg3 then you can run your tests under coverage with:: $ coverage run -m pytest arg1 arg2 arg3 Many people choose to use the `pytest-cov`_ plugin, but for most purposes, it is unnecessary. .. tab:: unittest Change "python" to "coverage run", so this:: $ python3 -m unittest discover becomes:: $ coverage run -m unittest discover .. tab:: nosetest .. note:: Nose has been `unmaintained since at least 2015 `_. *You should seriously consider using a different test runner.* Change this:: $ nosetests arg1 arg2 to:: $ coverage run -m nose arg1 arg2 Coverage doesn't distinguish between tests and the code being tested. We `recommend that you include your tests in coverage measurement `_. To limit coverage measurement to code in the current directory, and also find files that weren't executed at all, add the ``--source=.`` argument to your coverage command line. You can also :ref:`specify source files to measure ` or :ref:`exclude code from measurement `. #. Use ``coverage report`` to report on the results:: $ coverage report -m Name Stmts Miss Cover Missing ------------------------------------------------------- my_program.py 20 4 80% 33-35, 39 my_other_module.py 56 6 89% 17-23 ------------------------------------------------------- TOTAL 76 10 87% #. For a nicer presentation, use ``coverage html`` to get annotated HTML listings detailing missed lines:: $ coverage html .. 
ifconfig:: not prerelease Then open htmlcov/index.html in your browser, to see a `report like this`_. .. ifconfig:: prerelease Then open htmlcov/index.html in your browser, to see a `report like this one`_. .. _report like this: https://nedbatchelder.com/files/sample_coverage_html/index.html .. _report like this one: https://nedbatchelder.com/files/sample_coverage_html_beta/index.html .. _nose state: https://github.com/nose-devs/nose/commit/0f40fa995384afad77e191636c89eb7d5b8870ca .. _include tests: https://nedbatchelder.com/blog/202008/you_should_include_your_tests_in_coverage.html Capabilities ------------ Coverage.py can do a number of things: - By default it will measure line (statement) coverage. - It can also measure :ref:`branch coverage `. - It can tell you :ref:`what tests ran which lines `. - It can produce reports in a number of formats: :ref:`text `, :ref:`HTML `, :ref:`XML `, :ref:`LCOV `, and :ref:`JSON `. - For advanced uses, there's an :ref:`API `, and the result data is available in a :ref:`SQLite database `. Using coverage.py ----------------- There are a few different ways to use coverage.py. The simplest is the :ref:`command line `, which lets you run your program and see the results. If you need more control over how your project is measured, you can use the :ref:`API `. Some test runners provide coverage integration to make it easy to use coverage.py while running tests. For example, `pytest`_ has the `pytest-cov`_ plugin. You can fine-tune coverage.py's view of your code by directing it to ignore parts that you know aren't interesting. See :ref:`source` and :ref:`excluding` for details. .. _pytest: http://doc.pytest.org .. _pytest-cov: https://pytest-cov.readthedocs.io/ .. _contact: Getting help ------------ If the :ref:`FAQ ` doesn't answer your question, you can discuss coverage.py or get help using it on the `Python discussion forums`_. If you ping me (``@nedbat``), there's a higher chance I'll see the post. .. _Python discussion forums: https://discuss.python.org/ Bug reports are gladly accepted at the `GitHub issue tracker`_. GitHub also hosts the `code repository`_. .. _GitHub issue tracker: https://github.com/nedbat/coveragepy/issues .. _code repository: https://github.com/nedbat/coveragepy Professional support for coverage.py is available as part of the `Tidelift Subscription`_. `I can be reached`_ in a number of ways. I'm happy to answer questions about using coverage.py. .. _I can be reached: https://nedbatchelder.com/site/aboutned.html .. raw:: html

For news and other chatter, follow the project on Mastodon: @coveragepy@hachyderm.io.

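If you want the API route mentioned under "Using coverage.py" above, the pattern is small. A minimal sketch, with a one-line computation standing in for your own program::

    import coverage

    cov = coverage.Coverage()
    cov.start()

    # ... call the code you want measured; a stand-in here:
    total = sum(n * n for n in range(10))

    cov.stop()
    cov.save()
    cov.report()      # or cov.html_report() for the annotated listings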
More information ---------------- .. toctree:: :maxdepth: 1 install For enterprise cmd config source excluding branch subprocess contexts api howitworks plugins other contributing trouble faq Change history migrating sleepy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/install.rst0000644000175100001770000000475000000000000017102 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. _install: ============ Installation ============ .. highlight:: console .. _coverage_pypi: https://pypi.org/project/coverage/ .. _setuptools: https://pypi.org/project/setuptools/ You can install coverage.py in the usual ways. The simplest way is with pip:: $ python3 -m pip install coverage .. ifconfig:: prerelease To install a pre-release version, you will need to specify ``--pre``:: $ python3 -m pip install --pre coverage or the exact version you want to install: .. parsed-literal:: $ python3 -m pip install |coverage-equals-release| .. _install_extension: C Extension ----------- Coverage.py includes a C extension for speed. It is strongly recommended to use this extension: it is much faster, and is needed to support a number of coverage.py features. Most of the time, the C extension will be installed without any special action on your part. You can determine if you are using the extension by looking at the output of ``coverage --version``: .. parsed-literal:: $ coverage --version Coverage.py, version |release| with C extension Documentation at |doc-url| The first line will either say "with C extension," or "without C extension." If you are missing the extension, first make sure you have the latest version of pip in use when installing coverage. If you are installing on Linux, you may need to install the python-dev and gcc support files before installing coverage via pip. The exact commands depend on which package manager you use, which Python version you are using, and the names of the packages for your distribution. For example:: $ sudo apt-get install python-dev gcc $ sudo yum install python-devel gcc $ sudo apt-get install python3-dev gcc $ sudo yum install python3-devel gcc A few features of coverage.py aren't supported without the C extension, such as concurrency and plugins. Checking the installation ------------------------- If all went well, you should be able to open a command prompt, and see coverage.py installed properly: .. parsed-literal:: $ coverage --version Coverage.py, version |release| with C extension Documentation at |doc-url| You can also invoke coverage.py as a module: .. 
parsed-literal:: $ python3 -m coverage --version Coverage.py, version |release| with C extension Documentation at |doc-url|
coverage-7.4.4/doc/media/ [directory]
coverage-7.4.4/doc/media/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White.png [binary PNG image data omitted]
coverage-7.4.4/doc/media/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White_small.png [binary PNG image data omitted]
coverage-7.4.4/doc/media/sleepy-snake-600.png [binary PNG image data omitted]
ฑU*:$uˆ!ลz3Rฅค„คK‰ž!กœ^œO‚โนdzŸŸฑปžฺบ;ฌ.๑vE๋]X}wบ่ึO‘นoซ0ฆฯG7ฦ‘g_ŽtธJBh†ศฬ’ธหEีฏๆZฦc๐V^ห„rุˆƒ\ ภ†ฟธI๎นb™\6sŠl:p$v ๆC‡8f่8Mญฉ‰ษ2ฏฒ ‚yLล๘Vฃ-,๗5ŒฃซVฤ0&3Fฐ&๓ทฑŸ n"i)$X 3qไˆ$‡f?pŸaลฒ๔œJ!)!™ขูmFiพ่jD>#Kำีj๐Šสฦฺu2ฟฟฎš1 โนš3qd‹ไŒuq +AŸ$š }+๛ีs%Ib*"ฺณ‡ช]๖dy฿ฝอG0?”ญ9ิ&›qไึ9Fฐฦ๛–ฌœ!0ษ0‚5ษl๘c"@ฮM๓{W๔…#WggFg–bMกC(๘ฌ๘=>๑๛a($LTฐYœ{z’ุP=aขูnV๐ๅะษšพO›๊้9ฌ้๋D(*W$Mฏ'๑)็YŠ’:6’>2B*;lƒjฯ†Bา?,๎ๅวSใu†oˆขT…Jฮ€ัะ๗ม๗jละWชoŽ ็ฌฃณ8‰ฬlฤำ‚ๆI‰ฦ-6ežŠฦ๏Cnล1 y+r?2“*Sฮ้I?้ฯE‚ถ ูXั˜ ’yi๏AO‚‡าw•~eฤ  #]^]งŠU,*wY0๏:ฑฐR”`าส$อˆ…0}๎ojฃ๏V>X^จX?g[ศFฐ‚%Cภ89FฐNŽ‘•˜œp"bƒพ; `ญ ‰๑‘ภŸF›ใaq๖ค“8ญJ˜z‚าิี#ณJ tา'cฆ_S“y;T๚1ั”Gi/&u๒)šOึ๑ญuEท่ไม2D.Ÿ+์Tvsิ1š:ฉT)12E‚UากไŠ[ํฐ/์ๅ–ฃร8M™Tพvมกž]๔aขBฦฒ$e$g,7Dซ8'ำ|ค]:`น๒&๛ฝE~็*ฌ:$ัjAๆๆส$Lใ!Y๑0ฟ ฯ,Gฮy‹โ]yถภqมRต์k]kˆUŠชV๖5่u’D&’ศ\˜A+rณ๕}‘’,n฿ kwื…R|~ิyE?ŒอgศP-†€!pB๘หฬ’!`ŒxIo—-UVŠT)าะ`J |IB•I5 ำ2อn,FS …‹๐ฝบKๆ•สฬ’|วแœชšGkgื ํAญ:ยวH๒$HLฐx WRษa˜ งmฤ“มHอ จน’ ใH‘จpLL ๕@S"฿I้;ึ •Ž{๛‘ r3eš;1๕ำสน!‰ฉ.ศ๑`+k‡šฃ๋›5ิHัLฌ:9Tจข๊ฟFฆJไ’,งรธ—ุ :หง#๙˜ฑน"ํƒ!o!ฦำ„UPอTญข/ษ*อจิ-ฉ.•๕ุท‘์4’|บ+0๙Nz bํจญ—†ฎพ0๊๕๗bฏBคw"7"ณ_nไzœZ2 C`lœ฿˜c฿ท;†ภdE@,„XาŠ\—Ÿžญ).๐€,!šป TฃโBโAŸ+๚f1&ำa8W“ธ0L‚Cp rA=ขู$ŠQรs`ฒโๆฮ|†‰ ึ‰ฺตแc/ขš0in๖/ศ>๘e#ข; ีi";ะึ!n)ฯ๏ชำ9N๐>ํ'M“$†TzH้ฃิ‰XRฌ“ ๏‡้Xฯr่p&ˆM Uฌญ›Tกใณ4‹’ฬQั"qฃ้mjaพdหั้mฦž‡จ" _HึฝC!Pอ".ัย้pโ๕+‘ฟƒฎ\›๐‡ฐrQVNฏ๒.ยfออภป พo๋vะmoฒ'ถ๐7#cฃUdฑ„ฐ๊"รY๕ฒ#mฺ)ƒมhŒด‘ ๙?‘\K†€!0~Œ`++9นP‚…Uj7 ภe H‘้%๙–~"!•..๛งŽๆฑCX}G’Aฟ’ฮไ$$$Xฌ‹ื้ไ:ดปsfฝ‰๔l‰}กฺTง๔{๗‰Jีm!—ฯž*Kฆ–IMaŽๅ๊๗๕‹ต›แ˜า•†ฉ่/Ÿw•.ึƒฎก`yี$Hrฆ๑ฟภpX†ญ9cU‰จQํฺX(PฮrเF‡qfcHˆ้eEREขีแ9g.8ย@ทMSกz™|ˆฤF‡;Žใ๏ะt%ช ขผŠนำผTฃhฎ\9ฃZๆร<8u๗ b>qฑHฮ\ตIำe>ฌธrฐ *"7|&๔+k„้$23ฆฯ~ํ1ฑ/๖๛RกฐCภ๖ c<(Y™‰„น 3 ’{๎~wฏq<๎=žว_็w&%X ๏mƒ‘ศฒœ๔@xFqกัpยๆรฃ%’‘z๘#ังŠํ ุD8ซU๕!ม“Tšกœ`ั7ˆแx พแฤธ:uำี uฆฑปGV–jŸ่NSู่‰ีตU%Ptบไื๋ทจ๒F““'ท^Ž‹ฮเG „‘Drๅ ‘ิ๑จ&P‚†Šูท๎’t;ถห~Qษcyš๏ธ‚ฒส ศ๏@[g&C†Dธ็Mh~2Ÿ˜่wE{?ก3เธ>€r‰o^2ว“•๊•ฝuฒbฦUงธ‘3Wsึฯพ`ถI‡w\*jLTื^€‚๗ณ6ฉสโฤ๚‚ŸTฌโ๔@BVZขทญ/้@q7Ž—lh๖a†ภมป๔k"ู ยยนsNฌžํSyจYภน›ฮ๔ Q@าE_#’พt8งปƒๅ๊@ชOTwธ=L9HัฯoิU€ ม{์ป็Ž›๊ฏณ~ฎ†$้"Q#„:F<@ข6ชซ ก.ญ฿SฏัิiF$นc9’ึร็าN๘]MGw*†šU ฿‹b)จ๘ศ$7tfxภ๋M…sผwj~†ง47G^พWn]ถPชH–XณAq[^คj๛๎๚ฤq0ปณบา9ลSส›ZหยชRฝz"ป›Žค!X๊ฅ W๏„ฉ๘!vXd=ฒVฃ%Cภ0ฦDภึ˜ะุ๓ฮ•$?ว’)คx’ฤฎปๅ‰Qธ๓‘ๅ“ๅเt3œ8Yาaš’ ท^แwfufฦfฟs1ฑพชGๆ4oๆ%W A™ใ Wิัว‡ไ :สี„L$*4งัฤF•…D‹‘เ$ิ๐ซpœุ'’จ64_’นๅลJฆH’เ๏2็ัhๆtบ†œ} I๚ุ)ถMขรDำ-*:$\]ศ[์;3ฟ3::•9๚f1vษ‰“‹J+ˆฌ—~P$ot ฒlจc$–ฌ๔MวฯvH๎*๒ฒ=อGผƒม ฐ๔ร7kˆึ›P๔7ศw!฿์š๐.›Sฃ*XuQž’ุงท๏“๊ๅฎU(• ศจภ•ุภ;a\,ށฤ6oฎ|dฆ™ฮTพเภ๏)อฮŒค%'… B&E"ูุ\๚z g๚๐{ไAdŒ9ภแฤ’!`๑๐ฯNK†ภ๙Œ็aNd<าDไ’)œjขยฑนy&&ฮน(Žฉู—˜˜=ฯฯ™63FH#ฟฏืx}พ7!แ0อ‡;;–…†ฃแ0ซ๗"ณZไgฐŠl ขน |}ฐ๒/ bเl-์ฬช*hŒ๖DyMœq้ภฮีuœไiJƒ*ฃˆqVzh^sฏผ##a ษaาฯFh„"4šต:๗๚]ีˆค#ž(ษย5"Fog:ั%S ธืu|Iงห^‡d…qฑธืแ^ฤฤฺr๐ฐฬ.-PขฤGh dฝ๔ฃi‘•ผ}๗ฉตrํ‚r1i‘กoMwŒKUtี yญฑ%แทถGฐ:0ณแฅCแศหแศฺ๊มน??9UZ่กW;Vb›yvว^๙“ tี`(V:ฑoFุb3Dำๅ๙,R’ฌƒฤ๏;FลYe…^#๕ฎœ^ใ~ไ ฌ๖„zvศึ&<๐9ไำ๐๖P‹%Cภธ 0‚uAพึ bP$UฬT’Hx”ƒ“eศ3‘ฏ๕'&O๑%งN๗':qBƒข$CˆE…ฤฉ๓˜DตŠd์จไ ุfNa1– ^ล cยบ"CจฌŸ—|๕XuGฅลq–ๆฬจฤ€ค…„ลM$Tœ{I่ภN’แศ@ผŠUxG" ;- mำงŽ๓h‹+r€:ฎ็5ํำ1ปeIˆ˜จ๘84Œcs๏๊-;ฯจฦq•l„™เ^€j5๎ิ"(H\E่ึฅฤสƒช~๔ฦหไkฎัxYKซหะXDะ6•$’ฑฒL๏+๛๊สŸ*›*๔‚ฏิหU์›ฎ†„ฉ’„r1๊ข;WIrำ์ต{๖ฃฉ2 ค/คqอkต๐E+V"Œ‘?ށฏ'~|-ษ!Q Nูฉ)^l“ไ}—Yฏท ท-†€!pBŒ`ปy–เTG๓ง/7ณ ณ/Aพ๋๓ฯKHห, `™0ฅพฆzก~ฺk8ŽฐœุŒ™’_"ู99’˜’ žิ/ 0aาŒ๚!j’’ล‹cหมžC{vJ_Kใฑฯณ/พฤขาh2b+<ฒ๙่5?UมgˆA9น „€ๆ9—ะะ๔GณcKัŒฆไK'qิ†ฃหลธ2๏t%N\5ว:I๐ุš+GKlŒ[zไŒ)>dฌ‹'‘•”ก"%Oธ""ošU-๛w๋๊œ ลˆ!\/V>%—~่บKไ+ฟ{Vร',้!uไ5œส2ฮึี๓ฆcK›|†u‘ G่x๎ฅm. 
ธR๑"ๅ;เŠM˜๔tลfVพฐsฏธdฎึหํ}ึ๏'•๙$mร๎.)vวๅ|Gภ3_K%zTฉpะE ž๙ d๖•คži์Žท2-K†€!`ภ้๛ tฝ๖อ88—‘ฬpfwfw‘<œ_‰nจK+ู้)๐–#‡dฐ๓H™Œ„ศ3cูjo๙์yพ”tฌ+(|hก\xA68Q:Gถ{F‚๏L<๊$สIฟฟOzฉใˆ ๖๕J_'"Ž7’ร๛wG๋ถlร G1•MMฒ พ>้“’ฬDฉA€P:ฒ3ฦXrข๖{กtลฬƒ.๑า†๑Aๅ…โUผใป{๏“ษk ?๛ไ @K\ (๎€ซŸIฉ)สGิฤฐ๒‹ช>nัธฃ>ŽฤhWฦ@ฃ$<ฏ!ุ็^„>˜u‹DSMqhŒชWพš•๒ญว_ะพฬE%bqส’cZtฬ†๐c๓๎(iŽ>๔าVmฎ‹ุWึIฟ)ฎฤuธI2*o๙Y™všธ.๙=BS0]:ซฆY๘Ÿ3๓ง‡?@J่pิ@ชจฆv˜ฯŒพtี๐รป๔ิ”ไEถlฦชYŒโ;‘ต/8Z2 Cเ8Œ`‰]8‹p‚:–X]ƒk๏†฿ิ๊ฤŒœยิ‰ฎ๚:+Wั๒ฬ]}ตทxฺL_NIนdย™9#„ Nำ$OสP-g_ฅ2<ฤfRถฦหzpO— JVznพTV๋\Yeh ฯCีซใpƒด6C{vศ /I๗ง|6vEค1ิ‡๐˜$'ษ+—ฮญAh‚>จZ‰PXผX๚ๆ„$pU๗ANๆtถ>i,ล*พ *Qƒแ†WPฅส๐!ูbI6จ๘ถtฎ:ข๎ๅWๅษอฏษ]—,R๒ยฒc%>ฯป4ั„ทฐฒDรQะ/ljaŽฬe๛n™8เเฺKไ›=/ป›ไMpZง—๖๕ฐ>GŸX_I0กษษสR’ฤ๔s๋G@ี–ถn%[}ธFยล} ฉnั็j+ร่O๏๒้Uุ›0MH>หU‘L K3๋ฆu †๚ณt๋’9ชFะ™”ฤDชคPฤSชุ‡!`'@ภึ ภฑ[g 2 ๆxล๊6|ท?9ํ†@zฆ ด5ห@{sd ]M…Wเ™zัr~ลช,5๙y@`ศ˜hZŠ`ฆ๏•Cฌpyx*ฦ๙ั_xแ่„็ใŸฃ—ษZrj†ค /Y…ลR1w‘/ปV๚๏๊’๖C๕า\ทO๊ถnแยฮ.ฝ‡ตพ6o–<ผa›‚Xฑืงฆ0šศH|Bd .9‰2บcสs่ฦQ=บoง๐แมQฒฒใึLRC“ฺฎฆVูะคๆ7๚Lqcใไ@@อb4rฃ็Mต ฒzึ๘-ฅษV_ฌไไฟŸ^/v๙Rฎ๒VชF๋šถ‡vh๒ใปaWอ&๋๗Tฟ0์Kจฤ’j*๚F*yxํFUณ~้E cน:ึเ๖Ÿคห5]ฒ]†ม ใีŒuEg๖JจmT Y7 #W8า„8Ž๓$Xท_ชRnข฿ pต อxM=0gๆj?`&ฦ ฒJ/๚‚1‘€RE›[^(ืa#•.๖o'"ฃธ=FO๐{ี\ุ† ฉ@ฬX6fU8ะk€“SแQ=ิ&์ร0 a8ูY2ฮ4œ‰HII)–`๐#ธ๖๏‰Yy–”™S<ุั๊๎$ฑ๒^๓ฎ๙ฎธ๗}Y+/๗\%งg่d†i+‚์šณ8มปyD๋8MC‰ซ[‰พGu๏=‡ฑ•„b'eๅUตชy‹d๊โๅชv๙qฝ*˜hดpณกZ9a’แ  ั]Eจcˆu›cำฤ๖cืNv`9>Gณ#กG7l“ „W ŸI kTu 1ฒtT$>๗ฌ@ NgŒ"ำเoEB?-าJึห็iบkแเ่L>ฤzIv˜Xว‡ํปแำะ  y?ฟ(*OU่s๖ ค’DฅŒัไ]Js+U*†‚`ZˆU†4๓ัDHœน!W2๒Uj1nฐM ุใŽั7‹โ0‘๔qK nฤธelk”.๖ีQ฿<บA4•/์sจ&ส†ถฮศžรญ>}ฑ7!›v$Tญั> Cภ8SฐŽฦรพ~๘3FโฤH< ้๋๛LRNAยTIkcซย"ูีพีทฟ-กlๆ|Iหฒภ%๛!จ(nrHภxi‡๛ิ้;j๛ค=๘Ÿฤ#?-Kี-๘qMYฐDŠฆิศฆง~ซฆรจว/จ4๔ H˜ดผฆRC0ุฆ๋XM…ˆ>Yฎ_'zš>5แษFญDxั\vอโ9๒๐บMXE7O๏Y Sฬ‡Tฮ“Š๊$ัโถ6๑‰j…5mehr$!น[ัืึม้=_๙OฆbนuฒoคX,ฯ} IzžูฑO ฬ"๘i๙ก9Ž๏ษrรย™<”JM‡์cnigP›จ`๑pี$๗ ค?Y|ฉุWb˜ƒ็zAึุ0ษ%W๎ัๅ}Ž›[ๅ8๏SิฌJGy%ฃจ“ญฐ):๛AV๕]แบS๋‡5)R4๚YษฯFcฏrRŒูiผQŒ`ฝQํ๙ฑpีQ’+ฆท!8žuq =ฆภƒd'ž๊E๛ๆฟ้z๙์๙’šษ@๋P<ยAGฉ"ั:o&_ืgŠ๒๚ŸฎN ƒถุ/;_ฃ๑'฿•ดโ2 มD5ˆgา3’‚aPAเLฤgji๎T•@eJฤJGิŠˆจJ J นQลำI[ฤ4๎ช@๎d?&$ ŒาฮXSื.ž+๋I|ชHไจ๖@ะw‰\rมk#–.ึN2s4กใ7*I\!ษิู; ่๕ห8?ด฿๘`$y’™› j]5ŠพR$YTฒฎ˜3Uฺปปไๅ}๕ำPฃ}ŒรSู$กB„‰ส๗t$ ไxHŽƒ‹๊๑ใX๓เO๖๔–z_chแYํ๊า ฐณIbภ„24A๊ ใsLx\แสD๐๘T์^บ0“ว๓Ye๛„ 8]๐ใฝ0฿ข๊l#`๋l#~แทว ‡?Wฎ๔t9ฮyeZq9LgรC2็าซ^u“TM“คT'VT“/็*u^ืi๋๘=Nำ ฿>gฝมn้lm–ฮๆร๐รj•๖รฒ‘ต?h"t:้vฟฤkแฯไCไ๔„Dœ ไภิO่ข๊r’N“"ษ–ฎภ‹อp|lดD’@’ม0 Pณฑ๓ކfูงFliรhๆItž!nm˜„K$x์รŠ้Fผ-็อŒR๖˜Gสู™ใh‡น’*าพฆfyuAน็๒‹•X’dัd๙–ๅ‹ไททหฎCญjฒคZล=ไ„ธˆชIใ%นž๙ั95n4M%U}ฑŠsฐท"๊q{ฯ๏ฏ~\ฮ๕‘๛$^)PฦŒง๔่ัภ*F8ชศ…๕ๅP๔/ช<าโ๑ธฐ C}aาFcœ^Œ`^<'{myrอุ๛ฯ๛iLa๗'`5Tซ0ศUดjE‹oบSJgฬ‘คด๔83`ŒXลt๗œc๕Š“:“ฯ3f้0&๘ึCuา๐ฺ6ฉE|ฌํkžา๛๑%ำf:qธRา4ว‘„๓!D…ื(๓(ฬkCˆทล๏ p*แ^'วUดต-ฟขW%?ล+นY)ˆR^ๅ์)ˆ Ÿ&<ึ1Vโ-ฎคฃ)Œ&นR(Z4 2|‚“P€cฃ Žzด[ผF๑‚*ิ๋Iฤ“f7š*เR๘6อยŠ=*MtvgฃTหุw’ฌฮR‚Eb• ลส]E˜ ŽfNฦ1KF๘ฎœdเW7ฑ$Xt`W“ ฺค ฑg5pช[๘หNฤส T•นW‡4 –ๆ:ซ@•FฑsHจ?-Vศ…Šว .ั,Hๅช1๚<ˆสO`K๐ฦ ฟ'r'ฬ…ฟp˜.ธq€ ำ…€ฌำ…ไไฎ‡–\ี๊C8$ศUAzi…t7{ปWพใ2๗ฒkเž “ZŠ &9‡,่u }GเN๙]ญMˆตSํ}Mึ>๔ำแ>zแŸณ+ กยe—ชร{R*ˆ‚Œ2ธฉK‚<๘NŒฆD'ั J(๖7์๋C„๙^2฿Œีˆ‡๗๎’-|กZดh‹'KZZ†ฐาmB'Tc ™ฒ˜๓บิsธ3วœฐmถF?6žsuIใIแ+’ร ”(ูŠฑแสA*;ผฌ)๎ี$‘ด{นp๒Oถอ—IฎVLฏTไ๔}๎N฿’ล๘T๓ฑ Xุ๏8๋ณ…ขฌT๕]ฃ‡9‰Sb`นๆ;gนg!วภพRขใ:์Y!ฮฮ ุ^:BIP%-Qท2?|z5,ˆrSnึศ&ษภFQมD‰f@ห[„1[2 q"`kœ@Yฑ1เฯซZMลd๔-LAืxa๒Jษ/‚\๑พฎO} +ํซ™-80เ+(็Mยdฬhž๔C-แ$ั&๛7ฟ,ฯ>๔€;u๛บ๒-๗H๑ิ้L:URเ7–€ไ”u 3นวุฏžว>”ฮ`rWฟ3‡UH๙ฌy้ผไŽ{dซ/ห†ว‘–ฺ=๚Diy%63‡ปๅสy3AฒเMขƒๆbวWฏ็J!p“=ขM,Z".jช\TƒH@ุg’œ?ำ##$*lทคททWˆSฅ‰๕h#ฮื“~ฒ/xฆN้9pฎ'๑ใชBงmวฐc=ผ็*uŒำล8b.ฆ๗ e๙ฌFฅ.๙แณ์•ฒD&๖‰ใฬ„_๎5^็ํ๘า๑๚ฑ‰fศœŒ mƒ? Ei๕ฝเษี+เƒW๑Ÿ ม๗&QŸnส ๕สKA๑ฐCภ#XฃใbWOŽg$ธโ5ฮ?…)+ แถ#tdOXqฒ่š›%ซ +‡ฟ ซ๎ฮ'bลiSg+˜ฉ,i8 wผ*OG vชH,บ๚f„aX&นe•’™W(>L๚œฉ91นœษ๗ิซvŸใ“%wูEฅ๎aฦล—jœญO<"ไbฒ฿าํ>XQ @3W ‚ป‡Ÿvt”พ(5รแœ„Š็ผฮXWtZ็jB๚X‘g‚฿ึ>lqSŽ•wU๙0ซก|ขฯrื#๛-}ี„>ะvุw็Lฟž์ลIRfaโ[v*ัa$n4q I๛ม>๓๏)Bก,`สHห)โา'…#Œ๗๛‚฿ง*U8ย~•้IฮJH%V(sฉำžณ์กึ6‰V—ŽT u0˜ฉ_W1บไ 
}ิV๐HАช’h]ะI_’ฌ๏พภb์๚Z.่Aเ ำ„€ฌำไ$ซฦ5\•!Cต’›i C๘…ผ๕็๊อคฬX~–บc…๘›ู‰พ~~ E*V0๕‘(‘XํธNžม7ตƒ>ฤณZ๖ๆปคQหfฮ“D8ใs†ก3~hศ™[IŠP >ใf๘S^snT๚Aญ[ฅๅไษjlใ“[Z.O~Ÿ๊’pjถdอ&yหลsdFi†%8ษr•Ÿ ฤ€zเลM0ฑฅ๋€์ๆึƒญ‚d๕์ฉr)ย0Pข#๎รGไ›wJ:bIeภ์ษ8QkwียฌL่้ฮqŸJbq5ีม๙&ฌrƒ6ธ฿cTq•#ใVQฉ"กrbQกvm‚ช“ณ๕ c]ผู!˜๑๘ณลไ๖ˆคˆค~Xz7h*คส„ย@IDAT$+#M"9cๆI k๔็Nx–˜Qหล L7นสซภ5ท)๗๖{tเธเอA˜g5แ;X˜!p0‚uมœUqR!นขI้ใ˜g>ฉ*—˜JHI๓๔ท๒_tญฒเสฅ`ส4พ&'ๅ๓Iตr c‘˜ดึืสฎuฯษ3?/T้๔ูฒึท ๖;ิญr|Z1นโHี™!Š  :m๓DคdŽ‹–๐™บhน์z้yy๚ว฿ึ~๏ห ฒฺ—ม7‹j‹kRำ›q$๔E"qบkู|๛๏m—,ฦ๗lu‚ฟ‘สหแฯๆ$,ป[Tฝบz4UEๅจ+oม„tšw}šœพฦ54ŽSŽŒไ‰>aA5า๖ฐ)t ศYHz ’ั_Š~Q๊ท††๐ˆ:ฟณ †ฑP1 ื8.Ž›dˆu1๑ภ1ฎJ$c›ŽB็A0ำ!]5€ณฆŽ์Tญ ก็ นย Jrๅ๓รั2FB,์^ฟfXฑชœณP\}“TฮY$iู4ฟัต ฆMฆ3Cชด๊ฃ?โgz’‹ุช=:า_|ำ’ƒ[๛%Xc#mP•`ยดฟzๆub'‘8๑™ ล๋W.“uป NV=SŽ’™ศ‰U%†Pเึ6 ุY˜™ล+ึ=ดฃฑงะเ‰ฺ8z0วsHˆข;็Ž3บ๋ใดกถQVิThฟศb๒Ž IU,&ค Lฤฌwz€ฐ‰Xh@‹็t„็–:Tถ๊ฑ•P)ยX$๐๋c๊ƒบสwํิ่ดวพPcPTeUจู-‚๏ŒฬJ{&\K†€!`ŒŠ€ฌQaฑ‹ว ภmnษไœS๊ษq e”Tx:๋ky_๒พ ำฐ] gด๓OตrฬฆฏซCื๊็ูm™vั YbU6c.ถๅษฤD!Uธ๏šŸด๐้ภŒํN:ำ๓ fyฅpถ' 9ิ#Š+9ฤB๒‘๏?ฌAM_๙มื@–๊4ฌIหวช9ฎงผฮ@ค๊ๆออ:ีซฤผลชqฝ๚๛†$jฮu ฆรL็‹ฉ?๑ีh็†‰HS=w1ๅ๐่‡ลml่„ORG5ษ%FZ/ 1tCSยY ฉ๋ %ธบ๐ภชฌ‡ŒใเJร'pk?ฮŒจŒE1๎~ลšืDฌ้ว•๊ล๚9|a?Gๅฯป๛oล?ส’– CภP'eƒม N$แ$W‹?1ƒฟƒสร$„Rศ•ไTVหu๖A๕UขI jึycไไศ๙ๆ@*jwn‘—๙…์\๛ ฎรq์OH๕ขe’’‘ฅQซtJี2ง็ฝNร๎\L๒„Ut$O บ–ำ๛hP‘b &b@ๅˆฆ=7ฑ฿ฤnˆvB$-‹‹$ขtาOวฦัTซH–”ฑ2ฃึP๒:วฆ&G\gDz7P)๋KCxw~O„1 ฅUWุn'}‡ฮž•dษฏa *”ย ฉฏนพSƒ’…ฏ5ถรqฐ\<ญ<๖.ฦฎL฿R‹R’C…7ฮ๖t#แa—ิดSซPr“’)–Eๆุ้'Eœug-ไิHวEฒFา่*s8Eyง ฯ{‡๐ฃ‡…ฮศ‘=ใ3ฎU0J5ืษŠœ,dษ0 ๎/ ฤ œ,hซ!๋`ด๊๏"ฟ™ŠK0ฝค<กป๑ ~ฝ็ ฿2ฦไ&ว$็‹#ป์yู€๘Qn๔๕?9™ฒ`ฉ$&งจ•’ยแIS‡๕บ?”TqถฦDอ๐ฤƒุ ๔๖@j’†][ฅ~ืซ๒ฺŽ?Jจ้DอธaRฝ2[ geHjn’ tส๖'๗Jห๎z}8ฅ'} ้ˆFฅ:#ร <บiงn๖ฬํqจPฅbฅ็8๖ึYŽีภ๏X–็*ZIX้H,ฅ4GีขYSปNšรdพ๊ก™sไŒ8p‘จ1ไƒ›จŒeง `9ีŽ˜L=๔ถw1rๅ‚iGCภ8 #XGม1ฉฟฤฆ&%Wี@โ‘"๋,žYV ซ:์ณ{>u)ฉ™ฟ X,จำDR๚˜PRญ6ฎ—Ÿ}Q$.{๋ปdึช+`,๏"šป1นhฟูฆ3ำ‹*•ม8ู‡ฮvi9ธ{n‘บอ[ฅn๛+G ญtnฅฬพฉ ชPIPฃz‡@ส” €),ณ8MH๖KRFฑท€9ถ}ขณ์ณค๋pฏ์z๊ <๓’^•(= C :ฝp ภบ=uˆ35[ EL”:ช๓๖ ๙):Gต)‹ฅ3;S pvI#ŠฉฟY2โg้ƒฑwมr,M<ˆ๗Nคbล๏|?ฌหฉ%๑่8\S&ก๙•ณุๅ๘๊๐ี’!`#2ทa1น ัๆ_ไœ(๎F r>๒ู“r๒ฐRฐNW]}฿ยHใJRิฬย๙ๅ&วื ถ็ก๙พVฯ๒GRฝpฉฌฤถ3ฅ5s@€`B‚ขฤ4 uฌQq’ฦL#Uœk=j๚;า“_ำAู๚ฬใRปyำ๐ใ+฿ณ@ช..มยฐ’-ท:ชTฒ'E<ยุTอ๗DŒ‚ฒ9†•ศ,Mร‚‚ )ž›'้…)๒O=/™ีIาs{๕๕EคŠUปท8,‹ฐyquaLคŽzฃีฒขแ„Fใฺ;.q๖O8P8ก#ช<}ซtฑN„ฑp‚)=yd[ฑ๘ฎๅยI}ดค s“h:ฮ“˜น Z;{qฝ3ZMฮ5ืค-W่ƒี=viปc“Sฐ&๛O€Cฒ]sวื7jf "พU›4K๏แน๚]’ูP’ำฐJ +ฺ(IqšxใŸ4 ๊ฆฬ๐w:ฐ}“ไ๏>ข•๐หŒ‹WKR:VษX…เg๓ฦ๚Œ)X็`๎ต ฑด†—ฉํPฝ4ี๎’ฝŸ—mฯ<3< UพDjฎ(‘*ฉผD๑'R™rผคฉชDฐบ-ุ๏ย>ุั'`1ฟjฅพ๔น "”ญฅoŸฅ์w๗‚คAษl‡ร|gX‚จ›้™WwHช%”SWRม‰#ฤ$‰๑”Bฟวฦ็t‚ฮูYDาแOv ตรq<๎„ำ™$จU:|%Žฝ#=ปใั’ c_1อƒฎยว๑ฐ,หq ้โผLง‹ก{•%ŽO$tLเjd๊'yyZิ> C`#`kฟ| Uฎ๒p/ศ๏Dๆฬฮ(ญH่‚jลt๓+s.ฝR BA7฿Qั*฿๐‡3า‘=~Nฒนงไฑ๏}UE~ู-*y œกppข}}Fl‹uธพUƒ}ˆตc‹์ูดFึ?DKช“ๆฟyถLปฌXŠๆfKิ UซขXูฯHใ$F๑ษ้“R๘ห'<ืgP‚ไŒŠา{fIrVข๒CO‹๛ํ%๘eฐ9$i03ึ!fิs;๗หๅุ‡>KTqa*#ูbซt'ฉ !๑เน๋๘อNL8คห!,w6[c_ฺ๛๚ดฏ$FLบฑ5Žนบ!๚s๚S „5*<บ๋$ฉ๖ฑއ›Vsฬ#๊๓€k$ฎ$W$`9ุ ˆdm์1:๕1–ฺฆโั/T๏ุ‡!`#มมbฒq%'ŠiศO`vฉย ๖'งz้™~’ซ) –ศๅo{nyรฅ๎ฎ9สŸำ~กตกN๎Wฅvหน๑-3W\b“:2bœ5ว ˆCน&ˆˆFŽJWkณ4๎ูŽXZๅๅ฿f๘‘KณDชWHูโ()0ฎ฿s@U ฎฒkhk“>nf Bมภœฉp๔N8ฉ œน0หeง%ƒฌ$Iฬp ู@gs†• ูr Yศ™ๆZnŒ*†N`์*&W}JILˆ{ิO+ัŸˆุ]N -’$ใ;จข,>็ชWฎyื่๔ฎG๐ Rย68Ž๏เO6Bฯ 9l‹Y2 C`Œ`ส$ธไ’ซ Œ๕IL(• Wƒˆo@|+Oq™ฎ}ฯ_bฃๆี’–•ฃ!8‹ผn่4ช&AlรรใžW^”๑oฐ๓\น๏_พ+…Sj@ฎA;5  #Xย[<ต1๐SนbฐฤŸa–Tr•’_’ุืาจ5๕ษV๕Šjล๐Šปำ>™jSใˆ?ย/๔uvศ+>$k\~๗{dแU7I2‚M:}=ช๋Wล ฤ +%Aฝ !)ถญy ํฯp?งฎ˜. ๎(“ฒ‹r%ฏ:1ฎ0ISญ€Kฆ3Iช†;;a[่ถ’บš+สๅฟบYพw๛#Jฎฒฆ%Kวž~I…๚eh`rำŸ\'ูˆ๔ƒเ›>๔„ดวถž้9p่จชIฆฐGแฎv,฿w@ฆ@ ›ZR,•ˆŸ‡rช[$ก!*F8ž6ขุ6‰ ัH๐O†\p”,’#&8r๏ฤิฤ$aรw๎ก˜‚จ๐$„1.9>X4’hฑฬq ืxฦน้ิ}\™ุe(kZ~N2Q†~อืฦxw,†ภคEภึไz๕|฿tฮฝ๙0KU`‚ ฦ“ซ{๑›RŽ๘Vมl€Kย๑zศสiฦT%ิIrีง๒฿|ๅ๓าˆํa๎๛ฏชzล>พ~ี ฃQ`0P?ย,X5ํ฿-;ื=-/๒แ‘,ฝ{ฬบฎ+๘r$ญซึ0ั‡‡`6นโl6‰ีpงpBขAฮ†eสŠ๙เ“wศฟ_๕K%W9ำSคk฿ ไeKsc›๗พปไาUหฅk [>–มม! 
|ต4‘ํฏ๎’=ปjฅ้P‹ผ๔ยFฉฏsศ6ช.“';:Dฐฯœฒ™WQ"iบฺุ“้;:M$œ?wT‹b\J‡๋‹๙bฉW0fš๖œ ฃั$แJ‚บ้)†U(ฬJืศํบАล*Uฆ„๏๎Q+dฝใIดB:กฦSฺส†ภ$Eภึไy๑tTก฿ศ5ศฟลฬL"ไM-*“ป?๑’[Zกซใ8๕œ[จฟWํิมฯ๊ฯ~TV๚Vl,’ž›ฏC# EงN1cฒฅสAGy๎Axธ๎5ู๒(V?ฉ˜w๓,Y๒๖j)[T ฤคขำzˆjา้๖ญn๔O”ื`ฟผกพ “ๆศO฿)O}้%ู๛ZIฉ@ŸA™พ๘™oJไสœ้ำ%ต8œg๑™ำฆสสK–ภ7iP†ƒาีู#;ทํ–gžZ'>ูgyฆฒา<้„ึฯึnฆMnปxžTไfฉ:Dลษ5กฝQE‹๏„uhแ”Ÿ2พVHแe\( ˆ๘?ฎ—เ”&™>ณjิ>๙'พ*๒ญOIEqฉ๔Gtฅ ซ&ฆคž6ต%ฎ ,ฬห—โU…ฒtล๙ำ{o‘็ž^'?ั#๒๊†ํZfฺŒ)๒ุk๛eํ–rๅ’9RE++‰ต๚P7-9าจŠh[ษS์1W}b7๑>3#y"‘ข–K”pKW ฒ ร1ธๆA๗่ึŽ`ผx่dtŸC93บเูั0ฦDภึ˜ะ\7\r5ฃyฌ"ไ*ตจ4มCs+งส๕Yษ-ซPŸซS%,g!gรฦผ0Eต"Tฤำ?ŽO!๏—๏ ศi†9ๅTxJๆKLพฌ—jMM‡”Xญy๒๋าW๋Œbั[ๆสฒwีHAM–šีq@1›Nb.ว{' ้ฒwฬึ?๖…uฒทlŸL›^ฅjTมC๒ร_~UฆUU)ษข‚ลฬไ‰ูP”๋!›ส/‹ๆฯ‘…ศท฿uฝ<๑ฯษ}๋งฒ๙‡h@ั๚ีหPด $ข5ญ(Oƒœพ^,—$iใ๘ˆuM฿ูKtMรMธแุW๎=่lํ Cภ8#Xว"rแ|็_ู4ed 3Tf‰!8ดHฎ˜nศงโ”ซS3ณiง๙ƒ“'< Iwo‡“๙eษ๕ทKีล–มูž็ิ๚้ฦฬโๆฯ=ฒoวซ๒ย#฿•†ญ๛ต๗ำ/Ÿ!K๎ญ”ชeEˆa…ีg๐Wา๋$็ฃ)pœ˜ณ๏$‰ำp๙}sคทต_ึ|๛Uiจ>(ำjช”้M๏—๊ซ2>Yุ0๚X^มwแ’-ช[Q'hkIQ‘ผ๓พ;ๅš›.“?>๑‚|๛+?–MฏlำžQัz๘ๅmR‘š$ซฮ’)นBdhp—(6 ฅ5ฑ2้Iฑˆํฑ‚n?๔+ า$ศภŸช`๑;~vธ็`<5โ9C=P ใ ั๒๎ŠDึลŸน$˜ŠiZไใ@ˆ]แ-ฎFd*]8๔่U์ฬ†€!`ฤ!`+Œ ่”,„n๓>‚Ymf*Wรไ๊_–ไWTว๖<5าr&p"โ4œwญ_#G๊kๅบ?+ud็–4LงขฐqโไŒI?+ฎ ฌ฿นM6?l~โqญซzE,ืฉXR ซGซ‰Mฌtpฑ%Y Œ>lำณ๒=๓ไภห‡[คฎผNฆ‚dํถ>Y7ษ‹#ณfี€ฐŒŒ’\ฒE\‡"ˆ•…๗T\P oฟ๛vน๒ฺUฒ๎๙๒_‘<ว—ค 3Eข๙y๒ฎ-ฏฉ”…UฮชC’œ“™ IdHŒ’^‚ไI9พ๓œ‰AH๑†Pณ!CF0๑;}ฐดœ๛@๐ียL๐๚‡iQฮM,Ÿ‰ฐG8๗f์ศ๊ˆ •0'ESqD ำฤ†€!0 ฃ6ฅ ]š083‘ำฏaๆXY%”œWขfA^fPฮ’้ณc{ ฦ?7cิเก>'ถา๖5OIo๛Yrใ’’™-C}˜ษปฦ฿OuŽQHHJ–ิต๖กŸศ๖1r•.ื|bน๖•%2๛ฦ*IษIRลŠABIHNก™sึ)ถชๆยกฐdฅสอ_\ญOFeokญฬœ=Mฟห็ฟ r2$ XMวˆ'J|$a<’h๕C๙*ศห•[oนNพเฟษ?๛'ฅนณO"Š|Eyผฟพo?๑ขlFุ'ชบฯ!NŽ\4jSฌ›ัๆIš๔ญƒแŒ˜๑.’จ `OLnฤDโค?'x‡Ld}Pน่0๏&’%ชZ<บฉk`ะท๓จ{y๘จuแ[p˜˜yจ^1š;S|Uฮ๛4 Cมบ๐~ 8›p–ผำฬ๛1หDฐiณทฟตQสgฮ“๛๕{RX]ฃA9า2ฦฌr–pกrล๘VCƒ๒ฺบ็ฐ’ฑJ]sณNโร๛Ž—๕`F%น๒<ืพ๚Š<–็~CŒฦ'—xฉ|เ๑ซeี๛ๆJfiš๛BPท\bunq8“p“82jB8๙#ท:Mต‹์jุ+SฆUศฏ~๚๒๘๏ž)qศ“ชใ่‰ฃฅมIผ?า/E๙๙๒-๗?&Ÿ๛ืษƒอ<ุ$3ฐ"อ†ฟxf์onSขโ:ซŸธญใน ฏ๐M้>„A,g[]!ศ`ำo็)šขช„qี +:แsฌL,K๓ *wว7w{bฑghz?1ี’๖a“#Xึ'นโ/นศ฿แ๔‘˜‘hkา๗|_|LŠชง;A9u็–Tpาcิ๔ฮๆรR๗๊)ญ™ฅํมก!UNอ$ˆ๙“ง#ล๊ภ5ฟ๘>bfฅ์z๑%ฌ +“{~tต\‘๙R4'!$4Sฌฮ-๚ฮยษ4ท๑)ฟจ@๙ำดล@ชOฅค,_~๋๒ศรOยฆLื=ช?ฃฐ ฝsI ส ญp4ฌŽ๓๗ี}๒ไK?“ป๎}ณผถ}ฏ>-*=๛ฒๆฅMRืาฎ*‰:ถตขฬ45ษนืญn\”‰ไ0‚;๗KไรŒมล7้˜้ห็๘e16–๋เ๎ะฉs#[uŸ9้xัGชœLฌZO์ร0  `๋เLฐ[|—๔ยๅ๑๛ศูค”`xh@gฬท๎k’WŽ8W๐gขขก3 ซฤ อ‡ษฑPƒฺณS*็-–ฬยโX.vS฿x’ฃZ๙ร‰ฯิ‚จ}๗๎–u?ˆ‡3ๅชฟ^&x๖™qU…Vv^วฒฯp_oยI’5–ฉ—–สM_ธDฑ9t96‡๎่“b%ษZปvƒ${’N‰`น]R_่0ฯ€ห—,–ฏ็gๅ๛ฟŠ–ไJํkตˆž(ษฉ๒ƒg^’๑ฝฝท+€ง๑$ง8;CWบ‘]’ิั็,อLว Y$J4'*ัร]RFลฉั๙sแ\sย9taoB%[ธศgh2ิ}Qnฌฤ็Cnแ๎…ฏCฦ’!`c"`kLh&์ฏข็K0ซIฉ ก>น กสg/8oฬ‚DV'=Ll”T™บxน’“c๐Gr์‰Žฯบ‰๓ืxYŸpo”ŸG%Œญ๕าŠห ิ\"—~pžไT"ผB.ฐฌC.&ื‘˜Etแ[jd๑]3คcWฟ„๒AT่ณ-๒™ฟฒิ7’€9๑~N5๑ฝ’hฑฦุJF,ณ;rƒแฅ_ศ?}ใoฅฃw”สสlฟำ._sฒ๗pซฌ๐์…™T๒+%X!ุ‘<žhNิัฃKฺ7Cภ0†8ืณ๎pG์ไu!@"ลจ‰—เืGYCbfถ:ตฟ๕3_ึy!ี}*ฆ7ึq๚็%L|P๊wพ s% ฿ศ7ž๖“ ซคaื˜?‡๘V?ำGฏ๛๔rนๆ“‹$gJฆใk‰มTซ“ ŠืAขA:ฒ๐Ž้ฒ่ฮ้านg@š‚‡dJuน|แ“_“ตk6H@FBœคฦ“ๆ;wณzร0&ศ-oพV{แ๙‹+@ฌ˜฿]+>๗’้๎;Šd16๋`ๆ๊A]1จO8ฤ(3%Iฟ `p Xช?kq)-)0ฌtฑŽBฌX&sd”ฤห(ช Uู๏อQ0ฒK†€!p4๖‹โh<&า7บwŠ๚N/ญ๖57zW~ท”ฯšXP4iŒ1cœล‘RI ฏTOG›๔wwมก‘พiพgืXVc[ สฆ''?ฤ๛คvใซAฑผืสŠwฯีi๖"ฑoฝg‚๓ฒ)bลญRs“ิ‹์ญƒjp~-<๒ห'ิส‡๛วš๋ศ€Hx่ะNS1แ+หหไ๓_ธ๚ษ๏ษeW-้’ƒV๚‹6HG‰๚C‚ฤ$œ๐ว†uะD์iPR\sฟSqข3{E^๖๐s์๓g๏:ใ(ฎ๖ป~๊๊ี–lcน7ภฦL๏-”„๖‡$B =@่ @€ะ‹้Z่cŒmภฝYฒี๋้ช๏›ฝ•Nฒd[ถl‹ฐOšฝู™ู7s;฿พ๗ๆ=g.บ๐‹ฦ๐J:ฦ‹YE(ฟk*Bฎ>มวฉถฃช๋yE๐”ฑ18`pเ็ฮ`tG€ๆฐHไ4ยฑP v4W”*ี`แมG(@ฃ}สู‡7สI ๊›๊’"‰KIC มไธ3เO9"…ใะฆšj๙๊ลวๅร฿งndไ๙r๙œƒ”๛…Lฤ3คV๏cชSiซF{ฌ_=sค* บ}ซคg&หฟ1[~\ฐ2,๋€,ฝ•9š}–Gญ8ด์ฑ;รี1œู!)ษ•อ!5ๅฅ๒า]–?zืุdฺ…cๅ„{ฆJาฐXekลษัW;dg฿(ล๓ีaงหJSฑ !„ธPNไฅgw์กv"ŒN฿l A–?เW6_นร2eคB1CGตขtณXTฎ’>าี\uบ_@3$Ž๚๒๐ธีคUŒฺq๏คI่XŒ?*๛๘)า ~56 ่โ€ฐบx๑Sฺำ๛ํL รmัพึช kฺฐ’o่ผR‰ ฑjฌฎ’>xSลคTชaAgณธ‚1{sํVy๗‘[คพค ็"ๅธ;ฆศกžตV˜ ucจ;Yถห;ฤ๔RnuXeิฑนชœสบJษสM—็ŸxCพdปk๑wDvต:ศŠŠ•Sฮ*Z=๘งžxฆ„GวศKืฒiฒแ‡๏”?ฎจ๘DๅšกหZf[v€Y`ฬ๎jj”o๓ผlูP„LIrKษิs๗ฐhกnด๙nŒ#ๆีu 
”91YŽฝํ@i-๓ˆฺ"ัั๒‹ฃ.”O>™#“}‡’ž~WrA}fๅfdสฟ_ธ[).ฐ†-Uั–Zˆ่”D ง$Z”JaŸ.ltํ ฮ๖ม‘HPฦ| แใ:7I๛ผ4๛"ฺxี5ปดโ;‘iƒปฏ‹Œใ ฌ90XfแŸu'๔๓ๆ๕iเ*\—ฆIฏส- ูCแญ} qุ4ำฯฺv#ปฒŸPjชู*Ÿ?๗จคBบf†jX}Jฐ8qšigƒ oัวoหโOC œ0fŸ ๙‡dŠถVt”iจwฃczปฃŠถlcO&๛+[ึ7JRf‚ส๗“šฦ:ฑํข‡๗ช์ํฅI4| 7o๑4}N‰cHŒT5ทย ผK†DG*[+Šํฅฟ+&-€4วพม=a•9ฏ<รorฺC๛Kมฌ,๑นเ”ธRl๐๒•6<ึ)“ฯกส/m*ƒ‡๗a๒7?ส'๏ฯQม 5้ฃ6ผ”*ตwธๅะC”'^พW•uา c๕ฏ–ฬ„ekฅ$^$Jฅ่fŽFƒจ—fั›ปv˜๙tโกp,–ทฝ;ข} \>ะยํeUyŒƒ ฌŸึะQษYhvŒลแ๔มฉจ%"%]ล๓ใฤิ๕^พooŒ“™’‚Ko\4_5&!ธฐ๏น‰ V ึ”ห๛ฃฎ9๊ฦค๐๘ก*ไ ™v๛๖:kวญ(ฐุ๗-ufแหˆrvXั2` ๙ผ~ษู?Uฆnดxห`฿ไnPsหฟdฆข kฯ5V๏c/ึqœzๆ1r็ƒืส:ุaๅŒฬ“…๐‘ล@ฯJญPฤผTฒ4วฃZป๘K hขรREม_Ž ฦ๐ผV'dhฯE#y๕๓ัO๔ุR Y๏ึW๙mมำมR{d6พ08`p0ึOgฐฏธr!q.bณ#“ี~ฺqงILR &FNƒงKูwk‹ฌ[8W…์‰ˆ‰ เo;/)๐„‰ฯ‰n๕wŸ๐๖dึ•๛หิฉ$V4ยT’ซ พ0+{-ีไ]๛เตd ปnwสูตฺป]Eั`ะถ0ผ็ฉs[๊คเfใบby้้ท ถC?Hธ์v๕ภ}!pขŸ+˜ฃห๙ฟ?].ำyR ต ๊ใ๏แดซRId›๋'Tั4^ื๛A^eต8d>–็CrBฅหซ˜฿ ้˜2zWผgฮPาิฆknŒA44…ๆ0๖ 08ะฯl[๋Œcกะ๛๊dฬถุฆŠu,g๔„เ๗พšQฏšู๔๚ต๏DMตZlน่ฤ!a‡•AtFZถvฉฬ{ใU™xฦh™r~Wn >›+&ๆaณ5’ฦ๖fค]มฒx-อ`YL4xO/]ั;ปํVร0#QบE้U|๔ลST าน(๗9wvKะืWศต]m้T/ชณฆ€hฬะ๐]W6cฯเ€มƒ!ะ'ํCฦ๎ ไ์”^qFน˜ํsฦ'ชฉyึyI|Œฟ}^ฉ)ƒg๗ ภHnิVŸั0;)  ZF฿J>ฉฺธV5&,2Zฉ ™ง็ฆรื”m’—oพR’Fฺeๆฃ%"ฮฉญคxaฐ๏ D๐ธซry๕ขฯeัซk•?.Jณจๆ์‘_VปEถฎญ—^\ฃ]สํ_1ฉr‡yู•€=GคMฦžšฏ๒oฉna#rิซณ฿Uพฑ๖ด‹•ฑท฿+ Qqrm—*IZ9โ’ถ6ถ(๐ิ –2pWรค‹q”hั-ม๗k[Zแxมฐฏam\๑ ช ทG”`:ิSsถO{i{ญ5ฮ0808`ฌมะ ;nƒ๖ชํ˜ฺ'Cย๐ตป•!Iๆศฑ˜์ƒ;gฦ=@h" ขน๑1I9œซxœ+yŽ’(ฮ1mEฉj‘32Jk'YH3™‡ื3๔ฝฎ๒uแ—)^†พdฎT{!นr5บๅ•ห?“uŸ—ส;W#หฺ€๛รก˜ง_„หšซฺไ›ๆIหึ6ธจะ@ฟJ้_ๆ5‘@Rฌ๘์he‹Eฅุๆ6๘ฅ–%w๐ฐ,v1P†ณ๗ฺฟVชŒ7ไ คี…คQc ไฝEซd)$ZงC –J9ผ(ตข;†ฌ„Xt ๎@ซฆฉM’เv‚็ุOjbืŠ~#.ๆq^ัM(8ฎ๒ว 08ส}โ=f์.๐™ฏฟ^ŸวY$,>5เชl|์i’˜‘ญ$Ejยเvk@A@4X็ผไ๓ด+ท Mu5ฎฦFqต4mญสถสัaท;:<๎vมฮn7ีm.Wญjkn„ L3JVuฮd(€ฌfำ:Yั2|ษœ ิ‚|;S„6งUฦ_ Ÿ_%ใใๅkพ‘˜ดHบ‡ภPน<ุIษ๓RbDj&ฑบฃ๐i๒6ฅฐชNosV  สา[ฌ‰go_.ญ›แ+M‹วwืMศฐWseHR"ิx>€ไุ{5;y”eS‚d’ƒfNQื”–”K๎ะL๙B๙pไ4ค8F5คฅญFLƒไ7SQื(๑‘aP7ฺีชCŸ–@ jhช iŸE๘€ใ&’X–ืoน+f l—ภฺ.{ลIJ้ša,าIl‘ลม่ท‚•ƒSUเd/@ฯ€ทc†โ'3‚*ตะี KTฎ_จXป2ฐ๘ำฒzถฉkIv'Ÿ5,RๅซฺฐRช6ี^๚ธR๓%0„๗ร8ำฒ…๊สI็ไKXœC|ํศณG'ํ๎ ูoœ˜ipoฐษ”sF*€UตกAฌQfyœๅœ็ŽBศ™ 1AสE— x'๓ํีA{.%XHุ^๎;วu๕ๅ-™ฆ€"ม8บ•Œƒ~ฌ(Œอˆ”ฉ็ํ' ž[%›=[เถ!_พ๙rก|๖ั\9็œS:ัทk๘‹2ZGศžัcFช ะ'๖iF@่ัฃ d๙†bR็€‚eภN๑ๅ@O๔า๎† 0%6J๋ึ4ไeู\mHwvŒ๙เ๐๏–MGค ฟมฉฝย{=r_ 08ส`…rcp๏ำ5ƒษแ…k›Ÿ–ฅVF Lณ1!A’ขT€ ฦŒ™คฝฅIสV/“ต฿}ํ_๑อg|…gฝL–"fปีษi+dD6ปอ๖ Tผ๏xใฯํ๐uXต„ฮภš๙_›งŸq>Wๆ P Iฌจซ~Kฅฬy้Y+I”์"Qน๗อ‡R9น5#๐s_8Zž?๛C‰สuHปี'ณฯ๛HŽบq๘‘)Ž(›+‘|%‚้๋ถpŽ1RkK๋ๆ็lื5*w??‚ๅฑoWT"รฯR๑i+ืยbพlแ6) ЁV.['‡9C7ิƒวƒญ๐˜N')!Aฮ>๋™0ฅPฆ>YŠV\)–"[1oู*ฒฆHฝเ)’O๐4Xืo™ld™”rQJฅืหัทอ%Dภ่ 1ยฏŸ0ถ ่ƒป83๗Qšqx 9ภg=้XฮศŽุคŽถ๊Jพ IBุ?Umป8กQRลkนฐฝตY–|๖^วc—ใธbฝถhฯ;ใ“?iญ*]kuๅDŸซๅ8ฎƒ+"&‚=TqK0ศน‡๛$า็žๆ†vlญหพ๘0ะฺPฏ์บ8ำพซตกN–.xOe Oดย“ปfLฟhก๒๖๖กpl™จfซZU+ž6HUุŒ๔vัnำ%*q๙33ๅ7o/1yN)๛ฑFฬ6“คŒŠ’9/‘‡y]>ผuพฌฌT๊หZคญec†Fป›=bืภืู+T5uKyŸŠรธท]‘๖๑š˜ิ0i†ฝฅp ด๔RO(k€,$eฟษ™š"-› C๒ณKE>๛๐ฉฉซร` ฉะ+๖์พ๎#ซ5ะ&C๓dฮา7ๅGo฿\๒KUq‚DWฌ/…ฑY‹dฅk๑Ÿ‚X]sV nปJS๓฿+ปUyฏ$ pฅXa|08ฐhO๕d2N๏๐Y^€t<[`ฑc>hแว‹3" ž)ํP‡xxงH›(่{ ๑ื`\‰•,|๏u_ษ๒EVึศดŒ€ปพฆ๒’ว๐‹B•ไ ฿ ขtเrบ`.7c™k^ฯ็ฉ™วถn. 
”ฏYaŽChu ณUcu•(๏Bึ„Hxภ ‘0๐ฬฮ&Aธูถ:,๒๑ํ $,ฦ!'=]ค+O๐ปFทWฟฒ| 9SSๅทฏœ"็VภงีJดย2lbwXๅ๛ูซUbYูS’แ†"Z†ฤI;ภU๕๚qF%พ \๊ึตษ7,•“>CySW่ก ภ-าฉฉŒะaธWบS ) šท—}ย‚ท˜ิXฉะจlฤ(Yฆžk™Ÿ๕ั(~lหŠก^l57KNn†ฬh)X&)๑Iธ‚กdุๅ{‡t?\8#‘?Lฅv_ป\~ํRV\)^๘วZ0w‘yใร’›*›J6KIM=|jE Iะล‚้[~ำ๖ždฆ-—Z๘ล ƒ lภฺw๖ํ9)*šแtฤ&๚ฺถVZ‡M<@า๒Gย`—“ชže็Jฉฝฉรœ ภf‹ฌž๗eเหูฑ [$Vmี[ฟnฉ,ฟ ฿ฟ)‘cD—L…ฉ]^หY๗ีŸ>ผฤT]V„ษฺำ)5ax:”„ฤb` +L๊% ชณยœ2๗cไูณ>ฉ็’ผiฉแtˆุ฿‚w_ บgˆN g —a32dูะs)ภ0mhคkn)YธEฅžล&๏งนgศ9 ตำ-” วจ[kแษษช>:อ—‡ฌ`ข๑M•งค1ˆญWึ –&=Sะซg“ถ๙ž9!Yซ฿ะ&C i็๊›สeสไ๑ุ mญ:ตว?๏Qญ~ฒธo…ห์๔tHญา$Bยลฎน•ƒ}Iๅ๏ฅU\eฐ๏ „(›‹1\n/rTrภn มc08`p W๔Oัkฦม=ภฮT”EbJ8Ÿๅยœ<&…!Ž๐HLฆ\๑ค๑๐ˆ6?%ต2Sฑฝ่L/ภ•6V6ฌฑนผไ—~ท๋DpลqApคทƒkWHญˆผๅin\‡, yี฿\Wฃœ‘r๊"ุา(Q‡า6~7็,ด˜า–ด1I’{@š|yRWาŒช…‚ฬ๕*IคIˆัษแrเoGหŸ็Ÿ%็ฝxด]]โk์›•[V5หธำ dฬษร””Jฉุ:5ดีชZU'sนDพ{j…๛˜ทd๙;•‹า,^ำ™ศส‰็่q>21Lสื(5*าŽูŽ<ไkLZ„ฬบj’jQภฌห’VJป฿ญ์˜tUšไฝตฅ4‹ฟ?^<ฺ `‹”7m–ท^H5ก^I๑cค๎ใ,”อฺ๙ฎOžใ*ร ๑ทั๙E?hl 08ะ“†ซ'GวwสZฐฆa"(ฐ…Guดl.7[แ’1ข`IŸ5wœื'<ซอกิqK>?0๏ูJj™š@นO ž?#i3ฌXw฿H'w’XวX‹ฏฝ๕ˆ”ฬ'Zซสคพชฒ#6%อไ…๗vJดHYข”J/่ฯQ•Nฒด ‹u@zตŸผrแg ˆ~อdeˆ4=•ขw๊]‚Dต๗้C*.3J2& ‘้‘๖FดึตK๓–6iฌhQ€)>;Jผ.HPะๆiiโŒข๚ถw?`Hฑ™ภ จ‡K ืร˜tQvฤ รผ4ŒRร๙5Ÿ\&ฅ2eœAWฺฬ{p{ค0สฃ;‰‚YY๒๙ฝ?Hu]ตบไแ{Ÿ‘ p– อฮ–๖H{ฅ๕๏œรธทฝ]อ<ผgb rUเ๊u๋ๅั๛Ÿ“—Ÿ}[๒rาค*รใ'Ž’„ศpอ˜7ิpUฯC<ฯŸส๋Rช฿ๅ@6XฐA sภXƒณs๕8ีƒb  xš-'๑ธfHะโ๎„ํฅV CรU‚›7ฌ‘gฎนภษ™žev76.ธ๚=พk>ตUƒ<ฯ4คฯOกษŒsr๘๚๏็๚ฒ วY=ํ.)YฅUŸ=5+ีAฝอtl&ิBอธ๙ว–ษุฃs$eTZn^ฏ3i?ห฿Av09๛เ/‹d‡/ฉคธฮช ~(ข,„j?Ehฝ๛”ธKbขร'มฃชภ„๙ีำGศ‹ฟDฃ'๙o]*›Wิส๘3 ”G๙8;Gค+ไ vฅ๚ะc|ฺZAุำiTฟ าฏj8JฅsT+lปv@ b" ‚ษ “†ขV62G6ฌ.–5+7ศฐ์œฮถิ“ว‚ไํ€๊ธ—พc๘‘0‹Mš<- T้ย[T๒rำฅhS…Žะ:ใด˜—˜ฉฟ#,`q๑†ฆoจ›4ส18`pเ’ภ|J้geธœF‡žฎšอ2้˜S%gฬDH%t์ต†3ŸลG—ทฌ๙๎ซŽwผภษ“‘h,/น๛w#ั@…c€…๊ฦPุPโ|ฦ:ฟX\๚ใGoวN9tŸA K–ฎT•ๅ ้ˆEIrzi‘RผDม๊่งส‡ท-5oญ—ดั‰โ‡zl๛๚Sำ•ๆ @IDAT๒bืr่ฉ_  ฐA˜’!3&ฮ๚5zึฮ-ฯ! ํฬ(E:•cๅู3฿—๒ลu’>.N6ฬ)W‰๙‡ภž+ฅ Y"  คq:W*าŸปี+ e๕ชุฏ\คถ—|vชฒ @’ฆ5L๎๚ูFJฑR๒’คฑจ@PหR^Šล ๘c b๚ผ‡๎%n๗ม'ดปาาิ*‰I๑GN]คฐฆๆfกหˆท_๛X่‰Ž “ค”$ูˆ•„3๗*cณำจRapzi]%๖ุS,๖๓@ํํศัะ#—๑ีเ€มƒpภXฐdŸเ4ฒœgถZถเ?j&6' ›w์๗ŠเЁ˜[๋e๎๋ฯ}๔ถษ1–#:ฎเ๊Pž&๚ึถ‡‰เฮ๊ชฌ,…๔์น–ถ–หแฒมส๛aฮŠ2%‹ีTZัpะ@sTชYsภ*ynต˜~S(–ผ8๑ำˆพ?“์ดIUZ'qB+จถpฟฉี6Uซ G>†”.Wฬ=CyXง฿-b†:ฅพฒUถฎฺ ฺfkOดภ่)v๘wช^ำ"+คIุุถ›m เ ๕๋๑;ฬฺMT”nO‡WI›๔ธ}^ฟ๓GธY†รเํใษง^V๋สk:4›=ฝฐl&+ž.•S๘ช ฟ [ถnฉVเ๊ไ)ฃฅ0“๏*๘5แwก_ง„|๔n๚)ฒuธ๔ล๐rช,=‹ฑ58`pภเ@ฏพ๖zฮ8ธo8Q๙ฑEฦH๋–Š@๎ุษฆคฌ\จŽถ#]ถUWvikj9ฏ>MpeŽฯjฒ:รhฏฏƒlWึ์{ŸฝE๊พZ*J๏ทGฦ”~๛Ÿูํ_ฝ๘ฅฆƒ/ึ=ถ1ฬ`ญ๖h"เ)v†X:dDNคœtฮpต์kลฺF้เfท*‹ูP=ฑ๎๏$)ืะ˜>๊B๒ษรUxช๚7ถ‹ท!8Œ๚(ำSใW๙ฎ ห“(๒R?–ญAฆm/d5”%c$xลPTS]ท!Œ*ฐ{D0 ณภพ ~•\‡%!ฉo๓ฬญ*;~คฌ‡W๗Fฤ)yษB;qฆวฤOลีƒ#ฮ•apvJc๙ฆอ-ฒeM=9ธ d่ml0ํฒ,เM8 ๊0ฆหŠ’่”ๅAŸใ‡‹ €๋๋ฐ่)฿๔Jฏร1ภผ^ร}]ผใผž๗dG<ภ’Šry๑™ทๅ.๘ฐ"ล'ฦa๐h๊Gฎ$1ฟีl•๚ถ๙๐ํ/ิฑM[ช$?%VfŽ!i‘Cฉ›ต3}ฝฝ<<ั$oิ ƒ l—ภฺ.{๖๚Iพ&ˆŒษIˆ฿งิƒ#Fา๛ฆMl@N‡Tฎ[%y่Ž@seYbZšสŠoลลWœ•8=์+าo จ˜นuญ2๗'ๆลZีŠทh•บAิ2<ย)oRฌQq’9<\สึถI๙ขญฒ฿ฑนPำเ(‚€Tท๏ฒภ Š6แ๑‰ˆ3˜ะน’P 6ฬก“R"( ŽภF๛V †+ฎ#้ๅ‡ปRqQนธ\n w„‰Euuvวj ใ:+ิ}sฟ](วL?ง๓ขKฏ๚?9เ ‰่ห๎ช]ฺhัฐ}๙†2นฮวeh^†lD;7Bล๔*‰่ฮต…<"7ฺ—เ'ๆ๖๓ฎจฃŠPsฎ
ฺapฉ๚ฦXช„ ุผกใ'i่ ฺ`ๅฏeŸ ะhทx`ŸตAฆ—N>e*ฦI$ูrB๊ลfb˜ตs;Ž'wdV|?P๋ำ&าแฤ2lcgงื'.HฬN,ู๓—ง89\ั XีถZp+๎ใTไ]ศF- ต ืัฌƒ{8œ๔@Uุx:ณ47๕ฎตsฤคS%!-=ฌ์ฑ๖G ฉˆ ‘าuEัวชง็dฤmฬ฿ฒQผ5ด ‰ƒฤ+wลbV๊๊^VฦซDŒdDหšSฒฏ3ƒย๐7๛ D@IUPRลษ ฝฎำ?ŸฯSฏก]vm฿*~ฌ ฆ๏:k๒g)ปๅ:N.‡ี ฟผIwวค๔OeฉMฎฒฑ๓tไBdCฎ!๓็p0๋ภ่‘\QŠU‹ผไุ๊๊ŒL—ทถส_[Qๆ0ๆX๛@งึ-HMถTช…N‹†ข๑ฉ0Šz”Œ9๙L้9pจบpศ บzˆJL•›ึสๆ%๓dๅ7ำค$oฦงฺwฉT F๒œ‰›ˆnŒๆˆ๎‰€Mฌ,IUŸฉƒSะˆ/สล&๓>xS฿หรNฒg๐%V†`’+J๘j*สไ›WŸ’โmืณิ๏`วM2U†|๒dCฎ‚I‡€!X‡กญ*„๘ุนษ[[yY\ฏ>๐~ฎ๒yF˜wBงฯp2‚d1ณ‰ษ‘ๅ%พฦ๑า>ต}ฌ ?Q’{๔ฦŠรนM(3๔ฝฦฏŸ7SถฏY‰–Wฝล“hE#ณL7t‰ึะ`คZMุ™๎‡ภnbฉม็m€ฆฦูsๅร'‘%S฿ั‰สคKฏVศธคิ งโผะ+ถ๕u…I Iึผ฿TGขษ๐าŽXณ์ฐ๑ฑ’K rE4Ljฬุ6IH้d๎Qไ{@<ฅเ:ๅส#Nธ่JุAะ=กv[R@)”(ฑเŒั็๓J5‚L๏ฬู*นkWส‚O'ีJ๘ฝฟ3:๙,ก#ื^๐(ฯ๐ฤะrvHฒฉ๊ถธ๒ๆปKฒฺPwอ‰az๊kฅpำzY;๗Y>ใSm cN;WŽ9๛}g\ัฐ#b f,Hัทลผ+ŠI•ฦQฤDnลืŸษŒŸ@๐ๆพœGC5๔็ฺa™š ณZP‘3ฺCฐฺ†"‚q5d\ช)ˆWๅ…c:ื…wVF‚$๘Aบ๛เ@Hu€ขรoPฅแkhPขตuูY;{)ูNiผยI]ษ๎7r,ˆึYjืE—tdสN’ž–๙I’eฆ้*`แ๔'ุ^l<ฌ๐บYhAฌ"Aฌ๊jƒjภJ฿‘.บB๚๓ทJ๙~่ฤฎaE,ZK์/_}2=ด๗ฦs ต`mQ>'ษถ๖ๆฟฐB๐;๗›dh7ฬ(ีv(m)VN][ข‘๎๔^ฺ๙ฎ+~๗w|์ โmจ7ณHW š–P‹d‹+"u@จญฌ\„เษ^ฑคiเ /šบbฺ—Z‰jAc“ิฬ,๕*Oฒล„YEeJถLถ๑ ํOj:ึWW๋ญDนcฑโ‹›๐ฦ–a„พ$˜\‰›U๚ฺ9฿ ่M๖œ›๎”ฃ&ŸกjAฺ}๊ป`ƒ&|ฌ ๔9๐FOW \ลผxฺ2๛W$ฑO?ฉ*ฬ๓ƒxูไ๊n82ฟ[v VๆฏA ]žซ]`ฺซ[O?ฟฬsล'ลyซ+(ZŽผแซฏ(/V๕XชBqz$[MR-๖ijy ี‚$‹ฑ›ซQ1โ†L Xว๕C%ฏqษฉบz‘ƒ.Iท(œQ#*Xก๘'ุ&๊ชซคbgกฦนไs&ก๋ค“A.จp3m   ฎสvจิŠพ๙‰jAุ\ํ–\‹mšzุtะธม`hh?์ฦี~%vŸ’‚๚๚ศ3กถ8&ตg\}i‘พภ7=๚ผŸV•Ucป฿แ๎Nซwช‹umตh#A๛‘๊าbู‘ฝIึฯN–ฺeะc<<,๋Wฎฺค*6sศ‰OIำ8ˆA฿?T)ู+อเcฃ*Ÿh^•เPญร#ึ`*๕oC=ูƒค’ฤŠaฅ*wํœ5+dๅwำีVั.ํิŸ$รN8YRีNbฅvV๖™ิลd•ฎ+({๕ทท๋‰๐WX™ฟ]'ภุมฯห‘?Dถว?Cฎ†Iํ€ภฺฟไ๎QbP’ๅ๘9$ฬO@ลชธ7ใ95>๕ึC’aา~hšลำ(žœ!์ก)+ฬ—ญ+ษ๖ีหdส%๛*ู"Yรค‹ T}TPฺ}ฉ6xtย&u=(แHG™p|NJั6iดฎ @ ฝ•ป m|ฉ,fบ”็mำ‡ยXŸใt‚ฎง๕ ฮy•X3agร%้$ๅปŸณv…ผ๕ภ/๕ธ ไส&W‡พZžDf฿อ†\“:3าฎฤ์ษ%„๗`T”CkwPŸ๏๘ูใฏ ƒ์”d’ี*ิAขE"ฤˆ9เ๓KBXไoXฃ1รrเKซŽ™Hค,ืVฉ”l๕=Nzฎ6[ัฑqP5% 8 ฺฐว`lp์„ตWล'วมB%ฤ๒y0…fณnหnืŒ฿Yพ#_6.š-฿ฟฒ๕#าq๑DGศ‚;ฃ%ุ6i*ฑb+ +\šn๛ฐ7lcv๚Š%ำ>”™oฝ€๕‘˜"๕eปฐ๊~ฎio๐Sdฎ46W‡บ)เ`ะฑๅ`4ว์2'*$วCุ |djะฆ๕์ธ์0เ๗5$ห่ ฺDˆ’?:VไภB[ญฺสr%[;ฒ7ส‚wป{ #Sxcn*›*ฆแวŸ$วND TŒกhLGA€‰ซŽUด_ก[ หh^ฏฉด‹๛Y”y5š5mFภnO”ส9ะๆุฆ(™ฅฤj ืAS™ใฮนPฉ๗1FืIจ8y`jce5ศฆใอ†ศ+ูแข‚ๆIV—}๙‰ •†๒2๑ีืยPNDiบ๑Cไ้ศ†\ู๐™ฯGภŒ"ํ1I็ฉ;๎รจp\ฏพ Yy\นโธ๚ฟ รศ$uaวœซ1ฎBไ€Cu X๒‚™ฟ}ูV D|ฤมวL€T ผ\๗ะ2ุ่”ซSzeJL|ขช'๙lt a nu€kตt๓ฃA ˆ€ถWถุ ํ•žถ…9k–หๆฅ๓Aฎพm‚k”‹eไIVไD็‚ ?2“ลฉL๗ึลช"ฌ$พณ ตZ(Tฏ–ๆdSlศUˆ#$ศ#/B6jA€`R็!`เ๖รบน$๋Hฒnฤ๊7?ผ;†Rฤน?๛…eำ”!Y‡zณมห’ D*้"iญ,)ลEฒs)-ฬ“Mซ–J5>&๕6 „ฦ‚๗–ž†J>ฉZคไŒ1vqYฏŠ!\ƒjw9ฦVI%ฎPmฃฉชดช6UToฏว*ถ‚อ๋›@กSโOAฯAO-€ˆ‘˜™๖ีำ~7l{+.!พ_ฝคฎผค†ส> ‚ ƒ,!่!o ~งอ•INCภฌ๖…z7ษr8ฤ๔๕ท }๚๙ช๒sœ˜ฉFœyรmๅ็\1gHึaฏd‹็[พ“hฏ๖*$DPญTC]P‚U†"”pํ‚yฎะ:˜4x 2พฬฤบภ-•lAAiํ=8‚๊@hT7ixc} ใlดฤ@ฯไูง4e๖UหZฌ„%‘%บ๐%ฉGOH^ขตญฒMbฅ๐๘O{] €‰๊ฦลsไ“วิ๓b{ภ่Nu ส๏Tพ‡|r 2%W”h™d่T มjธ๙rณืdz๙2„ิ๑"คNไ่Sฯ‰8ใบ[%6)>ž ษR„๗O๓zT%bตี}>ŸGjชaฟUกมq๋แk‰ฮIภ(๕ชU$๙ื๎ณร!e ื์ จS‘พ™จ’`า0>:8๒›-ฺฝอ-“Bm[๖=์–RQUMRErDO๋$๔$๒๙Vหาฏฆ ม'ษไหฎ•ฬก#ฑาu$ฺุ้>ํ๘ข฿›6๛E@ a3ษ๗Rjทf่Wl‚8aP_ถา)ฤดŒ @Aื"๓กrต_Tอ€!XƒฐMฒ€oฤ๛xฯ/I้7ศW–ณีy์9Fœ~ํ-P ธี–ศHฒฺ๑์1 b๘R้1ถqๆพ@ะˆ˜๊E/fยŒ๓F2Fย• ื ง’ทL๘+š›™CGHFึ@I†ํVž!mAุ๙๋'%\๘nฅ ้โ#้ bา…?šฺžŸr)‹ด ์b2ุrmEน:D-ฮฯQŸUkgโฦhW5พซ๔\ฅU.Hซะ6HสY8 i- }ฟล b@<ม-Kศ็/>ฉกต่€ธf็Hฝ>เษ อ (่Wศ ยeb ฑ0GCฐ:t›d๑ tjwqRึ@_Enถ๓ค_‡เะ?: ฺ๙PฝeRG ` –Jylโฃ->8xฺ/›แำ[_'U%ลฒyษ<™๛ูGCูฝำ‰?บFี=”J$ฆgHŒๆนR‘ถ[6้โ@ญjE๛ฺ:rณฌ ๙ฒฏปw๑fO‡ ะฌ=h๙lึณ DJ‰8ž ฿G.ฏ Uฮ5eXT‘ฏาฮ]นูฒ แ0YjQรฃOŸ[พcิ- ด0`9‚%้็uTญโ,๓ฅ5lฉ=๚ืAฝfึ ™๑า้)XD$\D„ไAฆXy'2ู็!๓ก2Wwcห$ƒภ@ภ๊]Žภ…ปษ%›“ฌ—pฯ7ี….zh>ย+‚ช84ƒm็6 ฮŒ›ฎธ{‹‰ IŒjส`ห…•Š4ž฿ผxฎd๏วแiR฿’ูo€p™}BZ†ค`€u'&ซมsL\|ะ*^5พmผ.3lน4cป)5k๚b6๛tL‡lฐอป‚ๆ฿ํ๚5ฝC*q…ขŽv5QŠตม L๋ƒDŠป”\cbƒ/\๊๏CPOCช ฑาL’ชjดmp\น šงฃั€ไ้x๖้Yu }ฏ5ูbโฤdคUอQ;ธm•ใฝฐcาั๐œ๗_Sƒ๖xธ\iจฌoM% !น—*ภไs‘iิฮ๏–จ&Ž$]นื<’ธด็ตm15ลTณ''๗่)ฯษŽ๚มํฟ–1งMir `HV{ย~ˆe‘เJฎใ@LIF|phC‚Qธyƒแmํ ™CŽ’˜„KIW4ฬฅม|Bj†ฎฆฤ‹ Tฺ…มžกIีจ๕ฑ’๛๋?[ฝ›~ๅFs>ฎฤ&y-๊ฑฏ ทVทfฐhฮพ 8ภ>ญM๐˜ๆืขdˆป๙Uฌท-ึง 
RK5Wํq5ฏ%™*€M^H๔fกส/oำ๑ีYมฆƒk๚1๑T้Gถ}†T฿v๎„ค&๒ฬgง+N+dๅ&ฺุดก+๑Nะึชฒd—ฌ๘๊3™๕ŸตŒ Wvn๛ƒ…๒ๅ™ƒ|๒VdcoL๊:hืิuชถ5ฑIV?แ—๐2<<&ฅ‡๊'ื๓ฐ ™0Iฐ/เ j:ๆฎาHrXหf†žใ-รๆ€J9สa๛A GIv5บ]๓ื„ุ~๋๗@้—่งaY9Cะh:d‹~บจfdโc๑บ6yะ[๒‹&rดwi~\๐D๋–‚?X_”[iG@R‚LษšถCn7๙eiะzเr6‰ะ2ƒx๎žษะ>Ÿ6qไฑ{TวZค’uhY๋ย๔~4ฟฮc J›ฐT.dฐ ษช๎๕B2ีP[ซŽjซ0hำa-Ÿ}Sต–๚-=`œฮีค\UšŽจ |>v@^ำช3ฏjHUkX่7ลฯ’N‚iน ๆ~๐lฎๆ Vb‹งช’+u&L้๛SฆW‘ฏื-kŸ%6 ๎0#ภ]ู‘ฎNX_฿&Yƒp—๓ฐ๒ฅdoภ๋q]pวrิไำuแLุžu‡5!vs๖@Ša4ธ4Ÿ“gหฦฦ๒2_กธ่Qบ`ำ: ์ปคtg์ฺRติฺmg=JI‰LRzOหฆ ฤฮ‡•R `ฉ†Švวa`ท ๋ฝPmนฐไŸF๘ ตีJ4HึH:XJlX ทYWu5Š๘ผXษŠt8@ #A๔PN=V]บเ›ƒ บู&ฃqํิƒ8ะฏAW”‘|าๆˆฤรพฅuฌ+NPผx]แ%qคJ๊ฑ๊ฦWJ y>ฟะ.QI%‰~ใ=ฒŽฎ่ พup…ฐK'K’ฉ*|g๘^ƒ๗]€J;๓๓ ‰ชกtkท๘†้\ํืาว„ิtu@ห๛ฆSZ—%aดTปVyบ้B[€ุึ/xlSlCl{\!8๛Wdีฬ/ด$’+ธนแ6Z‚ช(ฅโ๖มŒ%\†\ “บฆw่วa‹ฐOยeฟ‹Œ‰u:\Q>OUyไEw^I?ฮŒmiA็Vฯ\ํภ์–lูR >+K-eG“ะ8ฺƒ@฿๔‹T_S‚เ…‘๔๘ไส†ชqณ’ฐ_+<Ž`\ธฦ MR:Œ.Jข@ถpฅแY$)ขญ`P’Kผู่\'|หwช+ บฬศ]ฟ๚ %…๛C/sศฌํฃ^)™JL๋กj[ฺฯฉ๊=žฒQ}Q?kL'ฝF"๑3้0 พ ฿xw8y ‘g`๗นพ);a๏ฦ‚ •Uโฉ.็ulั0g5;/CžƒฬD๑.I.‡€้):‘ุ’ฌ q้w ษŠvงgxซ๓s\็zฏภW–ฺup 1$ซ๓N›ฏศธž„ฟภเkK_(๕P’ภทŒ?Cไน _.ฺPยTUŸ3ฅ0ูหIYi™ิ•ตนๆ„๔>J%ดoK๋O}˜Q๚G s:๛uลเ{” ๏Uฑx@x>-$T,.Hค๘Mj?์ี|”V๎ุบ~ญ g๋Eโz๖‘,.AโEษ'ฆL/"™$‹(‰1ษ ะ%0=ว‘y,6ษšˆหฯฤ ? 6YุdE|๙ 2แ—ฉzล'™:g6‘yJ‡xUKhŸl๗–๔ƒdŠูฒซโ Nป;หqฉm;dKq๊ธNQีoคšœๅ๛ก*c‰$y‹ŽU้ฟGAdซY‡$บVตฮฐœc:U2D )Iž;!QU”ดqฐใXEI“=Rขcู8Yaƒ(ักt‰ช:F$เ๏”ุQ%ช๗Eiˆ$ฟSีจื‚๊’ืโu* ล#มaชA@^œGB๕ว{ฉฏฎ””‹ุี0จจ~ด“%™ย_ชXq/ึณj๙Œ๔D๛๓ูnุ๏…+จnK re๋ฒ…๒อkO๋5ฐยZjแ"…ช]<๛แุRซ๛qะKมสุgปีอd่ ม๊TฎLป“8‡ฟ’•ืณทงบ 7๊จO—ำฎ/Iม’dฺธฐ๛7ณ่ƒตหeKผ๐I}i—|-"@ฉ๏B$ำiบ%คtŸžฅฤƒวp?I ‰-SณH\๐~4f๔๕p7HV๔npMฉM`(ข๋ /’2‹๔‘Tbaฮณ Pำwุใd’@1 ษmศhoFRฅ’A 9…J7ญ?Š…U‹(Z˜iึŸเ„E! n7๛ีlถ3v›ฅ๔ํ“‹C6.˜DฌขS๔y6T”ุW&หทฅV๓ฑ}9r.2ี|lถส›&บ.{vป]ทฆแY3›d๕วํั้8,E๖‚d91ซw\rฯeะธใuๅ`รYผIaŠIRำญํฒฦ๋‹ ์๙ส๒ุf๛Hพ‚_นู25฿มƒšoydฟ5ซNึ vๅvธ ๎งQปu]๛DMzƒvฝ,ขษŸ(mbR2jKžtgฯ๛ุ]—}cfŸg>;›Xั€ ‚อH ๔'๗ลs้eฃ’%Rืบ’ึjUKjE• ลฉ$Q7#ฟ„ฬฤพาจ ๓'Tุ…Jรฏž์L่8๙#ไษAv&Nฟ8๑4ฐ8ซว†5xแG“บ#A2as e๘ย๏๚6w•W:X'Tหฎ?–์3wŸebWนว๕2[อ+&นฒW†า-Fแ– 2็ฝW%‹˜hฤ^ I–-iฤ.+ช๙p7#฿‚ 2๗ฉ•Ba„ฆง๊Oห–dก6ŽฟCม๒ห่ไt:ฏงฒิ5้’ซไ˜ณ/PŸIดตQiVkz5๎ภิย `่ๆ€XWาฐฎ3rแแง๏ ฝฑ3ัํBํฎv_oceKฅุ2ั ๋ไ:d๎ฃ๘าabร$ƒ@(!`VืyZอgi'ขZ๏รทQ/wzO/bn9ฑD1๙วื —˜Gวฦซ_ ฮUอาu๎มิฤ `่†ุ‹"h_ว… ;ซqๅืŸห’้ส[ซ๚๒r๑ึVู่4‘\ูถVหฐ;ไฯ‘™šM:ญๆฏA ิ0ซk=1Y1S\>๙]ไแAg{j๘9ๆดsๅุs/า@ร”`ั˜X—™[๚!n’Aภ `่,{:x`ง๏0,X(Q ๋็}ื"ผงบF ‰ท+DREreซZl฿‡2๛=ะิHญ†Iก€!X]๓๙ูณ77ช๗ไ฿ปโ%:)ษx%aŒc8bาiบLž๎,i–7uอ›2ต2ย›Xqu(mญชหJdœoๅซ—Oo0*!์nฉE4ƒ`ขAษ•ญ ค)ฅU"[๚C‹tq"i’A ,0ซ๋>ฦๆ*ร“Qอฟ"Ÿ์Nฯ”€ทม‹%อ‘pJq?–ŒฌA*ล2F๐]๗aššย%V4`วส@+๚iหีœ๗_—0d็J็ุž™R[T‰๘TD-)™ฒ%Vyุ~๙ dหโุZ “ยCฐบ๖SฅHŠฯˆ3?ฆ›„ฯฌ^ฑ=5X…ƒไ<๛ฦ;Tšล๘i๔%dนtเiๆ๑ “ รD€+๑Ž\้ฯŠN_ทฏZ*kg#๋็ฯิยน2ฐfW&€NibฟลNศญ”X•!3ูฟู›ตื5„ f‰jrศ4๙ไSiฝฤ[S้๕ีื:S๛rœqๅาฬฑ Xƒ๚ขCด2๒4“ ƒ@ ฑโbHฆhgี/๋๙pตฐโ›ijkลาโแ™qฝ5๚;ำ 9ส๓Žฟ=๗์์ฝฺCฺCซ„บ@ฦ ฐ96ก0&.'ง็จฤฉไ‹S\๑—|q*•|HูUq‡ฒใPe็01v,dlง’…บต:vตซฝgggwf๒ฟ=๏จต,`BปฝWzถป฿>ฆ๛7=~งŸื5rœื-–ฐฏยžไ ๋นใฌ8TP`3”ส'ภ ล0์}ฐ๏ภNMOŒ]‹ซ_}fqป7slz฿๖mพUHOป&‰D๑FOัืezฤTD@~MVๅ” ๑Dยช ^tยผ‡˜m|ำ๔๗5์ฺฦxQ3ข[pยŠื)W่ต๊}๖'ฐƒ0ึั่ฑ’ฐ•pk}ฟผx9—z3ฦ ๖g๑๊บTฒถ–A๐๖w็ยฌพ้6“ฉซทO l:คF“ะท%ภทyˆBX1ฎ“1Esเ/}ุฎƒ&naยL๕sBษร๕จไฤ๋~cŒ฿ ty‚ื-Tซˆ@๘ H`อฯ๏ุ=%› ‡r/ƒเ๓ฃ็ฆ “นHืีข๋nปหtฃูฐfQ R:lŽ+ฒ”คt~~๓ฺkธDlS ถMaล8ฮกSๆ๐ฎ—อฯบฤxU‰!฿Diทผธf@ึํ†๑:๔Ÿ0๗6 ็หc* €ึฮS#/t}ุ—"๑ฤต้ฆƒฅถพพณฦguฏปIJ3๖ฉิ&dซฃ„ึ>ด๗"๐> ธพm.+ฤ[ ž๎ฑ™ืฏ,ฌRญ]1d`๏EŸธค๘}2lม‰ซcœ๎ญŸภ&a,œวeh*"ฐ H`…ใk็ลŒbŠฅฦงศฯใ‰ณบชฉษ Ÿ8สงษุ๚-w{ื~โำฆฅ{%โณ"xใpอ‡l 6SXH\Vธ0เญ@†dG‡ฬก/šวฟ๑ทV6Uอmfผ๗$ZmศลฝQ\ๅ ์/aqข\8ื"ลX9".XXแ๙๊๙]Rh9ื|ฦูa๊CZฑtCCiไไq^ฃ7๗€wี ‡ะZasู0ฮยSˆ›ะ)*"R|+ะ?ด(rYฑฐ)๐ไฝfืึวอ‰ื๗ nยชฅ MgผฮEธE“VCgส…ฟ‡ฑSA^4c% จˆ€# ปฉ#ž!Q^)ฆXฎ…1o1>k:;Z@`ผ`ฦn}เฬ*ญ†ถv๋ลb ผ฿\ `x๐QPฐQ `‚PฦafG‡อแW_2?๚ื*วYีฺnr็(ฌl.+^'x๑•˜฿๓w1อNภX8ห”eซTD@H@+ผ็Ÿ(Y|ก~ฺ ƒงำ๕t๛็ว ดฦlš่–฿๛’Yถn“iBO๗ฬะฬW๖qˆซฐฝ๛›ั_yG๎*cqŽš#ปv 
—ีฆ9ญX˜$trdื{M˜ูศEธฮตƒœ@แ๕…VŒiจˆภ,$ฐfฒชเ&]Ÿ‡}‰ืfเนBZ็้Š๖เ{ซื™&ไทIWื๚Yแูtธา;„์ฌะแ„›@YX1พาvkƒแศ@Ÿู๛ฬSๆWฯl5G}ฤ$กฆฯ›๗œวŠŒž€ฑ)๐œ@น๐มอฏำ_YH`อ%„Užyad วฟ๛J4‘\–F ‡๑ำ%ผ!D5]sำํฦOkฺVฌฒI[–๖„ฉˆภœ%@aณ™ืผฮYF๚๛Lbซv?ๅวXq฿3mศผ>^สRXัU ผ๚wุ#ฐ_ยXx a๘{ cŠˆภ;ภz8!œ5Sh5แ๏„=„ง\4.Fๆ๗b1{’ศ†;๎‰\ฑ;L๋๒+L ๙oxฑ.Jh…๐ดะ!อw~ืX์+M'O˜Cฏพ`~ฝฉ^M{W ซ"บืš)ช๘๐ลธx wก„บ‡`๗'๋™TMตห Iึ6Dท<๐‡ฆc๕ZS]฿ˆnxชฑXษŠ-ๆำBห…-พึาฉๆำะ_x/ uฎ‰ 0†๘*คQ8kŽ๏}ีxi;์ฮ.โ๗YŒงSั๑พ3žํา†ตฦผ๛์ฟa}0WœงJขสัP>`บ๋}ภ@Cฒ9ž|ช \|cw d๋ฃจ๛ำX*ำžjldŠ็๑Š"[ถ~“ำชA?ˆผฐ้‚๑Z%˜‹G๑Z!9;t—œ@ะ[ล฿=ย“ูq_uต]ๆฉ๏๗ม6ๅ!p ณใgส‰‚อฬ๛)์GฐWa๎อ?6๒ทอ฿5MEDเภบ„pCฒi>้]ฏย4ปเ๙L,Uี•jl2ูำ|ปbห 3คx๐๊[›๖UW›ๆฮeถƒiŠซi?;4๎:ํภJEfp*4ย[ล”Rฑhฮ๎1g6Gv๏ฐู`%+jป"นััH~daภ๚a฿‡Q\š ๋Pฅ""p) ่Nw)้†gV8แp8t^ซFŒ?ป7OnNdjำฑชฝZ|ชๆ26ฎ๋ส๋o๖:ฎZk–^ณัฆ{(ŠcvKŠี%•O โญยƒG„*8ฆ๒f]ึฐ๓ๅm|“Œฌท*QำPŠgชโ“รCfzbœ๕ฏภž„1ำ:ำ,ธโ‚ฺ)ชส๕n–†"  ฌƒrธ>ร538กลฃ[ป๖)ุnLิิฤ‚˜ฉlฎ˜์ใ ดฯ~ๅkาตื™x2‰Kพซ…ด[˜อ๋?฿Bฤภ.สกŠ„›€V๔Tูุ*4งž๋7Gwฟ‚U/š7žš๘)ึv,ๅวฒfrx€อ€๔Vm‡Qyฬห*A3fซˆ€\X—ƒz8>ำฝyไโ;xT|พv+์4^ƒ'n“ฌญ1#=ว์๘๚๏๖Vnฺi๎๊๖2๕MVl1๋4=[zถฅฅfDT %๓ย*a3“Yำw์ 9uเu๓์c?0“C<๎‚‰–ชZวุ๙r!?ษบ=ฐŸภพ {ๆŠ X< E@. ฌห>Dหsศ‰-๗$อรซ‡m!8.yบโ™:&ง๒ wi[veคyตท›๚?๔๐ z=๖08yถธ‚)จฬ7VLูv^ZฆX@|bฌrYคX8tภzชv๔1wh…š๖ฮผU1vฦn›ำy3ฦฆ@zด\™้๊5˜$ฐๆภ—ข]pB‹‡|š^„้๋pWู”nj๛B$Y5~ฆว63•Fฃ]uรวMวชkL}๋“Hงmj ๒ฅูฆฤŠw‹ซ๊ิu5œ[|QEA…ปฌnฟQEม”6g@วห[อพgŸฒหฅ-.Db‘าไ๐plzbŒu'`฿€= {ๆŠbซ E`Žะ]jŽA๓x๗(ถhผำลV5ฆo&R_ˆWื~&Q•Žๅณ9“;ืkcMธR>Dบฎ`šป–™Zค|Hีิ˜x"‰{šยณล<@็ob)l๊ํDเSน‚็ฃ๓Pั๋šŸœ0ใƒ็ฬฤ่0โซLฯQไญzYื฿เn–2mํ…โtIAOำลB/s00พjf*็ึล,นN@kฎCแุ?'ถx4มfฤ5˜~ย้wโ5u๑LตษŒ˜ฉ๑aทL4Q]็]ฝ๙ำฒtนij๏2K:Mฆฎอ,ธ๑์eฐ|ูหEO—๏ุRภHdZW•ๆฯ–Ÿผ7p.๓†๕rAŠ.~ืแPED`ภšG_VHvี‰-็ฑโa5ม>ป๏ŸE`|uใหž-Š-žงึš—ญ๔ึp‹Yิูmา5ตฆบกษTี6˜x*e;ปตb‹Yไ)บ0๔‹M-่t/ัเข 0WW๖็Zก๑<ีวเฉู๊š9๔ส๓f‹ฯธOเา%fY‡ท–*)–รrๅ \ๆ์ฐ„ูผ RX˜T˜otว™o฿Xx๖—็ล ›>\Y†‘฿†A๔‘xบฺฤา‰ว(šŠูพS๖†…๙Zฑถz๓mโ•W™Ntู“A—=‰t•Ibพกลฆ ญ๓อŠXหฎสก๚W~`„ข2 ˆs‰อ~^9H}o๖ๅฦวฬฑื^5๛Oup็ ็ื๔ผ"ผT…โT!’Šฑฌ๕ฐ3…๖ยุเsฐgaฌc‘ฐ๒9่ฏ„‚@ๅŠฃัAฬWJ ฅะrn'l> ๛l5ฬC๖x“„ื ๒ฆ0‰€แณ'Q]ivŒคZผฮ+ฝ๎uื™Vผ˜ชฎงซฮค1Œฦ™Eง<ฯz+ผธช฿ฤXvKXAๆ 0งแk.LHjมž๖`ฟภxฎบ๓•7งูnRW ž๓›`ฟ ๋F฿nKขฉชt^*อŠ๔Hะ3•ํตM‹n;–+ ๛y+ฏปัปโ#›‘ตำิ3jUm\Lˆ๊๏=]lR๒=^n#~$6ฯลfJQ”็ยษสTeSy'@&็ ๐๙Q๔HนB1ๅฮž ำT“๐~ขSeŠชณ'Žš“V?Š7Q*฿^U๓’พ๗"3“#็b๔n–หa ๛Wุ‘rŠ*ncถ๓ี*" a%pมๅ(ฌฉใ -wใฮๆๅชC}l=lcyœTฏAŸ‰m่Šฑ51ฯ๘ไ)ภ(œ ๙)ƒ`z,fcยm›๔‘ๅฏ7Uˆ้โ‹K:ะีO๕U&ŠๆF6?บโ^wำnH!ๆ็Sฐส Yัภtล‰N/ะŸ๋L!UFcฟฟฒpbณžmฮ\Rชฤบ|มัุ‚้,rRœํ3รgฯ ?ีˆ้๏5ว๗๏e,W;/„ ฬง*Jq–ฃจ๒ทแๅฤฃ066cก’ฃq[†qF`^ฑุทผ0—็2ol,ŸMpู™๘ำ c“žvภึยj`๔6,…ญกm0šLEcฉดm2bท%hjdึyWิ๋ZณT7.2 ศ@๏ จ’ษ@€5ด!ืผ#L!‘ชB“%’คฦ“)+ฤEฃH`Gฟ|ใ‘…ยฬzX8Aaแ์‚X0ฮ๔บ๖๏|Ž ร๑นrม˜›ภ:๑ฑMy๘ŠX‡Lโ™Ÿ`xษไ#5 ฯRvdศฆง,CฮฃgŠอ|งg%ฟƒ#ปwp๋3อKี7Gฃฉคg๛ลฬ็ฬ4ึ-Lู>หป่!๑g‰M€/มุ' ื]qอUŽˆ†"ฐภ ๘W้A‡J<ทƒฦƒไอฯบ9=[แ:0ำ3]ฤ]ธรทB๔4Eใ)“ฌG’ำ๎ฅf:—=๏๑x๋ถนญ`qำ^ชฑอดต/๑b\ตMอ๖MGzฯ˜นพำbL9‘D=c์:ˆฎ8g…šทXœ๓?(xx๎ใ9หšเ็็ผulๆฺnฝ™˜นทไ–๕ทzแ”ฟๅฟMhcขpœQŠ(พวŽภ๓นœํ๏C“^9าฒๆ่k;อษ๛lณฃ{^qยกท๋ฯ2อ,๎€ถO6ๆ๒Shl€ษท$ุ~ผ ๖zูœx:ˆ้ใ0WXฯไแnކ"  žภlืฦEBKภ๏:o6xSžyฃคk‰q]ฐฯมุ์X[…๘ฎีx5?‚n บโึP๏‹ˆ Š&๋™ฬั=…“ฃH/q3gนฯ็~9ใ2^็๊u^ ผc๐’ีท.ถ‚‹ย‹]ล’Iล‚ฟ*WฃNภ&+!nž›-7IrŠMk\ฤ:*X์๒XŽ‹ฺษภ6œย2๖ธ(Šhœ๖ถ๋ธ:+”2˜cฝPv๛6\ั๏๚hฺn“ำำ๐>ั•Ÿท9ฆฆภjธ๗Œ์=…ิวอแ]/รฆ8t†mF"x›4ยœiฑ=c๎ธbไสฬ1Q‚ ‚g*oฟ„C˜ ฏ”อœฮ€+ธนฬ๎๒tะ[‰ชJ!ยๆgห[Uมข™ส—า™ีšK€ฟ ™9|ง›hๆ3ถk%, ฃ่bฎ.b1เgฌฦ6ฆŒ*H‘n)ŒGm„‘B(ู]$ 2ˆ€ยไ็rิ1ฮล—ลWp_ZE@D`pฒ9ผ‹ฺ5w!เ~ว3…สlซ9กยeนง้ โธŽ๓ นi.ใๆปๅ7ษyึ2XดโU 
6฿ฑž…ห9ณๅi7ฎกˆ€ˆ€ˆ€„–€W<ภเ๘lำ3๋œcฝŠˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ€ˆ@…ภ่˜ธI^IENDฎB`‚././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/media/sleepy-snake-circle-150.png0000644000175100001770000005715000000000000022633 0ustar00runnerdocker00000000000000‰PNG  IHDR––<qโbKGD ฝง“ pHYs\F\F”CAtIMEใ *'‹ลถf IDATxฺ์w˜Tๅ๙?๏)ำgถฬ๖e์ฒ๔""‚ˆDฌัXc์‰ILฤcิจั$ิC4Qcพ๖ 6Tš(MzeawูงŸs฿3,]D>ืลE9็ฬ=ฯs?๗S^๘พท#`โ๛Gะi ๐^ภ—๘e๛’๏ A ่๊บฤฟ}ฌฏnX!ฅฬFƒ€@/ภฎiš-%%EฯออUJJJ”””===]่บพฯฯ1 ƒ๚๚zซชชสพ}ปฑ}๛vซญญอ0 # ด•ภ2`~๏ฝ๋ึญ[๗=ฐพ#ๆHxB`„ฆiใ].ื0—ห•’••Eaa!ฃFข”””ŸŸข(‡b†aPQQมๆอ›Yถl .คขข‚ฺฺZBกP}(Zlฦt`Pดั๏๕-0UUฆiN†y<žใ˜฿ทo_ @)--%33๓จ]ำŽ;XทnซVญbๅส•ฌ]ป–UซV•ƒมฯ€ฯ๙BˆeRส(฿1gIภF›อึq๎น็š/ฝ๔’บuซlkk“†aศcมbฑ˜lmm•7n”๛฿ไ๘๑ใMEQฺ5ภรBˆž฿ฟ•฿ )ŠขiภŠข|–””$GŽ)Ÿ~๚iู,ฟMVWW'}๔Q9lุ0้๕zฅbpเB(฿ฦ๗็ O.ฮ;้ค“ฒฯ9็ฮ>๛l๚๔้sT/ขฌฌŒ5kึะััA  ++‹มƒ“››{ุ?s๙๒ๅL:•ฉSงฒdษ’2เ]!ฤ›Rส฿ป“#—ั|t\tัEr้าฅฒฝฝจ{˜สสJy๙็หไ”Tฉ๊6‰ขJ@j6›ฬฬส’7xฃด,๋kฝFkkซ\ฐ`<ใŒ3$ะฬ†‚ฎ y6`(ฐศใ๑ศ๓ฮ;Onถํ Y†aศ^xAjบ.ํАฃ‹ปษฦ ๚y๗้#ๅU'๖“}ฒ#FŒ่2^ทqใFyฺiงIฏื+่฿ฃใ๐ํT`ฒื๋•ฟ๕ฏๅ๊ีซฟQ.tใ7J@-ศ’ใ{ส^|šœ0zฐผ์๘>๒ส๚ษซ‡๕“W ํ+ฯ่ำ]rย„ ]๚๚‹/–ื\sดูlQเE!ฤˆ๏9ึก…ผT)ๅ€q?๑]'Nค[ทnจช๚]ำ 7ภคI“8o` *P’้'j˜ิถ(ษHๅฃ(B0ถW!ญ|Z^My ส์Yณ:th—]‡išlฺด‰ &๐แ‡ถo!~)ฅ4ŽฉHsŒสœ/„ุZTT๔ร3fธ^yๅ พQP-Yฒ„g๗œP˜ลzw'3ษN๒ˆฦศM๖bZ’’ŒT๚dงSูŽSื8g@ ญญ<ย X–ี•:ฝz๕b๖์ูผ๕ึ[‚‚‚ŸJ)›€ำEัฟึพvผ”๒ดดดษ๙ห_ผ๋ืฏg๘๑วฤ…ญZตŠ`0HQj2ฆe‘๊v1L4Eกฎ-ˆืaฃ%&ูeงGz ฐi*}sาXปnแp๘ˆ\ืy็วŠ+ธ๗{ฝ>ŸoบeY/ฅ฿k—˜{๖ูgŸฝbล nฟvl61ƒx]ืŠ ‰Ž๔ศHaมๆํ”dฆฐfG‘(ƒบe!ฅ@"ษN๖์rะะ$อ๏Gำด#vmษษษw฿},]บ”QฃF],~;ฐฒiษษษ}์ฑว<๏ฝ๗ืา€Ž`ˆFZ’โ ?›jIr:PA}{ˆ‹†๔aๆฺrึื4–%ัU… ต์h 2zิจ#!BP\\ฬวฬwแs:ž%ฉ๑ุQ%.Šข ฅ๙ืณWG=์๙็Ÿ็‚ .8fSำH$ย‹/ฝLบ†ผ;R||QYGชมะย\ฺ#ฺBa|N'QรไูE_ะฃด7“&M:ชwธqŒ1‚+V ฎญญ'„ุ ”ง=ึN+„x˜|๋ญท–ผ๗{œpย วดๆ1dศŠป1๓vJ2R๙ดฌ ‡MgP^&ŸW์`UU%ฉ๔ฬJวฆ*ฬูธ๚ކa๐้งŸ๕๋=ๅ”S˜7o?๑“Rพ%„ธแ;ํฑ€$`’รแ˜๐๗ฟ]ฝ๛๎ปฑํวผ˜ฆ( ๙๙๙๗๙Q5ไ๛“่™้งฌพท†]ำุXืฬŒตeœา3Ÿฆ๚๛๗S<๓dgg“››‹nGˆ#ฏ๒8.ผ๐B<}ผyg˜ฆฉ%jๆw XBˆ<เ…โโโ‹^{ํ5.พ๘bพMึณgO\.Oฟ๚&กhŒพนธ €–”่ชŠำฆฑpK%ง–2ผ{7†dั7;๖–f๑๔จญฏ็ฬ3ฯ<ชาษˆ#0`ณfอ ๛ซช:GJNKั[J9ฃคคd่์ูณ4h฿F9r$บฎ๓ย[๏QV฿ฬุา หJxซ๘ฃ,ฮHม๏vbI‰”เ๗8ูXˆช(๔2”3N?k5Ž๕๊ี‹K.น„ื^{ญW[[)Bˆ7๐‘|Mํ(Wฑ”๒ญใ?พx๙฿Šะท?ซฉฉกจจ”ฆงpฮภŒgTกMชข`Y1หBWU ำ"5ฐคฤ0พ9qผ  €ตkื2fฬ˜ใWฎ\นP1ZJYญ$๏Bˆแภ'gŸ}v้œ9sพต ˜={6แp˜S‹sนn๔qฌซn`ๅ๖ZšaE3Lถ5ทฑ`หvfฌ)cSm1รdMu]gธŒท}s–œœฬ๛๏ฟฯ˜1czI)?SUuภทฮc !FH)฿:ใŒ32&Ož|L ž‡c[ถlAvหD"Y\^ล‰s™_VEk0Œ]SศO๑bX’๔้ฯa#jZด„"๘:aรNCcร>j๚๕(ŠB  ฅฅฅณm๚H๑ฐ\ฆOŸฮ\P0mฺดทžซ๒ฌ")ๅsงŸ~zฦดiำ๘.ุ_|ฆ‘โr ทฎแา5~5๚8ๆฌ/งoN:M!VVึ’์r aI’œถ6ดา;;ชชjฎ๛ีฏhlh`ถm,Yฒไ€ฏ—ŸŸOAAฃFโฒห.#??ท%ฅรแเอ7฿ไ์ณฯ.š3gฮBˆ“ค”อว4ฐ„yRสใฦ+~๋ญท๘ฎXmm->‡U!h‹DB5LTE!fZ4t1(ภ๚š*šฺจlnฃขฉŠๆ6RUดoZ…MำptฮP‚ฎŠ๘œ64Eก#ฃ-ฅฆตํซ–๓เ'Ÿ๐เƒrโ๐แœ:v,gžy&#F|nงำษ๓ฯ?ฯน็žg้าฅ3UU=ว4อš.รAƒส/ฅคWฏ^ฝ/^LRRาwXฤชฏๆฺ‘๑Œ๖๏ยYzะ7'น*ะ-ƒ๊ๆ6ถตt`X•อํ๘}ไง๘(HMยeทก ชˆNยฏ)‚mMํฌฎฎฃ9F‚>ูi๔ษIGHIฬ’D ƒ•U๕,ฒบŽ^ŸQ'ฤฝ๗หฐaรพ๖}5551dศถn:•xŸ}—L ฉ]*ง”๒™œœœ“Wฎ\‰ฯ็๛ฮ€*๓๘?I’4้Ÿ›ึVืqbnธl6*[)LKfS}3•Mmd'yธ|X_๚egๅ๓เะ54EASŠ"PD๋I$ษ.ฝณ ฮฯข43 E@CG@4†วnรฆฉฅ%3บ$’ŒTZฺ๘lๅ<๕ฬ3ผฮ;ddd––†ํ>lฯ•™™ษไษ“{n`ๆ1 ฅ”wgee]4kึ,ผ^/฿% ƒ„BaLKา Q฿ aY’ฬ$7%ฉ”fฆRV฿ยš ฌุผž .ธ€ยข๎\๗ห_pมP\\|ศ๗ท๛u+ะ<๘K”!ฤ]ื๏z๒ษ'๚คฬั0หฒฐ,›ช`I'บWkยฑชขเะTlบ†ฎ*–…”`Jษ’อิด‡ศ๔น๑9l!PŸŽ%-ข†มถฆV๊ƒุT…^Y~๒R“๐ูํD รฒจhleIy5IN;C sศM๖rRnlmlaฺš2๎ธใ๚ืฟrํตืrำM7‘••uะ๗WZoแ:>?‹ฯที-`ฝ„)฿t( ผpmทู&L˜ภwัZ[[๙ืOPœโ&?% ‰คy\ˆQ/ษLฅฎญƒจa2(?‹ญญ$9lจiฤeำี3ŸผT้้~ทท]วcท‘์r’๏OขW–Ÿ|ก˜AEc 
ฆ”ุ4MUศ๔y(อ๒“โvฒฌข†˜eแฑศI๖2ชG้^U อL›!=๖@€ฝ{ใvปฟR้ทํ,\ดˆ๒ฒ2๚ๆฆ๋•-ํร€7‰๏ 8๚ภBx€—~๔ฃ๕x๚้ง๙ฎZ$แฉงžฦฏ JาS1,‹T—ณ“„/ฏฌฅ4ำOyc+^‡^Yiผฒd5ล้)(Š ฒนˆa’๊J”zน;HจqU!ู้ ;ู‹หฎำ GiGiEะU…าฌ8[ปฃ๚๖M0sำ9ก0‡nษ^ข†ษฬๆฉgžกฎถ]ื้ฝ๛๏ฯfณ‘อsฏพFG0B$วLk๐ย7ๅฑRZZz๑oผวใ๙ฮซฃฃƒ'ž|’ƒยดdฌx๘๏ฬซ—oฏกป?UQR˜CK0Dฬด๘ขชŽ‚ิdบF^jRg้็ห,f˜์hํ )ข=!๋ฦ๏vaำT6ฺึ 'ูIผ;RJlฉBWzgงั;;2ฉija๚x๕ืYธp!cวŽ= }๔ัGYฒd Hปช3ญ๎@*0ใhห ง:Žู/ผ๐^x!฿eknnfภ A ๔๊Œ๋]Dฬ4๗เ™SWn`p~6ศM๑ั QัะJฟt:"1R\,)"ภิU›ฉ๏Ždx]lih!?ีว}{เsฺยซEs Dฒหช(ฯ4ืTืQV฿ฬˆy$9ํุu†๖ ฏ-[ว–บft‡ƒำง1fฬ)ฏน}๛vฎฝ๖ZfฮœIŠหมเœtถ6ทแฐiฌัฮ><*KUี,)ๅ{W\qE๒wษwฺฺฺ˜4้_d84J2R๗‰‚%;ศO๑ลณDU!j˜ด„ยไฅ$aืตปf(รฒุัฺAm[€ฑฅ…hŠย๘~=ซปฆ๒ษๆํXฒ“โฒ‚วnฃ5ก)ยฎฉ่ V”–ยš๊สšIq9๐{\ -ศ&'ษห†๊:๏•W ƒ 4—หลๅ—_ฮ๔้ำษNrSœžยภผLบNœt„zukG1๐*;ขภB(–e—ŸŸฦฌYณพัฑฌฏk†a …hooงขข‚H$าูˆW__อfCUUTUๅ฿๙7niะ;ฟฐ!XQY'ไ^'ŠขŽD ƒ4๋+AE‚_ฝ๙:า<.2}n‘(~ท ปOฺSJ3ำ˜ผb=ฝณุต8Xบ†ำฆณนฎ‰ŽH”dgผ”“์%ูๅ`ๆฺ2‚‰5ฟวอญผ5c&๏ผ๛.๗ฟ% 3ธ[ฃŠ๓™ถj …$zgฅ‘ไแ๓Šy†%[€…GTnR๖ฎ๑ลู฿ฆปcัbฑmmmTUUฑhั"ฆOŸฮาฯ?งฒช๊@ำŒkP~ฟŸโ’jkjP<š‚aZ\๐T"๎qœ6-.|&’จiฯBฐฆชŽ‘ลyํไoปžuœ่K๐ุu6/*๋]’฿Ž5Ea@n&ๅ-|ฐf 'ๅ’ได“์ฒs้ ™ฒ|=‹สช8พ ›dง_:Žล[ซ˜บj3W]uฑX ฏMc@N๋wิำŽะ+3น+ุRืLAj๊š&sฝ๓‡ฃc=uลWุปข^uคmวŽฬ˜1ƒy๓ๆฑp|6—•ลณ ”4R {Pาx4‡ Uทกฺ`YUAัtขํmดืVjibู†อฤb[hฺ๒ษtK๕ั=-‰ž~\บN^ช‘€„”’$‡yืŒฦ(HMยL๔wIโ}];๙@ฬฒว ZBaฺ๊อ„;/?5 ฟEesšฺ(LKย็€s๚3}MŸWิ0ฌ(‡จe1ฌ0—ž~”m็ฃM".๚žP”หŽถkvิั3ำOm[€จi‘“ไกบตใNตŽฐ&๘|พแwqว1ทmฦ]wลฬ™ะิะˆ)%้}0๔ท’Vฺwz&BีQTๅvหุโใ^–a -iY›iฺฒชฯฑ}ีRถlฺฦM์vโฑ้H šช`I‰*^ง ยเN ํ•Šlชkค#ฃn:N]ง!"๋dhAS–oเ๔พ้–โe็KXRโดi”dฆˆฤXบญšใ๓s๐8lŒ๏Sฤ๛ซ6๓Ÿ—qๅ‰AS๑ุuF็ณธ|6UEะุ"fYhB!ีๅ ูๅ`ู๖ฺtค?N๐ญ.ๅX~เอ๋ฏฟu๙ๅ—s` ‡รlุฐ฿๖ท\sอ5ฌZป{zEใฮbไญ ฯ/!ตGO์พd-*ฑT๛ฦ๛8ภEีP4ป7‰”ขbŠฦŒง๔Œ๓ษ|†ฃนกžฦึV>ฏุAšว‰ืnKt/ุกR|%qท$lฎkฆ ีGใ‚ื•“—โcๆšญlจi`Mu#zไ““์ก$=…—–ฌๆ„ข†[›ฆRเOๆณญU˜’œๆeRูาฦ'›ทำ7'MQFc,,ฏ"อใ`@nkซ่Ÿ›I$f{คฅะ‰rํศAฃ1m{s{zB8v ฐl6ฆiš››{ึ๛๏ฟ/Žvฯ๖Wู๙๓๙๏~ว„ Xนr%๙#ว2่'ฟ`เe?'๗๘แจš หˆ!ฟ๎)ฑŒŠฎ“œWDั)ใqฆคฑc๙งC!VTึ๑๙ถjZใ-แแ(6MลmณAขถฯ'ึใค)bK}3~w\xPฤ๘พ=่ํง0-‰>ู้คzœH)๑8lฆลฆบ&zf๚๗+e ภŸฬถๆVฺ๊ค8”f๚ฉ๏ฑบบžใ๒ณh GYT^E‘?™>ูi,ญจbtIQำŠkh>7k›่‘–B?‰UU๕…ก˜๑ฐพKBa4M~๕ฏKษq๑รั—ฎนๆfอœI0ขวi?คฯ/ฦ™šช๋HiatแEำPTšีหจZฒ€†kh.฿ดวkด‡ฃฌจชcEU* *—Fฆือ˜ž๙๔HO้ฌฑvfงฆลภ Zร6ื6Qึุ‚หฆ3M,)ฑ'ž๙ฮ๏‰™s3x๑ำีœูฏ˜˜y€๐*%ลiฉL^พŽe;ธvิqŒ๏ำนถฒdk5YILหยcื1- กจE!ลๅ ฒน ปช’์t`J‰วncp^nุ: xปซ8ึO‡ž;nธcPกPˆW_}• & ๑๗b์ฟ')ฟ3Iผ ]ณแEeYDฺZุถp›ฆฟM กปืGzฏtปx$บ' อnGัmก RZXั(m;*iพ•–Š-llngรxl:Wุฟ3>‡ใ sH๓บX_ิษŸDโลฮุ™ศย_‘4EaaูvF๖ศร็ฐu&ใzwงฑ#HS0ŒeIRNฬ„|!ธm:mกชชPไ๗ล}„ ห็FW”˜e ๋หฅiฺW_}๕a๗tฅ-Xฐ€‡zˆ๗{•~^Iฏs.F๗x:Aี%€RUTFอK)h&eN#ญด/วžAfม๘Kzฃ๊ถธIศ ;3ร BioฃfๅgT/_ย–ูS๙๗วห8k@1ฃ‹๓๗ c†e5L พขพ=H(#'ษห๚š"ฆIฏ,?Mญธm๚WึUtEฅ#ล๋ˆ{@]Qsœิuฑธt Aผ๑pgF*„ N]฿m Iา=-™ uMื !+ฅl:ภบอ๏๗' „}โฤ‰Lœ8‘ึึV้™Œอ}คv๏็.fื ๘*บNK๙f>{๚1šห6Sx๒?๑ ผ9yqY–…9ธ0ซฺlt;แ$rIั)ง๓๙ำ็/6Sะสีรtส ;ร]พ?‰y›ถQV฿LM{€ej๘๕ษCจnmรฉxk๙ขฆษe'๔%b๘พ ำbDq7Bัkšศ์๎%‹ก+ Aิˆท๖x6ข†‰MำvนF)lบ–€”ฤ”cK (ol)ˆšึy|I‘ZŠ0เžน๓ฮ;“O=๕ิo P๕๕๕\|๑ล๛฿&‰=p(งM|Wj๚^‰๚ืทH[ +^zšฯž๚ฅœ|ืDบŸr:6_R\ž8ฬpบ๓wwZฅงŸ‹ฒfลr6๏จcp^ึj!บe2wรVšaze๙Yปฃž๎‰’ห ผLŽหหBืพ๚z!pูt:"R.–”Wใ๗ธะ5•ส–vVWืsz฿"QฺBQŠา’*[ษOMข%ฆก#ˆMUฉk ’ๆqแดijYC‹’เZฦ!{,)ๅ9nท;๛7ฟ๙อ7ช^xoฟššt—›žgœฯ€Ÿ ,ซหx”ขiฤ‚ึฟ๗:›gM%ณqœ๐Sคv๏‰ดgŠK*ชฆลษณe …ึีhช'ึัNธฝXG+Šช‘”•Cูถr–U2ช8oฯOผ€ ‡๔ฆ%ฆถ-@ฒหA^j†y่๗lZ’’Lg2 ‰‡๏Hฬ@SบJk8Š฿ํL$‚$—@4หยฆiRbZ'ไ0sๅYก˜‘Oฬ ƒ–ย.ฅ<๏–[nฑ9ŽoTW_}5/$V-:’’9ๅGIส+์าฐงฺ์4n\รฟ—?1w=„ฏ[>Šชb„C๋’โ|ห4hญF๕าE4n\OKลขXัf,†ด ,๖†ข๐แ†ญ ์–ฯa฿ƒฬG —Mง{Z2ˆNyํp•tEAOˆธว ๗Nผ๑NR ‹AQพTŠฆ!„ ฃXึซดVT ŽJล๛็^ค่‹โี 1\”ศ€q@X1 ญ8‰ดษงาxีGฤ–7๐ๆ๒ 4ย‰๎ั(N›†วfร๏qR๊#อํฦeW‰ึAื!๗๑XชBS Dก?ำ4๑ปฆฤกi8๔๘]Up่:ม˜ขljp;xเฑู่—“มยฒส ‡ฌ^^ฏท)งœrTeš&>๘ >๚(ž์\F|/ฝ๚ล‰ณ”]๚bก+^z†`cงM|้Š—y๔๓ZN,ะNล9ฌ}็e‚ญ๕8Nอ%๕™qุ๚ฅ c˜•เƒฟ Kข๘lคฝ:–ฦห็ฌ@4ฦิี›Q4ฏืMคฅำดˆEข มSะ#=…s)HMยกkจ{ฎ฿ ข.]ง2ฺŽ"โcš‰aYDb&šชPื [ŠE/ฎ#žƒัXg5,‹sYXV™๔%~zูW๋๊’’๚๕๋wฤA%ฅไึ[oๅ‰'Ÿ ฝwNœp'ฌCสฦพJ์ฌ[ณ’๕S฿ ซ฿`†^{Sงu @ivญทฒํใู2s:ค&W๖ ใฌก่}RฆD†ฟพN 
ก๙ทŸž™Y~๏ญIƒ!Zš[)Xมถ๒Jส7oc๑'หุฐhNMe`^&ฒำ้•ๅ๗ภtU!f˜ก`ำี๘ฃŒkTvUcŽ๒S“Šฤ๏qaS”N0)B้”N,)ษK๑’๎q๙๊;‚ง|%ฐEั,ห๚แO~๒“ฃโญ.ผ๐BฆL‰ฐe๔ฬษwอfร๊">ฅฺํl[8๒g1ไง7เ๖งฉL!ซ†–๗qสgN#ข†๐a™ใNDIฑลณณHn[ด$ยฃ“๒แNHงํพe์จฌใž[&2e๎ะ5 ‰e™DยQ๊๋Y0g ๙?X\^อ็5dx]\x\/ŠำSวŒ/sXฑKa—์บŠดโEูต“B%ž ฅงเu่ bฏ*มˆ๎”wพุx๐4ะY๚ุงMมฒฌR ๛าK/=ข€2 ƒ›nบiจ๚ ไไป’(ห.๒†ฆฝEๅโO๕›๛q๙ำณ…ภŒลุ6Sฎ9us^C›P@๖š ๐ค$ช.ฬQ–ือ IDATฮ๖f่BW๐\๏mA,™ฟŒ›ฎ๙=ํเ.y…น\ำ‹Xทcฯฝ๗OŠJ ฉEx|๎็<๙ัR;‚๑!โ ฆ$„ี๘ฦA›ช&eWโJ— ‰แถ้#ฑทiI๚็คA|ถิUกpฤ๑ว๏ษศศ8ขภš4i?8)=Jq๓hบฝKDO!ฑ`€mKๆใIฯข๗ู;9B ุ์l[0‡๕“_งพf๎ซz‘๔“Q่=|ศจ็QGรxoํY$๘ส&ฟ8•P0ฬ/=„aggNh$ฤ๎ณฮงž9šiSf๓ฦs๏2k๊GฌŸ๕)ฃŠ๓Uœ'ๆ{…{]UIrุAJlช‚Cื:ว‚ˆฯ-ivrO%‘บl๑–lน—|‘ไ้พฃตฃhฺฏวBจภเ‹/พ๘ˆ๖ฦL›6[nน€ิ’œz฿c8SาบTŠB,คrูงไ Iึภ!•ข`1>zเw,xไ~ฺ๛ถ9๛‡$vZž'๒$Gฯ$ณH~`(ŽำปลŸี”ูงpใ็หฃฤP…^x:Oฝ๑(ฏฬxŠ‚l>ผฮ๛œฅโกป๗*BŠลT5พ`'จDBฮˆ๓5๐%ฦึv๒*รด๖ษs4Eม็ฐœฑ‡g‹Hป€AฃF:bฯn๋ึญ์g?ภ_า‹q๗?†fทwYy&P_Cฆut3ๅฦ—่‡ช๙b)o๒bช6}Jาำ'’๖ฟ1จูฎฮ"์7b2ฎค}8๚q๑ƒ%๙—ง๙฿s๎ถฐฐ;์Œซท~ยญ๗ v^๚l-?XDesRฦoKSŠา’1ฅฤฉk4B"1\6ˆebKิc†‰*โ$“เXสNNิ9ฯ= ฐ€ค’=z‘g‰Dธํถจฉฉมๅฯ`ุฏ‡ชwแ I)Qu;ู†`ลขฬร-อ,๖_ฬำmจ็ฅ‘9็l\g`ตว8VLธ4’๎;ลงcšบQ*ทV๏&b๎‡๓`$ฤ๗฿ส=ว•ื]B‹e๑นŸ๑šอข1์šJช…Dา-ูKk(~ข†ืi'3ใM…‘(B(( ŒทYr฿้ฃฤg๐8v;œso`ๅ๕๎;ํH๕]ฝ๙ๆ›Lž<€‘ฟนค๎]Cิ;วv_๒ฝช๋ดWmgฮทฐyๅ oœJสCQRํ]"tฉY๛๑้x๏@C]๕\๛ ‰{[˜0}”๒ภ?๏bฒ)Œ?็>ดว็-e}mSขƒ’]Liัแฒiข1œ‰‘พPฬภmำ 3>๎ฑณแooน(ั๖ฃ#ฌแ=z๔8"›šš๘๙ฯภ +ฏ#ฃwฟ.+ั„[š 4ิ&ธภC_ู˜z๓U„Šรdฮ9๛ฐŒ]ํ™ว ษจ…็ฒbœ๐๖+ำ˜3>๚Aœฺkaกj =๓x้3๑ษ{vO}ฒœ7mC๑Mฯึฮฉจ˜–Ešวู9่ถูญั B$9ํุtmŸ…&†eํ<แlิ~ฅ๋๚ศพ}๛‘ัฎ[oฝ•P(DFŸ”Œ?3ึEaGšทnFีl๛๕~Bข–Lz˜E?€๗ๆ>คo ยcฅ˜ร&ƒI๗ A ภŸo”˜<๘g'‘D‰ruW๑โWy๊0๙bs6V$่œ@Wใ]ฆ๏&ต,IฆฯCЉDbIP…ภ’ฒsww3ฬล'ํX‡ฃ฿‘เWs็ฮeส”)จบาs.Fณu]ั2 7ฌA_›nยSอ l]6‡ิฦเฝฉ?ยฎZ๙ๅ6ลซใY)h‚ีหื3ใํ9(‡ธข?Hˆข’|ฎฯp{œฬูPมฆฺฆ๘,ค%Q์h $:F‰ฎ%๘Sฬ4ใŽib์'ว2,s็&มn;1ต๛ีูu]๏๙e{”วbฑฯ>๛,ํํํ$ๅ’;ไฤ.หBM tิ๎@ูO M“นบ๖Pืฦโ8%;คพ=˜๊ไ[ฎ Šฐษ&Žr/๎ฃฒrวa$œ’๑?8…๋nป†pฬเ‹๊:์ชB,ั9ัแuุุฝ‡ยด$6U บชaIk๙]‘hŒa'žไ์ ฌฏื{H+ฦ6nศ+/ฟ @ฏs/EตuSm6ส>œ†๎vฃh๚>ช๛๊ืŸงvำ RŸ}h฿6DํIf’2แิhjhๆo๗?“C๗aย๕‡›๙๙อ—ณฑฎ›ฆaS•๘2“X —อึนโ2พ ฮยกk˜ายeำ:;L๗&๐i~?6 so` HOO๏๒ลด'Nฤด,’๒‹่>ๆด.คจYตq๏}๚ัรอMฌ›๖:ว`‘‰ uํ96ยฆv1ฅ‹6ž›Zฎฯ ๑ฏ/?3…,F?Œ๕"๙ภอœ}ัxฆฏ-ceUช(T%ฎKีทh์bZ>‡ฝ๓ไXcฏe'RJbฆIZZ:๚๕ณน{ซGW{ซฺฺZ^|๑E_๙+ฬ่aฎOŒย ก %ฎฏจ*กฆšห7แJK๓ฝTU>~ไ\ท๖ฤ>4ฝkIบ"ฐš"4฿)ฑ-m_.K^Pำe –Q ๗U=Q๓โrะny๋0ใบำๅเ‘'๏%ฏ{.ซซh†ฑฤฎมUฃ&M้^W|'…ฯ๗ึs œ.'………ฑ7ฐrg•๓—ูํท฿žP]ป“^ฺ๗น•ขPuŽšjถ๚ ฆOaอ[/ณๆฝืYdึพJBฉๅญ4‡“ีo˜†oBฏๅยงŒZ8Oฯ'|๖vBSทQๆ /ŽA/MA†}=ญฦึvด๏กeขฆ$ฒฆ[ij<‹-wžS@ว30bผ๐๔œw้™ธ}ฎCพ—๚ถŸEyไพI,,ซ"ำ็ap^&.]Cw8ฐijง(jืT:B(์yLซIฺาาRH๖ุ๎‘ี•ํSฆLกกกอแ$gะฐฮ>ง/๙x‚D:ฺู8-ล…,๛็_ศะเ‘ฬโล‹‰„รH)Yฑb9ญญ-ผ๚๊ซHหคvํ ฬXก(ิฎ^AจนญW๒‘ำฉLI๒ฃรฐ˜Žน=@้3่xv=Vk4žx{&hแิ0kƒ๛„ฏฐišถ"ฦ>$ึ?๛˜xoKืฒiํ–/-๕|iยฉHnร่9บ๗,dสŠ ๓ว๎ว<ณpตํม=4Aไž …;่๗๛Iเ'Pv๗Xพ”””.y๎‘H„?หฒHฮอ'ฝw/๔"3ฆฃ™ฌz๋eแฎธ๔'๐s7n~คsป\rษ%<๔ำ|ดd)ฑ`Eำiด -฿`๘ฒ7/l๚`;ฮำ๓๖C……S#๕้ัดmมืหhฝ๋sฯnฤqrZฏdิ,'V[ ซ1Bเฅ-ธ/่';/I]AFMุ฿ฃ‘ซ:€ูE๓ํyVศภ๗ป„฿ภ;ฏฯเฤ‡>ŒCS#ฦษฃG๒าด3ๅๅฉฌ^ถŽชชZV~พ†Šฆ6n;”ไฤพ/ทc;Ucฑุฮ๙ืภruี๙7ตตตผ๛๔๑5(šŽe์_-VuญUXใๅ—_&//ƒ]—4jิ(่OูผY(ชJฐถ๛ˆL„zกมฆโQ!ื|อ‘C—$UAอq‘๚ŸQd-;฿วแ๚qwlCำqŒอมyni/Ÿ ปO0Kฐ๊B„฿FํจฉD—6ฦ{ฟv;ั:#’tŒŠŽr31๑ŸIจฎฌe้๊•จ‡ธซx๏๐)KMฺš๙๕ๅ๑ฤซoV~ทcฒพg’,:cขำ้ฤๅŠsฝ฿=aY]ฃˆ?๘ใ รoธ#ด๏4ฑขi๊kY๒์$—.ไพ๛๎ใฆ›n:ฌc่’’’ธ๛๗qูWา๒รKhฺธืu}ใโ ผNสจ,ฌ|‘;?#๙น“ už4 ƒJช๏u}ช@Fญ๘f E‰“๚ฝก&๐ฒfuˆ๐ดํ4^<ธ4๔žI()vdฬB†Mฬ๕ญดอูUฤw€= ช฿‰Hถ![ขฬœ>—z$tะŠฺudไคw‚ฌฃ-ภ›/ผหS๒อ(ษHแผA=๗๊{๎อ่wซ/‹}€ี š>}: ไw"พn๛H Šn#X_รwš^๙นผทx1C† ๙Zฏy๙็3๘ฏe๎Ÿ~‹แ‹b?แเTv ›ฆ“q}Oธ}9ž/š ‡๏๐K>–D†๖ž'V45รIส?†:ฝmฎ บจŽ่ขบซๅ k๑„Œ}ืZ๊ี๏ภh‰ฒf๙†ƒK}xy๒ฉัP฿ฤMฟEผ‰ASM wNx€แ…9œ3ฐ}ฏVไฝGfอฤผงรแภ0v]ฃถ—ึ๕ตํ?๙น'ŒDณ;๖ ํB๊ื~มœ?FŸโฬœ5‹๔๔๔.๑’ .dิI'ฑPˆ’rpฅ (5M.ฝก7fMsช๑'๎ัฤ…๋BaTˆ,จลlˆ€eก&ู้Nlƒจ้Žธว“๛ŽRDkK๛A๙†ฆบfฎ๙ลM|๐ฮ๕โC(Bมยย‰“ว‰Ÿ‡4ด ›๓+งฌ*žMHมฎ`tz,ร0๖xถ]fๅๅๅ,Xฐ›วK๖ ๖ˆวชมถ…s๙ไั๛ุง73>๘ ห@๑Cป็/X€๏พม‡rP“,Mpช $7฿ŸฟNฺ€๛’(ฃธฟˆฯฤจ™.—๔่\ุฦฮ…n†๏+;?ํํ๛.ู› ร\๑ร๋Y้*~pฮ.ฝ์G„[uํ๕ฬ™๑ ^"ฑKIดหฤฒ$อใo(ิuH$า๙ž+{$ธ_๓ ๋W_}•††ฒ7ป[็'_ณ;ุ2{*Ÿ<|ฅ‹xo๊ิ./v๙ฯ‡ภ64ใ 
>นA Wธ!U…hฬไยŸvง› ททฦ๋€G,‰Œ˜ศฐ‰ ๑?Gญ/U…ฆ คฺw ํ–;ป–ถๆ6„œ{๑้ปiฎ‚ kทะั@UพฤŽzรดฺุะา , hชุฃ01Lส๎GืษฝEMอแŸ5ฝvํฺฮqฎก?ฟkgฑYถ~2›ล“&’™‘มŸ}–ผผผ.}Oชชช˜=s6๚ 4์วฅ}e(ณ€BMp–[M|ijšƒsฏ๊N๛–bึ…บฎx|$M5bXท฿๒'jซ๊ธแฮŸใK๒rฺ'Mlีvlซ%‰ข+j|sณŒOH7t:ฝะุฝค#ˆnฏMำƒม}€ ‡u---œ๙ิิิ0์๚฿a๗%!ฅDั4Z+สX๒Ÿฟ”<๛์ณŒ9ฒหŸ๏ไษ“ฉฏฏล๓ำาƒ๚๚˜„+<;?^‰รฟธ๒†žde8h๚๕oO#`โ=6 ณspd …S็๐๔฿_เ‘ง๎งbหvz๕/&วŸYภ6ฅลชๅ๋ˆFโซร“œถฮญหก˜นว’P$บNR่ˆD;3ยว ๆ๎ภjhhh8จ{Yดhื^{-}๚๔กw๏œxโ‰lุฐ’3~DแจSฑ ESiฏฮn'ฺสsฯ=ว™gžูๅฯ50๙7‘Eœgๅๅ๘ปrU๎ˆ{+ฑ“„Jศสv๑‡IC‰.ฌฃใน ˜’cS\u88ุท …x฿ฏก๖-[yํ1f‘]ำ๐„CaV-_ ภ่โn„ขFยKษ=ทตsdz)PEZBฺ!~*ซต;yฏฏฌฌส›๙เƒ8็œsˆลb๑Eฏ ฯคูค•ฤ[[„ u{ณ๎พ3ะฮ๘Gฎผ๒ส#๒p—/_ฮว๓?!๙ร:ฏ=ู)HU๗uJั˜ล/(du5ผ๔วๅุ๘ฑŸvl๖ฦ+‚ุบŒJ~xฅ@ฐฝผšู๏ฬูzฐxส,6ฎฬธq๏hฑxฒNโ‘แs#P๖˜ส‘ศ๘ู‹ป=hรฒะuUUw๎4kXๅ๋ื๙ฏฝ๖?๙ษOPœ.๚w)™Žร2 I)T.™ฯา>Nฮเa๋Y๔ฤCD[น๛น็ž{Žุ๓ํฟƒ<Ž19_Yยp8ีwFชุ0ม˜มํฤๆี-|๚ำH{๖d์ราใD๚XŠ€š 6ฺยQDง:gX›Žฆiฌ_ฟ๊น;ฐvlผ๙€7ฒfอnผ๑F„ฆ3๚฿‘;tไ.'%๎Œ,*—ฬgูsOP๙๙B’œfอšล‘ถฦoฐpัB|๗‡’ฌฅว’@ฑl‚/็๖h<.>i&๕’๖8ิLืฑลป์แ9ีwึ่N2พ“ฐฯ_ฒ„ฉ“g๒ซkฯ'9beU ฒINูUแp`็ฝทfะ/'ŽHŒ4ฏ;แ2ผ๎=ยแ9iItMCUUสหห- eo๒พฑพพ€๗ฑiำ&ฺฺฺp$ง’=xX\cฑฌ๘/)QOV.๓?$หŸส3g2n8พฎ„q ‡รsฯ=(ูNœ็ิ๗D$\ๅ่B~•vIQw/xe$ข&Hำญ‹ฑš"]T›่šl0ถฑณขcWx;’่n‡ jh๙x ็,แž‡nฅ{r7VโฃN={b EGgวŸาาิFQZฆCOd‰ ขt~žbฆ๏฿MฮฑคDำ4 ร`ีชU jo`ี„B!cวŽeffข๋:แถfš6ฏหฆจ(ช†อํa๑ฟฆyหFอ€Ž่ƒmiiแฑ‡E;1ฯตฝกฏ๎5$\ๆ‡Tท วLฮผ0ŸŒนจކห็ฦ ม฿$ถฤึตh—ๆ๘๛ร๎Ÿฌh,ฦKBqชขP˜0a}u:ฉdN:uX' p`็ลพ @ฒำA[(Lฆฯฉ[I)+ธ?ื0๑8lป1Aฤ0๐๙|ิิิ‰DB@อภŠE"‘u›6mฺ๏œtาI >œhG;‹๕0ี+>ฃb‡lœ๑Soบšm ๆ‘‘ม๓ฯ?ฤŸํC=Dู๖ญ$แธƒ"์ศPแ$‡8ไ 0ำ”olวด$ฑO๋iนSdะ๘Fมš\อ@ฯ>=๘ม9ฃ1tZ จ*ม;ฏOใ๔ลl˜ฝ€ื๔Vofฤ)'ะณ[๗ฮ๖˜ญต|๚ษาx่L,ฝอ๔zŠ‚ -กyฏ๎งฎํ&@Kœบ !โg @]<๏๎#‘•[ถl้ z๛ํทน่ข‹˜6m๓๔=๏ฬ3ฯโž{๎ๆHฏ๏^บt)'NฤyIw๔าไƒ–†ู.q่ไปฆ*ฤฏ”‘W˜K๏%ฬ|kา‚ไ‰CQ}ถฃท”mงฤฐน•ภลƒPU…Ÿ]ู๎N ๏ }โ|็ ฐข ๆ›€CWืyไ๗ห้h3yเŸฟfYงp๑i?ใ‹wึัฐบ™”'Gฦ7' )"ฑ/พ้šฐZใ$ฝฯภR~๒‹๓w‰•ุ˜๗๑ฆพ๑7h,š”ฤL‹/ถื2r์0๔ํำูejณˆp(BIŸ๎lihำยกiุuต“SษปำดPlฺ^;พdฮœ9Ÿ์Xภ็ๅๅๅD"‘vpบnฮ:๋ฌฃ๊๚#‘ๅๅๅ~๛ํฌ]ป฿ฝƒัK“‘ƒkสKpœํะ๚‚t]aฮ‡UL}m}–rๆyใ๐๘ผ๘ฟนไดŸณnีF๊O›Ž๛็ฅxฎ๋šj็โ]ญH$Bฎีก้—๓1+โeทn๙ูผ๓ษ๓่šŽE|ˆด-ุฮญืร๘sวrว`๖Oฐxถ4ท๑ไ#ฟูCmojha๎Œ๙฿”าPศฯ~|+o.[G๏์ดฮร โง‹ล‘ฅซ*ถฝ‡bhjjฆชช `nผ‡Uฏ]ปถ:!อถjี*.ฟ์rศ{๏ฝ‡ใผผ7๔=hP™ฤuซ\ํˆป€hฤโ๏๗(\๖๓ ๑๘H$ูYผ๓ษ๓\{ใๅ!<ฝ๚ำฆำ6q%FY;ยก"lสื็`">แ#ร&มืหhธdNg#`Qq>oฮyทห™ J O?"Mี <๒Ÿ?`S]”^|ฯ/‚“OAฯ>=:Iป oฝ๚>ทVs๒i#่]Zย่ัรywsX๙นผปlจ…์l‹ูฉผ๏-I+พF!‰D%V[mmํบu๋ึ z์ฑว8~ศ๑ดไๆgAŽ“ค฿>hPํฬ์ฤม•oฟ\ฮ๒ล ไwฯแ‚หฮŽ1๐&yธ๏oฟๅฝ๙/’฿ฝVc„Žฌง™4^6—๐ผjPŠ[Q(โเ&@่*DMฺฝ–บqำhน๋sŒu-qฎ8j/ผ=๒0w๋NQUห3=ฯ'๔ก‰ๆWWž๐ห[ฏฤž8J ˆšพ๗_ธ_{็Uนต๑฿9gj: B ๔› ]DDQŠˆDšAคXฎจ tAŠ„:กwค”*%@€๋๔9฿"ก&„ฎ฿ณฒฒV’™œs็{ฟป<^ห”>ร€z*T๖cูๆูด๋{.EขQ(๒๔9hTH’Hzz:‹ๅHพŸ็฿$‚N–ๅs๛๗๏o๛ผฆR้๗9e๙์ห!H’”๗> ‰๋ถฑ๐งZถmB]ช‰ed*x๛Pณ"๓?MŸ&ต๎ๆMต":ˆ•p ๛IภXทqใฦF“'Oพท–นX$<<œ~๚a „๎^H› —0`$%)•q}…ฒƒvฏ๚Tผj—ฟซIฺ*โr+\รฮมŽwvGฅR=ฑAแัZL€€งwY|ผฝฑ[็ฮฐมŠ…xฆฤ—ŒŒ[%่“4ฅ ฟ-_รM{ <›kqท5๚{2ดึ๏œณณK(ตhุฐ~ƒzŽฆvƒ๊,0๓‘ๆภŒ‰†๗ๆแ่ฬ[Tฉศ=€Cผ& ›žmA~u๚(ิฌˆŠŠ’รรร‹Tปvํข]v”.็ฬ–ฃ+hิˆt(Q0z๐$S“qR…ทY†—ํ**)ิYp๋šค%hัชA-›=จ๎˜kฉ+:t่1`ย„1๗K!๗wzL˜žจ%•(9}ๆŸ}๐%ฏUซภ•%๋้ื~๑qษฌ:Oo—ฯGะาญง/1์‹ม|8บš{ฆƒ!รฬo0mยlJปฒ๙ศ29ˆ(Qy๓&฿๚„ร๛๐ฦ;ฏ>ภ฿ฅBEuฯhีtฉ€U†ไฌ[-ผ p"2† gฎค[eูๅaื๚ธวŸlŸ1cFก’ฤ๓็ฯวำำ“<ผ™2} Otส96NoถงzํสิkR›†๕ๆ๐๑0โฒ.0๕็1ธ–.ล™“0™อ๖ว g˜เ!! xพp'ABม๔ gั๋-ผฺญ-ญ_iษฒykจ๎ฺ‚E๓–“—lc•)แ๒Qู }฿ฦ‘}'Pจlฟ|“,šส็F พ‡B[Dไศใg,สz•แุ๕T ๐ฯ๛ฝลl!dq(ฝ^ฤ73วำณw—‡’ย €ZซฮKDหศ˜,fluYcฑส๒wOHq>R:I’ด.66V๓คv๘ุุXFŒมฺตkqqubศ'๏๓แจพ8kะcxขiฑC‹[ึ์b฿ฮรlXฒcyvํหc๗†๊†๎ถnแGTศ€Ÿธ de˜่Xg;i)VV๏Z@๋–-9~๚K็…ฒzษyX/:vmKƒ๚uฐ"?w-&"0uฬtๆL[Œฃ-ƒ3i๚hชT!]ง‚„ฤา_V๑๙ะฏจPูๆแ็๏“ฏV+๒ฺm๎๘ƒn๏vยีํ‘พค K5คถซ3k`ดXHฮฬฦี™๘Œl~ฺžeถZซw ,ตZ- †ฟ‡Zu๖์ูป&Nœศอ›7ฺ้ซ#“พMYŸ2ŒฟทถY~f“™ิฤtๆ๔ฟฮ ADZฟ‚ใาด‘=O~“ฐgส”๘แหนขภ•?ะ๑6(Uส|_D`โว฿ฑxึ*œK9ฒภRชิจ๘ภ5สฒœ7๎Qื/ุฟ(+TงC5?ฺW๕'Co$%;‡€ฒnฌ ฟศษ›ฑ!ภ€๛ร 1… Y„๏Wฎ\ILLฬC&,,ŒEกฝ˜ณ|e|sวฟม[ฑ"*D|*z๓ใฌษ์?ป‘ๆbM๒›๛H>„๕พ Nฃ 
o:ถ({!ev†™-ซn!ห๐๓’ฏ๎IธสHJ‰า^ฎ|5m,็ฃัฃoN>Mวฦฝ่๚J_ย…Qoส‹O๙“™4้{~๘r.อ‚rโ๚Nบ๕์„Bฅศg๚’โ’yฃuฬกŒgi๖YOU๚A(ะฺฬถˆฝFฉดอ#2[@Hฮาq๒fฌX๛(P=Xน_œžž~+$$ไ฿mุฐ ฤd2๑ษ—ƒ๙n๖ฤผ,ฯไ„ž5ซฐ้ภRพš9Jๅัo"ฉ๛^LS!whถŸ9ฎ E+็R๙๓x๕›ิขIƒmQืcภูอ‰Yฟ|ร๓›˜๔ใg$ล%ำ%ฐฝ^ f๚=\>%สB3๊=้๔7qไwœ<๒'3Oeล๘๚๙ไ3รQทc่ึบ?ว†ำกKkถ]——g‘ฬตˆHฬm[Mฝƒ๚Ÿ”ฑ(์บtเฒ$Š)˜ผz๊ิ)๎๒”ฎ\น’พ}๛bฑXXด':พั6o7W&%1•๏'ฯa้œีHv”^฿•ง–q.ํต…ซฑsT*้e{ทDณ้๐o4mู๐‰~เธ.Kวฑ#ง™>y?‡ƒ“=-4แ๋ใจโW‰,ฒ‹ฎ0ŒŠˆยว฿ญฝ๖ร%"w'q้ฃ& bไ๘`TjU‘ื@ฤ๙sำถNwถจC•ฒnคd๋ธŸยŽฟom4u6>œrr…็–.] ภแร‡ F&L๛„WปตณQ:ฃฯ!#Sส…ŸfลโM3‘ฃuฤฟบƒ&&3 ื}#J7ฃ2ูป%šสี+RฅFฅแ๎iดด%วึฒแ๗ฅดhี˜ƒปŽาภฟํ[ug๗ๆƒ$I!'Kgk™หKฺ6›*ตŠส5+กฑืไ3\""Xแ์ฉ‹4ซา‘Wo1แ๛™ะ*ฬ๙๎"แ‡้7ธ'ํปด*6๎ฎย๚Š‚(Pฉฒ?๋ำ,จ!~|0='ร;N๚‹žฏBษ€–๕๐ssa_W๘;.9่-็2ศnร<ฝ\ฑ˜-ƒฆOžOฤๅˆ%XVbภH๗]p๗(อพฐ(bu่ๅิL[W฿Bkง%ฐm3$QโE’{ด๗kา“3ะ๋Ÿ?( `ศ12iย4:7y›šj%}ืย^ฅไ\Lแทcฦc๋ภแy I’ง$ฅฎ๛จฯ็ไ่๕%ๆoษศ(ESfŒ!-ลฤาYW๒ˆ-ภ–ใ:u$‘๘˜ช๚ำผuฃถB๔a1ฅำวฮฒ|แบgถuํฑ็ยŸ—่๙J0f-็ƒuyนFฌฒL–มฤๆs˜-ึภœง7๑E‹ลbF=u1nhฏฯžjข๚ณ#&ZทkI๙ ๅXำโใs๒ฬกลlๅศX z+UjVยYr๚ฏ•5g@๗‘t่”w2,*Pgฬ˜วญร%1™เภTpwมdฑขV*Xy๊"‰™9ว€1E๓‹.w€ปยZžพฐDยฉด๏๖ทั๚|1๘$Jษv{)ษFถฏ‹ภd4W€J@เ๏หW;x ๓Cคf๕jE:Šˆค&ฅ3b๐xพ=wjWขcอJจถT˜$ ฌ›ˆ„ิF‘KR[’ภุgตZ?~ยl๙๗=วPฃ*ฑล่แ[x๙xp`[ ‡๖ฦ Pˆ\>—Jฬm[SHีš๓‘iผจ’’lฃZถe-›ซE„%บฌ>†p๏Ky๙ฎ็ํkyน—%xd_ŒF++~‰ภj•Yฟ4hŽFอ๋K ็Yl Y–1LนYG™พƒ{โUฮฃHA\/ฦ|ห๛?ฆŽJd@`}œด๊ผ‘ปj…ฤ–sWูq๑@0ฐ๚ูฯณ—ƒฮฅœƒvŸฅผนษ'jัRูฃ ‰๑ษ„์jรG๏!-ูถ8Gฮo# ฆ‹ฅตdธv%’Sวฮะท[sหฃ‹๒์’“ึ๛sยwepFxป8ๅ lคj›ฯ\แTT<ภ\เ#x6งšg~Œแๅ๔ิ๔ƒบ #)1นDNŠFŒ,Ž$I|ะ๕๗j8Fร]ฒ{˜โใ)๙–„„ัdไNƒ๘}ฯ1ฦ};ฒศ?Rn)a๐;Ÿ2.x2*๛Tล•๔OfD!‰\ŒI )+› ฑIฬ–m@(่0้Vฎ$ ‚ะ๗ฺๅศซํ๋u'!!้๙๗ๅ 0b์‡4lp๐\ฟz E K@เ๒ลZี์JNŽŽe›gแ์โTไฯผv5’ Zฏ๓วฆ nYg ญJ…J!!`ใa8GŽัฤึ 7Hษัg๏t›*‘e9R„ื/฿<ู๋ๅนu#๊น;๔,„l›Kห๖๐Oœ ฟPbฆP@@‹†ตฟ…ัทหPบ๗~ –PชTฉ"™> jB—nฆ[ะ{ธe็ะฏEm4J‰ฒฮŽh”’ ’ฆำs>:ษฬ+ท‰ฯศ]„ž@F1ํ๋bw`น‡ท{‡UปPซF๕|ร„žวb‰Šฅ[เ{DŒฆAณ:์<๚›NmฝzฃOaหฺฒ๚GZดm\ค|ฅ€brD๑l IทZqPซ๐-ํ‚"wT‰$ŠD&ฆ’e0เ ัฐ๔๘yฒ ฦต๔“Ÿฑ๙หo–‹_rAหสศnบ>dซฝๆต(๏็\ีลู‰ๆญrไภ >{^G“– P*ฯๅฬ* •™อ˜!S8ผ๏Žญข^ฝZX…ง๗•E๎ŠกO็กœsŒอjSฦัOGBn่ยbๅRL"j…Q’˜{่OŒหJ๚สo้ฤ๓r6 ‚ ฌ2Œn;6๎kไ\ส‘& ๛‘฿bถ ‰6V;o/:๗lO|L"ฟอๅzฤMjีญ†ง[ูb-VTก"1!‰ม๏Œ&-5ƒีปโๅYถHS‹–Mkทำฟาา้Vฟjฅ„ณVJ!‘–ฃ็ZB ’(เWฺ…ณั ฌ>} เ;QFศฟบ~ž^ฌุc4S์๘ใๅ๋7"้ิต]ฑj‰ฟยฯs๕u*๘aฦŒฝฝํ_ ขฌ—;sง-as่N$ตHณ& ๒ˆ/žu8แฦ๕[tiู‡r~ž,8G๛งN€่…”๘อ\ฦ –Žผh่็…ฃFณตBมีธdrŒ&JปPJซ%ไไธ~ว*CwA`ถ,?Ÿ^Itz๓_jฤ้็฿พมหฃุช Œ#*ต๊ไุq>๊;Ž›ืnSกŠใฟA`‡ๆ8ไ,*ศDDขฃโ่ำi0Zึc๖oษ$ปŸ'`ะ7์Bญใƒ–๕pตWฃQ*ัช”อฎฦ%แๆhG9'โ2ฒXr์<‰Y91@oยบ๗oัX๗ส`๓ํศ่š6์๕+ใUšš5ชK”aฃ[ฬ˜๑๕๑แํ]qttศ+Kูนi?้i88ูใ็Q –งบ&5*"oขGปด๎๘฿ฬ_คดŒ„ฤญศ(ymg๗ฅo‹:จ$;ต gญšฤฬโา2๑quฦรษž?naีฉฟIำ6 ถMื๓?— ˆ’จฑZฌS”*ๅ่๎ฝ;๓รฏ“ฑศ“eI 2Ÿ™–ลฮ๛™๔ู๗ค&ฅแ\ส‰v™2c ne  ‘7๏ะฅEo^ป#“S$mฌDม•ˆktoŒŸj—ž‰ท‹J…Dtj&‹_7g f++ร/r%>ซ,fS€ภฐ๎‘ถภoๅ=ฝ—lšI๕ฺ•๓QIXV™-kw๓fฯN.๑U ฮ^ฦ์๏{'€กcg่gpquBDxlsจ€@JR*๏พ:˜†อ๊๐ใฯSหN\ฺŒw;|HŸ2ผ^ง ๑ษT๑( ศd่d้ xบ8r+9•แ“ฎ7๚ฟ—ไ‚พ(๕น‘ข(ฎฮHหิ,_ฐฎ~B|ขXญVe\Ÿ๊ไ(ีjV.ิ{ญนฏ๓ฮ‡oR1ภ—ธุDยึ์b็ฆค$ฆขาจ๐๑๑B‰๒ฯ1Œ|ุ๓Sสxบ๑รย)˜SFd๑UŒ่๓9ซ๙Pžิ๎.dŒคf้ฑWซฐ[ฮ_cห๙kzƒู2]ฐEาฏ”๔‚พ(๋. $Y–๋ ]\๋รวดต&=วRโปํ๒บl=W๏`างำศHหฤฮ^KบU๕E0_i‡CžVU เํWƒ1่ ฌุ>/ทe๋้ฤ;ฦŽžสขŸB่฿ด.Ž$gๅเUส ฝัLtZ:~nฅุ๕\ฟƒฮd>| ย%ซ,ห/ฤZ๒โส`xีZฅง<–M๋ ีjk‹Zj๒pG‰ัdb๋๚],šฝ‚3ง.b4i๒R}ๆ.Ÿ†Wyฌฒ•™฿,$,t'ปNญy๊มฝ‰ŸLcลP‚๋SึัŽุ๔L*—u#KoไxตZ6ฝ*ง้๔ฑภืุJ^^(y‘เ|ผุพ™{ฟม=iZ๖ ;Œ๗ํ+Pp้๒UชU ศใ|–L fŽ8มๆี;Xถ` 5๋Ve๓มD•ศยŸ—๑๖{(๋QหS€JB"33‹ฯŸปŽะงY-TJ2 Fสป:“–ลมซทHฬึq#)ํ&ฐD€Er๎ิ๘ซฐ\’D‹ลโŒPฉUŸ”๗๗–FMDท^๒ขๆjิ,Zฐ‚–ญ็ฃž~ๆ'YDฬ3'ๆƒำฒMcฏ™‰มb@”ž.งฏ@"1)…ฎ0'$๑~๓:$eeใฌQใจUs๔F4{.Dg2,V๙[เAโๅฤ์ฝศฮ๛ใB2 ์ตZญ๓R’Rทฏ฿[aอฒอj•V)”๗/‡Jฉส๛{s`๗ชิจ”oาย3ฝžS_|+•ใ‡/็โํ๏Aรz๕ž*5$"u+†ฮ-E•žษ๛อ๊p%. 
Qนžœฮโc็,็ขใŒห\Yๆ5lใฒ^๔u{แ5ึ#ฤx xฃtื vƒhื)ภvอ0™Lุ;ฺฃR_‰Žˆˆ+vh j๔:พ|X:—ฌBFึตh9v<œa}วโcตะศฯ‹ฃื๏ฌำ–E–มธ ุ,ภ:›่ฟXwล ส ตF=ฌ”›3อ[5ขฐ^ดiฺ]๎0คgiER’า(Uฺ5jพv6‹~^มแKa8ๅา*Tปwเƒ#)ซR"I"Qฉ™่Œฆf Šฑดๅภ*ธ๔5Jนน8v๏ำYีฝOผ}<ฐsะขัj๒ฦท=m6ะ๊Tmอ™ห๛ษAG\l๕ผฐb<ฺt|้กํ๑wฟ[ฑ’“ญC—ญc๏ถร|:๐K,‹ฌฟ๐ำ]ุฟa!mภบปˆ~2rS 1ะุซ\ูz•kVฒซZฃUkPฅF%*V๑รั-ฯoฒไ…Hญy?{„“EUืๆDงž%“l4hฌ฿…ฦM๋๑๕q(Qไึ๘Zธโำน~ๅ&W.\ใ๒….เ๊ล๋Y qIงฐM}๑ฆ5P%#฿n ‚*หฒCฬxว˜;๑ี๎ฃJญ๊ ึจh4jมน”uึคzสTฎ^‘สี+โYฎ …E.@๒—1 ˜ญ^i*L(จำฐืฏ฿ไา๙nEFqๅโu.žฝฬ…ำ—ศฬศย 7สฝแจัhฺƒ/"WKe๑/ฟRจ ิ๊ฎ€เ ตื*}|ฝD'{…›{)AฃQ ๗๊/ญษฉฉrJRš5)!ลš›dฮษั™ฑีgๆjŸ3ภŸภ9เ๊ท‡๛X3ก2ฒc.ภด€  ฬ}NB~ƒˆœ๋X็้๗}ŸO'ล!๊fฝl๛ห–IENDฎB`‚././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/migrating.rst0000644000175100001770000000460000000000000017407 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. _migrating: ========================== Migrating between versions ========================== New versions of coverage.py or Python might require you to adjust your settings, options, or other aspects of how you use coverage.py. This page details those changes. .. _migrating_cov7x: Migrating to coverage.py 7.x ---------------------------- Consider these changes when migrating to coverage.py 7.x: - The way that wildcards when specifying file paths work in certain cases has changed in 7.x: - Previously, ``*`` would incorrectly match directory separators, making precise matching difficult. Patterns such as ``*tests/*`` will need to be changed to ``*/tests/*``. - ``**`` now matches any number of nested directories. If you wish to retain the behavior of ``**/tests/*`` in previous versions then ``*/**/tests/*`` can be used instead. - When remapping file paths with ``[paths]``, a path will be remapped only if the resulting path exists. Ensure that remapped ``[paths]`` exist when upgrading as this is now being enforced. - The :ref:`config_report_exclude_also` setting is new in 7.2.0. It adds exclusion regexes while keeping the default built-in set. It's better than the older :ref:`config_report_exclude_lines` setting, which overwrote the entire list. Newer versions of coverage.py will be adding to the default set of exclusions. Using ``exclude_also`` will let you benefit from those updates. .. _migrating_cov62: Migrating to coverage.py 6.2 ---------------------------- - The ``--concurrency`` settings changed in 6.2 to be a list of values. You might need to explicitly list concurrency options that we previously implied. For example, ``--concurrency=multiprocessing`` used to implicitly enable thread concurrency. Now that must be explicitly enabled with ``--concurrency=multiprocessing,thread``. .. _migrating_py312: Migrating to Python 3.12 ------------------------ Keep these things in mind when running under Python 3.12: - Python 3.12 now inlines list, dict, and set comprehensions. Previously, they were compiled as functions that were called internally. Coverage.py would warn you if comprehensions weren't fully completed, but this no longer happens with Python 3.12. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/other.rst0000644000175100001770000000766400000000000016564 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. 
For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. _other: =============== Other resources =============== There are a number of projects that help integrate coverage.py into other systems, provide help using it, offer assistance, and so on. There's no guarantee these items are maintained or work well. Some of them seem to be quite old. If you have suggestions for updates to this page, `open a pull request`_ or `get in touch`_ some other way. .. _open a pull request: https://github.com/nedbat/coveragepy/blob/master/doc/other.rst .. _get in touch: https://nedbatchelder.com/site/aboutned.html Test runners ------------ Helpers for using coverage with specific test runners. * `pytest-cov`__ is a pytest plugin to coordinate coverage.py usage. __ https://pypi.org/project/pytest-cov/ * `trialcoverage`__ is a plug-in for Twisted trial. __ https://pypi.org/project/trialcoverage/ Configuration helpers --------------------- Tools to provide more control over how coverage is configured. * `covdefaults`__ provides "sensible" default settings for coverage. __ https://github.com/asottile/covdefaults * `coverage-conditional-plugin`__ lets you use conditions instead of simple "no cover" pragmas to control what lines are considered under different conditions. __ https://github.com/wemake-services/coverage-conditional-plugin Language plugins ---------------- Coverage.py plugins to enable coverage measurement of other languages. * `django-coverage`__ measures the coverage of Django templates. __ https://pypi.org/project/django-coverage/ * `Cython`__ provides a plugin for measuring Cythonized code. __ https://cython.readthedocs.io/en/latest/src/tutorial/profiling_tutorial.html#enabling-coverage-analysis * `coverage-jinja-plugin`__ is an incomplete Jinja2 plugin. __ https://github.com/MrSenko/coverage-jinja-plugin * `coverage-sh`__ measures code coverage of shell (sh or bash) scripts executed from Python with subprocess. __ https://github.com/lackhove/coverage-sh * `hy-coverage`__ supports the Hy language. __ https://github.com/timmartin/hy-coverage * `coverage-mako-plugin`__ measures coverage in Mako templates. Doesn't work yet, probably needs some changes in Mako itself. __ https://bitbucket-archive.softwareheritage.org/projects/ne/ned/coverage-mako-plugin.html Reporting helpers ----------------- Helpers for seeing the results. * `python-coverage-comment-action`__ can publish a delta coverage report as a pull request comment, create a coverage badge, or a dashboard to display in your readme. __ https://github.com/py-cov-action/python-coverage-comment-action * `diff-cover`__ reports on the coverage of lines changed in a pull request. __ https://pypi.org/project/diff-cover/ * `cuvner`__ offers alternate visualizations of coverage data, including ones for use in terminals. __ https://meejah.ca/projects/cuvner * `emacs-python-coverage`__ is an experimental Emacs package to report code coverage output produced by Python's coverage package directly inside Emacs buffers. __ https://github.com/wbolster/emacs-python-coverage * `python-genbadge`__ provides a set of command line utilities to generate badges for tools that do not provide one, including coverage badges. __ https://smarie.github.io/python-genbadge/ Other articles -------------- Writings about ways to enhance your use of coverage.py. * `How to Ditch Codecov for Python Projects`__: using GitHub Actions to manage coverage across versions and report on results. 
__ https://hynek.me/articles/ditch-codecov-python/ * `Making a coverage badge`__: using GitHub Actions to produce a colored badge. __ https://nedbatchelder.com/blog/202209/making_a_coverage_badge.html * `Coverage goals`__: a sidecar tool for reporting on per-file coverage goals. __ https://nedbatchelder.com/blog/202111/coverage_goals.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/plugins.rst0000644000175100001770000000735400000000000017120 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. This file is processed with cog to create the tabbed multi-syntax configuration examples. If those are wrong, the quality checks will fail. Running "make prebuild" checks them and produces the output. .. [[[cog from cog_helpers import show_configs .. ]]] .. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e) .. _plugins: ======== Plug-ins ======== Coverage.py's behavior can be extended with third-party plug-ins. A plug-in is a separately installed Python class that you register in your .coveragerc. Plugins can alter a number of aspects of coverage.py's behavior, including implementing coverage measurement for non-Python files. Information about using plug-ins is on this page. To write a plug-in, see :ref:`api_plugin`. See :ref:`other` for available plug-ins. .. versionadded:: 4.0 Using plug-ins -------------- To use a coverage.py plug-in, you install it and configure it. For this example, let's say there's a Python package called ``something`` that provides a coverage.py plug-in called ``something.plugin``. #. Install the plug-in's package as you would any other Python package: .. code-block:: sh $ python3 -m pip install something #. Configure coverage.py to use the plug-in. You do this by editing (or creating) your .coveragerc file, as described in :ref:`config`. The ``plugins`` setting indicates your plug-in. It's a list of importable module names of plug-ins: .. [[[cog show_configs( ini=r""" [run] plugins = something.plugin """, toml=r""" [tool.coverage.run] plugins = [ "something.plugin" ] """, ) .. ]]] .. tabs:: .. code-tab:: ini :caption: .coveragerc [run] plugins = something.plugin .. code-tab:: toml :caption: pyproject.toml [tool.coverage.run] plugins = [ "something.plugin" ] .. code-tab:: ini :caption: setup.cfg, tox.ini [coverage:run] plugins = something.plugin .. [[[end]]] (checksum: 788b15abb3c53370ccae3d9348e65385) #. If the plug-in needs its own configuration, you can add those settings in the .coveragerc file in a section named for the plug-in: .. [[[cog show_configs( ini=r""" [something.plugin] option1 = True option2 = abc.foo """, toml=r""" [tool.coverage.something.plugin] option1 = true option2 = "abc.foo" """, ) .. ]]] .. tabs:: .. code-tab:: ini :caption: .coveragerc [something.plugin] option1 = True option2 = abc.foo .. code-tab:: toml :caption: pyproject.toml [tool.coverage.something.plugin] option1 = true option2 = "abc.foo" .. code-tab:: ini :caption: setup.cfg, tox.ini [coverage:something.plugin] option1 = True option2 = abc.foo .. [[[end]]] (checksum: 71aa2ad856e03d228758fd5026fd3a52) Check the documentation for the plug-in for details on the options it takes. #. Run your tests with coverage.py as you usually would. 
If you get a message like "Plugin file tracers (something.plugin) aren't supported with PyTracer," then you don't have the :ref:`C extension ` installed. The C extension is needed for certain plug-ins. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/python-coverage.1.txt0000644000175100001770000003026600000000000020715 0ustar00runnerdocker00000000000000=============== python-coverage =============== ---------------------------- Measure Python code coverage ---------------------------- :Author: Ned Batchelder :Author: |author| :Date: 2022-12-03 :Copyright: Apache 2.0 license, attribution and disclaimer required. :Manual section: 1 :Manual group: Coverage.py .. |command| replace:: **python-coverage** .. To test this file: $ rst2man < doc/python-coverage.1.txt | groff -man -Tascii SYNOPSIS ======== | |command| `command` [ `option` ... ] | |command| **help** [ `command` ] DESCRIPTION =========== |command| executes a Python program and measures which of its statements are executed and which are not, and reports these coverage measurements. COMMAND OVERVIEW ================ |command| **annotate** Annotate source files with execution information. |command| **combine** Combine a number of data files. |command| **debug** Display information about the internals of coverage.py. |command| **erase** Erase previously collected coverage data. |command| **help** Get help on using coverage.py. |command| **html** Create an HTML report. |command| **json** Create a JSON report of coverage results. |command| **report** Report coverage stats on modules. |command| **run** Run a Python program and measure code execution. |command| **xml** Create an XML report of coverage results. |command| **lcov** Create an LCOV report of coverage results. GLOBAL OPTIONS ============== **--help**, **-h** Describe how to use coverage.py, in general or a command. **--rcfile** `RCFILE` Specify configuration file `RCFILE`. By default '.coveragerc', 'setup.cfg', 'tox.ini', and 'pyproject.toml' are tried. **--debug** `DEBUGOPT`,... Debug options `DEBUGOPT`, separated by commas. COMMAND REFERENCE ================= **annotate** [ `option` ... ] Options: \-d `DIR`, --directory=`DIR` Write the output files to DIR. \--data-file `INFILE` Read coverage data for report generation from this file. Defaults to '.coverage'. \-i, --ignore-errors Ignore errors while reading source files. \--include `PATTERN` [ , ... ] Include only files whose paths match one of these PATTERNs. Accepts shell-style wildcards, which must be quoted. \--omit `PATTERN` [ , ... ] Omit files when their file name matches one of these PATTERNs. Usually needs quoting on the command line. **combine** [ `option` ... ] [ `PATH` ... ] Combine data from multiple coverage files collected with ``run -p``. The combined results are written to a single file representing the union of the data. Unless --keep is provided the original input coverage files are deleted. If `PATH` is specified, they are files or directories containing data to be combined. Options: \--append Append coverage data to .coverage, otherwise it starts clean each time. \--data-file `DATAFILE` Base name of the data files to operate on. Defaults to '.coverage'. \--keep Keep original coverage data files. \-q, --quiet Don't print messages about what is happening. **debug** `TOPIC` ... Display information about the internals of coverage.py, for diagnosing problems. 
Topics are: ``data`` to show a summary of the collected data; ``sys`` to show installation information; ``config`` to show the configuration; ``premain`` to show what is calling coverage; ``pybehave`` to show internal flags describing Python behavior. **erase** Erase previously collected coverage data. Options: \--data-file `DATAFILE` Base name of the data files to operate on. Defaults to '.coverage'. **help** [ `command` ] Describe how to use coverage.py. **html** [ `option` ... ] [ `MODULE` ... ] Create an HTML report of the coverage of each `MODULE` file. Each file gets its own page, with the source decorated to show executed, excluded, and missed lines. Options: \--contexts `PAT` [ , ... ] Only include contexts that match one of the regex patterns. \-d `DIR`, --directory `DIR` Write the output files to `DIR`. \--data-file `INFILE` Read coverage data for report generation from this file. Defaults to '.coverage'. \--fail-under `MIN` Exit with a status of 2 if the total coverage is less than `MIN`. \-i, --ignore-errors Ignore errors while reading source files. \--include `PATTERN` [ , ... ] Include only files whose paths match one of these PATTERNs. Accepts shell-style wildcards, which must be quoted. \--omit `PATTERN` [ , ... ] Omit files when their file name matches one of these PATTERNs. Usually needs quoting on the command line. \--precision `N` Number of digits after the decimal point to display for reported coverage percentages. \-q, --quiet Don't print messages about what is happening. \--show-contexts Annotate lines with the contexts that executed them. \--skip-covered Skip files with 100% coverage. \--no-skip-covered Disable ``--skip-covered``. \--skip-empty Skip files with no code. \--title `TITLE` Use the text string `TITLE` as the title on the HTML. **json** [ `option` ... ] [ `MODULE` ... ] Generate a JSON report of coverage results. \--contexts `PAT` [ , ... ] Only include contexts that match one of the regex patterns. \--data-file `INFILE` Read coverage data for report generation from this file. Defaults to '.coverage'. \--fail-under `MIN` Exit with a status of 2 if the total coverage is less than `MIN`. \-i, --ignore-errors Ignore errors while reading source files. \--include `PATTERN` [ , ... ] Include only files whose paths match one of these PATTERNs. Accepts shell-style wildcards, which must be quoted. \-o `OUTFILE` Write the JSON report to `OUTFILE`. Defaults to ``coverage.json``. \--omit `PATTERN` [ , ... ] Omit files when their file name matches one of these PATTERNs. Usually needs quoting on the command line. \--pretty-print Format the JSON for human readers. \-q, --quiet Don't print messages about what is happening. \--show-contexts Include information about the contexts that executed each line. **lcov** [ `option` ... ] [ `MODULE` ... ] Create an LCOV report of the coverage results. Options: \--data-file `INFILE` Read coverage data for report generation from this file. Defaults to '.coverage'. \--fail-under `MIN` Exit with a status of 2 if the total coverage is less than `MIN`. \-i, --ignore-errors Ignore errors while reading source files. \-o `OUTFILE` Write the LCOV report to `OUTFILE`. Defaults to ``coverage.lcov``. \--include `PATTERN` [ , ... ] Include only files whose paths match one of these PATTERNs. Accepts shell-style wildcards, which must be quoted. \--omit `PATTERN` [ , ... ] Omit files when their file name matches one of these PATTERNs. Usually needs quoting on the command line. \-q, --quiet Don't print messages about what is happening. 
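A typical session that ties several of the commands in this reference together, first measuring a parallel test run and then producing terminal, HTML, and LCOV output, might look like the following sketch (illustrative only; ``tests`` is a placeholder for your own test module, and every option shown is documented in this manual)::

    # 'tests' is a placeholder module name; substitute your own test entry point.
    python-coverage run -p -m tests
    # merge the per-process data files written by 'run -p'
    python-coverage combine
    # terminal summary, with missing line numbers, skipping fully covered files
    python-coverage report -m --skip-covered
    # HTML and LCOV reports from the same combined data
    python-coverage html -d htmlcov
    python-coverage lcov -o coverage.lcov

``run -p`` writes one data file per process, so ``combine`` must be run before the reporting commands; the reports then all read the same combined ``.coverage`` data file.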
**report** [ `option` ... ] [ `MODULE` ... ] Report coverage statistics on each `MODULE`. Options: \--contexts `PAT` [ , ... ] Only include contexts that match one of the regex patterns. \--data-file `INFILE` Read coverage data for report generation from this file. Defaults to '.coverage'. \--fail-under `MIN` Exit with a status of 2 if the total coverage is less than `MIN`. \--format `FORMAT` Output format, either text (default), markdown, or total. \-i, --ignore-errors Ignore errors while reading source files. \--include `PATTERN` [ , ... ] Include only files whose paths match one of these PATTERNs. Accepts shell-style wildcards, which must be quoted. \--omit `PATTERN` [ , ... ] Omit files when their file name matches one of these PATTERNs. Usually needs quoting on the command line. \-m, --show-missing Show line numbers of statements in each module that weren't executed. \--precision `N` Number of digits after the decimal point to display for reported coverage percentages. \--skip-covered Skip files with 100% coverage. \--no-skip-covered Disable ``--skip-covered``. \--skip-empty Skip files with no code. \--sort `COLUMN` Sort the report by the named column: `name`, `stmts`, `miss`, `branch`, `brpart`, or `cover`. **run** [ `options` ... ] `PROGRAMFILE` [ `program_options` ] Run a Python program `PROGRAMFILE`, measuring code execution. Options: \-a, --append Append coverage data to .coverage, otherwise it is started clean with each run. \--branch Measure branch coverage in addition to statement coverage. \--concurrency `LIBS` Properly measure code using a concurrency library. Valid values are: thread, gevent, greenlet, eventlet, multiprocessing, or a comma-list of them. \--context `CONTEXT` The context label to record for this coverage run. \--data-file `OUTFILE` Write the recorded coverage data to this file. Defaults to '.coverage'. \--include `PATTERN` [ , ... ] Include only files whose paths match one of these PATTERNs. Accepts shell-style wildcards, which must be quoted. \-m `PROGRAMFILE` is interpreted as a module name. \--omit `PATTERN` [ , ... ] Omit files when their file name matches one of these PATTERNs. Usually needs quoting on the command line. \-L, --pylib Measure coverage even inside the Python installed library, which isn't done by default. \-p, --parallel-mode Append the machine name, process id and random number to the ``.coverage`` data file name to simplify collecting data from many processes. \--source `SOURCE` ... A list of packages or directories of code to be measured. \--timid Use the slower Python trace function core. **xml** [ `options` ... ] [ `MODULES` ... ] Generate an XML report of coverage results on each `MODULE`. Options: \--data-file `INFILE` Read coverage data for report generation from this file. Defaults to '.coverage'. \--fail-under `MIN` Exit with a status of 2 if the total coverage is less than `MIN`. \-i, --ignore-errors Ignore errors while reading source files. \--include `PATTERN` [ , ... ] Include only files whose paths match one of these PATTERNs. Accepts shell-style wildcards, which must be quoted. \--omit `PATTERN` [ , ... ] Omit files when their file name matches one of these PATTERNs. Usually needs quoting on the command line. \-o `OUTFILE` Write the XML report to `OUTFILE`. Defaults to ``coverage.xml``. \-q, --quiet Don't print messages about what is happening. \--skip-empty Skip files with no code. ENVIRONMENT VARIABLES ===================== COVERAGE_FILE Path to the file where coverage measurements are collected to and reported from.
Default: ``.coverage`` in the current working directory. COVERAGE_RCFILE Path to the configuration file, often named ``.coveragerc``. HISTORY ======= The |command| command is a Python program which calls the ``coverage`` Python library to do all the work. It was originally developed by Gareth Rees, and is now developed by Ned Batchelder and many others. This manual page was written by |author|. .. |author| replace:: |authorname| |authoremail| .. |authorname| replace:: Ben Finney .. |authoremail| replace:: .. Local variables: mode: rst coding: utf-8 time-stamp-format: "%:y-%02m-%02d" time-stamp-start: "^:Date:[ ]+" time-stamp-end: "$" time-stamp-line-limit: 20 End: vim: filetype=rst fileencoding=utf-8 : ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/requirements.in0000644000175100001770000000074400000000000017754 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # PyPI requirements input for building documentation for coverage.py # "make doc_upgrade" turns this into doc/requirements.pip -c ../requirements/pins.pip cogapp doc8 pyenchant scriv # for writing GitHub releases sphinx sphinx-autobuild sphinx_rtd_theme sphinx-code-tabs sphinxcontrib-restbuilder sphinxcontrib-spelling ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/requirements.pip0000644000175100001770000000436300000000000020137 0ustar00runnerdocker00000000000000# # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # make doc_upgrade # alabaster==0.7.16 # via sphinx attrs==23.2.0 # via scriv babel==2.14.0 # via sphinx certifi==2024.2.2 # via requests charset-normalizer==3.3.2 # via requests click==8.1.7 # via # click-log # scriv click-log==0.4.0 # via scriv cogapp==3.4.1 # via -r doc/requirements.in colorama==0.4.6 # via sphinx-autobuild doc8==1.1.1 # via -r doc/requirements.in docutils==0.20.1 # via # doc8 # restructuredtext-lint # sphinx # sphinx-rtd-theme idna==3.6 # via requests imagesize==1.4.1 # via sphinx jinja2==3.1.3 # via # scriv # sphinx livereload==2.6.3 # via sphinx-autobuild markdown-it-py==3.0.0 # via scriv markupsafe==2.1.5 # via jinja2 mdurl==0.1.2 # via markdown-it-py packaging==24.0 # via sphinx pbr==6.0.0 # via stevedore pyenchant==3.2.2 # via # -r doc/requirements.in # sphinxcontrib-spelling pygments==2.17.2 # via # doc8 # sphinx requests==2.31.0 # via # scriv # sphinx restructuredtext-lint==1.4.0 # via doc8 scriv==1.5.1 # via -r doc/requirements.in six==1.16.0 # via livereload snowballstemmer==2.2.0 # via sphinx sphinx==7.2.6 # via # -r doc/requirements.in # sphinx-autobuild # sphinx-code-tabs # sphinx-rtd-theme # sphinxcontrib-jquery # sphinxcontrib-restbuilder # sphinxcontrib-spelling sphinx-autobuild==2024.2.4 # via -r doc/requirements.in sphinx-code-tabs==0.5.5 # via -r doc/requirements.in sphinx-rtd-theme==2.0.0 # via -r doc/requirements.in sphinxcontrib-applehelp==1.0.8 # via sphinx sphinxcontrib-devhelp==1.0.6 # via sphinx sphinxcontrib-htmlhelp==2.0.5 # via sphinx sphinxcontrib-jquery==4.1 # via sphinx-rtd-theme sphinxcontrib-jsmath==1.0.1 # via sphinx sphinxcontrib-qthelp==1.0.7 # via sphinx sphinxcontrib-restbuilder==0.3 # via -r doc/requirements.in sphinxcontrib-serializinghtml==1.1.10 # via sphinx sphinxcontrib-spelling==8.0.0 # via -r doc/requirements.in 
stevedore==5.2.0 # via doc8 tornado==6.4 # via livereload urllib3==2.2.1 # via requests ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.125815 coverage-7.4.4/doc/sample_html/0000755000175100001770000000000000000000000017201 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/sample_html/favicon_32.png0000644000175100001770000000330400000000000021640 0ustar00runnerdocker00000000000000[binary PNG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/sample_html/keybd_closed.png0000644000175100001770000002145400000000000022344 0ustar00runnerdocker00000000000000[binary PNG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/sample_html/keybd_open.png0000644000175100001770000002145300000000000022033 0ustar00runnerdocker00000000000000[binary PNG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/sleepy.rst0000644000175100001770000000120300000000000016723 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. _sleepy: ============ Sleepy Snake ============ Coverage.py's mascot is Sleepy Snake, drawn by Ben Batchelder. Ben's art can be found on `Instagram`_ and at `artofbatch.com`_. Some details of Sleepy's creation are on `Ned's blog`__. __ https://nedbatchelder.com/blog/201912/sleepy_snake.html .. image:: media/sleepy-snake-600.png :alt: Sleepy Snake, cozy in his snake-shaped bed. .. _Instagram: https://instagram.com/artofbatch .. _artofbatch.com: https://artofbatch.com ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/source.rst0000644000175100001770000001756000000000000016735 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. This file is processed with cog to create the tabbed multi-syntax configuration examples. If those are wrong, the quality checks will fail. Running "make prebuild" checks them and produces the output. .. [[[cog from cog_helpers import show_configs .. ]]] .. [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e) ..
_source: ======================= Specifying source files ======================= When coverage.py is running your program and measuring its execution, it needs to know what code to measure and what code not to. Measurement imposes a speed penalty, and the collected data must be stored in memory and then on disk. More importantly, when reviewing your coverage reports, you don't want to be distracted with modules that aren't your concern. Coverage.py has a number of ways you can focus it in on the code you care about. .. _source_execution: Execution --------- When running your code, the ``coverage run`` command will by default measure all code, unless it is part of the Python standard library. You can specify source to measure with the ``--source`` command-line switch, or the ``[run] source`` configuration value. The value is a comma- or newline-separated list of directories or importable names (packages or modules). If the source option is specified, only code in those locations will be measured. Specifying the source option also enables coverage.py to report on un-executed files, since it can search the source tree for files that haven't been measured at all. Only importable files (ones at the root of the tree, or in directories with a ``__init__.py`` file) will be considered. Files with unusual punctuation in their names will be skipped (they are assumed to be scratch files written by text editors). Files that do not end with ``.py``, ``.pyw``, ``.pyo``, or ``.pyc`` will also be skipped. .. note:: Modules named as sources may be imported twice, once by coverage.py to find their location, then again by your own code or test suite. Usually this isn't a problem, but could cause trouble if a module has side-effects at import time. Exceptions during the early import are suppressed and ignored. You can further fine-tune coverage.py's attention with the ``--include`` and ``--omit`` switches (or ``[run] include`` and ``[run] omit`` configuration values). ``--include`` is a list of file name patterns. If specified, only files matching those patterns will be measured. ``--omit`` is also a list of file name patterns, specifying files not to measure. If both ``include`` and ``omit`` are specified, first the set of files is reduced to only those that match the include patterns, then any files that match the omit pattern are removed from the set. .. highlight:: ini The ``include`` and ``omit`` file name patterns follow common shell syntax, described below in :ref:`source_glob`. Patterns that start with a wildcard character are used as-is, other patterns are interpreted relative to the current directory: .. [[[cog show_configs( ini=r""" [run] omit = # omit anything in a .local directory anywhere */.local/* # omit everything in /usr /usr/* # omit this single file utils/tirefire.py """, toml=r""" [tool.coverage.run] omit = [ # omit anything in a .local directory anywhere "*/.local/*", # omit everything in /usr "/usr/*", # omit this single file "utils/tirefire.py", ] """, ) .. ]]] .. tabs:: .. code-tab:: ini :caption: .coveragerc [run] omit = # omit anything in a .local directory anywhere */.local/* # omit everything in /usr /usr/* # omit this single file utils/tirefire.py .. code-tab:: toml :caption: pyproject.toml [tool.coverage.run] omit = [ # omit anything in a .local directory anywhere "*/.local/*", # omit everything in /usr "/usr/*", # omit this single file "utils/tirefire.py", ] .. 
code-tab:: ini :caption: setup.cfg, tox.ini [coverage:run] omit = # omit anything in a .local directory anywhere */.local/* # omit everything in /usr /usr/* # omit this single file utils/tirefire.py .. [[[end]]] (checksum: 9fa764509b4c484ea613298a20d4b577) The ``source``, ``include``, and ``omit`` values all work together to determine the source that will be measured. If both ``source`` and ``include`` are set, the ``include`` value is ignored and a warning is issued. .. _source_reporting: Reporting --------- Once your program is measured, you can specify the source files you want reported. Usually you want to see all the code that was measured, but if you are measuring a large project, you may want to get reports for just certain parts. The report commands (``report``, ``html``, ``json``, ``lcov``, ``annotate``, and ``xml``) all take optional ``modules`` arguments, and ``--include`` and ``--omit`` switches. The ``modules`` arguments specify particular modules to report on. The ``include`` and ``omit`` values are lists of file name patterns, just as with the ``run`` command. Remember that the reporting commands can only report on the data that has been collected, so the data you're looking for may not be in the data available for reporting. Note that these are ways of specifying files to measure. You can also exclude individual source lines. See :ref:`excluding` for details. .. _source_glob: File patterns ------------- File path patterns are used for :ref:`include ` and :ref:`omit `, and for :ref:`combining path remapping `. They follow common shell syntax: - ``?`` matches a single file name character. - ``*`` matches any number of file name characters, not including the directory separator. As a special case, if a pattern starts with ``*/``, it is treated as ``**/``, and if a pattern ends with ``/*``, it is treated as ``/**``. - ``**`` matches any number of nested directory names, including none. It must be used as a full component of the path, not as part of a word: ``/**/`` is allowed, but ``/a**/`` is not. - Both ``/`` and ``\`` will match either a slash or a backslash, to make cross-platform matching easier. - A pattern with no directory separators matches the file name in any directory. Some examples: .. list-table:: :widths: 20 20 20 :header-rows: 1 * - Pattern - Matches - Doesn't Match * - ``a*.py`` - | anything.py | sub1/sub2/another.py - | cat.py * - ``sub/*/*.py`` - | sub/a/main.py | sub/b/another.py - | sub/foo.py | sub/m1/m2/foo.py * - ``sub/**/*.py`` - | sub/something.py | sub/a/main.py | sub/b/another.py | sub/m1/m2/foo.py - | sub1/anything.py | sub1/more/code/main.py * - ``*/sub/*`` - | some/where/sub/more/something.py | sub/hello.py - | sub1/anything.py * - ``*/sub*/*`` - | some/where/sub/more/something.py | sub/hello.py | sub1/anything.py - | some/more/something.py * - ``*/*sub/test_*.py`` - | some/where/sub/test_everything.py | moresub/test_things.py - | some/where/sub/more/test_everything.py | more/test_things.py * - ``*/*sub/*sub/**`` - | sub/sub/something.py | asub/bsub/more/thing.py | code/sub/sub/code.py - | sub/something.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/subprocess.rst0000644000175100001770000001031100000000000017612 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. 
_subprocess: ======================= Measuring sub-processes ======================= Complex test suites may spawn sub-processes to run tests, either to run them in parallel, or because sub-process behavior is an important part of the system under test. Measuring coverage in those sub-processes can be tricky because you have to modify the code spawning the process to invoke coverage.py. There's an easier way to do it: coverage.py includes a function, :func:`coverage.process_startup` designed to be invoked when Python starts. It examines the ``COVERAGE_PROCESS_START`` environment variable, and if it is set, begins coverage measurement. The environment variable's value will be used as the name of the :ref:`configuration file ` to use. .. note:: The subprocess only sees options in the configuration file. Options set on the command line will not be used in the subprocesses. .. note:: If you have subprocesses created with :mod:`multiprocessing `, the ``--concurrency=multiprocessing`` command-line option should take care of everything for you. See :ref:`cmd_run` for details. When using this technique, be sure to set the parallel option to true so that multiple coverage.py runs will each write their data to a distinct file. Configuring Python for sub-process measurement ---------------------------------------------- Measuring coverage in sub-processes is a little tricky. When you spawn a sub-process, you are invoking Python to run your program. Usually, to get coverage measurement, you have to use coverage.py to run your program. Your sub-process won't be using coverage.py, so we have to convince Python to use coverage.py even when not explicitly invoked. To do that, we'll configure Python to run a little coverage.py code when it starts. That code will look for an environment variable that tells it to start coverage measurement at the start of the process. To arrange all this, you have to do two things: set a value for the ``COVERAGE_PROCESS_START`` environment variable, and then configure Python to invoke :func:`coverage.process_startup` when Python processes start. How you set ``COVERAGE_PROCESS_START`` depends on the details of how you create sub-processes. As long as the environment variable is visible in your sub-process, it will work. You can configure your Python installation to invoke the ``process_startup`` function in two ways: #. Create or append to sitecustomize.py to add these lines:: import coverage coverage.process_startup() #. Create a .pth file in your Python installation containing:: import coverage; coverage.process_startup() The sitecustomize.py technique is cleaner, but may involve modifying an existing sitecustomize.py, since there can be only one. If there is no sitecustomize.py already, you can create it in any directory on the Python path. The .pth technique seems like a hack, but works, and is documented behavior. On the plus side, you can create the file with any name you like so you don't have to coordinate with other .pth files. On the minus side, you have to create the file in a system-defined directory, so you may need privileges to write it. Note that if you use one of these techniques, you must undo them if you uninstall coverage.py, since you will be trying to import it during Python start-up. Be sure to remove the change when you uninstall coverage.py, or use a more defensive approach to importing it. 
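For example, a defensive ``sitecustomize.py`` can guard the import so that Python start-up keeps working even if coverage.py is later uninstalled. This is a minimal sketch of one way to do it, not something coverage.py requires; only the ``coverage.process_startup()`` call itself is the documented interface::

    # sitecustomize.py -- imported automatically at Python start-up.
    try:
        import coverage
    except ImportError:
        # coverage.py isn't installed; run without measurement.
        pass
    else:
        # Starts measurement only when COVERAGE_PROCESS_START is set.
        coverage.process_startup()
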
Process termination ------------------- To successfully write a coverage data file, the Python sub-process under analysis must shut down cleanly and have a chance for coverage.py to run its termination code. It will do that when the process ends naturally, or when a SIGTERM signal is received. Coverage.py uses :mod:`atexit ` to handle usual process ends, and a :mod:`signal ` handler to catch SIGTERM signals. Other ways of ending a process, like SIGKILL or :func:`os._exit `, will prevent coverage.py from writing its data file, leaving you with incomplete or non-existent coverage data. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/trouble.rst0000644000175100001770000000472600000000000017113 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt .. _trouble: ========================= Things that cause trouble ========================= Coverage.py works well, and I want it to properly measure any Python program, but there are some situations it can't cope with. This page details some known problems, with possible courses of action, and links to coverage.py bug reports with more information. I would love to :ref:`hear from you ` if you have information about any of these problems, even just to explain to me why you want them to start working properly. If your problem isn't discussed here, you can of course search the `coverage.py bug tracker`_ directly to see if there is some mention of it. .. _coverage.py bug tracker: https://github.com/nedbat/coveragepy/issues Things that don't work ---------------------- There are a few modules or functions that prevent coverage.py from working properly: * `execv`_, or one of its variants. These end the current program and replace it with a new one. This doesn't save the collected coverage data, so your program that calls execv will not be fully measured. A patch for coverage.py is in `issue 43`_. * `thread`_, in the Python standard library, is the low-level threading interface. Threads created with this module will not be traced. Use the higher-level `threading`_ module instead. * `sys.settrace`_ is the Python feature that coverage.py uses to see what's happening in your program. If another part of your program is using sys.settrace, then it will conflict with coverage.py, and it won't be measured properly. * `sys.setprofile`_ calls your code, but while running your code, does not fire trace events. This means that coverage.py can't see what's happening in that code. .. _execv: https://docs.python.org/3/library/os.html#os.execl .. _sys.settrace: https://docs.python.org/3/library/sys.html#sys.settrace .. _sys.setprofile: https://docs.python.org/3/library/sys.html#sys.setprofile .. _thread: https://docs.python.org/3/library/_thread.html .. _threading: https://docs.python.org/3/library/threading.html .. _issue 43: https://github.com/nedbat/coveragepy/issues/43 Still having trouble? --------------------- If your problem isn't mentioned here, and isn't already reported in the `coverage.py bug tracker`_, please :ref:`get in touch with me `, we'll figure out a solution. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/doc/whatsnew5x.rst0000644000175100001770000001543000000000000017546 0ustar00runnerdocker00000000000000.. 
Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt :orphan: .. _whatsnew5x: ==================== Major changes in 5.0 ==================== This is an overview of the changes in 5.0 since the last version of 4.5.x. This is not a complete list of all changes. See the :ref:`complete change history ` for all the details. Open Questions -------------- - How to support SQL access to data? The database schema has to be convenient and efficient for coverage.py's execution, which would naturally make it an internal implementation detail. But the coverage data is now more complex, and SQL access could be a powerful way to access it, pointing toward a public guaranteed schema. What's the right balance? Backward Incompatibilities -------------------------- - Python 2.6, 3.3 and 3.4 are no longer supported. - The :class:`.CoverageData` interface is still public, but has changed. - The data file is now created earlier than it used to be. In some circumstances, you may need to use ``parallel=true`` to avoid multiple processes overwriting each others' data. - When constructing a :class:`coverage.Coverage` object, `data_file` can be specified as None to prevent writing any data file at all. In previous versions, an explicit `data_file=None` argument would use the default of ".coverage". Fixes `issue 871`_. .. _issue 871: https://github.com/nedbat/coveragepy/issues/871 - The ``[run] note`` setting has been deprecated. Using it will result in a warning, and the note will not be written to the data file. The corresponding :class:`.CoverageData` methods have been removed. - The deprecated `Reporter.file_reporters` property has been removed. - The reporting methods used to permanently apply their arguments to the configuration of the Coverage object. Now they no longer do. The arguments affect the operation of the method, but do not persist. - Many internal attributes and functions were changed. These were not part of the public supported API. If your code used them, it might now stop working. New Features ------------ - Coverage.py can now record the context in which each line was executed. The contexts are stored in the data file and can be used to drill down into why a particular line was run. Static contexts let you specify a label for an entire coverage run, for example to separate coverage for different operating systems or versions of Python. Dynamic contexts can change during a single measurement run. This can be used to record the names of the tests that executed each line. See :ref:`contexts` for full information. - Coverage's data storage has changed. In version 4.x, .coverage files were basically JSON. Now, they are SQLite databases. The database schema is documented (:ref:`dbschema`), but might still be in flux. - Data can now be "reported" in JSON format, for programmatic use, as requested in `issue 720`_. The new ``coverage json`` command writes raw and summarized data to a JSON file. Thanks, Matt Bachmann. - Configuration can now be read from `TOML`_ files. This requires installing coverage.py with the ``[toml]`` extra. The standard "pyproject.toml" file will be read automatically if no other configuration file is found, with settings in the ``[tool.coverage.]`` namespace. Thanks to Frazer McLean for implementation and persistence. Finishes `issue 664`_. - The HTML and textual reports now have a ``--skip-empty`` option that skips files with no statements, notably ``__init__.py`` files. 
Thanks, Reya B. - You can specify the command line to run your program with the ``[run] command_line`` configuration setting, as requested in `issue 695`_. - An experimental ``[run] relative_files`` setting tells coverage to store relative file names in the data file. This makes it easier to run tests in one (or many) environments, and then report in another. It has not had much real-world testing, so it may change in incompatible ways in the future. - Environment variable substitution in configuration files now supports two syntaxes for controlling the behavior of undefined variables: if ``VARNAME`` is not defined, ``${VARNAME?}`` will raise an error, and ``${VARNAME-default value}`` will use "default value". - The location of the configuration file can now be specified with a ``COVERAGE_RCFILE`` environment variable, as requested in `issue 650`_. - A new warning (``already-imported``) is issued if measurable files have already been imported before coverage.py started measurement. See :ref:`cmd_warnings` for more information. - Error handling during reporting has changed slightly. All reporting methods now behave the same. The ``--ignore-errors`` option keeps errors from stopping the reporting, but files that couldn't parse as Python will always be reported as warnings. As with other warnings, you can suppress them with the ``[run] disable_warnings`` configuration setting. - Added the classmethod :meth:`.Coverage.current` to get the latest started Coverage instance. Bugs Fixed ---------- - The ``coverage run`` command has always adjusted the first entry in sys.path, to properly emulate how Python runs your program. Now this adjustment is skipped if sys.path[0] is already different than Python's default. This fixes `issue 715`_. - Python files run with ``-m`` now have ``__spec__`` defined properly. This fixes `issue 745`_ (about not being able to run unittest tests that spawn subprocesses), and `issue 838`_, which described the problem directly. - Coverage will create directories as needed for the data file if they don't exist, closing `issue 721`_. - ``fail_under`` values more than 100 are reported as errors. Thanks to Mike Fiedler for closing `issue 746`_. - The "missing" values in the text output are now sorted by line number, so that missing branches are reported near the other lines they affect. The values used to show all missing lines, and then all missing branches. - Coverage.py no longer fails if the user program deletes its current directory. Fixes `issue 806`_. Thanks, Dan Hemberger. .. _TOML: https://toml.io/ .. _issue 650: https://github.com/nedbat/coveragepy/issues/650 .. _issue 664: https://github.com/nedbat/coveragepy/issues/664 .. _issue 695: https://github.com/nedbat/coveragepy/issues/695 .. _issue 715: https://github.com/nedbat/coveragepy/issues/715 .. _issue 720: https://github.com/nedbat/coveragepy/issues/720 .. _issue 721: https://github.com/nedbat/coveragepy/issues/721 .. _issue 745: https://github.com/nedbat/coveragepy/issues/745 .. _issue 746: https://github.com/nedbat/coveragepy/issues/746 .. _issue 806: https://github.com/nedbat/coveragepy/issues/806 .. _issue 838: https://github.com/nedbat/coveragepy/issues/838 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/howto.txt0000644000175100001770000000712300000000000016033 0ustar00runnerdocker00000000000000* Release checklist - Check that the current virtualenv matches the current coverage branch. 
- start branch for release work $ make relbranch - Edit version number in coverage/version.py version_info = (4, 0, 2, "alpha", 1) version_info = (4, 0, 2, "beta", 1) version_info = (4, 0, 2, "candidate", 1) version_info = (4, 0, 2, "final", 0) - make sure: _dev = 0 - Edit supported Python version numbers. Search for "PYVERSIONS". - Especially README.rst and doc/index.rst - Update source files with release facts, and get useful snippets: $ make edit_for_release cheats - Look over CHANGES.rst - Update README.rst - "New in x.y:" - Update docs - IF PRE-RELEASE: - Version of latest stable release in doc/index.rst - Make sure the docs are cogged: $ make prebuild - Don't forget the man page: doc/python-coverage.1.txt - Check that the docs build correctly: $ tox -e doc - commit the release-prep changes $ make relcommit1 - Generate new sample_html to get the latest, incl footer version number: - IF PRE-RELEASE: $ make sample_html_beta - IF NOT PRE-RELEASE: $ make sample_html - check in the new sample html $ make relcommit2 - Done with changes to source files - check them in on the release prep branch - wait for ci to finish - merge to master - git push - Start the kits: - opvars github - Trigger the kit GitHub Action $ make build_kits - Build and publish docs: - IF PRE-RELEASE: $ make publishbeta - ELSE: $ make publish - commit and publish nedbatchelder.com - Kits: - Wait for kits to finish: - https://github.com/nedbat/coveragepy/actions/workflows/kit.yml - Download and check built kits from GitHub Actions: $ make clean download_kits check_kits - there should be 52 - examine the dist directory, and remove anything that looks malformed. - opvars - test the pypi upload: $ make test_upload - upload kits: $ make kit_upload - Tag the tree $ make tag - Update GitHub releases: $ make clean github_releases - Visit the fixed issues on GitHub and mention the version it was fixed in. $ make comment_on_fixes - deopvars - Bump version: $ make bump_version - Update readthedocs - @ https://readthedocs.org/projects/coverage/versions/ - find the latest tag in the inactive list, edit it, make it active. - keep just the latest version of each x.y release, make the rest active but hidden. - pre-releases should be hidden - IF NOT PRE-RELEASE: - @ https://readthedocs.org/dashboard/coverage/advanced/ - change the "default version" to the new version - @ https://readthedocs.org/projects/coverage/builds/ - manually build "latest" - wait for the new tag build to finish successfully. - Once CI passes, merge the bump-version branch to master and push it - things to automate: - readthedocs api to do the readthedocs changes * Testing - Testing of Python code is handled by tox. - Create and activate a virtualenv - pip install -r requirements/dev.pip - $ tox - For complete coverage testing: $ make metacov This will run coverage.py under its own measurement. You can do this in different environments (Linux vs. Windows, for example), then copy the data files (.metacov.*) to one machine for combination and reporting. To combine and report: $ make metahtml - To run the Javascript tests: open tests/js/index.html in variety of browsers. 
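 - For reference, the combine-and-report step that "make metahtml" drives
   through igor.py is roughly the following Python (a sketch only; it assumes
   the copied .metacov.* data files and metacov.ini are in the current
   directory):

       import coverage
       cov = coverage.Coverage(config_file="metacov.ini")
       cov.load()
       cov.combine()      # merge the .metacov.* data files named by the config
       cov.save()
       cov.html_report()  # write the combined HTML report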
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/igor.py0000644000175100001770000004034100000000000015443 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Helper for building, testing, and linting coverage.py. To get portability, all these operations are written in Python here instead of in shell scripts, batch files, or Makefiles. """ import contextlib import datetime import glob import inspect import itertools import os import platform import pprint import re import subprocess import sys import sysconfig import textwrap import types import warnings import zipfile try: import pytest except ImportError: # We want to be able to run this for some tasks that don't need pytest. pytest = None # Constants derived the same as in coverage/env.py. We can't import # that file here, it would be evaluated too early and not get the # settings we make in this file. CPYTHON = platform.python_implementation() == "CPython" PYPY = platform.python_implementation() == "PyPy" @contextlib.contextmanager def ignore_warnings(): """Context manager to ignore warning within the with statement.""" with warnings.catch_warnings(): warnings.simplefilter("ignore") yield VERBOSITY = int(os.getenv("COVERAGE_IGOR_VERBOSE", "0")) # Functions named do_* are executable from the command line: do_blah is run # by "python igor.py blah". def do_show_env(): """Show the environment variables.""" print("Environment:") for env in sorted(os.environ): print(f" {env} = {os.environ[env]!r}") def do_remove_extension(*args): """Remove the compiled C extension, no matter what its name.""" so_patterns = """ tracer.so tracer.*.so tracer.pyd tracer.*.pyd """.split() if "--from-install" in args: # Get the install location using a subprocess to avoid # locking the file we are about to delete root = os.path.dirname( subprocess.check_output( [ sys.executable, "-Xutf8", "-c", "import coverage; print(coverage.__file__)", ], encoding="utf-8", ).strip(), ) roots = [root] else: roots = ["coverage", "build/*/coverage"] for root, pattern in itertools.product(roots, so_patterns): pattern = os.path.join(root, pattern.strip()) if VERBOSITY: print(f"Searching for {pattern}") for filename in glob.glob(pattern): if os.path.exists(filename): if VERBOSITY: print(f"Removing {filename}") try: os.remove(filename) except OSError as exc: if VERBOSITY: print(f"Couldn't remove {filename}: {exc}") def label_for_core(core): """Get the label for these tests.""" if core == "pytrace": return "with Python tracer" elif core == "ctrace": return "with C tracer" elif core == "sysmon": return "with sys.monitoring" else: raise ValueError(f"Bad core: {core!r}") def should_skip(core): """Is there a reason to skip these tests? Return empty string to run tests, or a message about why we are skipping the tests. """ skipper = "" # $set_env.py: COVERAGE_TEST_CORES - List of cores to run test_cores = os.getenv("COVERAGE_TEST_CORES") if test_cores: if core not in test_cores: skipper = f"core {core} not in COVERAGE_TEST_CORES={test_cores}" else: # $set_env.py: COVERAGE_ONE_CORE - Only run tests for one core. 
only_one = os.getenv("COVERAGE_ONE_CORE") if only_one: if CPYTHON: if sys.version_info >= (3, 12): if core != "sysmon": skipper = f"Only one core: not running {core}" elif core != "ctrace": skipper = f"Only one core: not running {core}" else: if core != "pytrace": skipper = f"No C core for {platform.python_implementation()}" if skipper: msg = "Skipping tests " + label_for_core(core) if len(skipper) > 1: msg += ": " + skipper else: msg = "" return msg def make_env_id(core): """An environment id that will keep all the test runs distinct.""" impl = platform.python_implementation().lower() version = "{}{}".format(*sys.version_info[:2]) if PYPY: version += "_{}{}".format(*sys.pypy_version_info[:2]) env_id = f"{impl}{version}_{core}" return env_id def run_tests(core, *runner_args): """The actual running of tests.""" if "COVERAGE_TESTING" not in os.environ: os.environ["COVERAGE_TESTING"] = "True" print_banner(label_for_core(core)) return pytest.main(list(runner_args)) def run_tests_with_coverage(core, *runner_args): """Run tests, but with coverage.""" # Need to define this early enough that the first import of env.py sees it. os.environ["COVERAGE_TESTING"] = "True" os.environ["COVERAGE_PROCESS_START"] = os.path.abspath("metacov.ini") os.environ["COVERAGE_HOME"] = os.getcwd() context = os.getenv("COVERAGE_CONTEXT") if context: if context[0] == "$": context = os.environ[context[1:]] os.environ["COVERAGE_CONTEXT"] = context + "." + core # Create the .pth file that will let us measure coverage in sub-processes. # The .pth file seems to have to be alphabetically after easy-install.pth # or the sys.path entries aren't created right? # There's an entry in "make clean" to get rid of this file. pth_dir = sysconfig.get_path("purelib") pth_path = os.path.join(pth_dir, "zzz_metacov.pth") with open(pth_path, "w") as pth_file: pth_file.write("import coverage; coverage.process_startup()\n") suffix = f"{make_env_id(core)}_{platform.platform()}" os.environ["COVERAGE_METAFILE"] = os.path.abspath(".metacov." + suffix) import coverage cov = coverage.Coverage(config_file="metacov.ini") cov._warn_unimported_source = False cov._warn_preimported_source = False cov._metacov = True cov.start() try: # Re-import coverage to get it coverage tested! I don't understand all # the mechanics here, but if I don't carry over the imported modules # (in covmods), then things go haywire (os is None, eventually). covmods = {} covdir = os.path.split(coverage.__file__)[0] # We have to make a list since we'll be deleting in the loop. modules = list(sys.modules.items()) for name, mod in modules: if name.startswith("coverage"): if getattr(mod, "__file__", "??").startswith(covdir): covmods[name] = mod del sys.modules[name] import coverage # pylint: disable=reimported sys.modules.update(covmods) # Run tests, with the arguments from our command line. status = run_tests(core, *runner_args) finally: cov.stop() os.remove(pth_path) cov.save() return status def do_combine_html(): """Combine data from a meta-coverage run, and make the HTML report.""" import coverage os.environ["COVERAGE_HOME"] = os.getcwd() cov = coverage.Coverage(config_file="metacov.ini") cov.load() cov.combine() cov.save() # A new Coverage to turn on messages. Better would be to have tighter # control over message verbosity... 
cov = coverage.Coverage(config_file="metacov.ini", messages=True) cov.load() show_contexts = bool( os.getenv("COVERAGE_DYNCTX") or os.getenv("COVERAGE_CONTEXT"), ) cov.html_report(show_contexts=show_contexts) def do_test_with_core(core, *runner_args): """Run tests with a particular core.""" # If we should skip these tests, skip them. skip_msg = should_skip(core) if skip_msg: print(skip_msg) return None os.environ["COVERAGE_CORE"] = core if os.getenv("COVERAGE_COVERAGE", "no") == "yes": return run_tests_with_coverage(core, *runner_args) else: return run_tests(core, *runner_args) def do_zip_mods(): """Build the zip files needed for tests.""" with zipfile.ZipFile("tests/zipmods.zip", "w") as zf: # Take some files from disk. zf.write("tests/covmodzip1.py", "covmodzip1.py") # The others will be various encodings. source = textwrap.dedent( """\ # coding: {encoding} text = u"{text}" ords = {ords} assert [ord(c) for c in text] == ords print(u"All OK with {encoding}") encoding = "{encoding}" """, ) # These encodings should match the list in tests/test_python.py details = [ ("utf-8", "โ“—โ“”โ“›โ“›โ“ž, โ“ฆโ“žโ“กโ“›โ““"), ("gb2312", "ไฝ ๅฅฝ๏ผŒไธ–็•Œ"), ("hebrew", "ืฉืœื•ื, ืขื•ืœื"), ("shift_jis", "ใ“ใ‚“ใซใกใฏไธ–็•Œ"), ("cp1252", "โ€œhiโ€"), ] for encoding, text in details: filename = f"encoded_{encoding}.py" ords = [ord(c) for c in text] source_text = source.format(encoding=encoding, text=text, ords=ords) zf.writestr(filename, source_text.encode(encoding)) with zipfile.ZipFile("tests/zip1.zip", "w") as zf: zf.write("tests/zipsrc/zip1/__init__.py", "zip1/__init__.py") zf.write("tests/zipsrc/zip1/zip1.py", "zip1/zip1.py") with zipfile.ZipFile("tests/covmain.zip", "w") as zf: zf.write("coverage/__main__.py", "__main__.py") def print_banner(label): """Print the version of Python.""" try: impl = platform.python_implementation() except AttributeError: impl = "Python" version = platform.python_version() if PYPY: version += " (pypy %s)" % ".".join(str(v) for v in sys.pypy_version_info) rev = platform.python_revision() if rev: version += f" (rev {rev})" try: which_python = os.path.relpath(sys.executable) except ValueError: # On Windows having a python executable on a different drive # than the sources cannot be relative. 
which_python = sys.executable print(f"=== {impl} {version} {label} ({which_python}) ===") sys.stdout.flush() def do_quietly(command): """Run a command in a shell, and suppress all output.""" proc = subprocess.run( command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ) return proc.returncode def get_release_facts(): """Return an object with facts about the current release.""" import coverage import coverage.version facts = types.SimpleNamespace() facts.ver = coverage.__version__ mjr, mnr, mcr, rel, ser = facts.vi = coverage.version_info facts.dev = coverage.version._dev facts.shortver = f"{mjr}.{mnr}.{mcr}" facts.anchor = facts.shortver.replace(".", "-") if rel == "final": facts.next_vi = (mjr, mnr, mcr + 1, "alpha", 0) else: facts.anchor += f"{rel[0]}{ser}" facts.next_vi = (mjr, mnr, mcr, rel, ser + 1) facts.now = datetime.datetime.now() facts.branch = subprocess.getoutput("git rev-parse --abbrev-ref @") facts.sha = subprocess.getoutput("git rev-parse @") return facts def update_file(fname, pattern, replacement): """Update the contents of a file, replacing pattern with replacement.""" with open(fname) as fobj: old_text = fobj.read() new_text = re.sub(pattern, replacement, old_text, count=1) if new_text != old_text: print(f"Updating {fname}") with open(fname, "w") as fobj: fobj.write(new_text) UNRELEASED = "Unreleased\n----------" SCRIV_START = ".. scriv-start-here\n\n" def do_edit_for_release(): """Edit a few files in preparation for a release.""" facts = get_release_facts() if facts.dev: print(f"**\n** This is a dev release: {facts.ver}\n**\n\nNo edits") return # NOTICE.txt update_file( "NOTICE.txt", r"Copyright 2004.*? Ned", f"Copyright 2004-{facts.now:%Y} Ned", ) # CHANGES.rst title = f"Version {facts.ver} โ€” {facts.now:%Y-%m-%d}" rule = "-" * len(title) new_head = f".. _changes_{facts.anchor}:\n\n{title}\n{rule}" update_file("CHANGES.rst", re.escape(SCRIV_START), "") update_file("CHANGES.rst", re.escape(UNRELEASED), SCRIV_START + new_head) # doc/conf.py new_conf = textwrap.dedent( f"""\ # @@@ editable copyright = "2009\N{EN DASH}{facts.now:%Y}, Ned Batchelder" # pylint: disable=redefined-builtin # The short X.Y.Z version. version = "{facts.shortver}" # The full version, including alpha/beta/rc tags. release = "{facts.ver}" # The date of release, in "monthname day, year" format. 
release_date = "{facts.now:%B %-d, %Y}" # @@@ end """, ) update_file("doc/conf.py", r"(?s)# @@@ editable\n.*# @@@ end\n", new_conf) def do_bump_version(): """Edit a few files right after a release to bump the version.""" facts = get_release_facts() # CHANGES.rst update_file( "CHANGES.rst", re.escape(SCRIV_START), f"{UNRELEASED}\n\nNothing yet.\n\n\n" + SCRIV_START, ) # coverage/version.py next_version = f"version_info = {facts.next_vi}\n_dev = 1".replace("'", '"') update_file( "coverage/version.py", r"(?m)^version_info = .*\n_dev = \d+$", next_version, ) def do_cheats(): """Show a cheatsheet of useful things during releasing.""" facts = get_release_facts() pprint.pprint(facts.__dict__) print() print(f"Coverage version is {facts.ver}") repo = "nedbat/coveragepy" github = f"https://github.com/{repo}" egg = "egg=coverage==0.0" # to force a re-install print( f"https://coverage.readthedocs.io/en/{facts.ver}/changes.html#changes-{facts.anchor}", ) print( "\n## For GitHub commenting:\n" + "This is now released as part of " + f"[coverage {facts.ver}](https://pypi.org/project/coverage/{facts.ver}).", ) print("\n## To install this code:") if facts.branch == "master": print(f"python3 -m pip install git+{github}#{egg}") else: print(f"python3 -m pip install git+{github}@{facts.branch}#{egg}") print(f"python3 -m pip install git+{github}@{facts.sha[:20]}#{egg}") print("\n## To read this code on GitHub:") print(f"https://github.com/nedbat/coveragepy/commit/{facts.sha}") print(f"https://github.com/nedbat/coveragepy/commits/{facts.sha}") print( "\n## For other collaborators to get this code:\n" + f"git clone {github}\n" + f"cd {repo.partition('/')[-1]}\n" + f"git checkout {facts.sha}", ) def do_help(): """List the available commands""" items = list(globals().items()) items.sort() for name, value in items: if name.startswith("do_"): print(f"{name[3:]:<20}{value.__doc__}") def analyze_args(function): """What kind of args does `function` expect? Returns: star, num_pos: star(boolean): Does `function` accept *args? num_args(int): How many positional arguments does `function` have? """ argspec = inspect.getfullargspec(function) return bool(argspec.varargs), len(argspec.args) def main(args): """Main command-line execution for igor. Verbs are taken from the command line, and extra words taken as directed by the arguments needed by the handler. """ while args: verb = args.pop(0) handler = globals().get("do_" + verb) if handler is None: print(f"*** No handler for {verb!r}") return 1 star, num_args = analyze_args(handler) if star: # Handler has *args, give it all the rest of the command line. handler_args = args args = [] else: # Handler has specific arguments, give it only what it needs. handler_args = args[:num_args] args = args[num_args:] ret = handler(*handler_args) # If a handler returns a failure-like value, stop. if ret: return ret return 0 if __name__ == "__main__": sys.exit(main(sys.argv[1:])) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1298149 coverage-7.4.4/lab/0000755000175100001770000000000000000000000014665 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/README.txt0000644000175100001770000000042600000000000016365 0ustar00runnerdocker00000000000000The lab directory is not part of the installed coverage.py code. These programs are tools I have used while diagnosing problems, investigating functionality, and so on. 
They are not guaranteed to work, or to be suitable for any given purpose. If you find them useful, enjoy! ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1298149 coverage-7.4.4/lab/benchmark/0000755000175100001770000000000000000000000016617 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/benchmark/benchmark.py0000644000175100001770000005531100000000000021130 0ustar00runnerdocker00000000000000"""Run performance comparisons for versions of coverage""" import collections import contextlib import itertools import os import random import shutil import statistics import subprocess import sys import time from pathlib import Path from dataclasses import dataclass from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple import tabulate class ShellSession: """A logged shell session. The duration of the last command is available as .last_duration. """ def __init__(self, output_filename: str): self.output_filename = output_filename self.last_duration: float = 0 self.foutput = None self.env_vars = {} def __enter__(self): self.foutput = open(self.output_filename, "a", encoding="utf-8") print(f"Logging output to {os.path.abspath(self.output_filename)}") return self def __exit__(self, exc_type, exc_value, traceback): self.foutput.close() @contextlib.contextmanager def set_env(self, env_vars): old_env_vars = self.env_vars if env_vars: self.env_vars = dict(old_env_vars) self.env_vars.update(env_vars) try: yield finally: self.env_vars = old_env_vars def print(self, *args, **kwargs): """Print a message to this shell's log.""" print(*args, **kwargs, file=self.foutput) def print_banner(self, *args, **kwargs): """Print a distinguished banner to the log.""" self.print("\n######> ", end="") self.print(*args, **kwargs) def run_command(self, cmd: str) -> str: """ Run a command line (with a shell). Returns: str: the output of the command. """ self.print(f"\n### ========================\n$ {cmd}") start = time.perf_counter() proc = subprocess.run( cmd, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=self.env_vars, ) output = proc.stdout.decode("utf-8") self.last_duration = time.perf_counter() - start self.print(output, end="") self.print(f"(was: {cmd})") self.print(f"(in {os.getcwd()}, duration: {self.last_duration:.3f}s)") if proc.returncode != 0: self.print(f"ERROR: command returned {proc.returncode}") raise Exception( f"Command failed ({proc.returncode}): {cmd!r}, output was:\n{output}" ) return output.strip() def rmrf(path: Path) -> None: """ Remove a directory tree. It's OK if it doesn't exist. """ if path.exists(): shutil.rmtree(path) @contextlib.contextmanager def change_dir(newdir: Path) -> Iterator[Path]: """ Change to a new directory, and then change back. Will make the directory if needed. """ old_dir = os.getcwd() newdir.mkdir(parents=True, exist_ok=True) os.chdir(newdir) try: yield newdir finally: os.chdir(old_dir) @contextlib.contextmanager def file_replace(file_name: Path, old_text: str, new_text: str) -> Iterator[None]: """ Replace some text in `file_name`, and change it back. 
""" if old_text: file_text = file_name.read_text() if old_text not in file_text: raise Exception("Old text {old_text!r} not found in {file_name}") updated_text = file_text.replace(old_text, new_text) file_name.write_text(updated_text) try: yield finally: if old_text: file_name.write_text(file_text) class ProjectToTest: """Information about a project to use as a test case.""" # Where can we clone the project from? git_url: Optional[str] = None slug: Optional[str] = None def __init__(self): if not self.slug: if self.git_url: self.slug = self.git_url.split("/")[-1] def shell(self): return ShellSession(f"output_{self.slug}.log") def make_dir(self): self.dir = Path(f"work_{self.slug}") if self.dir.exists(): rmrf(self.dir) def get_source(self, shell): """Get the source of the project.""" shell.run_command(f"git clone {self.git_url} {self.dir}") def prep_environment(self, env): """Prepare the environment to run the test suite. This is not timed. """ pass @contextlib.contextmanager def tweak_coverage_settings( self, settings: Iterable[Tuple[str, Any]] ) -> Iterator[None]: """Tweak the coverage settings. NOTE: This is not properly factored, and is only used by ToxProject now!!! """ yield def pre_check(self, env): pass def post_check(self, env): pass def run_no_coverage(self, env): """Run the test suite with no coverage measurement. Returns the duration of the run. """ pass def run_with_coverage(self, env, pip_args, cov_tweaks): """Run the test suite with coverage measurement. Must install a particular version of coverage using `pip_args`. Returns the duration of the run. """ pass class EmptyProject(ProjectToTest): """A dummy project for testing other parts of this code.""" def __init__(self, slug: str = "empty", fake_durations: Iterable[float] = (1.23,)): self.slug = slug self.durations = iter(itertools.cycle(fake_durations)) def get_source(self, shell): pass def run_no_coverage(self, env): """Run the test suite with coverage measurement.""" return next(self.durations) def run_with_coverage(self, env, pip_args, cov_tweaks): """Run the test suite with coverage measurement.""" return next(self.durations) class ToxProject(ProjectToTest): """A project using tox to run the test suite.""" def prep_environment(self, env): env.shell.run_command(f"{env.python} -m pip install tox") self.run_tox(env, env.pyver.toxenv, "--notest") def run_tox(self, env, toxenv, toxargs=""): """Run a tox command. Return the duration.""" env.shell.run_command(f"{env.python} -m tox -e {toxenv} {toxargs}") return env.shell.last_duration def run_no_coverage(self, env): return self.run_tox(env, env.pyver.toxenv, "--skip-pkg-install") def run_with_coverage(self, env, pip_args, cov_tweaks): self.run_tox(env, env.pyver.toxenv, "--notest") env.shell.run_command( f".tox/{env.pyver.toxenv}/bin/python -m pip install {pip_args}" ) with self.tweak_coverage_settings(cov_tweaks): self.pre_check(env) # NOTE: Not properly factored, and only used from here. duration = self.run_tox(env, env.pyver.toxenv, "--skip-pkg-install") self.post_check( env ) # NOTE: Not properly factored, and only used from here. 
return duration class ProjectPytestHtml(ToxProject): """pytest-dev/pytest-html""" git_url = "https://github.com/pytest-dev/pytest-html" def run_with_coverage(self, env, pip_args, cov_tweaks): raise Exception("This doesn't work because options changed to tweaks") covenv = env.pyver.toxenv + "-cov" self.run_tox(env, covenv, "--notest") env.shell.run_command(f".tox/{covenv}/bin/python -m pip install {pip_args}") if cov_tweaks: replace = ("# reference: https", f"[run]\n{cov_tweaks}\n#") else: replace = ("", "") with file_replace(Path(".coveragerc"), *replace): env.shell.run_command("cat .coveragerc") env.shell.run_command(f".tox/{covenv}/bin/python -m coverage debug sys") return self.run_tox(env, covenv, "--skip-pkg-install") class ProjectDateutil(ToxProject): """dateutil/dateutil""" git_url = "https://github.com/dateutil/dateutil" def prep_environment(self, env): super().prep_environment(env) env.shell.run_command(f"{env.python} updatezinfo.py") def run_no_coverage(self, env): env.shell.run_command("echo No option to run without coverage") return 0 class ProjectAttrs(ToxProject): """python-attrs/attrs""" git_url = "https://github.com/python-attrs/attrs" def tweak_coverage_settings( self, tweaks: Iterable[Tuple[str, Any]] ) -> Iterator[None]: return tweak_toml_coverage_settings("pyproject.toml", tweaks) def pre_check(self, env): env.shell.run_command("cat pyproject.toml") def post_check(self, env): env.shell.run_command("ls -al") class ProjectDjangoAuthToolkit(ToxProject): """jazzband/django-oauth-toolkit""" git_url = "https://github.com/jazzband/django-oauth-toolkit" def run_no_coverage(self, env): env.shell.run_command("echo No option to run without coverage") return 0 class ProjectDjango(ToxProject): """django/django""" # brew install libmemcached # pip install -e . 
# coverage run tests/runtests.py --settings=test_sqlite # coverage report --format=total --precision=6 # 32.848540 class ProjectMashumaro(ProjectToTest): git_url = "https://github.com/Fatal1ty/mashumaro" def __init__(self, more_pytest_args=""): super().__init__() self.more_pytest_args = more_pytest_args def prep_environment(self, env): env.shell.run_command(f"{env.python} -m pip install .") env.shell.run_command(f"{env.python} -m pip install -r requirements-dev.txt") def run_no_coverage(self, env): env.shell.run_command(f"{env.python} -m pytest {self.more_pytest_args}") return env.shell.last_duration def run_with_coverage(self, env, pip_args, cov_tweaks): env.shell.run_command(f"{env.python} -m pip install {pip_args}") env.shell.run_command( f"{env.python} -m pytest --cov=mashumaro --cov=tests {self.more_pytest_args}" ) duration = env.shell.last_duration report = env.shell.run_command(f"{env.python} -m coverage report --precision=6") print("Results:", report.splitlines()[-1]) return duration class ProjectMashumaroBranch(ProjectMashumaro): def __init__(self, more_pytest_args=""): super().__init__(more_pytest_args="--cov-branch " + more_pytest_args) self.slug = "mashbranch" class ProjectOperator(ProjectToTest): git_url = "https://github.com/nedbat/operator" def __init__(self, more_pytest_args=""): super().__init__() self.more_pytest_args = more_pytest_args def prep_environment(self, env): env.shell.run_command(f"{env.python} -m pip install tox") Path("/tmp/operator_tmp").mkdir(exist_ok=True) env.shell.run_command(f"{env.python} -m tox -e unit --notest") env.shell.run_command(f"{env.python} -m tox -e unitnocov --notest") def run_no_coverage(self, env): env.shell.run_command( f"TMPDIR=/tmp/operator_tmp {env.python} -m tox -e unitnocov --skip-pkg-install -- {self.more_pytest_args}" ) return env.shell.last_duration def run_with_coverage(self, env, pip_args, cov_tweaks): env.shell.run_command(f"{env.python} -m pip install {pip_args}") env.shell.run_command( f"TMPDIR=/tmp/operator_tmp {env.python} -m tox -e unit --skip-pkg-install -- {self.more_pytest_args}" ) duration = env.shell.last_duration report = env.shell.run_command(f"{env.python} -m coverage report --precision=6") print("Results:", report.splitlines()[-1]) return duration def tweak_toml_coverage_settings( toml_file: str, tweaks: Iterable[Tuple[str, Any]] ) -> Iterator[None]: if tweaks: toml_inserts = [] for name, value in tweaks: if isinstance(value, bool): toml_inserts.append(f"{name} = {str(value).lower()}") elif isinstance(value, str): toml_inserts.append(f"{name} = '{value}'") else: raise Exception(f"Can't tweak toml setting: {name} = {value!r}") header = "[tool.coverage.run]\n" insert = header + "\n".join(toml_inserts) + "\n" else: header = insert = "" return file_replace(Path(toml_file), header, insert) class AdHocProject(ProjectToTest): """A standalone program to run locally.""" def __init__(self, python_file, cur_dir=None, pip_args=None): super().__init__() self.python_file = Path(python_file) if not self.python_file.exists(): raise ValueError(f"Couldn't find {self.python_file} to run ad-hoc.") self.cur_dir = Path(cur_dir or self.python_file.parent) if not self.cur_dir.exists(): raise ValueError(f"Couldn't find {self.cur_dir} to run in.") self.pip_args = pip_args self.slug = self.python_file.name def get_source(self, shell): pass def prep_environment(self, env): env.shell.run_command(f"{env.python} -m pip install {self.pip_args}") def run_no_coverage(self, env): with change_dir(self.cur_dir): env.shell.run_command(f"{env.python} 
{self.python_file}") return env.shell.last_duration def run_with_coverage(self, env, pip_args, cov_tweaks): env.shell.run_command(f"{env.python} -m pip install {pip_args}") with change_dir(self.cur_dir): env.shell.run_command(f"{env.python} -m coverage run {self.python_file}") return env.shell.last_duration class SlipcoverBenchmark(AdHocProject): """ For running code from the Slipcover benchmarks. Clone https://github.com/plasma-umass/slipcover to /src/slipcover """ def __init__(self, python_file): super().__init__( python_file=f"/src/slipcover/benchmarks/{python_file}", cur_dir="/src/slipcover", pip_args="six pyperf", ) class PyVersion: """A version of Python to use.""" # The command to run this Python command: str # Short word for messages, directories, etc slug: str # The tox environment to run this Python toxenv: str class Python(PyVersion): """A version of CPython to use.""" def __init__(self, major, minor): self.command = self.slug = f"python{major}.{minor}" self.toxenv = f"py{major}{minor}" class PyPy(PyVersion): """A version of PyPy to use.""" def __init__(self, major, minor): self.command = self.slug = f"pypy{major}.{minor}" self.toxenv = f"pypy{major}{minor}" class AdHocPython(PyVersion): """A custom build of Python to use.""" def __init__(self, path, slug): self.command = f"{path}/bin/python3" self.slug = slug self.toxenv = None @dataclass class Coverage: """A version of coverage.py to use, maybe None.""" # Short word for messages, directories, etc slug: str # Arguments for "pip install ..." pip_args: Optional[str] = None # Tweaks to the .coveragerc file tweaks: Optional[Iterable[Tuple[str, Any]]] = None # Environment variables to set env_vars: Optional[Dict[str, str]] = None class NoCoverage(Coverage): """Run without coverage at all.""" def __init__(self, slug="nocov"): super().__init__(slug=slug, pip_args=None) class CoveragePR(Coverage): """A version of coverage.py from a pull request.""" def __init__(self, number, tweaks=None, env_vars=None): super().__init__( slug=f"#{number}", pip_args=f"git+https://github.com/nedbat/coveragepy.git@refs/pull/{number}/merge", tweaks=tweaks, env_vars=env_vars, ) class CoverageCommit(Coverage): """A version of coverage.py from a specific commit.""" def __init__(self, sha, tweaks=None, env_vars=None): super().__init__( slug=sha, pip_args=f"git+https://github.com/nedbat/coveragepy.git@{sha}", tweaks=tweaks, env_vars=env_vars, ) class CoverageSource(Coverage): """The coverage.py in a working tree.""" def __init__(self, directory, slug="source", tweaks=None, env_vars=None): super().__init__( slug=slug, pip_args=directory, tweaks=tweaks, env_vars=env_vars, ) @dataclass class Env: """An environment to run a test suite in.""" pyver: PyVersion python: Path shell: ShellSession ResultKey = Tuple[str, str, str] DIMENSION_NAMES = ["proj", "pyver", "cov"] class Experiment: """A particular time experiment to run.""" def __init__( self, py_versions: List[PyVersion], cov_versions: List[Coverage], projects: List[ProjectToTest], ): self.py_versions = py_versions self.cov_versions = cov_versions self.projects = projects self.result_data: Dict[ResultKey, List[float]] = {} def run(self, num_runs: int = 3) -> None: total_runs = ( len(self.projects) * len(self.py_versions) * len(self.cov_versions) * num_runs ) total_run_nums = iter(itertools.count(start=1)) all_runs = [] for proj in self.projects: with proj.shell() as shell: print(f"Prepping project {proj.slug}") shell.print_banner(f"Prepping project {proj.slug}") proj.make_dir() proj.get_source(shell) for pyver in 
self.py_versions: print(f"Making venv for {proj.slug} {pyver.slug}") venv_dir = f"venv_{proj.slug}_{pyver.slug}" shell.run_command(f"{pyver.command} -m venv {venv_dir}") python = Path.cwd() / f"{venv_dir}/bin/python" shell.run_command(f"{python} -V") env = Env(pyver, python, shell) with change_dir(proj.dir): print(f"Prepping for {proj.slug} {pyver.slug}") proj.prep_environment(env) for cov_ver in self.cov_versions: all_runs.append((proj, pyver, cov_ver, env)) all_runs *= num_runs random.shuffle(all_runs) run_data: Dict[ResultKey, List[float]] = collections.defaultdict(list) for proj, pyver, cov_ver, env in all_runs: with env.shell: total_run_num = next(total_run_nums) banner = ( "Running tests: " + f"proj={proj.slug}, py={pyver.slug}, cov={cov_ver.slug}, " + f"{total_run_num} of {total_runs}" ) print(banner) env.shell.print_banner(banner) with change_dir(proj.dir): with env.shell.set_env(cov_ver.env_vars): if cov_ver.pip_args is None: dur = proj.run_no_coverage(env) else: dur = proj.run_with_coverage( env, cov_ver.pip_args, cov_ver.tweaks, ) print(f"Tests took {dur:.3f}s") result_key = (proj.slug, pyver.slug, cov_ver.slug) run_data[result_key].append(dur) # Summarize and collect the data. print("# Results") for proj in self.projects: for pyver in self.py_versions: for cov_ver in self.cov_versions: result_key = (proj.slug, pyver.slug, cov_ver.slug) data = run_data[result_key] med = statistics.median(data) self.result_data[result_key] = med stdev = statistics.stdev(data) if len(data) > 1 else 0.0 summary = ( f"Median for {proj.slug}, {pyver.slug}, {cov_ver.slug}: " + f"{med:.3f}s, " + f"stdev={stdev:.3f}" ) if 1: data_sum = ", ".join(f"{d:.3f}" for d in data) summary += f", data={data_sum}" print(summary) def show_results( self, rows: List[str], column: str, ratios: Iterable[Tuple[str, str, str]] = (), ) -> None: dimensions = { "cov": [cov_ver.slug for cov_ver in self.cov_versions], "pyver": [pyver.slug for pyver in self.py_versions], "proj": [proj.slug for proj in self.projects], } table_axes = [dimensions[rowname] for rowname in rows] data_order = [*rows, column] remap = [data_order.index(datum) for datum in DIMENSION_NAMES] header = [] header.extend(rows) header.extend(dimensions[column]) header.extend(slug for slug, _, _ in ratios) aligns = ["left"] * len(rows) + ["right"] * (len(header) - len(rows)) data = [] for tup in itertools.product(*table_axes): row = [] row.extend(tup) col_data = {} for col in dimensions[column]: key = (*tup, col) key = tuple(key[i] for i in remap) result_time = self.result_data[key] # type: ignore row.append(f"{result_time:.1f}s") col_data[col] = result_time for _, num, denom in ratios: ratio = col_data[num] / col_data[denom] row.append(f"{ratio * 100:.0f}%") data.append(row) print() print(tabulate.tabulate(data, headers=header, colalign=aligns, tablefmt="pipe")) PERF_DIR = Path("/tmp/covperf") def run_experiment( py_versions: List[PyVersion], cov_versions: List[Coverage], projects: List[ProjectToTest], rows: List[str], column: str, ratios: Iterable[Tuple[str, str, str]] = (), ): slugs = [v.slug for v in py_versions + cov_versions + projects] if len(set(slugs)) != len(slugs): raise Exception(f"Slugs must be unique: {slugs}") if any(" " in slug for slug in slugs): raise Exception(f"No spaces in slugs please: {slugs}") ratio_slugs = [rslug for ratio in ratios for rslug in ratio[1:]] if any(rslug not in slugs for rslug in ratio_slugs): raise Exception(f"Ratio slug doesn't match a slug: {ratio_slugs}, {slugs}") if set(rows + [column]) != set(DIMENSION_NAMES): raise 
Exception( f"All of these must be in rows or column: {', '.join(DIMENSION_NAMES)}" ) print(f"Removing and re-making {PERF_DIR}") rmrf(PERF_DIR) with change_dir(PERF_DIR): exp = Experiment( py_versions=py_versions, cov_versions=cov_versions, projects=projects ) exp.run(num_runs=int(sys.argv[1])) exp.show_results(rows=rows, column=column, ratios=ratios) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/benchmark/empty.py0000644000175100001770000000137100000000000020331 0ustar00runnerdocker00000000000000from benchmark import * run_experiment( py_versions=[ Python(3, 9), Python(3, 11), ], cov_versions=[ Coverage("701", "coverage==7.0.1"), Coverage( "701.dynctx", "coverage==7.0.1", [("dynamic_context", "test_function")] ), Coverage("702", "coverage==7.0.2"), Coverage( "702.dynctx", "coverage==7.0.2", [("dynamic_context", "test_function")] ), ], projects=[ EmptyProject("empty", [1.2, 3.4]), EmptyProject("dummy", [6.9, 7.1]), ], rows=["proj", "pyver"], column="cov", ratios=[ (".2 vs .1", "702", "701"), (".1 dynctx cost", "701.dynctx", "701"), (".2 dynctx cost", "702.dynctx", "702"), ], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/benchmark/run.py0000644000175100001770000000671100000000000020002 0ustar00runnerdocker00000000000000from benchmark import * if 0: run_experiment( py_versions=[ # Python(3, 11), AdHocPython("/usr/local/cpython/v3.10.5", "v3.10.5"), AdHocPython("/usr/local/cpython/v3.11.0b3", "v3.11.0b3"), AdHocPython("/usr/local/cpython/94231", "94231"), ], cov_versions=[ Coverage("6.4.1", "coverage==6.4.1"), ], projects=[ AdHocProject("/src/bugs/bug1339/bug1339.py"), SlipcoverBenchmark("bm_sudoku.py"), SlipcoverBenchmark("bm_spectral_norm.py"), ], rows=["cov", "proj"], column="pyver", ratios=[ ("3.11b3 vs 3.10", "v3.11.0b3", "v3.10.5"), ("94231 vs 3.10", "94231", "v3.10.5"), ], ) if 0: run_experiment( py_versions=[ Python(3, 9), Python(3, 11), ], cov_versions=[ Coverage("701", "coverage==7.0.1"), Coverage( "701.dynctx", "coverage==7.0.1", [("dynamic_context", "test_function")] ), Coverage("702", "coverage==7.0.2"), Coverage( "702.dynctx", "coverage==7.0.2", [("dynamic_context", "test_function")] ), ], projects=[ ProjectAttrs(), ], rows=["proj", "pyver"], column="cov", ratios=[ (".2 vs .1", "702", "701"), (".1 dynctx cost", "701.dynctx", "701"), (".2 dynctx cost", "702.dynctx", "702"), ], ) if 0: # Compare 3.10 vs 3.12 v1 = 10 v2 = 12 run_experiment( py_versions=[ Python(3, v1), Python(3, v2), ], cov_versions=[ Coverage("732", "coverage==7.3.2"), ], projects=[ ProjectMashumaro(), ], rows=["cov", "proj"], column="pyver", ratios=[ (f"3.{v2} vs 3.{v1}", f"python3.{v2}", f"python3.{v1}"), ], ) if 0: # Compare 3.12 coverage vs no coverage run_experiment( py_versions=[ Python(3, 12), ], cov_versions=[ NoCoverage("nocov"), Coverage("732", "coverage==7.3.2"), CoverageSource( slug="sysmon", directory="/Users/nbatchelder/coverage/trunk", env_vars={"COVERAGE_CORE": "sysmon"}, ), ], projects=[ ProjectMashumaro(), # small: "-k ck" ProjectOperator(), # small: "-k irk" ], rows=["pyver", "proj"], column="cov", ratios=[ (f"732%", "732", "nocov"), (f"sysmon%", "sysmon", "nocov"), ], ) if 1: # Compare 3.12 coverage vs no coverage run_experiment( py_versions=[ Python(3, 12), ], cov_versions=[ NoCoverage("nocov"), Coverage("732", "coverage==7.3.2"), CoverageSource( slug="sysmon", directory="/Users/nbatchelder/coverage/trunk", 
env_vars={"COVERAGE_CORE": "sysmon"}, ), ], projects=[ ProjectMashumaro(), # small: "-k ck" ProjectMashumaroBranch(), # small: "-k ck" ], rows=["pyver", "proj"], column="cov", ratios=[ (f"732%", "732", "nocov"), (f"sysmon%", "sysmon", "nocov"), ], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/bpo_prelude.py0000644000175100001770000000061000000000000017534 0ustar00runnerdocker00000000000000import linecache, sys def trace(frame, event, arg): # The weird globals here is to avoid a NameError on shutdown... if frame.f_code.co_filename == globals().get("__file__"): lineno = frame.f_lineno line = linecache.getline(__file__, lineno).rstrip() print("{} {}: {}".format(event[:4], lineno, line)) return trace print(sys.version) sys.settrace(trace) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/branch_trace.py0000644000175100001770000000044300000000000017653 0ustar00runnerdocker00000000000000import sys pairs = set() last = -1 def trace(frame, event, arg): global last if event == "line": this = frame.f_lineno pairs.add((last, this)) last = this return trace code = open(sys.argv[1]).read() sys.settrace(trace) exec(code) print(sorted(pairs)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/branches.py0000644000175100001770000000521500000000000017027 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Demonstrate some issues with coverage.py branch testing. def my_function(x): """This isn't real code, just snippets...""" # An infinite loop is structurally still a branch: it can next execute the # first line of the loop, or the first line after the loop. But # "while True" will never jump to the line after the loop, so the line # is shown as a partial branch: i = 0 while True: print("In while True") if i > 0: break i += 1 print("Left the True loop") # Notice that "while 1" also has this problem. Even though the compiler # knows there's no computation at the top of the loop, it's still expressed # in bytecode as a branch with two possibilities. i = 0 while 1: print("In while 1") if i > 0: break i += 1 print("Left the 1 loop") # Coverage.py lets developers exclude lines that they know will not be # executed. So far, the branch coverage doesn't use all that information # when deciding which lines are partially executed. # # Here, even though the else line is explicitly marked as never executed, # the if line complains that it never branched to the else: if x < 1000: # This branch is always taken print("x is reasonable") else: # pragma: nocover print("this never happens") # try-except structures are complex branches. An except clause with a # type is a three-way branch: there could be no exception, there could be # a matching exception, and there could be a non-matching exception. # # Here we run the code twice: once with no exception, and once with a # matching exception. The "except" line is marked as partial because we # never executed its third case: a non-matching exception. for y in (1, 2): try: if y % 2: raise ValueError("y is odd!") except ValueError: print("y must have been odd") print("done with y") print("done with 1, 2") # Another except clause, but this time all three cases are executed. 
No # partial lines are shown: for y in (0, 1, 2): try: if y % 2: raise ValueError("y is odd!") if y == 0: raise Exception("zero!") except ValueError: print("y must have been odd") except: print("y is something else") print("done with y") print("done with 0, 1, 2") my_function(1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/compare_times.sh0000755000175100001770000000402200000000000020051 0ustar00runnerdocker00000000000000#!/bin/bash # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # A suggestion about how to get less hyperfine output: # https://github.com/sharkdp/hyperfine/issues/223 HYPERFINE='hyperfine -w 1 -s basic -r 10' cat > sourcefile1.py << EOF import random def get_random_number(): return random.randint(5, 20) EOF cat > test_file1.py << EOF import pytest import sourcefile1 tests = tuple(f'test{i}' for i in range(1000)) @pytest.mark.parametrize("input_str", tests) def test_speed(input_str): print(input_str) number = sourcefile1.get_random_number() assert number <= 20 assert number >= 5 EOF rm -f .coveragerc $HYPERFINE 'python -m pytest test_file1.py' echo "Coverage 4.5.4" pip install -q coverage==4.5.4 $HYPERFINE 'python -m coverage run -m pytest test_file1.py' $HYPERFINE 'python -m coverage run --branch -m pytest test_file1.py' $HYPERFINE 'python -m pytest --cov=. --cov-report= test_file1.py' $HYPERFINE 'python -m pytest --cov=. --cov-report= --cov-branch test_file1.py' echo "Coverage 5.0a8, no contexts" pip install -q coverage==5.0a8 $HYPERFINE 'python -m coverage run -m pytest test_file1.py' $HYPERFINE 'python -m coverage run --branch -m pytest test_file1.py' $HYPERFINE 'python -m pytest --cov=. --cov-report= test_file1.py' $HYPERFINE 'python -m pytest --cov=. --cov-report= --cov-branch test_file1.py' echo "Coverage 5.0a8, with test contexts" cat > .coveragerc < ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/coverage-04.dtd0000644000175100001770000000375600000000000017411 0ustar00runnerdocker00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/extract_code.py0000644000175100001770000000421700000000000017707 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Use this to copy some indented code from the coverage.py test suite into a standalone file for deeper testing, or writing bug reports. Give it a file name and a line number, and it will find the indentend multiline string containing that line number, and output the dedented contents of the string. If tests/test_arcs.py has this (partial) content:: 1630 def test_partial_generators(self): 1631 # https://github.com/nedbat/coveragepy/issues/475 1632 # Line 2 is executed completely. 1633 # Line 3 is started but not finished, because zip ends before it finishes. 1634 # Line 4 is never started. 1635 cov = self.check_coverage('''\ 1636 def f(a, b): 1637 c = (i for i in a) # 2 1638 d = (j for j in b) # 3 1639 e = (k for k in b) # 4 1640 return dict(zip(c, d)) 1641 1642 f(['a', 'b'], [1, 2, 3]) 1643 ''', 1644 arcz=".1 17 7. .2 23 34 45 5. 
-22 2-2 -33 3-3 -44 4-4", 1645 arcz_missing="3-3 -44 4-4", 1646 ) then you can do:: % python lab/extract_code.py tests/test_arcs.py 1637 def f(a, b): c = (i for i in a) # 2 d = (j for j in b) # 3 e = (k for k in b) # 4 return dict(zip(c, d)) f(['a', 'b'], [1, 2, 3]) % """ import sys import textwrap if len(sys.argv) == 2: fname, lineno = sys.argv[1].split(":") else: fname, lineno = sys.argv[1:] lineno = int(lineno) with open(fname) as code_file: lines = ["", *code_file] # Find opening triple-quote for start in range(lineno, 0, -1): line = lines[start] if "'''" in line or '"""' in line: break for end in range(lineno+1, len(lines)): line = lines[end] if "'''" in line or '"""' in line: break code = "".join(lines[start+1: end]) code = textwrap.dedent(code) print(code, end="") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/find_class.py0000644000175100001770000000166600000000000017355 0ustar00runnerdocker00000000000000class Parent: def meth(self): print("METH") class Child(Parent): pass def trace(frame, event, args): # Thanks to Aleksi Torhamo for code and idea. co = frame.f_code fname = co.co_name if not co.co_varnames: return locs = frame.f_locals first_arg = co.co_varnames[0] if co.co_argcount: self = locs[first_arg] elif co.co_flags & 0x04: # *args syntax self = locs[first_arg][0] else: return func = getattr(self, fname).__func__ if hasattr(func, '__qualname__'): qname = func.__qualname__ else: for cls in self.__class__.__mro__: f = cls.__dict__.get(fname, None) if f is None: continue if f is func: qname = cls.__name__ + "." + fname break print(f"{event}: {self}.{fname} {qname}") return trace import sys sys.settrace(trace) Child().meth() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/genpy.py0000644000175100001770000002264200000000000016367 0ustar00runnerdocker00000000000000"""Generate random Python for testing.""" import collections from itertools import cycle, product import random import re from coverage.parser import PythonParser class PythonSpinner: """Spin Python source from a simple AST.""" def __init__(self): self.lines = [] self.lines.append("async def func():") self.indent = 4 @property def lineno(self): return len(self.lines) + 1 @classmethod def generate_python(cls, ast): spinner = cls() spinner.gen_python_internal(ast) return "\n".join(spinner.lines) def add_line(self, line): g = f"g{self.lineno}" self.lines.append(' ' * self.indent + line.format(g=g, lineno=self.lineno)) def add_block(self, node): self.indent += 4 self.gen_python_internal(node) self.indent -= 4 def maybe_block(self, node, nodei, keyword): if len(node) > nodei and node[nodei] is not None: self.add_line(keyword + ":") self.add_block(node[nodei]) def gen_python_internal(self, ast): for node in ast: if isinstance(node, list): op = node[0] if op == "if": self.add_line("if {g}:") self.add_block(node[1]) self.maybe_block(node, 2, "else") elif op == "for": self.add_line("for x in {g}:") self.add_block(node[1]) self.maybe_block(node, 2, "else") elif op == "while": self.add_line("while {g}:") self.add_block(node[1]) self.maybe_block(node, 2, "else") elif op == "try": self.add_line("try:") self.add_block(node[1]) # 'except' clauses are different, because there can be any # number. 
if len(node) > 2 and node[2] is not None: for except_node in node[2]: self.add_line(f"except Exception{self.lineno}:") self.add_block(except_node) self.maybe_block(node, 3, "else") self.maybe_block(node, 4, "finally") elif op == "with": self.add_line("with {g} as x:") self.add_block(node[1]) else: raise Exception(f"Bad list node: {node!r}") else: op = node if op == "assign": self.add_line("x = {lineno}") elif op in ["break", "continue"]: self.add_line(op) elif op == "return": self.add_line("return") elif op == "yield": self.add_line("yield {lineno}") else: raise Exception(f"Bad atom node: {node!r}") def weighted_choice(rand, choices): """Choose from a list of [(choice, weight), ...] options, randomly.""" total = sum(w for c, w in choices) r = rand.uniform(0, total) upto = 0 for c, w in choices: if upto + w >= r: return c upto += w assert False, "Shouldn't get here" class RandomAstMaker: def __init__(self, seed=None): self.r = random.Random() if seed is not None: self.r.seed(seed) self.depth = 0 self.bc_allowed = set() def roll(self, prob=0.5): return self.r.random() <= prob def choose(self, choices): """Roll the dice to choose an option.""" return weighted_choice(self.r, choices) STMT_CHOICES = [ [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 20), ("return", 1), ("yield", 0)], [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], [("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], # Last element has to have no compound statements, to limit depth. 
[("assign", 10), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)], ] def make_body(self, parent): body = [] choices = self.STMT_CHOICES[self.depth] self.depth += 1 nstmts = self.choose([(1, 10), (2, 25), (3, 10), (4, 10), (5, 5)]) for _ in range(nstmts): stmt = self.choose(choices) if stmt == "if": body.append(["if", self.make_body("if")]) if self.roll(): body[-1].append(self.make_body("ifelse")) elif stmt == "for": old_allowed = self.bc_allowed self.bc_allowed = self.bc_allowed | {"break", "continue"} body.append(["for", self.make_body("for")]) self.bc_allowed = old_allowed if self.roll(): body[-1].append(self.make_body("forelse")) elif stmt == "while": old_allowed = self.bc_allowed self.bc_allowed = self.bc_allowed | {"break", "continue"} body.append(["while", self.make_body("while")]) self.bc_allowed = old_allowed if self.roll(): body[-1].append(self.make_body("whileelse")) elif stmt == "try": else_clause = self.make_body("try") if self.roll() else None old_allowed = self.bc_allowed self.bc_allowed = self.bc_allowed - {"continue"} finally_clause = self.make_body("finally") if self.roll() else None self.bc_allowed = old_allowed if else_clause: with_exceptions = True elif not else_clause and not finally_clause: with_exceptions = True else: with_exceptions = self.roll() if with_exceptions: num_exceptions = self.choose([(1, 50), (2, 50)]) exceptions = [self.make_body("except") for _ in range(num_exceptions)] else: exceptions = None body.append( ["try", self.make_body("tryelse"), exceptions, else_clause, finally_clause] ) elif stmt == "with": body.append(["with", self.make_body("with")]) elif stmt == "return": body.append(stmt) break elif stmt == "yield": body.append("yield") elif stmt in ["break", "continue"]: if stmt in self.bc_allowed: # A break or continue immediately after a loop is not # interesting. So if we are immediately after a loop, then # insert an assignment. if not body and (parent in ["for", "while"]): body.append("assign") body.append(stmt) break else: stmt = "assign" if stmt == "assign": # Don't put two assignments in a row, there's no point. 
if not body or body[-1] != "assign": body.append("assign") self.depth -= 1 return body def async_alternatives(source): parts = re.split(r"(for |with )", source) nchoices = len(parts) // 2 #print("{} choices".format(nchoices)) def constant(s): return [s] def maybe_async(s): return [s, "async "+s] choices = [f(x) for f, x in zip(cycle([constant, maybe_async]), parts)] for result in product(*choices): source = "".join(result) yield source def compare_alternatives(source): all_all_arcs = collections.defaultdict(list) for i, alternate_source in enumerate(async_alternatives(source)): parser = PythonParser(alternate_source) arcs = parser.arcs() all_all_arcs[tuple(arcs)].append((i, alternate_source)) return len(all_all_arcs) def show_a_bunch(): longest = "" for i in range(100): maker = RandomAstMaker(i) source = PythonSpinner.generate_python(maker.make_body("def")) try: print("-"*80, "\n", source, sep="") compile(source, "", "exec", dont_inherit=True) except Exception as ex: print(f"Oops: {ex}\n{source}") if len(source) > len(longest): longest = source def show_alternatives(): for i in range(1000): maker = RandomAstMaker(i) source = PythonSpinner.generate_python(maker.make_body("def")) nlines = len(source.splitlines()) if nlines < 15: nalt = compare_alternatives(source) if nalt > 1: print(f"--- {nlines:3} lines, {nalt:2} alternatives ---------") print(source) def show_one(): maker = RandomAstMaker() source = PythonSpinner.generate_python(maker.make_body("def")) print(source) if __name__ == "__main__": show_one() #show_alternatives() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/goals.py0000644000175100001770000000652700000000000016356 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """\ Check coverage goals. Use `coverage json` to get a coverage.json file, then run this tool to check goals for subsets of files. Patterns can use '**/foo*.py' to find files anywhere in the project, and '!**/something.py' to exclude files matching a pattern. --file will check each file individually for the required coverage. --group checks the entire group collectively. """ import argparse import json import sys from wcmatch import fnmatch as wcfnmatch # python -m pip install wcmatch from coverage.results import Numbers # Note: an internal class! 
def select_files(files, pat): flags = wcfnmatch.NEGATE | wcfnmatch.NEGATEALL selected = [f for f in files if wcfnmatch.fnmatch(f, pat, flags=flags)] return selected def total_for_files(data, files): total = Numbers(precision=3) for f in files: sel_summ = data["files"][f]["summary"] total += Numbers( n_statements=sel_summ["num_statements"], n_excluded=sel_summ["excluded_lines"], n_missing=sel_summ["missing_lines"], n_branches=sel_summ.get("num_branches", 0), n_partial_branches=sel_summ.get("num_partial_branches", 0), n_missing_branches=sel_summ.get("missing_branches", 0), ) return total def main(argv): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--file", "-f", action="store_true", help="Check each file individually") parser.add_argument("--group", "-g", action="store_true", help="Check a group of files") parser.add_argument("--verbose", "-v", action="store_true", help="Be chatty about what's happening") parser.add_argument("goal", type=float, help="Coverage goal") parser.add_argument("pattern", type=str, nargs="+", help="Patterns to check") args = parser.parse_args(argv) print("** Note: this is a proof-of-concept. Support is not promised. **") print("Read more: https://nedbatchelder.com/blog/202111/coverage_goals.html") print("Feedback is appreciated: https://github.com/nedbat/coveragepy/issues/691") if args.file and args.group: print("Can't use --file and --group together") return 1 if not (args.file or args.group): print("Need either --file or --group") return 1 with open("coverage.json") as j: data = json.load(j) all_files = list(data["files"].keys()) selected = select_files(all_files, args.pattern) ok = True if args.group: total = total_for_files(data, selected) pat_nice = ",".join(args.pattern) result = f"Coverage for {pat_nice} is {total.pc_covered_str}" if total.pc_covered < args.goal: print(f"{result}, below {args.goal}") ok = False elif args.verbose: print(result) else: for fname in selected: total = total_for_files(data, [fname]) result = f"Coverage for {fname} is {total.pc_covered_str}" if total.pc_covered < args.goal: print(f"{result}, below {args.goal}") ok = False elif args.verbose: print(result) return 0 if ok else 2 if __name__ == "__main__": sys.exit(main(sys.argv[1:])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/hack_pyc.py0000644000175100001770000000534700000000000017031 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Wicked hack to get .pyc files to do bytecode tracing instead of line tracing. """ import marshal, new, opcode, sys, types from lnotab import lnotab_numbers, lnotab_string class PycFile: def read(self, f): if isinstance(f, basestring): f = open(f, "rb") self.magic = f.read(4) self.modtime = f.read(4) self.code = marshal.load(f) def write(self, f): if isinstance(f, basestring): f = open(f, "wb") f.write(self.magic) f.write(self.modtime) marshal.dump(self.code, f) def hack_line_numbers(self): self.code = hack_line_numbers(self.code) def hack_line_numbers(code): """ Replace a code object's line number information to claim that every byte of the bytecode is a new source line. Returns a new code object. Also recurses to hack the line numbers in nested code objects. """ # Create a new lnotab table. 
Each opcode is claimed to be at # 1000*lineno + (opcode number within line), so for example, the opcodes on # source line 12 will be given new line numbers 12000, 12001, 12002, etc. old_num = list(lnotab_numbers(code.co_lnotab, code.co_firstlineno)) n_bytes = len(code.co_code) new_num = [] line = 0 opnum_in_line = 0 i_byte = 0 while i_byte < n_bytes: if old_num and i_byte == old_num[0][0]: line = old_num.pop(0)[1] opnum_in_line = 0 new_num.append((i_byte, 100000000 + 1000*line + opnum_in_line)) if ord(code.co_code[i_byte]) >= opcode.HAVE_ARGUMENT: i_byte += 3 else: i_byte += 1 opnum_in_line += 1 # new_num is a list of pairs, (byteoff, lineoff). Turn it into an lnotab. new_firstlineno = new_num[0][1]-1 new_lnotab = lnotab_string(new_num, new_firstlineno) # Recurse into code constants in this code object. new_consts = [] for const in code.co_consts: if type(const) == types.CodeType: new_consts.append(hack_line_numbers(const)) else: new_consts.append(const) # Create a new code object, just like the old one, except with new # line numbers. new_code = new.code( code.co_argcount, code.co_nlocals, code.co_stacksize, code.co_flags, code.co_code, tuple(new_consts), code.co_names, code.co_varnames, code.co_filename, code.co_name, new_firstlineno, new_lnotab ) return new_code def hack_file(f): pyc = PycFile() pyc.read(f) pyc.hack_line_numbers() pyc.write(f) if __name__ == '__main__': hack_file(sys.argv[1]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/new-data.js0000644000175100001770000000312300000000000016722 0ustar00runnerdocker00000000000000{ // As of now: "lines": { "a/b/c.py": [1, 2, 3, 4, 5], "a/b/d.py": [4, 5, 6, 7, 8], }, "arcs": { "a/b/c.py: [[1, 2], [2, 3], [4, 5]], }, "file_tracers": { "a/b/c.py": "fooey.plugin", }, // We used to do this, but it got too bulky, removed in 4.0.1: "run" { "collector": "coverage.py 4.0", "config": { "branch": true, "source": ".", }, "collected": "20150711T090600", }, // Maybe in the future? "files": { "a/b/c.py": { "lines": [1, 2, 3, 4, 5], "arcs": [ [1, 2], [3, 4], [5, -1], ], "plugin": "django.coverage", "lines": { "1": { "tests": [ "foo/bar/test.py:TheTest.test_it", "asdasdasd", ], "tests": [17, 34, 23, 12389], }, "2": { "count": 23, }, "3": {}, "4": {}, "17": {}, }, "arcs": { "1.2": {}, "2.3": {}, "3.-1": {}, }, }, }, "tests": [ { "file": "a/b/c.py", "test": "test_it", }, { "file": "a/b/d.py", "test": "TheTest.test_it", }, ], "runs": [ { // info about each run? }, { ... }, ], } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1298149 coverage-7.4.4/lab/notes/0000755000175100001770000000000000000000000016015 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/notes/bug1303.txt0000644000175100001770000002110700000000000017643 0ustar00runnerdocker00000000000000https://github.com/nedbat/coveragepy/issues/1303 Looks like the race condition is between erasing and creating... COVERAGE_DEBUG=dataio,pid,process,self pytest "./test/." \ -q -s -m unittest \ --cov-report=xml --cov-report=term-missing \ --cov="./pji/." 
\ \ 7715.ea8c: cwd is now '/src/bugs/bug1303/pji' 7715.ea8c: New process: executable: '/usr/local/bin/python' 7715.ea8c: New process: cmd: ['/usr/local/bin/pytest', './test/.', '-q', '-s', '-m', 'unittest', '--cov-report=xml', '--cov-report=term-missing', '--cov=./pji/.'] 7715.ea8c: New process: pid: 7715, parent pid: 7714 7715.ea8c: Erasing data file '/src/bugs/bug1303/pji/.coverage' 7715.ea8c: self: _have_used=False _has_lines=False _has_arcs=False _current_context=None _current_context_id=None _query_context_ids=None> ....................... 7720.ea8c: Opening data file '/src/bugs/bug1303/pji/.coverage' 7720.ea8c: self: _have_used=False _has_lines=False _has_arcs=False _current_context=None _current_context_id=None _query_context_ids=None> 7725.ea8c: Opening data file '/src/bugs/bug1303/pji/.coverage' 7725.ea8c: self: _have_used=False _has_lines=False _has_arcs=False _current_context=None _current_context_id=None _query_context_ids=None> . 7732.ea8c: Opening data file '/src/bugs/bug1303/pji/.coverage' 7732.ea8c: self: _have_used=False _has_lines=False _has_arcs=False _current_context=None _current_context_id=None _query_context_ids=None> 7737.ea8c: Opening data file '/src/bugs/bug1303/pji/.coverage' 7737.ea8c: self: _have_used=False _has_lines=False _has_arcs=False _current_context=None _current_context_id=None _query_context_ids=None> ................................................................................................................................................................................................................................................................................................ 7715.ea8c: Erasing data file '/src/bugs/bug1303/pji/.coverage.docker-desktop.7715.157477' 7715.ea8c: self: _have_used=False _has_lines=False _has_arcs=False _current_context=None _current_context_id=None _query_context_ids=None> 7715.ea8c: Creating data file '/src/bugs/bug1303/pji/.coverage.docker-desktop.7715.157477' 7715.ea8c: self: _have_used=True _has_lines=True _has_arcs=False _current_context=None _current_context_id=None _query_context_ids=None> 7715.ea8c: Opening data file '/src/bugs/bug1303/pji/.coverage' 7715.ea8c: self: _have_used=False _has_lines=False _has_arcs=False _current_context=None _current_context_id=None _query_context_ids=None> INTERNALERROR> Traceback (most recent call last): INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/coverage/sqldata.py", line 1100, in execute INTERNALERROR> return self.con.execute(sql, parameters) INTERNALERROR> sqlite3.OperationalError: no such table: coverage_schema INTERNALERROR> INTERNALERROR> During handling of the above exception, another exception occurred: INTERNALERROR> INTERNALERROR> Traceback (most recent call last): INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/coverage/sqldata.py", line 1105, in execute INTERNALERROR> return self.con.execute(sql, parameters) INTERNALERROR> sqlite3.OperationalError: no such table: coverage_schema INTERNALERROR> INTERNALERROR> The above exception was the direct cause of the following exception: INTERNALERROR> INTERNALERROR> Traceback (most recent call last): INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/coverage/sqldata.py", line 290, in _read_db INTERNALERROR> schema_version, = db.execute_one("select version from coverage_schema") INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/coverage/sqldata.py", line 1133, in execute_one INTERNALERROR> rows = list(self.execute(sql, parameters)) INTERNALERROR> File 
"/usr/local/lib/python3.6/site-packages/coverage/sqldata.py", line 1122, in execute INTERNALERROR> raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc INTERNALERROR> coverage.exceptions.DataError: Couldn't use data file '/src/bugs/bug1303/pji/.coverage': no such table: coverage_schema INTERNALERROR> INTERNALERROR> The above exception was the direct cause of the following exception: INTERNALERROR> INTERNALERROR> Traceback (most recent call last): INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/_pytest/main.py", line 269, in wrap_session INTERNALERROR> session.exitstatus = doit(config, session) or 0 INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/_pytest/main.py", line 323, in _main INTERNALERROR> config.hook.pytest_runtestloop(session=session) INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/pluggy/_hooks.py", line 265, in __call__ INTERNALERROR> return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult) INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/pluggy/_manager.py", line 80, in _hookexec INTERNALERROR> return self._inner_hookexec(hook_name, methods, kwargs, firstresult) INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/pluggy/_callers.py", line 55, in _multicall INTERNALERROR> gen.send(outcome) INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/pytest_cov/plugin.py", line 294, in pytest_runtestloop INTERNALERROR> self.cov_controller.finish() INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/pytest_cov/engine.py", line 44, in ensure_topdir_wrapper INTERNALERROR> return meth(self, *args, **kwargs) INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/pytest_cov/engine.py", line 230, in finish INTERNALERROR> self.cov.stop() INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/coverage/control.py", line 446, in load INTERNALERROR> self._data.read() INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/coverage/sqldata.py", line 778, in read INTERNALERROR> with self._connect(): # TODO: doesn't look right INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/coverage/sqldata.py", line 316, in _connect INTERNALERROR> self._open_db() INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/coverage/sqldata.py", line 284, in _open_db INTERNALERROR> self._read_db() INTERNALERROR> File "/usr/local/lib/python3.6/site-packages/coverage/sqldata.py", line 296, in _read_db INTERNALERROR> ) from exc INTERNALERROR> coverage.exceptions.DataError: Data file '/src/bugs/bug1303/pji/.coverage' doesn't seem to be a coverage data file: Couldn't use data file '/src/bugs/bug1303/pji/.coverage': no such table: coverage_schema 312 passed, 1 warning in 62.60s (0:01:02) 7715.ea8c: atexit: pid: 7715, instance: 7715.ea8c: self: make: *** [Makefile:17: unittest] Error 3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/notes/pypy-738-decorated-functions.txt0000644000175100001770000000650600000000000024043 0ustar00runnerdocker00000000000000Comparing versions: export PY38=/usr/local/pyenv/pyenv/versions/3.8.12/bin/python3.8 export PY39=/usr/local/pyenv/pyenv/versions/3.9.10/bin/python3.9 export PP38old=/usr/local/pypy/pypy3.8-v7.3.7-osx64/bin/pypy3 export PP38=/usr/local/pypy/pypy3.8-v7.3.8rc1-osx64/bin/pypy3 export PP39=/usr/local/pypy/pypy3.9-v7.3.8rc1-osx64/bin/pypy3 $ for py in $PY38 $PY39 $PP38old $PP38 $PP39; do $py -m coverage run --debug=pybehave igor.py; done 2>&1 | grep trace 
trace_decorated_def: True trace_decorator_line_again: False trace_decorated_def: True trace_decorator_line_again: False trace_decorated_def: False trace_decorator_line_again: False trace_decorated_def: False trace_decorator_line_again: False trace_decorated_def: False trace_decorator_line_again: False # t466a_ast.py: import ast import sys def find_function(node, name): if node.__class__.__name__ == "FunctionDef" and node.name == name: return node for node in getattr(node, "body", ()): fnode = find_function(node, name) if fnode is not None: return fnode root_node = ast.parse(open(__file__).read()) func_node = find_function(root_node, "parse") print(func_node.name, func_node.lineno, func_node.end_lineno, tuple(sys.version_info), tuple(getattr(sys, "pypy_version_info", ()))) class Parser(object): @classmethod def parse(cls): formats = [ 5 ] return None Parser.parse() $ for py in $PY38 $PY39 $PP38old $PP38 $PP39; do $py t466a_ast.py; done parse 20 24 (3, 8, 12, 'final', 0) () parse 20 24 (3, 9, 10, 'final', 0) () parse 19 -1 (3, 8, 12, 'final', 0) (7, 3, 7, 'final', 0) parse 19 -1 (3, 8, 12, 'final', 0) (7, 3, 8, 'final', 0) parse 20 24 (3, 9, 10, 'final', 0) (7, 3, 8, 'final', 0) PyPy <=3.8 includes the decorator line in the FunctionDef node PyPy >=3.9 does not include the decorator line in the node PyPy traces the decorator line, but not the def: $ $PP38 -m trace --trace t466a_plain.py --- modulename: t466a_plain, funcname: t466a_plain.py(1): class Parser(object): --- modulename: t466a_plain, funcname: Parser t466a_plain.py(1): class Parser(object): t466a_plain.py(3): @classmethod t466a_plain.py(10): Parser.parse() --- modulename: t466a_plain, funcname: parse t466a_plain.py(5): formats = [ 5 ] t466a_plain.py(8): return None $ $PP39 -m trace --trace t466a_plain.py --- modulename: t466a_plain, funcname: t466a_plain.py(1): class Parser(object): --- modulename: t466a_plain, funcname: Parser t466a_plain.py(1): class Parser(object): t466a_plain.py(3): @classmethod t466a_plain.py(10): Parser.parse() --- modulename: t466a_plain, funcname: parse t466a_plain.py(5): formats = [ 5 ] t466a_plain.py(8): return None CPython traces the decorator and the def: $ $PY39 -m trace --trace t466a_plain.py --- modulename: t466a_plain, funcname: t466a_plain.py(1): class Parser(object): --- modulename: t466a_plain, funcname: Parser t466a_plain.py(1): class Parser(object): t466a_plain.py(3): @classmethod t466a_plain.py(4): def parse(cls): t466a_plain.py(10): Parser.parse() --- modulename: t466a_plain, funcname: parse t466a_plain.py(5): formats = [ 5 ] t466a_plain.py(8): return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/parse_all.py0000644000175100001770000000101400000000000017175 0ustar00runnerdocker00000000000000"""Parse every Python file in a tree.""" import os import sys from coverage.parser import PythonParser for root, dirnames, filenames in os.walk(sys.argv[1]): for filename in filenames: if filename.endswith(".py"): filename = os.path.join(root, filename) print(f":: {filename}") try: par = PythonParser(filename=filename) par.parse_source() par.arcs() except Exception as exc: print(f" ** {exc}") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/parser.py0000644000175100001770000001610100000000000016532 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: 
https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Parser.py: a main for invoking code in coverage/parser.py""" import collections import dis import glob import optparse import os import re import sys import textwrap import types from coverage.parser import PythonParser from coverage.python import get_python_source class ParserMain: """A main for code parsing experiments.""" def main(self, args): """A main function for trying the code from the command line.""" parser = optparse.OptionParser() parser.add_option( "-d", action="store_true", dest="dis", help="Disassemble" ) parser.add_option( "-R", action="store_true", dest="recursive", help="Recurse to find source files" ) parser.add_option( "-s", action="store_true", dest="source", help="Show analyzed source" ) parser.add_option( "-t", action="store_true", dest="tokens", help="Show tokens" ) options, args = parser.parse_args() if options.recursive: if args: root = args[0] else: root = "." for root, _, _ in os.walk(root): for f in glob.glob(root + "/*.py"): self.one_file(options, f) elif not args: parser.print_help() else: self.one_file(options, args[0]) def one_file(self, options, filename): """Process just one file.""" # `filename` can have a line number suffix. In that case, extract those # lines, dedent them, and use that. This is for trying test cases # embedded in the test files. if match := re.search(r"^(.*):(\d+)-(\d+)$", filename): filename, start, end = match.groups() start, end = int(start), int(end) else: start = end = None try: text = get_python_source(filename) if start is not None: lines = text.splitlines(True) text = textwrap.dedent("".join(lines[start-1:end]).replace("\\\\", "\\")) pyparser = PythonParser(text, filename=filename, exclude=r"no\s*cover") pyparser.parse_source() except Exception as err: print(f"{err}") return if options.dis: print("Main code:") disassemble(pyparser) arcs = pyparser.arcs() if options.source or options.tokens: pyparser.show_tokens = options.tokens pyparser.parse_source() if options.source: arc_chars = self.arc_ascii_art(arcs) if arc_chars: arc_width = max(len(a) for a in arc_chars.values()) exit_counts = pyparser.exit_counts() for lineno, ltext in enumerate(pyparser.lines, start=1): marks = [' ', ' ', ' ', ' ', ' '] a = ' ' if lineno in pyparser.raw_statements: marks[0] = '-' if lineno in pyparser.statements: marks[1] = '=' exits = exit_counts.get(lineno, 0) if exits > 1: marks[2] = str(exits) if lineno in pyparser.raw_docstrings: marks[3] = '"' if lineno in pyparser.raw_classdefs: marks[3] = 'C' if lineno in pyparser.raw_excluded: marks[4] = 'x' if arc_chars: a = arc_chars[lineno].ljust(arc_width) else: a = "" print("%4d %s%s %s" % (lineno, "".join(marks), a, ltext)) def arc_ascii_art(self, arcs): """Draw arcs as ascii art. Returns a dictionary mapping line numbers to ascii strings to draw for that line. 
""" plus_ones = set() arc_chars = collections.defaultdict(str) for lfrom, lto in sorted(arcs): if lfrom < 0: arc_chars[lto] += 'v' elif lto < 0: arc_chars[lfrom] += '^' else: if lfrom == lto - 1: plus_ones.add(lfrom) arc_chars[lfrom] += "" # ensure this line is in arc_chars continue if lfrom < lto: l1, l2 = lfrom, lto else: l1, l2 = lto, lfrom w = first_all_blanks(arc_chars[l] for l in range(l1, l2+1)) for l in range(l1, l2+1): if l == lfrom: ch = '<' elif l == lto: ch = '>' else: ch = '|' arc_chars[l] = set_char(arc_chars[l], w, ch) # Add the plusses as the first character for lineno, arcs in arc_chars.items(): arc_chars[lineno] = ( ("+" if lineno in plus_ones else " ") + arcs ) return arc_chars def all_code_objects(code): """Iterate over all the code objects in `code`.""" stack = [code] while stack: # We're going to return the code object on the stack, but first # push its children for later returning. code = stack.pop() stack.extend(c for c in code.co_consts if isinstance(c, types.CodeType)) yield code def disassemble(pyparser): """Disassemble code, for ad-hoc experimenting.""" code = compile(pyparser.text, "", "exec", dont_inherit=True) for code_obj in all_code_objects(code): if pyparser.text: srclines = pyparser.text.splitlines() else: srclines = None print("\n%s: " % code_obj) upto = None for inst in dis.get_instructions(code_obj): if inst.starts_line is not None: if srclines: upto = upto or inst.starts_line - 1 while upto <= inst.starts_line - 1: print("{:>100}{}".format("", srclines[upto])) upto += 1 elif inst.offset > 0: print("") line = inst._disassemble() print(f"{line:<70}") print("") def set_char(s, n, c): """Set the nth char of s to be c, extending s if needed.""" s = s.ljust(n) return s[:n] + c + s[n+1:] def blanks(s): """Return the set of positions where s is blank.""" return {i for i, c in enumerate(s) if c == " "} def first_all_blanks(ss): """Find the first position that is all blank in the strings ss.""" ss = list(ss) blankss = blanks(ss[0]) for s in ss[1:]: blankss &= blanks(s) if blankss: return min(blankss) else: return max(len(s) for s in ss) if __name__ == '__main__': ParserMain().main(sys.argv[1:]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/pick.py0000644000175100001770000000340500000000000016167 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Pick lines from the standard input. Blank or commented lines are ignored. Used to subset lists of tests to run. Use with the --select-cmd pytest plugin option. The first command line argument is a mode for selection. Other arguments depend on the mode. Only one mode is currently implemented: sample. Modes: - ``sample``: randomly sample N lines from the input. - the first argument is N, the number of lines you want. - the second argument is optional: a seed for the randomizer. Using the same seed will produce the same output. 
Examples: Get a list of test nodes:: pytest --collect-only | grep :: > tests.txt Use like this:: pytest --cache-clear --select-cmd="python pick.py sample 10 < tests.txt" For coverage.py specifically:: tox -q -e py311 -- -n 0 --cache-clear --select-cmd="python lab/pick.py sample 10 < tests.txt" or:: for n in $(seq 1 100); do \ echo seed=$n; \ tox -q -e py311 -- -n 0 --cache-clear --select-cmd="python lab/pick.py sample 3 $n < tests.txt"; \ done More about this: https://nedbatchelder.com/blog/202401/randomly_subsetting_test_suites.html """ import random import sys args = sys.argv[1:][::-1] next_arg = args.pop lines = [] for line in sys.stdin: line = line.strip() if not line: continue if line.startswith("#"): continue lines.append(line) mode = next_arg() if mode == "sample": number = int(next_arg()) if args: random.seed(next_arg()) lines = random.sample(lines, number) else: raise ValueError(f"Don't know {mode=}") for line in lines: print(line) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/platform_info.py0000644000175100001770000000115200000000000020075 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Dump information so we can get a quick look at what's available.""" import platform import sys def whatever(f): try: return f() except: return f def dump_module(mod): print(f"\n### {mod.__name__} ---------------------------") for name in dir(mod): if name.startswith("_"): continue print(f"{name:30s}: {whatever(getattr(mod, name))!r:.100}") for mod in [platform, sys]: dump_module(mod) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/run_sysmon.py0000644000175100001770000000467400000000000017466 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Run sys.monitoring on a file of Python code.""" import functools import sys print(sys.version) the_program = sys.argv[1] code = open(the_program).read() my_id = sys.monitoring.COVERAGE_ID sys.monitoring.use_tool_id(my_id, "run_sysmon.py") register = functools.partial(sys.monitoring.register_callback, my_id) events = sys.monitoring.events def bytes_to_lines(code): """Make a dict mapping byte code offsets to line numbers.""" b2l = {} cur_line = 0 for bstart, bend, lineno in code.co_lines(): for boffset in range(bstart, bend, 2): b2l[boffset] = lineno return b2l def sysmon_py_start(code, instruction_offset): print(f"PY_START: {code.co_filename}@{instruction_offset}") sys.monitoring.set_local_events( my_id, code, events.PY_RETURN | events.PY_RESUME | events.LINE | events.BRANCH | events.JUMP, ) def sysmon_py_resume(code, instruction_offset): b2l = bytes_to_lines(code) print( f"PY_RESUME: {code.co_filename}@{instruction_offset}, " + f"{b2l[instruction_offset]}" ) def sysmon_py_return(code, instruction_offset, retval): b2l = bytes_to_lines(code) print( f"PY_RETURN: {code.co_filename}@{instruction_offset}, " + f"{b2l[instruction_offset]}" ) def sysmon_line(code, line_number): print(f"LINE: {code.co_filename}@{line_number}") return sys.monitoring.DISABLE def sysmon_branch(code, instruction_offset, destination_offset): b2l = bytes_to_lines(code) print( f"BRANCH: {code.co_filename}@{instruction_offset}->{destination_offset}, " + 
f"{b2l[instruction_offset]}->{b2l[destination_offset]}" ) def sysmon_jump(code, instruction_offset, destination_offset): b2l = bytes_to_lines(code) print( f"JUMP: {code.co_filename}@{instruction_offset}->{destination_offset}, " + f"{b2l[instruction_offset]}->{b2l[destination_offset]}" ) sys.monitoring.set_events( my_id, events.PY_START | events.PY_UNWIND, ) register(events.PY_START, sysmon_py_start) register(events.PY_RESUME, sysmon_py_resume) register(events.PY_RETURN, sysmon_py_return) # register(events.PY_UNWIND, sysmon_py_unwind_arcs) register(events.LINE, sysmon_line) register(events.BRANCH, sysmon_branch) register(events.JUMP, sysmon_jump) exec(code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/run_trace.py0000644000175100001770000000141200000000000017217 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Run a simple trace function on a file of Python code.""" import os, sys nest = 0 def trace(frame, event, arg): global nest if nest is None: # This can happen when Python is shutting down. return None print("%s%s %s %d @%d" % ( " " * nest, event, os.path.basename(frame.f_code.co_filename), frame.f_lineno, frame.f_lasti, )) if event == 'call': nest += 1 if event == 'return': nest -= 1 return trace print(sys.version) the_program = sys.argv[1] code = open(the_program).read() sys.settrace(trace) exec(code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/select_contexts.py0000644000175100001770000000417000000000000020447 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """\ Select certain contexts from a coverage.py data file. """ import argparse import re import sys import coverage def main(argv): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--include", type=str, help="Regex for contexts to keep") parser.add_argument("--exclude", type=str, help="Regex for contexts to discard") args = parser.parse_args(argv) print("** Note: this is a proof-of-concept. Support is not promised. 
**") print("Feedback is appreciated: https://github.com/nedbat/coveragepy/issues/668") cov_in = coverage.Coverage() cov_in.load() data_in = cov_in.get_data() print(f"Contexts in {data_in.data_filename()}:") for ctx in sorted(data_in.measured_contexts()): print(f" {ctx}") if args.include is None and args.exclude is None: print("Nothing to do, no output written.") return out_file = "output.data" file_names = data_in.measured_files() print(f"{len(file_names)} measured files") print(f"Writing to {out_file}") cov_out = coverage.Coverage(data_file=out_file) data_out = cov_out.get_data() for ctx in sorted(data_in.measured_contexts()): if args.include is not None: if not re.search(args.include, ctx): print(f"Skipping context {ctx}, not included") continue if args.exclude is not None: if re.search(args.exclude, ctx): print(f"Skipping context {ctx}, excluded") continue print(f"Keeping context {ctx}") data_in.set_query_context(ctx) data_out.set_context(ctx) if data_in.has_arcs(): data_out.add_arcs({f: data_in.arcs(f) for f in file_names}) else: data_out.add_lines({f: data_in.lines(f) for f in file_names}) for fname in file_names: data_out.touch_file(fname, data_in.file_tracer(fname)) cov_out.save() if __name__ == "__main__": sys.exit(main(sys.argv[1:])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/show_platform.py0000644000175100001770000000074000000000000020124 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt import platform import types for n in dir(platform): if n.startswith("_"): continue v = getattr(platform, n) if isinstance(v, types.ModuleType): continue if callable(v): try: v = v() n += "()" except: continue print(f"{n:>30}: {v!r}") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/show_pyc.py0000644000175100001770000001350500000000000017076 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Dump the contents of a .pyc file. The output will only be correct if run with the same version of Python that produced the .pyc. """ import binascii import dis import marshal import struct import sys import time import types def show_pyc_file(fname): f = open(fname, "rb") magic = f.read(4) print("magic %s" % (binascii.hexlify(magic))) read_date_and_size = True flags = struct.unpack('= 0x80: line_incr -= 0x100 line_num += line_incr if line_num != last_line_num: yield (byte_num, line_num) def flag_words(flags, flag_defs): words = [] for word, flag in flag_defs: if flag & flags: words.append(word) return ", ".join(words) def show_file(fname): if fname.endswith('pyc'): show_pyc_file(fname) elif fname.endswith('py'): show_py_file(fname) else: print("Odd file:", fname) def main(args): if args[0] == '-c': show_py_text(" ".join(args[1:]).replace(";", "\n")) else: for a in args: show_file(a) if __name__ == '__main__': main(sys.argv[1:]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/lab/treetopy.sh0000644000175100001770000000025600000000000017077 0ustar00runnerdocker00000000000000# Turn a tree of Python files into a series of make_file calls. 
for f in **/*.py; do echo 'make_file("'$1$f'", """\\' sed -e 's/^/ /' <$f echo ' """)' done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/metacov.ini0000644000175100001770000000556300000000000016277 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Settings to use when using coverage.py to measure itself, known as # meta-coverage. This gets intricate because we need to keep the coverage # measurement happening in the tests separate from our own coverage measurement # of coverage.py itself. [run] branch = true data_file = ${COVERAGE_METAFILE-.metacov} parallel = true relative_files = true source = ${COVERAGE_HOME-.}/coverage ${COVERAGE_HOME-.}/tests # $set_env.py: COVERAGE_DYNCTX - Set to 'test_function' for who-tests-what dynamic_context = ${COVERAGE_DYNCTX-none} # $set_env.py: COVERAGE_CONTEXT - Static context for this run (or $ENV_VAR like $TOX_ENV_NAME) context = ${COVERAGE_CONTEXT-none} [report] # We set different pragmas so our code won't be confused with test code, and # we use different pragmas for different reasons that the lines won't be # measured. exclude_lines = pragma: not covered # Lines in test code that aren't covered: we are nested inside ourselves. # Sometimes this is used as a comment: # # cov.start() # blah() # pragma: nested # cov.stop() # pragma: nested # # In order to exclude a series of lines, sometimes it's used as a constant # condition, which might be too cute: # # cov.start() # if "pragma: nested": # blah() # cov.stop() # pragma: nested cov.stop\(\) with cov.collect\(\): # Lines that are only executed when we are debugging coverage.py. def __repr__ pragma: debugging # Lines that are only executed when we are not testing coverage.py. pragma: not testing # Lines that we can't run during metacov. pragma: no metacov pytest.mark.skipif\(env.METACOV if not env.METACOV: # These lines only happen if tests fail. raise AssertionError pragma: only failure # Not-real code for type checking if TYPE_CHECKING: class .*\(Protocol\): @overload # OS error conditions that we can't (or don't care to) replicate. pragma: cant happen # Obscure bugs in specific versions of interpreters, and so probably no # longer tested. pragma: obscure partial_branches = pragma: part covered # A for-loop that always hits its break statement pragma: always breaks pragma: part started # If we're asserting that any() is true, it didn't finish. assert any\( if env.TESTING: if env.METACOV: precision = 3 [html] title = Coverage.py metacov [paths] source = . 
*/coverage/trunk # GitHub Actions on Ubuntu uses /home/runner/work/coveragepy # GitHub Actions on Mac uses /Users/runner/work/coveragepy # GitHub Actions on Window uses D:\a\coveragepy\coveragepy *\coveragepy */coveragepy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/pyproject.toml0000644000175100001770000000767700000000000017064 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt [build-system] requires = ['setuptools'] build-backend = 'setuptools.build_meta' ## MYPY [tool.mypy] check_untyped_defs = true disallow_any_generics = true disallow_incomplete_defs = true disallow_subclassing_any = true disallow_untyped_calls = true disallow_untyped_decorators = true disallow_untyped_defs = true follow_imports = "silent" ignore_missing_imports = true no_implicit_optional = true show_error_codes = true warn_redundant_casts = true warn_return_any = true warn_unreachable = true warn_unused_configs = true warn_unused_ignores = true exclude = """(?x)( ^tests/.*_plugin\\.py$ # not part of our test suite. )""" ## PYLINT [tool.pylint.basic] no-docstring-rgx = "__.*__|test[A-Z_].*|setUp|_decorator|_wrapper|_.*__.*" [tool.pylint.classes] defining-attr-methods = [ "__init__", "__new__", "setUp", "reset", ] [tool.pylint.design] max-args = 15 max-attributes = 40 max-bool-expr = 5 max-branches = 50 max-locals = 50 max-parents = 12 max-public-methods = 500 max-returns = 20 max-statements = 150 min-public-methods = 0 [tool.pylint.main] extension-pkg-whitelist = ["greenlet"] [tool.pylint."messages control"] enable = [ "useless-suppression", ] disable = [ "spelling", # Messages that are just silly: "locally-disabled", "exec-used", "global-statement", "broad-except", "no-else-return", "subprocess-run-check", "use-dict-literal", # Messages that may be silly: "no-member", "using-constant-test", "too-many-nested-blocks", "too-many-ancestors", "unnecessary-pass", "no-else-break", "no-else-continue", # Questionable things, but it's ok, I don't need to be told: "import-outside-toplevel", "self-assigning-variable", "consider-using-with", "missing-timeout", "too-many-lines", "use-implicit-booleaness-not-comparison", # Formatting stuff "superfluous-parens", # Messages that are noisy for now, eventually maybe we'll turn them on: "invalid-name", "protected-access", "unspecified-encoding", "consider-using-f-string", "duplicate-code", "cyclic-import", ] [tool.pylint.reports] score = false [tool.pylint.variables] dummy-variables-rgx = "_|unused|.*_unused" ignored-argument-names = "_|unused|.*_unused" ## PYTEST [tool.pytest.ini_options] addopts = "-q -n auto -p no:legacypath --strict-markers --no-flaky-report -rfEX --failed-first" python_classes = "*Test" markers = [ "expensive: too slow to run during \"make smoke\"", ] # How come these warnings are suppressed successfully here, but not in conftest.py?? filterwarnings = [ # Sample 'ignore': #"ignore:the imp module is deprecated in favour of importlib:DeprecationWarning", ## Pytest warns if it can't collect things that seem to be tests. This should be an error. 
"error::pytest.PytestCollectionWarning", ] # xfail tests that pass should fail the test suite xfail_strict = true # https://docs.pytest.org/en/stable/reference/reference.html#confval-verbosity_assertions verbosity_assertions = 5 balanced_clumps = [ # Because of expensive session-scoped fixture: "VirtualenvTest", # Because of shared-file manipulations (~/tests/actual/testing): "CompareTest", # No idea why this one fails if run on separate workers: "GetZipBytesTest", ] ## RUFF # We aren't using ruff for real yet... [tool.ruff] target-version = "py38" # Can't use [project] line-length = 100 [tool.ruff.lint] select = ["ALL"] ignore = [ "ANN101", # Missing type annotation for `self` in method "ERA001", # Found commented-out code ] ## SCRIV [tool.scriv] # Changelog management: https://pypi.org/project/scriv/ format = "rst" output_file = "CHANGES.rst" insert_marker = "scriv-start-here" end_marker = "scriv-end-here" ghrel_template = "file: ci/ghrel_template.md.j2" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1338148 coverage-7.4.4/requirements/0000755000175100001770000000000000000000000016652 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/dev.in0000644000175100001770000000105200000000000017756 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Requirements input for doing local development work on coverage.py. # "make upgrade" turns this into requirements/dev.pip. -c pins.pip -r pip.in # PyPI requirements for running tests. -r tox.in -r pytest.in # for linting. check-manifest cogapp greenlet pylint readme_renderer # for kitting. requests twine libsass # Just so I have a debugger if I want it. 
pudb # For lab/benchmark tabulate ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/dev.pip0000644000175100001770000000701100000000000020141 0ustar00runnerdocker00000000000000# # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # make upgrade # astroid==3.1.0 # via pylint attrs==23.2.0 # via hypothesis build==1.1.1 # via check-manifest cachetools==5.3.3 # via tox certifi==2024.2.2 # via requests chardet==5.2.0 # via tox charset-normalizer==3.3.2 # via requests check-manifest==0.49 # via -r requirements/dev.in cogapp==3.4.1 # via -r requirements/dev.in colorama==0.4.6 # via # -r requirements/pytest.in # -r requirements/tox.in # tox dill==0.3.8 # via pylint distlib==0.3.8 # via virtualenv docutils==0.20.1 # via readme-renderer exceptiongroup==1.2.0 # via # hypothesis # pytest execnet==2.0.2 # via pytest-xdist filelock==3.13.1 # via # tox # virtualenv flaky==3.8.1 # via -r requirements/pytest.in greenlet==3.0.3 # via -r requirements/dev.in hypothesis==6.99.6 # via -r requirements/pytest.in idna==3.6 # via requests importlib-metadata==7.0.2 # via # build # keyring # twine importlib-resources==6.3.0 # via keyring iniconfig==2.0.0 # via pytest isort==5.13.2 # via pylint jaraco-classes==3.3.1 # via keyring jedi==0.19.1 # via pudb keyring==24.3.1 # via twine libsass==0.23.0 # via -r requirements/dev.in markdown-it-py==3.0.0 # via rich mccabe==0.7.0 # via pylint mdurl==0.1.2 # via markdown-it-py more-itertools==10.2.0 # via jaraco-classes nh3==0.2.15 # via readme-renderer packaging==24.0 # via # build # pudb # pyproject-api # pytest # tox parso==0.8.3 # via jedi pkginfo==1.10.0 # via twine platformdirs==4.2.0 # via # pylint # tox # virtualenv pluggy==1.4.0 # via # pytest # tox pudb==2024.1 # via -r requirements/dev.in pygments==2.17.2 # via # -r requirements/pytest.in # pudb # readme-renderer # rich pylint==3.1.0 # via -r requirements/dev.in pyproject-api==1.6.1 # via tox pyproject-hooks==1.0.0 # via build pytest==8.1.1 # via # -r requirements/pytest.in # pytest-xdist pytest-xdist==3.5.0 # via -r requirements/pytest.in readme-renderer==43.0 # via # -r requirements/dev.in # twine requests==2.31.0 # via # -r requirements/dev.in # requests-toolbelt # twine requests-toolbelt==1.0.0 # via twine rfc3986==2.0.0 # via twine rich==13.7.1 # via twine sortedcontainers==2.4.0 # via hypothesis tabulate==0.9.0 # via -r requirements/dev.in tomli==2.0.1 # via # build # check-manifest # pylint # pyproject-api # pyproject-hooks # pytest # tox tomlkit==0.12.4 # via pylint tox==4.14.1 # via # -r requirements/tox.in # tox-gh tox-gh==1.3.1 # via -r requirements/tox.in twine==5.0.0 # via -r requirements/dev.in typing-extensions==4.10.0 # via # astroid # pylint # rich # urwid urllib3==2.2.1 # via # requests # twine urwid==2.6.9 # via # pudb # urwid-readline urwid-readline==0.14 # via pudb virtualenv==20.25.1 # via # -r requirements/pip.in # tox wcwidth==0.2.13 # via urwid zipp==3.18.0 # via # importlib-metadata # importlib-resources # The following packages are considered to be unsafe in a requirements file: pip==24.0 # via -r requirements/pip.in setuptools==69.2.0 # via # -r requirements/pip.in # check-manifest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/kit.in0000644000175100001770000000076500000000000020001 0ustar00runnerdocker00000000000000# Licensed under the Apache License: 
http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt -c pins.pip # Things needed to make distribution kits. # "make upgrade" turns this into requirements/kit.pip. auditwheel build cibuildwheel setuptools wheel # Build has a windows-only dependency on colorama: # https://github.com/pypa/build/blob/main/setup.cfg#L32 # colorama;os_name == "nt" # We copy it here so it can get pinned. colorama ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/kit.pip0000644000175100001770000000204100000000000020150 0ustar00runnerdocker00000000000000# # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # make upgrade # auditwheel==6.0.0 # via -r requirements/kit.in bashlex==0.18 # via cibuildwheel bracex==2.4 # via cibuildwheel build==1.1.1 # via -r requirements/kit.in certifi==2024.2.2 # via cibuildwheel cibuildwheel==2.17.0 # via -r requirements/kit.in colorama==0.4.6 # via -r requirements/kit.in filelock==3.13.1 # via cibuildwheel importlib-metadata==7.0.2 # via build packaging==24.0 # via # auditwheel # build # cibuildwheel platformdirs==4.2.0 # via cibuildwheel pyelftools==0.31 # via auditwheel pyproject-hooks==1.0.0 # via build tomli==2.0.1 # via # build # cibuildwheel # pyproject-hooks typing-extensions==4.10.0 # via cibuildwheel wheel==0.43.0 # via -r requirements/kit.in zipp==3.18.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: setuptools==69.2.0 # via -r requirements/kit.in ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/light-threads.in0000644000175100001770000000046700000000000021750 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt -c pins.pip # The light-threads packages we test against. eventlet gevent greenlet # gevent needs cffi, but only on Windows, not sure why. cffi>=1.12.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/light-threads.pip0000644000175100001770000000123700000000000022126 0ustar00runnerdocker00000000000000# # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # make upgrade # cffi==1.16.0 # via -r requirements/light-threads.in dnspython==2.6.1 # via eventlet eventlet==0.35.2 # via -r requirements/light-threads.in gevent==24.2.1 # via -r requirements/light-threads.in greenlet==3.0.3 # via # -r requirements/light-threads.in # eventlet # gevent pycparser==2.21 # via cffi zope-event==5.0 # via gevent zope-interface==6.2 # via gevent # The following packages are considered to be unsafe in a requirements file: setuptools==69.2.0 # via # zope-event # zope-interface ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/mypy.in0000644000175100001770000000033400000000000020200 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt -c pins.pip # So that we have pytest types. 
-r pytest.in mypy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/mypy.pip0000644000175100001770000000162200000000000020363 0ustar00runnerdocker00000000000000# # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # make upgrade # attrs==23.2.0 # via hypothesis colorama==0.4.6 # via -r requirements/pytest.in exceptiongroup==1.2.0 # via # hypothesis # pytest execnet==2.0.2 # via pytest-xdist flaky==3.8.1 # via -r requirements/pytest.in hypothesis==6.99.6 # via -r requirements/pytest.in iniconfig==2.0.0 # via pytest mypy==1.9.0 # via -r requirements/mypy.in mypy-extensions==1.0.0 # via mypy packaging==24.0 # via pytest pluggy==1.4.0 # via pytest pygments==2.17.2 # via -r requirements/pytest.in pytest==8.1.1 # via # -r requirements/pytest.in # pytest-xdist pytest-xdist==3.5.0 # via -r requirements/pytest.in sortedcontainers==2.4.0 # via hypothesis tomli==2.0.1 # via # mypy # pytest typing-extensions==4.10.0 # via mypy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/pins.pip0000644000175100001770000000033400000000000020335 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Version pins, for use as a constraints file. # None for now! ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/pip-tools.in0000644000175100001770000000036100000000000021130 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt -c pins.pip # "make upgrade" turns this into requirements/pip-tools.pip. pip-tools ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/pip-tools.pip0000644000175100001770000000124300000000000021312 0ustar00runnerdocker00000000000000# # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # make upgrade # build==1.1.1 # via pip-tools click==8.1.7 # via pip-tools importlib-metadata==7.0.2 # via build packaging==24.0 # via build pip-tools==7.4.1 # via -r requirements/pip-tools.in pyproject-hooks==1.0.0 # via # build # pip-tools tomli==2.0.1 # via # build # pip-tools # pyproject-hooks wheel==0.43.0 # via pip-tools zipp==3.18.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: pip==24.0 # via pip-tools setuptools==69.2.0 # via pip-tools ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/pip.in0000644000175100001770000000037300000000000017775 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt -c pins.pip # "make upgrade" turns this into requirements/pip.pip. 
pip setuptools virtualenv ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/pip.pip0000644000175100001770000000070500000000000020156 0ustar00runnerdocker00000000000000# # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # make upgrade # distlib==0.3.8 # via virtualenv filelock==3.13.1 # via virtualenv platformdirs==4.2.0 # via virtualenv virtualenv==20.25.1 # via -r requirements/pip.in # The following packages are considered to be unsafe in a requirements file: pip==24.0 # via -r requirements/pip.in setuptools==69.2.0 # via -r requirements/pip.in ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/pytest.in0000644000175100001770000000105400000000000020532 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt -c pins.pip # The pytest specifics used by coverage.py # "make upgrade" turns this into requirements/pytest.pip. flaky hypothesis pygments # so that pytest will syntax-color. pytest pytest-xdist # Pytest has a windows-only dependency on colorama: # https://github.com/pytest-dev/pytest/blob/main/setup.cfg#L49 # colorama;sys_platform=="win32" # We copy it here so it can get pinned. colorama ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/pytest.pip0000644000175100001770000000140000000000000020707 0ustar00runnerdocker00000000000000# # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # make upgrade # attrs==23.2.0 # via hypothesis colorama==0.4.6 # via -r requirements/pytest.in exceptiongroup==1.2.0 # via # hypothesis # pytest execnet==2.0.2 # via pytest-xdist flaky==3.8.1 # via -r requirements/pytest.in hypothesis==6.99.6 # via -r requirements/pytest.in iniconfig==2.0.0 # via pytest packaging==24.0 # via pytest pluggy==1.4.0 # via pytest pygments==2.17.2 # via -r requirements/pytest.in pytest==8.1.1 # via # -r requirements/pytest.in # pytest-xdist pytest-xdist==3.5.0 # via -r requirements/pytest.in sortedcontainers==2.4.0 # via hypothesis tomli==2.0.1 # via pytest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/tox.in0000644000175100001770000000077200000000000020022 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt -c pins.pip # Just install tox, which will then install more things. # "make upgrade" turns this into requirements/tox.pip. tox tox-gh # Tox has a windows-only dependency on colorama: # https://github.com/tox-dev/tox/blob/master/setup.cfg#L44 # colorama>=0.4.1 ;platform_system=="Windows" # We copy it here so it can get pinned. 
colorama>=0.4.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/requirements/tox.pip0000644000175100001770000000132600000000000020200 0ustar00runnerdocker00000000000000# # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # make upgrade # cachetools==5.3.3 # via tox chardet==5.2.0 # via tox colorama==0.4.6 # via # -r requirements/tox.in # tox distlib==0.3.8 # via virtualenv filelock==3.13.1 # via # tox # virtualenv packaging==24.0 # via # pyproject-api # tox platformdirs==4.2.0 # via # tox # virtualenv pluggy==1.4.0 # via tox pyproject-api==1.6.1 # via tox tomli==2.0.1 # via # pyproject-api # tox tox==4.14.1 # via # -r requirements/tox.in # tox-gh tox-gh==1.3.1 # via -r requirements/tox.in virtualenv==20.25.1 # via tox ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1658149 coverage-7.4.4/setup.cfg0000644000175100001770000000004600000000000015750 0ustar00runnerdocker00000000000000[egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/setup.py0000644000175100001770000001623100000000000015644 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Code coverage measurement for Python""" # Distutils setup for coverage.py # This file is used unchanged under all versions of Python. import os import sys # Setuptools has to be imported before distutils, or things break. from setuptools import setup from distutils.core import Extension # pylint: disable=wrong-import-order from setuptools.command.build_ext import build_ext # pylint: disable=wrong-import-order from distutils import errors # pylint: disable=wrong-import-order # Get or massage our metadata. We exec coverage/version.py so we can avoid # importing the product code into setup.py. # PYVERSIONS classifiers = """\ Environment :: Console Intended Audience :: Developers License :: OSI Approved :: Apache Software License Operating System :: OS Independent Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Programming Language :: Python :: 3.13 Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy Topic :: Software Development :: Quality Assurance Topic :: Software Development :: Testing """ cov_ver_py = os.path.join(os.path.split(__file__)[0], "coverage/version.py") with open(cov_ver_py) as version_file: # __doc__ will be overwritten by version.py. doc = __doc__ # Keep pylint happy. __version__ = __url__ = version_info = "" # Execute the code in version.py. 
exec(compile(version_file.read(), cov_ver_py, "exec", dont_inherit=True)) with open("README.rst") as readme: readme_text = readme.read() temp_url = __url__.replace("readthedocs", "@@") assert "@@" not in readme_text long_description = ( readme_text.replace("https://coverage.readthedocs.io/en/latest", temp_url) .replace("https://coverage.readthedocs.io", temp_url) .replace("@@", "readthedocs") ) with open("CONTRIBUTORS.txt", "rb") as contributors: paras = contributors.read().split(b"\n\n") num_others = len(paras[-1].splitlines()) num_others += 1 # Count Gareth Rees, who is mentioned in the top paragraph. classifier_list = classifiers.splitlines() if version_info[3] == "alpha": devstat = "3 - Alpha" elif version_info[3] in ["beta", "candidate"]: devstat = "4 - Beta" else: assert version_info[3] == "final" devstat = "5 - Production/Stable" classifier_list.append(f"Development Status :: {devstat}") # Create the keyword arguments for setup() setup_args = dict( name="coverage", version=__version__, packages=[ "coverage", ], package_data={ "coverage": [ "htmlfiles/*.*", "py.typed", ], }, entry_points={ # Install a script as "coverage", and as "coverage3", and as # "coverage-3.7" (or whatever). "console_scripts": [ "coverage = coverage.cmdline:main", "coverage%d = coverage.cmdline:main" % sys.version_info[:1], "coverage-%d.%d = coverage.cmdline:main" % sys.version_info[:2], ], }, extras_require={ # Enable pyproject.toml support. "toml": ['tomli; python_full_version<="3.11.0a6"'], }, # We need to get HTML assets from our htmlfiles directory. zip_safe=False, author=f"Ned Batchelder and {num_others} others", author_email="ned@nedbatchelder.com", description=doc, long_description=long_description, long_description_content_type="text/x-rst", keywords="code coverage testing", license="Apache-2.0", license_files=["LICENSE.txt"], classifiers=classifier_list, url="https://github.com/nedbat/coveragepy", project_urls={ "Documentation": __url__, "Funding": ( "https://tidelift.com/subscription/pkg/pypi-coverage" + "?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=pypi" ), "Issues": "https://github.com/nedbat/coveragepy/issues", "Mastodon": "https://hachyderm.io/@coveragepy", "Mastodon (nedbat)": "https://hachyderm.io/@nedbat", }, python_requires=">=3.8", # minimum of PYVERSIONS ) # A replacement for the build_ext command which raises a single exception # if the build fails, so we can fallback nicely. 
ext_errors = ( errors.CCompilerError, errors.DistutilsExecError, errors.DistutilsPlatformError, ) if sys.platform == "win32": # distutils.msvc9compiler can raise an IOError when failing to # find the compiler ext_errors += (IOError,) class BuildFailed(Exception): """Raise this to indicate the C extension wouldn't build.""" def __init__(self): Exception.__init__(self) self.cause = sys.exc_info()[1] # work around py 2/3 different syntax class ve_build_ext(build_ext): """Build C extensions, but fail with a straightforward exception.""" def run(self): """Wrap `run` with `BuildFailed`.""" try: build_ext.run(self) except errors.DistutilsPlatformError as exc: raise BuildFailed() from exc def build_extension(self, ext): """Wrap `build_extension` with `BuildFailed`.""" try: # Uncomment to test compile failure handling: # raise errors.CCompilerError("OOPS") build_ext.build_extension(self, ext) except ext_errors as exc: raise BuildFailed() from exc except ValueError as err: # this can happen on Windows 64 bit, see Python issue 7511 if "'path'" in str(err): # works with both py 2/3 raise BuildFailed() from err raise # There are a few reasons we might not be able to compile the C extension. # Figure out if we should attempt the C extension or not. compile_extension = True if "__pypy__" in sys.builtin_module_names: # Pypy can't compile C extensions compile_extension = False if compile_extension: setup_args.update( dict( ext_modules=[ Extension( "coverage.tracer", sources=[ "coverage/ctracer/datastack.c", "coverage/ctracer/filedisp.c", "coverage/ctracer/module.c", "coverage/ctracer/tracer.c", ], ), ], cmdclass={ "build_ext": ve_build_ext, }, ), ) def main(): """Actually invoke setup() with the arguments we built above.""" # For a variety of reasons, it might not be possible to install the C # extension. Try it with, and if it fails, try it without. try: setup(**setup_args) except BuildFailed as exc: msg = "Couldn't install with extension module, trying without it..." exc_msg = f"{exc.__class__.__name__}: {exc.cause}" print(f"**\n** {msg}\n** {exc_msg}\n**") del setup_args["ext_modules"] setup(**setup_args) if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/0000755000175100001770000000000000000000000015271 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/__init__.py0000644000175100001770000000030400000000000017377 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Automated tests. Run with pytest.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/balance_xdist_plugin.py0000644000175100001770000001540400000000000022025 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ A pytest plugin to record test times and then use those times to divide tests into evenly balanced workloads for each xdist worker. Two things are hard-coded here that shouldn't be: - The timing data is written to the tmp directory, but should use the pytest cache (https://docs.pytest.org/en/latest/how-to/cache.html). 
- The number of xdist workers is hard-coded to 8 because I couldn't figure out how to find the number. Would it be crazy to read the -n argument directly? You can force some tests to run on the same worker by setting the `balanced_clumps` setting in your pytest config file. Each line is a substring of a test name. All tests with that substring (like -k) will run on the worker: balanced_clumps = LongRunningFixture some_other_test_substring """ import collections import csv import os import shutil import time from pathlib import Path import pytest import xdist.scheduler def pytest_addoption(parser): """Auto-called to define ini-file settings.""" parser.addini( "balanced_clumps", type="linelist", help="Test substrings to assign to the same worker", ) @pytest.hookimpl(tryfirst=True) def pytest_configure(config): """Registers our pytest plugin.""" config.pluginmanager.register(BalanceXdistPlugin(config), "balance_xdist_plugin") class BalanceXdistPlugin: # pragma: debugging """The plugin""" def __init__(self, config): self.config = config self.running_all = (self.config.getoption("-k") == "") self.times = collections.defaultdict(float) self.worker = os.getenv("PYTEST_XDIST_WORKER", "none") self.tests_csv = None def pytest_sessionstart(self, session): """Called once before any tests are run, but in every worker.""" if not self.running_all: return tests_csv_dir = session.startpath.resolve() / "tmp/tests_csv" self.tests_csv = tests_csv_dir / f"{self.worker}.csv" if self.worker == "none": if tests_csv_dir.exists(): for csv_file in tests_csv_dir.iterdir(): with csv_file.open(newline="") as fcsv: reader = csv.reader(fcsv) for row in reader: self.times[row[1]] += float(row[3]) shutil.rmtree(tests_csv_dir) def write_duration_row(self, item, phase, duration): """Helper to write a row to the tracked-test csv file.""" if self.running_all: self.tests_csv.parent.mkdir(parents=True, exist_ok=True) with self.tests_csv.open("a", newline="") as fcsv: csv.writer(fcsv).writerow([self.worker, item.nodeid, phase, duration]) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_setup(self, item): """Run once for each test.""" start = time.time() yield self.write_duration_row(item, "setup", time.time() - start) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_call(self, item): """Run once for each test.""" start = time.time() yield self.write_duration_row(item, "call", time.time() - start) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_teardown(self, item): """Run once for each test.""" start = time.time() yield self.write_duration_row(item, "teardown", time.time() - start) @pytest.hookimpl(trylast=True) def pytest_xdist_make_scheduler(self, config, log): """Create our BalancedScheduler using time data from the last run.""" # Assign tests to chunks nchunks = 8 totals = [0] * nchunks tests = collections.defaultdict(set) # first put the difficult ones all in one worker clumped = set() clumps = config.getini("balanced_clumps") for i, clump_word in enumerate(clumps): clump_nodes = {nodeid for nodeid in self.times.keys() if clump_word in nodeid} i %= nchunks tests[i].update(clump_nodes) totals[i] += sum(self.times[nodeid] for nodeid in clump_nodes) clumped.update(clump_nodes) # Then assign the rest in descending order rest = [(nodeid, t) for (nodeid, t) in self.times.items() if nodeid not in clumped] rest.sort(key=lambda item: item[1], reverse=True) for nodeid, t in rest: lightest = min(enumerate(totals), key=lambda pair: pair[1])[0] tests[lightest].add(nodeid) totals[lightest] += t test_chunks = {} for 
chunk_id, nodeids in tests.items(): for nodeid in nodeids: test_chunks[nodeid] = chunk_id return BalancedScheduler(config, log, clumps, test_chunks) class BalancedScheduler(xdist.scheduler.LoadScopeScheduling): # pylint: disable=abstract-method # pragma: debugging """A balanced-chunk test scheduler for pytest-xdist.""" def __init__(self, config, log, clumps, test_chunks): super().__init__(config, log) self.clumps = clumps self.test_chunks = test_chunks def _split_scope(self, nodeid): """Assign a chunk id to a test node.""" # If we have a chunk assignment for this node, return it. scope = self.test_chunks.get(nodeid) if scope is not None: return scope # If this is a node that should be clumped, clump it. for i, clump_word in enumerate(self.clumps): if clump_word in nodeid: return f"clump{i}" # Otherwise every node is a separate chunk. return nodeid # Run this with: # python -c "from tests.balance_xdist_plugin import show_worker_times as f; f()" def show_worker_times(): # pragma: debugging """Ad-hoc utility to show data from the last tracked-test run.""" times = collections.defaultdict(float) tests = collections.defaultdict(int) tests_csv_dir = Path("tmp/tests_csv") for csv_file in tests_csv_dir.iterdir(): with csv_file.open(newline="") as fcsv: reader = csv.reader(fcsv) for row in reader: worker = row[0] duration = float(row[3]) times[worker] += duration if row[2] == "call": tests[worker] += 1 for worker in sorted(tests.keys()): print(f"{worker}: {tests[worker]:3d} {times[worker]:.2f}") total = sum(times.values()) avg = total / len(times) print(f"total: {total:.2f}, avg: {avg:.2f}") lo = min(times.values()) hi = max(times.values()) print(f"lo = {lo:.2f}; hi = {hi:.2f}; gap = {hi - lo:.2f}; long delta = {hi - avg:.2f}") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/conftest.py0000644000175100001770000001101500000000000017466 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Pytest auto configuration. This module is run automatically by pytest, to define and enable fixtures. """ from __future__ import annotations import os import sys import sysconfig import warnings from pathlib import Path from typing import Iterator import pytest from coverage import env from coverage.files import set_relative_directory # Pytest will rewrite assertions in test modules, but not elsewhere. # This tells pytest to also rewrite assertions in these files: pytest.register_assert_rewrite("tests.coveragetest") pytest.register_assert_rewrite("tests.helpers") # Pytest can take additional options: # $set_env.py: PYTEST_ADDOPTS - Extra arguments to pytest. pytest_plugins = [ "tests.balance_xdist_plugin", "tests.select_plugin", ] @pytest.fixture(autouse=True) def set_warnings() -> None: """Configure warnings to show while running tests.""" warnings.simplefilter("default") warnings.simplefilter("once", DeprecationWarning) # Warnings to suppress: # How come these warnings are successfully suppressed here, but not in pyproject.toml?? if env.PYPY: # pypy3 warns about unclosed files a lot. warnings.filterwarnings("ignore", r".*unclosed file", category=ResourceWarning) # Don't warn about unclosed SQLite connections. # We don't close ":memory:" databases because we don't have a way to connect # to them more than once if we close them. 
In real coverage.py uses, there # are only a couple of them, but our test suite makes many and we get warned # about them all. # Python3.13 added this warning, but the behavior has been the same all along, # without any reported problems, so just quiet the warning. # https://github.com/python/cpython/issues/105539 warnings.filterwarnings("ignore", r"unclosed database", category=ResourceWarning) @pytest.fixture(autouse=True) def reset_sys_path() -> Iterator[None]: """Clean up sys.path changes around every test.""" sys_path = list(sys.path) yield sys.path[:] = sys_path @pytest.fixture(autouse=True) def reset_environment() -> Iterator[None]: """Make sure a test setting an envvar doesn't leak into another test.""" old_environ = os.environ.copy() yield os.environ.clear() os.environ.update(old_environ) @pytest.fixture(autouse=True) def reset_filesdotpy_globals() -> Iterator[None]: """coverage/files.py has some unfortunate globals. Reset them every test.""" set_relative_directory() yield WORKER = os.getenv("PYTEST_XDIST_WORKER", "none") def pytest_sessionstart() -> None: """Run once at the start of the test session.""" # Only in the main process... if WORKER == "none": # Create a .pth file for measuring subprocess coverage. pth_dir = find_writable_pth_directory() assert pth_dir (pth_dir / "subcover.pth").write_text("import coverage; coverage.process_startup()\n") # subcover.pth is deleted by pytest_sessionfinish below. def pytest_sessionfinish() -> None: """Hook the end of a test session, to clean up.""" # This is called by each of the workers and by the main process. if WORKER == "none": for pth_dir in possible_pth_dirs(): # pragma: part covered pth_file = pth_dir / "subcover.pth" if pth_file.exists(): pth_file.unlink() def possible_pth_dirs() -> Iterator[Path]: """Produce a sequence of directories for trying to write .pth files.""" # First look through sys.path, and if we find a .pth file, then it's a good # place to put ours. for pth_dir in map(Path, sys.path): # pragma: part covered pth_files = list(pth_dir.glob("*.pth")) if pth_files: yield pth_dir # If we're still looking, then try the Python library directory. 
# https://github.com/nedbat/coveragepy/issues/339 yield Path(sysconfig.get_path("purelib")) # pragma: cant happen def find_writable_pth_directory() -> Path | None: """Find a place to write a .pth file.""" for pth_dir in possible_pth_dirs(): # pragma: part covered try_it = pth_dir / f"touch_{WORKER}.it" try: try_it.write_text("foo") except OSError: # pragma: cant happen continue os.remove(try_it) return pth_dir return None # pragma: cant happen ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/coveragetest.py0000644000175100001770000005076700000000000020355 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Base test case class for coverage.py testing.""" from __future__ import annotations import contextlib import datetime import difflib import glob import io import os import os.path import random import re import shlex import sys from types import ModuleType from typing import ( Any, Collection, Iterable, Iterator, Mapping, Sequence, ) import coverage from coverage import Coverage from coverage.cmdline import CoverageScript from coverage.data import CoverageData from coverage.misc import import_local_file from coverage.types import TArc, TLineNo from tests.helpers import arcs_to_arcz_repr, arcz_to_arcs, assert_count_equal from tests.helpers import nice_file, run_command from tests.mixins import PytestBase, StdStreamCapturingMixin, RestoreModulesMixin, TempDirMixin # Status returns for the command line. OK, ERR = 0, 1 # The coverage/tests directory, for all sorts of finding test helping things. TESTS_DIR = os.path.dirname(__file__) # Install arguments to pass to pip when reinstalling ourselves. # Defaults to the top of the source tree, but can be overridden if we need # some help on certain platforms. COVERAGE_INSTALL_ARGS = os.getenv("COVERAGE_INSTALL_ARGS", nice_file(TESTS_DIR, "..")) class CoverageTest( StdStreamCapturingMixin, RestoreModulesMixin, TempDirMixin, PytestBase, ): """A base class for coverage.py test cases.""" # Standard unittest setting: show me diffs even if they are very long. maxDiff = None # Tell newer unittest implementations to print long helpful messages. longMessage = True # Let stderr go to stderr, pytest will capture it for us. show_stderr = True def setUp(self) -> None: super().setUp() # Attributes for getting info about what happened. self.last_command_status: int | None = None self.last_command_output: str | None = None self.last_module_name: str | None = None def start_import_stop( self, cov: Coverage, modname: str, modfile: str | None = None, ) -> ModuleType: """Start coverage, import a file, then stop coverage. `cov` is started and stopped, with an `import_local_file` of `modname` in the middle. `modfile` is the file to import as `modname` if it isn't in the current directory. The imported module is returned. """ # Here's something I don't understand. I tried changing the code to use # the handy context manager, like this: # # with cov.collect(): # # Import the Python file, executing it. # return import_local_file(modname, modfile) # # That seemed to work, until 7.4.0 when it made metacov fail after # running all the tests. The deep recursion tests in test_oddball.py # seemed to cause something to be off so that a "Trace function # changed" error would happen as pytest was cleaning up, failing the # metacov runs. 
Putting back the old code below fixes it, but I don't # understand the difference. cov.start() try: # pragma: nested # Import the Python file, executing it. mod = import_local_file(modname, modfile) finally: # pragma: nested # Stop coverage.py. cov.stop() return mod def get_report(self, cov: Coverage, squeeze: bool = True, **kwargs: Any) -> str: """Get the report from `cov`, and canonicalize it.""" repout = io.StringIO() kwargs.setdefault("show_missing", False) cov.report(file=repout, **kwargs) report = repout.getvalue().replace('\\', '/') print(report) # When tests fail, it's helpful to see the output if squeeze: report = re.sub(r" +", " ", report) return report def get_module_name(self) -> str: """Return a random module name to use for this test run.""" self.last_module_name = 'coverage_test_' + str(random.random())[2:] return self.last_module_name def _check_arcs( self, a1: Iterable[TArc] | None, a2: Iterable[TArc] | None, arc_type: str, ) -> str: """Check that the arc lists `a1` and `a2` are equal. If they are equal, return empty string. If they are unequal, return a string explaining what is different. """ # Make them into multi-line strings so we can see what's going wrong. s1 = arcs_to_arcz_repr(a1) s2 = arcs_to_arcz_repr(a2) if s1 != s2: lines1 = s1.splitlines(True) lines2 = s2.splitlines(True) diff = "".join(difflib.ndiff(lines1, lines2)) return "\n" + arc_type + " arcs differ: minus is expected, plus is actual\n" + diff else: return "" def check_coverage( self, text: str, lines: Sequence[TLineNo] | Sequence[list[TLineNo]] | None = None, missing: str | Sequence[str] = "", report: str = "", excludes: Iterable[str] | None = None, partials: Iterable[str] = (), arcz: str | None = None, arcz_missing: str | None = None, arcz_unpredicted: str | None = None, arcs: Iterable[TArc] | None = None, arcs_missing: Iterable[TArc] | None = None, arcs_unpredicted: Iterable[TArc] | None = None, ) -> Coverage: """Check the coverage measurement of `text`. The source `text` is run and measured. `lines` are the line numbers that are executable, or a list of possible line numbers, any of which could match. `missing` are the lines not executed, `excludes` are regexes to match against for excluding lines, and `report` is the text of the measurement report. For arc measurement, `arcz` is a string that can be decoded into arcs in the code (see `arcz_to_arcs` for the encoding scheme). `arcz_missing` are the arcs that are not executed, and `arcz_unpredicted` are the arcs executed in the code, but not deducible from the code. These last two default to "", meaning we explicitly check that there are no missing or unpredicted arcs. Returns the Coverage object, in case you want to poke at it some more. """ __tracebackhide__ = True # pytest, please don't show me this function. # We write the code into a file so that we can import it. # Coverage.py wants to deal with things as modules with file names. modname = self.get_module_name() self.make_file(modname + ".py", text) if arcs is None and arcz is not None: arcs = arcz_to_arcs(arcz) if arcs_missing is None and arcz_missing is not None: arcs_missing = arcz_to_arcs(arcz_missing) if arcs_unpredicted is None and arcz_unpredicted is not None: arcs_unpredicted = arcz_to_arcs(arcz_unpredicted) # Start up coverage.py. 
cov = coverage.Coverage(branch=True) cov.erase() for exc in excludes or []: cov.exclude(exc) for par in partials or []: cov.exclude(par, which='partial') mod = self.start_import_stop(cov, modname) # Clean up our side effects del sys.modules[modname] # Get the analysis results, and check that they are right. analysis = cov._analyze(mod) statements = sorted(analysis.statements) if lines: if isinstance(lines[0], int): # lines is just a list of numbers, it must match the statements # found in the code. assert statements == lines, f"lines: {statements!r} != {lines!r}" else: # lines is a list of possible line number lists, one of them # must match. for line_list in lines: if statements == line_list: break else: assert False, f"None of the lines choices matched {statements!r}" missing_formatted = analysis.missing_formatted() if isinstance(missing, str): msg = f"missing: {missing_formatted!r} != {missing!r}" assert missing_formatted == missing, msg else: for missing_list in missing: if missing_formatted == missing_list: break else: assert False, f"None of the missing choices matched {missing_formatted!r}" if arcs is not None: # print("Possible arcs:") # print(" expected:", arcs) # print(" actual:", analysis.arc_possibilities()) # print("Executed:") # print(" actual:", sorted(set(analysis.arcs_executed()))) # TODO: this would be nicer with pytest-check, once we can run that. msg = ( self._check_arcs(arcs, analysis.arc_possibilities(), "Possible") + self._check_arcs(arcs_missing, analysis.arcs_missing(), "Missing") + self._check_arcs(arcs_unpredicted, analysis.arcs_unpredicted(), "Unpredicted") ) if msg: assert False, msg if report: frep = io.StringIO() cov.report(mod, file=frep, show_missing=True) rep = " ".join(frep.getvalue().split("\n")[2].split()[1:]) assert report == rep, f"{report!r} != {rep!r}" return cov def make_data_file( self, basename: str | None = None, suffix: str | None = None, lines: Mapping[str, Collection[TLineNo]] | None = None, arcs: Mapping[str, Collection[TArc]] | None = None, file_tracers: Mapping[str, str] | None = None, ) -> CoverageData: """Write some data into a coverage data file.""" data = coverage.CoverageData(basename=basename, suffix=suffix) assert lines is None or arcs is None if lines: data.add_lines(lines) if arcs: data.add_arcs(arcs) if file_tracers: data.add_file_tracers(file_tracers) data.write() return data @contextlib.contextmanager def assert_warnings( self, cov: Coverage, warnings: Iterable[str], not_warnings: Iterable[str] = (), ) -> Iterator[None]: """A context manager to check that particular warnings happened in `cov`. `cov` is a Coverage instance. `warnings` is a list of regexes. Every regex must match a warning that was issued by `cov`. It is OK for extra warnings to be issued by `cov` that are not matched by any regex. Warnings that are disabled are still considered issued by this function. `not_warnings` is a list of regexes that must not appear in the warnings. This is only checked if there are some positive warnings to test for in `warnings`. If `warnings` is empty, then `cov` is not allowed to issue any warnings. """ __tracebackhide__ = True saved_warnings = [] def capture_warning( msg: str, slug: str | None = None, once: bool = False, # pylint: disable=unused-argument ) -> None: """A fake implementation of Coverage._warn, to capture warnings.""" # NOTE: we don't implement `once`. 
if slug: msg = f"{msg} ({slug})" saved_warnings.append(msg) original_warn = cov._warn cov._warn = capture_warning # type: ignore[method-assign] try: yield except: # pylint: disable=try-except-raise raise else: if warnings: for warning_regex in warnings: for saved in saved_warnings: if re.search(warning_regex, saved): break else: msg = f"Didn't find warning {warning_regex!r} in {saved_warnings!r}" assert False, msg for warning_regex in not_warnings: for saved in saved_warnings: if re.search(warning_regex, saved): msg = f"Found warning {warning_regex!r} in {saved_warnings!r}" assert False, msg else: # No warnings expected. Raise if any warnings happened. if saved_warnings: assert False, f"Unexpected warnings: {saved_warnings!r}" finally: cov._warn = original_warn # type: ignore[method-assign] def assert_same_files(self, flist1: Iterable[str], flist2: Iterable[str]) -> None: """Assert that `flist1` and `flist2` are the same set of file names.""" flist1_nice = [nice_file(f) for f in flist1] flist2_nice = [nice_file(f) for f in flist2] assert_count_equal(flist1_nice, flist2_nice) def assert_exists(self, fname: str) -> None: """Assert that `fname` is a file that exists.""" assert os.path.exists(fname), f"File {fname!r} should exist" def assert_doesnt_exist(self, fname: str) -> None: """Assert that `fname` is a file that doesn't exist.""" assert not os.path.exists(fname), f"File {fname!r} shouldn't exist" def assert_file_count(self, pattern: str, count: int) -> None: """Assert that there are `count` files matching `pattern`.""" files = sorted(glob.glob(pattern)) msg = "There should be {} files matching {!r}, but there are these: {}" msg = msg.format(count, pattern, files) assert len(files) == count, msg def assert_recent_datetime( self, dt: datetime.datetime, seconds: int = 10, msg: str | None = None, ) -> None: """Assert that `dt` marks a time at most `seconds` seconds ago.""" age = datetime.datetime.now() - dt assert age.total_seconds() >= 0, msg assert age.total_seconds() <= seconds, msg def command_line(self, args: str, ret: int = OK) -> None: """Run `args` through the command line. Use this when you want to run the full coverage machinery, but in the current process. Exceptions may be thrown from deep in the code. Asserts that `ret` is returned by `CoverageScript.command_line`. Compare with `run_command`. Returns None. """ ret_actual = command_line(args) assert ret_actual == ret, f"{ret_actual!r} != {ret!r}" # Some distros rename the coverage command, and need a way to indicate # their new command name to the tests. This is here for them to override, # for example: # https://salsa.debian.org/debian/pkg-python-coverage/-/blob/master/debian/patches/02.rename-public-programs.patch coverage_command = "coverage" def run_command(self, cmd: str) -> str: """Run the command-line `cmd` in a sub-process. `cmd` is the command line to invoke in a sub-process. Returns the combined content of `stdout` and `stderr` output streams from the sub-process. See `run_command_status` for complete semantics. Use this when you need to test the process behavior of coverage. Compare with `command_line`. """ _, output = self.run_command_status(cmd) return output def run_command_status(self, cmd: str) -> tuple[int, str]: """Run the command-line `cmd` in a sub-process, and print its output. Use this when you need to test the process behavior of coverage. Compare with `command_line`. Handles the following command names specially: * "python" is replaced with the command name of the current Python interpreter. 
* "coverage" is replaced with the command name for the main coverage.py program. Returns a pair: the process' exit status and its stdout/stderr text, which are also stored as `self.last_command_status` and `self.last_command_output`. """ # Make sure "python" and "coverage" mean specifically what we want # them to mean. split_commandline = cmd.split() command_name = split_commandline[0] command_args = split_commandline[1:] if command_name == "python": # Running a Python interpreter in a sub-processes can be tricky. # Use the real name of our own executable. So "python foo.py" might # get executed as "python3.3 foo.py". This is important because # Python 3.x doesn't install as "python", so you might get a Python # 2 executable instead if you don't use the executable's basename. command_words = [os.path.basename(sys.executable)] elif command_name == "coverage": # The invocation requests the coverage.py program. Substitute the # actual coverage.py main command name. command_words = [self.coverage_command] else: command_words = [command_name] cmd = " ".join([shlex.quote(w) for w in command_words] + command_args) self.last_command_status, self.last_command_output = run_command(cmd) print(self.last_command_output) return self.last_command_status, self.last_command_output def add_test_modules_to_pythonpath(self) -> None: """Add our test modules directory to PYTHONPATH.""" # Check that there isn't already a PYTHONPATH. assert os.getenv("PYTHONPATH") is None testmods = nice_file(self.working_root(), "tests/modules") zipfile = nice_file(self.working_root(), "tests/zipmods.zip") self.set_environ("PYTHONPATH", testmods + os.pathsep + zipfile) def working_root(self) -> str: """Where is the root of the coverage.py working tree?""" return os.path.dirname(nice_file(__file__, "..")) def report_from_command(self, cmd: str) -> str: """Return the report from the `cmd`, with some convenience added.""" report = self.run_command(cmd).replace('\\', '/') assert "error" not in report.lower() return report def report_lines(self, report: str) -> list[str]: """Return the lines of the report, as a list.""" lines = report.split('\n') assert lines[-1] == "" return lines[:-1] def line_count(self, report: str) -> int: """How many lines are in `report`?""" return len(self.report_lines(report)) def squeezed_lines(self, report: str) -> list[str]: """Return a list of the lines in report, with the spaces squeezed.""" lines = self.report_lines(report) return [re.sub(r"\s+", " ", l.strip()) for l in lines] def last_line_squeezed(self, report: str) -> str: """Return the last line of `report` with the spaces squeezed down.""" return self.squeezed_lines(report)[-1] def get_measured_filenames(self, coverage_data: CoverageData) -> dict[str, str]: """Get paths to measured files. Returns a dict of {filename: absolute path to file} for given CoverageData. """ return {os.path.basename(filename): filename for filename in coverage_data.measured_files()} def get_missing_arc_description(self, cov: Coverage, start: TLineNo, end: TLineNo) -> str: """Get the missing-arc description for a line arc in a coverage run.""" # ugh, unexposed methods?? 
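# There is no public API for this, so reach into Coverage internals:
# _get_file_reporter() and _analyze() below are private methods. This also
# requires that a module has already been imported and measured, so that
# self.last_module_name is set (hence the assert that follows).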
assert self.last_module_name is not None filename = self.last_module_name + ".py" fr = cov._get_file_reporter(filename) arcs_executed = cov._analyze(filename).arcs_executed() return fr.missing_arc_description(start, end, arcs_executed) class UsingModulesMixin: """A mixin for importing modules from tests/modules and tests/moremodules.""" def setUp(self) -> None: super().setUp() # type: ignore[misc] # Parent class saves and restores sys.path, we can just modify it. sys.path.append(nice_file(TESTS_DIR, "modules")) sys.path.append(nice_file(TESTS_DIR, "moremodules")) sys.path.append(nice_file(TESTS_DIR, "zipmods.zip")) def command_line(args: str) -> int: """Run `args` through the CoverageScript command line. Returns the return code from CoverageScript.command_line. """ script = CoverageScript() ret = script.command_line(shlex.split(args)) return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/covmodzip1.py0000644000175100001770000000055600000000000017744 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Module-level docstrings are counted differently in different versions of Python, # so don't add one here. # pylint: disable=missing-module-docstring # covmodzip.py: for putting into a zip file. j = 1 j += 1 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/0000755000175100001770000000000000000000000016216 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/README.rst0000644000175100001770000000412200000000000017704 0ustar00runnerdocker00000000000000.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 .. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt Gold files ========== These are files used in comparisons for some of the tests. Code to support these comparisons is in tests/goldtest.py. If gold tests are failing, you may need to update the gold files by copying the current output of the tests into the gold files. When a test fails, the actual output is in the tests/actual directory. Those files are ignored by git. There's a Makefile in the html directory for working with gold files and their associated support files. To view the tests/actual files, you need to tentatively copy them to the gold directories, and then add the supporting files so they can be viewed as complete output. For example:: cp tests/actual/html/contexts/* tests/gold/html/contexts cd tests/actual/html make complete If the new actual output is correct, you can use "make update-gold" to copy the actual output as the new gold files. If you have changed some of the supporting files (.css or .js), then "make update-support" will copy the updated files to the tests/gold/html/support directory for checking test output. If you have added a gold test, you'll need to manually copy the tests/actual files to tests/gold. Once you've copied the actual results to the gold files, or to check your work again, you can run just the failed tests again with:: tox -e py39 -- -n 0 --lf The saved HTML files in the html directories can't be viewed properly without the supporting CSS and Javascript files. But we don't want to save copies of those files in every subdirectory. 
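As a quick illustration (the targets are described in the next paragraph, and
"make help" lists them all), a minimal viewing session, run from
tests/gold/html where the Makefile lives, might look like::

    make complete
    # ...open the index.html files in a browser and check them...
    make clean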
The make target "make complete" in tests/gold/html will copy the support files so you can open the HTML files to see how they look. When you are done checking the output, you can use "make clean" to remove the support files from the gold directories. If the output files are correct, you can update the gold files with "make update-gold". If there are version-specific gold files (for example, bom/2/\*), you'll need to update them manually. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.089815 coverage-7.4.4/tests/gold/annotate/0000755000175100001770000000000000000000000020027 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/annotate/anno_dir/0000755000175100001770000000000000000000000021620 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/anno_dir/d_80084bf2fba02475___init__.py,cover0000644000175100001770000000000000000000000027545 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/anno_dir/d_80084bf2fba02475_a.py,cover0000644000175100001770000000014100000000000026234 0ustar00runnerdocker00000000000000> def a(x): > if x == 1: > print("x is 1") ! else: ! print("x is not 1") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/anno_dir/d_b039179a8a4ce2c2___init__.py,cover0000644000175100001770000000000000000000000027630 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/anno_dir/d_b039179a8a4ce2c2_b.py,cover0000644000175100001770000000006500000000000026325 0ustar00runnerdocker00000000000000> def b(x): > msg = f"x is {x}" > print(msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/anno_dir/multi.py,cover0000644000175100001770000000006300000000000024436 0ustar00runnerdocker00000000000000> import a.a > import b.b > a.a.a(1) > b.b.b(2) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/annotate/encodings/0000755000175100001770000000000000000000000022000 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/encodings/utf8.py,cover0000644000175100001770000000012500000000000024351 0ustar00runnerdocker00000000000000 # -*- coding: utf-8 -*- # This comment has an accent: รฉ > print("spam eggs") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/annotate/mae/0000755000175100001770000000000000000000000020571 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/mae/mae.py,cover0000644000175100001770000000022400000000000023016 0ustar00runnerdocker00000000000000> def f(x): > if x == 1: > print("1") > else: > print("2") > if f(1): ! print("nope") > if f(2): ! 
print("nope") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/annotate/multi/0000755000175100001770000000000000000000000021161 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/annotate/multi/a/0000755000175100001770000000000000000000000021401 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/multi/a/__init__.py,cover0000644000175100001770000000000000000000000024613 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/multi/a/a.py,cover0000644000175100001770000000013700000000000023307 0ustar00runnerdocker00000000000000> def a(x): > if x == 1: > print "x is 1" ! else: ! print "x is not 1" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/annotate/multi/b/0000755000175100001770000000000000000000000021402 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/multi/b/__init__.py,cover0000644000175100001770000000000000000000000024614 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/multi/b/b.py,cover0000644000175100001770000000004600000000000023310 0ustar00runnerdocker00000000000000> def b(x): > print "x is %s" % x ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/multi/multi.py,cover0000644000175100001770000000006300000000000023777 0ustar00runnerdocker00000000000000> import a.a > import b.b > a.a.a(1) > b.b.b(2) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/annotate/white/0000755000175100001770000000000000000000000021147 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/annotate/white/white.py,cover0000644000175100001770000000101700000000000023753 0ustar00runnerdocker00000000000000 # A test case sent to me by Steve White > def f(self): ! if self==1: ! pass ! elif self.m('fred'): ! pass ! elif (g==1) and (b==2): ! pass ! elif self.m('fred')==True: ! pass ! elif ((g==1) and (b==2))==True: ! pass ! else: ! pass > def g(x): > if x == 1: > a = 1 ! else: ! a = 2 > g(1) > def h(x): - if 0: #pragma: no cover - pass > if x == 1: ! 
a = 1 > else: > a = 2 > h(2) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/html/0000755000175100001770000000000000000000000017162 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/Makefile0000644000175100001770000000170700000000000020627 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt help: @echo "Available targets:" @grep '^[a-zA-Z]' $(MAKEFILE_LIST) | sort | awk -F ':.*?## ' 'NF==2 {printf " %-26s%s\n", $$1, $$2}' complete: ## Copy support files into directories so the HTML can be viewed properly. @for sub in *; do \ if [ -f "$$sub/index.html" ]; then \ echo Copying into $$sub ; \ cp -n support/* $$sub ; \ fi ; \ done ; \ true # because the for loop exits with 1 for some reason. clean: ## Remove the effects of this Makefile. @git clean -fq . update-gold: ## Copy actual output files from latest tests to gold files. @for sub in ../../actual/html/*; do \ rsync --verbose --existing --recursive $$sub/ $$(basename $$sub) ; \ done ; \ true update-support: ## Copy latest support files here for posterity. cp ../../../coverage/htmlfiles/*.{css,js,png} support ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/html/a/0000755000175100001770000000000000000000000017402 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/a/a_py.html0000644000175100001770000001246600000000000021231 0ustar00runnerdocker00000000000000 Coverage for a.py: 67%

    Coverage for a.py: 67%

    3 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1if 1 < 2: 

    2 # Needed a < to look at HTML entities. 

    3 a = 3 

    4else: 

    5 a = 4 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/a/index.html0000644000175100001770000000732600000000000021407 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 67%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded coverage
    a.py 3 1 0 67%
    Total 3 1 0 67%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.145815 coverage-7.4.4/tests/gold/html/b_branch/0000755000175100001770000000000000000000000020720 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/b_branch/b_py.html0000644000175100001770000002511600000000000022544 0ustar00runnerdocker00000000000000 Coverage for b.py: 70%

    1def one(x): 

    2 # This will be a branch that misses the else. 

    3 if x < 2: 3 ↛ 6line 3 didn't jump to line 6, because the condition on line 3 was never false

    4 a = 3 

    5 else: 

    6 a = 4 

    7 

    8one(1) 

    9 

    10def two(x): 

    11 # A missed else that branches to "exit" 

    12 if x: 12 ↛ exitline 12 didn't return from function 'two', because the condition on line 12 was never false

    13 a = 5 

    14 

    15two(1) 

    16 

    17def three(): 

    18 try: 

    19 # This if has two branches, *neither* one taken. 

    20 if name_error_this_variable_doesnt_exist: 20 ↛ 21,   20 ↛ 232 missed branches: 1) line 20 didn't jump to line 21, because the condition on line 20 was never true, 2) line 20 didn't jump to line 23, because the condition on line 20 was never false

    21 a = 1 

    22 else: 

    23 a = 2 

    24 except: 

    25 pass 

    26 

    27three() 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/b_branch/index.html0000644000175100001770000001014500000000000022716 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 70%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded branches partial coverage
    b.py 17 3 0 6 4 70%
    Total 17 3 0 6 4 70%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1498148 coverage-7.4.4/tests/gold/html/bom/0000755000175100001770000000000000000000000017737 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/bom/bom_py.html0000644000175100001770000001315300000000000022115 0ustar00runnerdocker00000000000000 Coverage for bom.py: 100%

    1# A Python source file in utf-8, with BOM. 

    2math = "3×4 = 12, ÷2 = 6±0" 

    3 

    4assert len(math) == 18 

    5assert len(math.encode('utf-8')) == 21 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/bom/index.html0000644000175100001770000000737500000000000021750 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 100%

    coverage.py v7.2.8a0.dev1, created at 2023-06-19 21:52 -0400

    Module statements missing excluded coverage
    bom.py 3 0 0 100%
    Total 3 0 0 100%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1498148 coverage-7.4.4/tests/gold/html/contexts/0000755000175100001770000000000000000000000021031 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/contexts/index.html0000644000175100001770000000743000000000000023032 0ustar00runnerdocker00000000000000 Coverage report
    Module statements missing excluded coverage
    two_tests.py 17 1 0 94%
    Total 17 1 0 94%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/contexts/two_tests_py.html0000644000175100001770000002473100000000000024471 0ustar00runnerdocker00000000000000 Coverage for two_tests.py: 94%

    1def helper(lineno): 

    2 x = 2 1acb

    3 

    4def test_one(): 

    5 a = 5 1c

    6 helper(6) 1c

    7 

    8def test_two(): 

    9 a = 9 1b

    10 b = 10 1b

    11 if a > 11: 1b

    12 b = 12 

    13 assert a == (13-4) 1b

    14 assert b == (14-4) 1b

    15 helper( 1b

    16 16 

    17 ) 

    18 

    19test_one() 

    20x = 20 

    21helper(21) 

    22test_two() 

    ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1498148 coverage-7.4.4/tests/gold/html/isolatin1/0000755000175100001770000000000000000000000021065 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/isolatin1/index.html0000644000175100001770000000737100000000000023072 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 100%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded coverage
    isolatin1.py 2 0 0 100%
    Total 2 0 0 100%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/isolatin1/isolatin1_py.html0000644000175100001770000001250400000000000024370 0ustar00runnerdocker00000000000000 Coverage for isolatin1.py: 100%

    1# -*- coding: iso8859-1 -*- 

    2# A Python source file in another encoding. 

    3 

    4math = "3×4 = 12, ÷2 = 6±0" 

    5assert len(math) == 18 

    ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1498148 coverage-7.4.4/tests/gold/html/omit_1/0000755000175100001770000000000000000000000020352 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_1/index.html0000644000175100001770000001077600000000000022362 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 100%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded coverage
    m1.py 2 0 0 100%
    m2.py 2 0 0 100%
    m3.py 2 0 0 100%
    main.py 8 0 0 100%
    Total 14 0 0 100%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_1/m1_py.html0000644000175100001770000001126000000000000022265 0ustar00runnerdocker00000000000000 Coverage for m1.py: 100%

    1m1a = 1 

    2m1b = 2 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_1/m2_py.html0000644000175100001770000001126000000000000022266 0ustar00runnerdocker00000000000000 Coverage for m2.py: 100%

    Coverage for m2.py: 100%

    2 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1m2a = 1 

    2m2b = 2 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_1/m3_py.html0000644000175100001770000001126400000000000022273 0ustar00runnerdocker00000000000000 Coverage for m3.py: 100%

    Coverage for m3.py: 100%

    2 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1m3a = 1 

    2m3b = 2 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_1/main_py.html0000644000175100001770000001450600000000000022702 0ustar00runnerdocker00000000000000 Coverage for main.py: 100%

    Coverage for main.py: 100%

    8 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1import m1 

    2import m2 

    3import m3 

    4 

    5a = 5 

    6b = 6 

    7 

    8assert m1.m1a == 1 

    9assert m2.m2a == 1 

    10assert m3.m3a == 1 

    ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1498148 coverage-7.4.4/tests/gold/html/omit_2/0000755000175100001770000000000000000000000020353 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_2/index.html0000644000175100001770000001036600000000000022356 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 100%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded coverage
    m2.py 2 0 0 100%
    m3.py 2 0 0 100%
    main.py 8 0 0 100%
    Total 12 0 0 100%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_2/m2_py.html0000644000175100001770000001126000000000000022267 0ustar00runnerdocker00000000000000 Coverage for m2.py: 100%

    1m2a = 1 

    2m2b = 2 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_2/m3_py.html0000644000175100001770000001126400000000000022274 0ustar00runnerdocker00000000000000 Coverage for m3.py: 100%

    Coverage for m3.py: 100%

    2 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1m3a = 1 

    2m3b = 2 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_2/main_py.html0000644000175100001770000001450600000000000022703 0ustar00runnerdocker00000000000000 Coverage for main.py: 100%

    Coverage for main.py: 100%

    8 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1import m1 

    2import m2 

    3import m3 

    4 

    5a = 5 

    6b = 6 

    7 

    8assert m1.m1a == 1 

    9assert m2.m2a == 1 

    10assert m3.m3a == 1 

    ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1498148 coverage-7.4.4/tests/gold/html/omit_3/0000755000175100001770000000000000000000000020354 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_3/index.html0000644000175100001770000000775600000000000022370 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 100%

    coverage.py v6.4a0, created at 2022-05-20 16:28 -0400

    Module statements missing excluded coverage
    m3.py 2 0 0 100%
    main.py 8 0 0 100%
    Total 10 0 0 100%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_3/m3_py.html0000644000175100001770000001126400000000000022275 0ustar00runnerdocker00000000000000 Coverage for m3.py: 100%

    1m3a = 1 

    2m3b = 2 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_3/main_py.html0000644000175100001770000001450600000000000022704 0ustar00runnerdocker00000000000000 Coverage for main.py: 100%

    Coverage for main.py: 100%

    8 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:28 -0400

    1import m1 

    2import m2 

    3import m3 

    4 

    5a = 5 

    6b = 6 

    7 

    8assert m1.m1a == 1 

    9assert m2.m2a == 1 

    10assert m3.m3a == 1 

    ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.153815 coverage-7.4.4/tests/gold/html/omit_4/0000755000175100001770000000000000000000000020355 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_4/index.html0000644000175100001770000001036600000000000022360 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 100%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded coverage
    m1.py 2 0 0 100%
    m3.py 2 0 0 100%
    main.py 8 0 0 100%
    Total 12 0 0 100%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_4/m1_py.html0000644000175100001770000001126000000000000022270 0ustar00runnerdocker00000000000000 Coverage for m1.py: 100%

    1m1a = 1 

    2m1b = 2 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_4/m3_py.html0000644000175100001770000001126400000000000022276 0ustar00runnerdocker00000000000000 Coverage for m3.py: 100%

    Coverage for m3.py: 100%

    2 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1m3a = 1 

    2m3b = 2 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_4/main_py.html0000644000175100001770000001450600000000000022705 0ustar00runnerdocker00000000000000 Coverage for main.py: 100%

    Coverage for main.py: 100%

    8 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1import m1 

    2import m2 

    3import m3 

    4 

    5a = 5 

    6b = 6 

    7 

    8assert m1.m1a == 1 

    9assert m2.m2a == 1 

    10assert m3.m3a == 1 

    ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.153815 coverage-7.4.4/tests/gold/html/omit_5/0000755000175100001770000000000000000000000020356 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_5/index.html0000644000175100001770000000775600000000000022372 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 100%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded coverage
    m1.py 2 0 0 100%
    main.py 8 0 0 100%
    Total 10 0 0 100%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_5/m1_py.html0000644000175100001770000001126400000000000022275 0ustar00runnerdocker00000000000000 Coverage for m1.py: 100%

    1m1a = 1 

    2m1b = 2 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/omit_5/main_py.html0000644000175100001770000001450600000000000022706 0ustar00runnerdocker00000000000000 Coverage for main.py: 100%

    Coverage for main.py: 100%

    8 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1import m1 

    2import m2 

    3import m3 

    4 

    5a = 5 

    6b = 6 

    7 

    8assert m1.m1a == 1 

    9assert m2.m2a == 1 

    10assert m3.m3a == 1 

    ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.153815 coverage-7.4.4/tests/gold/html/other/0000755000175100001770000000000000000000000020303 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/other/blah_blah_other_py.html0000644000175100001770000001237600000000000025007 0ustar00runnerdocker00000000000000 Coverage for /private/var/folders/10/4sn2sk3j2mg5m116f08_367m0000gq/T/pytest-of-nedbatchelder/pytest-49/popen-gw0/t75/othersrc/other.py: 100%

    Coverage for /private/var/folders/10/4sn2sk3j2mg5m116f08_367m0000gq/T/pytest-of-nedbatchelder/pytest-49/popen-gw0/t75/othersrc/other.py: 100%

    1 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1# A file in another directory. We're checking that it ends up in the 

    2# HTML report. 

    3 

    4print("This is the other src!") 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/other/here_py.html0000644000175100001770000001273600000000000022635 0ustar00runnerdocker00000000000000 Coverage for here.py: 75%

    Coverage for here.py: 75%

    4 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1import other 

    2 

    3if 1 < 2: 

    4 h = 3 

    5else: 

    6 h = 4 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/other/index.html0000644000175100001770000001021100000000000022273 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 80%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded coverage
    /private/var/folders/10/4sn2sk3j2mg5m116f08_367m0000gq/T/pytest-of-nedbatchelder/pytest-49/popen-gw0/t75/othersrc/other.py 1 0 0 100%
    here.py 4 1 0 75%
    Total 5 1 0 80%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.153815 coverage-7.4.4/tests/gold/html/partial/0000755000175100001770000000000000000000000020616 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/partial/index.html0000644000175100001770000001017300000000000022615 0ustar00runnerdocker00000000000000 Coverage report
    Module statements missing excluded branches partial coverage
    partial.py 7 0 1 4 1 91%
    Total 7 0 1 4 1 91%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/partial/partial_py.html0000644000175100001770000001774700000000000023670 0ustar00runnerdocker00000000000000 Coverage for partial.py: 91%

    1# partial branches and excluded lines 

    2a = 2 

    3 

    4while "no peephole".upper(): # t4 4 ↛ 7line 4 didn't jump to line 7, because the condition on line 4 was never false

    5 break 

    6 

    7while a: # pragma: no branch 

    8 break 

    9 

    10if 0: 

    11 never_happen() 

    12 

    13if 13: 

    14 a = 14 

    15 

    16if a == 16: 

    17 raise ZeroDivisionError("17") 

    ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.153815 coverage-7.4.4/tests/gold/html/partial_626/0000755000175100001770000000000000000000000021213 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/partial_626/index.html0000644000175100001770000001017300000000000023212 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 87%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded branches partial coverage
    partial.py 9 0 1 6 2 87%
    Total 9 0 1 6 2 87%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/partial_626/partial_py.html0000644000175100001770000002024700000000000024252 0ustar00runnerdocker00000000000000 Coverage for partial.py: 87%

    1# partial branches and excluded lines 

    2a = 2 

    3 

    4while "no peephole".upper(): # t4 4 ↛ 7line 4 didn't jump to line 7, because the condition on line 4 was never false

    5 break 

    6 

    7while a: # pragma: no branch 

    8 break 

    9 

    10if 0: 

    11 never_happen() 

    12 

    13if 13: 13 ↛ 16line 13 didn't jump to line 16, because the condition on line 13 was never false

    14 a = 14 

    15 

    16if a == 16: 

    17 raise ZeroDivisionError("17") 

    ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.153815 coverage-7.4.4/tests/gold/html/styled/0000755000175100001770000000000000000000000020466 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/styled/a_py.html0000644000175100001770000001256700000000000022317 0ustar00runnerdocker00000000000000 Coverage for a.py: 67%

    Coverage for a.py: 67%

    3 statements  

    « prev     ^ index     » next       coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    1if 1 < 2: 

    2 # Needed a < to look at HTML entities. 

    3 a = 3 

    4else: 

    5 a = 4 

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/styled/extra.css0000644000175100001770000000007000000000000022320 0ustar00runnerdocker00000000000000/* Doesn't matter what goes in here, it gets copied. */ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/styled/index.html0000644000175100001770000000742700000000000022475 0ustar00runnerdocker00000000000000 Coverage report

    Coverage report: 67%

    coverage.py v6.4a0, created at 2022-05-20 16:29 -0400

    Module statements missing excluded coverage
    a.py 3 1 0 67%
    Total 3 1 0 67%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/styled/style.css0000644000175100001770000003015200000000000022341 0ustar00runnerdocker00000000000000@charset "UTF-8"; /* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ /* Don't edit this .css file. Edit the .scss file instead! */ html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; } body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; } @media (prefers-color-scheme: dark) { body { background: #1e1e1e; } } @media (prefers-color-scheme: dark) { body { color: #eee; } } html > body { font-size: 16px; } a:active, a:focus { outline: 2px dashed #007acc; } p { font-size: .875em; line-height: 1.4em; } table { border-collapse: collapse; } td { vertical-align: top; } table tr.hidden { display: none !important; } p#no_rows { display: none; font-size: 1.2em; } a.nav { text-decoration: none; color: inherit; } a.nav:hover { text-decoration: underline; color: inherit; } .hidden { display: none; } header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; } @media (prefers-color-scheme: dark) { header { background: black; } } @media (prefers-color-scheme: dark) { header { border-color: #333; } } header .content { padding: 1rem 3.5rem; } header h2 { margin-top: .5em; font-size: 1em; } header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; } @media (prefers-color-scheme: dark) { header p.text { color: #aaa; } } header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; } header.sticky .text { display: none; } header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; } header.sticky .content { padding: 0.5rem 3.5rem; } header.sticky .content p { font-size: 1em; } header.sticky ~ #source { padding-top: 6.5em; } main { position: relative; z-index: 1; } footer { margin: 1rem 3.5rem; } footer .content { padding: 0; color: #666; font-style: italic; } @media (prefers-color-scheme: dark) { footer .content { color: #aaa; } } #index { margin: 1rem 0 0 3.5rem; } h1 { font-size: 1.25em; display: inline-block; } #filter_container { float: right; margin: 0 2em 0 0; } #filter_container input { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; } @media (prefers-color-scheme: dark) { #filter_container input { border-color: #444; } } @media (prefers-color-scheme: dark) { #filter_container input { background: #1e1e1e; } } @media (prefers-color-scheme: dark) { #filter_container input { color: #eee; } } #filter_container input:focus { border-color: #007acc; } header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } @media (prefers-color-scheme: dark) { header button { border-color: #444; } } header button:active, header button:focus { outline: 2px dashed #007acc; } header button.run { background: #eeffee; } @media (prefers-color-scheme: dark) { header button.run { background: #373d29; } } header button.run.show_run { background: #dfd; border: 2px solid #00dd00; 
margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } } header button.mis { background: #ffeeee; } @media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } } header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } } header button.exc { background: #f7f7f7; } @media (prefers-color-scheme: dark) { header button.exc { background: #333; } } header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } } header button.par { background: #ffffd5; } @media (prefers-color-scheme: dark) { header button.par { background: #650; } } header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } } #help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; } #source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; } #help_panel_wrapper { float: right; position: relative; } #keyboard_icon { margin: 5px; } #help_panel_state { display: none; } #help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; } #help_panel .keyhelp p { margin-top: .75em; } #help_panel .legend { font-style: italic; margin-bottom: 1em; } .indexfile #help_panel { width: 25em; } .pyfile #help_panel { width: 18em; } #help_panel_state:checked ~ #help_panel { display: block; } kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; } #source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; } #source p { position: relative; white-space: pre; } #source p * { box-sizing: border-box; } #source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; } @media (prefers-color-scheme: dark) { #source p .n { color: #777; } } #source p .n.highlight { background: #ffdd00; } #source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; } @media (prefers-color-scheme: dark) { #source p .n a { color: #777; } } #source p .n a:hover { text-decoration: underline; color: #999; } @media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } } #source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; } @media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } } #source p .t:hover { background: #f2f2f2; } @media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } } #source p .t:hover ~ .r .annotate.long { display: block; } #source p .t .com { color: #008000; font-style: italic; line-height: 1px; } @media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } } #source p .t .key { font-weight: bold; line-height: 1px; } #source p .t .str { color: #0451a5; } @media (prefers-color-scheme: dark) { #source p .t .str { color: #9cdcfe; } } #source p.mis .t { border-left: 0.2em 
solid #ff0000; } #source p.mis.show_mis .t { background: #fdd; } @media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } } #source p.mis.show_mis .t:hover { background: #f2d2d2; } @media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } } #source p.run .t { border-left: 0.2em solid #00dd00; } #source p.run.show_run .t { background: #dfd; } @media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } } #source p.run.show_run .t:hover { background: #d2f2d2; } @media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } } #source p.exc .t { border-left: 0.2em solid #808080; } #source p.exc.show_exc .t { background: #eee; } @media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } } #source p.exc.show_exc .t:hover { background: #e2e2e2; } @media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } } #source p.par .t { border-left: 0.2em solid #bbbb00; } #source p.par.show_par .t { background: #ffa; } @media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } } #source p.par.show_par .t:hover { background: #f2f2a2; } @media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } } #source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } #source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; } @media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } } #source p .annotate.short:hover ~ .long { display: block; } #source p .annotate.long { width: 30em; right: 2.5em; } #source p input { display: none; } #source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; } #source p input ~ .r label.ctx::before { content: "โ–ถ "; } #source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; } @media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } } @media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } } #source p input:checked ~ .r label.ctx { background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; } @media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } } @media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } } #source p input:checked ~ .r label.ctx::before { content: "โ–ผ "; } #source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; } #source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; } @media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } } #source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; } @media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } } #index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; } #index table.index { margin-left: 
-.5em; } #index td, #index th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid #eee; } @media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } } #index td.name, #index th.name { text-align: left; width: auto; } #index th { font-style: italic; color: #333; cursor: pointer; } @media (prefers-color-scheme: dark) { #index th { color: #ddd; } } #index th:hover { background: #eee; } @media (prefers-color-scheme: dark) { #index th:hover { background: #333; } } #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; } @media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } } #index th[aria-sort="ascending"]::after { font-family: sans-serif; content: " โ†‘"; } #index th[aria-sort="descending"]::after { font-family: sans-serif; content: " โ†“"; } #index td.name a { text-decoration: none; color: inherit; } #index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } #index tr.file:hover { background: #eee; } @media (prefers-color-scheme: dark) { #index tr.file:hover { background: #333; } } #index tr.file:hover td.name { text-decoration: underline; color: inherit; } #scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; } @media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } } @media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } } #scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; } @media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.153815 coverage-7.4.4/tests/gold/html/support/0000755000175100001770000000000000000000000020676 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/support/coverage_html.js0000644000175100001770000005155700000000000024070 0ustar00runnerdocker00000000000000// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 // For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt // Coverage.py HTML report browser code. 
/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */ /*global coverage: true, document, window, $ */ coverage = {}; // General helpers function debounce(callback, wait) { let timeoutId = null; return function(...args) { clearTimeout(timeoutId); timeoutId = setTimeout(() => { callback.apply(this, args); }, wait); }; }; function checkVisible(element) { const rect = element.getBoundingClientRect(); const viewBottom = Math.max(document.documentElement.clientHeight, window.innerHeight); const viewTop = 30; return !(rect.bottom < viewTop || rect.top >= viewBottom); } function on_click(sel, fn) { const elt = document.querySelector(sel); if (elt) { elt.addEventListener("click", fn); } } // Helpers for table sorting function getCellValue(row, column = 0) { const cell = row.cells[column] if (cell.childElementCount == 1) { const child = cell.firstElementChild if (child instanceof HTMLTimeElement && child.dateTime) { return child.dateTime } else if (child instanceof HTMLDataElement && child.value) { return child.value } } return cell.innerText || cell.textContent; } function rowComparator(rowA, rowB, column = 0) { let valueA = getCellValue(rowA, column); let valueB = getCellValue(rowB, column); if (!isNaN(valueA) && !isNaN(valueB)) { return valueA - valueB } return valueA.localeCompare(valueB, undefined, {numeric: true}); } function sortColumn(th) { // Get the current sorting direction of the selected header, // clear state on other headers and then set the new sorting direction const currentSortOrder = th.getAttribute("aria-sort"); [...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none")); if (currentSortOrder === "none") { th.setAttribute("aria-sort", th.dataset.defaultSortOrder || "ascending"); } else { th.setAttribute("aria-sort", currentSortOrder === "ascending" ? "descending" : "ascending"); } const column = [...th.parentElement.cells].indexOf(th) // Sort all rows and afterwards append them in order to move them in the DOM Array.from(th.closest("table").querySelectorAll("tbody tr")) .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (th.getAttribute("aria-sort") === "ascending" ? 1 : -1)) .forEach(tr => tr.parentElement.appendChild(tr) ); } // Find all the elements with data-shortcut attribute, and use them to assign a shortcut key. coverage.assign_shortkeys = function () { document.querySelectorAll("[data-shortcut]").forEach(element => { document.addEventListener("keypress", event => { if (event.target.tagName.toLowerCase() === "input") { return; // ignore keypress from search filter } if (event.key === element.dataset.shortcut) { element.click(); } }); }); }; // Create the events for the filter box. coverage.wire_up_filter = function () { // Cache elements. const table = document.querySelector("table.index"); const table_body_rows = table.querySelectorAll("tbody tr"); const no_rows = document.getElementById("no_rows"); // Observe filter keyevents. document.getElementById("filter").addEventListener("input", debounce(event => { // Keep running total of each metric, first index contains number of shown rows const totals = new Array(table.rows[0].cells.length).fill(0); // Accumulate the percentage as fraction totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // Hide / show elements. 
table_body_rows.forEach(row => { if (!row.cells[0].textContent.includes(event.target.value)) { // hide row.classList.add("hidden"); return; } // show row.classList.remove("hidden"); totals[0]++; for (let column = 1; column < totals.length; column++) { // Accumulate dynamic totals cell = row.cells[column] if (column === totals.length - 1) { // Last column contains percentage const [numer, denom] = cell.dataset.ratio.split(" "); totals[column]["numer"] += parseInt(numer, 10); totals[column]["denom"] += parseInt(denom, 10); } else { totals[column] += parseInt(cell.textContent, 10); } } }); // Show placeholder if no rows will be displayed. if (!totals[0]) { // Show placeholder, hide table. no_rows.style.display = "block"; table.style.display = "none"; return; } // Hide placeholder, show table. no_rows.style.display = null; table.style.display = null; const footer = table.tFoot.rows[0]; // Calculate new dynamic sum values based on visible rows. for (let column = 1; column < totals.length; column++) { // Get footer cell element. const cell = footer.cells[column]; // Set value into dynamic footer cell element. if (column === totals.length - 1) { // Percentage column uses the numerator and denominator, // and adapts to the number of decimal places. const match = /\.([0-9]+)/.exec(cell.textContent); const places = match ? match[1].length : 0; const { numer, denom } = totals[column]; cell.dataset.ratio = `${numer} ${denom}`; // Check denom to prevent NaN if filtered files contain no statements cell.textContent = denom ? `${(numer * 100 / denom).toFixed(places)}%` : `${(100).toFixed(places)}%`; } else { cell.textContent = totals[column]; } } })); // Trigger change event on setup, to force filter on page refresh // (filter value may still be present). document.getElementById("filter").dispatchEvent(new Event("input")); }; coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2"; // Loaded on index.html coverage.index_ready = function () { coverage.assign_shortkeys(); coverage.wire_up_filter(); document.querySelectorAll("[data-sortable] th[aria-sort]").forEach( th => th.addEventListener("click", e => sortColumn(e.target)) ); // Look for a localStorage item containing previous sort settings: const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); if (stored_list) { const {column, direction} = JSON.parse(stored_list); const th = document.querySelector("[data-sortable]").tHead.rows[0].cells[column]; th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending"); th.click() } // Watch for page unload events so we can save the final sort settings: window.addEventListener("unload", function () { const th = document.querySelector('[data-sortable] th[aria-sort="ascending"], [data-sortable] [aria-sort="descending"]'); if (!th) { return; } localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({ column: [...th.parentElement.cells].indexOf(th), direction: th.getAttribute("aria-sort"), })); }); on_click(".button_prev_file", coverage.to_prev_file); on_click(".button_next_file", coverage.to_next_file); on_click(".button_show_hide_help", coverage.show_hide_help); }; // -- pyfile stuff -- coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS"; coverage.pyfile_ready = function () { // If we're directed to a particular line number, highlight the line. 
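// Source lines are wrapped in elements whose ids are "t" plus the line
// number (see line_elt further down), so a fragment like "#t23" can be
// passed straight to querySelector to find that line's number anchor.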
var frag = location.hash; if (frag.length > 2 && frag[1] === "t") { document.querySelector(frag).closest(".n").classList.add("highlight"); coverage.set_sel(parseInt(frag.substr(2), 10)); } else { coverage.set_sel(0); } on_click(".button_toggle_run", coverage.toggle_lines); on_click(".button_toggle_mis", coverage.toggle_lines); on_click(".button_toggle_exc", coverage.toggle_lines); on_click(".button_toggle_par", coverage.toggle_lines); on_click(".button_next_chunk", coverage.to_next_chunk_nicely); on_click(".button_prev_chunk", coverage.to_prev_chunk_nicely); on_click(".button_top_of_page", coverage.to_top); on_click(".button_first_chunk", coverage.to_first_chunk); on_click(".button_prev_file", coverage.to_prev_file); on_click(".button_next_file", coverage.to_next_file); on_click(".button_to_index", coverage.to_index); on_click(".button_show_hide_help", coverage.show_hide_help); coverage.filters = undefined; try { coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE); } catch(err) {} if (coverage.filters) { coverage.filters = JSON.parse(coverage.filters); } else { coverage.filters = {run: false, exc: true, mis: true, par: true}; } for (cls in coverage.filters) { coverage.set_line_visibilty(cls, coverage.filters[cls]); } coverage.assign_shortkeys(); coverage.init_scroll_markers(); coverage.wire_up_sticky_header(); document.querySelectorAll("[id^=ctxs]").forEach( cbox => cbox.addEventListener("click", coverage.expand_contexts) ); // Rebuild scroll markers when the window height changes. window.addEventListener("resize", coverage.build_scroll_markers); }; coverage.toggle_lines = function (event) { const btn = event.target.closest("button"); const category = btn.value const show = !btn.classList.contains("show_" + category); coverage.set_line_visibilty(category, show); coverage.build_scroll_markers(); coverage.filters[category] = show; try { localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters)); } catch(err) {} }; coverage.set_line_visibilty = function (category, should_show) { const cls = "show_" + category; const btn = document.querySelector(".button_toggle_" + category); if (btn) { if (should_show) { document.querySelectorAll("#source ." + category).forEach(e => e.classList.add(cls)); btn.classList.add(cls); } else { document.querySelectorAll("#source ." + category).forEach(e => e.classList.remove(cls)); btn.classList.remove(cls); } } }; // Return the nth line div. coverage.line_elt = function (n) { return document.getElementById("t" + n)?.closest("p"); }; // Set the selection. b and e are line numbers. coverage.set_sel = function (b, e) { // The first line selected. coverage.sel_begin = b; // The next line not selected. coverage.sel_end = (e === undefined) ? b+1 : e; }; coverage.to_top = function () { coverage.set_sel(0, 1); coverage.scroll_window(0); }; coverage.to_first_chunk = function () { coverage.set_sel(0, 1); coverage.to_next_chunk(); }; coverage.to_prev_file = function () { window.location = document.getElementById("prevFileLink").href; } coverage.to_next_file = function () { window.location = document.getElementById("nextFileLink").href; } coverage.to_index = function () { location.href = document.getElementById("indexLink").href; } coverage.show_hide_help = function () { const helpCheck = document.getElementById("help_panel_state") helpCheck.checked = !helpCheck.checked; } // Return a string indicating what kind of chunk this line belongs to, // or null if not a chunk. 
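// Illustrative example: a line element whose class list is "mis show_mis"
// yields "show_mis"; a line with no show_* class yields null.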
coverage.chunk_indicator = function (line_elt) { const classes = line_elt?.className; if (!classes) { return null; } const match = classes.match(/\bshow_\w+\b/); if (!match) { return null; } return match[0]; }; coverage.to_next_chunk = function () { const c = coverage; // Find the start of the next colored chunk. var probe = c.sel_end; var chunk_indicator, probe_line; while (true) { probe_line = c.line_elt(probe); if (!probe_line) { return; } chunk_indicator = c.chunk_indicator(probe_line); if (chunk_indicator) { break; } probe++; } // There's a next chunk, `probe` points to it. var begin = probe; // Find the end of this chunk. var next_indicator = chunk_indicator; while (next_indicator === chunk_indicator) { probe++; probe_line = c.line_elt(probe); next_indicator = c.chunk_indicator(probe_line); } c.set_sel(begin, probe); c.show_selection(); }; coverage.to_prev_chunk = function () { const c = coverage; // Find the end of the prev colored chunk. var probe = c.sel_begin-1; var probe_line = c.line_elt(probe); if (!probe_line) { return; } var chunk_indicator = c.chunk_indicator(probe_line); while (probe > 1 && !chunk_indicator) { probe--; probe_line = c.line_elt(probe); if (!probe_line) { return; } chunk_indicator = c.chunk_indicator(probe_line); } // There's a prev chunk, `probe` points to its last line. var end = probe+1; // Find the beginning of this chunk. var prev_indicator = chunk_indicator; while (prev_indicator === chunk_indicator) { probe--; if (probe <= 0) { return; } probe_line = c.line_elt(probe); prev_indicator = c.chunk_indicator(probe_line); } c.set_sel(probe+1, end); c.show_selection(); }; // Returns 0, 1, or 2: how many of the two ends of the selection are on // the screen right now? coverage.selection_ends_on_screen = function () { if (coverage.sel_begin === 0) { return 0; } const begin = coverage.line_elt(coverage.sel_begin); const end = coverage.line_elt(coverage.sel_end-1); return ( (checkVisible(begin) ? 1 : 0) + (checkVisible(end) ? 1 : 0) ); }; coverage.to_next_chunk_nicely = function () { if (coverage.selection_ends_on_screen() === 0) { // The selection is entirely off the screen: // Set the top line on the screen as selection. // This will select the top-left of the viewport // As this is most likely the span with the line number we take the parent const line = document.elementFromPoint(0, 0).parentElement; if (line.parentElement !== document.getElementById("source")) { // The element is not a source line but the header or similar coverage.select_line_or_chunk(1); } else { // We extract the line number from the id coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); } } coverage.to_next_chunk(); }; coverage.to_prev_chunk_nicely = function () { if (coverage.selection_ends_on_screen() === 0) { // The selection is entirely off the screen: // Set the lowest line on the screen as selection. 
// This will select the bottom-left of the viewport // As this is most likely the span with the line number we take the parent const line = document.elementFromPoint(document.documentElement.clientHeight-1, 0).parentElement; if (line.parentElement !== document.getElementById("source")) { // The element is not a source line but the header or similar coverage.select_line_or_chunk(coverage.lines_len); } else { // We extract the line number from the id coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); } } coverage.to_prev_chunk(); }; // Select line number lineno, or if it is in a colored chunk, select the // entire chunk coverage.select_line_or_chunk = function (lineno) { var c = coverage; var probe_line = c.line_elt(lineno); if (!probe_line) { return; } var the_indicator = c.chunk_indicator(probe_line); if (the_indicator) { // The line is in a highlighted chunk. // Search backward for the first line. var probe = lineno; var indicator = the_indicator; while (probe > 0 && indicator === the_indicator) { probe--; probe_line = c.line_elt(probe); if (!probe_line) { break; } indicator = c.chunk_indicator(probe_line); } var begin = probe + 1; // Search forward for the last line. probe = lineno; indicator = the_indicator; while (indicator === the_indicator) { probe++; probe_line = c.line_elt(probe); indicator = c.chunk_indicator(probe_line); } coverage.set_sel(begin, probe); } else { coverage.set_sel(lineno); } }; coverage.show_selection = function () { // Highlight the lines in the chunk document.querySelectorAll("#source .highlight").forEach(e => e.classList.remove("highlight")); for (let probe = coverage.sel_begin; probe < coverage.sel_end; probe++) { coverage.line_elt(probe).querySelector(".n").classList.add("highlight"); } coverage.scroll_to_selection(); }; coverage.scroll_to_selection = function () { // Scroll the page if the chunk isn't fully visible. if (coverage.selection_ends_on_screen() < 2) { const element = coverage.line_elt(coverage.sel_begin); coverage.scroll_window(element.offsetTop - 60); } }; coverage.scroll_window = function (to_pos) { window.scroll({top: to_pos, behavior: "smooth"}); }; coverage.init_scroll_markers = function () { // Init some variables coverage.lines_len = document.querySelectorAll("#source > p").length; // Build html coverage.build_scroll_markers(); }; coverage.build_scroll_markers = function () { const temp_scroll_marker = document.getElementById("scroll_marker") if (temp_scroll_marker) temp_scroll_marker.remove(); // Don't build markers if the window has no scroll bar. if (document.body.scrollHeight <= window.innerHeight) { return; } const marker_scale = window.innerHeight / document.body.scrollHeight; const line_height = Math.min(Math.max(3, window.innerHeight / coverage.lines_len), 10); let previous_line = -99, last_mark, last_top; const scroll_marker = document.createElement("div"); scroll_marker.id = "scroll_marker"; document.getElementById("source").querySelectorAll( "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par" ).forEach(element => { const line_top = Math.floor(element.offsetTop * marker_scale); const line_number = parseInt(element.querySelector(".n a").id.substr(1)); if (line_number === previous_line + 1) { // If this solid missed block just make previous mark higher. last_mark.style.height = `${line_top + line_height - last_top}px`; } else { // Add colored line in scroll_marker block. 
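// Illustrative arithmetic (hypothetical sizes): with a 10000px document and a
// 1000px viewport, marker_scale is 0.1, so a highlighted line at offsetTop
// 5600 gets its marker placed 560px down the scroll-marker strip.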
last_mark = document.createElement("div"); last_mark.id = `m${line_number}`; last_mark.classList.add("marker"); last_mark.style.height = `${line_height}px`; last_mark.style.top = `${line_top}px`; scroll_marker.append(last_mark); last_top = line_top; } previous_line = line_number; }); // Append last to prevent layout calculation document.body.append(scroll_marker); }; coverage.wire_up_sticky_header = function () { const header = document.querySelector("header"); const header_bottom = ( header.querySelector(".content h2").getBoundingClientRect().top - header.getBoundingClientRect().top ); function updateHeader() { if (window.scrollY > header_bottom) { header.classList.add("sticky"); } else { header.classList.remove("sticky"); } } window.addEventListener("scroll", updateHeader); updateHeader(); }; coverage.expand_contexts = function (e) { var ctxs = e.target.parentNode.querySelector(".ctxs"); if (!ctxs.classList.contains("expanded")) { var ctxs_text = ctxs.textContent; var width = Number(ctxs_text[0]); ctxs.textContent = ""; for (var i = 1; i < ctxs_text.length; i += width) { key = ctxs_text.substring(i, i + width).trim(); ctxs.appendChild(document.createTextNode(contexts[key])); ctxs.appendChild(document.createElement("br")); } ctxs.classList.add("expanded"); } }; document.addEventListener("DOMContentLoaded", () => { if (document.body.classList.contains("indexfile")) { coverage.index_ready(); } else { coverage.pyfile_ready(); } }); ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/support/favicon_32.png0000644000175100001770000000330400000000000023335 0ustar00runnerdocker00000000000000‰PNG  IHDR DคŠฦPLTE250SRS;A6œž  ___^^_CJ>OXH#cm[fef05,[[\HPCFL?JIK<:< tjep\[gT_iX7V_N]\](<;=&#$็ิ๊ึเXใYXซพแX๛๛๛ํืไัQŸฒพัญีํฤ๗๗๗๊๋๋๔สฯๆฟSฃทโY แW๙๘๘็ๆๆ฿๗ฬุ๐ฦฤฤฤฤฺตฆธ˜ขด•Ÿฒ’šฌข‡฿Z๛YV€J;zG:vD7rC5\5*๕๓๓โ๚ฯา๊ยหโปM™ฌดศฅ€“ฐ˜ค‰‹›€ฺUฺUฏษGˆO?l>0X2'ืืืฮฮฮษ฿ธฟิฐชฌฌปฯซM—ฉทหงขคฆฒลขGŒŸD‡—@‘>v„<‰7jw…v{}f&šXึ๓UัQฬP[cPรMมK™<4q:L+"๐๐๑฿฿฿ัััVฉผWงบขชฌ”คฉฉฌฆIกญภŸชฝœ–—™v” ณ‹œฉ‡Œ“‡‘œfz|3WญภถทนฒทนŸฃฃงฐ TœE‹œ˜ญ™EŽ–a‰”›ค“AŽK€ŽP€ŒHz‡}ry{Lqzjvyžx’x†กv u^jo3’mˆ€iq†hmyc/Wbc”`i{`PŸ[*’[ิ๘Zฒ๓Yœ๏Y”๎YfๆY+แXฺ๘Wˆ™SW{SีRCกRewRueRฮQPPQ+ะPวโO!ฌN›N หMI…LพูK{‹KญฮJ\[JjYH ฒGจภE–ซE7E/ฑCŒRBfRBr@|ฐ>›IDAT8หb€&nNC%Nn&  ฬ.ร›๋ccใ9นฝ]฿˜M ”ฏ“)์อŽŽ/!ศƒ,ฏย\เi 6ณณขณ"#ณ๒ย๒]piงูแYQแ™ู|ยp๒Pูœค)S๎ส — TมตŸช?'i๗{7๛๒S?ฟฌ ˆŠˆVT+*K;•์/ฒฒฒJจ(/J(ซT!ศ๒@R`๗yํ›ๅfeีSVnoฟ๓Xรํ‘‘™™:@K˜ุ}ม๒G+€ฺ&$ุWี5งYป7mชyตึˆ‰[ไ˜ ๙n{ทm็š=,-,,,oด•๚คผ็ใbเไฐฑ(฿c๏V๕ ฬายคbษRsWM\s€ฺท”U-ถดถkด ณถpoM5ทuเgPp*(ฉ\Wดฝ/ฉฒฮ2=:lีŠๅ-i mฎๆŽฌ ,1@ฮLุ็e3s™ๅฝทึiหVYพ๚wรสIถ>b ข6ฆฆพ{vmน$๘‘วผ–ึ๎๎i--ึญAfม8'อ™ี ๖๊Y+๎N‡8าาruฒร๓5 ,ž๋ซซKภaนตู๚๒๙Œo ,A*ฌำ$ฮจc0pvž Žk฿›CC2nฝYb ิฟ`ี™ย)ฌ o‚AGา…ึ๓็†ึ/rMผuๅงึถ๑วMไ‡TGฮฆƒ‹-=ๆ…†ผšjk๎ํไ๏g7ญึA‹ƒ[ฯษินGJ๊ใ… >$;š™›ฺฺšิงJs10ฑ๙vNwฐ3s)>๒ะำใcอออ๓ gคฐฉ300ฒL77333w9,ไ๊"W[ป< €‰Ž‰A๑’Œ ˜`ชญ˜;^ซrฤ0sq 8Yใ`ศฮIr๙ๆS3^B™fŽ>.~ฬชDษ(—Ž{๚=ฮ ์ผ๓eแษ^b†นš8ˆR๏bW?YDฦ`dNŽษฤ%๛€h—Rฟ@f ~PาNŒต5ทณs๖w4๗ž(ษฑ‘ทูtƒโผํ\Jk‚5Xู™ก„‹ƒŸUผฉI\Z‰ƒK. 
[Binary PNG image data omitted: the archive members coverage-7.4.4/tests/gold/html/support/favicon_32.png, keybd_closed.png, and keybd_open.png are images; their raw bytes are not meaningful as text.]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/support/style.css0000644000175100001770000003016600000000000022556 0ustar00runnerdocker00000000000000@charset "UTF-8"; /* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ /* Don't edit this .css file. Edit the .scss file instead!
*/ html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; } body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; } @media (prefers-color-scheme: dark) { body { background: #1e1e1e; } } @media (prefers-color-scheme: dark) { body { color: #eee; } } html > body { font-size: 16px; } a:active, a:focus { outline: 2px dashed #007acc; } p { font-size: .875em; line-height: 1.4em; } table { border-collapse: collapse; } td { vertical-align: top; } table tr.hidden { display: none !important; } p#no_rows { display: none; font-size: 1.2em; } a.nav { text-decoration: none; color: inherit; } a.nav:hover { text-decoration: underline; color: inherit; } .hidden { display: none; } header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; } @media (prefers-color-scheme: dark) { header { background: black; } } @media (prefers-color-scheme: dark) { header { border-color: #333; } } header .content { padding: 1rem 3.5rem; } header h2 { margin-top: .5em; font-size: 1em; } header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; } @media (prefers-color-scheme: dark) { header p.text { color: #aaa; } } header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; } header.sticky .text { display: none; } header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; } header.sticky .content { padding: 0.5rem 3.5rem; } header.sticky .content p { font-size: 1em; } header.sticky ~ #source { padding-top: 6.5em; } main { position: relative; z-index: 1; } footer { margin: 1rem 3.5rem; } footer .content { padding: 0; color: #666; font-style: italic; } @media (prefers-color-scheme: dark) { footer .content { color: #aaa; } } #index { margin: 1rem 0 0 3.5rem; } h1 { font-size: 1.25em; display: inline-block; } #filter_container { float: right; margin: 0 2em 0 0; } #filter_container input { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; } @media (prefers-color-scheme: dark) { #filter_container input { border-color: #444; } } @media (prefers-color-scheme: dark) { #filter_container input { background: #1e1e1e; } } @media (prefers-color-scheme: dark) { #filter_container input { color: #eee; } } #filter_container input:focus { border-color: #007acc; } header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } @media (prefers-color-scheme: dark) { header button { border-color: #444; } } header button:active, header button:focus { outline: 2px dashed #007acc; } header button.run { background: #eeffee; } @media (prefers-color-scheme: dark) { header button.run { background: #373d29; } } header button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } } header button.mis { background: #ffeeee; } @media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } } header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } } header button.exc { background: #f7f7f7; } @media 
(prefers-color-scheme: dark) { header button.exc { background: #333; } } header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } } header button.par { background: #ffffd5; } @media (prefers-color-scheme: dark) { header button.par { background: #650; } } header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; } @media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } } #help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; } #source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; } #help_panel_wrapper { float: right; position: relative; } #keyboard_icon { margin: 5px; } #help_panel_state { display: none; } #help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; } #help_panel .keyhelp p { margin-top: .75em; } #help_panel .legend { font-style: italic; margin-bottom: 1em; } .indexfile #help_panel { width: 25em; } .pyfile #help_panel { width: 18em; } #help_panel_state:checked ~ #help_panel { display: block; } kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; } #source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; } #source p { position: relative; white-space: pre; } #source p * { box-sizing: border-box; } #source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; } @media (prefers-color-scheme: dark) { #source p .n { color: #777; } } #source p .n.highlight { background: #ffdd00; } #source p .n a { margin-top: -4em; padding-top: 4em; text-decoration: none; color: #999; } @media (prefers-color-scheme: dark) { #source p .n a { color: #777; } } #source p .n a:hover { text-decoration: underline; color: #999; } @media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } } #source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; } @media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } } #source p .t:hover { background: #f2f2f2; } @media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } } #source p .t:hover ~ .r .annotate.long { display: block; } #source p .t .com { color: #008000; font-style: italic; line-height: 1px; } @media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } } #source p .t .key { font-weight: bold; line-height: 1px; } #source p .t .str { color: #0451a5; } @media (prefers-color-scheme: dark) { #source p .t .str { color: #9cdcfe; } } #source p.mis .t { border-left: 0.2em solid #ff0000; } #source p.mis.show_mis .t { background: #fdd; } @media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } } #source p.mis.show_mis .t:hover { background: #f2d2d2; } @media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } } #source p.run .t { border-left: 0.2em solid #00dd00; } #source p.run.show_run .t { background: #dfd; } @media (prefers-color-scheme: dark) { #source 
p.run.show_run .t { background: #373d29; } } #source p.run.show_run .t:hover { background: #d2f2d2; } @media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } } #source p.exc .t { border-left: 0.2em solid #808080; } #source p.exc.show_exc .t { background: #eee; } @media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } } #source p.exc.show_exc .t:hover { background: #e2e2e2; } @media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } } #source p.par .t { border-left: 0.2em solid #bbbb00; } #source p.par.show_par .t { background: #ffa; } @media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } } #source p.par.show_par .t:hover { background: #f2f2a2; } @media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } } #source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } #source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; } @media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } } #source p .annotate.short:hover ~ .long { display: block; } #source p .annotate.long { width: 30em; right: 2.5em; } #source p input { display: none; } #source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; } #source p input ~ .r label.ctx::before { content: "โ–ถ "; } #source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; } @media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } } @media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } } #source p input:checked ~ .r label.ctx { background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; } @media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } } @media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } } #source p input:checked ~ .r label.ctx::before { content: "โ–ผ "; } #source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; } #source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; } @media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } } #source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; } @media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } } #index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; } #index table.index { margin-left: -.5em; } #index td, #index th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid #eee; } @media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } } #index td.name, #index th.name { text-align: left; width: auto; } #index th { font-style: italic; color: #333; cursor: pointer; } @media (prefers-color-scheme: dark) { #index th { color: #ddd; } } #index th:hover { background: #eee; } @media 
(prefers-color-scheme: dark) { #index th:hover { background: #333; } } #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; } @media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } } #index th[aria-sort="ascending"]::after { font-family: sans-serif; content: " โ†‘"; } #index th[aria-sort="descending"]::after { font-family: sans-serif; content: " โ†“"; } #index td.name a { text-decoration: none; color: inherit; } #index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } #index tr.file:hover { background: #eee; } @media (prefers-color-scheme: dark) { #index tr.file:hover { background: #333; } } #index tr.file:hover td.name { text-decoration: underline; color: inherit; } #scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; } @media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } } @media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } } #scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; } @media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.153815 coverage-7.4.4/tests/gold/html/unicode/0000755000175100001770000000000000000000000020610 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/unicode/index.html0000644000175100001770000000736100000000000022614 0ustar00runnerdocker00000000000000 Coverage report
    Module statements missing excluded coverage
    unicode.py 2 0 0 100%
    Total 2 0 0 100%

    No items found using the specified filter.

    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/html/unicode/unicode_py.html0000644000175100001770000001240300000000000023634 0ustar00runnerdocker00000000000000 Coverage for unicode.py: 100%

    1# -*- coding: utf-8 -*- 

    2# A Python source file with exotic characters. 

    3 

    4upside_down = "ʎd˙ǝbɐɹǝʌoɔ" 

    5surrogate = "db40,dd00: x󠄀" 

    ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.0938148 coverage-7.4.4/tests/gold/testing/0000755000175100001770000000000000000000000017673 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/gold/testing/getty/0000755000175100001770000000000000000000000021027 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/testing/getty/gettysburg.txt0000644000175100001770000000033000000000000023763 0ustar00runnerdocker00000000000000Four score and seven years ago our fathers brought forth upon this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal. 11/19/1863, Gettysburg, Pennsylvania ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/gold/testing/xml/0000755000175100001770000000000000000000000020473 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/testing/xml/output.xml0000644000175100001770000000021100000000000022547 0ustar00runnerdocker00000000000000 Goodbye ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.0938148 coverage-7.4.4/tests/gold/xml/0000755000175100001770000000000000000000000017016 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/gold/xml/x_xml/0000755000175100001770000000000000000000000020145 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/xml/x_xml/coverage.xml0000644000175100001770000000166100000000000022466 0ustar00runnerdocker00000000000000 /private/var/folders/j2/gr3cj3jn63s5q8g3bjvw57hm0000gp/T/coverage_test/tests_test_xml_XmlGoldTest_test_a_xml_1_43316963 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/gold/xml/y_xml_branch/0000755000175100001770000000000000000000000021463 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/gold/xml/y_xml_branch/coverage.xml0000644000175100001770000000207100000000000024000 0ustar00runnerdocker00000000000000 /private/var/folders/j2/gr3cj3jn63s5q8g3bjvw57hm0000gp/T/coverage_test/tests_test_xml_XmlGoldTest_test_y_xml_branch_93378757 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/goldtest.py0000644000175100001770000001526500000000000017501 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """A test base class for tests based on gold file comparison.""" from __future__ import annotations import difflib import filecmp import fnmatch import os import os.path import re import xml.etree.ElementTree from typing import Iterable from tests.coveragetest import TESTS_DIR from tests.helpers import os_sep def 
gold_path(path: str) -> str: """Get a path to a gold file for comparison.""" return os.path.join(TESTS_DIR, "gold", path) def compare( expected_dir: str, actual_dir: str, file_pattern: str | None = None, actual_extra: bool = False, scrubs: list[tuple[str, str]] | None = None, ) -> None: """Compare files matching `file_pattern` in `expected_dir` and `actual_dir`. `actual_extra` true means `actual_dir` can have extra files in it without triggering an assertion. `scrubs` is a list of pairs: regexes to find and replace to scrub the files of unimportant differences. If a comparison fails, a message will be written to stdout, the original unscrubbed output of the test will be written to an "/actual/" directory alongside the "/gold/" directory, and an assertion will be raised. """ __tracebackhide__ = True # pytest, please don't show me this function. assert os_sep("/gold/") in expected_dir assert os.path.exists(actual_dir) os.makedirs(expected_dir, exist_ok=True) dc = filecmp.dircmp(expected_dir, actual_dir) diff_files = _fnmatch_list(dc.diff_files, file_pattern) expected_only = _fnmatch_list(dc.left_only, file_pattern) actual_only = _fnmatch_list(dc.right_only, file_pattern) def save_mismatch(f: str) -> None: """Save a mismatched result to tests/actual.""" save_path = expected_dir.replace(os_sep("/gold/"), os_sep("/actual/")) os.makedirs(save_path, exist_ok=True) save_file = os.path.join(save_path, f) with open(save_file, "w") as savef: with open(os.path.join(actual_dir, f)) as readf: savef.write(readf.read()) print(os_sep(f"Saved actual output to '{save_file}': see tests/gold/README.rst")) # filecmp only compares in binary mode, but we want text mode. So # look through the list of different files, and compare them # ourselves. text_diff = [] for f in diff_files: expected_file = os.path.join(expected_dir, f) with open(expected_file) as fobj: expected = fobj.read() if expected_file.endswith(".xml"): expected = canonicalize_xml(expected) actual_file = os.path.join(actual_dir, f) with open(actual_file) as fobj: actual = fobj.read() if actual_file.endswith(".xml"): actual = canonicalize_xml(actual) if scrubs: expected = scrub(expected, scrubs) actual = scrub(actual, scrubs) if expected != actual: text_diff.append(f'{expected_file} != {actual_file}') expected_lines = expected.splitlines() actual_lines = actual.splitlines() print(f":::: diff '{expected_file}' and '{actual_file}'") print("\n".join(difflib.Differ().compare(expected_lines, actual_lines))) print(f":::: end diff '{expected_file}' and '{actual_file}'") save_mismatch(f) if not actual_extra: for f in actual_only: save_mismatch(f) assert not text_diff, "Files differ: " + "\n".join(text_diff) assert not expected_only, f"Files in {os.path.abspath(expected_dir)} only: {expected_only}" if not actual_extra: assert not actual_only, f"Files in {os.path.abspath(actual_dir)} only: {actual_only}" def contains(filename: str, *strlist: str) -> None: """Check that the file contains all of a list of strings. An assert will be raised if one of the arguments in `strlist` is missing in `filename`. """ __tracebackhide__ = True # pytest, please don't show me this function. with open(filename) as fobj: text = fobj.read() for s in strlist: assert s in text, f"Missing content in {filename}: {s!r}" def contains_rx(filename: str, *rxlist: str) -> None: """Check that the file has lines that re.search all of the regexes. An assert will be raised if one of the regexes in `rxlist` doesn't match any lines in `filename`. 
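For example (hypothetical file and patterns):

    contains_rx("htmlcov/index.html", r"<title>", r"coverage report")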
""" __tracebackhide__ = True # pytest, please don't show me this function. with open(filename) as fobj: lines = fobj.readlines() for rx in rxlist: assert any(re.search(rx, line) for line in lines), ( f"Missing regex in {filename}: r{rx!r}" ) def contains_any(filename: str, *strlist: str) -> None: """Check that the file contains at least one of a list of strings. An assert will be raised if none of the arguments in `strlist` is in `filename`. """ __tracebackhide__ = True # pytest, please don't show me this function. with open(filename) as fobj: text = fobj.read() for s in strlist: if s in text: return assert False, f"Missing content in {filename}: {strlist[0]!r} [1 of {len(strlist)}]" def doesnt_contain(filename: str, *strlist: str) -> None: """Check that the file contains none of a list of strings. An assert will be raised if any of the strings in `strlist` appears in `filename`. """ __tracebackhide__ = True # pytest, please don't show me this function. with open(filename) as fobj: text = fobj.read() for s in strlist: assert s not in text, f"Forbidden content in {filename}: {s!r}" # Helpers def canonicalize_xml(xtext: str) -> str: """Canonicalize some XML text.""" root = xml.etree.ElementTree.fromstring(xtext) for node in root.iter(): node.attrib = dict(sorted(node.items())) return xml.etree.ElementTree.tostring(root).decode("utf-8") def _fnmatch_list(files: list[str], file_pattern: str | None) -> list[str]: """Filter the list of `files` to only those that match `file_pattern`. If `file_pattern` is None, then return the entire list of files. Returns a list of the filtered files. """ if file_pattern: files = [f for f in files if fnmatch.fnmatch(f, file_pattern)] return files def scrub(strdata: str, scrubs: Iterable[tuple[str, str]]) -> str: """Scrub uninteresting data from the payload in `strdata`. `scrubs` is a list of (find, replace) pairs of regexes that are used on `strdata`. A string is returned. """ for rx_find, rx_replace in scrubs: strdata = re.sub(rx_find, rx_replace, strdata) return strdata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/helpers.py0000644000175100001770000003107300000000000017311 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Helpers for coverage.py tests.""" from __future__ import annotations import collections import contextlib import dis import io import os import os.path import re import shutil import subprocess import textwrap import warnings from pathlib import Path from typing import ( Any, Callable, Iterable, Iterator, NoReturn, TypeVar, cast, ) import flaky import pytest from coverage import env from coverage.debug import DebugControl from coverage.exceptions import CoverageWarning from coverage.misc import output_encoding from coverage.types import TArc, TLineNo def run_command(cmd: str) -> tuple[int, str]: """Run a command in a sub-process. Returns the exit status code and the combined stdout and stderr. """ # Subprocesses are expensive, but convenient, and so may be over-used in # the test suite. Use these lines to get a list of the tests using them: if 0: # pragma: debugging with open("/tmp/processes.txt", "a") as proctxt: # type: ignore[unreachable] print(os.getenv("PYTEST_CURRENT_TEST", "unknown"), file=proctxt, flush=True) # In some strange cases (PyPy3 in a virtualenv!?) 
the stdout encoding of # the subprocess is set incorrectly to ascii. Use an environment variable # to force the encoding to be the same as ours. sub_env = dict(os.environ) sub_env['PYTHONIOENCODING'] = output_encoding() proc = subprocess.Popen( cmd, shell=True, env=sub_env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) output, _ = proc.communicate() status = proc.returncode # Get the output, and canonicalize it to strings with newlines. output_str = output.decode(output_encoding()).replace("\r", "") return status, output_str # $set_env.py: COVERAGE_DIS - Disassemble test code to /tmp/dis SHOW_DIS = bool(int(os.getenv("COVERAGE_DIS", "0"))) def make_file( filename: str, text: str = "", bytes: bytes = b"", newline: str | None = None, ) -> str: """Create a file for testing. `filename` is the relative path to the file, including directories if desired, which will be created if need be. `text` is the text content to create in the file, or `bytes` are the bytes to write. If `newline` is provided, it is a string that will be used as the line endings in the created file, otherwise the line endings are as provided in `text`. Returns `filename`. """ # pylint: disable=redefined-builtin # bytes if bytes: data = bytes else: text = textwrap.dedent(text) if newline: text = text.replace("\n", newline) data = text.encode("utf-8") # Make sure the directories are available. dirs, basename = os.path.split(filename) if dirs: os.makedirs(dirs, exist_ok=True) # Create the file. with open(filename, 'wb') as f: f.write(data) if text and basename.endswith(".py") and SHOW_DIS: # pragma: debugging os.makedirs("/tmp/dis", exist_ok=True) with open(f"/tmp/dis/{basename}.dis", "w") as fdis: print(f"# {os.path.abspath(filename)}", file=fdis) cur_test = os.getenv("PYTEST_CURRENT_TEST", "unknown") print(f"# PYTEST_CURRENT_TEST = {cur_test}", file=fdis) try: dis.dis(text, file=fdis) except Exception as exc: # Some tests make .py files that aren't Python, so dis will # fail, which is expected. print(f"#! {exc!r}", file=fdis) # For debugging, enable this to show the contents of files created. if 0: # pragma: debugging print(f" โ”€โ”€โ”€โ”ฌโ”€โ”€โ”ค {filename} โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€") # type: ignore[unreachable] for lineno, line in enumerate(data.splitlines(), start=1): print(f"{lineno:6}โ”‚ {line.rstrip().decode()}") print() return filename def nice_file(*fparts: str) -> str: """Canonicalize the file name composed of the parts in `fparts`.""" fname = os.path.join(*fparts) return os.path.normcase(os.path.abspath(os.path.realpath(fname))) def os_sep(s: str) -> str: """Replace slashes in `s` with the correct separator for the OS.""" return s.replace("/", os.sep) class CheckUniqueFilenames: """Asserts the uniqueness of file names passed to a function.""" def __init__(self, wrapped: Callable[..., Any]) -> None: self.filenames: set[str] = set() self.wrapped = wrapped @classmethod def hook(cls, obj: Any, method_name: str) -> CheckUniqueFilenames: """Replace a method with our checking wrapper. The method must take a string as a first argument. That argument will be checked for uniqueness across all the calls to this method. The values don't have to be file names actually, just strings, but we only use it for filename arguments. """ method = getattr(obj, method_name) hook = cls(method) setattr(obj, method_name, hook.wrapper) return hook def wrapper(self, filename: str, *args: Any, **kwargs: Any) -> Any: """The replacement method. 
Check that we don't have dupes.""" assert filename not in self.filenames, ( f"File name {filename!r} passed to {self.wrapped!r} twice" ) self.filenames.add(filename) return self.wrapped(filename, *args, **kwargs) def re_lines(pat: str, text: str, match: bool = True) -> list[str]: """Return a list of lines selected by `pat` in the string `text`. If `match` is false, the selection is inverted: only the non-matching lines are included. Returns a list, the selected lines, without line endings. """ assert len(pat) < 200, "It's super-easy to swap the arguments to re_lines" return [l for l in text.splitlines() if bool(re.search(pat, l)) == match] def re_lines_text(pat: str, text: str, match: bool = True) -> str: """Return the multi-line text of lines selected by `pat`.""" return "".join(l + "\n" for l in re_lines(pat, text, match=match)) def re_line(pat: str, text: str) -> str: """Return the one line in `text` that matches regex `pat`. Raises an AssertionError if more than one, or less than one, line matches. """ lines = re_lines(pat, text) assert len(lines) == 1 return lines[0] def remove_tree(dirname: str) -> None: """Remove a directory tree. It's fine for the directory to not exist in the first place. """ if os.path.exists(dirname): shutil.rmtree(dirname) # Map chars to numbers for arcz_to_arcs _arcz_map = {'.': -1} _arcz_map.update({c: ord(c) - ord('0') for c in '123456789'}) _arcz_map.update({c: 10 + ord(c) - ord('A') for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}) def arcz_to_arcs(arcz: str) -> list[TArc]: """Convert a compact textual representation of arcs to a list of pairs. The text has space-separated pairs of letters. Period is -1, 1-9 are 1-9, A-Z are 10 through 36. The resulting list is sorted regardless of the order of the input pairs. ".1 12 2." --> [(-1,1), (1,2), (2,-1)] Minus signs can be included in the pairs: "-11, 12, 2-5" --> [(-1,1), (1,2), (2,-5)] """ # The `type: ignore[misc]` here are to suppress "Unpacking a string is # disallowed". a: str b: str arcs = [] for pair in arcz.split(): asgn = bsgn = 1 if len(pair) == 2: a, b = pair # type: ignore[misc] else: assert len(pair) == 3 if pair[0] == "-": _, a, b = pair # type: ignore[misc] asgn = -1 else: assert pair[1] == "-" a, _, b = pair # type: ignore[misc] bsgn = -1 arcs.append((asgn * _arcz_map[a], bsgn * _arcz_map[b])) return sorted(arcs) _arcz_unmap = {val: ch for ch, val in _arcz_map.items()} def _arcs_to_arcz_repr_one(num: TLineNo) -> str: """Return an arcz form of the number `num`, or "?" if there is none.""" if num == -1: return "." z = "" if num < 0: z += "-" num *= -1 z += _arcz_unmap.get(num, "?") return z def arcs_to_arcz_repr(arcs: Iterable[TArc] | None) -> str: """Convert a list of arcs to a readable multi-line form for asserting. Each pair is on its own line, with a comment showing the arcz form, to make it easier to decode when debugging test failures. """ repr_list = [] for a, b in (arcs or ()): line = repr((a, b)) line += " # " line += _arcs_to_arcz_repr_one(a) line += _arcs_to_arcz_repr_one(b) repr_list.append(line) return "\n".join(repr_list) + "\n" @contextlib.contextmanager def change_dir(new_dir: str | Path) -> Iterator[None]: """Change directory, and then change back. Use as a context manager, it will return to the original directory at the end of the block. """ old_dir = os.getcwd() os.chdir(str(new_dir)) try: yield finally: os.chdir(old_dir) T = TypeVar("T") def assert_count_equal( a: Iterable[T] | None, b: Iterable[T] | None, ) -> None: """ A pytest-friendly implementation of assertCountEqual. 
Assert that `a` and `b` have the same elements, but maybe in different order. This only works for hashable elements. """ assert a is not None assert b is not None assert collections.Counter(list(a)) == collections.Counter(list(b)) def assert_coverage_warnings( warns: Iterable[warnings.WarningMessage], *msgs: str | re.Pattern[str], ) -> None: """ Assert that the CoverageWarning's in `warns` have `msgs` as messages. Each msg can be a string compared for equality, or a compiled regex used to search the text. """ assert msgs # don't call this without some messages. warns = [w for w in warns if issubclass(w.category, CoverageWarning)] actuals = [cast(Warning, w.message).args[0] for w in warns] assert len(msgs) == len(actuals) for expected, actual in zip(msgs, actuals): if hasattr(expected, "search"): assert expected.search(actual), f"{actual!r} didn't match {expected!r}" else: assert expected == actual @contextlib.contextmanager def swallow_warnings( message: str = r".", category: type[Warning] = CoverageWarning, ) -> Iterator[None]: """Swallow particular warnings. It's OK if they happen, or if they don't happen. Just ignore them. """ with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=category, message=message) yield xfail_pypy38 = pytest.mark.xfail( env.PYPY and env.PYVERSION[:2] == (3, 8) and env.PYPYVERSION < (7, 3, 11), reason="These tests fail on older PyPy 3.8", ) class FailingProxy: """A proxy for another object, but one method will fail a few times before working.""" def __init__(self, obj: Any, methname: str, fails: list[Exception]) -> None: """Create the failing proxy. `obj` is the object to proxy. `methname` is the method that will fail a few times. `fails` are the exceptions to fail with. Once used up, the method will proxy correctly. """ self.obj = obj self.methname = methname self.fails = fails def __getattr__(self, name: str) -> Any: if name == self.methname and self.fails: meth = self._make_failing_method(self.fails[0]) del self.fails[0] else: meth = getattr(self.obj, name) return meth def _make_failing_method(self, exc: Exception) -> Callable[..., NoReturn]: """Return a function that will raise `exc`.""" def _meth(*args: Any, **kwargs: Any) -> NoReturn: raise exc return _meth class DebugControlString(DebugControl): """A `DebugControl` that writes to a StringIO, for testing.""" def __init__(self, options: Iterable[str]) -> None: self.io = io.StringIO() super().__init__(options, self.io) def get_output(self) -> str: """Get the output text from the `DebugControl`.""" return self.io.getvalue() TestMethod = Callable[[Any], None] def flaky_method(max_runs: int) -> Callable[[TestMethod], TestMethod]: """flaky.flaky, but with type annotations.""" def _decorator(fn: TestMethod) -> TestMethod: return cast(TestMethod, flaky.flaky(max_runs)(fn)) return _decorator ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/js/0000755000175100001770000000000000000000000015705 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/js/index.html0000644000175100001770000000317100000000000017704 0ustar00runnerdocker00000000000000 Coverage.py Javascript Test Suite
    ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/js/tests.js0000644000175100001770000001321200000000000017404 0ustar00runnerdocker00000000000000/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ /* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ // Tests of coverage.py HTML report chunk navigation. /*global coverage, jQuery, $ */ // Test helpers function selection_is(assert, sel) { raw_selection_is(assert, sel, true); } function raw_selection_is(assert, sel, check_highlight) { var beg = sel[0], end = sel[1]; assert.equal(coverage.sel_begin, beg); assert.equal(coverage.sel_end, end); if (check_highlight) { assert.equal($(".linenos .highlight").length, end-beg); } } // The spec is a list of "rbw" letters, indicating colors of successive lines. // We set the show_r and show_b classes for r and b. function build_fixture(spec) { var i, data; $("#fixture-template").tmpl().appendTo("#qunit-fixture"); for (i = 0; i < spec.length; i++) { data = {number: i+1, klass: spec.substr(i, 1)}; $("#lineno-template").tmpl(data).appendTo("#qunit-fixture .linenos"); $("#text-template").tmpl(data).appendTo("#qunit-fixture .text"); } coverage.pyfile_ready(jQuery); } // Tests // Zero-chunk tests QUnit.module("Zero-chunk navigation", { beforeEach: function () { build_fixture("wwww"); } }); QUnit.test("set_sel defaults", function (assert) { coverage.set_sel(2); assert.equal(coverage.sel_begin, 2); assert.equal(coverage.sel_end, 3); }); QUnit.test("No first chunk to select", function (assert) { coverage.to_first_chunk(); assert.expect(0); }); // One-chunk tests $.each([ ['rrrrr', [1,6]], ['r', [1,2]], ['wwrrrr', [3,7]], ['wwrrrrww', [3,7]], ['rrrrww', [1,5]] ], function (i, params) { // Each of these tests uses a fixture with one highlighted chunks. var id = params[0]; var c1 = params[1]; QUnit.module("One-chunk navigation - " + id, { beforeEach: function () { build_fixture(id); } }); QUnit.test("First chunk", function (assert) { coverage.to_first_chunk(); selection_is(assert, c1); }); QUnit.test("Next chunk is first chunk", function (assert) { coverage.to_next_chunk(); selection_is(assert, c1); }); QUnit.test("There is no next chunk", function (assert) { coverage.to_first_chunk(); coverage.to_next_chunk(); selection_is(assert, c1); }); QUnit.test("There is no prev chunk", function (assert) { coverage.to_first_chunk(); coverage.to_prev_chunk(); selection_is(assert, c1); }); }); // Two-chunk tests $.each([ ['rrwwrrrr', [1,3], [5,9]], ['rb', [1,2], [2,3]], ['rbbbbbbbbbb', [1,2], [2,12]], ['rrrrrrrrrrb', [1,11], [11,12]], ['wrrwrrrrw', [2,4], [5,9]], ['rrrbbb', [1,4], [4,7]] ], function (i, params) { // Each of these tests uses a fixture with two highlighted chunks. 
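// For example (illustrative): with ['rrwwrrrr', [1,3], [5,9]] the fixture is
// built from the "rrwwrrrr" color spec, c1 = [1,3] is the first red chunk
// (lines 1-2) and c2 = [5,9] is the second (lines 5-8), using the same
// half-open [sel_begin, sel_end) convention that selection_is() checks.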
var id = params[0]; var c1 = params[1]; var c2 = params[2]; QUnit.module("Two-chunk navigation - " + id, { beforeEach: function () { build_fixture(id); } }); QUnit.test("First chunk", function (assert) { coverage.to_first_chunk(); selection_is(assert, c1); }); QUnit.test("Next chunk is first chunk", function (assert) { coverage.to_next_chunk(); selection_is(assert, c1); }); QUnit.test("Move to next chunk", function (assert) { coverage.to_first_chunk(); coverage.to_next_chunk(); selection_is(assert, c2); }); QUnit.test("Move to first chunk", function (assert) { coverage.to_first_chunk(); coverage.to_next_chunk(); coverage.to_first_chunk(); selection_is(assert, c1); }); QUnit.test("Move to previous chunk", function (assert) { coverage.to_first_chunk(); coverage.to_next_chunk(); coverage.to_prev_chunk(); selection_is(assert, c1); }); QUnit.test("Next doesn't move after last chunk", function (assert) { coverage.to_first_chunk(); coverage.to_next_chunk(); coverage.to_next_chunk(); selection_is(assert, c2); }); QUnit.test("Prev doesn't move before first chunk", function (assert) { coverage.to_first_chunk(); coverage.to_next_chunk(); coverage.to_prev_chunk(); coverage.to_prev_chunk(); selection_is(assert, c1); }); }); QUnit.module("Miscellaneous"); QUnit.test("Jump from a line selected", function (assert) { build_fixture("rrwwrr"); coverage.set_sel(3); coverage.to_next_chunk(); selection_is(assert, [5,7]); }); // Tests of select_line_or_chunk. $.each([ // The data for each test: a spec for the fixture to build, and an array // of the selection that will be selected by select_line_or_chunk for // each line in the fixture. ['rrwwrr', [[1,3], [1,3], [3,4], [4,5], [5,7], [5,7]]], ['rb', [[1,2], [2,3]]], ['r', [[1,2]]], ['w', [[1,2]]], ['www', [[1,2], [2,3], [3,4]]], ['wwwrrr', [[1,2], [2,3], [3,4], [4,7], [4,7], [4,7]]], ['rrrwww', [[1,4], [1,4], [1,4], [4,5], [5,6], [6,7]]], ['rrrbbb', [[1,4], [1,4], [1,4], [4,7], [4,7], [4,7]]] ], function (i, params) { // Each of these tests uses a fixture with two highlighted chunks. var id = params[0]; var sels = params[1]; QUnit.module("Select line or chunk - " + id, { beforeEach: function () { build_fixture(id); } }); $.each(sels, function (i, sel) { i++; QUnit.test("Select line " + i, function (assert) { coverage.select_line_or_chunk(i); raw_selection_is(assert, sel); }); }); }); ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/mixins.py0000644000175100001770000001166300000000000017161 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ Test class mixins Some of these are transitional while working toward pure-pytest style. 
""" from __future__ import annotations import importlib import os import os.path import sys from typing import Any, Callable, Iterable, Iterator, Tuple, cast import pytest from coverage.misc import SysModuleSaver from tests.helpers import change_dir, make_file, remove_tree class PytestBase: """A base class to connect to pytest in a test class hierarchy.""" @pytest.fixture(autouse=True) def connect_to_pytest( self, request: pytest.FixtureRequest, monkeypatch: pytest.MonkeyPatch, ) -> None: """Captures pytest facilities for use by other test helpers.""" # pylint: disable=attribute-defined-outside-init self._pytest_request = request self._monkeypatch = monkeypatch self.setUp() def setUp(self) -> None: """Per-test initialization. Override this as you wish.""" pass def addCleanup(self, fn: Callable[..., None], *args: Any) -> None: """Like unittest's addCleanup: code to call when the test is done.""" self._pytest_request.addfinalizer(lambda: fn(*args)) def set_environ(self, name: str, value: str) -> None: """Set an environment variable `name` to be `value`.""" self._monkeypatch.setenv(name, value) def del_environ(self, name: str) -> None: """Delete an environment variable, unless we set it.""" self._monkeypatch.delenv(name, raising=False) class TempDirMixin: """Provides temp dir and data file helpers for tests.""" # Our own setting: most of these tests run in their own temp directory. # Set this to False in your subclass if you don't want a temp directory # created. run_in_temp_dir = True @pytest.fixture(autouse=True) def _temp_dir(self, tmp_path_factory: pytest.TempPathFactory) -> Iterator[None]: """Create a temp dir for the tests, if they want it.""" if self.run_in_temp_dir: tmpdir = tmp_path_factory.mktemp("t") self.temp_dir = str(tmpdir) with change_dir(self.temp_dir): # Modules should be importable from this temp directory. We don't # use '' because we make lots of different temp directories and # nose's caching importer can get confused. The full path prevents # problems. sys.path.insert(0, os.getcwd()) yield else: yield def make_file( self, filename: str, text: str = "", bytes: bytes = b"", newline: str | None = None, ) -> str: """Make a file. See `tests.helpers.make_file`""" # pylint: disable=redefined-builtin # bytes assert self.run_in_temp_dir, "Only use make_file when running in a temp dir" return make_file(filename, text, bytes, newline) class RestoreModulesMixin: """Auto-restore the imported modules at the end of each test.""" @pytest.fixture(autouse=True) def _module_saving(self) -> Iterable[None]: """Remove modules we imported during the test.""" self._sys_module_saver = SysModuleSaver() try: yield finally: self._sys_module_saver.restore() def clean_local_file_imports(self) -> None: """Clean up the results of calls to `import_local_file`. Use this if you need to `import_local_file` the same file twice in one test. """ # So that we can re-import files, clean them out first. self._sys_module_saver.restore() # Also have to clean out the .pyc files, since the time stamp # resolution is only one second, a changed file might not be # picked up. remove_tree("__pycache__") importlib.invalidate_caches() class StdStreamCapturingMixin: """ Adapter from the pytest capsys fixture to more convenient methods. This doesn't also output to the real stdout, so we probably want to move to "real" capsys when we can use fixtures in test methods. Once you've used one of these methods, the capturing is reset, so another invocation will only return the delta. 
""" @pytest.fixture(autouse=True) def _capcapsys(self, capsys: pytest.CaptureFixture[str]) -> None: """Grab the fixture so our methods can use it.""" self.capsys = capsys def stdouterr(self) -> tuple[str, str]: """Returns (out, err), two strings for stdout and stderr.""" return cast(Tuple[str, str], self.capsys.readouterr()) def stdout(self) -> str: """Returns a string, the captured stdout.""" return self.capsys.readouterr().out def stderr(self) -> str: """Returns a string, the captured stderr.""" return self.capsys.readouterr().err ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/modules/0000755000175100001770000000000000000000000016741 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/modules/aa/0000755000175100001770000000000000000000000017322 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/__init__.py0000644000175100001770000000000500000000000021426 0ustar00runnerdocker00000000000000# aa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/afile.odd.py0000644000175100001770000000001700000000000021517 0ustar00runnerdocker00000000000000# afile.odd.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/afile.py0000644000175100001770000000001300000000000020746 0ustar00runnerdocker00000000000000# afile.py ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/modules/aa/bb/0000755000175100001770000000000000000000000017705 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/bb/__init__.py0000644000175100001770000000000500000000000022011 0ustar00runnerdocker00000000000000# bb ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/bb/bfile.odd.py0000644000175100001770000000001700000000000022103 0ustar00runnerdocker00000000000000# bfile.odd.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/bb/bfile.py0000644000175100001770000000001300000000000021332 0ustar00runnerdocker00000000000000# bfile.py ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/modules/aa/bb/cc/0000755000175100001770000000000000000000000020272 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/bb/cc/__init__.py0000644000175100001770000000000000000000000022371 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/bb/cc/cfile.py0000644000175100001770000000001300000000000021720 0ustar00runnerdocker00000000000000# cfile.py ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 
coverage-7.4.4/tests/modules/aa/bb.odd/0000755000175100001770000000000000000000000020452 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/bb.odd/bfile.py0000644000175100001770000000001300000000000022077 0ustar00runnerdocker00000000000000# bfile.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/aa/zfile.py0000644000175100001770000000001300000000000020777 0ustar00runnerdocker00000000000000# zfile.py ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/modules/ambiguous/0000755000175100001770000000000000000000000020734 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/ambiguous/__init__.py0000644000175100001770000000000000000000000023033 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/modules/ambiguous/pkg1/0000755000175100001770000000000000000000000021576 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/ambiguous/pkg1/__init__.py0000644000175100001770000000003000000000000023700 0ustar00runnerdocker00000000000000print("Ambiguous pkg1") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/ambiguous/pkg1/ambiguous.py0000644000175100001770000000002000000000000024133 0ustar00runnerdocker00000000000000amb = 1 amb = 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/covmod1.py0000644000175100001770000000032400000000000020662 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # covmod1.py: Simplest module for testing. 
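# The two assignments below are the module's only executable statements,
# which keeps the expected line data trivial for tests that import it.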
i = 1 i += 1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.0938148 coverage-7.4.4/tests/modules/namespace_420/0000755000175100001770000000000000000000000021262 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.157815 coverage-7.4.4/tests/modules/namespace_420/sub1/0000755000175100001770000000000000000000000022134 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/namespace_420/sub1/__init__.py0000644000175100001770000000027000000000000024244 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt sub1 = "namespace_420 sub1" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.161815 coverage-7.4.4/tests/modules/pkg1/0000755000175100001770000000000000000000000017603 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/__init__.py0000644000175100001770000000011100000000000021705 0ustar00runnerdocker00000000000000# A simple package for testing with. print(f"pkg1.__init__: {__name__}") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/__main__.py0000644000175100001770000000013400000000000021673 0ustar00runnerdocker00000000000000# Used in the tests for PyRunner import sys print("pkg1.__main__: passed %s" % sys.argv[1]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/p1a.py0000644000175100001770000000044500000000000020641 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt import os, sys # Invoke functions in os and sys so we can see if we measure code there. 
x = sys.getfilesystemencoding() y = os.getcwd() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/p1b.py0000644000175100001770000000025600000000000020642 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt x = 1 y = 2 z = 3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/p1c.py0000644000175100001770000000025600000000000020643 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt a = 1 b = 2 c = 3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/runmod2.py0000644000175100001770000000036200000000000021544 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Used in the tests for PyRunner import sys print("runmod2: passed %s" % sys.argv[1]) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.161815 coverage-7.4.4/tests/modules/pkg1/sub/0000755000175100001770000000000000000000000020374 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/sub/__init__.py0000644000175100001770000000000000000000000022473 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/sub/__main__.py0000644000175100001770000000014000000000000022461 0ustar00runnerdocker00000000000000# Used in the tests for PyRunner import sys print("pkg1.sub.__main__: passed %s" % sys.argv[1]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/sub/ps1a.py0000644000175100001770000000025600000000000021615 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt d = 1 e = 2 f = 3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg1/sub/runmod3.py0000644000175100001770000000036200000000000022336 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Used in the tests for PyRunner import sys print("runmod3: passed %s" % sys.argv[1]) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.161815 coverage-7.4.4/tests/modules/pkg2/0000755000175100001770000000000000000000000017604 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg2/__init__.py0000644000175100001770000000016100000000000021713 0ustar00runnerdocker00000000000000# This is an __init__.py file, with no 
executable statements in it. # This comment shouldn't confuse the parser. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg2/p2a.py0000644000175100001770000000025600000000000020643 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt q = 1 r = 1 s = 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/pkg2/p2b.py0000644000175100001770000000025600000000000020644 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt t = 1 u = 1 v = 1 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.161815 coverage-7.4.4/tests/modules/plugins/0000755000175100001770000000000000000000000020422 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/plugins/__init__.py0000644000175100001770000000000000000000000022521 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/plugins/a_plugin.py0000644000175100001770000000055300000000000022575 0ustar00runnerdocker00000000000000"""A plugin for tests to reference.""" from __future__ import annotations from typing import Any from coverage import CoveragePlugin from coverage.plugin_support import Plugins class Plugin(CoveragePlugin): pass def coverage_init( reg: Plugins, options: Any, # pylint: disable=unused-argument ) -> None: reg.add_file_tracer(Plugin()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/plugins/another.py0000644000175100001770000000100600000000000022431 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """A plugin for tests to reference.""" from __future__ import annotations from typing import Any from coverage import CoveragePlugin from coverage.plugin_support import Plugins class Plugin(CoveragePlugin): pass def coverage_init( reg: Plugins, options: Any, # pylint: disable=unused-argument ) -> None: reg.add_file_tracer(Plugin()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.161815 coverage-7.4.4/tests/modules/process_test/0000755000175100001770000000000000000000000021456 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/process_test/__init__.py0000644000175100001770000000000000000000000023555 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/process_test/try_execfile.py0000644000175100001770000000663000000000000024517 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 
"""Test file for run_python_file. This file is executed two ways:: $ coverage run try_execfile.py and:: $ python try_execfile.py The output is compared to see that the program execution context is the same under coverage and under Python. It is not crucial that the execution be identical, there are some differences that are OK. This program canonicalizes the output to gloss over those differences and get a clean diff. """ from __future__ import annotations import itertools import json import os import sys from typing import Any, List # sys.path varies by execution environments. Some installation libraries # removes duplicate entries from sys.path. So we do that too, since the extra # entries don't affect the running of the program. def same_file(p1: str, p2: str) -> bool: """Determine if `p1` and `p2` refer to the same existing file.""" if not p1: return not p2 if not os.path.exists(p1): return False if not os.path.exists(p2): return False if hasattr(os.path, "samefile"): return os.path.samefile(p1, p2) else: norm1 = os.path.normcase(os.path.normpath(p1)) norm2 = os.path.normcase(os.path.normpath(p2)) return norm1 == norm2 def without_same_files(filenames: List[str]) -> List[str]: """Return the list `filenames` with duplicates (by same_file) removed.""" reduced: List[str] = [] for filename in filenames: if not any(same_file(filename, other) for other in reduced): reduced.append(filename) return reduced cleaned_sys_path = [os.path.normcase(p) for p in without_same_files(sys.path)] DATA = "xyzzy" import __main__ def my_function(a: Any) -> str: """A function to force execution of module-level values.""" return f"my_fn({a!r})" FN_VAL = my_function("fooey") loader = globals().get('__loader__') spec = globals().get('__spec__') # A more compact ad-hoc grouped-by-first-letter list of builtins. CLUMPS = "ABC,DEF,GHI,JKLMN,OPQR,ST,U,VWXYZ_,ab,cd,efg,hij,lmno,pqr,stuvwxyz".split(",") def word_group(w: str) -> int: """Figure out which CLUMP the first letter of w is in.""" for i, clump in enumerate(CLUMPS): if w[0] in clump: return i return 99 builtin_dir = [" ".join(s) for _, s in itertools.groupby(dir(__builtins__), key=word_group)] globals_to_check = { 'os.getcwd': os.getcwd(), '__name__': __name__, '__file__': os.path.normcase(__file__), '__doc__': __doc__, '__builtins__.has_open': hasattr(__builtins__, 'open'), '__builtins__.dir': builtin_dir, '__loader__ exists': loader is not None, '__package__': __package__, '__spec__ exists': spec is not None, 'DATA': DATA, 'FN_VAL': FN_VAL, '__main__.DATA': getattr(__main__, "DATA", "nothing"), 'argv0': sys.argv[0], 'argv1-n': sys.argv[1:], 'path': cleaned_sys_path, } if loader is not None: globals_to_check.update({ '__loader__.fullname': getattr(loader, 'fullname', None) or getattr(loader, 'name', None), }) if spec is not None: globals_to_check.update({ '__spec__.' 
+ aname: getattr(spec, aname) for aname in ['name', 'origin', 'submodule_search_locations', 'parent', 'has_location'] }) print(json.dumps(globals_to_check, indent=4, sort_keys=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/runmod1.py0000644000175100001770000000036200000000000020701 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Used in the tests for PyRunner import sys print("runmod1: passed %s" % sys.argv[1]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/modules/usepkgs.py0000644000175100001770000000053300000000000020775 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt import pkg1.p1a, pkg1.p1b, pkg1.sub import pkg2.p2a, pkg2.p2b import othermods.othera, othermods.otherb import othermods.sub.osa, othermods.sub.osb import ambiguous, ambiguous.pkg1.ambiguous ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.0978148 coverage-7.4.4/tests/moremodules/0000755000175100001770000000000000000000000017624 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.0938148 coverage-7.4.4/tests/moremodules/namespace_420/0000755000175100001770000000000000000000000022145 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.161815 coverage-7.4.4/tests/moremodules/namespace_420/sub2/0000755000175100001770000000000000000000000023020 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/moremodules/namespace_420/sub2/__init__.py0000644000175100001770000000027000000000000025130 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt sub2 = "namespace_420 sub2" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.161815 coverage-7.4.4/tests/moremodules/othermods/0000755000175100001770000000000000000000000021630 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/moremodules/othermods/__init__.py0000644000175100001770000000000000000000000023727 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/moremodules/othermods/othera.py0000644000175100001770000000025000000000000023461 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt o = 1 p = 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/moremodules/othermods/otherb.py0000644000175100001770000000025000000000000023462 0ustar00runnerdocker00000000000000# Licensed under 
the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt q = 3 r = 4 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.161815 coverage-7.4.4/tests/moremodules/othermods/sub/0000755000175100001770000000000000000000000022421 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/moremodules/othermods/sub/__init__.py0000644000175100001770000000000000000000000024520 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/moremodules/othermods/sub/osa.py0000644000175100001770000000025000000000000023552 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt s = 5 t = 6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/moremodules/othermods/sub/osb.py0000644000175100001770000000025000000000000023553 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt u = 7 v = 8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/osinfo.py0000644000175100001770000000636700000000000017154 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """OS information for testing.""" from __future__ import annotations import sys if sys.platform == "win32": # Windows implementation def process_ram() -> int: """How much RAM is this process using? 
(Windows)""" import ctypes from ctypes import wintypes # From: http://lists.ubuntu.com/archives/bazaar-commits/2009-February/011990.html # Updated from: https://stackoverflow.com/a/16204942/14343 class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure): """Used by GetProcessMemoryInfo""" _fields_ = [ ('cb', wintypes.DWORD), ('PageFaultCount', wintypes.DWORD), ('PeakWorkingSetSize', ctypes.c_size_t), ('WorkingSetSize', ctypes.c_size_t), ('QuotaPeakPagedPoolUsage', ctypes.c_size_t), ('QuotaPagedPoolUsage', ctypes.c_size_t), ('QuotaPeakNonPagedPoolUsage', ctypes.c_size_t), ('QuotaNonPagedPoolUsage', ctypes.c_size_t), ('PagefileUsage', ctypes.c_size_t), ('PeakPagefileUsage', ctypes.c_size_t), ('PrivateUsage', ctypes.c_size_t), ] GetProcessMemoryInfo = ctypes.windll.psapi.GetProcessMemoryInfo GetProcessMemoryInfo.argtypes = [ wintypes.HANDLE, ctypes.POINTER(PROCESS_MEMORY_COUNTERS_EX), wintypes.DWORD, ] GetProcessMemoryInfo.restype = wintypes.BOOL GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess GetCurrentProcess.argtypes = [] GetCurrentProcess.restype = wintypes.HANDLE counters = PROCESS_MEMORY_COUNTERS_EX() ret = GetProcessMemoryInfo( GetCurrentProcess(), ctypes.byref(counters), ctypes.sizeof(counters), ) if not ret: # pragma: part covered return 0 # pragma: cant happen return counters.PrivateUsage elif sys.platform.startswith("linux"): # Linux implementation import os _scale = {'kb': 1024, 'mb': 1024*1024} def _VmB(key: str) -> int: """Read the /proc/PID/status file to find memory use.""" try: # Get pseudo file /proc//status with open(f"/proc/{os.getpid()}/status") as t: v = t.read() except OSError: # pragma: cant happen return 0 # non-Linux? # Get VmKey line e.g. 'VmRSS: 9999 kB\n ...' i = v.index(key) vp = v[i:].split(None, 3) if len(vp) < 3: # pragma: part covered return 0 # pragma: cant happen # Convert Vm value to bytes. return int(float(vp[1]) * _scale[vp[2].lower()]) def process_ram() -> int: """How much RAM is this process using? (Linux implementation)""" return _VmB('VmRSS') else: # Generic implementation. def process_ram() -> int: """How much RAM is this process using? 
(stdlib implementation)""" import resource return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/plugin1.py0000644000175100001770000000360500000000000017226 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """A file tracer plugin for test_plugins.py to import.""" from __future__ import annotations import os.path from types import FrameType from typing import Any from coverage import CoveragePlugin, FileReporter, FileTracer from coverage.plugin_support import Plugins from coverage.types import TLineNo class Plugin(CoveragePlugin): """A file tracer plugin to import, so that it isn't in the test's current directory.""" def file_tracer(self, filename: str) -> FileTracer | None: """Trace only files named xyz.py""" if "xyz.py" in filename: return MyFileTracer(filename) return None def file_reporter(self, filename: str) -> FileReporter | str: return MyFileReporter(filename) class MyFileTracer(FileTracer): """A FileTracer emulating a simple static plugin.""" def __init__(self, filename: str) -> None: """Claim that */*xyz.py was actually sourced from /src/*ABC.zz""" self._filename = filename self._source_filename = os.path.join( "/src", os.path.basename(filename.replace("xyz.py", "ABC.zz")), ) def source_filename(self) -> str: return self._source_filename def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: """Map the line number X to X05,X06,X07.""" lineno = frame.f_lineno return lineno*100+5, lineno*100+7 class MyFileReporter(FileReporter): """Dead-simple FileReporter.""" def lines(self) -> set[TLineNo]: return {105, 106, 107, 205, 206, 207} def coverage_init( reg: Plugins, options: Any, # pylint: disable=unused-argument ) -> None: """Called by coverage to initialize the plugins here.""" reg.add_file_tracer(Plugin()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/plugin2.py0000644000175100001770000000433100000000000017224 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """A file tracer plugin for test_plugins.py to import.""" from __future__ import annotations import os.path from types import FrameType from typing import Any from coverage import CoveragePlugin, FileReporter, FileTracer from coverage.plugin_support import Plugins from coverage.types import TLineNo try: import third.render # pylint: disable=unused-import except ImportError: # This plugin is used in a few tests. One of them has the third.render # module, but most don't. We need to import it but not use it, so just # try importing it and it's OK if the module doesn't exist. 
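# For context: a plugin module like this one is enabled through coverage.py
# configuration rather than imported directly. A test might, for example,
# generate a .coveragerc containing (illustrative only):
#
#   [run]
#   plugins = tests.plugin2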
pass class Plugin(CoveragePlugin): """A file tracer plugin for testing.""" def file_tracer(self, filename: str) -> FileTracer | None: if "render.py" in filename: return RenderFileTracer() return None def file_reporter(self, filename: str) -> FileReporter: return MyFileReporter(filename) class RenderFileTracer(FileTracer): """A FileTracer using information from the caller.""" def has_dynamic_source_filename(self) -> bool: return True def dynamic_source_filename( self, filename: str, frame: FrameType, ) -> str | None: if frame.f_code.co_name != "render": return None source_filename: str = os.path.abspath(frame.f_locals['filename']) return source_filename def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: lineno = frame.f_locals['linenum'] return lineno, lineno+1 class MyFileReporter(FileReporter): """A goofy file reporter.""" def lines(self) -> set[TLineNo]: # Goofy test arrangement: claim that the file has as many lines as the # number in its name. num = os.path.basename(self.filename).split(".")[0].split("_")[1] return set(range(1, int(num)+1)) def coverage_init( reg: Plugins, options: Any, # pylint: disable=unused-argument ) -> None: """Called by coverage to initialize the plugins here.""" reg.add_file_tracer(Plugin()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/plugin_config.py0000644000175100001770000000201600000000000020465 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """A configuring plugin for test_plugins.py to import.""" from __future__ import annotations from typing import Any, List, cast import coverage from coverage.plugin_support import Plugins from coverage.types import TConfigurable class Plugin(coverage.CoveragePlugin): """A configuring plugin for testing.""" def configure(self, config: TConfigurable) -> None: """Configure all the things!""" opt_name = "report:exclude_lines" exclude_lines = cast(List[str], config.get_option(opt_name)) exclude_lines.append(r"pragma: custom") exclude_lines.append(r"pragma: or whatever") config.set_option(opt_name, exclude_lines) def coverage_init( reg: Plugins, options: Any, # pylint: disable=unused-argument ) -> None: """Called by coverage to initialize the plugins here.""" reg.add_configurer(Plugin()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1710442639.161815 coverage-7.4.4/tests/qunit/0000755000175100001770000000000000000000000016431 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/select_plugin.py0000644000175100001770000000230400000000000020477 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """ A pytest plugin to select tests by running an external command. See lab/pick.py for how to use pick.py to subset test suites. 
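A hypothetical invocation looks like this (the command is purely illustrative;
anything that prints one pytest node ID per line on stdout will work):

    pytest --select-cmd="shuf -n 50 all_test_ids.txt"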
More about this: https://nedbatchelder.com/blog/202401/randomly_subsetting_test_suites.html """ import subprocess def pytest_addoption(parser): """Add command-line options for controlling the plugin.""" parser.addoption( "--select-cmd", metavar="CMD", action="store", default="", type=str, help="Command to run to get test names", ) def pytest_collection_modifyitems(config, items): """Run an external command to get a list of tests to run.""" select_cmd = config.getoption("--select-cmd") if select_cmd: output = subprocess.check_output(select_cmd, shell="True").decode("utf-8") test_nodeids = { nodeid: seq for seq, nodeid in enumerate(output.splitlines()) } new_items = [item for item in items if item.nodeid in test_nodeids] items[:] = sorted(new_items, key=lambda item: test_nodeids[item.nodeid]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/stress_phystoken.tok0000644000175100001770000000241200000000000021436 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Here's some random Python so that test_tokenize_myself will have some # stressful stuff to try. This file is .tok instead of .py so pylint won't # complain about it, editors won't mess with it, etc. # Some lines are here to reproduce fixed bugs in ast_dump also. first_back = """\ hey there! """ other_back = """ hey \ there """ lots_of_back = """\ hey \ there """ # This next line is supposed to have trailing whitespace: fake_back = """\ ouch """ # Lots of difficulty happens with code like: # # fake_back = """\ # ouch # """ # # Ugh, the edge cases... # What about a comment like this\ "what's this string doing here?" class C(object): def there(): this = 5 + \ 7 that = \ "a continued line" cont1 = "one line of text" + \ "another line of text" a_long_string = \ "part 1" \ "2" \ "3 is longer" def hello(): global x # ast_dump bug print("Hello world!") hello() # ast dump bugs: weird = { **d, **{'c': 7}, 'd': 8, } self.hash.update(b'.') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/stress_phystoken_dos.tok0000644000175100001770000000220400000000000022302 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # Here's some random Python so that test_tokenize_myself will have some # stressful stuff to try. This file is .tok instead of .py so pylint won't # complain about it, editors won't mess with it, etc. first_back = """\ hey there! """ other_back = """ hey \ there """ lots_of_back = """\ hey \ there """ # This next line is supposed to have trailing whitespace: fake_back = """\ ouch """ # Lots of difficulty happens with code like: # # fake_back = """\ # ouch # """ # # Ugh, the edge cases... # What about a comment like this\ "what's this string doing here?" 
class C(object): def there(): this = 5 + \ 7 that = \ "a continued line" cont1 = "one line of text" + \ "another line of text" a_long_string = \ "part 1" \ "2" \ "3 is longer" def hello(): print("Hello world!") hello() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_annotate.py0000644000175100001770000000676500000000000020531 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for annotation from coverage.py.""" from __future__ import annotations import coverage from tests.coveragetest import CoverageTest from tests.goldtest import compare, gold_path class AnnotationGoldTest(CoverageTest): """Test the annotate feature with gold files.""" def make_multi(self) -> None: """Make a few source files we need for the tests.""" self.make_file("multi.py", """\ import a.a import b.b a.a.a(1) b.b.b(2) """) self.make_file("a/__init__.py") self.make_file("a/a.py", """\ def a(x): if x == 1: print("x is 1") else: print("x is not 1") """) self.make_file("b/__init__.py") self.make_file("b/b.py", """\ def b(x): msg = f"x is {x}" print(msg) """) def test_multi(self) -> None: self.make_multi() cov = coverage.Coverage() self.start_import_stop(cov, "multi") cov.annotate() compare(gold_path("annotate/multi"), ".", "*,cover") def test_annotate_dir(self) -> None: self.make_multi() cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "multi") cov.annotate(directory="out_anno_dir") compare(gold_path("annotate/anno_dir"), "out_anno_dir", "*,cover") def test_encoding(self) -> None: self.make_file("utf8.py", """\ # -*- coding: utf-8 -*- # This comment has an accent: รฉ print("spam eggs") """) cov = coverage.Coverage() self.start_import_stop(cov, "utf8") cov.annotate() compare(gold_path("annotate/encodings"), ".", "*,cover") def test_white(self) -> None: self.make_file("white.py", """\ # A test case sent to me by Steve White def f(self): if self==1: pass elif self.m('fred'): pass elif (g==1) and (b==2): pass elif self.m('fred')==True: pass elif ((g==1) and (b==2))==True: pass else: pass def g(x): if x == 1: a = 1 else: a = 2 g(1) def h(x): if 0: #pragma: no cover pass if x == 1: a = 1 else: a = 2 h(2) """) cov = coverage.Coverage() self.start_import_stop(cov, "white") cov.annotate() compare(gold_path("annotate/white"), ".", "*,cover") def test_missing_after_else(self) -> None: self.make_file("mae.py", """\ def f(x): if x == 1: print("1") else: print("2") if f(1): print("nope") if f(2): print("nope") """) cov = coverage.Coverage() self.start_import_stop(cov, "mae") cov.annotate() assert self.stdout() == "1\n2\n" compare(gold_path("annotate/mae"), ".", "*,cover") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_api.py0000644000175100001770000015373000000000000017464 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.py's API.""" from __future__ import annotations import fnmatch import glob import io import os import os.path import re import shutil import sys import textwrap from typing import cast, Callable, Iterable import pytest import coverage from coverage import Coverage, env from coverage.data import line_counts, sorted_lines from 
coverage.exceptions import CoverageException, DataError, NoDataError, NoSource from coverage.files import abs_file, relative_filename from coverage.misc import import_local_file from coverage.types import FilePathClasses, FilePathType, TCovKwargs from tests import testenv from tests.coveragetest import CoverageTest, TESTS_DIR, UsingModulesMixin from tests.helpers import assert_count_equal, assert_coverage_warnings from tests.helpers import change_dir, nice_file, os_sep BAD_SQLITE_REGEX = r"file( is encrypted or)? is not a database" class ApiTest(CoverageTest): """Api-oriented tests for coverage.py.""" def clean_files(self, files: list[str], pats: list[str]) -> list[str]: """Remove names matching `pats` from `files`, a list of file names.""" good = [] for f in files: for pat in pats: if fnmatch.fnmatch(f, pat): break else: good.append(f) return good def assertFiles(self, files: list[str]) -> None: """Assert that the files here are `files`, ignoring the usual junk.""" here = os.listdir(".") here = self.clean_files(here, ["*.pyc", "__pycache__", "*$py.class"]) assert_count_equal(here, files) def test_unexecuted_file(self) -> None: cov = coverage.Coverage() self.make_file("mycode.py", """\ a = 1 b = 2 if b == 3: c = 4 d = 5 """) self.make_file("not_run.py", """\ fooey = 17 """) # Import the Python file, executing it. self.start_import_stop(cov, "mycode") _, statements, missing, _ = cov.analysis("not_run.py") assert statements == [1] assert missing == [1] def test_filenames(self) -> None: self.make_file("mymain.py", """\ import mymod a = 1 """) self.make_file("mymod.py", """\ fooey = 17 """) # Import the Python file, executing it. cov = coverage.Coverage() self.start_import_stop(cov, "mymain") filename, _, _, _ = cov.analysis("mymain.py") assert os.path.basename(filename) == "mymain.py" filename, _, _, _ = cov.analysis("mymod.py") assert os.path.basename(filename) == "mymod.py" filename, _, _, _ = cov.analysis(sys.modules["mymain"]) assert os.path.basename(filename) == "mymain.py" filename, _, _, _ = cov.analysis(sys.modules["mymod"]) assert os.path.basename(filename) == "mymod.py" # Import the Python file, executing it again, once it's been compiled # already. cov = coverage.Coverage() self.start_import_stop(cov, "mymain") filename, _, _, _ = cov.analysis("mymain.py") assert os.path.basename(filename) == "mymain.py" filename, _, _, _ = cov.analysis("mymod.py") assert os.path.basename(filename) == "mymod.py" filename, _, _, _ = cov.analysis(sys.modules["mymain"]) assert os.path.basename(filename) == "mymain.py" filename, _, _, _ = cov.analysis(sys.modules["mymod"]) assert os.path.basename(filename) == "mymod.py" def test_ignore_stdlib(self) -> None: self.make_file("mymain.py", """\ import colorsys a = 1 hls = colorsys.rgb_to_hls(1.0, 0.5, 0.0) """) # Measure without the stdlib. cov1 = coverage.Coverage() assert cov1.config.cover_pylib is False self.start_import_stop(cov1, "mymain") # some statements were marked executed in mymain.py _, statements, missing, _ = cov1.analysis("mymain.py") assert statements != missing # but none were in colorsys.py _, statements, missing, _ = cov1.analysis("colorsys.py") assert statements == missing # Measure with the stdlib. 
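# cover_pylib=True asks coverage.py to measure the standard library too, so
# colorsys.py should now show executed lines as well.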
cov2 = coverage.Coverage(cover_pylib=True) self.start_import_stop(cov2, "mymain") # some statements were marked executed in mymain.py _, statements, missing, _ = cov2.analysis("mymain.py") assert statements != missing # and some were marked executed in colorsys.py _, statements, missing, _ = cov2.analysis("colorsys.py") assert statements != missing def test_include_can_measure_stdlib(self) -> None: self.make_file("mymain.py", """\ import colorsys, random a = 1 r, g, b = [random.random() for _ in range(3)] hls = colorsys.rgb_to_hls(r, g, b) """) # Measure without the stdlib, but include colorsys. cov1 = coverage.Coverage(cover_pylib=False, include=["*/colorsys.py"]) self.start_import_stop(cov1, "mymain") # some statements were marked executed in colorsys.py _, statements, missing, _ = cov1.analysis("colorsys.py") assert statements != missing # but none were in random.py _, statements, missing, _ = cov1.analysis("random.py") assert statements == missing def test_exclude_list(self) -> None: cov = coverage.Coverage() cov.clear_exclude() assert cov.get_exclude_list() == [] cov.exclude("foo") assert cov.get_exclude_list() == ["foo"] cov.exclude("bar") assert cov.get_exclude_list() == ["foo", "bar"] assert cov._exclude_regex('exclude') == "(?:foo)|(?:bar)" cov.clear_exclude() assert cov.get_exclude_list() == [] def test_exclude_partial_list(self) -> None: cov = coverage.Coverage() cov.clear_exclude(which='partial') assert cov.get_exclude_list(which='partial') == [] cov.exclude("foo", which='partial') assert cov.get_exclude_list(which='partial') == ["foo"] cov.exclude("bar", which='partial') assert cov.get_exclude_list(which='partial') == ["foo", "bar"] assert cov._exclude_regex(which='partial') == "(?:foo)|(?:bar)" cov.clear_exclude(which='partial') assert cov.get_exclude_list(which='partial') == [] def test_exclude_and_partial_are_separate_lists(self) -> None: cov = coverage.Coverage() cov.clear_exclude(which='partial') cov.clear_exclude(which='exclude') cov.exclude("foo", which='partial') assert cov.get_exclude_list(which='partial') == ['foo'] assert cov.get_exclude_list(which='exclude') == [] cov.exclude("bar", which='exclude') assert cov.get_exclude_list(which='partial') == ['foo'] assert cov.get_exclude_list(which='exclude') == ['bar'] cov.exclude("p2", which='partial') cov.exclude("e2", which='exclude') assert cov.get_exclude_list(which='partial') == ['foo', 'p2'] assert cov.get_exclude_list(which='exclude') == ['bar', 'e2'] cov.clear_exclude(which='partial') assert cov.get_exclude_list(which='partial') == [] assert cov.get_exclude_list(which='exclude') == ['bar', 'e2'] cov.clear_exclude(which='exclude') assert cov.get_exclude_list(which='partial') == [] assert cov.get_exclude_list(which='exclude') == [] def test_datafile_default(self) -> None: # Default data file behavior: it's .coverage self.make_file("datatest1.py", """\ fooey = 17 """) self.assertFiles(["datatest1.py"]) cov = coverage.Coverage() self.start_import_stop(cov, "datatest1") cov.save() self.assertFiles(["datatest1.py", ".coverage"]) @pytest.mark.parametrize("file_class", FilePathClasses) def test_datafile_specified(self, file_class: FilePathType) -> None: # You can specify the data file name. 
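# The file_class parametrization exercises the accepted kinds of data_file
# argument (a plain string and a path-like object).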
self.make_file("datatest2.py", """\ fooey = 17 """) self.assertFiles(["datatest2.py"]) cov = coverage.Coverage(data_file=file_class("cov.data")) self.start_import_stop(cov, "datatest2") cov.save() self.assertFiles(["datatest2.py", "cov.data"]) @pytest.mark.parametrize("file_class", FilePathClasses) def test_datafile_and_suffix_specified(self, file_class: FilePathType) -> None: # You can specify the data file name and suffix. self.make_file("datatest3.py", """\ fooey = 17 """) self.assertFiles(["datatest3.py"]) cov = coverage.Coverage(data_file=file_class("cov.data"), data_suffix="14") self.start_import_stop(cov, "datatest3") cov.save() self.assertFiles(["datatest3.py", "cov.data.14"]) def test_datafile_from_rcfile(self) -> None: # You can specify the data file name in the .coveragerc file self.make_file("datatest4.py", """\ fooey = 17 """) self.make_file(".coveragerc", """\ [run] data_file = mydata.dat """) self.assertFiles(["datatest4.py", ".coveragerc"]) cov = coverage.Coverage() self.start_import_stop(cov, "datatest4") cov.save() self.assertFiles(["datatest4.py", ".coveragerc", "mydata.dat"]) def test_deep_datafile(self) -> None: self.make_file("datatest5.py", "fooey = 17") self.assertFiles(["datatest5.py"]) cov = coverage.Coverage(data_file="deep/sub/cov.data") self.start_import_stop(cov, "datatest5") cov.save() self.assertFiles(["datatest5.py", "deep"]) self.assert_exists("deep/sub/cov.data") def test_datafile_none(self) -> None: cov = coverage.Coverage(data_file=None) def f1() -> None: # pragma: nested a = 1 # pylint: disable=unused-variable one_line_number = f1.__code__.co_firstlineno + 1 lines = [] def run_one_function(f: Callable[[], None]) -> None: cov.erase() with cov.collect(): f() fs = cov.get_data().measured_files() lines.append(cov.get_data().lines(list(fs)[0])) run_one_function(f1) run_one_function(f1) run_one_function(f1) assert lines == [[one_line_number]] * 3 self.assert_doesnt_exist(".coverage") assert os.listdir(".") == [] def test_empty_reporting(self) -> None: # empty summary reports raise exception, just like the xml report cov = coverage.Coverage() cov.erase() with pytest.raises(NoDataError, match="No data to report."): cov.report() def test_completely_zero_reporting(self) -> None: # https://github.com/nedbat/coveragepy/issues/884 # If nothing was measured, the file-touching didn't happen properly. self.make_file("foo/bar.py", "print('Never run')") self.make_file("test.py", "assert True") with pytest.warns(Warning) as warns: cov = coverage.Coverage(source=["foo"]) self.start_import_stop(cov, "test") cov.report() assert_coverage_warnings(warns, "No data was collected. (no-data-collected)") # Name Stmts Miss Cover # -------------------------------- # foo/bar.py 1 1 0% # -------------------------------- # TOTAL 1 1 0% last = self.last_line_squeezed(self.stdout()) assert "TOTAL 1 1 0%" == last def test_cov4_data_file(self) -> None: cov4_data = ( "!coverage.py: This is a private format, don't read it directly!" 
+ '{"lines":{"/private/tmp/foo.py":[1,5,2,3]}}' ) self.make_file(".coverage", cov4_data) cov = coverage.Coverage() with pytest.raises(DataError, match="Looks like a coverage 4.x data file"): cov.load() cov.erase() def make_code1_code2(self) -> None: """Create the code1.py and code2.py files.""" self.make_file("code1.py", """\ code1 = 1 """) self.make_file("code2.py", """\ code2 = 1 code2 = 2 """) def check_code1_code2(self, cov: Coverage) -> None: """Check the analysis is correct for code1.py and code2.py.""" _, statements, missing, _ = cov.analysis("code1.py") assert statements == [1] assert missing == [] _, statements, missing, _ = cov.analysis("code2.py") assert statements == [1, 2] assert missing == [] def test_start_stop_start_stop(self) -> None: self.make_code1_code2() cov = coverage.Coverage() self.start_import_stop(cov, "code1") cov.save() self.start_import_stop(cov, "code2") self.check_code1_code2(cov) def test_start_save_stop(self) -> None: self.make_code1_code2() cov = coverage.Coverage() with cov.collect(): import_local_file("code1") cov.save() import_local_file("code2") self.check_code1_code2(cov) def test_start_save_nostop(self) -> None: self.make_code1_code2() cov = coverage.Coverage() with cov.collect(): import_local_file("code1") cov.save() import_local_file("code2") self.check_code1_code2(cov) def test_two_getdata_only_warn_once(self) -> None: self.make_code1_code2() cov = coverage.Coverage(source=["."], omit=["code1.py"]) with cov.collect(): import_local_file("code1") # We didn't collect any data, so we should get a warning. with self.assert_warnings(cov, ["No data was collected"]): cov.get_data() # But calling get_data a second time with no intervening activity # won't make another warning. with self.assert_warnings(cov, []): cov.get_data() def test_two_getdata_warn_twice(self) -> None: self.make_code1_code2() cov = coverage.Coverage(source=["."], omit=["code1.py", "code2.py"]) with cov.collect(): import_local_file("code1") # We didn't collect any data, so we should get a warning. with self.assert_warnings(cov, ["No data was collected"]): cov.save() import_local_file("code2") # Calling get_data a second time after tracing some more will warn again. with self.assert_warnings(cov, ["No data was collected"]): cov.get_data() def make_good_data_files(self) -> None: """Make some good data files.""" self.make_code1_code2() cov = coverage.Coverage(data_suffix=True) self.start_import_stop(cov, "code1") cov.save() cov = coverage.Coverage(data_suffix=True) self.start_import_stop(cov, "code2") cov.save() self.assert_file_count(".coverage.*", 2) def test_combining_corrupt_data(self) -> None: # If you combine a corrupt data file, then you will get a warning, # and the file will remain. self.make_good_data_files() self.make_file(".coverage.foo", """La la la, this isn't coverage data!""") cov = coverage.Coverage() warning_regex = ( r"Couldn't use data file '.*\.coverage\.foo': " + BAD_SQLITE_REGEX ) with self.assert_warnings(cov, [warning_regex]): cov.combine() # We got the results from code1 and code2 properly. self.check_code1_code2(cov) # The bad file still exists, but it's the only parallel data file left. 
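# (combine() deletes the data files it merges successfully, since `keep`
# defaults to False, but an unreadable file is left in place for inspection.)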
self.assert_exists(".coverage.foo") self.assert_file_count(".coverage.*", 1) def test_combining_twice(self) -> None: self.make_good_data_files() cov1 = coverage.Coverage() cov1.combine() assert self.stdout() == "" cov1.save() self.check_code1_code2(cov1) self.assert_file_count(".coverage.*", 0) self.assert_exists(".coverage") cov2 = coverage.Coverage() with pytest.raises(NoDataError, match=r"No data to combine"): cov2.combine(strict=True, keep=False) cov3 = coverage.Coverage() cov3.combine() assert self.stdout() == "" # Now the data is empty! _, statements, missing, _ = cov3.analysis("code1.py") assert statements == [1] assert missing == [1] _, statements, missing, _ = cov3.analysis("code2.py") assert statements == [1, 2] assert missing == [1, 2] def test_combining_with_a_used_coverage(self) -> None: # Can you use a coverage object to run one shard of a parallel suite, # and then also combine the data? self.make_code1_code2() cov = coverage.Coverage(data_suffix=True) self.start_import_stop(cov, "code1") cov.save() cov = coverage.Coverage(data_suffix=True) self.start_import_stop(cov, "code2") cov.save() cov.combine() assert self.stdout() == "" self.check_code1_code2(cov) def test_ordered_combine(self) -> None: # https://github.com/nedbat/coveragepy/issues/649 # The order of the [paths] setting used to matter. Now the # resulting path must exist, so the order doesn't matter. def make_files() -> None: self.make_file("plugins/p1.py", "") self.make_file("girder/g1.py", "") self.make_data_file( basename=".coverage.1", lines={ abs_file('ci/girder/g1.py'): range(10), abs_file('ci/girder/plugins/p1.py'): range(10), }, ) def get_combined_filenames() -> set[str]: cov = coverage.Coverage() cov.combine() assert self.stdout() == "" cov.save() data = cov.get_data() filenames = {relative_filename(f).replace("\\", "/") for f in data.measured_files()} return filenames # Case 1: get the order right. make_files() self.make_file(".coveragerc", """\ [paths] plugins = plugins/ ci/girder/plugins/ girder = girder/ ci/girder/ """) assert get_combined_filenames() == {'girder/g1.py', 'plugins/p1.py'} # Case 2: get the order "wrong". make_files() self.make_file(".coveragerc", """\ [paths] girder = girder/ ci/girder/ plugins = plugins/ ci/girder/plugins/ """) assert get_combined_filenames() == {'girder/g1.py', 'plugins/p1.py'} def test_warnings(self) -> None: self.make_file("hello.py", """\ import sys, os print("Hello") """) with pytest.warns(Warning) as warns: cov = coverage.Coverage(source=["sys", "xyzzy", "quux"]) self.start_import_stop(cov, "hello") cov.get_data() assert "Hello\n" == self.stdout() assert_coverage_warnings( warns, "Module sys has no Python source. (module-not-python)", "Module xyzzy was never imported. (module-not-imported)", "Module quux was never imported. (module-not-imported)", "No data was collected. (no-data-collected)", ) def test_warnings_suppressed(self) -> None: self.make_file("hello.py", """\ import sys, os print("Hello") """) self.make_file(".coveragerc", """\ [run] disable_warnings = no-data-collected, module-not-imported """) with pytest.warns(Warning) as warns: cov = coverage.Coverage(source=["sys", "xyzzy", "quux"]) self.start_import_stop(cov, "hello") cov.get_data() assert "Hello\n" == self.stdout() assert_coverage_warnings(warns, "Module sys has no Python source. 
(module-not-python)") # No "module-not-imported" in warns # No "no-data-collected" in warns def test_warn_once(self) -> None: with pytest.warns(Warning) as warns: cov = coverage.Coverage() cov.load() cov._warn("Warning, warning 1!", slug="bot", once=True) cov._warn("Warning, warning 2!", slug="bot", once=True) assert_coverage_warnings(warns, "Warning, warning 1! (bot)") # No "Warning, warning 2!" in warns def test_source_and_include_dont_conflict(self) -> None: # A bad fix made this case fail: https://github.com/nedbat/coveragepy/issues/541 self.make_file("a.py", "import b\na = 1") self.make_file("b.py", "b = 1") self.make_file(".coveragerc", """\ [run] source = . """) # Just like: coverage run a.py cov = coverage.Coverage() self.start_import_stop(cov, "a") cov.save() # Run the equivalent of: coverage report --include=b.py cov = coverage.Coverage(include=["b.py"]) cov.load() # There should be no exception. At one point, report() threw: # CoverageException: --include and --source are mutually exclusive cov.report() expected = textwrap.dedent("""\ Name Stmts Miss Cover --------------------------- b.py 1 0 100% --------------------------- TOTAL 1 0 100% """) assert expected == self.stdout() def test_config_crash(self) -> None: # The internal '[run] _crash' setting can be used to artificially raise # exceptions from inside Coverage. cov = coverage.Coverage() cov.set_option("run:_crash", "test_config_crash") with pytest.raises(Exception, match="Crashing because called by test_config_crash"): cov.start() def test_config_crash_no_crash(self) -> None: # '[run] _crash' really checks the call stack. cov = coverage.Coverage() cov.set_option("run:_crash", "not_my_caller") cov.start() cov.stop() def test_run_debug_sys(self) -> None: # https://github.com/nedbat/coveragepy/issues/907 cov = coverage.Coverage() with cov.collect(): d = dict(cov.sys_info()) assert cast(str, d['data_file']).endswith(".coverage") @pytest.mark.skipif(not testenv.DYN_CONTEXTS, reason="No dynamic contexts with this core.") class SwitchContextTest(CoverageTest): """Tests of the .switch_context() method.""" def make_test_files(self) -> None: """Create a simple file representing a method with two tests.""" self.make_file("testsuite.py", """\ def timestwo(x): return x*2 def test_multiply_zero(): assert timestwo(0) == 0 def test_multiply_six(): assert timestwo(6) == 12 """) def test_switch_context_testrunner(self) -> None: # This test simulates a coverage-aware test runner, # measuring labeled coverage via public API self.make_test_files() # Test runner starts cov = coverage.Coverage() with cov.collect(): # Imports the test suite suite = import_local_file("testsuite") # Measures test case 1 cov.switch_context('multiply_zero') suite.test_multiply_zero() # Measures test case 2 cov.switch_context('multiply_six') suite.test_multiply_six() # Runner finishes cov.save() # Labeled data is collected data = cov.get_data() assert ['', 'multiply_six', 'multiply_zero'] == sorted(data.measured_contexts()) filenames = self.get_measured_filenames(data) suite_filename = filenames['testsuite.py'] data.set_query_context("multiply_six") assert [2, 8] == sorted_lines(data, suite_filename) data.set_query_context("multiply_zero") assert [2, 5] == sorted_lines(data, suite_filename) def test_switch_context_with_static(self) -> None: # This test simulates a coverage-aware test runner, # measuring labeled coverage via public API, # with static label prefix. 
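        # The static context passed to Coverage(context=...) is prefixed to
        # every dynamic label from switch_context(), joined with "|".  A
        # minimal sketch of the pattern exercised below ("case1" is just an
        # example label):
        #   cov = coverage.Coverage(context="mysuite")
        #   with cov.collect():
        #       cov.switch_context("case1")   # recorded as "mysuite|case1"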
self.make_test_files() # Test runner starts cov = coverage.Coverage(context="mysuite") with cov.collect(): # Imports the test suite suite = import_local_file("testsuite") # Measures test case 1 cov.switch_context('multiply_zero') suite.test_multiply_zero() # Measures test case 2 cov.switch_context('multiply_six') suite.test_multiply_six() # Runner finishes cov.save() # Labeled data is collected data = cov.get_data() expected = ['mysuite', 'mysuite|multiply_six', 'mysuite|multiply_zero'] assert expected == sorted(data.measured_contexts()) filenames = self.get_measured_filenames(data) suite_filename = filenames['testsuite.py'] data.set_query_context("mysuite|multiply_six") assert [2, 8] == sorted_lines(data, suite_filename) data.set_query_context("mysuite|multiply_zero") assert [2, 5] == sorted_lines(data, suite_filename) def test_dynamic_context_conflict(self) -> None: cov = coverage.Coverage(source=["."]) cov.set_option("run:dynamic_context", "test_function") with cov.collect(): with pytest.warns(Warning) as warns: # Switch twice, but only get one warning. cov.switch_context("test1") cov.switch_context("test2") assert_coverage_warnings(warns, "Conflicting dynamic contexts (dynamic-conflict)") def test_unknown_dynamic_context(self) -> None: cov = coverage.Coverage() cov.set_option("run:dynamic_context", "no-idea") with pytest.raises(Exception, match="Don't understand dynamic_context setting: 'no-idea'"): cov.start() def test_switch_context_unstarted(self) -> None: # Coverage must be started to switch context msg = "Cannot switch context, coverage is not started" cov = coverage.Coverage() with pytest.raises(CoverageException, match=msg): cov.switch_context("test1") with cov.collect(): cov.switch_context("test2") with pytest.raises(CoverageException, match=msg): cov.switch_context("test3") class CurrentInstanceTest(CoverageTest): """Tests of Coverage.current().""" run_in_temp_dir = False def assert_current_is_none(self, current: Coverage | None) -> None: """Assert that a current we expect to be None is correct.""" # During meta-coverage, the None answers will be wrong because the # overall coverage measurement will still be on the current-stack. # Since we know they will be wrong, and we have non-meta test runs # also, don't assert them. if not env.METACOV: assert current is None def test_current(self) -> None: cur0 = coverage.Coverage.current() self.assert_current_is_none(cur0) # Making an instance doesn't make it current. cov = coverage.Coverage() cur1 = coverage.Coverage.current() self.assert_current_is_none(cur1) assert cur0 is cur1 # Starting the instance makes it current. with cov.collect(): cur2 = coverage.Coverage.current() assert cur2 is cov # Stopping the instance makes current None again. cur3 = coverage.Coverage.current() self.assert_current_is_none(cur3) assert cur0 is cur3 class NamespaceModuleTest(UsingModulesMixin, CoverageTest): """Test PEP-420 namespace modules.""" def test_explicit_namespace_module(self) -> None: self.make_file("main.py", "import namespace_420\n") cov = coverage.Coverage() self.start_import_stop(cov, "main") with pytest.raises(CoverageException, match=r"Module .* has no file"): cov.analysis(sys.modules['namespace_420']) def test_bug_572(self) -> None: self.make_file("main.py", "import namespace_420\n") # Use source=namespace_420 to trigger the check that used to fail, # and use source=main so that something is measured. 
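        # namespace_420 is a PEP 420 namespace package: it has no __init__.py
        # and so no source file of its own, which is what the check that used
        # to fail tripped over.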
cov = coverage.Coverage(source=["namespace_420", "main"]) with self.assert_warnings(cov, []): self.start_import_stop(cov, "main") cov.report() class IncludeOmitTestsMixin(UsingModulesMixin, CoverageTest): """Test methods for coverage methods taking include and omit.""" # An abstract method for subclasses to define, to appease mypy. def coverage_usepkgs(self, **kwargs_unused: TCovKwargs) -> Iterable[str]: """Run coverage on usepkgs, return a line summary. kwargs are for Coverage(**kwargs).""" raise NotImplementedError() # pragma: not covered def filenames_in(self, summary: Iterable[str], filenames: str) -> None: """Assert the `filenames` are in the `summary`.""" for filename in filenames.split(): assert filename in summary def filenames_not_in(self, summary: Iterable[str], filenames: str) -> None: """Assert the `filenames` are not in the `summary`.""" for filename in filenames.split(): assert filename not in summary def test_nothing_specified(self) -> None: result = self.coverage_usepkgs() self.filenames_in(result, "p1a p1b p2a p2b othera otherb osa osb") self.filenames_not_in(result, "p1c") # Because there was no source= specified, we don't search for # un-executed files. def test_include(self) -> None: result = self.coverage_usepkgs(include=["*/p1a.py"]) self.filenames_in(result, "p1a") self.filenames_not_in(result, "p1b p1c p2a p2b othera otherb osa osb") def test_include_2(self) -> None: result = self.coverage_usepkgs(include=["*a.py"]) self.filenames_in(result, "p1a p2a othera osa") self.filenames_not_in(result, "p1b p1c p2b otherb osb") def test_include_as_string(self) -> None: result = self.coverage_usepkgs(include="*a.py") self.filenames_in(result, "p1a p2a othera osa") self.filenames_not_in(result, "p1b p1c p2b otherb osb") def test_omit(self) -> None: result = self.coverage_usepkgs(omit=["*/p1a.py"]) self.filenames_in(result, "p1b p2a p2b") self.filenames_not_in(result, "p1a p1c") def test_omit_2(self) -> None: result = self.coverage_usepkgs(omit=["*a.py"]) self.filenames_in(result, "p1b p2b otherb osb") self.filenames_not_in(result, "p1a p1c p2a othera osa") def test_omit_as_string(self) -> None: result = self.coverage_usepkgs(omit="*a.py") self.filenames_in(result, "p1b p2b otherb osb") self.filenames_not_in(result, "p1a p1c p2a othera osa") def test_omit_and_include(self) -> None: result = self.coverage_usepkgs(include=["*/p1*"], omit=["*/p1a.py"]) self.filenames_in(result, "p1b") self.filenames_not_in(result, "p1a p1c p2a p2b") class SourceIncludeOmitTest(IncludeOmitTestsMixin, CoverageTest): """Test using `source`, `include`, and `omit` when measuring code.""" def setUp(self) -> None: super().setUp() # These tests use the TESTS_DIR/modules files, but they cd into it. To # keep tests from cross-contaminating, we make a copy of the files. # Since we need to import from there, we also add it to the beginning # of sys.path. shutil.copytree( nice_file(TESTS_DIR, "modules"), "tests_dir_modules", ignore=shutil.ignore_patterns("__pycache__"), ) sys.path.insert(0, abs_file("tests_dir_modules")) def coverage_usepkgs_counts(self, **kwargs: TCovKwargs) -> dict[str, int]: """Run coverage on usepkgs and return a line summary. Arguments are passed to the `coverage.Coverage` constructor. 
""" cov = coverage.Coverage(**kwargs) with cov.collect(): import usepkgs # pylint: disable=import-error, unused-import with self.assert_warnings(cov, []): data = cov.get_data() summary = line_counts(data) for k, v in list(summary.items()): assert k.endswith(".py") summary[k[:-3]] = v return summary def coverage_usepkgs(self, **kwargs: TCovKwargs) -> Iterable[str]: summary = self.coverage_usepkgs_counts(**kwargs) return list(summary) def test_source_include_exclusive(self) -> None: cov = coverage.Coverage(source=["pkg1"], include=["pkg2"]) with self.assert_warnings(cov, ["--include is ignored because --source is set"]): cov.start() cov.stop() def test_source_package_as_package(self) -> None: assert not os.path.isdir("pkg1") lines = self.coverage_usepkgs_counts(source=["pkg1"]) self.filenames_in(list(lines), "p1a p1b") self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb") # Because source= was specified, we do search for un-executed files. assert lines['p1c'] == 0 def test_source_package_as_dir(self) -> None: os.chdir("tests_dir_modules") assert os.path.isdir("pkg1") lines = self.coverage_usepkgs_counts(source=["pkg1"]) self.filenames_in(list(lines), "p1a p1b") self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb") # Because source= was specified, we do search for un-executed files. assert lines['p1c'] == 0 def test_source_package_dotted_sub(self) -> None: lines = self.coverage_usepkgs_counts(source=["pkg1.sub"]) self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb") # Because source= was specified, we do search for un-executed files. assert lines['runmod3'] == 0 def test_source_package_dotted_p1b(self) -> None: lines = self.coverage_usepkgs_counts(source=["pkg1.p1b"]) self.filenames_in(list(lines), "p1b") self.filenames_not_in(list(lines), "p1a p1c p2a p2b othera otherb osa osb") def test_source_package_part_omitted(self) -> None: # https://github.com/nedbat/coveragepy/issues/218 # Used to be if you omitted something executed and inside the source, # then after it was executed but not recorded, it would be found in # the search for un-executed files, and given a score of 0%. # The omit arg is by path, so need to be in the modules directory. os.chdir("tests_dir_modules") lines = self.coverage_usepkgs_counts(source=["pkg1"], omit=["pkg1/p1b.py"]) self.filenames_in(list(lines), "p1a") self.filenames_not_in(list(lines), "p1b") assert lines['p1c'] == 0 def test_source_package_as_package_part_omitted(self) -> None: # https://github.com/nedbat/coveragepy/issues/638 lines = self.coverage_usepkgs_counts(source=["pkg1"], omit=["*/p1b.py"]) self.filenames_in(list(lines), "p1a") self.filenames_not_in(list(lines), "p1b") assert lines['p1c'] == 0 def test_ambiguous_source_package_as_dir(self) -> None: # pkg1 is a directory and a pkg, since we cd into tests_dir_modules/ambiguous os.chdir("tests_dir_modules/ambiguous") # pkg1 defaults to directory because tests_dir_modules/ambiguous/pkg1 exists lines = self.coverage_usepkgs_counts(source=["pkg1"]) self.filenames_in(list(lines), "ambiguous") self.filenames_not_in(list(lines), "p1a p1b p1c") def test_ambiguous_source_package_as_package(self) -> None: # pkg1 is a directory and a pkg, since we cd into tests_dir_modules/ambiguous os.chdir("tests_dir_modules/ambiguous") lines = self.coverage_usepkgs_counts(source_pkgs=["pkg1"]) self.filenames_in(list(lines), "p1a p1b") self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb ambiguous") # Because source= was specified, we do search for un-executed files. 
assert lines['p1c'] == 0 class ReportIncludeOmitTest(IncludeOmitTestsMixin, CoverageTest): """Tests of the report include/omit functionality.""" def coverage_usepkgs(self, **kwargs: TCovKwargs) -> Iterable[str]: """Try coverage.report().""" cov = coverage.Coverage() with cov.collect(): import usepkgs # pylint: disable=import-error, unused-import report = io.StringIO() cov.report(file=report, **kwargs) return report.getvalue() class XmlIncludeOmitTest(IncludeOmitTestsMixin, CoverageTest): """Tests of the XML include/omit functionality. This also takes care of the HTML and annotate include/omit, by virtue of the structure of the code. """ def coverage_usepkgs(self, **kwargs: TCovKwargs) -> Iterable[str]: """Try coverage.xml_report().""" cov = coverage.Coverage() with cov.collect(): import usepkgs # pylint: disable=import-error, unused-import cov.xml_report(outfile="-", **kwargs) return self.stdout() class AnalysisTest(CoverageTest): """Test the numerical analysis of results.""" def test_many_missing_branches(self) -> None: cov = coverage.Coverage(branch=True) self.make_file("missing.py", """\ def fun1(x): if x == 1: print("one") else: print("not one") print("done") # pragma: nocover def fun2(x): print("x") fun2(3) """) # Import the Python file, executing it. self.start_import_stop(cov, "missing") nums = cov._analyze("missing.py").numbers assert nums.n_files == 1 assert nums.n_statements == 7 assert nums.n_excluded == 1 assert nums.n_missing == 3 assert nums.n_branches == 2 assert nums.n_partial_branches == 0 assert nums.n_missing_branches == 2 class TestRunnerPluginTest(CoverageTest): """Test that the API works properly the way various third-party plugins call it. We don't actually use the plugins, but these tests call the API the same way they do. """ def pretend_to_be_nose_with_cover(self, erase: bool = False, cd: bool = False) -> None: """This is what the nose --with-cover plugin does.""" self.make_file("no_biggie.py", """\ a = 1 b = 2 if b == 1: c = 4 """) self.make_file("sub/hold.txt", "") cov = coverage.Coverage() if erase: cov.combine() cov.erase() cov.load() self.start_import_stop(cov, "no_biggie") if cd: os.chdir("sub") cov.combine() cov.save() cov.report(["no_biggie.py"], show_missing=True) assert self.stdout() == textwrap.dedent("""\ Name Stmts Miss Cover Missing -------------------------------------------- no_biggie.py 4 1 75% 4 -------------------------------------------- TOTAL 4 1 75% """) if cd: os.chdir("..") def test_nose_plugin(self) -> None: self.pretend_to_be_nose_with_cover() def test_nose_plugin_with_erase(self) -> None: self.pretend_to_be_nose_with_cover(erase=True) def test_nose_plugin_with_cd(self) -> None: # https://github.com/nedbat/coveragepy/issues/916 self.pretend_to_be_nose_with_cover(cd=True) def pretend_to_be_pytestcov(self, append: bool) -> None: """Act like pytest-cov.""" self.make_file("prog.py", """\ a = 1 b = 2 if b == 1: c = 4 """) self.make_file(".coveragerc", """\ [run] parallel = True source = . 
""") cov = coverage.Coverage(source=None, branch=None, config_file='.coveragerc') if append: cov.load() else: cov.erase() self.start_import_stop(cov, "prog") cov.combine() cov.save() report = io.StringIO() cov.report(show_missing=None, ignore_errors=True, file=report, skip_covered=None, skip_empty=None) assert report.getvalue() == textwrap.dedent("""\ Name Stmts Miss Cover ----------------------------- prog.py 4 1 75% ----------------------------- TOTAL 4 1 75% """) self.assert_file_count(".coverage", 0) self.assert_file_count(".coverage.*", 1) def test_pytestcov_parallel(self) -> None: self.pretend_to_be_pytestcov(append=False) def test_pytestcov_parallel_append(self) -> None: self.pretend_to_be_pytestcov(append=True) class ImmutableConfigTest(CoverageTest): """Check that reporting methods don't permanently change the configuration.""" def test_config_doesnt_change(self) -> None: self.make_file("simple.py", "a = 1") cov = coverage.Coverage() self.start_import_stop(cov, "simple") assert cov.get_option("report:show_missing") is False cov.report(show_missing=True) assert cov.get_option("report:show_missing") is False class RelativePathTest(CoverageTest): """Tests of the relative_files setting.""" def test_moving_stuff(self) -> None: # When using absolute file names, moving the source around results in # "No source for code" errors while reporting. self.make_file("foo.py", "a = 1") cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "foo") res = cov.report() assert res == 100 expected = re.escape("No source for code: '{}'.".format(abs_file("foo.py"))) os.remove("foo.py") self.make_file("new/foo.py", "a = 1") shutil.move(".coverage", "new/.coverage") with change_dir("new"): cov = coverage.Coverage() cov.load() with pytest.raises(NoSource, match=expected): cov.report() def test_moving_stuff_with_relative(self) -> None: # When using relative file names, moving the source around is fine. 
self.make_file("foo.py", "a = 1") self.make_file(".coveragerc", """\ [run] relative_files = true """) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "foo") res = cov.report() assert res == 100 os.remove("foo.py") self.make_file("new/foo.py", "a = 1") shutil.move(".coverage", "new/.coverage") shutil.move(".coveragerc", "new/.coveragerc") with change_dir("new"): cov = coverage.Coverage() cov.load() res = cov.report() assert res == 100 def test_combine_relative(self) -> None: self.make_file("foo.py", """\ import mod a = 1 """) self.make_file("lib/mod/__init__.py", "x = 1") self.make_file(".coveragerc", """\ [run] relative_files = true """) sys.path.append("lib") cov = coverage.Coverage(source=["."], data_suffix=True) self.start_import_stop(cov, "foo") cov.save() self.make_file("dir2/bar.py", "a = 1") self.make_file("dir2/.coveragerc", """\ [run] relative_files = true """) with change_dir("dir2"): cov = coverage.Coverage(source=["."], data_suffix=True) self.start_import_stop(cov, "bar") cov.save() shutil.move(glob.glob(".coverage.*")[0], "..") self.make_file("foo.py", "a = 1") self.make_file("bar.py", "a = 1") self.make_file("modsrc/__init__.py", "x = 1") self.make_file(".coveragerc", """\ [run] relative_files = true [paths] source = modsrc */mod """) cov = coverage.Coverage() cov.combine() cov.save() cov = coverage.Coverage() cov.load() files = cov.get_data().measured_files() assert files == {'foo.py', 'bar.py', os_sep('modsrc/__init__.py')} res = cov.report() assert res == 100 def test_combine_no_suffix_multiprocessing(self) -> None: self.make_file(".coveragerc", """\ [run] branch = True """) cov = coverage.Coverage( config_file=".coveragerc", concurrency="multiprocessing", data_suffix=False, ) cov.start() cov.stop() # The warning isn't the point of this test, but suppress it. with pytest.warns(Warning) as warns: cov.combine() assert_coverage_warnings(warns, "No data was collected. (no-data-collected)") cov.save() self.assert_file_count(".coverage.*", 0) self.assert_exists(".coverage") def test_files_up_one_level(self) -> None: # https://github.com/nedbat/coveragepy/issues/1280 self.make_file("src/mycode.py", """\ def foo(): return 17 """) self.make_file("test/test_it.py", """\ from src.mycode import foo assert foo() == 17 """) self.make_file("test/.coveragerc", """\ [run] parallel = True relative_files = True [paths] source = ../src/ */src """) os.chdir("test") sys.path.insert(0, "..") cov1 = coverage.Coverage() self.start_import_stop(cov1, "test_it") cov1.save() cov2 = coverage.Coverage() cov2.combine() cov3 = coverage.Coverage() cov3.load() report = self.get_report(cov3) assert self.last_line_squeezed(report) == "TOTAL 4 0 100%" class CombiningTest(CoverageTest): """More tests of combining data.""" B_LINES = {"b_or_c.py": [1, 2, 3, 4, 8, 9]} C_LINES = {"b_or_c.py": [1, 2, 3, 6, 7, 8, 9]} def make_b_or_c_py(self) -> None: """Create b_or_c.py, used in a few of these tests.""" # "b_or_c.py b" will run 6 lines. # "b_or_c.py c" will run 7 lines. # Together, they run 8 lines. self.make_file("b_or_c.py", """\ import sys a = 2 if sys.argv[1] == 'b': b = 4 else: c = 6 c2 = 7 d = 8 print('done') """) def test_combine_parallel_data(self) -> None: self.make_b_or_c_py() self.make_data_file(".coverage.b", lines=self.B_LINES) self.make_data_file(".coverage.c", lines=self.C_LINES) # Combine the parallel coverage data files into .coverage . cov = coverage.Coverage() cov.combine(strict=True) self.assert_exists(".coverage") # After combining, there should be only the .coverage file. 
self.assert_file_count(".coverage.*", 0) # Read the coverage file and see that b_or_c.py has all 8 lines # executed. data = coverage.CoverageData() data.read() assert line_counts(data)['b_or_c.py'] == 8 # Running combine again should fail, because there are no parallel data # files to combine. cov = coverage.Coverage() with pytest.raises(NoDataError, match=r"No data to combine"): cov.combine(strict=True) # And the originally combined data is still there. data = coverage.CoverageData() data.read() assert line_counts(data)['b_or_c.py'] == 8 def test_combine_parallel_data_with_a_corrupt_file(self) -> None: self.make_b_or_c_py() self.make_data_file(".coverage.b", lines=self.B_LINES) self.make_data_file(".coverage.c", lines=self.C_LINES) # Make a bogus data file. self.make_file(".coverage.bad", "This isn't a coverage data file.") # Combine the parallel coverage data files into .coverage . cov = coverage.Coverage() with pytest.warns(Warning) as warns: cov.combine(strict=True) assert_coverage_warnings( warns, re.compile( r"Couldn't use data file '.*[/\\]\.coverage\.bad': " + BAD_SQLITE_REGEX, ), ) # After combining, those two should be the only data files. self.assert_exists(".coverage") self.assert_exists(".coverage.bad") self.assert_file_count(".coverage.*", 1) # Read the coverage file and see that b_or_c.py has all 8 lines # executed. data = coverage.CoverageData() data.read() assert line_counts(data)['b_or_c.py'] == 8 def test_combine_no_usable_files(self) -> None: # https://github.com/nedbat/coveragepy/issues/629 self.make_b_or_c_py() self.make_data_file(".coverage", lines=self.B_LINES) # Make bogus data files. self.make_file(".coverage.bad1", "This isn't a coverage data file.") self.make_file(".coverage.bad2", "This isn't a coverage data file either.") # Combine the parallel coverage data files into .coverage, but nothing is readable. cov = coverage.Coverage() with pytest.warns(Warning) as warns: with pytest.raises(NoDataError, match=r"No usable data files"): cov.combine(strict=True) warn_rx = re.compile( r"Couldn't use data file '.*[/\\]\.coverage\.bad[12]': " + BAD_SQLITE_REGEX, ) assert_coverage_warnings(warns, warn_rx, warn_rx) # After combining, we should have a main file and two parallel files. self.assert_exists(".coverage") self.assert_exists(".coverage.bad1") self.assert_exists(".coverage.bad2") self.assert_file_count(".coverage.*", 2) # Read the coverage file and see that b_or_c.py has 6 lines # executed (we only did b, not c). data = coverage.CoverageData() data.read() assert line_counts(data)['b_or_c.py'] == 6 def test_combine_parallel_data_in_two_steps(self) -> None: self.make_b_or_c_py() self.make_data_file(".coverage.b", lines=self.B_LINES) # Combine the (one) parallel coverage data file into .coverage . cov = coverage.Coverage() cov.combine(strict=True) self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) self.make_data_file(".coverage.c", lines=self.C_LINES) self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 1) # Combine the parallel coverage data files into .coverage . cov = coverage.Coverage() cov.load() cov.combine(strict=True) # After combining, there should be only the .coverage file. self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) # Read the coverage file and see that b_or_c.py has all 8 lines # executed. 
data = coverage.CoverageData() data.read() assert line_counts(data)['b_or_c.py'] == 8 def test_combine_parallel_data_no_append(self) -> None: self.make_b_or_c_py() self.make_data_file(".coverage.b", lines=self.B_LINES) # Combine the (one) parallel coverage data file into .coverage . cov = coverage.Coverage() cov.combine(strict=True) self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) self.make_data_file(".coverage.c", lines=self.C_LINES) # Combine the parallel coverage data files into .coverage, but don't # use the data in .coverage already. cov = coverage.Coverage() cov.combine(strict=True) # After combining, there should be only the .coverage file. self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) # Read the coverage file and see that b_or_c.py has only 7 lines # because we didn't keep the data from running b. data = coverage.CoverageData() data.read() assert line_counts(data)['b_or_c.py'] == 7 def test_combine_parallel_data_keep(self) -> None: self.make_b_or_c_py() self.make_data_file(".coverage.b", lines=self.B_LINES) self.make_data_file(".coverage.c", lines=self.C_LINES) # Combine the parallel coverage data files into .coverage with the keep flag. cov = coverage.Coverage() cov.combine(strict=True, keep=True) # After combining, the .coverage file & the original combined file should still be there. self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 2) @pytest.mark.parametrize("abs_order, rel_order", [(1, 2), (2, 1)]) def test_combine_absolute_then_relative_1752(self, abs_order: int, rel_order: int) -> None: # https://github.com/nedbat/coveragepy/issues/1752 # If we're combining a relative data file and an absolute data file, # the absolutes were made relative only if the relative file name was # encountered first. Test combining in both orders and check that the # absolute file name is properly relative in either order. FILE = "sub/myprog.py" self.make_file(FILE, "a = 1") self.make_data_file(suffix=f"{abs_order}.abs", lines={abs_file(FILE): [1]}) self.make_data_file(suffix=f"{rel_order}.rel", lines={FILE: [1]}) self.make_file(".coveragerc", "[run]\nrelative_files = True\n") cov = coverage.Coverage() cov.combine() data = coverage.CoverageData() data.read() assert {os_sep("sub/myprog.py")} == data.measured_files() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_arcs.py0000644000175100001770000017347300000000000017651 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.py's arc measurement.""" from __future__ import annotations import pytest from tests.coveragetest import CoverageTest from tests.helpers import assert_count_equal, xfail_pypy38 import coverage from coverage import env from coverage.data import sorted_lines from coverage.files import abs_file # When a try block ends, does the finally block (incorrectly) jump to the # last statement, or does it go the line outside the try block that it # should? 
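# A reminder of the arcz notation used by check_coverage() throughout this
# file: each space-separated pair is one arc, "." means module entry/exit,
# "1"-"9" are lines 1-9, "A" and up are lines 10 and higher, and a "-" before
# a character means entry to (or exit from) the nested code object defined at
# that line.  A minimal sketch:
#   self.check_coverage("a = 1\nb = 2\n", arcz=".1 12 2.")
#   # expected arcs: (-1, 1), (1, 2), (2, -1)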
xfail_pypy_3882 = pytest.mark.xfail( env.PYPY and env.PYVERSION[:2] == (3, 8) and env.PYPYVERSION >= (7, 3, 11), reason="https://foss.heptapod.net/pypy/pypy/-/issues/3882", ) class SimpleArcTest(CoverageTest): """Tests for coverage.py's arc measurement.""" def test_simple_sequence(self) -> None: self.check_coverage("""\ a = 1 b = 2 """, arcz=".1 12 2.", ) self.check_coverage("""\ a = 1 b = 3 """, arcz=".1 13 3.", ) line1 = 1 if env.PYBEHAVIOR.module_firstline_1 else 2 self.check_coverage("""\ a = 2 b = 3 c = 5 """, arcz=f"-{line1}2 23 35 5-{line1}", ) def test_function_def(self) -> None: self.check_coverage("""\ def foo(): a = 2 foo() """, arcz=".1 .2 14 2. 4.", ) def test_if(self) -> None: self.check_coverage("""\ a = 1 if len([]) == 0: a = 3 assert a == 3 """, arcz=".1 12 23 24 34 4.", arcz_missing="24", ) self.check_coverage("""\ a = 1 if len([]) == 1: a = 3 assert a == 1 """, arcz=".1 12 23 24 34 4.", arcz_missing="23 34", ) def test_if_else(self) -> None: self.check_coverage("""\ if len([]) == 0: a = 2 else: a = 4 assert a == 2 """, arcz=".1 12 25 14 45 5.", arcz_missing="14 45", ) self.check_coverage("""\ if len([]) == 1: a = 2 else: a = 4 assert a == 4 """, arcz=".1 12 25 14 45 5.", arcz_missing="12 25", ) def test_compact_if(self) -> None: self.check_coverage("""\ a = 1 if len([]) == 0: a = 2 assert a == 2 """, arcz=".1 12 23 3.", ) self.check_coverage("""\ def fn(x): if x % 2: return True return False a = fn(1) assert a is True """, arcz=".1 14 45 5. .2 2. 23 3.", arcz_missing="23 3.", ) def test_multiline(self) -> None: self.check_coverage("""\ a = ( 2 + 3 ) b = \\ 6 """, arcz=".1 15 5.", ) def test_if_return(self) -> None: self.check_coverage("""\ def if_ret(a): if a: return 3 b = 4 return 5 x = if_ret(0) + if_ret(1) assert x == 8 """, arcz=".1 16 67 7. .2 23 24 3. 45 5.", ) def test_dont_confuse_exit_and_else(self) -> None: self.check_coverage("""\ def foo(): if foo: a = 3 else: a = 5 return a assert foo() == 3 # 7 """, arcz=".1 17 7. .2 23 36 25 56 6.", arcz_missing="25 56", ) self.check_coverage("""\ def foo(): if foo: a = 3 else: a = 5 foo() # 6 """, arcz=".1 16 6. .2 23 3. 25 5.", arcz_missing="25 5.", ) def test_what_is_the_sound_of_no_lines_clapping(self) -> None: if env.PYBEHAVIOR.empty_is_empty: arcz_missing=".1 1." else: arcz_missing="" self.check_coverage("""\ # __init__.py """, arcz=".1 1.", arcz_missing=arcz_missing, ) def test_bug_1184(self) -> None: self.check_coverage("""\ def foo(x): if x: try: 1/(x - 1) except ZeroDivisionError: pass return x # 7 for i in range(3): # 9 foo(i) """, arcz=".1 19 9-1 .2 23 27 34 47 56 67 7-1 9A A9", arcz_unpredicted="45", ) class WithTest(CoverageTest): """Arc-measuring tests involving context managers.""" def test_with(self) -> None: arcz = ".1 .2 23 34 4. 16 6." if env.PYBEHAVIOR.exit_through_with: arcz = arcz.replace("4.", "42 2.") self.check_coverage("""\ def example(): with open("test", "w") as f: f.write("3") a = 4 example() """, arcz=arcz, ) def test_with_return(self) -> None: arcz = ".1 .2 23 34 4. 16 6." if env.PYBEHAVIOR.exit_through_with: arcz = arcz.replace("4.", "42 2.") self.check_coverage("""\ def example(): with open("test", "w") as f: f.write("3") return 4 example() """, arcz=arcz, ) def test_bug_146(self) -> None: # https://github.com/nedbat/coveragepy/issues/146 arcz = ".1 12 23 34 41 15 5." 
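        # On versions where PYBEHAVIOR.exit_through_with is true, leaving the
        # with body is traced back through the "with" statement line, so the
        # 3->4 arc is rewritten below as 3->2 plus 2->4.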
if env.PYBEHAVIOR.exit_through_with: arcz = arcz.replace("34", "32 24") self.check_coverage("""\ for i in range(2): with open("test", "w") as f: print(3) print(4) print(5) """, arcz=arcz, ) def test_nested_with_return(self) -> None: arcz = ".1 .2 23 34 45 56 6. 18 8." if env.PYBEHAVIOR.exit_through_with: arcz = arcz.replace("6.", "64 42 2.") self.check_coverage("""\ def example(x): with open("test", "w") as f2: a = 3 with open("test2", "w") as f4: f2.write("5") return 6 example(8) """, arcz=arcz, ) def test_break_through_with(self) -> None: arcz = ".1 12 23 34 45 15 5." if env.PYBEHAVIOR.exit_through_with: arcz = arcz.replace("45", "42 25") self.check_coverage("""\ for i in range(1+1): with open("test", "w") as f: print(3) break print(5) """, arcz=arcz, arcz_missing="15", ) def test_continue_through_with(self) -> None: arcz = ".1 12 23 34 41 15 5." if env.PYBEHAVIOR.exit_through_with: arcz = arcz.replace("41", "42 21") self.check_coverage("""\ for i in range(1+1): with open("test", "w") as f: print(3) continue print(5) """, arcz=arcz, ) # https://github.com/nedbat/coveragepy/issues/1270 def test_raise_through_with(self) -> None: if env.PYBEHAVIOR.exit_through_with: arcz = ".1 12 27 78 8. 9A A. -23 34 45 53 6-2" arcz_missing = "6-2 8." arcz_unpredicted = "3-2 89" else: arcz = ".1 12 27 78 8. 9A A. -23 34 45 5-2 6-2" arcz_missing = "6-2 8." arcz_unpredicted = "89" cov = self.check_coverage("""\ from contextlib import suppress def f(x): with suppress(): # used as a null context manager print(4) raise Exception("Boo6") print(6) try: f(8) except Exception: print("oops 10") """, arcz=arcz, arcz_missing=arcz_missing, arcz_unpredicted=arcz_unpredicted, ) expected = "line 3 didn't jump to the function exit" assert self.get_missing_arc_description(cov, 3, -2) == expected def test_untaken_raise_through_with(self) -> None: if env.PYBEHAVIOR.exit_through_with: arcz = ".1 12 28 89 9. AB B. -23 34 45 56 53 63 37 7-2" arcz_missing = "56 63 AB B." else: arcz = ".1 12 28 89 9. AB B. -23 34 45 56 6-2 57 7-2" arcz_missing = "56 6-2 AB B." cov = self.check_coverage("""\ from contextlib import suppress def f(x): with suppress(): # used as a null context manager print(4) if x == 5: raise Exception("Boo6") print(7) try: f(9) except Exception: print("oops 11") """, arcz=arcz, arcz_missing=arcz_missing, ) expected = "line 3 didn't jump to the function exit" assert self.get_missing_arc_description(cov, 3, -2) == expected class LoopArcTest(CoverageTest): """Arc-measuring tests involving loops.""" def test_loop(self) -> None: self.check_coverage("""\ for i in range(10): a = i assert a == 9 """, arcz=".1 12 21 13 3.", ) self.check_coverage("""\ a = -1 for i in range(0): a = i assert a == -1 """, arcz=".1 12 23 32 24 4.", arcz_missing="23 32", ) def test_nested_loop(self) -> None: self.check_coverage("""\ for i in range(3): for j in range(3): a = i + j assert a == 4 """, arcz=".1 12 23 32 21 14 4.", ) def test_break(self) -> None: if env.PYBEHAVIOR.omit_after_jump: arcz = ".1 12 23 35 15 5." arcz_missing = "15" else: arcz = ".1 12 23 35 15 41 5." arcz_missing = "15 41" self.check_coverage("""\ for i in range(10): a = i break # 3 a = 99 assert a == 0 # 5 """, arcz=arcz, arcz_missing=arcz_missing, ) def test_continue(self) -> None: if env.PYBEHAVIOR.omit_after_jump: arcz = ".1 12 23 31 15 5." arcz_missing = "" else: arcz = ".1 12 23 31 15 41 5." 
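            # Without omit_after_jump, the unreachable "a = 99" line is still
            # compiled, so the 4->1 arc is possible but never taken; it is
            # listed as missing just below.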
arcz_missing = "41" self.check_coverage("""\ for i in range(10): a = i continue # 3 a = 99 assert a == 9 # 5 """, arcz=arcz, arcz_missing=arcz_missing, ) def test_nested_breaks(self) -> None: self.check_coverage("""\ for i in range(3): for j in range(3): a = i + j break # 4 if i == 2: break assert a == 2 and i == 2 # 7 """, arcz=".1 12 23 34 45 25 56 51 67 17 7.", arcz_missing="17 25", ) def test_while_1(self) -> None: # With "while 1", the loop knows it's constant. if env.PYBEHAVIOR.keep_constant_test: arcz = ".1 12 23 34 45 36 62 57 7." else: arcz = ".1 13 34 45 36 63 57 7." self.check_coverage("""\ a, i = 1, 0 while 1: if i >= 3: a = 4 break i += 1 assert a == 4 and i == 3 """, arcz=arcz, ) def test_while_true(self) -> None: # With "while True", 2.x thinks it's computation, # 3.x thinks it's constant. if env.PYBEHAVIOR.keep_constant_test: arcz = ".1 12 23 34 45 36 62 57 7." else: arcz = ".1 13 34 45 36 63 57 7." self.check_coverage("""\ a, i = 1, 0 while True: if i >= 3: a = 4 break i += 1 assert a == 4 and i == 3 """, arcz=arcz, ) def test_zero_coverage_while_loop(self) -> None: # https://github.com/nedbat/coveragepy/issues/502 self.make_file("main.py", "print('done')") self.make_file("zero.py", """\ def method(self): while True: return 1 """) cov = coverage.Coverage(source=["."], branch=True) self.start_import_stop(cov, "main") assert self.stdout() == 'done\n' if env.PYBEHAVIOR.keep_constant_test: num_stmts = 3 else: num_stmts = 2 expected = f"zero.py {num_stmts} {num_stmts} 0 0 0% 1-3" report = self.get_report(cov, show_missing=True) squeezed = self.squeezed_lines(report) assert expected in squeezed[3] def test_bug_496_continue_in_constant_while(self) -> None: # https://github.com/nedbat/coveragepy/issues/496 # A continue in a while-true needs to jump to the right place. if env.PYBEHAVIOR.keep_constant_test: arcz = ".1 12 23 34 45 52 46 67 7." else: arcz = ".1 13 34 45 53 46 67 7." self.check_coverage("""\ up = iter('ta') while True: char = next(up) if char == 't': continue i = "line 6" break """, arcz=arcz, ) def test_for_if_else_for(self) -> None: self.check_coverage("""\ def branches_2(l): if l: for e in l: a = 4 else: a = 6 def branches_3(l): for x in l: if x: for e in l: a = 12 else: a = 14 branches_2([0,1]) branches_3([0,1]) """, arcz= ".1 18 8G GH H. " + ".2 23 34 43 26 3. 6. " + "-89 9A 9-8 AB BC CB B9 AE E9", arcz_missing="26 6.", ) def test_for_else(self) -> None: self.check_coverage("""\ def forelse(seq): for n in seq: if n > 5: break else: print('None of the values were greater than 5') print('Done') forelse([1,2]) forelse([1,6]) """, arcz=".1 .2 23 32 34 47 26 67 7. 18 89 9.", ) def test_while_else(self) -> None: self.check_coverage("""\ def whileelse(seq): while seq: n = seq.pop() if n > 4: break else: n = 99 return n assert whileelse([1, 2]) == 99 assert whileelse([1, 5]) == 5 """, arcz=".1 19 9A A. .2 23 34 45 58 42 27 78 8.", ) def test_confusing_for_loop_bug_175(self) -> None: if env.PYBEHAVIOR.comprehensions_are_functions: extra_arcz = " -22 2-2" else: extra_arcz = "" self.check_coverage("""\ o = [(1,2), (3,4)] o = [a for a in o] for tup in o: x = tup[0] y = tup[1] """, arcz=".1 12 23 34 45 53 3." + extra_arcz, ) self.check_coverage("""\ o = [(1,2), (3,4)] for tup in [a for a in o]: x = tup[0] y = tup[1] """, arcz=".1 12 23 34 42 2." 
+ extra_arcz, ) # https://bugs.python.org/issue44672 @pytest.mark.xfail(env.PYVERSION < (3, 10), reason="<3.10 traced final pass incorrectly") def test_incorrect_loop_exit_bug_1175(self) -> None: self.check_coverage("""\ def wrong_loop(x): if x: for i in [3, 33]: print(i+4) else: pass wrong_loop(8) """, arcz=".1 .2 23 26 34 43 3. 6. 18 8.", arcz_missing="26 6.", ) # https://bugs.python.org/issue44672 @pytest.mark.xfail(env.PYVERSION < (3, 10), reason="<3.10 traced final pass incorrectly") def test_incorrect_if_bug_1175(self) -> None: self.check_coverage("""\ def wrong_loop(x): if x: if x: print(4) else: pass wrong_loop(8) """, arcz=".1 .2 23 26 34 4. 3. 6. 18 8.", arcz_missing="26 3. 6.", ) def test_generator_expression(self) -> None: # Generator expression: self.check_coverage("""\ o = ((1,2), (3,4)) o = (a for a in o) for tup in o: x = tup[0] y = tup[1] """, arcz=".1 -22 2-2 12 23 34 45 53 3.", ) def test_generator_expression_another_way(self) -> None: # https://bugs.python.org/issue44450 # Generator expression: self.check_coverage("""\ o = ((1,2), (3,4)) o = (a for a in o) for tup in o: x = tup[0] y = tup[1] """, arcz=".1 -22 2-2 12 25 56 67 75 5.", ) def test_other_comprehensions(self) -> None: if env.PYBEHAVIOR.comprehensions_are_functions: extra_arcz = " -22 2-2" else: extra_arcz = "" # Set comprehension: self.check_coverage("""\ o = ((1,2), (3,4)) o = {a for a in o} for tup in o: x = tup[0] y = tup[1] """, arcz=".1 12 23 34 45 53 3." + extra_arcz, ) # Dict comprehension: self.check_coverage("""\ o = ((1,2), (3,4)) o = {a:1 for a in o} for tup in o: x = tup[0] y = tup[1] """, arcz=".1 12 23 34 45 53 3." + extra_arcz, ) def test_multiline_dict_comp(self) -> None: if env.PYBEHAVIOR.comprehensions_are_functions: extra_arcz = " 2-2" else: extra_arcz = "" # Multiline dict comp: self.check_coverage("""\ # comment d = \\ { i: str(i) for i in range(9) } x = 11 """, arcz="-22 2B B-2" + extra_arcz, ) # Multi dict comp: self.check_coverage("""\ # comment d = \\ { (i, j): str(i+j) for i in range(9) for j in range(13) } x = 15 """, arcz="-22 2F F-2" + extra_arcz, ) class ExceptionArcTest(CoverageTest): """Arc-measuring tests involving exception handling.""" def test_try_except(self) -> None: self.check_coverage("""\ a, b = 1, 1 try: a = 3 except: b = 5 assert a == 3 and b == 1 """, arcz=".1 12 23 36 45 56 6.", arcz_missing="45 56", ) def test_raise_followed_by_statement(self) -> None: if env.PYBEHAVIOR.omit_after_jump: arcz = ".1 12 23 34 46 67 78 8." arcz_missing = "" else: arcz = ".1 12 23 34 46 58 67 78 8." arcz_missing = "58" self.check_coverage("""\ a, b = 1, 1 try: a = 3 raise Exception("Yikes!") a = 5 except: b = 7 assert a == 3 and b == 7 """, arcz=arcz, arcz_missing=arcz_missing, ) def test_hidden_raise(self) -> None: self.check_coverage("""\ a, b = 1, 1 def oops(x): if x % 2: raise Exception("odd") try: a = 6 oops(1) a = 8 except: b = 10 assert a == 6 and b == 10 """, arcz=".1 12 -23 34 3-2 4-2 25 56 67 78 8B 9A AB B.", arcz_missing="3-2 78 8B", arcz_unpredicted="79", ) def test_except_with_type(self) -> None: self.check_coverage("""\ a, b = 1, 1 def oops(x): if x % 2: raise ValueError("odd") def try_it(x): try: a = 7 oops(x) a = 9 except ValueError: b = 11 return a assert try_it(0) == 9 # C assert try_it(1) == 7 # D """, arcz=".1 12 -23 34 3-2 4-2 25 5D DE E. 
-56 67 78 89 9C AB BC C-5", arcz_unpredicted="8A", ) @xfail_pypy_3882 def test_try_finally(self) -> None: self.check_coverage("""\ a, c = 1, 1 try: a = 3 finally: c = 5 assert a == 3 and c == 5 """, arcz=".1 12 23 35 56 6.", ) self.check_coverage("""\ a, c, d = 1, 1, 1 try: try: a = 4 finally: c = 6 except: d = 8 assert a == 4 and c == 6 and d == 1 # 9 """, arcz=".1 12 23 34 46 78 89 69 9.", arcz_missing="78 89", ) self.check_coverage("""\ a, c, d = 1, 1, 1 try: try: a = 4 raise Exception("Yikes!") # line 6 finally: c = 8 except: d = 10 # A assert a == 4 and c == 8 and d == 10 # B """, arcz=".1 12 23 34 45 58 89 9A AB B.", arcz_missing="", ) @xfail_pypy_3882 def test_finally_in_loop(self) -> None: self.check_coverage("""\ a, c, d, i = 1, 1, 1, 99 try: for i in range(5): try: a = 5 if i > 0: raise Exception("Yikes!") a = 8 finally: c = 10 except: d = 12 # C assert a == 5 and c == 10 and d == 12 # D """, arcz=".1 12 23 34 3D 45 56 67 68 7A 8A A3 AB BC CD D.", arcz_missing="3D", ) self.check_coverage("""\ a, c, d, i = 1, 1, 1, 99 try: for i in range(5): try: a = 5 if i > 10: raise Exception("Yikes!") a = 8 finally: c = 10 except: d = 12 # C assert a == 8 and c == 10 and d == 1 # D """, arcz=".1 12 23 34 3D 45 56 67 68 7A 8A A3 AB BC CD D.", arcz_missing="67 7A AB BC CD", ) @xfail_pypy_3882 def test_break_through_finally(self) -> None: arcz = ".1 12 23 34 3D 45 56 67 68 7A AD 8A A3 BC CD D." if env.PYBEHAVIOR.finally_jumps_back: arcz = arcz.replace("AD", "A7 7D") self.check_coverage("""\ a, c, d, i = 1, 1, 1, 99 try: for i in range(3): try: a = 5 if i > 0: break a = 8 finally: c = 10 except: d = 12 # C assert a == 5 and c == 10 and d == 1 # D """, arcz=arcz, arcz_missing="3D BC CD", ) def test_break_continue_without_finally(self) -> None: self.check_coverage("""\ a, c, d, i = 1, 1, 1, 99 try: for i in range(3): try: a = 5 if i > 0: break continue except: c = 10 except: d = 12 # C assert a == 5 and c == 1 and d == 1 # D """, arcz=".1 12 23 34 3D 45 56 67 68 7D 83 9A A3 BC CD D.", arcz_missing="3D 9A A3 BC CD", ) @xfail_pypy_3882 def test_continue_through_finally(self) -> None: arcz = ".1 12 23 34 3D 45 56 67 68 7A 8A A3 BC CD D." if env.PYBEHAVIOR.finally_jumps_back: arcz += " 73 A7" self.check_coverage("""\ a, b, c, d, i = 1, 1, 1, 1, 99 try: for i in range(3): try: a = 5 if i > 0: continue b = 8 finally: c = 10 except: d = 12 # C assert (a, b, c, d) == (5, 8, 10, 1) # D """, arcz=arcz, arcz_missing="BC CD", ) def test_finally_in_loop_bug_92(self) -> None: self.check_coverage("""\ for i in range(5): try: j = 3 finally: f = 5 g = 6 h = 7 """, arcz=".1 12 23 35 56 61 17 7.", ) def test_bug_212(self) -> None: # "except Exception as e" is crucial here. # Bug 212 said that the "if exc" line was incorrectly marked as only # partially covered. self.check_coverage("""\ def b(exc): try: while "no peephole".upper(): raise Exception(exc) # 4 except Exception as e: if exc != 'expected': raise q = 8 b('expected') try: b('unexpected') # C except: pass """, arcz=".1 .2 1A 23 34 3. 45 56 67 68 7. 8. AB BC C. DE E.", arcz_missing="3. 
C.", arcz_unpredicted="CD", ) def test_except_finally(self) -> None: self.check_coverage("""\ a, b, c = 1, 1, 1 try: a = 3 except: b = 5 finally: c = 7 assert a == 3 and b == 1 and c == 7 """, arcz=".1 12 23 45 37 57 78 8.", arcz_missing="45 57", ) self.check_coverage("""\ a, b, c = 1, 1, 1 def oops(x): if x % 2: raise Exception("odd") try: a = 5 oops(1) a = 7 except: b = 9 finally: c = 11 assert a == 5 and b == 9 and c == 11 """, arcz=".1 12 -23 3-2 24 45 56 67 7B 89 9B BC C.", arcz_missing="67 7B", arcz_unpredicted="68", ) def test_multiple_except_clauses(self) -> None: self.check_coverage("""\ a, b, c = 1, 1, 1 try: a = 3 except ValueError: b = 5 except IndexError: a = 7 finally: c = 9 assert a == 3 and b == 1 and c == 9 """, arcz=".1 12 23 45 46 39 59 67 79 9A A.", arcz_missing="45 59 46 67 79", ) self.check_coverage("""\ a, b, c = 1, 1, 1 try: a = int("xyz") # ValueError except ValueError: b = 5 except IndexError: a = 7 finally: c = 9 assert a == 1 and b == 5 and c == 9 """, arcz=".1 12 23 45 46 39 59 67 79 9A A.", arcz_missing="39 46 67 79", arcz_unpredicted="34", ) self.check_coverage("""\ a, b, c = 1, 1, 1 try: a = [1][3] # IndexError except ValueError: b = 5 except IndexError: a = 7 finally: c = 9 assert a == 7 and b == 1 and c == 9 """, arcz=".1 12 23 45 46 39 59 67 79 9A A.", arcz_missing="39 45 59", arcz_unpredicted="34", ) self.check_coverage("""\ a, b, c = 1, 1, 1 try: try: a = 4/0 # ZeroDivisionError except ValueError: b = 6 except IndexError: a = 8 finally: c = 10 except ZeroDivisionError: pass assert a == 1 and b == 1 and c == 10 """, arcz=".1 12 23 34 4A 56 6A 57 78 8A AD BC CD D.", arcz_missing="4A 56 6A 78 8A AD", arcz_unpredicted="45 7A AB", ) def test_return_finally(self) -> None: arcz = ".1 12 29 9A AB BC C-1 -23 34 45 7-2 57 38 8-2" if env.PYBEHAVIOR.finally_jumps_back: arcz = arcz.replace("7-2", "75 5-2") self.check_coverage("""\ a = [1] def check_token(data): if data: try: return 5 finally: a.append(7) return 8 assert check_token(False) == 8 assert a == [1] assert check_token(True) == 5 assert a == [1, 7] """, arcz=arcz, ) @xfail_pypy_3882 def test_except_jump_finally(self) -> None: arcz = ( ".1 1Q QR RS ST TU U. " + ".2 23 34 45 56 4O 6L " + "78 89 9A AL 8B BC CD DL BE EF FG GL EH HI IJ JL HL " + "LO L4 L. LM " + "MN NO O." ) if env.PYBEHAVIOR.finally_jumps_back: arcz = arcz.replace("LO", "LA AO").replace("L4", "L4 LD D4").replace("L.", "LG G.") self.check_coverage("""\ def func(x): a = f = g = 2 try: for i in range(4): try: 6/0 except ZeroDivisionError: if x == 'break': a = 9 break elif x == 'continue': a = 12 continue elif x == 'return': a = 15 # F return a, f, g, i # G elif x == 'raise': # H a = 18 # I raise ValueError() # J finally: f = 21 # L except ValueError: # M g = 23 # N return a, f, g, i # O assert func('break') == (9, 21, 2, 0) # Q assert func('continue') == (12, 21, 2, 3) # R assert func('return') == (15, 2, 2, 0) # S assert func('raise') == (18, 21, 23, 0) # T assert func('other') == (2, 21, 2, 3) # U 30 """, arcz=arcz, arcz_missing="6L", arcz_unpredicted="67", ) @xfail_pypy_3882 def test_else_jump_finally(self) -> None: arcz = ( ".1 1S ST TU UV VW W. " + ".2 23 34 45 56 6A 78 8N 4Q " + "AB BC CN AD DE EF FN DG GH HI IN GJ JK KL LN JN " + "N4 NQ N. NO " + "OP PQ Q." 
) if env.PYBEHAVIOR.finally_jumps_back: arcz = arcz.replace("NQ", "NC CQ").replace("N4", "N4 NF F4").replace("N.", "NI I.") self.check_coverage("""\ def func(x): a = f = g = 2 try: for i in range(4): try: b = 6 except ZeroDivisionError: pass else: if x == 'break': a = 11 break elif x == 'continue': a = 14 continue elif x == 'return': a = 17 # H return a, f, g, i # I elif x == 'raise': # J a = 20 # K raise ValueError() # L finally: f = 23 # N except ValueError: # O g = 25 # P return a, f, g, i # Q assert func('break') == (11, 23, 2, 0) # S assert func('continue') == (14, 23, 2, 3) # T assert func('return') == (17, 2, 2, 0) # U assert func('raise') == (20, 23, 25, 0) # V assert func('other') == (2, 23, 2, 3) # W 32 """, arcz=arcz, arcz_missing="78 8N", arcz_unpredicted="", ) class YieldTest(CoverageTest): """Arc tests for generators.""" def test_yield_in_loop(self) -> None: self.check_coverage("""\ def gen(inp): for n in inp: yield n list(gen([1,2,3])) """, arcz=".1 .2 23 2. 32 15 5.", ) def test_padded_yield_in_loop(self) -> None: self.check_coverage("""\ def gen(inp): i = 2 for n in inp: i = 4 yield n i = 6 i = 7 list(gen([1,2,3])) """, arcz=".1 19 9. .2 23 34 45 56 63 37 7.", ) def test_bug_308(self) -> None: self.check_coverage("""\ def run(): for i in range(10): yield lambda: i for f in run(): print(f()) """, arcz=".1 15 56 65 5. .2 23 32 2. -33 3-3", ) self.check_coverage("""\ def run(): yield lambda: 100 for i in range(10): yield lambda: i for f in run(): print(f()) """, arcz=".1 16 67 76 6. .2 23 34 43 3. -22 2-2 -44 4-4", ) self.check_coverage("""\ def run(): yield lambda: 100 # no branch miss for f in run(): print(f()) """, arcz=".1 14 45 54 4. .2 2. -22 2-2", ) def test_bug_324(self) -> None: # This code is tricky: the list() call pulls all the values from gen(), # but each of them is a generator itself that is never iterated. As a # result, the generator expression on line 3 is never entered or run. self.check_coverage("""\ def gen(inp): for n in inp: yield (i * 2 for i in range(n)) list(gen([1,2,3])) """, arcz= ".1 15 5. " # The module level ".2 23 32 2. " # The gen() function "-33 3-3", # The generator expression arcz_missing="-33 3-3", ) def test_coroutines(self) -> None: self.check_coverage("""\ def double_inputs(): while len([1]): # avoid compiler differences x = yield x *= 2 yield x gen = double_inputs() next(gen) print(gen.send(10)) next(gen) print(gen.send(6)) """, arcz=".1 17 78 89 9A AB B. .2 23 34 45 52 2.", arcz_missing="2.", ) assert self.stdout() == "20\n12\n" def test_yield_from(self) -> None: self.check_coverage("""\ def gen(inp): i = 2 for n in inp: i = 4 yield from range(3) i = 6 i = 7 list(gen([1,2,3])) """, arcz=".1 19 9. .2 23 34 45 56 63 37 7.", ) def test_abandoned_yield(self) -> None: # https://github.com/nedbat/coveragepy/issues/440 self.check_coverage("""\ def gen(): print(2) yield 3 print(4) print(next(gen())) """, lines=[1, 2, 3, 4, 6], missing="4", arcz=".1 16 6. 
.2 23 34 4.", arcz_missing="34 4.", ) assert self.stdout() == "2\n3\n" @pytest.mark.skipif(not env.PYBEHAVIOR.match_case, reason="Match-case is new in 3.10") class MatchCaseTest(CoverageTest): """Tests of match-case.""" def test_match_case_with_default(self) -> None: self.check_coverage("""\ for command in ["huh", "go home", "go n"]: match command.split(): case ["go", direction] if direction in "nesw": match = f"go: {direction}" case ["go", _]: match = "no go" case _: match = "default" print(match) """, arcz=".1 12 23 34 49 35 56 69 57 78 89 91 1.", ) assert self.stdout() == "default\nno go\ngo: n\n" def test_match_case_with_wildcard(self) -> None: self.check_coverage("""\ for command in ["huh", "go home", "go n"]: match command.split(): case ["go", direction] if direction in "nesw": match = f"go: {direction}" case ["go", _]: match = "no go" case x: match = f"default: {x}" print(match) """, arcz=".1 12 23 34 49 35 56 69 57 78 89 91 1.", ) assert self.stdout() == "default: ['huh']\nno go\ngo: n\n" def test_match_case_without_wildcard(self) -> None: self.check_coverage("""\ match = None for command in ["huh", "go home", "go n"]: match command.split(): case ["go", direction] if direction in "nesw": match = f"go: {direction}" case ["go", _]: match = "no go" print(match) """, arcz=".1 12 23 34 45 58 46 78 67 68 82 2.", ) assert self.stdout() == "None\nno go\ngo: n\n" def test_absurd_wildcard(self) -> None: # https://github.com/nedbat/coveragepy/issues/1421 self.check_coverage("""\ def absurd(x): match x: case (3 | 99 | (999 | _)): print("default") absurd(5) """, arcz=".1 15 5. .2 23 34 4.", ) assert self.stdout() == "default\n" class OptimizedIfTest(CoverageTest): """Tests of if statements being optimized away.""" def test_optimized_away_if_0(self) -> None: if env.PYBEHAVIOR.keep_constant_test: lines = [1, 2, 3, 4, 8, 9] arcz = ".1 12 23 24 34 48 49 89 9." arcz_missing = "24" # 49 isn't missing because line 4 is matched by the default partial # exclusion regex, and no branches are considered missing if they # start from an excluded line. else: lines = [1, 2, 3, 8, 9] arcz = ".1 12 23 28 38 89 9." arcz_missing = "28" self.check_coverage("""\ a = 1 if len([2]): c = 3 if 0: if len([5]): d = 6 else: e = 8 f = 9 """, lines=lines, arcz=arcz, arcz_missing=arcz_missing, ) def test_optimized_away_if_1(self) -> None: if env.PYBEHAVIOR.keep_constant_test: lines = [1, 2, 3, 4, 5, 6, 9] arcz = ".1 12 23 24 34 45 49 56 69 59 9." arcz_missing = "24 59" # 49 isn't missing because line 4 is matched by the default partial # exclusion regex, and no branches are considered missing if they # start from an excluded line. else: lines = [1, 2, 3, 5, 6, 9] arcz = ".1 12 23 25 35 56 69 59 9." arcz_missing = "25 59" self.check_coverage("""\ a = 1 if len([2]): c = 3 if 1: if len([5]): d = 6 else: e = 8 f = 9 """, lines=lines, arcz=arcz, arcz_missing=arcz_missing, ) def test_optimized_away_if_1_no_else(self) -> None: if env.PYBEHAVIOR.keep_constant_test: lines = [1, 2, 3, 4, 5] arcz = ".1 12 23 25 34 45 5." arcz_missing = "" # 25 isn't missing because line 2 is matched by the default partial # exclusion regex, and no branches are considered missing if they # start from an excluded line. else: lines = [1, 3, 4, 5] arcz = ".1 13 34 45 5." 
arcz_missing = "" self.check_coverage("""\ a = 1 if 1: b = 3 c = 4 d = 5 """, lines=lines, arcz=arcz, arcz_missing=arcz_missing, ) def test_optimized_if_nested(self) -> None: if env.PYBEHAVIOR.keep_constant_test: lines = [1, 2, 8, 11, 12, 13, 14, 15] arcz = ".1 12 28 2F 8B 8F BC CD DE EF F." arcz_missing = "" # 2F and 8F aren't missing because they're matched by the default # partial exclusion regex, and no branches are considered missing # if they start from an excluded line. else: lines = [1, 12, 14, 15] arcz = ".1 1C CE EF F." arcz_missing = "" self.check_coverage("""\ a = 1 if 0: if 0: b = 4 else: c = 6 else: if 0: d = 9 else: if 0: e = 11 f = 12 if 0: g = 13 h = 14 i = 15 """, lines=lines, arcz=arcz, arcz_missing=arcz_missing, ) def test_dunder_debug(self) -> None: # Since some of our tests use __debug__, let's make sure it is true as # we expect assert __debug__ # Check that executed code has __debug__ self.check_coverage("""\ assert __debug__, "assert __debug__" """, ) # Check that if it didn't have debug, it would let us know. with pytest.raises(AssertionError): self.check_coverage("""\ assert not __debug__, "assert not __debug__" """, ) def test_if_debug(self) -> None: if env.PYBEHAVIOR.optimize_if_debug: arcz = ".1 12 24 41 26 61 1." arcz_missing = "" else: arcz = ".1 12 23 31 34 41 26 61 1." arcz_missing = "31" self.check_coverage("""\ for value in [True, False]: if value: if __debug__: x = 4 else: x = 6 """, arcz=arcz, arcz_missing=arcz_missing, ) @xfail_pypy_3882 def test_if_not_debug(self) -> None: if env.PYBEHAVIOR.optimize_if_not_debug == 1: arcz = ".1 12 23 34 42 37 72 28 8." elif env.PYBEHAVIOR.optimize_if_not_debug == 2: arcz = ".1 12 23 35 52 37 72 28 8." else: assert env.PYBEHAVIOR.optimize_if_not_debug == 3 arcz = ".1 12 23 32 37 72 28 8." self.check_coverage("""\ lines = set() for value in [True, False]: if value: if not __debug__: lines.add(5) else: lines.add(7) assert lines == set([7]) """, arcz=arcz, ) class MiscArcTest(CoverageTest): """Miscellaneous arc-measuring tests.""" def test_dict_literal(self) -> None: self.check_coverage("""\ d = { 'a': 2, 'b': 3, 'c': { 'd': 5, 'e': 6, } } assert d """, arcz=".1 19 9.", ) self.check_coverage("""\ d = \\ { 'a': 2, 'b': 3, 'c': { 'd': 5, 'e': 6, } } assert d """, arcz=".1 19 9.", ) def test_unpacked_literals(self) -> None: self.check_coverage("""\ d = { 'a': 2, 'b': 3, } weird = { **d, **{'c': 7}, 'd': 8, } assert weird['b'] == 3 """, arcz=".1 15 5A A.", ) self.check_coverage("""\ l = [ 2, 3, ] weird = [ *l, *[7], 8, ] assert weird[1] == 3 """, arcz=".1 15 5A A.", ) @pytest.mark.parametrize("n", [10, 50, 100, 500, 1000, 2000, 10000]) def test_pathologically_long_code_object(self, n: int) -> None: # https://github.com/nedbat/coveragepy/issues/359 # Long code objects sometimes cause problems. Originally, it was # due to EXTENDED_ARG bytes codes. Then it showed a mistake in # line-number packing. code = """\ data = [ """ + "".join(f"""\ [ {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}], """ for i in range(n) ) + """\ ] print(len(data)) """ self.check_coverage(code, arcs=[(-1, 1), (1, 2*n+4), (2*n+4, -1)]) assert self.stdout() == f"{n}\n" def test_partial_generators(self) -> None: # https://github.com/nedbat/coveragepy/issues/475 # Line 2 is executed completely. # Line 3 is started but not finished, because zip ends before it finishes. # Line 4 is never started. 
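# --- Editorial sketch (not part of the original test file) -----------------
# The three comments above rely on generator expressions being lazy: the body
# of "(x for x in ...)" does not run until the generator is iterated.  A
# standalone demonstration of that behavior (hypothetical names, for
# illustration only):
_ran = []
_gen = (_ran.append(n) or n for n in range(3))   # creating it runs nothing
assert _ran == []                                # body not entered yet
assert next(_gen) == 0                           # first iteration runs now
assert _ran == [0]
del _ran, _gen
# ----------------------------------------------------------------------------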
cov = self.check_coverage("""\ def f(a, b): c = (i for i in a) # 2 d = (j for j in b) # 3 e = (k for k in b) # 4 return dict(zip(c, d)) f(['a', 'b'], [1, 2, 3]) """, arcz=".1 17 7. .2 23 34 45 5. -22 2-2 -33 3-3 -44 4-4", arcz_missing="3-3 -44 4-4", ) expected = "line 3 didn't finish the generator expression on line 3" assert self.get_missing_arc_description(cov, 3, -3) == expected expected = "line 4 didn't run the generator expression on line 4" assert self.get_missing_arc_description(cov, 4, -4) == expected class DecoratorArcTest(CoverageTest): """Tests of arcs with decorators.""" def test_function_decorator(self) -> None: arcz = ( ".1 16 67 7A AE EF F. " # main line ".2 24 4. -23 3-2 " # decorators "-6D D-6 " # my_function ) if env.PYBEHAVIOR.trace_decorator_line_again: arcz += "A7 76 6A " self.check_coverage("""\ def decorator(arg): def _dec(f): return f return _dec @decorator(6) @decorator( len([8]), ) def my_function( a=len([11]), ): x = 13 a = 14 my_function() """, arcz=arcz, ) @xfail_pypy38 def test_class_decorator(self) -> None: arcz = ( ".1 16 67 6D 7A AE E. " # main line ".2 24 4. -23 3-2 " # decorators "-66 D-6 " # MyObject ) if env.PYBEHAVIOR.trace_decorator_line_again: arcz += "A7 76 6A " self.check_coverage("""\ def decorator(arg): def _dec(c): return c return _dec @decorator(6) @decorator( len([8]), ) class MyObject( object ): X = 13 a = 14 """, arcz=arcz, ) def test_bug_466a(self) -> None: # A bad interaction between decorators and multi-line list assignments, # believe it or not...! arcz = ".1 1A A. 13 34 4. -35 58 8-3 " if env.PYBEHAVIOR.trace_decorator_line_again: arcz += "43 " # This example makes more sense when considered in tandem with 466b below. self.check_coverage("""\ class Parser(object): @classmethod def parse(cls): formats = [ 5 ] return None Parser.parse() """, arcz=arcz, ) def test_bug_466b(self) -> None: # A bad interaction between decorators and multi-line list assignments, # believe it or not...! arcz = ".1 1A A. 13 34 4. -35 58 8-3 " if env.PYBEHAVIOR.trace_decorator_line_again: arcz += "43 " self.check_coverage("""\ class Parser(object): @classmethod def parse(cls): formats = [ 6, ] return None Parser.parse() """, arcz=arcz, ) class LambdaArcTest(CoverageTest): """Tests of lambdas""" def test_multiline_lambda(self) -> None: self.check_coverage("""\ fn = (lambda x: x + 2 ) assert fn(4) == 6 """, arcz=".1 14 4-1 1-1", ) self.check_coverage("""\ fn = \\ ( lambda x: x + 8 ) assert fn(10) == 18 """, arcz="-22 2A A-2 2-2", ) def test_unused_lambdas_are_confusing_bug_90(self) -> None: self.check_coverage("""\ a = 1 fn = lambda x: x b = 3 """, arcz=".1 12 -22 2-2 23 3.", arcz_missing="-22 2-2", ) def test_raise_with_lambda_looks_like_partial_branch(self) -> None: self.check_coverage("""\ def ouch(fn): 2/0 a = b = c = d = 3 try: a = ouch(lambda: 5) if a: b = 7 except ZeroDivisionError: c = 9 d = 10 assert (a, b, c, d) == (3, 3, 9, 10) """, lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], missing="6-7", arcz=".1 13 34 45 56 67 6A 7A 89 9A AB B. .2 2. -55 5-5", arcz_missing="56 67 6A 7A -55 5-5", arcz_unpredicted="58", ) def test_lambda_in_dict(self) -> None: self.check_coverage("""\ x = 1 x = 2 d = { 4: lambda: [], 5: lambda: [], 6: lambda: [], 7: lambda: [], } for k, v in d.items(): # 10 if k & 1: v() """, arcz=".1 12 23 3A AB BC BA CA A. -33 3-3", ) # This had been a failure on Mac 3.9, but it started passing on GitHub # actions (running macOS 12) but still failed on my laptop (macOS 14). 
# I don't understand why it failed, I don't understand why it passed, # so just skip the whole thing. skip_eventlet_670 = pytest.mark.skipif( env.PYVERSION[:2] == (3, 9) and env.CPYTHON and env.OSX, reason="Avoid an eventlet bug on Mac 3.9: eventlet#670", # https://github.com/eventlet/eventlet/issues/670 ) class AsyncTest(CoverageTest): """Tests of the new async and await keywords in Python 3.5""" @skip_eventlet_670 def test_async(self) -> None: self.check_coverage("""\ import asyncio async def compute(x, y): # 3 print(f"Compute {x} + {y} ...") await asyncio.sleep(0.001) return x + y # 6 async def print_sum(x, y): # 8 result = (0 + await compute(x, y) # A ) print(f"{x} + {y} = {result}") loop = asyncio.new_event_loop() # E loop.run_until_complete(print_sum(1, 2)) loop.close() # G """, arcz= ".1 13 38 8E EF FG G. " + "-34 45 56 6-3 " + "-89 9C C-8", ) assert self.stdout() == "Compute 1 + 2 ...\n1 + 2 = 3\n" @skip_eventlet_670 def test_async_for(self) -> None: self.check_coverage("""\ import asyncio class AsyncIteratorWrapper: # 3 def __init__(self, obj): # 4 self._it = iter(obj) def __aiter__(self): # 7 return self async def __anext__(self): # A try: return next(self._it) except StopIteration: raise StopAsyncIteration async def doit(): # G async for letter in AsyncIteratorWrapper("abc"): print(letter) print(".") loop = asyncio.new_event_loop() # L loop.run_until_complete(doit()) loop.close() """, arcz= ".1 13 3G GL LM MN N. " # module main line "-33 34 47 7A A-3 " # class definition "-GH HI IH HJ J-G " # doit "-45 5-4 " # __init__ "-78 8-7 " # __aiter__ "-AB BC C-A DE E-A ", # __anext__ arcz_unpredicted="CD", ) assert self.stdout() == "a\nb\nc\n.\n" def test_async_with(self) -> None: if env.PYBEHAVIOR.exit_through_with: arcz = ".1 1. .2 23 32 2." arcz_missing = ".2 23 32 2." else: arcz = ".1 1. .2 23 3." arcz_missing = ".2 23 3." self.check_coverage("""\ async def go(): async with x: pass """, arcz=arcz, arcz_missing=arcz_missing, ) def test_async_decorator(self) -> None: arcz = ".1 14 45 5. .2 2. -46 6-4 " if env.PYBEHAVIOR.trace_decorator_line_again: arcz += "54 " self.check_coverage("""\ def wrap(f): # 1 return f @wrap # 4 async def go(): return """, arcz=arcz, arcz_missing='-46 6-4', ) # https://github.com/nedbat/coveragepy/issues/1158 # https://bugs.python.org/issue44621 @pytest.mark.skipif(env.PYVERSION[:2] == (3, 9), reason="avoid a 3.9 bug: 44621") def test_bug_1158(self) -> None: self.check_coverage("""\ import asyncio async def async_gen(): yield 4 async def async_test(): global a a = 8 async for i in async_gen(): print(i + 10) else: a = 12 asyncio.run(async_test()) assert a == 12 """, arcz=".1 13 36 6E EF F. -34 4-3 -68 89 9A 9C A9 C-6", ) assert self.stdout() == "14\n" # https://github.com/nedbat/coveragepy/issues/1176 # https://bugs.python.org/issue44622 @skip_eventlet_670 def test_bug_1176(self) -> None: self.check_coverage("""\ import asyncio async def async_gen(): yield 4 async def async_test(): async for i in async_gen(): print(i + 8) asyncio.run(async_test()) """, arcz=".1 13 36 6A A. -34 4-3 -67 78 87 7-6", ) assert self.stdout() == "12\n" # https://github.com/nedbat/coveragepy/issues/1205 def test_bug_1205(self) -> None: self.check_coverage("""\ def func(): if T(2): if T(3): if F(4): if X(5): return 6 else: return 8 elif X(9) and Y: return 10 T, F = (lambda _: True), (lambda _: False) func() """, arcz=".1 1C CD D. .2 23 29 34 38 45 4. 56 5. 6. 8. 9. 9A A. -CC C-C", arcz_missing="29 38 45 56 5. 6. 8. 9. 
9A A.", ) class AnnotationTest(CoverageTest): """Tests using type annotations.""" def test_annotations(self) -> None: self.check_coverage("""\ def f(x:str, y:int) -> str: a:int = 2 return f"{x}, {y}, {a}, 3" print(f("x", 4)) """, arcz=".1 .2 23 3. 14 4.", ) assert self.stdout() == "x, 4, 2, 3\n" class ExcludeTest(CoverageTest): """Tests of exclusions to indicate known partial branches.""" def test_default(self) -> None: # A number of forms of pragma comment are accepted. self.check_coverage("""\ a = 1 if a: #pragma: no branch b = 3 c = 4 if c: # pragma NOBRANCH d = 6 e = 7 if e:#\tpragma:\tno branch f = 9 """, [1,2,3,4,5,6,7,8,9], arcz=".1 12 23 24 34 45 56 57 67 78 89 9. 8.", ) def test_custom_pragmas(self) -> None: self.check_coverage("""\ a = 1 while a: # [only some] c = 3 break assert c == 5-2 """, [1,2,3,4,5], partials=["only some"], arcz=".1 12 23 34 45 25 5.", ) class LineDataTest(CoverageTest): """Tests that line_data gives us what we expect.""" def test_branch(self) -> None: cov = coverage.Coverage(branch=True) self.make_file("fun1.py", """\ def fun1(x): if x == 1: return fun1(3) """) self.start_import_stop(cov, "fun1") data = cov.get_data() fun1_lines = sorted_lines(data, abs_file("fun1.py")) assert_count_equal(fun1_lines, [1, 2, 5]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_cmdline.py0000644000175100001770000012725300000000000020327 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Test cmdline.py for coverage.py.""" from __future__ import annotations import ast import os import pprint import re import sys import textwrap from unittest import mock from typing import Any, Mapping import pytest import coverage import coverage.cmdline from coverage.control import DEFAULT_DATAFILE from coverage.config import CoverageConfig from coverage.exceptions import _ExceptionDuringRun from coverage.types import TConfigValueIn, TConfigValueOut from coverage.version import __url__ from tests import testenv from tests.coveragetest import CoverageTest, OK, ERR, command_line from tests.helpers import os_sep, re_line class BaseCmdLineTest(CoverageTest): """Tests of execution paths through the command line interpreter.""" run_in_temp_dir = False # Make a dict mapping function names to the default values that cmdline.py # uses when calling the function. 
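# --- Editorial sketch (not part of the original test file) -----------------
# The `_defaults` trick below works because unittest.mock records every call
# made on a Mock (and on its children) as a (name, args, kwargs) triple in
# `mock_calls`.  A tiny standalone demonstration, with made-up names:
from unittest import mock as _mock_demo
_m = _mock_demo.Mock()
_m.Coverage().report(show_missing=None, sort=None)
_demo_defaults = {name: kwargs for name, _, kwargs in _m.mock_calls}
assert _demo_defaults["Coverage().report"] == {"show_missing": None, "sort": None}
del _m, _demo_defaults
# ----------------------------------------------------------------------------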
_defaults = mock.Mock() _defaults.Coverage().annotate( directory=None, ignore_errors=None, include=None, omit=None, morfs=[], contexts=None, ) _defaults.Coverage().html_report( directory=None, ignore_errors=None, include=None, omit=None, morfs=[], skip_covered=None, show_contexts=None, title=None, contexts=None, skip_empty=None, precision=None, ) _defaults.Coverage().report( ignore_errors=None, include=None, omit=None, morfs=[], show_missing=None, skip_covered=None, contexts=None, skip_empty=None, precision=None, sort=None, output_format=None, ) _defaults.Coverage().xml_report( ignore_errors=None, include=None, omit=None, morfs=[], outfile=None, contexts=None, skip_empty=None, ) _defaults.Coverage().json_report( ignore_errors=None, include=None, omit=None, morfs=[], outfile=None, contexts=None, pretty_print=None, show_contexts=None, ) _defaults.Coverage().lcov_report( ignore_errors=None, include=None, omit=None, morfs=[], outfile=None, contexts=None, ) _defaults.Coverage( data_file=DEFAULT_DATAFILE, cover_pylib=None, data_suffix=None, timid=None, branch=None, config_file=True, source=None, include=None, omit=None, debug=None, concurrency=None, check_preimported=True, context=None, messages=True, ) DEFAULT_KWARGS = {name: kw for name, _, kw in _defaults.mock_calls} def model_object(self) -> mock.Mock: """Return a Mock suitable for use in CoverageScript.""" mk = mock.Mock() cov = mk.Coverage.return_value # The mock needs options. mk.config = CoverageConfig() cov.get_option = mk.config.get_option cov.set_option = mk.config.set_option # Get the type right for the result of reporting. cov.report.return_value = 50.0 cov.html_report.return_value = 50.0 cov.xml_report.return_value = 50.0 cov.json_report.return_value = 50.0 cov.lcov_report.return_value = 50.0 return mk # Global names in cmdline.py that will be mocked during the tests. MOCK_GLOBALS = ['Coverage', 'PyRunner', 'show_help'] def mock_command_line( self, args: str, options: Mapping[str, TConfigValueIn] | None = None, ) -> tuple[mock.Mock, int]: """Run `args` through the command line, with a Mock. `options` is a dict of names and values to pass to `set_option`. Returns the Mock it used and the status code returned. """ mk = self.model_object() if options is not None: for name, value in options.items(): mk.config.set_option(name, value) patchers = [ mock.patch("coverage.cmdline."+name, getattr(mk, name)) for name in self.MOCK_GLOBALS ] for patcher in patchers: patcher.start() try: ret = command_line(args) finally: for patcher in patchers: patcher.stop() return mk, ret def cmd_executes( self, args: str, code: str, ret: int = OK, options: Mapping[str, TConfigValueIn] | None = None, ) -> None: """Assert that the `args` end up executing the sequence in `code`.""" called, status = self.mock_command_line(args, options=options) assert status == ret, f"Wrong status: got {status!r}, wanted {ret!r}" # Remove all indentation, and execute with mock globals code = textwrap.dedent(code) expected = self.model_object() globs = {n: getattr(expected, n) for n in self.MOCK_GLOBALS} code_obj = compile(code, "", "exec", dont_inherit=True) eval(code_obj, globs, {}) # pylint: disable=eval-used # Many of our functions take a lot of arguments, and cmdline.py # calls them with many. But most of them are just the defaults, which # we don't want to have to repeat in all tests. For each call, apply # the defaults. This lets the tests just mention the interesting ones. 
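# --- Editorial sketch (not part of the original test file) -----------------
# cmd_executes() above compares two call recordings: the calls the real
# command line made on a mocked module, and the calls produced by exec'ing
# the expected snippet with the same Mock bound to the global names it uses.
# A minimal standalone version of that second half (hypothetical names):
from unittest import mock as _mock_demo2
_expected = _mock_demo2.Mock()
_snippet = "cov = Coverage()\ncov.load()\ncov.report(show_missing=True)\n"
eval(compile(_snippet, "<demo>", "exec"), {"Coverage": _expected.Coverage}, {})
assert [name for name, _, _ in _expected.mock_calls] == [
    "Coverage", "Coverage().load", "Coverage().report",
]
del _expected, _snippet
# ----------------------------------------------------------------------------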
for name, _, kwargs in expected.mock_calls: for k, v in self.DEFAULT_KWARGS.get(name, {}).items(): kwargs.setdefault(k, v) self.assert_same_mock_calls(expected, called) def cmd_executes_same(self, args1: str, args2: str) -> None: """Assert that the `args1` executes the same as `args2`.""" m1, r1 = self.mock_command_line(args1) m2, r2 = self.mock_command_line(args2) assert r1 == r2 self.assert_same_mock_calls(m1, m2) def assert_same_mock_calls(self, m1: mock.Mock, m2: mock.Mock) -> None: """Assert that `m1.mock_calls` and `m2.mock_calls` are the same.""" # Use a real equality comparison, but if it fails, use a nicer assert # so we can tell what's going on. We have to use the real == first due # to CmdOptionParser.__eq__ if m1.mock_calls != m2.mock_calls: pp1 = pprint.pformat(m1.mock_calls) pp2 = pprint.pformat(m2.mock_calls) assert pp1+'\n' == pp2+'\n' def cmd_help( self, args: str, help_msg: str | None = None, topic: str | None = None, ret: int = ERR, ) -> None: """Run a command line, and check that it prints the right help. Only the last function call in the mock is checked, which should be the help message that we want to see. """ mk, status = self.mock_command_line(args) assert status == ret, f"Wrong status: got {status}, wanted {ret}" if help_msg: assert mk.mock_calls[-1] == ('show_help', (help_msg,), {}) else: assert mk.mock_calls[-1] == ('show_help', (), {'topic': topic}) class BaseCmdLineTestTest(BaseCmdLineTest): """Tests that our BaseCmdLineTest helpers work.""" def test_cmd_executes_same(self) -> None: # All the other tests here use self.cmd_executes_same in successful # ways, so here we just check that it fails. with pytest.raises(AssertionError): self.cmd_executes_same("run", "debug") class CmdLineTest(BaseCmdLineTest): """Tests of the coverage.py command line.""" def test_annotate(self) -> None: # coverage annotate [-d DIR] [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
self.cmd_executes("annotate", """\ cov = Coverage() cov.load() cov.annotate() """) self.cmd_executes("annotate -d dir1", """\ cov = Coverage() cov.load() cov.annotate(directory="dir1") """) self.cmd_executes("annotate -i", """\ cov = Coverage() cov.load() cov.annotate(ignore_errors=True) """) self.cmd_executes("annotate --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.annotate(omit=["fooey"]) """) self.cmd_executes("annotate --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.annotate(omit=["fooey", "booey"]) """) self.cmd_executes("annotate mod1", """\ cov = Coverage() cov.load() cov.annotate(morfs=["mod1"]) """) self.cmd_executes("annotate mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.annotate(morfs=["mod1", "mod2", "mod3"]) """) def test_combine(self) -> None: # coverage combine with args self.cmd_executes("combine datadir1", """\ cov = Coverage() cov.combine(["datadir1"], strict=True, keep=False) cov.save() """) # coverage combine, appending self.cmd_executes("combine --append datadir1", """\ cov = Coverage() cov.load() cov.combine(["datadir1"], strict=True, keep=False) cov.save() """) # coverage combine without args self.cmd_executes("combine", """\ cov = Coverage() cov.combine(None, strict=True, keep=False) cov.save() """) # coverage combine quietly self.cmd_executes("combine -q", """\ cov = Coverage(messages=False) cov.combine(None, strict=True, keep=False) cov.save() """) self.cmd_executes("combine --quiet", """\ cov = Coverage(messages=False) cov.combine(None, strict=True, keep=False) cov.save() """) self.cmd_executes("combine --data-file=foo.cov", """\ cov = Coverage(data_file="foo.cov") cov.combine(None, strict=True, keep=False) cov.save() """) def test_combine_doesnt_confuse_options_with_args(self) -> None: # https://github.com/nedbat/coveragepy/issues/385 self.cmd_executes("combine --rcfile cov.ini", """\ cov = Coverage(config_file='cov.ini') cov.combine(None, strict=True, keep=False) cov.save() """) self.cmd_executes("combine --rcfile cov.ini data1 data2/more", """\ cov = Coverage(config_file='cov.ini') cov.combine(["data1", "data2/more"], strict=True, keep=False) cov.save() """) @pytest.mark.parametrize("cmd, output", [ ("debug", "What information would you like: config, data, sys, premain, pybehave?"), ("debug foo", "Don't know what you mean by 'foo'"), ("debug sys config", "Only one topic at a time, please"), ]) def test_debug(self, cmd: str, output: str) -> None: self.cmd_help(cmd, output) def test_debug_sys(self) -> None: self.command_line("debug sys") out = self.stdout() assert "version:" in out assert "data_file:" in out def test_debug_config(self) -> None: self.command_line("debug config") out = self.stdout() assert "cover_pylib:" in out assert "skip_covered:" in out assert "skip_empty:" in out def test_debug_pybehave(self) -> None: self.command_line("debug pybehave") out = self.stdout() assert " CPYTHON:" in out assert " PYVERSION:" in out assert " pep626:" in out # Some things that shouldn't appear.. assert "typing." not in out # import from typing assert ": <" not in out # objects without a good repr # It should report PYVERSION correctly. pyversion = re_line(r" PYVERSION:", out) vtuple = ast.literal_eval(pyversion.partition(":")[-1].strip()) assert vtuple[:5] == sys.version_info def test_debug_premain(self) -> None: self.command_line("debug premain") out = self.stdout() # -- premain --------------------------------------------------- # ... many lines ... 
# _multicall : /Users/ned/cov/trunk/.tox/py39/site-packages/pluggy/_callers.py:77 # pytest_pyfunc_call : /Users/ned/cov/trunk/.tox/py39/site-packages/_pytest/python.py:183 # test_debug_premain : /Users/ned/cov/trunk/tests/test_cmdline.py:284 # command_line : /Users/ned/cov/trunk/tests/coveragetest.py:309 # command_line : /Users/ned/cov/trunk/tests/coveragetest.py:472 # command_line : /Users/ned/cov/trunk/coverage/cmdline.py:592 # do_debug : /Users/ned/cov/trunk/coverage/cmdline.py:804 lines = out.splitlines() s = re.escape(os.sep) assert lines[0].startswith("-- premain ----") assert len(lines) > 25 assert re.search(fr"{s}site-packages{s}_pytest{s}", out) assert re.search(fr"{s}site-packages{s}pluggy{s}", out) assert re.search(fr"(?m)^\s+test_debug_premain : .*{s}tests{s}test_cmdline.py:\d+$", out) assert re.search(fr"(?m)^\s+command_line : .*{s}coverage{s}cmdline.py:\d+$", out) assert re.search(fr"(?m)^\s+do_debug : .*{s}coverage{s}cmdline.py:\d+$", out) assert "do_debug : " in lines[-1] def test_erase(self) -> None: # coverage erase self.cmd_executes("erase", """\ cov = Coverage() cov.erase() """) self.cmd_executes("erase --data-file=foo.cov", """\ cov = Coverage(data_file="foo.cov") cov.erase() """) def test_version(self) -> None: # coverage --version self.cmd_help("--version", topic="version", ret=OK) def test_help_option(self) -> None: # coverage -h self.cmd_help("-h", topic="help", ret=OK) self.cmd_help("--help", topic="help", ret=OK) def test_help_command(self) -> None: self.cmd_executes("help", "show_help(topic='help')") def test_cmd_help(self) -> None: self.cmd_executes("run --help", "show_help(parser='')") self.cmd_executes_same("help run", "run --help") def test_html(self) -> None: # coverage html -d DIR [-i] [--omit DIR,...] [FILE1 FILE2 ...] self.cmd_executes("html", """\ cov = Coverage() cov.load() cov.html_report() """) self.cmd_executes("html -d dir1", """\ cov = Coverage() cov.load() cov.html_report(directory="dir1") """) self.cmd_executes("html -i", """\ cov = Coverage() cov.load() cov.html_report(ignore_errors=True) """) self.cmd_executes("html --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.html_report(omit=["fooey"]) """) self.cmd_executes("html --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.html_report(omit=["fooey", "booey"]) """) self.cmd_executes("html mod1", """\ cov = Coverage() cov.load() cov.html_report(morfs=["mod1"]) """) self.cmd_executes("html mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.html_report(morfs=["mod1", "mod2", "mod3"]) """) self.cmd_executes("html --precision=3", """\ cov = Coverage() cov.load() cov.html_report(precision=3) """) self.cmd_executes("html --title=Hello_there", """\ cov = Coverage() cov.load() cov.html_report(title='Hello_there') """) self.cmd_executes("html -q", """\ cov = Coverage(messages=False) cov.load() cov.html_report() """) self.cmd_executes("html --quiet", """\ cov = Coverage(messages=False) cov.load() cov.html_report() """) def test_json(self) -> None: # coverage json [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
self.cmd_executes("json", """\ cov = Coverage() cov.load() cov.json_report() """) self.cmd_executes("json --pretty-print", """\ cov = Coverage() cov.load() cov.json_report(pretty_print=True) """) self.cmd_executes("json --pretty-print --show-contexts", """\ cov = Coverage() cov.load() cov.json_report(pretty_print=True, show_contexts=True) """) self.cmd_executes("json -i", """\ cov = Coverage() cov.load() cov.json_report(ignore_errors=True) """) self.cmd_executes("json -o myjson.foo", """\ cov = Coverage() cov.load() cov.json_report(outfile="myjson.foo") """) self.cmd_executes("json -o -", """\ cov = Coverage() cov.load() cov.json_report(outfile="-") """) self.cmd_executes("json --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.json_report(omit=["fooey"]) """) self.cmd_executes("json --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.json_report(omit=["fooey", "booey"]) """) self.cmd_executes("json mod1", """\ cov = Coverage() cov.load() cov.json_report(morfs=["mod1"]) """) self.cmd_executes("json mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.json_report(morfs=["mod1", "mod2", "mod3"]) """) self.cmd_executes("json -q", """\ cov = Coverage(messages=False) cov.load() cov.json_report() """) self.cmd_executes("json --quiet", """\ cov = Coverage(messages=False) cov.load() cov.json_report() """) def test_lcov(self) -> None: # coverage lcov [-i] [--omit DIR,...] [FILE1 FILE2 ...] self.cmd_executes("lcov", """\ cov = Coverage() cov.load() cov.lcov_report() """) self.cmd_executes("lcov -i", """\ cov = Coverage() cov.load() cov.lcov_report(ignore_errors=True) """) self.cmd_executes("lcov -o mylcov.foo", """\ cov = Coverage() cov.load() cov.lcov_report(outfile="mylcov.foo") """) self.cmd_executes("lcov -o -", """\ cov = Coverage() cov.load() cov.lcov_report(outfile="-") """) self.cmd_executes("lcov --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.lcov_report(omit=["fooey"]) """) self.cmd_executes("lcov --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.lcov_report(omit=["fooey", "booey"]) """) self.cmd_executes("lcov -q", """\ cov = Coverage(messages=False) cov.load() cov.lcov_report() """) self.cmd_executes("lcov --quiet", """\ cov = Coverage(messages=False) cov.load() cov.lcov_report() """) def test_report(self) -> None: # coverage report [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...] 
self.cmd_executes("report", """\ cov = Coverage() cov.load() cov.report(show_missing=None) """) self.cmd_executes("report -i", """\ cov = Coverage() cov.load() cov.report(ignore_errors=True) """) self.cmd_executes("report -m", """\ cov = Coverage() cov.load() cov.report(show_missing=True) """) self.cmd_executes("report --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.report(omit=["fooey"]) """) self.cmd_executes("report --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.report(omit=["fooey", "booey"]) """) self.cmd_executes("report mod1", """\ cov = Coverage() cov.load() cov.report(morfs=["mod1"]) """) self.cmd_executes("report mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.report(morfs=["mod1", "mod2", "mod3"]) """) self.cmd_executes("report --precision=7", """\ cov = Coverage() cov.load() cov.report(precision=7) """) self.cmd_executes("report --skip-covered", """\ cov = Coverage() cov.load() cov.report(skip_covered=True) """) self.cmd_executes("report --skip-covered --no-skip-covered", """\ cov = Coverage() cov.load() cov.report(skip_covered=False) """) self.cmd_executes("report --no-skip-covered", """\ cov = Coverage() cov.load() cov.report(skip_covered=False) """) self.cmd_executes("report --skip-empty", """\ cov = Coverage() cov.load() cov.report(skip_empty=True) """) self.cmd_executes("report --contexts=foo,bar", """\ cov = Coverage() cov.load() cov.report(contexts=["foo", "bar"]) """) self.cmd_executes("report --sort=-foo", """\ cov = Coverage() cov.load() cov.report(sort='-foo') """) self.cmd_executes("report --data-file=foo.cov.2", """\ cov = Coverage(data_file="foo.cov.2") cov.load() cov.report(show_missing=None) """) self.cmd_executes("report --format=markdown", """\ cov = Coverage() cov.load() cov.report(output_format="markdown") """) def test_run(self) -> None: # coverage run [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...] # run calls coverage.erase first. self.cmd_executes("run foo.py", """\ cov = Coverage() runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) # run -a combines with an existing data file before saving. self.cmd_executes("run -a foo.py", """\ cov = Coverage() runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.load() cov.start() runner.run() cov.stop() cov.save() """) # --timid sets a flag, and program arguments get passed through. self.cmd_executes("run --timid foo.py abc 123", """\ cov = Coverage(timid=True) runner = PyRunner(['foo.py', 'abc', '123'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) # -L sets a flag, and flags for the program don't confuse us. 
self.cmd_executes("run -p -L foo.py -a -b", """\ cov = Coverage(cover_pylib=True, data_suffix=True) runner = PyRunner(['foo.py', '-a', '-b'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --branch foo.py", """\ cov = Coverage(branch=True) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --rcfile=myrc.rc foo.py", """\ cov = Coverage(config_file="myrc.rc") runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --include=pre1,pre2 foo.py", """\ cov = Coverage(include=["pre1", "pre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --omit=opre1,opre2 foo.py", """\ cov = Coverage(omit=["opre1", "opre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --include=pre1,pre2 --omit=opre1,opre2 foo.py", """\ cov = Coverage(include=["pre1", "pre2"], omit=["opre1", "opre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --source=quux,hi.there,/home/bar foo.py", """\ cov = Coverage(source=["quux", "hi.there", "/home/bar"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --concurrency=gevent foo.py", """\ cov = Coverage(concurrency=['gevent']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --concurrency=multiprocessing foo.py", """\ cov = Coverage(concurrency=['multiprocessing']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --concurrency=gevent,thread foo.py", """\ cov = Coverage(concurrency=['gevent', 'thread']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --data-file=output.coverage foo.py", """\ cov = Coverage(data_file="output.coverage") runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) def test_multiprocessing_needs_config_file(self) -> None: # You can't use command-line args to add options to multiprocessing # runs, since they won't make it to the subprocesses. You need to use a # config file. self.command_line("run --concurrency=multiprocessing --branch foo.py", ret=ERR) msg = "Options affecting multiprocessing must only be specified in a configuration file." _, err = self.stdouterr() assert msg in err assert "Remove --branch from the command line." 
in err def test_run_debug(self) -> None: self.cmd_executes("run --debug=opt1 foo.py", """\ cov = Coverage(debug=["opt1"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --debug=opt1,opt2 foo.py", """\ cov = Coverage(debug=["opt1","opt2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) def test_run_module(self) -> None: self.cmd_executes("run -m mymodule", """\ cov = Coverage() runner = PyRunner(['mymodule'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run -m mymodule -qq arg1 arg2", """\ cov = Coverage() runner = PyRunner(['mymodule', '-qq', 'arg1', 'arg2'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes("run --branch -m mymodule", """\ cov = Coverage(branch=True) runner = PyRunner(['mymodule'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """) self.cmd_executes_same("run -m mymodule", "run --module mymodule") def test_run_nothing(self) -> None: self.command_line("run", ret=ERR) assert "Nothing to do" in self.stderr() def test_run_from_config(self) -> None: options = {"run:command_line": "myprog.py a 123 'a quoted thing' xyz"} self.cmd_executes("run", """\ cov = Coverage() runner = PyRunner(['myprog.py', 'a', '123', 'a quoted thing', 'xyz'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, options=options, ) def test_run_module_from_config(self) -> None: self.cmd_executes("run", """\ cov = Coverage() runner = PyRunner(['mymodule', 'thing1', 'thing2'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, options={"run:command_line": "-m mymodule thing1 thing2"}, ) def test_run_from_config_but_empty(self) -> None: self.cmd_executes("run", """\ cov = Coverage() show_help('Nothing to do.') """, ret=ERR, options={"run:command_line": ""}, ) def test_run_dashm_only(self) -> None: self.cmd_executes("run -m", """\ cov = Coverage() show_help('No module specified for -m') """, ret=ERR, ) self.cmd_executes("run -m", """\ cov = Coverage() show_help('No module specified for -m') """, ret=ERR, options={"run:command_line": "myprog.py"}, ) def test_cant_append_parallel(self) -> None: self.command_line("run --append --parallel-mode foo.py", ret=ERR) assert "Can't append to data files in parallel mode." in self.stderr() def test_xml(self) -> None: # coverage xml [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
self.cmd_executes("xml", """\ cov = Coverage() cov.load() cov.xml_report() """) self.cmd_executes("xml -i", """\ cov = Coverage() cov.load() cov.xml_report(ignore_errors=True) """) self.cmd_executes("xml -o myxml.foo", """\ cov = Coverage() cov.load() cov.xml_report(outfile="myxml.foo") """) self.cmd_executes("xml -o -", """\ cov = Coverage() cov.load() cov.xml_report(outfile="-") """) self.cmd_executes("xml --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.xml_report(omit=["fooey"]) """) self.cmd_executes("xml --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.xml_report(omit=["fooey", "booey"]) """) self.cmd_executes("xml mod1", """\ cov = Coverage() cov.load() cov.xml_report(morfs=["mod1"]) """) self.cmd_executes("xml mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.xml_report(morfs=["mod1", "mod2", "mod3"]) """) self.cmd_executes("xml -q", """\ cov = Coverage(messages=False) cov.load() cov.xml_report() """) self.cmd_executes("xml --quiet", """\ cov = Coverage(messages=False) cov.load() cov.xml_report() """) def test_no_arguments_at_all(self) -> None: self.cmd_help("", topic="minimum_help", ret=OK) def test_bad_command(self) -> None: self.cmd_help("xyzzy", "Unknown command: 'xyzzy'") class CmdLineWithFilesTest(BaseCmdLineTest): """Test the command line in ways that need temp files.""" run_in_temp_dir = True def test_debug_data(self) -> None: data = self.make_data_file( lines={ "file1.py": range(1, 18), "file2.py": range(1, 24), }, file_tracers={"file1.py": "a_plugin"}, ) self.command_line("debug data") assert self.stdout() == textwrap.dedent(f"""\ -- data ------------------------------------------------------ path: {data.data_filename()} has_arcs: False 2 files: file1.py: 17 lines [a_plugin] file2.py: 23 lines """) def test_debug_data_with_no_data_file(self) -> None: data = self.make_data_file() self.command_line("debug data") assert self.stdout() == textwrap.dedent(f"""\ -- data ------------------------------------------------------ path: {data.data_filename()} No data collected: file doesn't exist """) def test_debug_combinable_data(self) -> None: data1 = self.make_data_file(lines={"file1.py": range(1, 18), "file2.py": [1]}) data2 = self.make_data_file(suffix="123", lines={"file2.py": range(1, 10)}) self.command_line("debug data") assert self.stdout() == textwrap.dedent(f"""\ -- data ------------------------------------------------------ path: {data1.data_filename()} has_arcs: False 2 files: file1.py: 17 lines file2.py: 1 line ----- path: {data2.data_filename()} has_arcs: False 1 file: file2.py: 9 lines """) class CmdLineStdoutTest(BaseCmdLineTest): """Test the command line with real stdout output.""" def test_minimum_help(self) -> None: self.command_line("") out = self.stdout() assert "Code coverage for Python" in out assert out.count("\n") < 4 def test_version(self) -> None: self.command_line("--version") out = self.stdout() assert "ersion " in out if testenv.C_TRACER or testenv.SYS_MON: assert "with C extension" in out else: assert "without C extension" in out assert out.count("\n") < 4 def test_help_contains_command_name(self) -> None: # Command name should be present in help output. 
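# --- Editorial sketch (not part of the original test file) -----------------
# The two command-name tests below check what program name shows up in the
# help text.  Roughly speaking (a simplified illustration, not the exact
# logic in coverage/cmdline.py): a plain script contributes its basename,
# while a package run via its __main__.py contributes the package
# directory's name instead.
import os.path as _osp_demo

def _guess_command_name_sketch(argv0: str) -> str:
    name = _osp_demo.basename(argv0)
    if name == "__main__.py":
        name = _osp_demo.basename(_osp_demo.dirname(argv0))
    return name

assert _guess_command_name_sketch("lorem/ipsum/dolor") == "dolor"
assert _guess_command_name_sketch("lorem/ipsum/dolor/__main__.py") == "dolor"
# ----------------------------------------------------------------------------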
        fake_command_path = os_sep("lorem/ipsum/dolor")
        expected_command_name = "dolor"
        fake_argv = [fake_command_path, "sit", "amet"]
        with mock.patch.object(sys, 'argv', new=fake_argv):
            self.command_line("help")
        out = self.stdout()
        assert expected_command_name in out

    def test_help_contains_command_name_from_package(self) -> None:
        # Command package name should be present in help output.
        #
        # When the main module is actually a package's `__main__` module, the resulting command line
        # has the `__main__.py` file's path as the command name. Instead, the command name should
        # be derived from the package name.
        fake_command_path = os_sep("lorem/ipsum/dolor/__main__.py")
        expected_command_name = "dolor"
        fake_argv = [fake_command_path, "sit", "amet"]
        with mock.patch.object(sys, 'argv', new=fake_argv):
            self.command_line("help")
        out = self.stdout()
        assert expected_command_name in out

    def test_help(self) -> None:
        self.command_line("help")
        lines = self.stdout().splitlines()
        assert len(lines) > 10
        assert lines[-1] == f"Full documentation is at {__url__}"

    def test_cmd_help(self) -> None:
        self.command_line("help run")
        out = self.stdout()
        lines = out.splitlines()
        assert "" in lines[0]
        assert "--timid" in out
        assert len(lines) > 20
        assert lines[-1] == f"Full documentation is at {__url__}"

    def test_unknown_topic(self) -> None:
        # Should probably be an ERR return, but meh.
        self.command_line("help foobar")
        lines = self.stdout().splitlines()
        assert lines[0] == "Don't know topic 'foobar'"
        assert lines[-1] == f"Full documentation is at {__url__}"

    def test_error(self) -> None:
        self.command_line("fooey kablooey", ret=ERR)
        err = self.stderr()
        assert "fooey" in err
        assert "help" in err

    def test_option_error(self) -> None:
        self.command_line("run --fooey", ret=ERR)
        err = self.stderr()
        assert "fooey" in err
        assert "help" in err

    def test_doc_url(self) -> None:
        assert __url__.startswith("https://coverage.readthedocs.io")


class CmdMainTest(CoverageTest):
    """Tests of coverage.cmdline.main(), using mocking for isolation."""

    run_in_temp_dir = False

    class CoverageScriptStub:
        """A stub for coverage.cmdline.CoverageScript, used by CmdMainTest."""

        def command_line(self, argv: list[str]) -> int:
            """Stub for command_line, the arg determines what it will do."""
            if argv[0] == 'hello':
                print("Hello, world!")
            elif argv[0] == 'raise':
                try:
                    raise RuntimeError("oh noes!")
                except:
                    raise _ExceptionDuringRun(*sys.exc_info()) from None
            elif argv[0] == 'internalraise':
                raise ValueError("coverage is broken")
            elif argv[0] == 'exit':
                sys.exit(23)
            else:
                raise AssertionError(f"Bad CoverageScriptStub: {argv!r}")
            return 0

    def setUp(self) -> None:
        super().setUp()
        old_CoverageScript = coverage.cmdline.CoverageScript
        coverage.cmdline.CoverageScript = self.CoverageScriptStub  # type: ignore
        self.addCleanup(setattr, coverage.cmdline, 'CoverageScript', old_CoverageScript)

    def test_normal(self) -> None:
        ret = coverage.cmdline.main(['hello'])
        assert ret == 0
        assert self.stdout() == "Hello, world!\n"

    def test_raise(self) -> None:
        ret = coverage.cmdline.main(['raise'])
        assert ret == 1
        out, err = self.stdouterr()
        assert out == ""
        print(err)
        err_parts = err.splitlines(keepends=True)
        assert err_parts[0] == 'Traceback (most recent call last):\n'
        assert ' raise RuntimeError("oh noes!")\n' in err_parts
        assert err_parts[-1] == 'RuntimeError: oh noes!\n'

    def test_internalraise(self) -> None:
        with pytest.raises(ValueError, match="coverage is broken"):
            coverage.cmdline.main(['internalraise'])

    def test_exit(self) -> None:
        ret = coverage.cmdline.main(['exit'])
        assert ret == 23


class
CoverageReportingFake: """A fake Coverage.coverage test double for FailUnderTest methods.""" # pylint: disable=missing-function-docstring def __init__( self, report_result: float, html_result: float = 0, xml_result: float = 0, json_report: float = 0, lcov_result: float = 0, ) -> None: self.config = CoverageConfig() self.report_result = report_result self.html_result = html_result self.xml_result = xml_result self.json_result = json_report self.lcov_result = lcov_result def set_option(self, optname: str, optvalue: TConfigValueIn) -> None: self.config.set_option(optname, optvalue) def get_option(self, optname: str) -> TConfigValueOut: return self.config.get_option(optname) def load(self) -> None: pass def report(self, *args_unused: Any, **kwargs_unused: Any) -> float: return self.report_result def html_report(self, *args_unused: Any, **kwargs_unused: Any) -> float: return self.html_result def xml_report(self, *args_unused: Any, **kwargs_unused: Any) -> float: return self.xml_result def json_report(self, *args_unused: Any, **kwargs_unused: Any) -> float: return self.json_result def lcov_report(self, *args_unused: Any, **kwargs_unused: Any) -> float: return self.lcov_result class FailUnderTest(CoverageTest): """Tests of the --fail-under handling in cmdline.py.""" @pytest.mark.parametrize("results, fail_under, cmd, ret", [ # Command-line switch properly checks the result of reporting functions. ((20, 30, 40, 50, 60), None, "report --fail-under=19", 0), ((20, 30, 40, 50, 60), None, "report --fail-under=21", 2), ((20, 30, 40, 50, 60), None, "html --fail-under=29", 0), ((20, 30, 40, 50, 60), None, "html --fail-under=31", 2), ((20, 30, 40, 50, 60), None, "xml --fail-under=39", 0), ((20, 30, 40, 50, 60), None, "xml --fail-under=41", 2), ((20, 30, 40, 50, 60), None, "json --fail-under=49", 0), ((20, 30, 40, 50, 60), None, "json --fail-under=51", 2), ((20, 30, 40, 50, 60), None, "lcov --fail-under=59", 0), ((20, 30, 40, 50, 60), None, "lcov --fail-under=61", 2), # Configuration file setting properly checks the result of reporting. ((20, 30, 40, 50, 60), 19, "report", 0), ((20, 30, 40, 50, 60), 21, "report", 2), ((20, 30, 40, 50, 60), 29, "html", 0), ((20, 30, 40, 50, 60), 31, "html", 2), ((20, 30, 40, 50, 60), 39, "xml", 0), ((20, 30, 40, 50, 60), 41, "xml", 2), ((20, 30, 40, 50, 60), 49, "json", 0), ((20, 30, 40, 50, 60), 51, "json", 2), ((20, 30, 40, 50, 60), 59, "lcov", 0), ((20, 30, 40, 50, 60), 61, "lcov", 2), # Command-line overrides configuration. 
((20, 30, 40, 50, 60), 19, "report --fail-under=21", 2), ]) def test_fail_under( self, results: tuple[float, float, float, float, float], fail_under: float | None, cmd: str, ret: int, ) -> None: cov = CoverageReportingFake(*results) if fail_under is not None: cov.set_option("report:fail_under", fail_under) with mock.patch("coverage.cmdline.Coverage", lambda *a,**kw: cov): self.command_line(cmd, ret) @pytest.mark.parametrize("result, cmd, ret, msg", [ (20.5, "report --fail-under=20.4 --precision=1", 0, ""), (20.5, "report --fail-under=20.6 --precision=1", 2, "Coverage failure: total of 20.5 is less than fail-under=20.6\n"), (20.12345, "report --fail-under=20.1235 --precision=5", 2, "Coverage failure: total of 20.12345 is less than fail-under=20.12350\n"), (20.12339, "report --fail-under=20.1234 --precision=4", 0, ""), ]) def test_fail_under_with_precision(self, result: float, cmd: str, ret: int, msg: str) -> None: cov = CoverageReportingFake(report_result=result) with mock.patch("coverage.cmdline.Coverage", lambda *a,**kw: cov): self.command_line(cmd, ret) assert self.stdout() == msg ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_collector.py0000644000175100001770000000313200000000000020667 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests of coverage/collector.py and other collectors.""" from __future__ import annotations import os.path import coverage from tests.coveragetest import CoverageTest from tests.helpers import CheckUniqueFilenames class CollectorTest(CoverageTest): """Test specific aspects of the collection process.""" def test_should_trace_cache(self) -> None: # The tracers should only invoke should_trace once for each file name. # Make some files that invoke each other. self.make_file("f1.py", """\ def f1(x, f): return f(x) """) self.make_file("f2.py", """\ import f1 def func(x): return f1.f1(x, otherfunc) def otherfunc(x): return x*x for i in range(10): func(i) """) # Trace one file, but not the other. CheckUniqueFilenames will assert # that _should_trace hasn't been called twice for the same file. cov = coverage.Coverage(include=["f1.py"]) should_trace_hook = CheckUniqueFilenames.hook(cov, '_should_trace') # Import the Python file, executing it. self.start_import_stop(cov, "f2") # Double-check that our files were checked. 
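# --- Editorial sketch (not part of the original test file) -----------------
# The FailUnderTest precision cases above (e.g. a total of 20.12339 passing a
# fail-under of 20.1234 at precision=4) make sense if the reported total is
# rounded to the configured precision before the comparison.  A rough
# stand-in for that check, for illustration only; the real logic lives in
# coverage's reporting code and may differ in detail:
def _rough_should_fail_under(total: float, fail_under: float, precision: int) -> bool:
    return round(total, precision) < fail_under

assert _rough_should_fail_under(20.5, 20.6, 1) is True          # exit status 2
assert _rough_should_fail_under(20.12339, 20.1234, 4) is False  # rounds up to the bar
# ----------------------------------------------------------------------------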
abs_files = {os.path.abspath(f) for f in should_trace_hook.filenames} assert os.path.abspath("f1.py") in abs_files assert os.path.abspath("f2.py") in abs_files ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_concurrency.py0000644000175100001770000006450200000000000021243 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for concurrency libraries.""" from __future__ import annotations import glob import multiprocessing import os import pathlib import random import re import sys import threading import time from types import ModuleType from typing import Iterable from flaky import flaky import pytest import coverage from coverage import env from coverage.data import line_counts from coverage.exceptions import ConfigError from coverage.files import abs_file from coverage.misc import import_local_file from tests import testenv from tests.coveragetest import CoverageTest from tests.helpers import flaky_method # These libraries aren't always available, we'll skip tests if they aren't. try: import eventlet except ImportError: eventlet = None try: import gevent except ImportError: gevent = None try: import greenlet except ImportError: greenlet = None def measurable_line(l: str) -> bool: """Is this a line of code coverage will measure? Not blank, not a comment, and not "else" """ l = l.strip() if not l: return False if l.startswith('#'): return False if l.startswith('else:'): return False return True def line_count(s: str) -> int: """How many measurable lines are in `s`?""" return len(list(filter(measurable_line, s.splitlines()))) def print_simple_annotation(code: str, linenos: Iterable[int]) -> None: """Print the lines in `code` with X for each line number in `linenos`.""" for lineno, line in enumerate(code.splitlines(), start=1): print(" {} {}".format("X" if lineno in linenos else " ", line)) class LineCountTest(CoverageTest): """Test the helpers here.""" run_in_temp_dir = False def test_line_count(self) -> None: CODE = """ # Hey there! x = 1 if x: print("hello") else: print("bye") print("done") """ assert line_count(CODE) == 5 # The code common to all the concurrency models. SUM_RANGE_Q = """ # Above this will be imports defining queue and threading. class Producer(threading.Thread): def __init__(self, limit, q): threading.Thread.__init__(self) self.limit = limit self.q = q def run(self): for i in range(self.limit): self.q.put(i) self.q.put(None) class Consumer(threading.Thread): def __init__(self, q, qresult): threading.Thread.__init__(self) self.q = q self.qresult = qresult def run(self): sum = 0 while "no peephole".upper(): i = self.q.get() if i is None: break sum += i self.qresult.put(sum) def sum_range(limit): q = queue.Queue() qresult = queue.Queue() c = Consumer(q, qresult) p = Producer(limit, q) c.start() p.start() p.join() c.join() return qresult.get() # Below this will be something using sum_range. """ PRINT_SUM_RANGE = """ print(sum_range({QLIMIT})) """ # Import the things to use threads. THREAD = """ import threading import queue """ # Import the things to use eventlet. EVENTLET = """ import eventlet.green.threading as threading import eventlet.queue as queue """ # Import the things to use gevent. 
GEVENT = """ from gevent import monkey monkey.patch_thread() import threading import gevent.queue as queue """ # Uncomplicated code that doesn't use any of the concurrency stuff, to test # the simple case under each of the regimes. SIMPLE = """ total = 0 for i in range({QLIMIT}): total += i print(total) """ def cant_trace_msg(concurrency: str, the_module: ModuleType | None) -> str | None: """What might coverage.py say about a concurrency setting and imported module?""" # In the concurrency choices, "multiprocessing" doesn't count, so remove it. if "multiprocessing" in concurrency: parts = concurrency.split(",") parts.remove("multiprocessing") concurrency = ",".join(parts) if the_module is None: # We don't even have the underlying module installed, we expect # coverage to alert us to this fact. expected_out = ( f"Couldn't trace with concurrency={concurrency}, the module isn't installed.\n" ) elif testenv.C_TRACER or concurrency == "thread" or concurrency == "": expected_out = None else: expected_out = ( f"Can't support concurrency={concurrency} with PyTracer, only threads are supported.\n" ) return expected_out class ConcurrencyTest(CoverageTest): """Tests of the concurrency support in coverage.py.""" QLIMIT = 1000 def try_some_code( self, code: str, concurrency: str, the_module: ModuleType, expected_out: str | None = None, ) -> None: """Run some concurrency testing code and see that it was all covered. `code` is the Python code to execute. `concurrency` is the name of the concurrency regime to test it under. `the_module` is the imported module that must be available for this to work at all. `expected_out` is the text we expect the code to produce. """ self.make_file("try_it.py", code) cmd = f"coverage run --concurrency={concurrency} try_it.py" out = self.run_command(cmd) expected_cant_trace = cant_trace_msg(concurrency, the_module) if expected_cant_trace is not None: assert out == expected_cant_trace pytest.skip(f"Can't test: {expected_cant_trace}") else: # We can fully measure the code if we are using the C tracer, which # can support all the concurrency, or if we are using threads. if expected_out is None: expected_out = "%d\n" % (sum(range(self.QLIMIT))) print(code) assert out == expected_out # Read the coverage file and see that try_it.py has all its lines # executed. 
data = coverage.CoverageData(".coverage") data.read() # If the test fails, it's helpful to see this info: fname = abs_file("try_it.py") linenos = data.lines(fname) assert linenos is not None print(f"{len(linenos)}: {linenos}") print_simple_annotation(code, linenos) lines = line_count(code) assert line_counts(data)['try_it.py'] == lines def test_threads(self) -> None: code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT) self.try_some_code(code, "thread", threading) def test_threads_simple_code(self) -> None: code = SIMPLE.format(QLIMIT=self.QLIMIT) self.try_some_code(code, "thread", threading) def test_eventlet(self) -> None: code = (EVENTLET + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT) self.try_some_code(code, "eventlet", eventlet) def test_eventlet_simple_code(self) -> None: code = SIMPLE.format(QLIMIT=self.QLIMIT) self.try_some_code(code, "eventlet", eventlet) # https://github.com/nedbat/coveragepy/issues/663 @pytest.mark.skipif(env.WINDOWS, reason="gevent has problems on Windows: #663") def test_gevent(self) -> None: code = (GEVENT + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT) self.try_some_code(code, "gevent", gevent) def test_gevent_simple_code(self) -> None: code = SIMPLE.format(QLIMIT=self.QLIMIT) self.try_some_code(code, "gevent", gevent) def test_greenlet(self) -> None: GREENLET = """\ from greenlet import greenlet def test1(x, y): z = gr2.switch(x+y) print(z) def test2(u): print(u) gr1.switch(42) gr1 = greenlet(test1) gr2 = greenlet(test2) gr1.switch("hello", " world") """ self.try_some_code(GREENLET, "greenlet", greenlet, "hello world\n42\n") def test_greenlet_simple_code(self) -> None: code = SIMPLE.format(QLIMIT=self.QLIMIT) self.try_some_code(code, "greenlet", greenlet) def test_bug_330(self) -> None: BUG_330 = """\ from weakref import WeakKeyDictionary import eventlet def do(): eventlet.sleep(.01) gts = WeakKeyDictionary() for _ in range(100): gts[eventlet.spawn(do)] = True eventlet.sleep(.005) eventlet.sleep(.1) print(len(gts)) """ self.try_some_code(BUG_330, "eventlet", eventlet, "0\n") @flaky_method(max_runs=3) # Sometimes a test fails due to inherent randomness. Try more times. 
def test_threads_with_gevent(self) -> None: self.make_file("both.py", """\ import queue import threading import gevent def work1(q): q.put(1) def gwork(q): gevent.spawn(work1, q).join() q.put(None) print("done") q = queue.Queue() t = threading.Thread(target=gwork, args=(q,)) t.start() t.join() answer = q.get() assert answer == 1 """) out = self.run_command("coverage run --concurrency=thread,gevent both.py") if gevent is None: assert out == ( "Couldn't trace with concurrency=gevent, the module isn't installed.\n" ) pytest.skip("Can't run test without gevent installed.") if not testenv.C_TRACER: assert out == ( "Can't support concurrency=gevent with PyTracer, only threads are supported.\n" ) pytest.skip("Can't run gevent with PyTracer") assert out == "done\n" out = self.run_command("coverage report -m") last_line = self.squeezed_lines(out)[-1] assert re.search(r"TOTAL \d+ 0 100%", last_line) def test_bad_concurrency(self) -> None: with pytest.raises(ConfigError, match="Unknown concurrency choices: nothing"): self.command_line("run --concurrency=nothing prog.py") def test_bad_concurrency_in_config(self) -> None: self.make_file(".coveragerc", "[run]\nconcurrency = nothing\n") with pytest.raises(ConfigError, match="Unknown concurrency choices: nothing"): self.command_line("run prog.py") def test_no_multiple_light_concurrency(self) -> None: with pytest.raises(ConfigError, match="Conflicting concurrency settings: eventlet, gevent"): self.command_line("run --concurrency=gevent,eventlet prog.py") def test_no_multiple_light_concurrency_in_config(self) -> None: self.make_file(".coveragerc", "[run]\nconcurrency = gevent, eventlet\n") with pytest.raises(ConfigError, match="Conflicting concurrency settings: eventlet, gevent"): self.command_line("run prog.py") def test_multiprocessing_needs_config_file(self) -> None: with pytest.raises(ConfigError, match="multiprocessing requires a configuration file"): self.command_line("run --concurrency=multiprocessing prog.py") class WithoutConcurrencyModuleTest(CoverageTest): """Tests of what happens if the requested concurrency isn't installed.""" @pytest.mark.parametrize("module", ["eventlet", "gevent", "greenlet"]) def test_missing_module(self, module: str) -> None: self.make_file("prog.py", "a = 1") sys.modules[module] = None # type: ignore[assignment] msg = f"Couldn't trace with concurrency={module}, the module isn't installed." with pytest.raises(ConfigError, match=msg): self.command_line(f"run --concurrency={module} prog.py") SQUARE_OR_CUBE_WORK = """ def work(x): # Use different lines in different subprocesses. if x % 2: y = x*x else: y = x*x*x return y """ SUM_RANGE_WORK = """ def work(x): return sum_range((x+1)*100) """ MULTI_CODE = """ # Above this will be a definition of work(). import multiprocessing import os import time import sys def process_worker_main(args): # Need to pause, or the tasks go too quickly, and some processes # in the pool don't get any work, and then don't record data. ret = work(*args) time.sleep(0.1) return os.getpid(), ret if __name__ == "__main__": # pragma: no branch # This if is on a single line so we can get 100% coverage # even if we have no arguments. 
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) pool = multiprocessing.Pool({NPROCS}) inputs = [(x,) for x in range({UPTO})] outputs = pool.imap_unordered(process_worker_main, inputs) pids = set() total = 0 for pid, sq in outputs: pids.add(pid) total += sq print(f"{{len(pids)}} pids, {{total = }}") pool.close() pool.join() """ @pytest.fixture(params=["fork", "spawn"], name="start_method") def start_method_fixture(request: pytest.FixtureRequest) -> str: """Parameterized fixture to choose the start_method for multiprocessing.""" start_method: str = request.param if start_method not in multiprocessing.get_all_start_methods(): # Windows doesn't support "fork". pytest.skip(f"start_method={start_method} not supported here") return start_method #@flaky(max_runs=30) # Sometimes a test fails due to inherent randomness. Try more times. class MultiprocessingTest(CoverageTest): """Test support of the multiprocessing module.""" def try_multiprocessing_code( self, code: str, expected_out: str | None, the_module: ModuleType, nprocs: int, start_method: str, concurrency: str = "multiprocessing", args: str = "", ) -> None: """Run code using multiprocessing, it should produce `expected_out`.""" self.make_file("multi.py", code) self.make_file(".coveragerc", f"""\ [run] concurrency = {concurrency} source = . """) cmd = f"coverage run {args} multi.py {start_method}" out = self.run_command(cmd) expected_cant_trace = cant_trace_msg(concurrency, the_module) if expected_cant_trace is not None: print(out) assert out == expected_cant_trace pytest.skip(f"Can't test: {expected_cant_trace}") else: assert out.rstrip() == expected_out assert len(glob.glob(".coverage.*")) == nprocs + 1 out = self.run_command("coverage combine") out_lines = out.splitlines() assert len(out_lines) == nprocs + 1 assert all( re.fullmatch( r"(Combined data file|Skipping duplicate data) \.coverage\..*\.\d+\.X\w{6}x", line, ) for line in out_lines ) assert len(glob.glob(".coverage.*")) == 0 out = self.run_command("coverage report -m") last_line = self.squeezed_lines(out)[-1] assert re.search(r"TOTAL \d+ 0 100%", last_line) def test_multiprocessing_simple(self, start_method: str) -> None: nprocs = 3 upto = 30 code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto) total = sum(x*x if x%2 else x*x*x for x in range(upto)) expected_out = f"{nprocs} pids, {total = }" self.try_multiprocessing_code( code, expected_out, threading, nprocs, start_method=start_method, ) def test_multiprocessing_append(self, start_method: str) -> None: nprocs = 3 upto = 30 code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto) total = sum(x*x if x%2 else x*x*x for x in range(upto)) expected_out = f"{nprocs} pids, total = {total}" self.try_multiprocessing_code( code, expected_out, threading, nprocs, args="--append", start_method=start_method, ) def test_multiprocessing_and_gevent(self, start_method: str) -> None: nprocs = 3 upto = 30 code = ( SUM_RANGE_WORK + EVENTLET + SUM_RANGE_Q + MULTI_CODE ).format(NPROCS=nprocs, UPTO=upto) total = sum(sum(range((x + 1) * 100)) for x in range(upto)) expected_out = f"{nprocs} pids, total = {total}" self.try_multiprocessing_code( code, expected_out, eventlet, nprocs, concurrency="multiprocessing,eventlet", start_method=start_method, ) def test_multiprocessing_with_branching(self, start_method: str) -> None: nprocs = 3 upto = 30 code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto) total = sum(x*x if x%2 else x*x*x for x in range(upto)) expected_out = f"{nprocs} 
pids, total = {total}" self.make_file("multi.py", code) self.make_file("multi.rc", """\ [run] concurrency = multiprocessing branch = True omit = */site-packages/* """) out = self.run_command(f"coverage run --rcfile=multi.rc multi.py {start_method}") assert out.rstrip() == expected_out out = self.run_command("coverage combine -q") # sneak in a test of -q assert out == "" out = self.run_command("coverage report -m") last_line = self.squeezed_lines(out)[-1] assert re.search(r"TOTAL \d+ 0 \d+ 0 100%", last_line) def test_multiprocessing_bootstrap_error_handling(self) -> None: # An exception during bootstrapping will be reported. self.make_file("multi.py", """\ import multiprocessing if __name__ == "__main__": with multiprocessing.Manager(): pass """) self.make_file(".coveragerc", """\ [run] concurrency = multiprocessing _crash = _bootstrap """) out = self.run_command("coverage run multi.py") assert "Exception during multiprocessing bootstrap init" in out assert "RuntimeError: Crashing because called by _bootstrap" in out def test_bug_890(self) -> None: # chdir in multiprocessing shouldn't keep us from finding the # .coveragerc file. self.make_file("multi.py", """\ import multiprocessing, os, os.path if __name__ == "__main__": if not os.path.exists("./tmp"): os.mkdir("./tmp") os.chdir("./tmp") with multiprocessing.Manager(): pass print("ok") """) self.make_file(".coveragerc", """\ [run] concurrency = multiprocessing """) out = self.run_command("coverage run multi.py") assert out.splitlines()[-1] == "ok" @pytest.mark.skipif(not testenv.SETTRACE_CORE, reason="gettrace is not supported with this core.") def test_coverage_stop_in_threads() -> None: has_started_coverage = [] has_stopped_coverage = [] def run_thread() -> None: # pragma: nested """Check that coverage is stopping properly in threads.""" deadline = time.time() + 5 ident = threading.current_thread().ident if sys.gettrace() is not None: has_started_coverage.append(ident) while sys.gettrace() is not None: # Wait for coverage to stop time.sleep(0.01) if time.time() > deadline: return has_stopped_coverage.append(ident) cov = coverage.Coverage() with cov.collect(): t = threading.Thread(target=run_thread) t.start() time.sleep(0.1) t.join() assert has_started_coverage == [t.ident] assert has_stopped_coverage == [t.ident] def test_thread_safe_save_data(tmp_path: pathlib.Path) -> None: # Non-regression test for: https://github.com/nedbat/coveragepy/issues/581 # Create some Python modules and put them in the path modules_dir = tmp_path / "test_modules" modules_dir.mkdir() module_names = [f"m{i:03d}" for i in range(1000)] for module_name in module_names: (modules_dir / (module_name + ".py")).write_text("def f(): pass\n") # Shared variables for threads should_run = [True] imported = [] old_dir = os.getcwd() os.chdir(modules_dir) try: # Make sure that all dummy modules can be imported. for module_name in module_names: import_local_file(module_name) def random_load() -> None: # pragma: nested """Import modules randomly to stress coverage.""" while should_run[0]: module_name = random.choice(module_names) mod = import_local_file(module_name) mod.f() imported.append(mod) # Spawn some threads with coverage enabled and attempt to read the # results right after stopping coverage collection with the threads # still running. 
duration = 0.01 for _ in range(3): cov = coverage.Coverage() with cov.collect(): threads = [threading.Thread(target=random_load) for _ in range(10)] should_run[0] = True for t in threads: t.start() time.sleep(duration) # The following call used to crash with running background threads. cov.get_data() # Stop the threads should_run[0] = False for t in threads: t.join() if (not imported) and duration < 10: # pragma: only failure duration *= 2 finally: os.chdir(old_dir) should_run[0] = False @pytest.mark.skipif(env.WINDOWS, reason="SIGTERM doesn't work the same on Windows") @flaky(max_runs=3) # Sometimes a test fails due to inherent randomness. Try more times. class SigtermTest(CoverageTest): """Tests of our handling of SIGTERM.""" @pytest.mark.parametrize("sigterm", [False, True]) def test_sigterm_multiprocessing_saves_data(self, sigterm: bool) -> None: # A terminated process should save its coverage data. self.make_file("clobbered.py", """\ import multiprocessing import time def subproc(x): if x.value == 3: print("THREE", flush=True) # line 6, missed else: print("NOT THREE", flush=True) x.value = 0 time.sleep(60) if __name__ == "__main__": print("START", flush=True) x = multiprocessing.Value("L", 1) proc = multiprocessing.Process(target=subproc, args=(x,)) proc.start() while x.value != 0: time.sleep(.05) proc.terminate() print("END", flush=True) """) self.make_file(".coveragerc", """\ [run] parallel = True concurrency = multiprocessing """ + ("sigterm = true" if sigterm else ""), ) out = self.run_command("coverage run clobbered.py") # Under Linux, things go wrong. Does that matter? if env.LINUX and "assert self._collectors" in out: lines = out.splitlines(True) out = "".join(lines[:3]) assert out == "START\nNOT THREE\nEND\n" self.run_command("coverage combine") out = self.run_command("coverage report -m") if sigterm: expected = "clobbered.py 17 1 94% 6" else: expected = "clobbered.py 17 5 71% 5-10" assert self.squeezed_lines(out)[2] == expected def test_sigterm_threading_saves_data(self) -> None: # A terminated process should save its coverage data. self.make_file("handler.py", """\ import os, signal print("START", flush=True) print("SIGTERM", flush=True) os.kill(os.getpid(), signal.SIGTERM) print("NOT HERE", flush=True) """) self.make_file(".coveragerc", """\ [run] # The default concurrency option. concurrency = thread sigterm = true """) out = self.run_command("coverage run handler.py") out_lines = out.splitlines() assert len(out_lines) in [2, 3] assert out_lines[:2] == ["START", "SIGTERM"] if len(out_lines) == 3: assert out_lines[2] == "Terminated" out = self.run_command("coverage report -m") expected = "handler.py 5 1 80% 6" assert self.squeezed_lines(out)[2] == expected def test_sigterm_still_runs(self) -> None: # A terminated process still runs its own SIGTERM handler. 
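# The SIGTERM handling exercised by this test class is driven by the documented
# [run] sigterm setting, combined with multiprocessing measurement. A minimal
# sketch of the configuration these tests rely on:
#
#     # .coveragerc
#     [run]
#     parallel = True
#     concurrency = multiprocessing
#     sigterm = True
#
# With sigterm enabled, a subprocess that is terminated still saves its
# coverage data, and (as checked just below) its own SIGTERM handler still runs.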
self.make_file("handler.py", """\ import multiprocessing import signal import time def subproc(x): print("START", flush=True) def on_sigterm(signum, frame): print("SIGTERM", flush=True) signal.signal(signal.SIGTERM, on_sigterm) x.value = 0 time.sleep(.1) print("END", flush=True) if __name__ == "__main__": x = multiprocessing.Value("L", 1) proc = multiprocessing.Process(target=subproc, args=(x,)) proc.start() while x.value != 0: time.sleep(.02) proc.terminate() """) self.make_file(".coveragerc", """\ [run] parallel = True concurrency = multiprocessing sigterm = True """) out = self.run_command("coverage run handler.py") assert out == "START\nSIGTERM\nEND\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_config.py0000644000175100001770000007575500000000000020172 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Test the config file handling for coverage.py""" from __future__ import annotations from unittest import mock import pytest import coverage from coverage import Coverage, env from coverage.config import HandyConfigParser from coverage.exceptions import ConfigError, CoverageWarning from coverage.tomlconfig import TomlConfigParser from coverage.types import FilePathClasses, FilePathType from tests.coveragetest import CoverageTest, UsingModulesMixin class ConfigTest(CoverageTest): """Tests of the different sources of configuration settings.""" def test_default_config(self) -> None: # Just constructing a coverage() object gets the right defaults. cov = coverage.Coverage() assert not cov.config.timid assert not cov.config.branch assert cov.config.data_file == ".coverage" def test_arguments(self) -> None: # Arguments to the constructor are applied to the configuration. cov = coverage.Coverage(timid=True, data_file="fooey.dat", concurrency="multiprocessing") assert cov.config.timid assert not cov.config.branch assert cov.config.data_file == "fooey.dat" assert cov.config.concurrency == ["multiprocessing"] def test_config_file(self) -> None: # A .coveragerc file will be read into the configuration. self.make_file(".coveragerc", """\ # This is just a bogus .rc file for testing. [run] timid = True data_file = .hello_kitty.data """) cov = coverage.Coverage() assert cov.config.timid assert not cov.config.branch assert cov.config.data_file == ".hello_kitty.data" @pytest.mark.parametrize("file_class", FilePathClasses) def test_named_config_file(self, file_class: FilePathType) -> None: # You can name the config file what you like. self.make_file("my_cov.ini", """\ [run] timid = True ; I wouldn't really use this as a data file... data_file = delete.me """) cov = coverage.Coverage(config_file=file_class("my_cov.ini")) assert cov.config.timid assert not cov.config.branch assert cov.config.data_file == "delete.me" def test_toml_config_file(self) -> None: # A pyproject.toml file will be read into the configuration. self.make_file("pyproject.toml", """\ # This is just a bogus toml file for testing. 
            [tool.somethingelse]
            authors = ["Joe D'Ávila "]
            [tool.coverage.run]
            concurrency = ["a", "b"]
            timid = true
            data_file = ".hello_kitty.data"
            plugins = ["plugins.a_plugin"]
            [tool.coverage.report]
            precision = 3
            fail_under = 90.5
            [tool.coverage.html]
            title = "tabblo & «ταБЬℓσ»"
            [tool.coverage.plugins.a_plugin]
            hello = "world"
            """)
        cov = coverage.Coverage()
        assert cov.config.timid
        assert not cov.config.branch
        assert cov.config.concurrency == ["a", "b"]
        assert cov.config.data_file == ".hello_kitty.data"
        assert cov.config.plugins == ["plugins.a_plugin"]
        assert cov.config.precision == 3
        assert cov.config.html_title == "tabblo & «ταБЬℓσ»"
        assert cov.config.fail_under == 90.5
        assert cov.config.get_plugin_options("plugins.a_plugin") == {"hello": "world"}

    def test_toml_ints_can_be_floats(self) -> None:
        # Test that our class doesn't reject integers when loading floats
        self.make_file("pyproject.toml", """\
            # This is just a bogus toml file for testing.
            [tool.coverage.report]
            fail_under = 90
            """)
        cov = coverage.Coverage()
        assert cov.config.fail_under == 90
        assert isinstance(cov.config.fail_under, float)

    def test_ignored_config_file(self) -> None:
        # You can disable reading the .coveragerc file.
        self.make_file(".coveragerc", """\
            [run]
            timid = True
            data_file = delete.me
            """)
        cov = coverage.Coverage(config_file=False)
        assert not cov.config.timid
        assert not cov.config.branch
        assert cov.config.data_file == ".coverage"

    def test_config_file_then_args(self) -> None:
        # The arguments override the .coveragerc file.
        self.make_file(".coveragerc", """\
            [run]
            timid = True
            data_file = weirdo.file
            """)
        cov = coverage.Coverage(timid=False, data_file=".mycov")
        assert not cov.config.timid
        assert not cov.config.branch
        assert cov.config.data_file == ".mycov"

    def test_data_file_from_environment(self) -> None:
        # There's an environment variable for the data_file.
        self.make_file(".coveragerc", """\
            [run]
            timid = True
            data_file = weirdo.file
            """)
        self.set_environ("COVERAGE_FILE", "fromenv.dat")
        cov = coverage.Coverage()
        assert cov.config.data_file == "fromenv.dat"
        # But the constructor arguments override the environment variable.
cov = coverage.Coverage(data_file="fromarg.dat") assert cov.config.data_file == "fromarg.dat" def test_debug_from_environment(self) -> None: self.make_file(".coveragerc", """\ [run] debug = dataio, pids """) self.set_environ("COVERAGE_DEBUG", "callers, fooey") cov = coverage.Coverage() assert cov.config.debug == ["dataio", "pids", "callers", "fooey"] def test_rcfile_from_environment(self) -> None: self.make_file("here.ini", """\ [run] data_file = overthere.dat """) self.set_environ("COVERAGE_RCFILE", "here.ini") cov = coverage.Coverage() assert cov.config.data_file == "overthere.dat" def test_missing_rcfile_from_environment(self) -> None: self.set_environ("COVERAGE_RCFILE", "nowhere.ini") msg = "Couldn't read 'nowhere.ini' as a config file" with pytest.raises(ConfigError, match=msg): coverage.Coverage() @pytest.mark.parametrize("bad_config, msg", [ ("[run]\ntimid = maybe?\n", r"maybe[?]"), ("timid = 1\n", r"no section headers"), ("[run\n", r"\[run"), ("[report]\nexclude_lines = foo(\n", r"Invalid \[report\].exclude_lines value 'foo\(': " + r"(unbalanced parenthesis|missing \))"), ("[report]\npartial_branches = foo[\n", r"Invalid \[report\].partial_branches value 'foo\[': " + r"(unexpected end of regular expression|unterminated character set)"), ("[report]\npartial_branches_always = foo***\n", r"Invalid \[report\].partial_branches_always value " + r"'foo\*\*\*': " + r"multiple repeat"), ]) def test_parse_errors(self, bad_config: str, msg: str) -> None: # Im-parsable values raise ConfigError, with details. self.make_file(".coveragerc", bad_config) with pytest.raises(ConfigError, match=msg): coverage.Coverage() @pytest.mark.parametrize("bad_config, msg", [ ("[tool.coverage.run]\ntimid = \"maybe?\"\n", r"maybe[?]"), ("[tool.coverage.run\n", None), ('[tool.coverage.report]\nexclude_lines = ["foo("]\n', r"Invalid \[tool.coverage.report\].exclude_lines value 'foo\(': " + r"(unbalanced parenthesis|missing \))"), ('[tool.coverage.report]\npartial_branches = ["foo["]\n', r"Invalid \[tool.coverage.report\].partial_branches value 'foo\[': " + r"(unexpected end of regular expression|unterminated character set)"), ('[tool.coverage.report]\npartial_branches_always = ["foo***"]\n', r"Invalid \[tool.coverage.report\].partial_branches_always value " + r"'foo\*\*\*': " + r"multiple repeat"), ('[tool.coverage.run]\nconcurrency="foo"', "not a list"), ("[tool.coverage.report]\nprecision=1.23", "not an integer"), ('[tool.coverage.report]\nfail_under="s"', "couldn't convert to a float"), ]) def test_toml_parse_errors(self, bad_config: str, msg: str) -> None: # Im-parsable values raise ConfigError, with details. self.make_file("pyproject.toml", bad_config) with pytest.raises(ConfigError, match=msg): coverage.Coverage() def test_environment_vars_in_config(self) -> None: # Config files can have $envvars in them. self.make_file(".coveragerc", """\ [run] data_file = $DATA_FILE.fooey branch = $OKAY [report] exclude_lines = the_$$one another${THING} x${THING}y x${NOTHING}y huh$${X}what """) self.set_environ("DATA_FILE", "hello-world") self.set_environ("THING", "ZZZ") self.set_environ("OKAY", "yes") cov = coverage.Coverage() assert cov.config.data_file == "hello-world.fooey" assert cov.config.branch is True assert cov.config.exclude_list == ["the_$one", "anotherZZZ", "xZZZy", "xy", "huh${X}what"] def test_environment_vars_in_toml_config(self) -> None: # Config files can have $envvars in them. 
self.make_file("pyproject.toml", """\ [tool.coverage.run] data_file = "$DATA_FILE.fooey" branch = "$BRANCH" [tool.coverage.report] precision = "$DIGITS" fail_under = "$FAIL_UNDER" exclude_lines = [ "the_$$one", "another${THING}", "x${THING}y", "x${NOTHING}y", "huh$${X}what", ] [othersection] # This reproduces the failure from https://github.com/nedbat/coveragepy/issues/1481 # When OTHER has a backslash that isn't a valid escape, like \\z (see below). something = "if [ $OTHER ]; then printf '%s\\n' 'Hi'; fi" """) self.set_environ("BRANCH", "true") self.set_environ("DIGITS", "3") self.set_environ("FAIL_UNDER", "90.5") self.set_environ("DATA_FILE", "hello-world") self.set_environ("THING", "ZZZ") self.set_environ("OTHER", "hi\\zebra") cov = coverage.Coverage() assert cov.config.branch is True assert cov.config.precision == 3 assert cov.config.data_file == "hello-world.fooey" assert cov.config.exclude_list == ["the_$one", "anotherZZZ", "xZZZy", "xy", "huh${X}what"] def test_tilde_in_config(self) -> None: # Config entries that are file paths can be tilde-expanded. self.make_file(".coveragerc", """\ [run] data_file = ~/data.file [html] directory = ~joe/html_dir [xml] output = ~/somewhere/xml.out [report] # Strings that aren't file paths are not tilde-expanded. exclude_lines = ~/data.file ~joe/html_dir [paths] mapping = ~/src ~joe/source """) def expanduser(s: str) -> str: """Fake tilde expansion""" s = s.replace("~/", "/Users/me/") s = s.replace("~joe/", "/Users/joe/") return s with mock.patch.object(coverage.config.os.path, 'expanduser', new=expanduser): cov = coverage.Coverage() assert cov.config.data_file == "/Users/me/data.file" assert cov.config.html_dir == "/Users/joe/html_dir" assert cov.config.xml_output == "/Users/me/somewhere/xml.out" assert cov.config.exclude_list == ["~/data.file", "~joe/html_dir"] assert cov.config.paths == {'mapping': ['/Users/me/src', '/Users/joe/source']} def test_tilde_in_toml_config(self) -> None: # Config entries that are file paths can be tilde-expanded. self.make_file("pyproject.toml", """\ [tool.coverage.run] data_file = "~/data.file" [tool.coverage.html] directory = "~joe/html_dir" [tool.coverage.xml] output = "~/somewhere/xml.out" [tool.coverage.report] # Strings that aren't file paths are not tilde-expanded. exclude_lines = [ "~/data.file", "~joe/html_dir", ] [tool.coverage.paths] mapping = [ "~/src", "~joe/source", ] """) def expanduser(s: str) -> str: """Fake tilde expansion""" s = s.replace("~/", "/Users/me/") s = s.replace("~joe/", "/Users/joe/") return s with mock.patch.object(coverage.config.os.path, 'expanduser', new=expanduser): cov = coverage.Coverage() assert cov.config.data_file == "/Users/me/data.file" assert cov.config.html_dir == "/Users/joe/html_dir" assert cov.config.xml_output == "/Users/me/somewhere/xml.out" assert cov.config.exclude_list == ["~/data.file", "~joe/html_dir"] assert cov.config.paths == {'mapping': ['/Users/me/src', '/Users/joe/source']} def test_tweaks_after_constructor(self) -> None: # set_option can be used after construction to affect the config. 
cov = coverage.Coverage(timid=True, data_file="fooey.dat") cov.set_option("run:timid", False) assert not cov.config.timid assert not cov.config.branch assert cov.config.data_file == "fooey.dat" assert not cov.get_option("run:timid") assert not cov.get_option("run:branch") assert cov.get_option("run:data_file") == "fooey.dat" def test_tweaks_paths_after_constructor(self) -> None: self.make_file(".coveragerc", """\ [paths] first = /first/1 /first/2 second = /second/a /second/b """) old_paths = { "first": ["/first/1", "/first/2"], "second": ["/second/a", "/second/b"], } cov = coverage.Coverage() paths = cov.get_option("paths") assert paths == old_paths new_paths = { "magic": ["src", "ok"], } cov.set_option("paths", new_paths) assert cov.get_option("paths") == new_paths def test_tweak_error_checking(self) -> None: # Trying to set an unknown config value raises an error. cov = coverage.Coverage() with pytest.raises(ConfigError, match="No such option: 'run:xyzzy'"): cov.set_option("run:xyzzy", 12) with pytest.raises(ConfigError, match="No such option: 'xyzzy:foo'"): cov.set_option("xyzzy:foo", 12) with pytest.raises(ConfigError, match="No such option: 'run:xyzzy'"): _ = cov.get_option("run:xyzzy") with pytest.raises(ConfigError, match="No such option: 'xyzzy:foo'"): _ = cov.get_option("xyzzy:foo") def test_tweak_plugin_options(self) -> None: # Plugin options have a more flexible syntax. cov = coverage.Coverage() cov.set_option("run:plugins", ["fooey.plugin", "xyzzy.coverage.plugin"]) cov.set_option("fooey.plugin:xyzzy", 17) cov.set_option("xyzzy.coverage.plugin:plugh", ["a", "b"]) with pytest.raises(ConfigError, match="No such option: 'no_such.plugin:foo'"): cov.set_option("no_such.plugin:foo", 23) assert cov.get_option("fooey.plugin:xyzzy") == 17 assert cov.get_option("xyzzy.coverage.plugin:plugh") == ["a", "b"] with pytest.raises(ConfigError, match="No such option: 'no_such.plugin:foo'"): _ = cov.get_option("no_such.plugin:foo") def test_unknown_option(self) -> None: self.make_file(".coveragerc", """\ [run] xyzzy = 17 """) msg = r"Unrecognized option '\[run\] xyzzy=' in config file .coveragerc" with pytest.warns(CoverageWarning, match=msg): _ = coverage.Coverage() def test_unknown_option_toml(self) -> None: self.make_file("pyproject.toml", """\ [tool.coverage.run] xyzzy = 17 """) msg = r"Unrecognized option '\[tool.coverage.run\] xyzzy=' in config file pyproject.toml" with pytest.warns(CoverageWarning, match=msg): _ = coverage.Coverage() def test_misplaced_option(self) -> None: self.make_file(".coveragerc", """\ [report] branch = True """) msg = r"Unrecognized option '\[report\] branch=' in config file .coveragerc" with pytest.warns(CoverageWarning, match=msg): _ = coverage.Coverage() def test_unknown_option_in_other_ini_file(self) -> None: self.make_file("setup.cfg", """\ [coverage:run] huh = what? 
""") msg = r"Unrecognized option '\[coverage:run\] huh=' in config file setup.cfg" with pytest.warns(CoverageWarning, match=msg): _ = coverage.Coverage() def test_exceptions_from_missing_things(self) -> None: self.make_file("config.ini", """\ [run] branch = True """) config = HandyConfigParser(True) config.read(["config.ini"]) with pytest.raises(ConfigError, match="No section: 'xyzzy'"): config.options("xyzzy") with pytest.raises(ConfigError, match="No option 'foo' in section: 'xyzzy'"): config.get("xyzzy", "foo") def test_exclude_also(self) -> None: self.make_file("pyproject.toml", """\ [tool.coverage.report] exclude_also = ["foobar", "raise .*Error"] """) cov = coverage.Coverage() expected = coverage.config.DEFAULT_EXCLUDE + ["foobar", "raise .*Error"] assert cov.config.exclude_list == expected class ConfigFileTest(UsingModulesMixin, CoverageTest): """Tests of the config file settings in particular.""" # This sample file tries to use lots of variation of syntax... # The {section} placeholder lets us nest these settings in another file. LOTSA_SETTINGS = """\ # This is a settings file for coverage.py [{section}run] timid = yes data_file = something_or_other.dat branch = 1 cover_pylib = TRUE parallel = on concurrency = thread ; this omit is overridden by the omit from [report] omit = twenty source = myapp source_pkgs = ned plugins = plugins.a_plugin plugins.another debug = callers, pids , dataio disable_warnings = abcd , efgh [{section}report] ; these settings affect reporting. exclude_lines = if 0: pragma:?\\s+no cover another_tab ignore_errors = TRUE omit = one, another, some_more, yet_more include = thirty precision = 3 partial_branches = pragma:?\\s+no branch partial_branches_always = if 0: while True: show_missing= TruE skip_covered = TruE skip_empty =TruE include_namespace_packages = TRUE [{section}html] directory = c:\\tricky\\dir.somewhere extra_css=something/extra.css title = Title & nums # nums! [{section}xml] output=mycov.xml package_depth = 17 [{section}paths] source = . /home/ned/src/ other = other, /home/ned/other, c:\\Ned\\etc [{section}plugins.a_plugin] hello = world ; comments still work. names = Jane/John/Jenny [{section}json] pretty_print = True show_contexts = True """ # Just some sample setup.cfg text from the docs. SETUP_CFG = """\ [bdist_rpm] release = 1 packager = Jane Packager doc_files = CHANGES.txt README.txt USAGE.txt doc/ examples/ """ # Just some sample tox.ini text from the docs. 
TOX_INI = """\ [tox] envlist = py{26,27,33,34,35}-{c,py}tracer skip_missing_interpreters = True [testenv] commands = # Create tests/zipmods.zip python igor.py zip_mods """ def assert_config_settings_are_correct(self, cov: Coverage) -> None: """Check that `cov` has all the settings from LOTSA_SETTINGS.""" assert cov.config.timid assert cov.config.data_file == "something_or_other.dat" assert cov.config.branch assert cov.config.cover_pylib assert cov.config.debug == ["callers", "pids", "dataio"] assert cov.config.parallel assert cov.config.concurrency == ["thread"] assert cov.config.source == ["myapp"] assert cov.config.source_pkgs == ["ned"] assert cov.config.disable_warnings == ["abcd", "efgh"] assert cov.get_exclude_list() == ["if 0:", r"pragma:?\s+no cover", "another_tab"] assert cov.config.ignore_errors assert cov.config.run_omit == ["twenty"] assert cov.config.report_omit == ["one", "another", "some_more", "yet_more"] assert cov.config.report_include == ["thirty"] assert cov.config.precision == 3 assert cov.config.partial_list == [r"pragma:?\s+no branch"] assert cov.config.partial_always_list == ["if 0:", "while True:"] assert cov.config.plugins == ["plugins.a_plugin", "plugins.another"] assert cov.config.show_missing assert cov.config.skip_covered assert cov.config.skip_empty assert cov.config.html_dir == r"c:\tricky\dir.somewhere" assert cov.config.extra_css == "something/extra.css" assert cov.config.html_title == "Title & nums # nums!" assert cov.config.xml_output == "mycov.xml" assert cov.config.xml_package_depth == 17 assert cov.config.paths == { 'source': ['.', '/home/ned/src/'], 'other': ['other', '/home/ned/other', 'c:\\Ned\\etc'], } assert cov.config.get_plugin_options("plugins.a_plugin") == { 'hello': 'world', 'names': 'Jane/John/Jenny', } assert cov.config.get_plugin_options("plugins.another") == {} assert cov.config.json_show_contexts is True assert cov.config.json_pretty_print is True assert cov.config.include_namespace_packages is True def test_config_file_settings(self) -> None: self.make_file(".coveragerc", self.LOTSA_SETTINGS.format(section="")) cov = coverage.Coverage() self.assert_config_settings_are_correct(cov) def check_config_file_settings_in_other_file(self, fname: str, contents: str) -> None: """Check config will be read from another file, with prefixed sections.""" nested = self.LOTSA_SETTINGS.format(section="coverage:") fname = self.make_file(fname, nested + "\n" + contents) cov = coverage.Coverage() self.assert_config_settings_are_correct(cov) def test_config_file_settings_in_setupcfg(self) -> None: self.check_config_file_settings_in_other_file("setup.cfg", self.SETUP_CFG) def test_config_file_settings_in_toxini(self) -> None: self.check_config_file_settings_in_other_file("tox.ini", self.TOX_INI) def check_other_config_if_coveragerc_specified(self, fname: str, contents: str) -> None: """Check that config `fname` is read if .coveragerc is missing, but specified.""" nested = self.LOTSA_SETTINGS.format(section="coverage:") self.make_file(fname, nested + "\n" + contents) cov = coverage.Coverage(config_file=".coveragerc") self.assert_config_settings_are_correct(cov) def test_config_file_settings_in_setupcfg_if_coveragerc_specified(self) -> None: self.check_other_config_if_coveragerc_specified("setup.cfg", self.SETUP_CFG) def test_config_file_settings_in_tox_if_coveragerc_specified(self) -> None: self.check_other_config_if_coveragerc_specified("tox.ini", self.TOX_INI) def check_other_not_read_if_coveragerc(self, fname: str) -> None: """Check config `fname` is not 
        read if .coveragerc exists."""
        self.make_file(".coveragerc", """\
            [run]
            include = foo
            """)
        self.make_file(fname, """\
            [coverage:run]
            omit = bar
            branch = true
            """)
        cov = coverage.Coverage()
        assert cov.config.run_include == ["foo"]
        assert cov.config.run_omit == []
        assert cov.config.branch is False

    def test_setupcfg_only_if_not_coveragerc(self) -> None:
        self.check_other_not_read_if_coveragerc("setup.cfg")

    def test_toxini_only_if_not_coveragerc(self) -> None:
        self.check_other_not_read_if_coveragerc("tox.ini")

    def check_other_config_need_prefixes(self, fname: str) -> None:
        """Check that `fname` sections won't be read if un-prefixed."""
        self.make_file(fname, """\
            [run]
            omit = bar
            branch = true
            """)
        cov = coverage.Coverage()
        assert cov.config.run_omit == []
        assert cov.config.branch is False

    def test_setupcfg_only_if_prefixed(self) -> None:
        self.check_other_config_need_prefixes("setup.cfg")

    def test_toxini_only_if_prefixed(self) -> None:
        self.check_other_config_need_prefixes("tox.ini")

    def test_tox_ini_even_if_setup_cfg(self) -> None:
        # There's a setup.cfg, but no coverage settings in it, so tox.ini
        # is read.
        nested = self.LOTSA_SETTINGS.format(section="coverage:")
        self.make_file("tox.ini", self.TOX_INI + "\n" + nested)
        self.make_file("setup.cfg", self.SETUP_CFG)
        cov = coverage.Coverage()
        self.assert_config_settings_are_correct(cov)

    def test_read_prefixed_sections_from_explicit_file(self) -> None:
        # You can point to a tox.ini, and it will find [coverage:run] sections
        nested = self.LOTSA_SETTINGS.format(section="coverage:")
        self.make_file("tox.ini", self.TOX_INI + "\n" + nested)
        cov = coverage.Coverage(config_file="tox.ini")
        self.assert_config_settings_are_correct(cov)

    def test_non_ascii(self) -> None:
        self.make_file(".coveragerc", """\
            [report]
            exclude_lines =
                first
                ✘${TOX_ENVNAME}
                third
            [html]
            title = tabblo & «ταБЬℓσ» # numbers
            """)
        self.set_environ("TOX_ENVNAME", "weirdo")
        cov = coverage.Coverage()
        assert cov.config.exclude_list == ["first", "✘weirdo", "third"]
        assert cov.config.html_title == "tabblo & «ταБЬℓσ» # numbers"

    @pytest.mark.parametrize("bad_file", ["nosuchfile.txt", "."])
    def test_unreadable_config(self, bad_file: str) -> None:
        # If a config file is explicitly specified, then it is an error for it
        # to not be readable.
        msg = f"Couldn't read {bad_file!r} as a config file"
        with pytest.raises(ConfigError, match=msg):
            coverage.Coverage(config_file=bad_file)

    def test_nocoveragerc_file_when_specified(self) -> None:
        cov = coverage.Coverage(config_file=".coveragerc")
        assert not cov.config.timid
        assert not cov.config.branch
        assert cov.config.data_file == ".coverage"

    def test_no_toml_installed_no_toml(self) -> None:
        # Can't read a toml file that doesn't exist.
        with mock.patch.object(coverage.tomlconfig, "has_tomllib", False):
            msg = "Couldn't read 'cov.toml' as a config file"
            with pytest.raises(ConfigError, match=msg):
                coverage.Coverage(config_file="cov.toml")

    @pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib")
    def test_no_toml_installed_explicit_toml(self) -> None:
        # Can't specify a toml config file if toml isn't installed.
self.make_file("cov.toml", "# A toml file!") with mock.patch.object(coverage.tomlconfig, "has_tomllib", False): msg = "Can't read 'cov.toml' without TOML support" with pytest.raises(ConfigError, match=msg): coverage.Coverage(config_file="cov.toml") @pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib") def test_no_toml_installed_pyproject_toml(self) -> None: # Can't have coverage config in pyproject.toml without toml installed. self.make_file("pyproject.toml", """\ # A toml file! [tool.coverage.run] xyzzy = 17 """) with mock.patch.object(coverage.tomlconfig, "has_tomllib", False): msg = "Can't read 'pyproject.toml' without TOML support" with pytest.raises(ConfigError, match=msg): coverage.Coverage() @pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib") def test_no_toml_installed_pyproject_toml_shorter_syntax(self) -> None: # Can't have coverage config in pyproject.toml without toml installed. self.make_file("pyproject.toml", """\ # A toml file! [tool.coverage] run.parallel = true """) with mock.patch.object(coverage.tomlconfig, "has_tomllib", False): msg = "Can't read 'pyproject.toml' without TOML support" with pytest.raises(ConfigError, match=msg): coverage.Coverage() @pytest.mark.skipif(env.PYVERSION >= (3, 11), reason="Python 3.11 has toml in stdlib") def test_no_toml_installed_pyproject_no_coverage(self) -> None: # It's ok to have non-coverage pyproject.toml without toml installed. self.make_file("pyproject.toml", """\ # A toml file! [tool.something] xyzzy = 17 """) with mock.patch.object(coverage.tomlconfig, "has_tomllib", False): cov = coverage.Coverage() # We get default settings: assert not cov.config.timid assert not cov.config.branch assert cov.config.data_file == ".coverage" def test_exceptions_from_missing_toml_things(self) -> None: self.make_file("pyproject.toml", """\ [tool.coverage.run] branch = true """) config = TomlConfigParser(False) config.read("pyproject.toml") with pytest.raises(ConfigError, match="No section: 'xyzzy'"): config.options("xyzzy") with pytest.raises(ConfigError, match="No section: 'xyzzy'"): config.get("xyzzy", "foo") with pytest.raises(ConfigError, match="No option 'foo' in section: 'tool.coverage.run'"): config.get("run", "foo") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_context.py0000644000175100001770000002423500000000000020374 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for context support.""" from __future__ import annotations import inspect import os.path from typing import Any from unittest import mock import pytest import coverage from coverage.context import qualname_from_frame from coverage.data import CoverageData, sorted_lines from coverage.types import TArc, TCovKwargs, TLineNo from tests import testenv from tests.coveragetest import CoverageTest from tests.helpers import assert_count_equal class StaticContextTest(CoverageTest): """Tests of the static context.""" def test_no_context(self) -> None: self.make_file("main.py", "a = 1") cov = coverage.Coverage() self.start_import_stop(cov, "main") data = cov.get_data() assert_count_equal(data.measured_contexts(), [""]) def test_static_context(self) -> None: self.make_file("main.py", "a = 1") cov = coverage.Coverage(context="gooey") self.start_import_stop(cov, "main") data = cov.get_data() 
assert_count_equal(data.measured_contexts(), ["gooey"]) SOURCE = """\ a = 1 if a > 2: a = 3 assert a == 1 """ LINES = [1, 2, 4] ARCS = [(-1, 1), (1, 2), (2, 4), (4, -1)] def run_red_blue(self, **options: TCovKwargs) -> tuple[CoverageData, CoverageData]: """Run red.py and blue.py, and return their CoverageData objects.""" self.make_file("red.py", self.SOURCE) red_cov = coverage.Coverage(context="red", data_suffix="r", source=["."], **options) self.start_import_stop(red_cov, "red") red_cov.save() red_data = red_cov.get_data() self.make_file("blue.py", self.SOURCE) blue_cov = coverage.Coverage(context="blue", data_suffix="b", source=["."], **options) self.start_import_stop(blue_cov, "blue") blue_cov.save() blue_data = blue_cov.get_data() return red_data, blue_data def test_combining_line_contexts(self) -> None: red_data, blue_data = self.run_red_blue() for datas in [[red_data, blue_data], [blue_data, red_data]]: combined = CoverageData(suffix="combined") for data in datas: combined.update(data) assert combined.measured_contexts() == {'red', 'blue'} full_names = {os.path.basename(f): f for f in combined.measured_files()} assert_count_equal(full_names, ['red.py', 'blue.py']) fred = full_names['red.py'] fblue = full_names['blue.py'] def assert_combined_lines(filename: str, context: str, lines: list[TLineNo]) -> None: # pylint: disable=cell-var-from-loop combined.set_query_context(context) assert combined.lines(filename) == lines assert_combined_lines(fred, 'red', self.LINES) assert_combined_lines(fred, 'blue', []) assert_combined_lines(fblue, 'red', []) assert_combined_lines(fblue, 'blue', self.LINES) def test_combining_arc_contexts(self) -> None: red_data, blue_data = self.run_red_blue(branch=True) for datas in [[red_data, blue_data], [blue_data, red_data]]: combined = CoverageData(suffix="combined") for data in datas: combined.update(data) assert combined.measured_contexts() == {'red', 'blue'} full_names = {os.path.basename(f): f for f in combined.measured_files()} assert_count_equal(full_names, ['red.py', 'blue.py']) fred = full_names['red.py'] fblue = full_names['blue.py'] def assert_combined_lines(filename: str, context: str, lines: list[TLineNo]) -> None: # pylint: disable=cell-var-from-loop combined.set_query_context(context) assert combined.lines(filename) == lines assert_combined_lines(fred, 'red', self.LINES) assert_combined_lines(fred, 'blue', []) assert_combined_lines(fblue, 'red', []) assert_combined_lines(fblue, 'blue', self.LINES) def assert_combined_arcs(filename: str, context: str, lines: list[TArc]) -> None: # pylint: disable=cell-var-from-loop combined.set_query_context(context) assert combined.arcs(filename) == lines assert_combined_arcs(fred, 'red', self.ARCS) assert_combined_arcs(fred, 'blue', []) assert_combined_arcs(fblue, 'red', []) assert_combined_arcs(fblue, 'blue', self.ARCS) @pytest.mark.skipif(not testenv.DYN_CONTEXTS, reason="No dynamic contexts with this core") class DynamicContextTest(CoverageTest): """Tests of dynamically changing contexts.""" SOURCE = """\ def helper(lineno): x = 2 def test_one(): a = 5 helper(6) def test_two(): a = 9 b = 10 if a > 11: b = 12 assert a == (13-4) assert b == (14-4) helper(15) test_one() x = 18 helper(19) test_two() """ OUTER_LINES = [1, 4, 8, 17, 18, 19, 2, 20] TEST_ONE_LINES = [5, 6, 2] TEST_TWO_LINES = [9, 10, 11, 13, 14, 15, 2] def test_dynamic_alone(self) -> None: self.make_file("two_tests.py", self.SOURCE) cov = coverage.Coverage(source=["."]) cov.set_option("run:dynamic_context", "test_function") self.start_import_stop(cov, 
"two_tests") data = cov.get_data() full_names = {os.path.basename(f): f for f in data.measured_files()} fname = full_names["two_tests.py"] assert_count_equal( data.measured_contexts(), ["", "two_tests.test_one", "two_tests.test_two"], ) def assert_context_lines(context: str, lines: list[TLineNo]) -> None: data.set_query_context(context) assert_count_equal(lines, sorted_lines(data, fname)) assert_context_lines("", self.OUTER_LINES) assert_context_lines("two_tests.test_one", self.TEST_ONE_LINES) assert_context_lines("two_tests.test_two", self.TEST_TWO_LINES) def test_static_and_dynamic(self) -> None: self.make_file("two_tests.py", self.SOURCE) cov = coverage.Coverage(context="stat", source=["."]) cov.set_option("run:dynamic_context", "test_function") self.start_import_stop(cov, "two_tests") data = cov.get_data() full_names = {os.path.basename(f): f for f in data.measured_files()} fname = full_names["two_tests.py"] assert_count_equal( data.measured_contexts(), ["stat", "stat|two_tests.test_one", "stat|two_tests.test_two"], ) def assert_context_lines(context: str, lines: list[TLineNo]) -> None: data.set_query_context(context) assert_count_equal(lines, sorted_lines(data, fname)) assert_context_lines("stat", self.OUTER_LINES) assert_context_lines("stat|two_tests.test_one", self.TEST_ONE_LINES) assert_context_lines("stat|two_tests.test_two", self.TEST_TWO_LINES) def get_qualname() -> str | None: """Helper to return qualname_from_frame for the caller.""" stack = inspect.stack()[1:] if any(sinfo[0].f_code.co_name == "get_qualname" for sinfo in stack): # We're calling ourselves recursively, maybe because we're testing # properties. Return an int to try to get back on track. return 17 # type: ignore[return-value] caller_frame = stack[0][0] return qualname_from_frame(caller_frame) # pylint: disable=missing-class-docstring, missing-function-docstring, unused-argument class Parent: def meth(self) -> str | None: return get_qualname() @property def a_property(self) -> str | None: return get_qualname() class Child(Parent): pass class SomethingElse: pass class MultiChild(SomethingElse, Child): pass def no_arguments() -> str | None: return get_qualname() def plain_old_function(a: Any, b: Any) -> str | None: return get_qualname() def fake_out(self: Any) -> str | None: return get_qualname() def patch_meth(self: Any) -> str | None: return get_qualname() # pylint: enable=missing-class-docstring, missing-function-docstring, unused-argument class QualnameTest(CoverageTest): """Tests of qualname_from_frame.""" # Pylint gets confused about meth() below. 
# pylint: disable=no-value-for-parameter run_in_temp_dir = False def test_method(self) -> None: assert Parent().meth() == "tests.test_context.Parent.meth" def test_inherited_method(self) -> None: assert Child().meth() == "tests.test_context.Parent.meth" def test_mi_inherited_method(self) -> None: assert MultiChild().meth() == "tests.test_context.Parent.meth" def test_no_arguments(self) -> None: assert no_arguments() == "tests.test_context.no_arguments" def test_plain_old_function(self) -> None: assert plain_old_function(0, 1) == "tests.test_context.plain_old_function" def test_fake_out(self) -> None: assert fake_out(0) == "tests.test_context.fake_out" def test_property(self) -> None: assert Parent().a_property == "tests.test_context.Parent.a_property" def test_changeling(self) -> None: c = Child() c.meth = patch_meth # type: ignore[assignment] assert c.meth(c) == "tests.test_context.patch_meth" # type: ignore[call-arg] def test_bug_829(self) -> None: # A class with a name like a function shouldn't confuse qualname_from_frame. class test_something: # pylint: disable=unused-variable assert get_qualname() is None def test_bug_1210(self) -> None: # Under pyarmor (an obfuscator), a function can have a "self" argument, # but then not have a "self" local. co = mock.Mock(co_name="a_co_name", co_argcount=1, co_varnames=["self"]) frame = mock.Mock(f_code=co, f_locals={}) assert qualname_from_frame(frame) == "unittest.mock.a_co_name" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_coverage.py0000644000175100001770000012447300000000000020510 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.py.""" from __future__ import annotations import pytest import coverage from coverage import env from coverage.exceptions import NoDataError from tests.coveragetest import CoverageTest class TestCoverageTest(CoverageTest): """Make sure our complex self.check_coverage method works.""" def test_successful_coverage(self) -> None: # The simplest run possible. self.check_coverage("""\ a = 1 b = 2 """, [1,2], ) # You can provide a list of possible statement matches. self.check_coverage("""\ a = 1 b = 2 """, ([100], [1,2], [1723,47]), ) # You can specify missing lines. self.check_coverage("""\ a = 1 if a == 2: a = 3 """, [1,2,3], missing="3", ) # You can specify a list of possible missing lines. self.check_coverage("""\ a = 1 if a == 2: a = 3 """, [1,2,3], missing=("47-49", "3", "100,102"), ) def test_failed_coverage(self) -> None: # If the lines are wrong, the message shows right and wrong. with pytest.raises(AssertionError, match=r"\[1, 2] != \[1]"): self.check_coverage("""\ a = 1 b = 2 """, [1], ) # If the list of lines possibilities is wrong, the msg shows right. msg = r"None of the lines choices matched \[1, 2]" with pytest.raises(AssertionError, match=msg): self.check_coverage("""\ a = 1 b = 2 """, ([1], [2]), ) # If the missing lines are wrong, the message shows right and wrong. with pytest.raises(AssertionError, match=r"'3' != '37'"): self.check_coverage("""\ a = 1 if a == 2: a = 3 """, [1,2,3], missing="37", ) # If the missing lines possibilities are wrong, the msg shows right. 
msg = r"None of the missing choices matched '3'" with pytest.raises(AssertionError, match=msg): self.check_coverage("""\ a = 1 if a == 2: a = 3 """, [1,2,3], missing=("37", "4-10"), ) def test_exceptions_really_fail(self) -> None: # An assert in the checked code will really raise up to us. with pytest.raises(AssertionError, match="This is bad"): self.check_coverage("""\ a = 1 assert a == 99, "This is bad" """, ) # Other exceptions too. with pytest.raises(ZeroDivisionError, match="division"): self.check_coverage("""\ a = 1 assert a == 1, "This is good" a/0 """, ) class BasicCoverageTest(CoverageTest): """The simplest tests, for quick smoke testing of fundamental changes.""" def test_simple(self) -> None: self.check_coverage("""\ a = 1 b = 2 c = 4 # Nothing here d = 6 """, [1,2,4,6], report="4 0 0 0 100%", ) def test_indentation_wackiness(self) -> None: # Partial final lines are OK. self.check_coverage("""\ import sys if not sys.path: a = 1 """, # indented last line [1,2,3], "3", ) def test_multiline_initializer(self) -> None: self.check_coverage("""\ d = { 'foo': 1+2, 'bar': (lambda x: x+1)(1), 'baz': str(1), } e = { 'foo': 1, 'bar': 2 } """, [1,7], "", ) def test_list_comprehension(self) -> None: self.check_coverage("""\ l = [ 2*i for i in range(10) if i > 5 ] assert l == [12, 14, 16, 18] """, [1,5], "", ) class SimpleStatementTest(CoverageTest): """Testing simple single-line statements.""" def test_expression(self) -> None: # Bare expressions as statements are tricky: some implementations # optimize some of them away. All implementations seem to count # the implicit return at the end as executable. self.check_coverage("""\ 12 23 """, ([1,2],[2]), "", ) self.check_coverage("""\ 12 23 a = 3 """, ([1,2,3],[3]), "", ) self.check_coverage("""\ 1 + 2 1 + \\ 2 """, ([1,2], [2]), "", ) self.check_coverage("""\ 1 + 2 1 + \\ 2 a = 4 """, ([1,2,4], [4]), "", ) def test_assert(self) -> None: self.check_coverage("""\ assert (1 + 2) assert (1 + 2) assert (1 + 2), 'the universe is broken' assert (1 + 2), \\ 'something is amiss' """, [1,2,4,5], "", ) def test_assignment(self) -> None: # Simple variable assignment self.check_coverage("""\ a = (1 + 2) b = (1 + 2) c = \\ 1 """, [1,2,4], "", ) def test_assign_tuple(self) -> None: self.check_coverage("""\ a = 1 a,b,c = 7,8,9 assert a == 7 and b == 8 and c == 9 """, [1,2,3], "", ) def test_more_assignments(self) -> None: self.check_coverage("""\ x = [] d = {} d[ 4 + len(x) + 5 ] = \\ d[ 8 ** 2 ] = \\ 9 """, [1, 2, 3], "", ) def test_attribute_assignment(self) -> None: # Attribute assignment self.check_coverage("""\ class obj: pass o = obj() o.foo = (1 + 2) o.foo = (1 + 2) o.foo = \\ 1 """, [1,2,3,4,6], "", ) def test_list_of_attribute_assignment(self) -> None: self.check_coverage("""\ class obj: pass o = obj() o.a, o.b = (1 + 2), 3 o.a, o.b = (1 + 2), (3 + 4) o.a, o.b = \\ 1, \\ 2 """, [1,2,3,4,7], "", ) def test_augmented_assignment(self) -> None: self.check_coverage("""\ a = 1 a += 1 a += (1 + 2) a += \\ 1 """, [1,2,3,5], "", ) def test_triple_string_stuff(self) -> None: self.check_coverage("""\ a = ''' a multiline string. ''' b = ''' long expression ''' + ''' on many lines. ''' c = len(''' long expression ''' + ''' on many lines. ''') """, [1,5,11], "", ) def test_pass(self) -> None: # pass is tricky: if it's the only statement in a block, then it is # "executed". But if it is not the only statement, then it is not. 
self.check_coverage("""\ if 1==1: pass """, [1,2], "", ) self.check_coverage("""\ def foo(): pass foo() """, [1,2,3], "", ) self.check_coverage("""\ def foo(): "doc" pass foo() """, ([1,3,4], [1,4]), "", ) self.check_coverage("""\ class Foo: def foo(self): pass Foo().foo() """, [1,2,3,4], "", ) self.check_coverage("""\ class Foo: def foo(self): "Huh?" pass Foo().foo() """, ([1,2,4,5], [1,2,5]), "", ) def test_del(self) -> None: self.check_coverage("""\ d = { 'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1 } del d['a'] del d[ 'b' ] del d['c'], \\ d['d'], \\ d['e'] assert(len(d.keys()) == 0) """, [1,2,3,6,9], "", ) def test_raise(self) -> None: self.check_coverage("""\ try: raise Exception( "hello %d" % 17) except: pass """, [1,2,5,6], "", ) def test_raise_followed_by_statement(self) -> None: if env.PYBEHAVIOR.omit_after_jump: lines = [1,2,4,5] missing = "" else: lines = [1,2,3,4,5] missing = "3" self.check_coverage("""\ try: raise Exception("hello") a = 3 except: pass """, lines=lines, missing=missing, ) def test_return(self) -> None: self.check_coverage("""\ def fn(): a = 1 return a x = fn() assert(x == 1) """, [1,2,3,5,6], "", ) self.check_coverage("""\ def fn(): a = 1 return ( a + 1) x = fn() assert(x == 2) """, [1,2,3,7,8], "", ) self.check_coverage("""\ def fn(): a = 1 return (a, a + 1, a + 2) x,y,z = fn() assert x == 1 and y == 2 and z == 3 """, [1,2,3,7,8], "", ) def test_return_followed_by_statement(self) -> None: if env.PYBEHAVIOR.omit_after_return: lines = [1,2,3,6,7] missing = "" else: lines = [1,2,3,4,6,7] missing = "4" self.check_coverage("""\ def fn(): a = 2 return a a = 4 x = fn() assert(x == 2) """, lines=lines, missing=missing, ) def test_yield(self) -> None: self.check_coverage("""\ def gen(): yield 1 yield (2+ 3+ 4) yield 1, \\ 2 a,b,c = gen() assert a == 1 and b == 9 and c == (1,2) """, [1,2,3,6,8,9], "", ) def test_break(self) -> None: if env.PYBEHAVIOR.omit_after_jump: lines = [1,2,3,5] missing = "" else: lines = [1,2,3,4,5] missing = "4" self.check_coverage("""\ for x in range(10): a = 2 + x break a = 4 assert a == 2 """, lines=lines, missing=missing, ) def test_continue(self) -> None: if env.PYBEHAVIOR.omit_after_jump: lines = [1,2,3,5] missing = "" else: lines = [1,2,3,4,5] missing = "4" self.check_coverage("""\ for x in range(10): a = 2 + x continue a = 4 assert a == 11 """, lines=lines, missing=missing, ) def test_strange_unexecuted_continue(self) -> None: # Peephole optimization of jumps to jumps can mean that some statements # never hit the line tracer. The behavior is different in different # versions of Python, so be careful when running this test. self.check_coverage("""\ a = b = c = 0 for n in range(100): if n % 2: if n % 4: a += 1 continue # <-- This line may not be hit. else: b += 1 c += 1 assert a == 50 and b == 50 and c == 50 a = b = c = 0 for n in range(100): if n % 2: if n % 3: a += 1 continue # <-- This line is always hit. 
else: b += 1 c += 1 assert a == 33 and b == 50 and c == 50 """, lines=[1,2,3,4,5,6,8,9,10, 12,13,14,15,16,17,19,20,21], missing=["", "6"], ) def test_import(self) -> None: self.check_coverage("""\ import string from sys import path a = 1 """, [1,2,3], "", ) self.check_coverage("""\ import string if 1 == 2: from sys import path a = 1 """, [1,2,3,4], "3", ) self.check_coverage("""\ import string, \\ os, \\ re from sys import path, \\ stdout a = 1 """, [1,4,6], "", ) self.check_coverage("""\ import sys, sys as s assert s.path == sys.path """, [1,2], "", ) self.check_coverage("""\ import sys, \\ sys as s assert s.path == sys.path """, [1,3], "", ) self.check_coverage("""\ from sys import path, \\ path as p assert p == path """, [1,3], "", ) self.check_coverage("""\ from sys import \\ * assert len(path) > 0 """, [1,3], "", ) def test_global(self) -> None: self.check_coverage("""\ g = h = i = 1 def fn(): global g global h, \\ i g = h = i = 2 fn() assert g == 2 and h == 2 and i == 2 """, [1,2,6,7,8], "", ) self.check_coverage("""\ g = h = i = 1 def fn(): global g; g = 2 fn() assert g == 2 and h == 1 and i == 1 """, [1,2,3,4,5], "", ) def test_exec(self) -> None: self.check_coverage("""\ a = b = c = 1 exec("a = 2") exec("b = " + "c = " + "2") assert a == 2 and b == 2 and c == 2 """, [1,2,3,6], "", ) self.check_coverage("""\ vars = {'a': 1, 'b': 1, 'c': 1} exec("a = 2", vars) exec("b = " + "c = " + "2", vars) assert vars['a'] == 2 and vars['b'] == 2 and vars['c'] == 2 """, [1,2,3,6], "", ) self.check_coverage("""\ globs = {} locs = {'a': 1, 'b': 1, 'c': 1} exec("a = 2", globs, locs) exec("b = " + "c = " + "2", globs, locs) assert locs['a'] == 2 and locs['b'] == 2 and locs['c'] == 2 """, [1,2,3,4,7], "", ) def test_extra_doc_string(self) -> None: self.check_coverage("""\ a = 1 "An extra docstring, should be a comment." b = 3 assert (a,b) == (1,3) """, ([1,3,4], [1,2,3,4]), "", ) self.check_coverage("""\ a = 1 "An extra docstring, should be a comment." b = 3 123 # A number for some reason: ignored 1+1 # An expression: executed. c = 6 assert (a,b,c) == (1,3,6) """, ([1,3,6,7], [1,3,5,6,7], [1,3,4,5,6,7], [1,2,3,4,5,6,7]), "", ) def test_nonascii(self) -> None: self.check_coverage("""\ # coding: utf-8 a = 2 b = 3 """, [2, 3], ) def test_module_docstring(self) -> None: self.check_coverage("""\ '''I am a module docstring.''' a = 2 b = 3 """, [2, 3], ) lines = [2, 3, 4] self.check_coverage("""\ # Start with a comment, because it changes the behavior(!?) 
'''I am a module docstring.''' a = 3 b = 4 """, lines, ) class CompoundStatementTest(CoverageTest): """Testing coverage of multi-line compound statements.""" def test_statement_list(self) -> None: self.check_coverage("""\ a = 1; b = 2; c = 3 d = 4; e = 5; assert (a,b,c,d,e) == (1,2,3,4,5) """, [1,2,3,5], "", ) def test_if(self) -> None: self.check_coverage("""\ a = 1 if a == 1: x = 3 assert x == 3 if (a == 1): x = 7 assert x == 7 """, [1,2,3,4,5,7,8], "", ) self.check_coverage("""\ a = 1 if a == 1: x = 3 else: y = 5 assert x == 3 """, [1,2,3,5,6], "5", ) self.check_coverage("""\ a = 1 if a != 1: x = 3 else: y = 5 assert y == 5 """, [1,2,3,5,6], "3", ) self.check_coverage("""\ a = 1; b = 2 if a == 1: if b == 2: x = 4 else: y = 6 else: z = 8 assert x == 4 """, [1,2,3,4,6,8,9], "6-8", ) def test_elif(self) -> None: self.check_coverage("""\ a = 1; b = 2; c = 3; if a == 1: x = 3 elif b == 2: y = 5 else: z = 7 assert x == 3 """, [1,2,3,4,5,7,8], "4-7", report="7 3 4 1 45% 4-7", ) self.check_coverage("""\ a = 1; b = 2; c = 3; if a != 1: x = 3 elif b == 2: y = 5 else: z = 7 assert y == 5 """, [1,2,3,4,5,7,8], "3, 7", report="7 2 4 2 64% 3, 7", ) self.check_coverage("""\ a = 1; b = 2; c = 3; if a != 1: x = 3 elif b != 2: y = 5 else: z = 7 assert z == 7 """, [1,2,3,4,5,7,8], "3, 5", report="7 2 4 2 64% 3, 5", ) def test_elif_no_else(self) -> None: self.check_coverage("""\ a = 1; b = 2; c = 3; if a == 1: x = 3 elif b == 2: y = 5 assert x == 3 """, [1,2,3,4,5,6], "4-5", report="6 2 4 1 50% 4-5", ) self.check_coverage("""\ a = 1; b = 2; c = 3; if a != 1: x = 3 elif b == 2: y = 5 assert y == 5 """, [1,2,3,4,5,6], "3", report="6 1 4 2 70% 3, 4->6", ) def test_elif_bizarre(self) -> None: self.check_coverage("""\ def f(self): if self==1: x = 3 elif self.m('fred'): x = 5 elif (g==1) and (b==2): x = 7 elif self.m('fred')==True: x = 9 elif ((g==1) and (b==2))==True: x = 11 else: x = 13 """, [1,2,3,4,5,6,7,8,9,10,11,13], "2-13", ) def test_split_if(self) -> None: self.check_coverage("""\ a = 1; b = 2; c = 3; if \\ a == 1: x = 3 elif \\ b == 2: y = 5 else: z = 7 assert x == 3 """, [1,2,4,5,7,9,10], "5-9", ) self.check_coverage("""\ a = 1; b = 2; c = 3; if \\ a != 1: x = 3 elif \\ b == 2: y = 5 else: z = 7 assert y == 5 """, [1,2,4,5,7,9,10], "4, 9", ) self.check_coverage("""\ a = 1; b = 2; c = 3; if \\ a != 1: x = 3 elif \\ b != 2: y = 5 else: z = 7 assert z == 7 """, [1,2,4,5,7,9,10], "4, 7", ) def test_pathological_split_if(self) -> None: self.check_coverage("""\ a = 1; b = 2; c = 3; if ( a == 1 ): x = 3 elif ( b == 2 ): y = 5 else: z = 7 assert x == 3 """, [1,2,5,6,9,11,12], "6-11", ) self.check_coverage("""\ a = 1; b = 2; c = 3; if ( a != 1 ): x = 3 elif ( b == 2 ): y = 5 else: z = 7 assert y == 5 """, [1,2,5,6,9,11,12], "5, 11", ) self.check_coverage("""\ a = 1; b = 2; c = 3; if ( a != 1 ): x = 3 elif ( b != 2 ): y = 5 else: z = 7 assert z == 7 """, [1,2,5,6,9,11,12], "5, 9", ) def test_absurd_split_if(self) -> None: self.check_coverage("""\ a = 1; b = 2; c = 3; if a == 1 \\ : x = 3 elif b == 2 \\ : y = 5 else: z = 7 assert x == 3 """, [1,2,4,5,7,9,10], "5-9", ) self.check_coverage("""\ a = 1; b = 2; c = 3; if a != 1 \\ : x = 3 elif b == 2 \\ : y = 5 else: z = 7 assert y == 5 """, [1,2,4,5,7,9,10], "4, 9", ) self.check_coverage("""\ a = 1; b = 2; c = 3; if a != 1 \\ : x = 3 elif b != 2 \\ : y = 5 else: z = 7 assert z == 7 """, [1,2,4,5,7,9,10], "4, 7", ) def test_constant_if(self) -> None: if env.PYBEHAVIOR.keep_constant_test: lines = [1, 2, 3] else: lines = [2, 3] self.check_coverage("""\ if 1: a = 2 
assert a == 2 """, lines, "", ) def test_while(self) -> None: self.check_coverage("""\ a = 3; b = 0 while a: b += 1 a -= 1 assert a == 0 and b == 3 """, [1,2,3,4,5], "", ) self.check_coverage("""\ a = 3; b = 0 while a: b += 1 break assert a == 3 and b == 1 """, [1,2,3,4,5], "", ) def test_while_else(self) -> None: # Take the else branch. self.check_coverage("""\ a = 3; b = 0 while a: b += 1 a -= 1 else: b = 99 assert a == 0 and b == 99 """, [1,2,3,4,6,7], "", ) # Don't take the else branch. self.check_coverage("""\ a = 3; b = 0 while a: b += 1 a -= 1 break else: b = 99 assert a == 2 and b == 1 """, [1,2,3,4,5,7,8], "7", ) def test_split_while(self) -> None: self.check_coverage("""\ a = 3; b = 0 while \\ a: b += 1 a -= 1 assert a == 0 and b == 3 """, [1,2,4,5,6], "", ) self.check_coverage("""\ a = 3; b = 0 while ( a ): b += 1 a -= 1 assert a == 0 and b == 3 """, [1,2,5,6,7], "", ) def test_for(self) -> None: self.check_coverage("""\ a = 0 for i in [1,2,3,4,5]: a += i assert a == 15 """, [1,2,3,4], "", ) self.check_coverage("""\ a = 0 for i in [1, 2,3,4, 5]: a += i assert a == 15 """, [1,2,5,6], "", ) self.check_coverage("""\ a = 0 for i in [1,2,3,4,5]: a += i break assert a == 1 """, [1,2,3,4,5], "", ) def test_for_else(self) -> None: self.check_coverage("""\ a = 0 for i in range(5): a += i+1 else: a = 99 assert a == 99 """, [1,2,3,5,6], "", ) self.check_coverage("""\ a = 0 for i in range(5): a += i+1 break else: a = 123 assert a == 1 """, [1,2,3,4,6,7], "6", ) def test_split_for(self) -> None: self.check_coverage("""\ a = 0 for \\ i in [1,2,3,4,5]: a += i assert a == 15 """, [1,2,4,5], "", ) self.check_coverage("""\ a = 0 for \\ i in [1, 2,3,4, 5]: a += i assert a == 15 """, [1,2,6,7], "", ) def test_try_except(self) -> None: self.check_coverage("""\ a = 0 try: a = 1 except: a = 99 assert a == 1 """, [1,2,3,4,5,6], "4-5", ) self.check_coverage("""\ a = 0 try: a = 1 raise Exception("foo") except: a = 99 assert a == 99 """, [1,2,3,4,5,6,7], "", ) self.check_coverage("""\ a = 0 try: a = 1 raise Exception("foo") except ImportError: a = 99 except: a = 123 assert a == 123 """, [1,2,3,4,5,6,7,8,9], "6", ) self.check_coverage("""\ a = 0 try: a = 1 raise IOError("foo") except ImportError: a = 99 except IOError: a = 17 except: a = 123 assert a == 17 """, [1,2,3,4,5,6,7,8,9,10,11], "6, 9-10", ) self.check_coverage("""\ a = 0 try: a = 1 except: a = 99 else: a = 123 assert a == 123 """, [1,2,3,4,5,7,8], "4-5", arcz=".1 12 23 45 58 37 78 8.", arcz_missing="45 58", ) def test_try_except_stranded_else(self) -> None: if env.PYBEHAVIOR.optimize_unreachable_try_else: # The else can't be reached because the try ends with a raise. lines = [1,2,3,4,5,6,9] missing = "" arcz = ".1 12 23 34 45 56 69 9." arcz_missing = "" else: lines = [1,2,3,4,5,6,8,9] missing = "8" arcz = ".1 12 23 34 45 56 69 89 9." 
arcz_missing = "89" self.check_coverage("""\ a = 0 try: a = 1 raise Exception("foo") except: a = 99 else: a = 123 assert a == 99 """, lines=lines, missing=missing, arcz=arcz, arcz_missing=arcz_missing, ) def test_try_finally(self) -> None: self.check_coverage("""\ a = 0 try: a = 1 finally: a = 99 assert a == 99 """, [1,2,3,5,6], "", ) self.check_coverage("""\ a = 0; b = 0 try: a = 1 try: raise Exception("foo") finally: b = 123 except: a = 99 assert a == 99 and b == 123 """, [1,2,3,4,5,7,8,9,10], "", ) def test_function_def(self) -> None: self.check_coverage("""\ a = 99 def foo(): ''' docstring ''' return 1 a = foo() assert a == 1 """, [1,2,5,7,8], "", ) self.check_coverage("""\ def foo( a, b ): ''' docstring ''' return a+b x = foo(17, 23) assert x == 40 """, [1,7,9,10], "", ) self.check_coverage("""\ def foo( a = (lambda x: x*2)(10), b = ( lambda x: x+1 )(1) ): ''' docstring ''' return a+b x = foo() assert x == 22 """, [1,10,12,13], "", ) def test_class_def(self) -> None: arcz="-22 2D DE E-2 23 36 6A A-2 -68 8-6 -AB B-A" self.check_coverage("""\ # A comment. class theClass: ''' the docstring. Don't be fooled. ''' def __init__(self): ''' Another docstring. ''' self.a = 1 def foo(self): return self.a x = theClass().foo() assert x == 1 """, [2, 6, 8, 10, 11, 13, 14], "", arcz=arcz, ) class ExcludeTest(CoverageTest): """Tests of the exclusion feature to mark lines as not covered.""" def test_default(self) -> None: # A number of forms of pragma comment are accepted. self.check_coverage("""\ a = 1 b = 2 # pragma: no cover c = 3 d = 4 #pragma NOCOVER e = 5 f = 6#\tpragma:\tno cover g = 7 """, [1,3,5,7], ) def test_two_excludes(self) -> None: self.check_coverage("""\ a = 1; b = 2 if a == 99: a = 4 # -cc b = 5 c = 6 # -xx assert a == 1 and b == 2 """, [1,3,5,7], "5", excludes=['-cc', '-xx'], ) def test_excluding_elif_suites(self) -> None: self.check_coverage("""\ a = 1; b = 2 if 1==1: a = 4 b = 5 c = 6 elif 1==0: #pragma: NO COVER a = 8 b = 9 else: a = 11 b = 12 assert a == 4 and b == 5 and c == 6 """, [1,3,4,5,6,11,12,13], "11-12", excludes=['#pragma: NO COVER'], ) def test_excluding_try_except(self) -> None: self.check_coverage("""\ a = 0 try: a = 1 except: #pragma: NO COVER a = 99 else: a = 123 assert a == 123 """, [1,2,3,7,8], "", excludes=['#pragma: NO COVER'], arcz=".1 12 23 37 45 58 78 8.", arcz_missing="58", ) def test_excluding_try_except_stranded_else(self) -> None: if env.PYBEHAVIOR.optimize_unreachable_try_else: # The else can't be reached because the try ends with a raise. arcz = ".1 12 23 34 45 56 69 9." arcz_missing = "" else: arcz = ".1 12 23 34 45 56 69 89 9." arcz_missing = "89" self.check_coverage("""\ a = 0 try: a = 1 raise Exception("foo") except: a = 99 else: #pragma: NO COVER x = 2 assert a == 99 """, [1,2,3,4,5,6,9], "", excludes=['#pragma: NO COVER'], arcz=arcz, arcz_missing=arcz_missing, ) def test_excluded_comprehension_branches(self) -> None: # https://github.com/nedbat/coveragepy/issues/1271 self.check_coverage("""\ x, y = [0], [1] if x == [2]: raise NotImplementedError # pragma: NO COVER if all(_ == __ for _, __ in zip(x, y)): raise NotImplementedError # pragma: NO COVER """, [1,2,4], "", excludes=['#pragma: NO COVER'], arcz=".1 12 23 24 45 4. 
-44 4-4", arcz_missing="4-4", ) class Py24Test(CoverageTest): """Tests of new syntax in Python 2.4.""" def test_function_decorators(self) -> None: lines = [1, 2, 3, 4, 6, 8, 9, 10, 12] self.check_coverage("""\ def require_int(func): def wrapper(arg): assert isinstance(arg, int) return func(arg) return wrapper @require_int def p1(arg): return arg*2 assert p1(10) == 20 """, lines, "", ) def test_function_decorators_with_args(self) -> None: lines = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12] self.check_coverage("""\ def boost_by(extra): def decorator(func): def wrapper(arg): return extra*func(arg) return wrapper return decorator @boost_by(10) def boosted(arg): return arg*2 assert boosted(10) == 200 """, lines, "", ) def test_double_function_decorators(self) -> None: lines = [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, 19, 21, 22, 23, 24, 26] self.check_coverage("""\ def require_int(func): def wrapper(arg): assert isinstance(arg, int) return func(arg) return wrapper def boost_by(extra): def decorator(func): def wrapper(arg): return extra*func(arg) return wrapper return decorator @require_int @boost_by(10) def boosted1(arg): return arg*2 assert boosted1(10) == 200 @boost_by(10) @require_int def boosted2(arg): return arg*2 assert boosted2(10) == 200 """, lines, "", ) class Py25Test(CoverageTest): """Tests of new syntax in Python 2.5.""" def test_with_statement(self) -> None: self.check_coverage("""\ class Managed: def __enter__(self): desc = "enter" def __exit__(self, type, value, tb): desc = "exit" m = Managed() with m: desc = "block1a" desc = "block1b" try: with m: desc = "block2" raise Exception("Boo!") except: desc = "caught" """, [1,2,3,5,6,8,9,10,11,13,14,15,16,17,18], "", ) def test_try_except_finally(self) -> None: self.check_coverage("""\ a = 0; b = 0 try: a = 1 except: a = 99 finally: b = 2 assert a == 1 and b == 2 """, [1,2,3,4,5,7,8], "4-5", arcz=".1 12 23 37 45 57 78 8.", arcz_missing="45 57", ) self.check_coverage("""\ a = 0; b = 0 try: a = 1 raise Exception("foo") except: a = 99 finally: b = 2 assert a == 99 and b == 2 """, [1,2,3,4,5,6,8,9], "", arcz=".1 12 23 34 45 56 68 89 9.", ) self.check_coverage("""\ a = 0; b = 0 try: a = 1 raise Exception("foo") except ImportError: a = 99 except: a = 123 finally: b = 2 assert a == 123 and b == 2 """, [1,2,3,4,5,6,7,8,10,11], "6", arcz=".1 12 23 34 45 56 57 78 6A 8A AB B.", arcz_missing="56 6A", ) self.check_coverage("""\ a = 0; b = 0 try: a = 1 raise IOError("foo") except ImportError: a = 99 except IOError: a = 17 except: a = 123 finally: b = 2 assert a == 17 and b == 2 """, [1,2,3,4,5,6,7,8,9,10,12,13], "6, 9-10", arcz=".1 12 23 34 45 56 6C 57 78 8C 79 9A AC CD D.", arcz_missing="56 6C 79 9A AC", ) self.check_coverage("""\ a = 0; b = 0 try: a = 1 except: a = 99 else: a = 123 finally: b = 2 assert a == 123 and b == 2 """, [1,2,3,4,5,7,9,10], "4-5", arcz=".1 12 23 37 45 59 79 9A A.", arcz_missing="45 59", ) def test_try_except_finally_stranded_else(self) -> None: if env.PYBEHAVIOR.optimize_unreachable_try_else: # The else can't be reached because the try ends with a raise. lines = [1,2,3,4,5,6,10,11] missing = "" arcz = ".1 12 23 34 45 56 6A AB B." arcz_missing = "" else: lines = [1,2,3,4,5,6,8,10,11] missing = "8" arcz = ".1 12 23 34 45 56 6A 8A AB B." 
arcz_missing = "8A" self.check_coverage("""\ a = 0; b = 0 try: a = 1 raise Exception("foo") except: a = 99 else: a = 123 finally: b = 2 assert a == 99 and b == 2 """, lines=lines, missing=missing, arcz=arcz, arcz_missing=arcz_missing, ) class ModuleTest(CoverageTest): """Tests for the module-level behavior of the `coverage` module.""" run_in_temp_dir = False def test_not_singleton(self) -> None: # You *can* create another coverage object. coverage.Coverage() coverage.Coverage() def test_old_name_and_new_name(self) -> None: assert coverage.coverage is coverage.Coverage class ReportingTest(CoverageTest): """Tests of some reporting behavior.""" def test_no_data_to_report_on_annotate(self) -> None: # Reporting with no data produces a nice message and no output # directory. with pytest.raises(NoDataError, match="No data to report."): self.command_line("annotate -d ann") self.assert_doesnt_exist("ann") def test_no_data_to_report_on_html(self) -> None: # Reporting with no data produces a nice message and no output # directory. with pytest.raises(NoDataError, match="No data to report."): self.command_line("html -d htmlcov") self.assert_doesnt_exist("htmlcov") def test_no_data_to_report_on_xml(self) -> None: # Reporting with no data produces a nice message. with pytest.raises(NoDataError, match="No data to report."): self.command_line("xml") self.assert_doesnt_exist("coverage.xml") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_data.py0000644000175100001770000011216100000000000017615 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.data, and coverage.sqldata.""" from __future__ import annotations import glob import os import os.path import re import sqlite3 import threading from typing import ( Any, Callable, Collection, Iterable, Mapping, TypeVar, Union, ) from unittest import mock import pytest from coverage.data import CoverageData, combine_parallel_data from coverage.data import add_data_to_hash, line_counts from coverage.exceptions import DataError, NoDataError from coverage.files import PathAliases, canonical_filename from coverage.types import FilePathClasses, FilePathType, TArc, TLineNo from tests.coveragetest import CoverageTest from tests.helpers import DebugControlString, assert_count_equal LINES_1 = { 'a.py': {1, 2}, 'b.py': {3}, } SUMMARY_1 = {'a.py': 2, 'b.py': 1} MEASURED_FILES_1 = ['a.py', 'b.py'] A_PY_LINES_1 = [1, 2] B_PY_LINES_1 = [3] LINES_2 = { 'a.py': {1, 5}, 'c.py': {17}, } SUMMARY_1_2 = {'a.py': 3, 'b.py': 1, 'c.py': 1} MEASURED_FILES_1_2 = ['a.py', 'b.py', 'c.py'] ARCS_3 = { 'x.py': {(-1, 1), (1, 2), (2, 3), (3, -1)}, 'y.py': {(-1, 17), (17, 23), (23, -1)}, } X_PY_ARCS_3 = [(-1, 1), (1, 2), (2, 3), (3, -1)] Y_PY_ARCS_3 = [(-1, 17), (17, 23), (23, -1)] SUMMARY_3 = {'x.py': 3, 'y.py': 2} MEASURED_FILES_3 = ['x.py', 'y.py'] X_PY_LINES_3 = [1, 2, 3] Y_PY_LINES_3 = [17, 23] ARCS_4 = { 'x.py': {(-1, 2), (2, 5), (5, -1)}, 'z.py': {(-1, 1000), (1000, -1)}, } SUMMARY_3_4 = {'x.py': 4, 'y.py': 2, 'z.py': 1} MEASURED_FILES_3_4 = ['x.py', 'y.py', 'z.py'] def DebugCoverageData(*args: Any, **kwargs: Any) -> CoverageData: """Factory for CovergeData instances with debugging turned on. This lets us exercise the debugging lines in sqldata.py. 
We don't make any assertions about the debug output, but at least we can know that they execute successfully, and they won't be marked as distracting missing lines in our coverage reports. In the tests in this file, we usually use DebugCoverageData, but sometimes a plain CoverageData, and some tests are parameterized to run once with each so that we have a mix of debugging or not. """ assert "debug" not in kwargs options = ["dataio", "dataop", "sql"] if kwargs: # There's no logical reason kwargs should imply sqldata debugging. # This is just a way to get a mix of debug options across the tests. options.extend(["dataop2", "sqldata"]) debug = DebugControlString(options=options) return CoverageData(*args, debug=debug, **kwargs) # type: ignore[misc] TCoverageData = Callable[..., CoverageData] def assert_line_counts( covdata: CoverageData, counts: Mapping[str, int], fullpath: bool = False, ) -> None: """Check that the line_counts of `covdata` is `counts`.""" assert line_counts(covdata, fullpath) == counts def assert_measured_files(covdata: CoverageData, measured: Iterable[str]) -> None: """Check that `covdata`'s measured files are `measured`.""" assert_count_equal(covdata.measured_files(), measured) def assert_lines1_data(covdata: CoverageData) -> None: """Check that `covdata` has the data from LINES1.""" assert_line_counts(covdata, SUMMARY_1) assert_measured_files(covdata, MEASURED_FILES_1) assert_count_equal(covdata.lines("a.py"), A_PY_LINES_1) assert not covdata.has_arcs() def assert_arcs3_data(covdata: CoverageData) -> None: """Check that `covdata` has the data from ARCS3.""" assert_line_counts(covdata, SUMMARY_3) assert_measured_files(covdata, MEASURED_FILES_3) assert_count_equal(covdata.lines("x.py"), X_PY_LINES_3) assert_count_equal(covdata.arcs("x.py"), X_PY_ARCS_3) assert_count_equal(covdata.lines("y.py"), Y_PY_LINES_3) assert_count_equal(covdata.arcs("y.py"), Y_PY_ARCS_3) assert covdata.has_arcs() TData = TypeVar("TData", bound=Union[TLineNo, TArc]) def dicts_from_sets(file_data: dict[str, set[TData]]) -> dict[str, dict[TData, None]]: """Convert a dict of sets into a dict of dicts. Before 6.0, file data was a dict with None as the values. In 6.0, file data is a set. SqlData all along only cared that it was an iterable. This function helps us test that the old dict format still works. 
""" return {k: dict.fromkeys(v) for k, v in file_data.items()} class CoverageDataTest(CoverageTest): """Test cases for CoverageData.""" def test_empty_data_is_false(self) -> None: covdata = DebugCoverageData() assert not covdata self.assert_doesnt_exist(".coverage") def test_empty_data_is_false_when_read(self) -> None: covdata = DebugCoverageData() covdata.read() assert not covdata self.assert_doesnt_exist(".coverage") def test_line_data_is_true(self) -> None: covdata = DebugCoverageData() covdata.add_lines(LINES_1) assert covdata def test_arc_data_is_true(self) -> None: covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) assert covdata def test_empty_line_data_is_false(self) -> None: covdata = DebugCoverageData() covdata.add_lines({}) assert not covdata def test_empty_arc_data_is_false(self) -> None: covdata = DebugCoverageData() covdata.add_arcs({}) assert not covdata @pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)]) def test_adding_lines(self, lines: Mapping[str, Collection[TLineNo]]) -> None: covdata = DebugCoverageData() covdata.add_lines(lines) assert_lines1_data(covdata) @pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)]) def test_adding_arcs(self, arcs: Mapping[str, Collection[TArc]]) -> None: covdata = DebugCoverageData() covdata.add_arcs(arcs) assert_arcs3_data(covdata) def test_ok_to_add_lines_twice(self) -> None: covdata = DebugCoverageData() covdata.add_lines(LINES_1) covdata.add_lines(LINES_2) assert_line_counts(covdata, SUMMARY_1_2) assert_measured_files(covdata, MEASURED_FILES_1_2) def test_ok_to_add_arcs_twice(self) -> None: covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.add_arcs(ARCS_4) assert_line_counts(covdata, SUMMARY_3_4) assert_measured_files(covdata, MEASURED_FILES_3_4) def test_ok_to_add_empty_arcs(self) -> None: covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.add_arcs(ARCS_4) covdata.add_arcs(dict.fromkeys(ARCS_3, set())) assert_line_counts(covdata, SUMMARY_3_4) assert_measured_files(covdata, MEASURED_FILES_3_4) @pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData]) def test_cant_add_arcs_with_lines(self, klass: TCoverageData) -> None: covdata = klass() covdata.add_lines(LINES_1) msg = "Can't add branch measurements to existing line data" with pytest.raises(DataError, match=msg): covdata.add_arcs(ARCS_3) @pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData]) def test_cant_add_lines_with_arcs(self, klass: TCoverageData) -> None: covdata = klass() covdata.add_arcs(ARCS_3) msg = "Can't add line measurements to existing branch data" with pytest.raises(DataError, match=msg): covdata.add_lines(LINES_1) def test_touch_file_with_lines(self) -> None: covdata = DebugCoverageData() covdata.add_lines(LINES_1) covdata.touch_file('zzz.py') assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py']) def test_touch_file_with_arcs(self) -> None: covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.touch_file('zzz.py') assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py']) def test_set_query_contexts(self) -> None: covdata = DebugCoverageData() covdata.set_context('test_a') covdata.add_lines(LINES_1) covdata.set_query_contexts(['te.*a']) assert covdata.lines('a.py') == [1, 2] covdata.set_query_contexts(['other']) assert covdata.lines('a.py') == [] def test_no_lines_vs_unmeasured_file(self) -> None: covdata = DebugCoverageData() covdata.add_lines(LINES_1) covdata.touch_file('zzz.py') assert covdata.lines('zzz.py') == [] assert covdata.lines('no_such_file.py') is 
None def test_lines_with_contexts(self) -> None: covdata = DebugCoverageData() covdata.set_context('test_a') covdata.add_lines(LINES_1) assert covdata.lines('a.py') == [1, 2] covdata.set_query_contexts(['test']) assert covdata.lines('a.py') == [1, 2] covdata.set_query_contexts(['other']) assert covdata.lines('a.py') == [] def test_contexts_by_lineno_with_lines(self) -> None: covdata = DebugCoverageData() covdata.set_context('test_a') covdata.add_lines(LINES_1) expected = {1: ['test_a'], 2: ['test_a']} assert covdata.contexts_by_lineno('a.py') == expected @pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)]) def test_no_duplicate_lines(self, lines: Mapping[str, Collection[TLineNo]]) -> None: covdata = DebugCoverageData() covdata.set_context("context1") covdata.add_lines(lines) covdata.set_context("context2") covdata.add_lines(lines) assert covdata.lines('a.py') == A_PY_LINES_1 @pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)]) def test_no_duplicate_arcs(self, arcs: Mapping[str, Collection[TArc]]) -> None: covdata = DebugCoverageData() covdata.set_context("context1") covdata.add_arcs(arcs) covdata.set_context("context2") covdata.add_arcs(arcs) assert covdata.arcs('x.py') == X_PY_ARCS_3 def test_no_arcs_vs_unmeasured_file(self) -> None: covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.touch_file('zzz.py') assert covdata.lines('zzz.py') == [] assert covdata.lines('no_such_file.py') is None assert covdata.arcs('zzz.py') == [] assert covdata.arcs('no_such_file.py') is None def test_arcs_with_contexts(self) -> None: covdata = DebugCoverageData() covdata.set_context('test_x') covdata.add_arcs(ARCS_3) assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)] covdata.set_query_contexts(['test_.$']) assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)] covdata.set_query_contexts(['other']) assert covdata.arcs('x.py') == [] def test_contexts_by_lineno_with_arcs(self) -> None: covdata = DebugCoverageData() covdata.set_context('test_x') covdata.add_arcs(ARCS_3) expected = {1: ['test_x'], 2: ['test_x'], 3: ['test_x']} assert covdata.contexts_by_lineno('x.py') == expected def test_contexts_by_lineno_with_unknown_file(self) -> None: covdata = DebugCoverageData() covdata.set_context('test_x') covdata.add_arcs(ARCS_3) assert covdata.contexts_by_lineno('xyz.py') == {} def test_context_by_lineno_with_query_contexts_with_lines(self) -> None: covdata = DebugCoverageData() covdata.set_context("test_1") covdata.add_lines(LINES_1) covdata.set_context("test_2") covdata.add_lines(LINES_2) covdata.set_query_context("test_1") assert covdata.contexts_by_lineno("a.py") == dict.fromkeys([1,2], ["test_1"]) def test_context_by_lineno_with_query_contexts_with_arcs(self) -> None: covdata = DebugCoverageData() covdata.set_context("test_1") covdata.add_arcs(ARCS_3) covdata.set_context("test_2") covdata.add_arcs(ARCS_4) covdata.set_query_context("test_1") assert covdata.contexts_by_lineno("x.py") == dict.fromkeys([1,2,3], ["test_1"]) def test_file_tracer_name(self) -> None: covdata = DebugCoverageData() covdata.add_lines({ "p1.foo": [1, 2, 3], "p2.html": [10, 11, 12], "main.py": [20], }) covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"}) assert covdata.file_tracer("p1.foo") == "p1.plugin" assert covdata.file_tracer("p2.html") == "p2.plugin" assert covdata.file_tracer("main.py") == "" assert covdata.file_tracer("p3.not_here") is None def test_ok_to_repeat_file_tracer(self) -> None: covdata = DebugCoverageData() covdata.add_lines({ "p1.foo": 
[1, 2, 3], "p2.html": [10, 11, 12], }) covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"}) covdata.add_file_tracers({"p1.foo": "p1.plugin"}) assert covdata.file_tracer("p1.foo") == "p1.plugin" def test_ok_to_set_empty_file_tracer(self) -> None: covdata = DebugCoverageData() covdata.add_lines({ "p1.foo": [1, 2, 3], "p2.html": [10, 11, 12], "main.py": [20], }) covdata.add_file_tracers({"p1.foo": "p1.plugin", "main.py": ""}) assert covdata.file_tracer("p1.foo") == "p1.plugin" assert covdata.file_tracer("main.py") == "" def test_cant_change_file_tracer_name(self) -> None: covdata = DebugCoverageData() covdata.add_lines({"p1.foo": [1, 2, 3]}) covdata.add_file_tracers({"p1.foo": "p1.plugin"}) msg = "Conflicting file tracer name for 'p1.foo': 'p1.plugin' vs 'p1.plugin.foo'" with pytest.raises(DataError, match=msg): covdata.add_file_tracers({"p1.foo": "p1.plugin.foo"}) def test_update_lines(self) -> None: covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines(LINES_1) covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines(LINES_2) covdata3 = DebugCoverageData(suffix='3') covdata3.update(covdata1) covdata3.update(covdata2) assert_line_counts(covdata3, SUMMARY_1_2) assert_measured_files(covdata3, MEASURED_FILES_1_2) def test_update_arcs(self) -> None: covdata1 = DebugCoverageData(suffix='1') covdata1.add_arcs(ARCS_3) covdata2 = DebugCoverageData(suffix='2') covdata2.add_arcs(ARCS_4) covdata3 = DebugCoverageData(suffix='3') covdata3.update(covdata1) covdata3.update(covdata2) assert_line_counts(covdata3, SUMMARY_3_4) assert_measured_files(covdata3, MEASURED_FILES_3_4) def test_update_cant_mix_lines_and_arcs(self) -> None: covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines(LINES_1) covdata2 = DebugCoverageData(suffix='2') covdata2.add_arcs(ARCS_3) with pytest.raises(DataError, match="Can't combine arc data with line data"): covdata1.update(covdata2) with pytest.raises(DataError, match="Can't combine line data with arc data"): covdata2.update(covdata1) def test_update_file_tracers(self) -> None: covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines({ "p1.html": [1, 2, 3, 4], "p2.html": [5, 6, 7], "main.py": [10, 11, 12], }) covdata1.add_file_tracers({ "p1.html": "html.plugin", "p2.html": "html.plugin2", }) covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines({ "p1.html": [3, 4, 5, 6], "p2.html": [7, 8, 9], "p3.foo": [1000, 1001], "main.py": [10, 11, 12], }) covdata2.add_file_tracers({ "p1.html": "html.plugin", "p2.html": "html.plugin2", "p3.foo": "foo_plugin", }) covdata3 = DebugCoverageData(suffix='3') covdata3.update(covdata1) covdata3.update(covdata2) assert covdata3.file_tracer("p1.html") == "html.plugin" assert covdata3.file_tracer("p2.html") == "html.plugin2" assert covdata3.file_tracer("p3.foo") == "foo_plugin" assert covdata3.file_tracer("main.py") == "" def test_update_conflicting_file_tracers(self) -> None: covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines({"p1.html": [1, 2, 3]}) covdata1.add_file_tracers({"p1.html": "html.plugin"}) covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines({"p1.html": [1, 2, 3]}) covdata2.add_file_tracers({"p1.html": "html.other_plugin"}) msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs 'html.other_plugin'" with pytest.raises(DataError, match=msg): covdata1.update(covdata2) msg = "Conflicting file tracer name for 'p1.html': 'html.other_plugin' vs 'html.plugin'" with pytest.raises(DataError, match=msg): covdata2.update(covdata1) def 
test_update_file_tracer_vs_no_file_tracer(self) -> None: covdata1 = DebugCoverageData(suffix="1") covdata1.add_lines({"p1.html": [1, 2, 3]}) covdata1.add_file_tracers({"p1.html": "html.plugin"}) covdata2 = DebugCoverageData(suffix="2") covdata2.add_lines({"p1.html": [1, 2, 3]}) msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs ''" with pytest.raises(DataError, match=msg): covdata1.update(covdata2) msg = "Conflicting file tracer name for 'p1.html': '' vs 'html.plugin'" with pytest.raises(DataError, match=msg): covdata2.update(covdata1) def test_update_lines_empty(self) -> None: covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines(LINES_1) covdata2 = DebugCoverageData(suffix='2') covdata1.update(covdata2) assert_line_counts(covdata1, SUMMARY_1) def test_update_arcs_empty(self) -> None: covdata1 = DebugCoverageData(suffix='1') covdata1.add_arcs(ARCS_3) covdata2 = DebugCoverageData(suffix='2') covdata1.update(covdata2) assert_line_counts(covdata1, SUMMARY_3) def test_asking_isnt_measuring(self) -> None: # Asking about an unmeasured file shouldn't make it seem measured. covdata = DebugCoverageData() assert_measured_files(covdata, []) assert covdata.arcs("missing.py") is None assert_measured_files(covdata, []) def test_add_to_hash_with_lines(self) -> None: covdata = DebugCoverageData() covdata.add_lines(LINES_1) hasher = mock.Mock() add_data_to_hash(covdata, "a.py", hasher) assert hasher.method_calls == [ mock.call.update([1, 2]), # lines mock.call.update(""), # file_tracer name ] def test_add_to_hash_with_arcs(self) -> None: covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.add_file_tracers({"y.py": "hologram_plugin"}) hasher = mock.Mock() add_data_to_hash(covdata, "y.py", hasher) assert hasher.method_calls == [ mock.call.update([(-1, 17), (17, 23), (23, -1)]), # arcs mock.call.update("hologram_plugin"), # file_tracer name ] def test_add_to_lines_hash_with_missing_file(self) -> None: # https://github.com/nedbat/coveragepy/issues/403 covdata = DebugCoverageData() covdata.add_lines(LINES_1) hasher = mock.Mock() add_data_to_hash(covdata, "missing.py", hasher) assert hasher.method_calls == [ mock.call.update([]), mock.call.update(None), ] def test_add_to_arcs_hash_with_missing_file(self) -> None: # https://github.com/nedbat/coveragepy/issues/403 covdata = DebugCoverageData() covdata.add_arcs(ARCS_3) covdata.add_file_tracers({"y.py": "hologram_plugin"}) hasher = mock.Mock() add_data_to_hash(covdata, "missing.py", hasher) assert hasher.method_calls == [ mock.call.update([]), mock.call.update(None), ] def test_empty_lines_are_still_lines(self) -> None: covdata = DebugCoverageData() covdata.add_lines({}) covdata.touch_file("abc.py") assert not covdata.has_arcs() def test_empty_arcs_are_still_arcs(self) -> None: covdata = DebugCoverageData() covdata.add_arcs({}) covdata.touch_file("abc.py") assert covdata.has_arcs() def test_cant_touch_in_empty_data(self) -> None: covdata = DebugCoverageData() msg = "Can't touch files in an empty CoverageData" with pytest.raises(DataError, match=msg): covdata.touch_file("abc.py") def test_read_and_write_are_opposites(self) -> None: covdata1 = DebugCoverageData() covdata1.add_arcs(ARCS_3) covdata1.write() covdata2 = DebugCoverageData() covdata2.read() assert_arcs3_data(covdata2) def test_thread_stress(self) -> None: covdata = DebugCoverageData() exceptions = [] def thread_main() -> None: """Every thread will try to add the same data.""" try: covdata.add_lines(LINES_1) except Exception as ex: # pragma: only failure 
exceptions.append(ex) threads = [threading.Thread(target=thread_main) for _ in range(10)] for t in threads: t.start() for t in threads: t.join() assert_lines1_data(covdata) assert not exceptions def test_purge_files_lines(self) -> None: covdata = DebugCoverageData() covdata.add_lines(LINES_1) covdata.add_lines(LINES_2) assert_line_counts(covdata, SUMMARY_1_2) covdata.purge_files(["a.py", "b.py"]) assert_line_counts(covdata, {"a.py": 0, "b.py": 0, "c.py": 1}) covdata.purge_files(["c.py"]) assert_line_counts(covdata, {"a.py": 0, "b.py": 0, "c.py": 0}) # It's OK to "purge" a file that wasn't measured. covdata.purge_files(["xyz.py"]) assert_line_counts(covdata, {"a.py": 0, "b.py": 0, "c.py": 0}) def test_purge_files_arcs(self) -> None: covdata = CoverageData() covdata.add_arcs(ARCS_3) covdata.add_arcs(ARCS_4) assert_line_counts(covdata, SUMMARY_3_4) covdata.purge_files(["x.py", "y.py"]) assert_line_counts(covdata, {"x.py": 0, "y.py": 0, "z.py": 1}) covdata.purge_files(["z.py"]) assert_line_counts(covdata, {"x.py": 0, "y.py": 0, "z.py": 0}) def test_cant_purge_in_empty_data(self) -> None: covdata = DebugCoverageData() msg = "Can't purge files in an empty CoverageData" with pytest.raises(DataError, match=msg): covdata.purge_files(["abc.py"]) class CoverageDataInTempDirTest(CoverageTest): """Tests of CoverageData that need a temporary directory to make files.""" @pytest.mark.parametrize("file_class", FilePathClasses) def test_read_write_lines(self, file_class: FilePathType) -> None: self.assert_doesnt_exist("lines.dat") covdata1 = DebugCoverageData(file_class("lines.dat")) covdata1.add_lines(LINES_1) covdata1.write() self.assert_exists("lines.dat") covdata2 = DebugCoverageData("lines.dat") covdata2.read() assert_lines1_data(covdata2) def test_read_write_arcs(self) -> None: covdata1 = DebugCoverageData("arcs.dat") covdata1.add_arcs(ARCS_3) covdata1.write() covdata2 = DebugCoverageData("arcs.dat") covdata2.read() assert_arcs3_data(covdata2) def test_read_errors(self) -> None: self.make_file("xyzzy.dat", "xyzzy") with pytest.raises(DataError, match=r"Couldn't .* '.*[/\\]xyzzy.dat': \S+"): covdata = DebugCoverageData("xyzzy.dat") covdata.read() assert not covdata def test_hard_read_error(self) -> None: self.make_file("noperms.dat", "go away") os.chmod("noperms.dat", 0) with pytest.raises(DataError, match=r"Couldn't .* '.*[/\\]noperms.dat': \S+"): covdata = DebugCoverageData("noperms.dat") covdata.read() @pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData]) def test_error_when_closing(self, klass: TCoverageData) -> None: msg = r"Couldn't .* '.*[/\\]flaked.dat': \S+" with pytest.raises(DataError, match=msg): covdata = klass("flaked.dat") covdata.add_lines(LINES_1) # I don't know how to make a real error, so let's fake one. 
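# --- Editor's illustrative aside (not part of the original test) ---
# Swapping a close() method for a callable that raises is a cheap way to
# simulate a failure that is otherwise hard to provoke; the ZeroDivisionError
# below stands in for whatever low-level error the real code might hit.
class _FlakySketch:
    def close(self):
        pass

_flaky = _FlakySketch()
_flaky.close = lambda: 1 / 0  # shadow the method with a failing callable
try:
    _flaky.close()
except ZeroDivisionError:
    pass
# --- end of editor's aside ---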
sqldb = list(covdata._dbs.values())[0] sqldb.close = lambda: 1/0 # type: ignore[assignment] covdata.add_lines(LINES_1) def test_wrong_schema_version(self) -> None: with sqlite3.connect("wrong_schema.db") as con: con.execute("create table coverage_schema (version integer)") con.execute("insert into coverage_schema (version) values (99)") msg = r"Couldn't .* '.*[/\\]wrong_schema.db': wrong schema: 99 instead of \d+" with pytest.raises(DataError, match=msg): covdata = DebugCoverageData("wrong_schema.db") covdata.read() assert not covdata def test_wrong_schema_schema(self) -> None: with sqlite3.connect("wrong_schema_schema.db") as con: con.execute("create table coverage_schema (xyzzy integer)") con.execute("insert into coverage_schema (xyzzy) values (99)") msg = r"Data file .* doesn't seem to be a coverage data file: .* no such column" with pytest.raises(DataError, match=msg): covdata = DebugCoverageData("wrong_schema_schema.db") covdata.read() assert not covdata class CoverageDataFilesTest(CoverageTest): """Tests of CoverageData file handling.""" def test_reading_missing(self) -> None: self.assert_doesnt_exist(".coverage") covdata = DebugCoverageData() covdata.read() assert_line_counts(covdata, {}) def test_writing_and_reading(self) -> None: covdata1 = DebugCoverageData() covdata1.add_lines(LINES_1) covdata1.write() covdata2 = DebugCoverageData() covdata2.read() assert_line_counts(covdata2, SUMMARY_1) def test_debug_output_with_debug_option(self) -> None: # With debug option dataio, we get debug output about reading and # writing files. debug = DebugControlString(options=["dataio"]) covdata1 = CoverageData(debug=debug) covdata1.add_lines(LINES_1) covdata1.write() covdata2 = CoverageData(debug=debug) covdata2.read() assert_line_counts(covdata2, SUMMARY_1) assert re.search( r"^Erasing data file '.*\.coverage'\n" + r"Opening data file '.*\.coverage'\n" + r"Initing data file '.*\.coverage'\n" + r"Opening data file '.*\.coverage'\n$", debug.get_output(), ) def test_debug_output_without_debug_option(self) -> None: # With a debug object, but not the dataio option, we don't get debug # output. debug = DebugControlString(options=[]) covdata1 = CoverageData(debug=debug) covdata1.add_lines(LINES_1) covdata1.write() covdata2 = CoverageData(debug=debug) covdata2.read() assert_line_counts(covdata2, SUMMARY_1) assert debug.get_output() == "" def test_explicit_suffix(self) -> None: self.assert_doesnt_exist(".coverage.SUFFIX") covdata = DebugCoverageData(suffix='SUFFIX') covdata.add_lines(LINES_1) covdata.write() self.assert_exists(".coverage.SUFFIX") self.assert_doesnt_exist(".coverage") def test_true_suffix(self) -> None: self.assert_file_count(".coverage.*", 0) # suffix=True will make a randomly named data file. covdata1 = DebugCoverageData(suffix=True) covdata1.add_lines(LINES_1) covdata1.write() self.assert_doesnt_exist(".coverage") data_files1 = glob.glob(".coverage.*") assert len(data_files1) == 1 # Another suffix=True will choose a different name. covdata2 = DebugCoverageData(suffix=True) covdata2.add_lines(LINES_1) covdata2.write() self.assert_doesnt_exist(".coverage") data_files2 = glob.glob(".coverage.*") assert len(data_files2) == 2 # In addition to being different, the suffixes have the pid in them. 
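# --- Editor's illustrative aside (not part of the original test) ---
# suffix=True produces a per-process data file name; the exact format is an
# implementation detail, but it is roughly ".coverage.<host>.<pid>.<random>",
# which is why checking for the pid substring is a safe, loose assertion.
_hypothetical_name = f".coverage.somehost.{os.getpid()}.XmEtRq"  # made-up example
assert str(os.getpid()) in _hypothetical_name
# --- end of editor's aside ---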
assert all(str(os.getpid()) in fn for fn in data_files2) def test_combining(self) -> None: self.assert_file_count(".coverage.*", 0) covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines(LINES_1) covdata1.write() self.assert_exists(".coverage.1") self.assert_file_count(".coverage.*", 1) covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines(LINES_2) covdata2.write() self.assert_exists(".coverage.2") self.assert_file_count(".coverage.*", 2) covdata3 = DebugCoverageData() combine_parallel_data(covdata3) assert_line_counts(covdata3, SUMMARY_1_2) assert_measured_files(covdata3, MEASURED_FILES_1_2) self.assert_file_count(".coverage.*", 0) def test_erasing(self) -> None: covdata1 = DebugCoverageData() covdata1.add_lines(LINES_1) covdata1.write() covdata1.erase() assert_line_counts(covdata1, {}) covdata2 = DebugCoverageData() covdata2.read() assert_line_counts(covdata2, {}) def test_erasing_parallel(self) -> None: self.make_file("datafile.1") self.make_file("datafile.2") self.make_file(".coverage") data = DebugCoverageData("datafile") data.erase(parallel=True) self.assert_file_count("datafile.*", 0) self.assert_exists(".coverage") def test_combining_with_aliases(self) -> None: covdata1 = DebugCoverageData(suffix='1') covdata1.add_lines({ '/home/ned/proj/src/a.py': {1, 2}, '/home/ned/proj/src/sub/b.py': {3}, '/home/ned/proj/src/template.html': {10}, }) covdata1.add_file_tracers({ '/home/ned/proj/src/template.html': 'html.plugin', }) covdata1.write() covdata2 = DebugCoverageData(suffix='2') covdata2.add_lines({ r'c:\ned\test\a.py': {4, 5}, r'c:\ned\test\sub\b.py': {3, 6}, }) covdata2.write() self.assert_file_count(".coverage.*", 2) self.make_file("a.py", "") self.make_file("sub/b.py", "") self.make_file("template.html", "") covdata3 = DebugCoverageData() aliases = PathAliases() aliases.add("/home/ned/proj/src/", "./") aliases.add(r"c:\ned\test", "./") combine_parallel_data(covdata3, aliases=aliases) self.assert_file_count(".coverage.*", 0) self.assert_exists(".coverage") apy = canonical_filename('./a.py') sub_bpy = canonical_filename('./sub/b.py') template_html = canonical_filename('./template.html') assert_line_counts(covdata3, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True) assert_measured_files(covdata3, [apy, sub_bpy, template_html]) assert covdata3.file_tracer(template_html) == 'html.plugin' def test_combining_from_different_directories(self) -> None: os.makedirs('cov1') covdata1 = DebugCoverageData('cov1/.coverage.1') covdata1.add_lines(LINES_1) covdata1.write() os.makedirs('cov2') covdata2 = DebugCoverageData('cov2/.coverage.2') covdata2.add_lines(LINES_2) covdata2.write() # This data won't be included. covdata_xxx = DebugCoverageData('.coverage.xxx') covdata_xxx.add_arcs(ARCS_3) covdata_xxx.write() covdata3 = DebugCoverageData() combine_parallel_data(covdata3, data_paths=['cov1', 'cov2']) assert_line_counts(covdata3, SUMMARY_1_2) assert_measured_files(covdata3, MEASURED_FILES_1_2) self.assert_doesnt_exist("cov1/.coverage.1") self.assert_doesnt_exist("cov2/.coverage.2") self.assert_exists(".coverage.xxx") def test_combining_from_files(self) -> None: os.makedirs('cov1') covdata1 = DebugCoverageData('cov1/.coverage.1') covdata1.add_lines(LINES_1) covdata1.write() # Journal files should never be included in the combining. self.make_file("cov1/.coverage.1-journal", "xyzzy") os.makedirs('cov2') covdata2 = DebugCoverageData('cov2/.coverage.2') covdata2.add_lines(LINES_2) covdata2.write() # This data won't be included. 
covdata_xxx = DebugCoverageData('.coverage.xxx') covdata_xxx.add_arcs(ARCS_3) covdata_xxx.write() covdata_2xxx = DebugCoverageData('cov2/.coverage.xxx') covdata_2xxx.add_arcs(ARCS_3) covdata_2xxx.write() covdata3 = DebugCoverageData() combine_parallel_data(covdata3, data_paths=['cov1', 'cov2/.coverage.2']) assert_line_counts(covdata3, SUMMARY_1_2) assert_measured_files(covdata3, MEASURED_FILES_1_2) self.assert_doesnt_exist("cov1/.coverage.1") self.assert_doesnt_exist("cov2/.coverage.2") self.assert_exists(".coverage.xxx") self.assert_exists("cov2/.coverage.xxx") def test_combining_from_nonexistent_directories(self) -> None: covdata = DebugCoverageData() msg = "Couldn't combine from non-existent path 'xyzzy'" with pytest.raises(NoDataError, match=msg): combine_parallel_data(covdata, data_paths=['xyzzy']) def test_interleaved_erasing_bug716(self) -> None: # pytest-cov could produce this scenario. #716 covdata1 = DebugCoverageData() covdata2 = DebugCoverageData() # this used to create the .coverage database file.. covdata2.set_context("") # then this would erase it all.. covdata1.erase() # then this would try to use tables that no longer exist. # "no such table: meta" covdata2.add_lines(LINES_1) @pytest.mark.parametrize( "dpart, fpart", [ ("", "[b-a]"), ("[3-1]", ""), ("[3-1]", "[b-a]"), ], ) def test_combining_with_crazy_filename(self, dpart: str, fpart: str) -> None: dirname = f"py{dpart}" basename = f"{dirname}/.coverage{fpart}" os.makedirs(dirname) covdata1 = CoverageData(basename=basename, suffix="1") covdata1.add_lines(LINES_1) covdata1.write() covdata2 = CoverageData(basename=basename, suffix="2") covdata2.add_lines(LINES_2) covdata2.write() covdata3 = CoverageData(basename=basename) combine_parallel_data(covdata3) assert_line_counts(covdata3, SUMMARY_1_2) assert_measured_files(covdata3, MEASURED_FILES_1_2) self.assert_file_count(glob.escape(basename) + ".*", 0) def test_meta_data(self) -> None: # The metadata written to the data file shouldn't interfere with # hashing to remove duplicates, except for debug=process, which # writes debugging info as metadata. 
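# --- Editor's illustrative sketch (not part of the original test) ---
# The assertions below read the `meta` table of the SQLite data files directly.
# The same "select key from meta" query is easy to try against a throwaway
# in-memory database; only the two keys the test itself checks are assumed here.
with sqlite3.connect(":memory:") as _con:
    _con.execute("create table meta (key text, value text)")
    _con.executemany("insert into meta (key) values (?)", [("version",), ("has_arcs",)])
    _keys = sorted(k for (k,) in _con.execute("select key from meta"))
assert _keys == ["has_arcs", "version"]
# --- end of editor's sketch ---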
debug = DebugControlString(options=[]) covdata1 = CoverageData(basename="meta.1", debug=debug) covdata1.add_lines(LINES_1) covdata1.write() with sqlite3.connect("meta.1") as con: data = sorted(k for (k,) in con.execute("select key from meta")) assert data == ["has_arcs", "version"] debug = DebugControlString(options=["process"]) covdata2 = CoverageData(basename="meta.2", debug=debug) covdata2.add_lines(LINES_1) covdata2.write() with sqlite3.connect("meta.2") as con: data = sorted(k for (k,) in con.execute("select key from meta")) assert data == ["has_arcs", "sys_argv", "version", "when"] class DumpsLoadsTest(CoverageTest): """Tests of CoverageData.dumps and loads.""" run_in_temp_dir = False @pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData]) def test_serialization(self, klass: TCoverageData) -> None: covdata1 = klass(no_disk=True) covdata1.add_lines(LINES_1) covdata1.add_lines(LINES_2) serial = covdata1.dumps() covdata2 = klass(no_disk=True) covdata2.loads(serial) assert_line_counts(covdata2, SUMMARY_1_2) assert_measured_files(covdata2, MEASURED_FILES_1_2) def test_misfed_serialization(self) -> None: covdata = CoverageData(no_disk=True) bad_data = b'Hello, world!\x07 ' + b'z' * 100 msg = r"Unrecognized serialization: {} \(head of {} bytes\)".format( re.escape(repr(bad_data[:40])), len(bad_data), ) with pytest.raises(DataError, match=msg): covdata.loads(bad_data) class NoDiskTest(CoverageTest): """Tests of in-memory CoverageData.""" run_in_temp_dir = False def test_updating(self) -> None: # https://github.com/nedbat/coveragepy/issues/1323 a = CoverageData(no_disk=True) a.add_lines({'foo.py': [10, 20, 30]}) assert a.measured_files() == {'foo.py'} b = CoverageData(no_disk=True) b.update(a) assert b.measured_files() == {'foo.py'} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_debug.py0000644000175100001770000004030600000000000017773 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests of coverage/debug.py""" from __future__ import annotations import ast import io import os import re import sys from typing import Any, Callable, Iterable import pytest import coverage from coverage import env from coverage.debug import ( DebugControl, DebugOutputFile, auto_repr, clipped_repr, exc_one_line, filter_text, info_formatter, info_header, relevant_environment_display, short_id, short_filename, short_stack, ) from coverage.exceptions import DataError from tests import testenv from tests.coveragetest import CoverageTest from tests.helpers import DebugControlString, re_line, re_lines, re_lines_text class InfoFormatterTest(CoverageTest): """Tests of debug.info_formatter.""" run_in_temp_dir = False def test_info_formatter(self) -> None: lines = list(info_formatter([ ('x', 'hello there'), ('very long label', ['one element']), ('regular', ['abc', 'def', 'ghi', 'jkl']), ('nothing', []), ])) expected = [ ' x: hello there', ' very long label: one element', ' regular: abc', ' def', ' ghi', ' jkl', ' nothing: -none-', ] assert expected == lines def test_info_formatter_with_generator(self) -> None: lines = list(info_formatter(('info%d' % i, i) for i in range(3))) expected = [ ' info0: 0', ' info1: 1', ' info2: 2', ] assert expected == lines def test_too_long_label(self) -> None: with pytest.raises(AssertionError): list(info_formatter([('this label is way too long and will 
not fit', 23)])) @pytest.mark.parametrize("label, header", [ ("x", "-- x ---------------------------------------------------------"), ("hello there", "-- hello there -----------------------------------------------"), ]) def test_info_header(label: str, header: str) -> None: assert header == info_header(label) @pytest.mark.parametrize("id64, id16", [ (0x1234, 0x1234), (0x12340000, 0x1234), (0xA5A55A5A, 0xFFFF), (0x1234cba956780fed, 0x8008), ]) def test_short_id(id64: int, id16: int) -> None: assert id16 == short_id(id64) @pytest.mark.parametrize("text, numchars, result", [ ("hello", 10, "'hello'"), ("0123456789abcdefghijklmnopqrstuvwxyz", 15, "'01234...vwxyz'"), ]) def test_clipped_repr(text: str, numchars: int, result: str) -> None: assert result == clipped_repr(text, numchars) @pytest.mark.parametrize("text, filters, result", [ ("hello", [], "hello"), ("hello\n", [], "hello\n"), ("hello\nhello\n", [], "hello\nhello\n"), ("hello\nbye\n", [lambda x: "="+x], "=hello\n=bye\n"), ("hello\nbye\n", [lambda x: "="+x, lambda x: x+"\ndone\n"], "=hello\ndone\n=bye\ndone\n"), ]) def test_filter_text( text: str, filters: Iterable[Callable[[str], str]], result: str, ) -> None: assert result == filter_text(text, filters) class DebugTraceTest(CoverageTest): """Tests of debug output.""" def f1_debug_output(self, debug: Iterable[str]) -> str: """Runs some code with `debug` option, returns the debug output.""" # Make code to run. self.make_file("f1.py", """\ def f1(x): return x+1 for i in range(5): f1(i) """) debug_out = io.StringIO() cov = coverage.Coverage(debug=debug) cov._debug_file = debug_out self.start_import_stop(cov, "f1") cov.save() return debug_out.getvalue() def test_debug_no_trace(self) -> None: out_text = self.f1_debug_output([]) # We should have no output at all. assert not out_text def test_debug_trace(self) -> None: out_text = self.f1_debug_output(["trace"]) # We should have a line like "Tracing 'f1.py'", perhaps with an # absolute path. assert re.search(r"Tracing '.*f1.py'", out_text) # We should have lines like "Not tracing 'collector.py'..." assert re_lines(r"^Not tracing .*: is part of coverage.py$", out_text) def test_debug_trace_pid(self) -> None: out_text = self.f1_debug_output(["trace", "pid"]) # Now our lines are always prefixed with the process id. pid_prefix = r"^%5d\.[0-9a-f]{4}: " % os.getpid() pid_lines = re_lines_text(pid_prefix, out_text) assert pid_lines == out_text # We still have some tracing, and some not tracing. assert re_lines(pid_prefix + "Tracing ", out_text) assert re_lines(pid_prefix + "Not tracing ", out_text) def test_debug_callers(self) -> None: out_text = self.f1_debug_output(["pid", "dataop", "dataio", "callers", "lock"]) # For every real message, there should be a stack trace with a line like # "f1_debug_output : /Users/ned/coverage/tests/test_debug.py @71" real_messages = re_lines(r":\d+", out_text, match=False) frame_pattern = r"\s+f1_debug_output : .*tests[/\\]test_debug.py:\d+$" frames = re_lines(frame_pattern, out_text) assert len(real_messages) == len(frames) last_line = out_text.splitlines()[-1] # The details of what to expect on the stack are empirical, and can change # as the code changes. This test is here to ensure that the debug code # continues working. It's ok to adjust these details over time. 
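# --- Editor's illustrative sketch (not part of the original test) ---
# re_lines() used below is a test helper from tests/helpers.py; as called here
# it returns the lines of a multi-line string that match a regex.  A rough
# stand-in for that behavior (an assumption, not the real helper) could be:
def _re_lines_sketch(pat, text):
    return [line for line in text.splitlines() if re.search(pat, line)]

assert _re_lines_sketch(r"^b", "alpha\nbeta\ngamma\n") == ["beta"]
# --- end of editor's sketch ---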
assert re_lines(r"^\s*\d+\.\w{4}: Adding file tracers: 0 files", real_messages[-1]) assert re_lines(r"\s+add_file_tracers : .*coverage[/\\]sqldata.py:\d+$", last_line) def test_debug_config(self) -> None: out_text = self.f1_debug_output(["config"]) labels = """ attempted_config_files branch config_files_read config_file cover_pylib data_file debug exclude_list extra_css html_dir html_title ignore_errors run_include run_omit parallel partial_always_list partial_list paths precision show_missing source timid xml_output report_include report_omit """.split() for label in labels: label_pat = fr"^\s*{label}: " msg = f"Incorrect lines for {label!r}" assert 1 == len(re_lines(label_pat, out_text)), msg def test_debug_sys(self) -> None: out_text = self.f1_debug_output(["sys"]) assert_good_debug_sys(out_text) def test_debug_sys_ctracer(self) -> None: out_text = self.f1_debug_output(["sys"]) tracer_line = re_line(r"CTracer:", out_text).strip() if testenv.C_TRACER or testenv.SYS_MON: expected = "CTracer: available" else: expected = "CTracer: unavailable" assert expected == tracer_line def test_debug_pybehave(self) -> None: out_text = self.f1_debug_output(["pybehave"]) out_lines = out_text.splitlines() assert 10 < len(out_lines) < 40 pyversion = re_line(r" PYVERSION:", out_text) vtuple = ast.literal_eval(pyversion.partition(":")[-1].strip()) assert vtuple[:5] == sys.version_info def test_debug_process(self) -> None: out_text = self.f1_debug_output(["trace", "process"]) assert f"New process: pid={os.getpid()}, executable:" in out_text def test_debug_pytest(self) -> None: out_text = self.f1_debug_output(["trace", "pytest"]) ctx = "tests/test_debug.py::DebugTraceTest::test_debug_pytest (call)" assert f"Pytest context: {ctx}" in out_text def assert_good_debug_sys(out_text: str) -> None: """Assert that `str` is good output for debug=sys.""" labels = """ coverage_version coverage_module coverage_paths stdlib_paths third_party_paths core configs_attempted config_file configs_read data_file python platform implementation executable pid cwd path environment command_line cover_match pylib_match """.split() for label in labels: label_pat = fr"^\s*{label}: " msg = f"Incorrect lines for {label!r}" assert 1 == len(re_lines(label_pat, out_text)), msg tracer_line = re_line(" core:", out_text).strip() if testenv.C_TRACER: assert tracer_line == "core: CTracer" elif testenv.PY_TRACER: assert tracer_line == "core: PyTracer" else: assert testenv.SYS_MON assert tracer_line == "core: SysMonitor" class DebugOutputTest(CoverageTest): """Tests that we can direct debug output where we want.""" def setUp(self) -> None: super().setUp() # DebugOutputFile aggressively tries to start just one output file. We # need to manually force it to make a new one. 
DebugOutputFile._del_singleton_data() def debug_sys(self) -> None: """Run just enough coverage to get full debug=sys output.""" cov = coverage.Coverage(debug=["sys"]) cov.start() cov.stop() def test_stderr_default(self) -> None: self.debug_sys() out, err = self.stdouterr() assert "" == out assert_good_debug_sys(err) def test_envvar(self) -> None: self.set_environ("COVERAGE_DEBUG_FILE", "debug.out") self.debug_sys() assert ("", "") == self.stdouterr() with open("debug.out") as f: assert_good_debug_sys(f.read()) def test_config_file(self) -> None: self.make_file(".coveragerc", "[run]\ndebug_file = lotsa_info.txt") self.debug_sys() assert ("", "") == self.stdouterr() with open("lotsa_info.txt") as f: assert_good_debug_sys(f.read()) def test_stdout_alias(self) -> None: self.set_environ("COVERAGE_DEBUG_FILE", "stdout") self.debug_sys() out, err = self.stdouterr() assert "" == err assert_good_debug_sys(out) class DebugControlTest(CoverageTest): """Tests of DebugControl (via DebugControlString).""" run_in_temp_dir = False def test_debug_control(self) -> None: debug = DebugControlString(["yes"]) assert debug.should("yes") debug.write("YES") assert not debug.should("no") assert "YES\n" == debug.get_output() def test_debug_write_exceptions(self) -> None: debug = DebugControlString(["yes"]) try: raise RuntimeError('Oops') # This is in the traceback except Exception as exc: debug.write("Something happened", exc=exc) lines = debug.get_output().splitlines() assert "Something happened" == lines[0] assert "Traceback (most recent call last):" == lines[1] assert " raise RuntimeError('Oops') # This is in the traceback" in lines assert "RuntimeError: Oops" == lines[-1] def test_debug_write_self(self) -> None: class DebugWritingClass: """A simple class to show 'self:' debug messages.""" def __init__(self, debug: DebugControl) -> None: # This line will have "self:" reported. debug.write("Hello from me") def __repr__(self) -> str: return "<>" def run_some(debug: DebugControl) -> None: # This line will have no "self:" because there's no local self. 
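# (Editor's note: the "self" debug option adds a "self: <repr>" line only when
# the caller has a local named `self`, so the write() from __init__ above gets
# one and the write() from this plain function does not, as the assertions
# below confirm.)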
debug.write("In run_some") DebugWritingClass(debug) debug = DebugControlString(["self"]) run_some(debug) lines = debug.get_output().splitlines() assert lines == [ "In run_some", "Hello from me", "self: <>", ] def f_one(*args: Any, **kwargs: Any) -> str: """First of the chain of functions for testing `short_stack`.""" return f_two(*args, **kwargs) def f_two(*args: Any, **kwargs: Any) -> str: """Second of the chain of functions for testing `short_stack`.""" return f_three(*args, **kwargs) def f_three(*args: Any, **kwargs: Any) -> str: """Third of the chain of functions for testing `short_stack`.""" return short_stack(*args, **kwargs) class ShortStackTest(CoverageTest): """Tests of coverage.debug.short_stack.""" run_in_temp_dir = False def test_short_stack(self) -> None: stack = f_one().splitlines() assert 4 == len(stack) assert "test_short_stack" in stack[0] assert "f_one" in stack[1] assert "f_two" in stack[2] assert "f_three" in stack[3] def test_short_stack_skip(self) -> None: stack = f_one(skip=1).splitlines() assert 3 == len(stack) assert "test_short_stack" in stack[0] assert "f_one" in stack[1] assert "f_two" in stack[2] def test_short_stack_full(self) -> None: stack_text = f_one(full=True) s = re.escape(os.sep) if env.WINDOWS: pylib = "[Ll]ib" else: py = "pypy" if env.PYPY else "python" majv, minv = sys.version_info[:2] pylib = f"lib{s}{py}{majv}.{minv}" assert len(re_lines(fr"{s}{pylib}{s}site-packages{s}_pytest", stack_text)) > 3 assert len(re_lines(fr"{s}{pylib}{s}site-packages{s}pluggy", stack_text)) > 3 assert not re_lines(r" 0x[0-9a-fA-F]+", stack_text) # No frame ids stack = stack_text.splitlines() assert len(stack) > 25 assert "test_short_stack" in stack[-4] assert "f_one" in stack[-3] assert "f_two" in stack[-2] assert "f_three" in stack[-1] def test_short_stack_short_filenames(self) -> None: stack_text = f_one(full=True, short_filenames=True) s = re.escape(os.sep) assert not re_lines(r"site-packages", stack_text) assert len(re_lines(fr"syspath:{s}_pytest", stack_text)) > 3 assert len(re_lines(fr"syspath:{s}pluggy", stack_text)) > 3 def test_short_stack_frame_ids(self) -> None: stack = f_one(full=True, frame_ids=True).splitlines() assert len(stack) > 25 frame_ids = [m[0] for line in stack if (m := re.search(r" 0x[0-9a-fA-F]{6,}", line))] # Every line has a frame id. assert len(frame_ids) == len(stack) # All the frame ids are different. 
assert len(set(frame_ids)) == len(frame_ids) class ShortFilenameTest(CoverageTest): """Tests of debug.py:short_filename.""" def test_short_filename(self) -> None: s = os.sep se = re.escape(s) assert short_filename(ast.__file__) == f"syspath:{s}ast.py" assert short_filename(pytest.__file__) == f"syspath:{s}pytest{s}__init__.py" assert short_filename(env.__file__) == f"cov:{s}env.py" self.make_file("hello.txt", "hi") short_hello = short_filename(os.path.abspath("hello.txt")) assert re.match(fr"tmp:{se}t\d+{se}hello.txt", short_hello) oddball = f"{s}xyzzy{s}plugh{s}foo.txt" assert short_filename(oddball) == oddball assert short_filename(None) is None def test_relevant_environment_display() -> None: env_vars = { "HOME": "my home", "HOME_DIR": "other place", "XYZ_NEVER_MIND": "doesn't matter", "SOME_PYOTHER": "xyz123", "COVERAGE_THING": "abcd", "MY_PYPI_TOKEN": "secret.something", "TMP": "temporary", } expected = [ ("COVERAGE_THING", "abcd"), ("HOME", "my home"), ("MY_PYPI_TOKEN", "******.*********"), ("SOME_PYOTHER", "xyz123"), ("TMP", "temporary"), ] assert expected == relevant_environment_display(env_vars) def test_exc_one_line() -> None: try: raise DataError("wtf?") except Exception as exc: assert "coverage.exceptions.DataError: wtf?" == exc_one_line(exc) def test_auto_repr() -> None: class MyStuff: """Random class to test auto_repr.""" def __init__(self) -> None: self.x = 17 self.y = "hello" __repr__ = auto_repr stuff = MyStuff() setattr(stuff, "$coverage.object_id", 123456) assert re.match(r"", repr(stuff)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_execfile.py0000644000175100001770000002535700000000000020502 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.execfile""" from __future__ import annotations import compileall import json import os import os.path import pathlib import py_compile import re import sys from typing import Any, Iterator import pytest from coverage.exceptions import NoCode, NoSource, _ExceptionDuringRun from coverage.execfile import run_python_file, run_python_module from coverage.files import python_reported_file from tests.coveragetest import CoverageTest, TESTS_DIR, UsingModulesMixin TRY_EXECFILE = os.path.join(TESTS_DIR, "modules/process_test/try_execfile.py") class RunFileTest(CoverageTest): """Test cases for `run_python_file`.""" @pytest.fixture(autouse=True) def clean_up(self) -> Iterator[None]: """These tests all run in-process. Clean up global changes.""" yield sys.excepthook = sys.__excepthook__ def test_run_python_file(self) -> None: run_python_file([TRY_EXECFILE, "arg1", "arg2"]) mod_globs = json.loads(self.stdout()) # The file should think it is __main__ assert mod_globs['__name__'] == "__main__" # It should seem to come from a file named try_execfile.py dunder_file = os.path.basename(mod_globs['__file__']) assert dunder_file == "try_execfile.py" # It should have its correct module data. assert mod_globs['__doc__'].splitlines()[0] == "Test file for run_python_file." assert mod_globs['DATA'] == "xyzzy" assert mod_globs['FN_VAL'] == "my_fn('fooey')" # It must be self-importable as __main__. assert mod_globs['__main__.DATA'] == "xyzzy" # Argv should have the proper values. 
assert mod_globs['argv0'] == TRY_EXECFILE assert mod_globs['argv1-n'] == ["arg1", "arg2"] # __builtins__ should have the right values, like open(). assert mod_globs['__builtins__.has_open'] is True def test_no_extra_file(self) -> None: # Make sure that running a file doesn't create an extra compiled file. self.make_file("xxx", """\ desc = "a non-.py file!" """) assert os.listdir(".") == ["xxx"] run_python_file(["xxx"]) assert os.listdir(".") == ["xxx"] def test_universal_newlines(self) -> None: # Make sure we can read any sort of line ending. pylines = """# try newlines|print('Hello, world!')|""".split('|') for nl in ('\n', '\r\n', '\r'): with open('nl.py', 'wb') as fpy: fpy.write(nl.join(pylines).encode('utf-8')) run_python_file(['nl.py']) assert self.stdout() == "Hello, world!\n"*3 def test_missing_final_newline(self) -> None: # Make sure we can deal with a Python file with no final newline. self.make_file("abrupt.py", """\ if 1: a = 1 print(f"a is {a!r}") #""") with open("abrupt.py") as f: abrupt = f.read() assert abrupt[-1] == '#' run_python_file(["abrupt.py"]) assert self.stdout() == "a is 1\n" def test_no_such_file(self) -> None: path = python_reported_file('xyzzy.py') msg = re.escape(f"No file to run: '{path}'") with pytest.raises(NoSource, match=msg): run_python_file(["xyzzy.py"]) def test_directory_with_main(self) -> None: self.make_file("with_main/__main__.py", """\ print("I am __main__") """) run_python_file(["with_main"]) assert self.stdout() == "I am __main__\n" def test_directory_without_main(self) -> None: self.make_file("without_main/__init__.py", "") with pytest.raises(NoSource, match="Can't find '__main__' module in 'without_main'"): run_python_file(["without_main"]) def test_code_throws(self) -> None: self.make_file("throw.py", """\ class MyException(Exception): pass def f1(): print("about to raise..") raise MyException("hey!") def f2(): f1() f2() """) with pytest.raises(SystemExit) as exc_info: run_python_file(["throw.py"]) assert exc_info.value.args == (1,) assert self.stdout() == "about to raise..\n" assert self.stderr() == "" def test_code_exits(self) -> None: self.make_file("exit.py", """\ import sys def f1(): print("about to exit..") sys.exit(17) def f2(): f1() f2() """) with pytest.raises(SystemExit) as exc_info: run_python_file(["exit.py"]) assert exc_info.value.args == (17,) assert self.stdout() == "about to exit..\n" assert self.stderr() == "" def test_excepthook_exit(self) -> None: self.make_file("excepthook_exit.py", """\ import sys def excepthook(*args): print('in excepthook') sys.exit(0) sys.excepthook = excepthook raise RuntimeError('Error Outside') """) with pytest.raises(SystemExit): run_python_file(["excepthook_exit.py"]) cov_out = self.stdout() assert cov_out == "in excepthook\n" def test_excepthook_throw(self) -> None: self.make_file("excepthook_throw.py", """\ import sys def excepthook(*args): # Write this message to stderr so that we don't have to deal # with interleaved stdout/stderr comparisons in the assertions # in the test. sys.stderr.write('in excepthook\\n') raise RuntimeError('Error Inside') sys.excepthook = excepthook raise RuntimeError('Error Outside') """) with pytest.raises(_ExceptionDuringRun) as exc_info: run_python_file(["excepthook_throw.py"]) # The _ExceptionDuringRun exception has the RuntimeError as its argument. 
assert exc_info.value.args[1].args[0] == "Error Outside" stderr = self.stderr() assert "in excepthook\n" in stderr assert "Error in sys.excepthook:\n" in stderr assert "RuntimeError: Error Inside" in stderr class RunPycFileTest(CoverageTest): """Test cases for `run_python_file`.""" def make_pyc(self, **kwargs: Any) -> str: """Create a .pyc file, and return the path to it.""" self.make_file("compiled.py", """\ def doit(): print("I am here!") doit() """) compileall.compile_dir(".", quiet=True, **kwargs) os.remove("compiled.py") # Find the .pyc file! return str(next(pathlib.Path(".").rglob("compiled*.pyc"))) def test_running_pyc(self) -> None: pycfile = self.make_pyc() run_python_file([pycfile]) assert self.stdout() == "I am here!\n" def test_running_pyo(self) -> None: pycfile = self.make_pyc() pyofile = re.sub(r"[.]pyc$", ".pyo", pycfile) assert pycfile != pyofile os.rename(pycfile, pyofile) run_python_file([pyofile]) assert self.stdout() == "I am here!\n" def test_running_pyc_from_wrong_python(self) -> None: pycfile = self.make_pyc() # Jam Python 2.1 magic number into the .pyc file. with open(pycfile, "r+b") as fpyc: fpyc.seek(0) fpyc.write(bytes([0x2a, 0xeb, 0x0d, 0x0a])) with pytest.raises(NoCode, match="Bad magic number in .pyc file"): run_python_file([pycfile]) # In some environments, the pycfile persists and pollutes another test. os.remove(pycfile) def test_running_hashed_pyc(self) -> None: pycfile = self.make_pyc(invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH) run_python_file([pycfile]) assert self.stdout() == "I am here!\n" def test_no_such_pyc_file(self) -> None: path = python_reported_file('xyzzy.pyc') msg = re.escape(f"No file to run: '{path}'") with pytest.raises(NoCode, match=msg): run_python_file(["xyzzy.pyc"]) def test_running_py_from_binary(self) -> None: # Use make_file to get the bookkeeping. Ideally, it would # be able to write binary files. 
bf = self.make_file("binary") with open(bf, "wb") as f: f.write(b'\x7fELF\x02\x01\x01\x00\x00\x00') path = python_reported_file('binary') msg = ( re.escape(f"Couldn't run '{path}' as Python code: ") + r"(ValueError|SyntaxError): source code string cannot contain null bytes" ) with pytest.raises(Exception, match=msg): run_python_file([bf]) class RunModuleTest(UsingModulesMixin, CoverageTest): """Test run_python_module.""" run_in_temp_dir = False def test_runmod1(self) -> None: run_python_module(["runmod1", "hello"]) out, err = self.stdouterr() assert out == "runmod1: passed hello\n" assert err == "" def test_runmod2(self) -> None: run_python_module(["pkg1.runmod2", "hello"]) out, err = self.stdouterr() assert out == "pkg1.__init__: pkg1\nrunmod2: passed hello\n" assert err == "" def test_runmod3(self) -> None: run_python_module(["pkg1.sub.runmod3", "hello"]) out, err = self.stdouterr() assert out == "pkg1.__init__: pkg1\nrunmod3: passed hello\n" assert err == "" def test_pkg1_main(self) -> None: run_python_module(["pkg1", "hello"]) out, err = self.stdouterr() assert out == "pkg1.__init__: pkg1\npkg1.__main__: passed hello\n" assert err == "" def test_pkg1_sub_main(self) -> None: run_python_module(["pkg1.sub", "hello"]) out, err = self.stdouterr() assert out == "pkg1.__init__: pkg1\npkg1.sub.__main__: passed hello\n" assert err == "" def test_pkg1_init(self) -> None: run_python_module(["pkg1.__init__", "wut?"]) out, err = self.stdouterr() assert out == "pkg1.__init__: pkg1\npkg1.__init__: __main__\n" assert err == "" def test_no_such_module(self) -> None: with pytest.raises(NoSource, match="No module named '?i_dont_exist'?"): run_python_module(["i_dont_exist"]) with pytest.raises(NoSource, match="No module named '?i'?"): run_python_module(["i.dont_exist"]) with pytest.raises(NoSource, match="No module named '?i'?"): run_python_module(["i.dont.exist"]) def test_no_main(self) -> None: with pytest.raises(NoSource): run_python_module(["pkg2", "hi"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_filereporter.py0000644000175100001770000000774700000000000021423 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for FileReporters""" from __future__ import annotations import sys from coverage.plugin import FileReporter from coverage.python import PythonFileReporter from tests.coveragetest import CoverageTest, UsingModulesMixin from tests.helpers import os_sep # pylint: disable=import-error # Unable to import 'aa' (No module named aa) class FileReporterTest(UsingModulesMixin, CoverageTest): """Tests for FileReporter classes.""" run_in_temp_dir = False def test_filenames(self) -> None: acu = PythonFileReporter("aa/afile.py") bcu = PythonFileReporter("aa/bb/bfile.py") ccu = PythonFileReporter("aa/bb/cc/cfile.py") assert acu.relative_filename() == "aa/afile.py" assert bcu.relative_filename() == "aa/bb/bfile.py" assert ccu.relative_filename() == "aa/bb/cc/cfile.py" assert acu.source() == "# afile.py\n" assert bcu.source() == "# bfile.py\n" assert ccu.source() == "# cfile.py\n" def test_odd_filenames(self) -> None: acu = PythonFileReporter("aa/afile.odd.py") bcu = PythonFileReporter("aa/bb/bfile.odd.py") b2cu = PythonFileReporter("aa/bb.odd/bfile.py") assert acu.relative_filename() == "aa/afile.odd.py" assert bcu.relative_filename() == "aa/bb/bfile.odd.py" assert 
b2cu.relative_filename() == "aa/bb.odd/bfile.py" assert acu.source() == "# afile.odd.py\n" assert bcu.source() == "# bfile.odd.py\n" assert b2cu.source() == "# bfile.py\n" def test_modules(self) -> None: import aa import aa.bb import aa.bb.cc acu = PythonFileReporter(aa) bcu = PythonFileReporter(aa.bb) ccu = PythonFileReporter(aa.bb.cc) assert acu.relative_filename() == os_sep("aa/__init__.py") assert bcu.relative_filename() == os_sep("aa/bb/__init__.py") assert ccu.relative_filename() == os_sep("aa/bb/cc/__init__.py") assert acu.source() == "# aa\n" assert bcu.source() == "# bb\n" assert ccu.source() == "" # yes, empty def test_module_files(self) -> None: import aa.afile import aa.bb.bfile import aa.bb.cc.cfile acu = PythonFileReporter(aa.afile) bcu = PythonFileReporter(aa.bb.bfile) ccu = PythonFileReporter(aa.bb.cc.cfile) assert acu.relative_filename() == os_sep("aa/afile.py") assert bcu.relative_filename() == os_sep("aa/bb/bfile.py") assert ccu.relative_filename() == os_sep("aa/bb/cc/cfile.py") assert acu.source() == "# afile.py\n" assert bcu.source() == "# bfile.py\n" assert ccu.source() == "# cfile.py\n" def test_comparison(self) -> None: acu = FileReporter("aa/afile.py") acu2 = FileReporter("aa/afile.py") zcu = FileReporter("aa/zfile.py") bcu = FileReporter("aa/bb/bfile.py") assert acu == acu2 and acu <= acu2 and acu >= acu2 # pylint: disable=chained-comparison assert acu < zcu and acu <= zcu and acu != zcu assert zcu > acu and zcu >= acu and zcu != acu assert acu < bcu and acu <= bcu and acu != bcu assert bcu > acu and bcu >= acu and bcu != acu def test_zipfile(self) -> None: sys.path.append("tests/zip1.zip") # Test that we can get files out of zipfiles, and read their source files. # The zip1 module is installed by an action in igor.py. import zip1 import zip1.zip1 # Verify that we really imported from an zipfile. If we did, then the # __file__ won't be an actual file, because one of the "directories" # in the path is actually the zip file. self.assert_doesnt_exist(zip1.__file__) z1 = PythonFileReporter(zip1) z1z1 = PythonFileReporter(zip1.zip1) assert z1.source() == "" assert "# My zip file!" 
in z1z1.source().splitlines() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_files.py0000644000175100001770000006671600000000000020024 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for files.py""" from __future__ import annotations import itertools import os import os.path import re from typing import Any, Iterable, Iterator, Protocol from unittest import mock import pytest from coverage import env, files from coverage.exceptions import ConfigError from coverage.files import ( GlobMatcher, ModuleMatcher, PathAliases, TreeMatcher, abs_file, actual_path, find_python_files, flat_rootname, globs_to_regex, ) from tests.coveragetest import CoverageTest from tests.helpers import os_sep class FilesTest(CoverageTest): """Tests of coverage.files.""" def abs_path(self, p: str) -> str: """Return the absolute path for `p`.""" return os.path.join(abs_file(os.getcwd()), os.path.normpath(p)) def test_simple(self) -> None: self.make_file("hello.py") files.set_relative_directory() assert files.relative_filename("hello.py") == "hello.py" a = self.abs_path("hello.py") assert a != "hello.py" assert files.relative_filename(a) == "hello.py" def test_peer_directories(self) -> None: self.make_file("sub/proj1/file1.py") self.make_file("sub/proj2/file2.py") a1 = self.abs_path("sub/proj1/file1.py") a2 = self.abs_path("sub/proj2/file2.py") d = os.path.normpath("sub/proj1") os.chdir(d) files.set_relative_directory() assert files.relative_filename(a1) == "file1.py" assert files.relative_filename(a2) == a2 def test_filepath_contains_absolute_prefix_twice(self) -> None: # https://github.com/nedbat/coveragepy/issues/194 # Build a path that has two pieces matching the absolute path prefix. # Technically, this test doesn't do that on Windows, but drive # letters make that impractical to achieve. files.set_relative_directory() d = abs_file(os.curdir) trick = os.path.splitdrive(d)[1].lstrip(os.path.sep) rel = os.path.join('sub', trick, 'file1.py') assert files.relative_filename(abs_file(rel)) == rel def test_canonical_filename_ensure_cache_hit(self) -> None: self.make_file("sub/proj1/file1.py") d = actual_path(self.abs_path("sub/proj1")) os.chdir(d) files.set_relative_directory() canonical_path = files.canonical_filename('sub/proj1/file1.py') assert canonical_path == self.abs_path('file1.py') # After the filename has been converted, it should be in the cache. 
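# ---------------------------------------------------------------------------
# Editor's note: an illustrative, hedged sketch -- not part of the original
# test file.  It restates the caching behavior that the assertions just below
# verify: after set_relative_directory(), canonical_filename() memoizes its
# answers in files.CANONICAL_FILENAME_CACHE, so repeated calls are cache hits.
def _example_canonical_filename_cache() -> None:
    """Sketch of the cache round trip checked below (assumes sub/proj1/file1.py exists)."""
    from coverage import files
    files.set_relative_directory()
    first = files.canonical_filename("sub/proj1/file1.py")   # computed, then cached
    second = files.canonical_filename("sub/proj1/file1.py")  # served from the cache
    assert first == second
    assert "sub/proj1/file1.py" in files.CANONICAL_FILENAME_CACHE
# ---------------------------------------------------------------------------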
assert 'sub/proj1/file1.py' in files.CANONICAL_FILENAME_CACHE assert files.canonical_filename('sub/proj1/file1.py') == self.abs_path('file1.py') @pytest.mark.parametrize( "curdir, sep", [ ("/", "/"), ("X:\\", "\\"), ], ) def test_relative_dir_for_root(self, curdir: str, sep: str) -> None: with mock.patch.object(files.os, 'curdir', new=curdir): with mock.patch.object(files.os, 'sep', new=sep): with mock.patch('coverage.files.os.path.normcase', return_value=curdir): files.set_relative_directory() assert files.relative_directory() == curdir @pytest.mark.parametrize( "to_make, to_check, answer", [ ("a/b/c/foo.py", "a/b/c/foo.py", True), ("a/b/c/foo.py", "a/b/c/bar.py", False), ("src/files.zip", "src/files.zip/foo.py", True), ("src/files.whl", "src/files.whl/foo.py", True), ("src/files.egg", "src/files.egg/foo.py", True), ("src/files.pex", "src/files.pex/foo.py", True), ("src/files.zip", "src/morefiles.zip/foo.py", False), ("src/files.pex", "src/files.pex/zipfiles/files.zip/foo.py", True), ], ) def test_source_exists(self, to_make: str, to_check: str, answer: bool) -> None: # source_exists won't look inside the zipfile, so it's fine to make # an empty file with the zipfile name. self.make_file(to_make, "") assert files.source_exists(to_check) == answer @pytest.mark.parametrize("original, flat", [ ("abc.py", "abc_py"), ("hellothere", "hellothere"), ("a/b/c.py", "d_86bbcbe134d28fd2_c_py"), ("a/b/defghi.py", "d_86bbcbe134d28fd2_defghi_py"), ("/a/b/c.py", "d_bb25e0ada04227c6_c_py"), ("/a/b/defghi.py", "d_bb25e0ada04227c6_defghi_py"), (r"c:\foo\bar.html", "d_e7c107482373f299_bar_html"), (r"d:\foo\bar.html", "d_584a05dcebc67b46_bar_html"), ("Montrรฉal/โ˜บ/conf.py", "d_c840497a2c647ce0_conf_py"), ( # original: r"c:\lorem\ipsum\quia\dolor\sit\amet\consectetur\adipisci\velit\sed" + r"\quia\non\numquam\eius\modi\tempora\incidunt\ut\labore\et\dolore" + r"\magnam\aliquam\quaerat\voluptatem\ut\enim\ad\minima\veniam\quis" + r"\nostrum\exercitationem\ullam\corporis\suscipit\laboriosam" + r"\Montrรฉal\โ˜บ\my_program.py", # flat: "d_e597dfacb73a23d5_my_program_py", ), ]) def test_flat_rootname(original: str, flat: str) -> None: assert flat_rootname(original) == flat def globs_to_regex_params( patterns: Iterable[str], case_insensitive: bool = False, partial: bool = False, matches: Iterable[str] = (), nomatches: Iterable[str] = (), ) -> Iterator[Any]: """Generate parameters for `test_globs_to_regex`. `patterns`, `case_insensitive`, and `partial` are arguments for `globs_to_regex`. `matches` is a list of strings that should match, and `nomatches` is a list of strings that should not match. Everything is yielded so that `test_globs_to_regex` can call `globs_to_regex` once and check one result. 
""" pat_id = "|".join(patterns) for text in matches: yield pytest.param( patterns, case_insensitive, partial, text, True, id=f"{pat_id}:ci{case_insensitive}:par{partial}:{text}:match", ) for text in nomatches: yield pytest.param( patterns, case_insensitive, partial, text, False, id=f"{pat_id}:ci{case_insensitive}:par{partial}:{text}:nomatch", ) @pytest.mark.parametrize( "patterns, case_insensitive, partial, text, result", list(itertools.chain.from_iterable([ globs_to_regex_params( ["abc", "xyz"], matches=["abc", "xyz", "sub/mod/abc"], nomatches=[ "ABC", "xYz", "abcx", "xabc", "axyz", "xyza", "sub/mod/abcd", "sub/abc/more", ], ), globs_to_regex_params( ["abc", "xyz"], case_insensitive=True, matches=["abc", "xyz", "Abc", "XYZ", "AbC"], nomatches=["abcx", "xabc", "axyz", "xyza"], ), globs_to_regex_params( ["a*c", "x*z"], matches=["abc", "xyz", "xYz", "azc", "xaz", "axyzc"], nomatches=["ABC", "abcx", "xabc", "axyz", "xyza", "a/c"], ), globs_to_regex_params( ["a?c", "x?z"], matches=["abc", "xyz", "xYz", "azc", "xaz"], nomatches=["ABC", "abcx", "xabc", "axyz", "xyza", "a/c"], ), globs_to_regex_params( ["a??d"], matches=["abcd", "azcd", "a12d"], nomatches=["ABCD", "abcx", "axyz", "abcde"], ), globs_to_regex_params( ["abc/hi.py"], case_insensitive=True, matches=["abc/hi.py", "ABC/hi.py", r"ABC\hi.py"], nomatches=["abc_hi.py", "abc/hi.pyc"], ), globs_to_regex_params( [r"abc\hi.py"], case_insensitive=True, matches=[r"abc\hi.py", r"ABC\hi.py", "abc/hi.py", "ABC/hi.py"], nomatches=["abc_hi.py", "abc/hi.pyc"], ), globs_to_regex_params( ["abc/*/hi.py"], case_insensitive=True, matches=["abc/foo/hi.py", r"ABC\foo/hi.py"], nomatches=["abc/hi.py", "abc/hi.pyc", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"], ), globs_to_regex_params( ["abc/**/hi.py"], case_insensitive=True, matches=[ "abc/foo/hi.py", r"ABC\foo/hi.py", "abc/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py", ], nomatches=["abc/hi.pyc"], ), globs_to_regex_params( ["abc/[a-f]*/hi.py"], case_insensitive=True, matches=["abc/foo/hi.py", r"ABC\boo/hi.py"], nomatches=[ "abc/zoo/hi.py", "abc/hi.py", "abc/hi.pyc", "abc/foo/bar/hi.py", r"abc\foo/bar/hi.py", ], ), globs_to_regex_params( ["abc/[a-f]/hi.py"], case_insensitive=True, matches=["abc/f/hi.py", r"ABC\b/hi.py"], nomatches=[ "abc/foo/hi.py", "abc/zoo/hi.py", "abc/hi.py", "abc/hi.pyc", "abc/foo/bar/hi.py", r"abc\foo/bar/hi.py", ], ), globs_to_regex_params( ["abc/"], case_insensitive=True, partial=True, matches=["abc/foo/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"], nomatches=["abcd/foo.py", "xabc/hi.py"], ), globs_to_regex_params( ["*/foo"], case_insensitive=False, partial=True, matches=["abc/foo/hi.py", "foo/hi.py", "abc/def/foo/hi.py"], nomatches=["abc/xfoo/hi.py"], ), globs_to_regex_params( ["*c/foo"], case_insensitive=False, partial=True, matches=["abc/foo/hi.py"], nomatches=["abc/xfoo/hi.py", "foo/hi.py", "def/abc/foo/hi.py"], ), globs_to_regex_params( ["foo/x*"], case_insensitive=False, partial=True, matches=["foo/x", "foo/xhi.py", "foo/x/hi.py"], nomatches=[], ), globs_to_regex_params( ["foo/x*"], case_insensitive=False, partial=False, matches=["foo/x", "foo/xhi.py"], nomatches=["foo/x/hi.py"], ), globs_to_regex_params( ["**/foo"], matches=["foo", "hello/foo", "hi/there/foo"], nomatches=["foob", "hello/foob", "hello/Foo"], ), globs_to_regex_params( ["a+b/foo*", "x{y}z/foo*"], matches=["a+b/foo", "a+b/foobar", "x{y}z/foobar"], nomatches=["aab/foo", "ab/foo", "xyz/foo"], ), ])), ) def test_globs_to_regex( patterns: Iterable[str], case_insensitive: bool, partial: bool, text: str, result: 
bool, ) -> None: regex = globs_to_regex(patterns, case_insensitive=case_insensitive, partial=partial) assert bool(regex.match(text)) == result @pytest.mark.parametrize("pattern, bad_word", [ ("***/foo.py", "***"), ("bar/***/foo.py", "***"), ("*****/foo.py", "*****"), ("Hello]there", "]"), ("Hello[there", "["), ("x/a**/b.py", "a**"), ("x/abcd**/b.py", "abcd**"), ("x/**a/b.py", "**a"), ("x/**/**/b.py", "**/**"), ]) def test_invalid_globs(pattern: str, bad_word: str) -> None: msg = f"File pattern can't include {bad_word!r}" with pytest.raises(ConfigError, match=re.escape(msg)): globs_to_regex([pattern]) class TMatcher(Protocol): """The shape all Matchers have.""" def match(self, s: str) -> bool: """Does this string match?""" class MatcherTest(CoverageTest): """Tests of file matchers.""" def setUp(self) -> None: super().setUp() files.set_relative_directory() def assertMatches(self, matcher: TMatcher, filepath: str, matches: bool) -> None: """The `matcher` should agree with `matches` about `filepath`.""" canonical = files.canonical_filename(filepath) msg = f"File {filepath} should have matched as {matches}" assert matches == matcher.match(canonical), msg def test_tree_matcher(self) -> None: case_folding = env.WINDOWS matches_to_try = [ (self.make_file("sub/file1.py"), True), (self.make_file("sub/file2.c"), True), (self.make_file("sub2/file3.h"), False), (self.make_file("sub3/file4.py"), True), (self.make_file("sub3/file5.c"), False), (self.make_file("sub4/File5.py"), case_folding), (self.make_file("sub5/file6.py"), case_folding), ] trees = [ files.canonical_filename("sub"), files.canonical_filename("sub3/file4.py"), files.canonical_filename("sub4/file5.py"), files.canonical_filename("SUB5/file6.py"), ] tm = TreeMatcher(trees) assert tm.info() == sorted(trees) for filepath, matches in matches_to_try: self.assertMatches(tm, filepath, matches) def test_module_matcher(self) -> None: matches_to_try = [ ('test', True), ('trash', False), ('testing', False), ('test.x', True), ('test.x.y.z', True), ('py', False), ('py.t', False), ('py.test', True), ('py.testing', False), ('py.test.buz', True), ('py.test.buz.baz', True), ('__main__', False), ('mymain', True), ('yourmain', False), ] modules = ['test', 'py.test', 'mymain'] mm = ModuleMatcher(modules) assert mm.info() == modules for modulename, matches in matches_to_try: assert mm.match(modulename) == matches, modulename def test_glob_matcher(self) -> None: matches_to_try = [ (self.make_file("sub/file1.py"), True), (self.make_file("sub/file2.c"), False), (self.make_file("sub2/file3.h"), True), (self.make_file("sub2/sub/file3.h"), True), (self.make_file("sub3/file4.py"), True), (self.make_file("sub3/file5.c"), False), ] fnm = GlobMatcher(["*.py", "*/sub2/*"]) assert fnm.info() == ["*.py", "*/sub2/*"] for filepath, matches in matches_to_try: self.assertMatches(fnm, filepath, matches) def test_glob_matcher_overload(self) -> None: fnm = GlobMatcher(["*x%03d*.txt" % i for i in range(500)]) self.assertMatches(fnm, "x007foo.txt", True) self.assertMatches(fnm, "x123foo.txt", True) self.assertMatches(fnm, "x798bar.txt", False) self.assertMatches(fnm, "x499.txt", True) self.assertMatches(fnm, "x500.txt", False) def test_glob_windows_paths(self) -> None: # We should be able to match Windows paths even if we are running on # a non-Windows OS. 
fnm = GlobMatcher(["*/foo.py"]) self.assertMatches(fnm, r"dir\foo.py", True) fnm = GlobMatcher([r"*\foo.py"]) self.assertMatches(fnm, r"dir\foo.py", True) @pytest.fixture(params=[False, True], name="rel_yn") def relative_setting(request: pytest.FixtureRequest) -> bool: """Parameterized fixture to choose whether PathAliases is relative or not.""" return request.param # type: ignore[no-any-return] class PathAliasesTest(CoverageTest): """Tests for coverage/files.py:PathAliases""" run_in_temp_dir = False def assert_mapped(self, aliases: PathAliases, inp: str, out: str) -> None: """Assert that `inp` mapped through `aliases` produces `out`. If the aliases are not relative, then `out` is canonicalized first, since aliases produce canonicalized paths by default. """ mapped = aliases.map(inp, exists=lambda p: True) if aliases.relative: expected = out else: expected = files.canonical_filename(out) assert mapped == expected def assert_unchanged(self, aliases: PathAliases, inp: str, exists: bool = True) -> None: """Assert that `inp` mapped through `aliases` is unchanged.""" assert aliases.map(inp, exists=lambda p: exists) == inp def test_noop(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) self.assert_unchanged(aliases, '/ned/home/a.py') def test_nomatch(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) aliases.add('/home/*/src', './mysrc') self.assert_unchanged(aliases, '/home/foo/a.py') def test_wildcard(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) aliases.add('/ned/home/*/src', './mysrc') self.assert_mapped(aliases, '/ned/home/foo/src/a.py', './mysrc/a.py') aliases = PathAliases(relative=rel_yn) aliases.add('/ned/home/*/src/', './mysrc') self.assert_mapped(aliases, '/ned/home/foo/src/a.py', './mysrc/a.py') def test_no_accidental_match(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) aliases.add('/home/*/src', './mysrc') self.assert_unchanged(aliases, '/home/foo/srcetc') def test_no_map_if_not_exist(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) aliases.add('/ned/home/*/src', './mysrc') self.assert_unchanged(aliases, '/ned/home/foo/src/a.py', exists=False) self.assert_unchanged(aliases, 'foo/src/a.py', exists=False) def test_no_dotslash(self, rel_yn: bool) -> None: # The result shouldn't start with "./" if the map result didn't. aliases = PathAliases(relative=rel_yn) aliases.add('*/project', '.') self.assert_mapped(aliases, '/ned/home/project/src/a.py', os_sep('src/a.py')) def test_relative_pattern(self) -> None: aliases = PathAliases(relative=True) aliases.add(".tox/*/site-packages", "src") self.assert_mapped( aliases, ".tox/py314/site-packages/proj/a.py", os_sep("src/proj/a.py"), ) def test_multiple_patterns(self, rel_yn: bool) -> None: # also test the debugfn... 
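# ---------------------------------------------------------------------------
# Editor's note: a hedged, illustrative sketch -- not part of the original
# suite.  It uses only calls these tests already exercise: PathAliases.add()
# registers a pattern -> result rule, and PathAliases.map() applies it.  The
# test_multiple_patterns test below runs the same round trip with a debugfn.
def _example_path_aliases() -> None:
    """Minimal add()/map() round trip; exists= avoids touching the filesystem."""
    from coverage.files import PathAliases
    aliases = PathAliases(relative=True)
    aliases.add("/home/*/src", "./mysrc")
    mapped = aliases.map("/home/foo/src/a.py", exists=lambda p: True)
    assert mapped == "./mysrc/a.py"
# ---------------------------------------------------------------------------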
msgs: list[str] = [] aliases = PathAliases(debugfn=msgs.append, relative=rel_yn) aliases.add('/home/*/src', './mysrc') aliases.add('/lib/*/libsrc', './mylib') self.assert_mapped(aliases, '/home/foo/src/a.py', './mysrc/a.py') self.assert_mapped(aliases, '/lib/foo/libsrc/a.py', './mylib/a.py') if rel_yn: assert msgs == [ "Aliases (relative=True):", " Rule: '/home/*/src' -> './mysrc/' using regex " + "'[/\\\\\\\\]home[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]src[/\\\\\\\\]'", " Rule: '/lib/*/libsrc' -> './mylib/' using regex " + "'[/\\\\\\\\]lib[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]libsrc[/\\\\\\\\]'", "Matched path '/home/foo/src/a.py' to rule '/home/*/src' -> './mysrc/', " + "producing './mysrc/a.py'", "Matched path '/lib/foo/libsrc/a.py' to rule '/lib/*/libsrc' -> './mylib/', " + "producing './mylib/a.py'", ] else: assert msgs == [ "Aliases (relative=False):", " Rule: '/home/*/src' -> './mysrc/' using regex " + "'[/\\\\\\\\]home[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]src[/\\\\\\\\]'", " Rule: '/lib/*/libsrc' -> './mylib/' using regex " + "'[/\\\\\\\\]lib[/\\\\\\\\][^/\\\\\\\\]*[/\\\\\\\\]libsrc[/\\\\\\\\]'", "Matched path '/home/foo/src/a.py' to rule '/home/*/src' -> './mysrc/', " + f"producing {files.canonical_filename('./mysrc/a.py')!r}", "Matched path '/lib/foo/libsrc/a.py' to rule '/lib/*/libsrc' -> './mylib/', " + f"producing {files.canonical_filename('./mylib/a.py')!r}", ] @pytest.mark.parametrize("badpat", [ "/ned/home/*", "/ned/home/*/", "/ned/home/*/*/", ]) def test_cant_have_wildcard_at_end(self, badpat: str) -> None: aliases = PathAliases() msg = "Pattern must not end with wildcards." with pytest.raises(ConfigError, match=msg): aliases.add(badpat, "fooey") def test_no_accidental_munging(self) -> None: aliases = PathAliases() aliases.add(r'c:\Zoo\boo', 'src/') aliases.add('/home/ned$', 'src/') self.assert_mapped(aliases, r'c:\Zoo\boo\foo.py', 'src/foo.py') self.assert_mapped(aliases, r'/home/ned$/foo.py', 'src/foo.py') def test_paths_are_os_corrected(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) aliases.add('/home/ned/*/src', './mysrc') aliases.add(r'c:\ned\src', './mysrc') self.assert_mapped(aliases, r'C:\Ned\src\sub\a.py', './mysrc/sub/a.py') aliases = PathAliases(relative=rel_yn) aliases.add('/home/ned/*/src', r'.\mysrc') aliases.add(r'c:\ned\src', r'.\mysrc') self.assert_mapped( aliases, r'/home/ned/foo/src/sub/a.py', r'.\mysrc\sub\a.py', ) # Try the paths in both orders. 
lin = "*/project/module/" win = "*\\project\\module\\" lin_win_paths = [[lin, win], [win, lin]] @pytest.mark.parametrize("paths", lin_win_paths) def test_windows_on_linux(self, paths: Iterable[str], rel_yn: bool) -> None: # https://github.com/nedbat/coveragepy/issues/618 aliases = PathAliases(relative=rel_yn) for path in paths: aliases.add(path, "project/module") self.assert_mapped( aliases, "C:\\a\\path\\somewhere\\coveragepy_test\\project\\module\\tests\\file.py", "project/module/tests/file.py", ) @pytest.mark.parametrize("paths", lin_win_paths) def test_linux_on_windows(self, paths: Iterable[str], rel_yn: bool) -> None: # https://github.com/nedbat/coveragepy/issues/618 aliases = PathAliases(relative=rel_yn) for path in paths: aliases.add(path, "project\\module") self.assert_mapped( aliases, "C:/a/path/somewhere/coveragepy_test/project/module/tests/file.py", "project\\module\\tests\\file.py", ) @pytest.mark.parametrize("paths", lin_win_paths) def test_relative_windows_on_linux(self, paths: Iterable[str]) -> None: # https://github.com/nedbat/coveragepy/issues/991 aliases = PathAliases(relative=True) for path in paths: aliases.add(path, "project/module") self.assert_mapped( aliases, r"project\module\tests\file.py", r"project/module/tests/file.py", ) @pytest.mark.parametrize("paths", lin_win_paths) def test_relative_linux_on_windows(self, paths: Iterable[str]) -> None: # https://github.com/nedbat/coveragepy/issues/991 aliases = PathAliases(relative=True) for path in paths: aliases.add(path, r"project\module") self.assert_mapped( aliases, r"project/module/tests/file.py", r"project\module\tests\file.py", ) @pytest.mark.skipif(env.WINDOWS, reason="This test assumes Unix file system") def test_implicit_relative_windows_on_linux(self) -> None: # https://github.com/nedbat/coveragepy/issues/991 aliases = PathAliases(relative=True) self.assert_mapped( aliases, r"project\module\tests\file.py", r"project/module/tests/file.py", ) @pytest.mark.skipif(not env.WINDOWS, reason="This test assumes Windows file system") def test_implicit_relative_linux_on_windows(self) -> None: # https://github.com/nedbat/coveragepy/issues/991 aliases = PathAliases(relative=True) self.assert_mapped( aliases, r"project/module/tests/file.py", r"project\module\tests\file.py", ) def test_multiple_wildcard(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) aliases.add('/home/jenkins/*/a/*/b/*/django', './django') self.assert_mapped( aliases, '/home/jenkins/xx/a/yy/b/zz/django/foo/bar.py', './django/foo/bar.py', ) def test_windows_root_paths(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) aliases.add('X:\\', '/tmp/src') self.assert_mapped( aliases, "X:\\a\\file.py", "/tmp/src/a/file.py", ) self.assert_mapped( aliases, "X:\\file.py", "/tmp/src/file.py", ) def test_leading_wildcard(self, rel_yn: bool) -> None: aliases = PathAliases(relative=rel_yn) aliases.add('*/d1', './mysrc1') aliases.add('*/d2', './mysrc2') self.assert_mapped(aliases, '/foo/bar/d1/x.py', './mysrc1/x.py') self.assert_mapped(aliases, '/foo/bar/d2/y.py', './mysrc2/y.py') @pytest.mark.parametrize("dirname", [".", "..", "../other", "/"]) def test_dot(self, dirname: str) -> None: if env.WINDOWS and dirname == "/": # The root test case was added for the manylinux Docker images, # and I'm not sure how it should work on Windows, so skip it. 
pytest.skip("Don't know how to handle root on Windows") aliases = PathAliases() aliases.add(dirname, '/the/source') the_file = os.path.join(dirname, 'a.py') the_file = os.path.expanduser(the_file) the_file = os.path.abspath(os.path.realpath(the_file)) assert '~' not in the_file # to be sure the test is pure. self.assert_mapped(aliases, the_file, '/the/source/a.py') class PathAliasesRealFilesTest(CoverageTest): """Tests for coverage/files.py:PathAliases using real files.""" def test_aliasing_zip_files(self) -> None: self.make_file("src/zipfiles/code.zip", "fake zip, doesn't matter") aliases = PathAliases() aliases.add("*/d1", "./src") aliases.add("*/d2", "./src") expected = files.canonical_filename("src/zipfiles/code.zip/p1.py") assert aliases.map("tox/d1/zipfiles/code.zip/p1.py") == expected class FindPythonFilesTest(CoverageTest): """Tests of `find_python_files`.""" def test_find_python_files(self) -> None: self.make_file("sub/a.py") self.make_file("sub/b.py") self.make_file("sub/x.c") # nope: not .py self.make_file("sub/ssub/__init__.py") self.make_file("sub/ssub/s.py") self.make_file("sub/ssub/~s.py") # nope: editor effluvia self.make_file("sub/lab/exp.py") # nope: no __init__.py self.make_file("sub/windows.pyw") py_files = set(find_python_files("sub", include_namespace_packages=False)) self.assert_same_files(py_files, [ "sub/a.py", "sub/b.py", "sub/ssub/__init__.py", "sub/ssub/s.py", "sub/windows.pyw", ]) def test_find_python_files_include_namespace_packages(self) -> None: self.make_file("sub/a.py") self.make_file("sub/b.py") self.make_file("sub/x.c") # nope: not .py self.make_file("sub/ssub/__init__.py") self.make_file("sub/ssub/s.py") self.make_file("sub/ssub/~s.py") # nope: editor effluvia self.make_file("sub/lab/exp.py") self.make_file("sub/windows.pyw") py_files = set(find_python_files("sub", include_namespace_packages=True)) self.assert_same_files(py_files, [ "sub/a.py", "sub/b.py", "sub/ssub/__init__.py", "sub/ssub/s.py", "sub/lab/exp.py", "sub/windows.pyw", ]) @pytest.mark.skipif(not env.WINDOWS, reason="Only need to run Windows tests on Windows.") class WindowsFileTest(CoverageTest): """Windows-specific tests of file name handling.""" run_in_temp_dir = False def test_actual_path(self) -> None: assert actual_path(r'c:\Windows') == actual_path(r'C:\wINDOWS') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_goldtest.py0000644000175100001770000001575600000000000020545 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests of the helpers in goldtest.py""" from __future__ import annotations import os.path import re import pytest from tests.coveragetest import CoverageTest, TESTS_DIR from tests.goldtest import compare, gold_path from tests.goldtest import contains, contains_any, contains_rx, doesnt_contain from tests.helpers import os_sep, re_line, remove_tree GOOD_GETTY = """\ Four score and seven years ago our fathers brought forth upon this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal. 11/19/9999, Gettysburg, Pennsylvania """ BAD_GETTY = """\ Five score and seven years ago our fathers brought forth upon this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal. 
333/4444/55555, Gabcdef, Pennsylvania """ SCRUBS = [ # Numbers don't matter when comparing. (r'\d+', 'D'), (r'G\w+', 'Gxxx'), ] def path_regex(path: str) -> str: """Convert a file path into a regex that will match that path on any OS.""" return re.sub(r"[/\\]", r"[/\\\\]", path.replace(".", "[.]")) ACTUAL_DIR = os.path.join(TESTS_DIR, "actual/testing") ACTUAL_GETTY_FILE = os.path.join(ACTUAL_DIR, "getty/gettysburg.txt") GOLD_GETTY_FILE = os.path.join(TESTS_DIR, "gold/testing/getty/gettysburg.txt") GOLD_GETTY_FILE_RX = path_regex(GOLD_GETTY_FILE) GOLD_PATH_RX = path_regex("/tests/gold/testing/getty/gettysburg.txt") OUT_PATH_RX = path_regex("out/gettysburg.txt") class CompareTest(CoverageTest): """Tests of goldtest.py:compare()""" def setUp(self) -> None: super().setUp() self.addCleanup(remove_tree, ACTUAL_DIR) def test_good(self) -> None: self.make_file("out/gettysburg.txt", GOOD_GETTY) compare(gold_path("testing/getty"), "out", scrubs=SCRUBS) self.assert_doesnt_exist(ACTUAL_GETTY_FILE) def test_bad(self) -> None: self.make_file("out/gettysburg.txt", BAD_GETTY) # compare() raises an assertion. msg = fr"Files differ: .*{GOLD_PATH_RX} != {OUT_PATH_RX}" with pytest.raises(AssertionError, match=msg): compare(gold_path("testing/getty"), "out", scrubs=SCRUBS) # Stdout has a description of the diff. The diff shows the scrubbed content. stdout = self.stdout() assert "- Four score" in stdout assert "+ Five score" in stdout assert re_line(fr"^:::: diff '.*{GOLD_PATH_RX}' and '{OUT_PATH_RX}'", stdout) assert re_line(fr"^:::: end diff '.*{GOLD_PATH_RX}' and '{OUT_PATH_RX}'", stdout) assert ( os_sep(f"Saved actual output to '{ACTUAL_GETTY_FILE}': see tests/gold/README.rst") in os_sep(stdout) ) assert " D/D/D, Gxxx, Pennsylvania" in stdout # The actual file was saved. with open(ACTUAL_GETTY_FILE) as f: saved = f.read() assert saved == BAD_GETTY def test_good_needs_scrubs(self) -> None: # Comparing the "good" result without scrubbing the variable parts will fail. self.make_file("out/gettysburg.txt", GOOD_GETTY) # compare() raises an assertion. msg = fr"Files differ: .*{GOLD_PATH_RX} != {OUT_PATH_RX}" with pytest.raises(AssertionError, match=msg): compare(gold_path("testing/getty"), "out") stdout = self.stdout() assert "- 11/19/1863, Gettysburg, Pennsylvania" in stdout assert "+ 11/19/9999, Gettysburg, Pennsylvania" in stdout def test_actual_extra(self) -> None: self.make_file("out/gettysburg.txt", GOOD_GETTY) self.make_file("out/another.more", "hi") # Extra files in the output are ok with actual_extra=True. compare(gold_path("testing/getty"), "out", scrubs=SCRUBS, actual_extra=True) # But not without it: # (test output is in files like /tmp/pytest-of-user/pytest-0/popen-gw3/t76/out) msg = r"Files in .*[/\\]t\d+[/\\]out only: \['another.more'\]" with pytest.raises(AssertionError, match=msg): compare(gold_path("testing/getty"), "out", scrubs=SCRUBS) self.assert_exists(os.path.join(TESTS_DIR, "actual/testing/getty/another.more")) # But only the files matching the file_pattern are considered. compare(gold_path("testing/getty"), "out", file_pattern="*.txt", scrubs=SCRUBS) def test_xml_good(self) -> None: self.make_file("out/output.xml", """\ Goodie """) compare(gold_path("testing/xml"), "out", scrubs=SCRUBS) def test_xml_bad(self) -> None: self.make_file("out/output.xml", """\ Goodbye """) # compare() raises an exception. 
gold_rx = path_regex(gold_path("testing/xml/output.xml")) out_rx = path_regex("out/output.xml") msg = fr"Files differ: .*{gold_rx} != {out_rx}" with pytest.raises(AssertionError, match=msg): compare(gold_path("testing/xml"), "out", scrubs=SCRUBS) # Stdout has a description of the diff. The diff shows the # canonicalized and scrubbed content. stdout = self.stdout() assert '- ' in stdout assert '+ ' in stdout class ContainsTest(CoverageTest): """Tests of the various "contains" functions in goldtest.py""" run_in_temp_dir = False def test_contains(self) -> None: contains(GOLD_GETTY_FILE, "Four", "fathers", "dedicated") msg = fr"Missing content in {GOLD_GETTY_FILE_RX}: 'xyzzy'" with pytest.raises(AssertionError, match=msg): contains(GOLD_GETTY_FILE, "Four", "fathers", "xyzzy", "dedicated") def test_contains_rx(self) -> None: contains_rx(GOLD_GETTY_FILE, r"Fo.r", r"f[abc]thers", "dedi[cdef]ated") msg = fr"Missing regex in {GOLD_GETTY_FILE_RX}: r'm\[opq\]thers'" with pytest.raises(AssertionError, match=msg): contains_rx(GOLD_GETTY_FILE, r"Fo.r", r"m[opq]thers") def test_contains_any(self) -> None: contains_any(GOLD_GETTY_FILE, "Five", "Four", "Three") msg = fr"Missing content in {GOLD_GETTY_FILE_RX}: 'One' \[1 of 3\]" with pytest.raises(AssertionError, match=msg): contains_any(GOLD_GETTY_FILE, "One", "Two", "Three") def test_doesnt_contain(self) -> None: doesnt_contain(GOLD_GETTY_FILE, "One", "Two", "Three") msg = fr"Forbidden content in {GOLD_GETTY_FILE_RX}: 'Four'" with pytest.raises(AssertionError, match=msg): doesnt_contain(GOLD_GETTY_FILE, "Three", "Four", "Five") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_html.py0000644000175100001770000013716700000000000017665 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests that HTML generation is awesome.""" from __future__ import annotations import collections import datetime import glob import json import os import os.path import re import sys from unittest import mock from typing import Any, IO import pytest import coverage from coverage import env, Coverage from coverage.exceptions import NoDataError, NotPython, NoSource from coverage.files import abs_file, flat_rootname import coverage.html from coverage.report_core import get_analysis_to_report from coverage.types import TLineNo, TMorf from tests import testenv from tests.coveragetest import CoverageTest, TESTS_DIR from tests.goldtest import gold_path from tests.goldtest import compare, contains, contains_rx, doesnt_contain, contains_any from tests.helpers import assert_coverage_warnings, change_dir class HtmlTestHelpers(CoverageTest): """Methods that help with HTML tests.""" def create_initial_files(self) -> None: """Create the source files we need to run these tests.""" self.make_file("main_file.py", """\ import helper1, helper2 helper1.func1(12) helper2.func2(12) """) self.make_file("helper1.py", """\ def func1(x): if x % 2: print("odd") """) self.make_file("helper2.py", """\ def func2(x): print("x is %d" % x) """) def run_coverage( self, covargs: dict[str, Any] | None = None, htmlargs: dict[str, Any] | None = None, ) -> float: """Run coverage.py on main_file.py, and create an HTML report.""" self.clean_local_file_imports() cov = coverage.Coverage(**(covargs or {})) self.start_import_stop(cov, "main_file") ret = cov.html_report(**(htmlargs or {})) 
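# ---------------------------------------------------------------------------
# Editor's note: a hedged sketch of the same flow outside the test harness; it
# is not part of the original helper and uses only public coverage.py calls
# already seen in this file (Coverage(), start(), stop(), html_report()).
def _example_html_report() -> float:
    """Measure one module, write htmlcov/, and return the total coverage %."""
    import coverage
    cov = coverage.Coverage()
    cov.start()
    import main_file  # noqa: F401 -- assumed importable; imported so it runs under measurement
    cov.stop()
    return cov.html_report(directory="htmlcov")
# In these tests, start_import_stop() above bundles the start/import/stop steps.
# ---------------------------------------------------------------------------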
self.assert_valid_hrefs() return ret def get_html_report_content(self, module: str) -> str: """Return the content of the HTML report for `module`.""" filename = flat_rootname(module) + ".html" filename = os.path.join("htmlcov", filename) with open(filename) as f: return f.read() def get_html_index_content(self) -> str: """Return the content of index.html. Time stamps are replaced with a placeholder so that clocks don't matter. """ with open("htmlcov/index.html") as f: index = f.read() index = re.sub( r"created at \d{4}-\d{2}-\d{2} \d{2}:\d{2} \+\d{4}", r"created at YYYY-MM-DD HH:MM +ZZZZ", index, ) index = re.sub( r"created at \d{4}-\d{2}-\d{2} \d{2}:\d{2}", r"created at YYYY-MM-DD HH:MM", index, ) return index def assert_correct_timestamp(self, html: str) -> None: """Extract the time stamp from `html`, and assert it is recent.""" timestamp_pat = r"created at (\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2})" m = re.search(timestamp_pat, html) assert m, "Didn't find a time stamp!" timestamp = datetime.datetime(*[int(v) for v in m.groups()]) # type: ignore[arg-type] # The time stamp only records the minute, so the delta could be from # 12:00 to 12:01:59, or two minutes. self.assert_recent_datetime( timestamp, seconds=120, msg=f"Time stamp is wrong: {timestamp}", ) def assert_valid_hrefs(self) -> None: """Assert that the hrefs in htmlcov/*.html to see the references are valid. Doesn't check external links (those with a protocol). """ hrefs = collections.defaultdict(set) for fname in glob.glob("htmlcov/*.html"): with open(fname) as fhtml: html = fhtml.read() for href in re.findall(r""" href=['"]([^'"]*)['"]""", html): if href.startswith("#"): assert re.search(fr""" id=['"]{href[1:]}['"]""", html), ( f"Fragment {href!r} in {fname} has no anchor" ) continue if "://" in href: continue hrefs[href].add(fname) for href, sources in hrefs.items(): assert os.path.exists(f"htmlcov/{href}"), ( f"These files link to {href!r}, which doesn't exist: {', '.join(sources)}" ) class FileWriteTracker: """A fake object to track how `open` is used to write files.""" def __init__(self, written: set[str]) -> None: self.written = written def open(self, filename: str, mode: str = "r") -> IO[str]: """Be just like `open`, but write written file names to `self.written`.""" if mode.startswith("w"): self.written.add(filename.replace('\\', '/')) return open(filename, mode) class HtmlDeltaTest(HtmlTestHelpers, CoverageTest): """Tests of the HTML delta speed-ups.""" def setUp(self) -> None: super().setUp() # At least one of our tests monkey-patches the version of coverage.py, # so grab it here to restore it later. self.real_coverage_version = coverage.__version__ self.addCleanup(setattr, coverage, "__version__", self.real_coverage_version) self.files_written: set[str] def run_coverage( self, covargs: dict[str, Any] | None = None, htmlargs: dict[str, Any] | None = None, ) -> float: """Run coverage in-process for the delta tests. For the delta tests, we always want `source=.` and we want to track which files are written. `self.files_written` will be the file names that were opened for writing in html.py. """ covargs = covargs or {} covargs['source'] = "." 
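# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of the original method) of
# the pattern used immediately below: mock.patch() swaps the `open` used by
# coverage.html for the FileWriteTracker wrapper defined above, so a test can
# observe which htmlcov/ files a report run actually rewrites.
def _example_track_written_files(cov: Coverage) -> set[str]:
    """Run an HTML report while recording the report files it writes."""
    written: set[str] = set()
    with mock.patch("coverage.html.open", FileWriteTracker(written).open):
        cov.html_report()
    return written  # e.g. {"htmlcov/index.html", "htmlcov/main_file_py.html", ...}
# ---------------------------------------------------------------------------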
self.files_written = set() mock_open = FileWriteTracker(self.files_written).open with mock.patch("coverage.html.open", mock_open): return super().run_coverage(covargs=covargs, htmlargs=htmlargs) def assert_htmlcov_files_exist(self) -> None: """Assert that all the expected htmlcov files exist.""" self.assert_exists("htmlcov/index.html") self.assert_exists("htmlcov/main_file_py.html") self.assert_exists("htmlcov/helper1_py.html") self.assert_exists("htmlcov/helper2_py.html") self.assert_exists("htmlcov/style.css") self.assert_exists("htmlcov/coverage_html.js") self.assert_exists("htmlcov/.gitignore") def test_html_created(self) -> None: # Test basic HTML generation: files should be created. self.create_initial_files() self.run_coverage() self.assert_htmlcov_files_exist() def test_html_delta_from_source_change(self) -> None: # HTML generation can create only the files that have changed. # In this case, helper1 changes because its source is different. self.create_initial_files() self.run_coverage() index1 = self.get_html_index_content() # Now change a file (but only in a comment) and do it again. self.make_file("helper1.py", """\ def func1(x): # A nice function if x % 2: print("odd") """) self.run_coverage() # Only the changed files should have been created. self.assert_htmlcov_files_exist() assert "htmlcov/index.html" in self.files_written assert "htmlcov/helper1_py.html" in self.files_written assert "htmlcov/helper2_py.html" not in self.files_written assert "htmlcov/main_file_py.html" not in self.files_written # Because the source change was only a comment, the index is the same. index2 = self.get_html_index_content() assert index1 == index2 def test_html_delta_from_coverage_change(self) -> None: # HTML generation can create only the files that have changed. # In this case, helper1 changes because its coverage is different. self.create_initial_files() self.run_coverage() # Now change a file and do it again. main_file is different, and calls # helper1 differently. self.make_file("main_file.py", """\ import helper1, helper2 helper1.func1(23) helper2.func2(23) """) self.run_coverage() # Only the changed files should have been created. self.assert_htmlcov_files_exist() assert "htmlcov/index.html" in self.files_written assert "htmlcov/helper1_py.html" in self.files_written assert "htmlcov/helper2_py.html" not in self.files_written assert "htmlcov/main_file_py.html" in self.files_written def test_html_delta_from_settings_change(self) -> None: # HTML generation can create only the files that have changed. # In this case, everything changes because the coverage.py settings # have changed. self.create_initial_files() self.run_coverage(covargs=dict(omit=[])) index1 = self.get_html_index_content() self.run_coverage(covargs=dict(omit=['xyzzy*'])) # All the files have been reported again. self.assert_htmlcov_files_exist() assert "htmlcov/index.html" in self.files_written assert "htmlcov/helper1_py.html" in self.files_written assert "htmlcov/helper2_py.html" in self.files_written assert "htmlcov/main_file_py.html" in self.files_written index2 = self.get_html_index_content() assert index1 == index2 def test_html_delta_from_coverage_version_change(self) -> None: # HTML generation can create only the files that have changed. # In this case, everything changes because the coverage.py version has # changed. self.create_initial_files() self.run_coverage() index1 = self.get_html_index_content() # "Upgrade" coverage.py! coverage.__version__ = "XYZZY" self.run_coverage() # All the files have been reported again. 
self.assert_htmlcov_files_exist() assert "htmlcov/index.html" in self.files_written assert "htmlcov/helper1_py.html" in self.files_written assert "htmlcov/helper2_py.html" in self.files_written assert "htmlcov/main_file_py.html" in self.files_written index2 = self.get_html_index_content() fixed_index2 = index2.replace("XYZZY", self.real_coverage_version) assert index1 == fixed_index2 def test_file_becomes_100(self) -> None: self.create_initial_files() self.run_coverage() # Now change a file and do it again self.make_file("main_file.py", """\ import helper1, helper2 # helper1 is now 100% helper1.func1(12) helper1.func1(23) """) self.run_coverage(htmlargs=dict(skip_covered=True)) # The 100% file, skipped, shouldn't be here. self.assert_doesnt_exist("htmlcov/helper1_py.html") def test_status_format_change(self) -> None: self.create_initial_files() self.run_coverage() with open("htmlcov/status.json") as status_json: status_data = json.load(status_json) assert status_data['format'] == 2 status_data['format'] = 99 with open("htmlcov/status.json", "w") as status_json: json.dump(status_data, status_json) self.run_coverage() # All the files have been reported again. self.assert_htmlcov_files_exist() assert "htmlcov/index.html" in self.files_written assert "htmlcov/helper1_py.html" in self.files_written assert "htmlcov/helper2_py.html" in self.files_written assert "htmlcov/main_file_py.html" in self.files_written def test_dont_overwrite_gitignore(self) -> None: self.create_initial_files() self.make_file("htmlcov/.gitignore", "# ignore nothing") self.run_coverage() with open("htmlcov/.gitignore") as fgi: assert fgi.read() == "# ignore nothing" def test_dont_write_gitignore_into_existing_directory(self) -> None: self.create_initial_files() self.make_file("htmlcov/README", "My files: don't touch!") self.run_coverage() self.assert_doesnt_exist("htmlcov/.gitignore") self.assert_exists("htmlcov/index.html") class HtmlTitleTest(HtmlTestHelpers, CoverageTest): """Tests of the HTML title support.""" def test_default_title(self) -> None: self.create_initial_files() self.run_coverage() index = self.get_html_index_content() assert "Coverage report" in index assert "

<h1>Coverage report:" in index def test_title_set_in_config_file(self) -> None: self.create_initial_files() self.make_file(".coveragerc", "[html]\ntitle = Metrics & stuff!\n") self.run_coverage() index = self.get_html_index_content() assert "Metrics & stuff!" in index assert "
<h1>Metrics & stuff!:" in index def test_non_ascii_title_set_in_config_file(self) -> None: self.create_initial_files() self.make_file(".coveragerc", "[html]\ntitle = «ταБЬℓσ» numbers") self.run_coverage() index = self.get_html_index_content() assert "«ταБЬℓσ» numbers" in index assert "<h1>«ταБЬℓσ» numbers" in index def test_title_set_in_args(self) -> None: self.create_initial_files() self.make_file(".coveragerc", "[html]\ntitle = Good title\n") self.run_coverage(htmlargs=dict(title="«ταБЬℓσ» & stüff!")) index = self.get_html_index_content() expected = ( "<title>«ταБЬℓσ» " + "& stüff!" ) assert expected in index assert "
<h1>«ταБЬℓσ» & stüff!:" in index class HtmlWithUnparsableFilesTest(HtmlTestHelpers, CoverageTest): """Test the behavior when measuring unparsable files.""" def test_dotpy_not_python(self) -> None: self.make_file("main.py", "import innocuous") self.make_file("innocuous.py", "a = 1") cov = coverage.Coverage() self.start_import_stop(cov, "main") self.make_file("innocuous.py", "
<h1>This isn't python!</h1>
    ") msg = "Couldn't parse '.*innocuous.py' as Python source: .* at line 1" with pytest.raises(NotPython, match=msg): cov.html_report() def test_dotpy_not_python_ignored(self) -> None: self.make_file("main.py", "import innocuous") self.make_file("innocuous.py", "a = 2") cov = coverage.Coverage() self.start_import_stop(cov, "main") self.make_file("innocuous.py", "

<h1>This isn't python!</h1>
    ") with pytest.warns(Warning) as warns: cov.html_report(ignore_errors=True) assert_coverage_warnings( warns, re.compile(r"Couldn't parse Python file '.*innocuous.py' \(couldnt-parse\)"), ) self.assert_exists("htmlcov/index.html") # This would be better as a glob, if the HTML layout changes: self.assert_doesnt_exist("htmlcov/innocuous.html") def test_dothtml_not_python(self) -> None: # Run an "HTML" file self.make_file("innocuous.html", "a = 3") self.make_data_file(lines={abs_file("innocuous.html"): [1]}) # Before reporting, change it to be an HTML file. self.make_file("innocuous.html", "

<h1>This isn't python at all!</h1>
    ") cov = coverage.Coverage() cov.load() with pytest.raises(NoDataError, match="No data to report."): cov.html_report() def test_execed_liar_ignored(self) -> None: # Jinja2 sets __file__ to be a non-Python file, and then execs code. # If that file contains non-Python code, a TokenError shouldn't # have been raised when writing the HTML report. source = "exec(compile('','','exec'), {'__file__': 'liar.html'})" self.make_file("liar.py", source) self.make_file("liar.html", "{# Whoops, not python code #}") cov = coverage.Coverage() self.start_import_stop(cov, "liar") cov.html_report() self.assert_exists("htmlcov/index.html") def test_execed_liar_ignored_indentation_error(self) -> None: # Jinja2 sets __file__ to be a non-Python file, and then execs code. # If that file contains untokenizable code, we shouldn't get an # exception. source = "exec(compile('','','exec'), {'__file__': 'liar.html'})" self.make_file("liar.py", source) # Tokenize will raise an IndentationError if it can't dedent. self.make_file("liar.html", "0\n 2\n 1\n") cov = coverage.Coverage() self.start_import_stop(cov, "liar") cov.html_report() self.assert_exists("htmlcov/index.html") def test_decode_error(self) -> None: # https://github.com/nedbat/coveragepy/issues/351 # imp.load_module won't load a file with an undecodable character # in a comment, though Python will run them. So we'll change the # file after running. self.make_file("main.py", "import sub.not_ascii") self.make_file("sub/__init__.py") self.make_file("sub/not_ascii.py", """\ # coding: utf-8 a = 1 # Isn't this great?! """) cov = coverage.Coverage() self.start_import_stop(cov, "main") # Create the undecodable version of the file. make_file is too helpful, # so get down and dirty with bytes. with open("sub/not_ascii.py", "wb") as f: f.write(b"# coding: utf-8\na = 1 # Isn't this great?\xcb!\n") with open("sub/not_ascii.py", "rb") as f: undecodable = f.read() assert b"?\xcb!" in undecodable cov.html_report() html_report = self.get_html_report_content("sub/not_ascii.py") expected = "# Isn't this great?�!" assert expected in html_report def test_formfeeds(self) -> None: # https://github.com/nedbat/coveragepy/issues/360 self.make_file("formfeed.py", "line_one = 1\n\f\nline_two = 2\n") cov = coverage.Coverage() self.start_import_stop(cov, "formfeed") cov.html_report() formfeed_html = self.get_html_report_content("formfeed.py") assert "line_two" in formfeed_html def test_splitlines_special_chars(self) -> None: # https://github.com/nedbat/coveragepy/issues/1512 # See https://docs.python.org/3/library/stdtypes.html#str.splitlines for # the characters splitlines treats specially that readlines does not. # I'm not exactly sure why we need the "a" strings here, but the old # code wasn't failing without them. self.make_file("splitlines_is_weird.py", """\ test = { "0b": ["\x0b0"], "a1": "this is line 2", "0c": ["\x0c0"], "a2": "this is line 3", "1c": ["\x1c0"], "a3": "this is line 4", "1d": ["\x1d0"], "a4": "this is line 5", "1e": ["\x1e0"], "a5": "this is line 6", "85": ["\x850"], "a6": "this is line 7", "2028": ["\u20280"], "a7": "this is line 8", "2029": ["\u20290"], "a8": "this is line 9", } DONE = 1 """) cov = coverage.Coverage() self.start_import_stop(cov, "splitlines_is_weird") cov.html_report() the_html = self.get_html_report_content("splitlines_is_weird.py") assert "DONE" in the_html # Check that the lines are properly decoded and reported... 
html_lines = the_html.split("\n") assert any(re.search(r'id="t2".*"this is line 2"', line) for line in html_lines) assert any(re.search(r'id="t9".*"this is line 9"', line) for line in html_lines) class HtmlTest(HtmlTestHelpers, CoverageTest): """Moar HTML tests.""" def test_missing_source_file_incorrect_message(self) -> None: # https://github.com/nedbat/coveragepy/issues/60 self.make_file("thefile.py", "import sub.another\n") self.make_file("sub/__init__.py", "") self.make_file("sub/another.py", "print('another')\n") cov = coverage.Coverage() self.start_import_stop(cov, 'thefile') os.remove("sub/another.py") missing_file = os.path.join(self.temp_dir, "sub", "another.py") missing_file = os.path.realpath(missing_file) msg = "(?i)No source for code: '%s'" % re.escape(missing_file) with pytest.raises(NoSource, match=msg): cov.html_report() def test_extensionless_file_collides_with_extension(self) -> None: # It used to be that "program" and "program.py" would both be reported # to "program.html". Now they are not. # https://github.com/nedbat/coveragepy/issues/69 self.make_file("program", "import program\n") self.make_file("program.py", "a = 1\n") self.make_data_file(lines={ abs_file("program"): [1], abs_file("program.py"): [1], }) cov = coverage.Coverage() cov.load() cov.html_report() self.assert_exists("htmlcov/index.html") self.assert_exists("htmlcov/program.html") self.assert_exists("htmlcov/program_py.html") def test_has_date_stamp_in_files(self) -> None: self.create_initial_files() self.run_coverage() with open("htmlcov/index.html") as f: self.assert_correct_timestamp(f.read()) with open("htmlcov/main_file_py.html") as f: self.assert_correct_timestamp(f.read()) def test_reporting_on_unmeasured_file(self) -> None: # It should be ok to ask for an HTML report on a file that wasn't even # measured at all. https://github.com/nedbat/coveragepy/issues/403 self.create_initial_files() self.make_file("other.py", "a = 1\n") self.run_coverage(htmlargs=dict(morfs=['other.py'])) self.assert_exists("htmlcov/index.html") self.assert_exists("htmlcov/other_py.html") def make_main_and_not_covered(self) -> None: """Helper to create files for skip_covered scenarios.""" self.make_file("main_file.py", """\ import not_covered def normal(): print("z") normal() """) self.make_file("not_covered.py", """\ def not_covered(): print("n") """) def test_report_skip_covered(self) -> None: self.make_main_and_not_covered() self.run_coverage(htmlargs=dict(skip_covered=True)) self.assert_exists("htmlcov/index.html") self.assert_doesnt_exist("htmlcov/main_file_py.html") self.assert_exists("htmlcov/not_covered_py.html") def test_html_skip_covered(self) -> None: self.make_main_and_not_covered() self.make_file(".coveragerc", "[html]\nskip_covered = True") self.run_coverage() self.assert_exists("htmlcov/index.html") self.assert_doesnt_exist("htmlcov/main_file_py.html") self.assert_exists("htmlcov/not_covered_py.html") index = self.get_html_index_content() assert "1 file skipped due to complete coverage." 
in index def test_report_skip_covered_branches(self) -> None: self.make_main_and_not_covered() self.run_coverage(covargs=dict(branch=True), htmlargs=dict(skip_covered=True)) self.assert_exists("htmlcov/index.html") self.assert_doesnt_exist("htmlcov/main_file_py.html") self.assert_exists("htmlcov/not_covered_py.html") def test_report_skip_covered_100(self) -> None: self.make_file("main_file.py", """\ def normal(): print("z") normal() """) res = self.run_coverage(covargs=dict(source="."), htmlargs=dict(skip_covered=True)) assert res == 100.0 self.assert_doesnt_exist("htmlcov/main_file_py.html") def make_init_and_main(self) -> None: """Helper to create files for skip_empty scenarios.""" self.make_file("submodule/__init__.py", "") self.make_file("main_file.py", """\ import submodule def normal(): print("z") normal() """) def test_report_skip_empty(self) -> None: self.make_init_and_main() self.run_coverage(htmlargs=dict(skip_empty=True)) self.assert_exists("htmlcov/index.html") self.assert_exists("htmlcov/main_file_py.html") self.assert_doesnt_exist("htmlcov/submodule___init___py.html") index = self.get_html_index_content() assert "1 empty file skipped." in index def test_html_skip_empty(self) -> None: self.make_init_and_main() self.make_file(".coveragerc", "[html]\nskip_empty = True") self.run_coverage() self.assert_exists("htmlcov/index.html") self.assert_exists("htmlcov/main_file_py.html") self.assert_doesnt_exist("htmlcov/submodule___init___py.html") def filepath_to_regex(path: str) -> str: """Create a regex for scrubbing a file path.""" regex = re.escape(path) # If there's a backslash, let it match either slash. regex = regex.replace(r"\\", r"[\\/]") if env.WINDOWS: regex = "(?i)" + regex return regex def compare_html( expected: str, actual: str, extra_scrubs: list[tuple[str, str]] | None = None, ) -> None: """Specialized compare function for our HTML files.""" __tracebackhide__ = True # pytest, please don't show me this function. scrubs = [ (r'/coverage\.readthedocs\.io/?[-.\w/]*', '/coverage.readthedocs.io/VER'), (r'coverage\.py v[\d.abcdev]+', 'coverage.py vVER'), (r'created at \d\d\d\d-\d\d-\d\d \d\d:\d\d [-+]\d\d\d\d', 'created at DATE'), (r'created at \d\d\d\d-\d\d-\d\d \d\d:\d\d', 'created at DATE'), # Occasionally an absolute path is in the HTML report. (filepath_to_regex(TESTS_DIR), 'TESTS_DIR'), (filepath_to_regex(flat_rootname(str(TESTS_DIR))), '_TESTS_DIR'), # The temp dir the tests make. (filepath_to_regex(os.getcwd()), 'TEST_TMPDIR'), (filepath_to_regex(flat_rootname(str(os.getcwd()))), '_TEST_TMPDIR'), (filepath_to_regex(abs_file(os.getcwd())), 'TEST_TMPDIR'), (filepath_to_regex(flat_rootname(str(abs_file(os.getcwd())))), '_TEST_TMPDIR'), (r'/private/var/[\w/]+/pytest-of-\w+/pytest-\d+/(popen-gw\d+/)?t\d+', 'TEST_TMPDIR'), ] if env.WINDOWS: # For file paths... scrubs += [(r"\\", "/")] if extra_scrubs: scrubs += extra_scrubs compare(expected, actual, file_pattern="*.html", scrubs=scrubs) class HtmlGoldTest(CoverageTest): """Tests of HTML reporting that use gold files.""" def test_a(self) -> None: self.make_file("a.py", """\ if 1 < 2: # Needed a < to look at HTML entities. 
a = 3 else: a = 4 """) cov = coverage.Coverage() a = self.start_import_stop(cov, "a") cov.html_report(a, directory='out/a') compare_html(gold_path("html/a"), "out/a") contains( "out/a/a_py.html", ('if 1 ' + '< 2'), (' a ' + '= 3'), '67%', ) contains( "out/a/index.html", 'a.py', '67%', '67%', ) def test_b_branch(self) -> None: self.make_file("b.py", """\ def one(x): # This will be a branch that misses the else. if x < 2: a = 3 else: a = 4 one(1) def two(x): # A missed else that branches to "exit" if x: a = 5 two(1) def three(): try: # This if has two branches, *neither* one taken. if name_error_this_variable_doesnt_exist: a = 1 else: a = 2 except: pass three() """) cov = coverage.Coverage(branch=True) b = self.start_import_stop(cov, "b") cov.html_report(b, directory="out/b_branch") compare_html(gold_path("html/b_branch"), "out/b_branch") contains( "out/b_branch/b_py.html", ('if x ' + '< 2'), (' a = ' + '3'), '70%', ('3 ↛ 6' + 'line 3 didn\'t jump to line 6, ' + 'because the condition on line 3 was never false'), ('12 ↛ exit' + 'line 12 didn\'t return from function \'two\', ' + 'because the condition on line 12 was never false'), ('20 ↛ 21,   ' + '20 ↛ 23' + '2 missed branches: ' + '1) line 20 didn\'t jump to line 21, ' + 'because the condition on line 20 was never true, ' + '2) line 20 didn\'t jump to line 23, ' + 'because the condition on line 20 was never false'), ) contains( "out/b_branch/index.html", 'b.py', '70%', '70%', ) def test_bom(self) -> None: self.make_file("bom.py", bytes=b"""\ \xef\xbb\xbf# A Python source file in utf-8, with BOM. math = "3\xc3\x974 = 12, \xc3\xb72 = 6\xc2\xb10" assert len(math) == 18 assert len(math.encode('utf-8')) == 21 """.replace(b"\n", b"\r\n")) # It's important that the source file really have a BOM, which can # get lost, so check that it's really there, and that we have \r\n # line endings. with open("bom.py", "rb") as f: data = f.read() assert data[:3] == b"\xef\xbb\xbf" assert data.count(b"\r\n") == 5 cov = coverage.Coverage() bom = self.start_import_stop(cov, "bom") cov.html_report(bom, directory="out/bom") compare_html(gold_path("html/bom"), "out/bom") contains( "out/bom/bom_py.html", '"3×4 = 12, ÷2 = 6±0"', ) def test_isolatin1(self) -> None: self.make_file("isolatin1.py", bytes=b"""\ # -*- coding: iso8859-1 -*- # A Python source file in another encoding. 
math = "3\xd74 = 12, \xf72 = 6\xb10" assert len(math) == 18 """) cov = coverage.Coverage() isolatin1 = self.start_import_stop(cov, "isolatin1") cov.html_report(isolatin1, directory="out/isolatin1") compare_html(gold_path("html/isolatin1"), "out/isolatin1") contains( "out/isolatin1/isolatin1_py.html", '"3×4 = 12, ÷2 = 6±0"', ) def make_main_etc(self) -> None: """Make main.py and m1-m3.py for other tests.""" self.make_file("main.py", """\ import m1 import m2 import m3 a = 5 b = 6 assert m1.m1a == 1 assert m2.m2a == 1 assert m3.m3a == 1 """) self.make_file("m1.py", """\ m1a = 1 m1b = 2 """) self.make_file("m2.py", """\ m2a = 1 m2b = 2 """) self.make_file("m3.py", """\ m3a = 1 m3b = 2 """) def test_omit_1(self) -> None: self.make_main_etc() cov = coverage.Coverage(include=["./*"]) self.start_import_stop(cov, "main") cov.html_report(directory="out/omit_1") compare_html(gold_path("html/omit_1"), "out/omit_1") def test_omit_2(self) -> None: self.make_main_etc() cov = coverage.Coverage(include=["./*"]) self.start_import_stop(cov, "main") cov.html_report(directory="out/omit_2", omit=["m1.py"]) compare_html(gold_path("html/omit_2"), "out/omit_2") def test_omit_3(self) -> None: self.make_main_etc() cov = coverage.Coverage(include=["./*"]) self.start_import_stop(cov, "main") cov.html_report(directory="out/omit_3", omit=["m1.py", "m2.py"]) compare_html(gold_path("html/omit_3"), "out/omit_3") def test_omit_4(self) -> None: self.make_main_etc() self.make_file("omit4.ini", """\ [report] omit = m2.py """) cov = coverage.Coverage(config_file="omit4.ini", include=["./*"]) self.start_import_stop(cov, "main") cov.html_report(directory="out/omit_4") compare_html(gold_path("html/omit_4"), "out/omit_4") def test_omit_5(self) -> None: self.make_main_etc() self.make_file("omit5.ini", """\ [report] omit = fooey gooey, m[23]*, kablooey helloworld [html] directory = out/omit_5 """) cov = coverage.Coverage(config_file="omit5.ini", include=["./*"]) self.start_import_stop(cov, "main") cov.html_report() compare_html(gold_path("html/omit_5"), "out/omit_5") def test_other(self) -> None: self.make_file("src/here.py", """\ import other if 1 < 2: h = 3 else: h = 4 """) self.make_file("othersrc/other.py", """\ # A file in another directory. We're checking that it ends up in the # HTML report. print("This is the other src!") """) with change_dir("src"): sys.path.insert(0, "../othersrc") cov = coverage.Coverage(include=["./*", "../othersrc/*"]) self.start_import_stop(cov, "here") cov.html_report(directory="../out/other") # Different platforms will name the "other" file differently. 
Rename it actual_file = list(glob.glob("out/other/*_other_py.html")) assert len(actual_file) == 1 os.rename(actual_file[0], "out/other/blah_blah_other_py.html") compare_html( gold_path("html/other"), "out/other", extra_scrubs=[ (r'href="d_[0-9a-z]{16}_', 'href="_TEST_TMPDIR_othersrc_'), ], ) contains( 'out/other/index.html', 'here.py', 'other_py.html">', 'other.py', ) def test_partial(self) -> None: self.make_file("partial.py", """\ # partial branches and excluded lines a = 2 while "no peephole".upper(): # t4 break while a: # pragma: no branch break if 0: never_happen() if 13: a = 14 if a == 16: raise ZeroDivisionError("17") """) self.make_file("partial.ini", """\ [run] branch = True [report] exclude_lines = raise ZeroDivisionError """) cov = coverage.Coverage(config_file="partial.ini") partial = self.start_import_stop(cov, "partial") if env.PYBEHAVIOR.pep626: cov.html_report(partial, directory="out/partial_626") compare_html(gold_path("html/partial_626"), "out/partial_626") contains_rx( "out/partial_626/partial_py.html", r'

    .* id="t4"',
    r'.* id="t7"',
    # The "if 0" and "if 1" statements are marked as run.
    r'.* id="t10"',
    # The "raise ZeroDivisionError" is excluded by regex in the .ini.
    r'
    .* id="t17"', ) contains( "out/partial_626/index.html", 'partial.py', '87%', ) else: cov.html_report(partial, directory="out/partial") compare_html(gold_path("html/partial"), "out/partial") contains_rx( "out/partial/partial_py.html", r'

    .* id="t4"',
    r'.* id="t7"',
    # The "if 0" and "if 1" statements are optimized away.
    r'.* id="t10"',
    # The "raise ZeroDivisionError" is excluded by regex in the .ini.
    r'
    .* id="t17"', ) contains( "out/partial/index.html", 'partial.py', '91%', ) def test_styled(self) -> None: self.make_file("a.py", """\ if 1 < 2: # Needed a < to look at HTML entities. a = 3 else: a = 4 """) self.make_file("extra.css", "/* Doesn't matter what goes in here, it gets copied. */\n") cov = coverage.Coverage() a = self.start_import_stop(cov, "a") cov.html_report(a, directory="out/styled", extra_css="extra.css") compare_html(gold_path("html/styled"), "out/styled") compare(gold_path("html/styled"), "out/styled", file_pattern="*.css") contains( "out/styled/a_py.html", '', ('if 1 ' + '< 2'), (' a = ' + '3'), '67%', ) contains( "out/styled/index.html", '', 'a.py', '67%', ) def test_tabbed(self) -> None: # The file contents would look like this with 8-space tabs: # x = 1 # if x: # a = "tabbed" # aligned comments # if x: # look nice # b = "no spaces" # when they # c = "done" # line up. self.make_file("tabbed.py", """\ x = 1 if x: \ta = "Tabbed"\t\t\t\t# Aligned comments \tif x:\t\t\t\t\t# look nice \t\tb = "No spaces"\t\t\t# when they \tc = "Done"\t\t\t\t# line up. """) cov = coverage.Coverage() tabbed = self.start_import_stop(cov, "tabbed") cov.html_report(tabbed, directory="out") # Editors like to change things, make sure our source file still has tabs. contains("tabbed.py", "\tif x:\t\t\t\t\t# look nice") contains( "out/tabbed_py.html", '> if ' + 'x:' + ' ' + '# look nice', ) doesnt_contain("out/tabbed_py.html", "\t") def test_unicode(self) -> None: surrogate = "\U000e0100" self.make_file("unicode.py", """\ # -*- coding: utf-8 -*- # A Python source file with exotic characters. upside_down = "สŽdห™วbษษนวสŒoษ”" surrogate = "db40,dd00: x@" """.replace("@", surrogate)) cov = coverage.Coverage() unimod = self.start_import_stop(cov, "unicode") cov.html_report(unimod, directory="out/unicode") compare_html(gold_path("html/unicode"), "out/unicode") contains( "out/unicode/unicode_py.html", '"ʎd˙ǝbɐɹǝʌoɔ"', ) contains_any( "out/unicode/unicode_py.html", '"db40,dd00: x��"', '"db40,dd00: x󠄀"', ) def test_accented_dot_py(self) -> None: # Make a file with a non-ascii character in the filename. self.make_file("h\xe2t.py", "print('accented')") self.make_data_file(lines={abs_file("h\xe2t.py"): [1]}) cov = coverage.Coverage() cov.load() cov.html_report() self.assert_exists("htmlcov/h\xe2t_py.html") with open("htmlcov/index.html") as indexf: index = indexf.read() assert 'hât.py' in index def test_accented_directory(self) -> None: # Make a file with a non-ascii character in the directory name. self.make_file("\xe2/accented.py", "print('accented')") self.make_data_file(lines={abs_file("\xe2/accented.py"): [1]}) # The HTML report uses ascii-encoded HTML entities. 
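# ----------------------------------------------------------------------
# A minimal sketch (not part of the tests here) of the file-name
# flattening these accented-path tests assert: the HTML report names its
# pages with coverage.files.flat_rootname(), the same helper
# compare_html() uses for scrubbing, so files from different directories
# cannot collide inside htmlcov/.  The hash prefix is version-dependent;
# the values in the comments are illustrative only.
from coverage.files import flat_rootname

print(flat_rootname("accented.py"))        # e.g. "accented_py"
print(flat_rootname("\xe2/accented.py"))   # e.g. "d_5786906b6f0ffeb4_accented_py"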
cov = coverage.Coverage() cov.load() cov.html_report() self.assert_exists("htmlcov/d_5786906b6f0ffeb4_accented_py.html") with open("htmlcov/index.html") as indexf: index = indexf.read() expected = 'â%saccented.py' assert expected % os.sep in index @pytest.mark.skipif(not testenv.DYN_CONTEXTS, reason="No dynamic contexts with this core.") class HtmlWithContextsTest(HtmlTestHelpers, CoverageTest): """Tests of the HTML reports with shown contexts.""" EMPTY = coverage.html.HtmlDataGeneration.EMPTY def html_data_from_cov(self, cov: Coverage, morf: TMorf) -> coverage.html.FileData: """Get HTML report data from a `Coverage` object for a morf.""" with self.assert_warnings(cov, []): datagen = coverage.html.HtmlDataGeneration(cov) fr, analysis = next(get_analysis_to_report(cov, [morf])) file_data = datagen.data_for_file(fr, analysis) return file_data SOURCE = """\ def helper(lineno): x = 2 def test_one(): a = 5 helper(6) def test_two(): a = 9 b = 10 if a > 11: b = 12 assert a == (13-4) assert b == (14-4) helper( 16 ) test_one() x = 20 helper(21) test_two() """ OUTER_LINES = [1, 4, 8, 19, 20, 21, 2, 22] TEST_ONE_LINES = [5, 6, 2] TEST_TWO_LINES = [9, 10, 11, 13, 14, 15, 2] def test_dynamic_contexts(self) -> None: self.make_file("two_tests.py", self.SOURCE) cov = coverage.Coverage(source=["."]) cov.set_option("run:dynamic_context", "test_function") cov.set_option("html:show_contexts", True) mod = self.start_import_stop(cov, "two_tests") d = self.html_data_from_cov(cov, mod) context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two'] expected_lines = [self.OUTER_LINES, self.TEST_ONE_LINES, self.TEST_TWO_LINES] for label, expected in zip(context_labels, expected_lines): actual = [ ld.number for ld in d.lines if label == ld.contexts_label or label in (ld.contexts or ()) ] assert sorted(expected) == sorted(actual) cov.html_report(mod, directory="out/contexts") compare_html(gold_path("html/contexts"), "out/contexts") def test_filtered_dynamic_contexts(self) -> None: self.make_file("two_tests.py", self.SOURCE) cov = coverage.Coverage(source=["."]) cov.set_option("run:dynamic_context", "test_function") cov.set_option("html:show_contexts", True) cov.set_option("report:contexts", ["test_one"]) mod = self.start_import_stop(cov, "two_tests") d = self.html_data_from_cov(cov, mod) context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two'] expected_lines: list[list[TLineNo]] = [[], self.TEST_ONE_LINES, []] for label, expected in zip(context_labels, expected_lines): actual = [ld.number for ld in d.lines if label in (ld.contexts or ())] assert sorted(expected) == sorted(actual) def test_no_contexts_warns_no_contexts(self) -> None: # If no contexts were collected, then show_contexts emits a warning. 
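# ----------------------------------------------------------------------
# For reference, a sketch of enabling the same behaviour outside the
# test harness.  "run:dynamic_context" and "html:show_contexts" are the
# option names the tests above pass to set_option(); the equivalent
# .coveragerc settings are shown in the trailing comments.
import coverage

cov = coverage.Coverage()
cov.set_option("run:dynamic_context", "test_function")  # [run] dynamic_context = test_function
cov.set_option("html:show_contexts", True)              # [html] show_contexts = True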
self.make_file("two_tests.py", self.SOURCE) cov = coverage.Coverage(source=["."]) cov.set_option("html:show_contexts", True) self.start_import_stop(cov, "two_tests") with self.assert_warnings(cov, ["No contexts were measured"]): cov.html_report() def test_dynamic_contexts_relative_files(self) -> None: self.make_file("two_tests.py", self.SOURCE) self.make_file("config", "[run]\nrelative_files = True") cov = coverage.Coverage(source=["."], config_file="config") cov.set_option("run:dynamic_context", "test_function") cov.set_option("html:show_contexts", True) mod = self.start_import_stop(cov, "two_tests") d = self.html_data_from_cov(cov, mod) context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two'] expected_lines = [self.OUTER_LINES, self.TEST_ONE_LINES, self.TEST_TWO_LINES] for label, expected in zip(context_labels, expected_lines): actual = [ ld.number for ld in d.lines if label == ld.contexts_label or label in (ld.contexts or ()) ] assert sorted(expected) == sorted(actual) class HtmlHelpersTest(HtmlTestHelpers, CoverageTest): """Tests of the helpers in HtmlTestHelpers.""" def test_bad_link(self) -> None: # Does assert_valid_hrefs detect links to non-existent files? self.make_file("htmlcov/index.html", "Nothing") msg = "These files link to 'nothing.html', which doesn't exist: htmlcov.index.html" with pytest.raises(AssertionError, match=msg): self.assert_valid_hrefs() def test_bad_anchor(self) -> None: # Does assert_valid_hrefs detect fragments that go nowhere? self.make_file("htmlcov/index.html", "Nothing") msg = "Fragment '#nothing' in htmlcov.index.html has no anchor" with pytest.raises(AssertionError, match=msg): self.assert_valid_hrefs() @pytest.mark.parametrize("n, key", [ (0, "a"), (1, "b"), (999999999, "e9S_p"), ]) def test_encode_int(n: int, key: str) -> None: assert coverage.html.encode_int(n) == key ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_json.py0000644000175100001770000001550400000000000017660 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Test json-based summary reporting for coverage.py""" from __future__ import annotations import json import os from datetime import datetime from typing import Any import coverage from coverage import Coverage from coverage.jsonreport import FORMAT_VERSION from tests.coveragetest import UsingModulesMixin, CoverageTest class JsonReportTest(UsingModulesMixin, CoverageTest): """Tests of the JSON reports from coverage.py.""" def _assert_expected_json_report( self, cov: Coverage, expected_result: dict[str, Any], ) -> None: """ Helper that handles common ceremonies so tests can clearly show the consequences of setting various arguments. 
""" self.make_file("a.py", """\ a = {'b': 1} if a.get('a'): b = 1 elif a.get('b'): b = 2 else: b = 3 if not a: b = 4 """) a = self.start_import_stop(cov, "a") output_path = os.path.join(self.temp_dir, "a.json") cov.json_report(a, outfile=output_path) with open(output_path) as result_file: parsed_result = json.load(result_file) self.assert_recent_datetime( datetime.strptime(parsed_result['meta']['timestamp'], "%Y-%m-%dT%H:%M:%S.%f"), ) del (parsed_result['meta']['timestamp']) expected_result["meta"].update({ "format": FORMAT_VERSION, "version": coverage.__version__, }) assert parsed_result == expected_result def test_branch_coverage(self) -> None: cov = coverage.Coverage(branch=True) expected_result = { 'meta': { "branch_coverage": True, "show_contexts": False, }, 'files': { 'a.py': { 'executed_lines': [1, 2, 4, 5, 8], 'missing_lines': [3, 7, 9], 'excluded_lines': [], 'executed_branches': [ [2, 4], [4, 5], [8, -1], ], 'missing_branches': [ [2, 3], [4, 7], [8, 9], ], 'summary': { 'missing_lines': 3, 'covered_lines': 5, 'num_statements': 8, 'num_branches': 6, 'excluded_lines': 0, 'num_partial_branches': 3, 'covered_branches': 3, 'missing_branches': 3, 'percent_covered': 57.142857142857146, 'percent_covered_display': '57', }, }, }, 'totals': { 'missing_lines': 3, 'covered_lines': 5, 'num_statements': 8, 'num_branches': 6, 'excluded_lines': 0, 'num_partial_branches': 3, 'percent_covered': 57.142857142857146, 'percent_covered_display': '57', 'covered_branches': 3, 'missing_branches': 3, }, } self._assert_expected_json_report(cov, expected_result) def test_simple_line_coverage(self) -> None: cov = coverage.Coverage() expected_result = { 'meta': { "branch_coverage": False, "show_contexts": False, }, 'files': { 'a.py': { 'executed_lines': [1, 2, 4, 5, 8], 'missing_lines': [3, 7, 9], 'excluded_lines': [], 'summary': { 'excluded_lines': 0, 'missing_lines': 3, 'covered_lines': 5, 'num_statements': 8, 'percent_covered': 62.5, 'percent_covered_display': '62', }, }, }, 'totals': { 'excluded_lines': 0, 'missing_lines': 3, 'covered_lines': 5, 'num_statements': 8, 'percent_covered': 62.5, 'percent_covered_display': '62', }, } self._assert_expected_json_report(cov, expected_result) def run_context_test(self, relative_files: bool) -> None: """A helper for two tests below.""" self.make_file("config", f"""\ [run] relative_files = {relative_files} [report] precision = 2 [json] show_contexts = True """) cov = coverage.Coverage(context="cool_test", config_file="config") expected_result = { 'meta': { "branch_coverage": False, "show_contexts": True, }, 'files': { 'a.py': { 'executed_lines': [1, 2, 4, 5, 8], 'missing_lines': [3, 7, 9], 'excluded_lines': [], "contexts": { "1": [ "cool_test", ], "2": [ "cool_test", ], "4": [ "cool_test", ], "5": [ "cool_test", ], "8": [ "cool_test", ], }, 'summary': { 'excluded_lines': 0, 'missing_lines': 3, 'covered_lines': 5, 'num_statements': 8, 'percent_covered': 62.5, 'percent_covered_display': '62.50', }, }, }, 'totals': { 'excluded_lines': 0, 'missing_lines': 3, 'covered_lines': 5, 'num_statements': 8, 'percent_covered': 62.5, 'percent_covered_display': '62.50', }, } self._assert_expected_json_report(cov, expected_result) def test_context_non_relative(self) -> None: self.run_context_test(relative_files=False) def test_context_relative(self) -> None: self.run_context_test(relative_files=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_lcov.py0000644000175100001770000002472600000000000017660 
0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Test LCOV-based summary reporting for coverage.py.""" from __future__ import annotations import math import textwrap from tests.coveragetest import CoverageTest import coverage from coverage import env class LcovTest(CoverageTest): """Tests of the LCOV reports from coverage.py.""" def create_initial_files(self) -> None: """ Helper for tests that handles the common ceremony so the tests can show the consequences of changes in the setup. """ self.make_file("main_file.py", """\ def cuboid_volume(l): return (l*l*l) def IsItTrue(): return True """) self.make_file("test_file.py", """\ from main_file import cuboid_volume import unittest class TestCuboid(unittest.TestCase): def test_volume(self): self.assertAlmostEqual(cuboid_volume(2),8) self.assertAlmostEqual(cuboid_volume(1),1) self.assertAlmostEqual(cuboid_volume(0),0) self.assertAlmostEqual(cuboid_volume(5.5),166.375) """) def get_lcov_report_content(self, filename: str = "coverage.lcov") -> str: """Return the content of an LCOV report.""" with open(filename) as file: return file.read() def test_lone_file(self) -> None: # For a single file with a couple of functions, the lcov should cover # the function definitions themselves, but not the returns. self.make_file("main_file.py", """\ def cuboid_volume(l): return (l*l*l) def IsItTrue(): return True """) expected_result = textwrap.dedent("""\ TN: SF:main_file.py DA:1,1,7URou3io0zReBkk69lEb/Q DA:4,1,ilhb4KUfytxtEuClijZPlQ DA:2,0,Xqj6H1iz/nsARMCAbE90ng DA:5,0,LWILTcvARcydjFFyo9qM0A LF:4 LH:2 end_of_record """) self.assert_doesnt_exist(".coverage") cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "main_file") pct = cov.lcov_report() assert pct == 50.0 actual_result = self.get_lcov_report_content() assert expected_result == actual_result def test_simple_line_coverage_two_files(self) -> None: # Test that line coverage is created when coverage is run, # and matches the output of the file below. self.create_initial_files() self.assert_doesnt_exist(".coverage") self.make_file(".coveragerc", "[lcov]\noutput = data.lcov\n") cov = coverage.Coverage(source=".") self.start_import_stop(cov, "test_file") pct = cov.lcov_report() assert pct == 50.0 self.assert_exists("data.lcov") expected_result = textwrap.dedent("""\ TN: SF:main_file.py DA:1,1,7URou3io0zReBkk69lEb/Q DA:4,1,ilhb4KUfytxtEuClijZPlQ DA:2,0,Xqj6H1iz/nsARMCAbE90ng DA:5,0,LWILTcvARcydjFFyo9qM0A LF:4 LH:2 end_of_record TN: SF:test_file.py DA:1,1,R5Rb4IzmjKRgY/vFFc1TRg DA:2,1,E/tvV9JPVDhEcTCkgrwOFw DA:4,1,GP08LPBYJq8EzYveHJy2qA DA:5,1,MV+jSLi6PFEl+WatEAptog DA:6,0,qyqd1mF289dg6oQAQHA+gQ DA:7,0,nmEYd5F1KrxemgC9iVjlqg DA:8,0,jodMK26WYDizOO1C7ekBbg DA:9,0,LtxfKehkX8o4KvC5GnN52g LF:8 LH:4 end_of_record """) actual_result = self.get_lcov_report_content(filename="data.lcov") assert expected_result == actual_result def test_branch_coverage_one_file(self) -> None: # Test that the reporter produces valid branch coverage. 
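# ----------------------------------------------------------------------
# A self-contained sketch of producing an LCOV file with the same public
# API these tests exercise ("demo" is a hypothetical module to measure;
# "coverage.lcov" is the default output name the tests rely on).
import coverage

cov = coverage.Coverage(branch=True, source=["."])
with cov.collect():
    import demo  # hypothetical module under measurement
pct = cov.lcov_report()   # writes coverage.lcov by default, returns the total percentage
print(f"total coverage: {pct:.1f}%")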
self.make_file("main_file.py", """\ def is_it_x(x): if x == 3: return x else: return False """) self.assert_doesnt_exist(".coverage") cov = coverage.Coverage(branch=True, source=".") self.start_import_stop(cov, "main_file") pct = cov.lcov_report() assert math.isclose(pct, 16.666666666666668) self.assert_exists("coverage.lcov") expected_result = textwrap.dedent("""\ TN: SF:main_file.py DA:1,1,4MDXMbvwQ3L7va1tsphVzw DA:2,0,MuERA6EYyZNpKPqoJfzwkA DA:3,0,sAyiiE6iAuPMte9kyd0+3g DA:5,0,W/g8GJDAYJkSSurt59Mzfw LF:4 LH:1 BRDA:3,0,0,- BRDA:5,0,1,- BRF:2 BRH:0 end_of_record """) actual_result = self.get_lcov_report_content() assert expected_result == actual_result def test_branch_coverage_two_files(self) -> None: # Test that valid branch coverage is generated # in the case of two files. self.make_file("main_file.py", """\ def is_it_x(x): if x == 3: return x else: return False """) self.make_file("test_file.py", """\ from main_file import * import unittest class TestIsItX(unittest.TestCase): def test_is_it_x(self): self.assertEqual(is_it_x(3), 3) self.assertEqual(is_it_x(4), False) """) self.assert_doesnt_exist(".coverage") cov = coverage.Coverage(branch=True, source=".") self.start_import_stop(cov, "test_file") pct = cov.lcov_report() assert math.isclose(pct, 41.666666666666664) self.assert_exists("coverage.lcov") expected_result = textwrap.dedent("""\ TN: SF:main_file.py DA:1,1,4MDXMbvwQ3L7va1tsphVzw DA:2,0,MuERA6EYyZNpKPqoJfzwkA DA:3,0,sAyiiE6iAuPMte9kyd0+3g DA:5,0,W/g8GJDAYJkSSurt59Mzfw LF:4 LH:1 BRDA:3,0,0,- BRDA:5,0,1,- BRF:2 BRH:0 end_of_record TN: SF:test_file.py DA:1,1,9TxKIyoBtmhopmlbDNa8FQ DA:2,1,E/tvV9JPVDhEcTCkgrwOFw DA:4,1,C3s/c8C1Yd/zoNG1GnGexg DA:5,1,9qPyWexYysgeKtB+YvuzAg DA:6,0,LycuNcdqoUhPXeuXUTf5lA DA:7,0,FPTWzd68bDx76HN7VHu1wA LF:6 LH:4 BRF:0 BRH:0 end_of_record """) actual_result = self.get_lcov_report_content() assert expected_result == actual_result def test_half_covered_branch(self) -> None: # Test that for a given branch that is only half covered, # the block numbers remain the same, and produces valid lcov. self.make_file("main_file.py", """\ something = True if something: print("Yes, something") else: print("No, nothing") """) self.assert_doesnt_exist(".coverage") cov = coverage.Coverage(branch=True, source=".") self.start_import_stop(cov, "main_file") pct = cov.lcov_report() assert math.isclose(pct, 66.66666666666667) self.assert_exists("coverage.lcov") expected_result = textwrap.dedent("""\ TN: SF:main_file.py DA:1,1,N4kbVOlkNI1rqOfCArBClw DA:3,1,CmlqqPf0/H+R/p7/PLEXZw DA:4,1,rE3mWnpoMq2W2sMETVk/uQ DA:6,0,+Aov7ekIts7C96udNDVIIQ LF:4 LH:3 BRDA:6,0,0,- BRDA:4,0,1,1 BRF:2 BRH:1 end_of_record """) actual_result = self.get_lcov_report_content() assert expected_result == actual_result def test_empty_init_files(self) -> None: # Test that in the case of an empty __init__.py file, the lcov # reporter will note that the file is there, and will note the empty # line. It will also note the lack of branches, and the checksum for # the line. # # Although there are no lines found, it will note one line as hit in # old Pythons, and no lines hit in newer Pythons. self.make_file("__init__.py", "") self.assert_doesnt_exist(".coverage") cov = coverage.Coverage(branch=True, source=".") self.start_import_stop(cov, "__init__") pct = cov.lcov_report() assert pct == 0.0 self.assert_exists("coverage.lcov") # Newer Pythons have truly empty empty files. 
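# ----------------------------------------------------------------------
# Key to the LCOV records asserted in the expected outputs above (the
# field meanings follow the standard lcov tracefile format; the DA
# checksum field is optional in that format):
#   TN:                               test name (left empty here)
#   SF:<path>                         start of a source-file section
#   DA:<line>,<hits>[,<checksum>]     per-line execution count
#   LF: / LH:                         lines found / lines hit
#   BRDA:<line>,<block>,<branch>,<taken>   branch outcome; "-" means the
#                                     branch was never reached
#   BRF: / BRH:                       branches found / branches hit
#   end_of_record                     closes the SF: section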
if env.PYBEHAVIOR.empty_is_empty: expected_result = textwrap.dedent("""\ TN: SF:__init__.py LF:0 LH:0 BRF:0 BRH:0 end_of_record """) else: expected_result = textwrap.dedent("""\ TN: SF:__init__.py DA:1,1,1B2M2Y8AsgTpgAmY7PhCfg LF:0 LH:0 BRF:0 BRH:0 end_of_record """) actual_result = self.get_lcov_report_content() assert expected_result == actual_result def test_excluded_lines(self) -> None: self.make_file(".coveragerc", """\ [report] exclude_lines = foo """) self.make_file("runme.py", """\ s = "Hello 1" t = "foo is ignored 2" if s.upper() == "BYE 3": i_am_missing_4() foo_is_missing_5() print("Done 6") # foo 7 # line 8 """) cov = coverage.Coverage(source=".", branch=True) self.start_import_stop(cov, "runme") cov.lcov_report() expected_result = textwrap.dedent("""\ TN: SF:runme.py DA:1,1,nWfwsz0pRTEJrInVF+xNvQ DA:3,1,uV4NoIauDo5LCti6agX9sg DA:6,1,+PfQRgSChjQOGkA6MArMDg DA:4,0,GR4ThLStnqpcZvm3alfRaA LF:4 LH:3 BRDA:4,0,0,- BRDA:6,0,1,1 BRF:2 BRH:1 end_of_record """) actual_result = self.get_lcov_report_content() assert expected_result == actual_result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_misc.py0000644000175100001770000001346200000000000017643 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests of miscellaneous stuff.""" from __future__ import annotations import sys from unittest import mock import pytest from coverage.exceptions import CoverageException from coverage.misc import file_be_gone from coverage.misc import Hasher, substitute_variables, import_third_party from coverage.misc import human_sorted, human_sorted_items, stdout_link from tests.coveragetest import CoverageTest class HasherTest(CoverageTest): """Test our wrapper of fingerprint hashing.""" run_in_temp_dir = False def test_string_hashing(self) -> None: h1 = Hasher() h1.update("Hello, world!") h2 = Hasher() h2.update("Goodbye!") h3 = Hasher() h3.update("Hello, world!") assert h1.hexdigest() != h2.hexdigest() assert h1.hexdigest() == h3.hexdigest() def test_bytes_hashing(self) -> None: h1 = Hasher() h1.update(b"Hello, world!") h2 = Hasher() h2.update(b"Goodbye!") assert h1.hexdigest() != h2.hexdigest() def test_unicode_hashing(self) -> None: h1 = Hasher() h1.update("Hello, world! \N{SNOWMAN}") h2 = Hasher() h2.update("Goodbye!") assert h1.hexdigest() != h2.hexdigest() def test_dict_hashing(self) -> None: h1 = Hasher() h1.update({'a': 17, 'b': 23}) h2 = Hasher() h2.update({'b': 23, 'a': 17}) assert h1.hexdigest() == h2.hexdigest() def test_dict_collision(self) -> None: h1 = Hasher() h1.update({'a': 17, 'b': {'c': 1, 'd': 2}}) h2 = Hasher() h2.update({'a': 17, 'b': {'c': 1}, 'd': 2}) assert h1.hexdigest() != h2.hexdigest() class RemoveFileTest(CoverageTest): """Tests of misc.file_be_gone.""" def test_remove_nonexistent_file(self) -> None: # It's OK to try to remove a file that doesn't exist. file_be_gone("not_here.txt") def test_remove_actual_file(self) -> None: # It really does remove a file that does exist. self.make_file("here.txt", "We are here, we are here, we are here!") file_be_gone("here.txt") self.assert_doesnt_exist("here.txt") def test_actual_errors(self) -> None: # Errors can still happen. # ". 
is a directory" on Unix, or "Access denied" on Windows with pytest.raises(OSError): file_be_gone(".") VARS = { 'FOO': 'fooey', 'BAR': 'xyzzy', } @pytest.mark.parametrize("before, after", [ ("Nothing to do", "Nothing to do"), ("Dollar: $$", "Dollar: $"), ("Simple: $FOO is fooey", "Simple: fooey is fooey"), ("Braced: X${FOO}X.", "Braced: XfooeyX."), ("Missing: x${NOTHING}y is xy", "Missing: xy is xy"), ("Multiple: $$ $FOO $BAR ${FOO}", "Multiple: $ fooey xyzzy fooey"), ("Ill-formed: ${%5} ${{HI}} ${", "Ill-formed: ${%5} ${{HI}} ${"), ("Strict: ${FOO?} is there", "Strict: fooey is there"), ("Defaulted: ${WUT-missing}!", "Defaulted: missing!"), ("Defaulted empty: ${WUT-}!", "Defaulted empty: !"), ]) def test_substitute_variables(before: str, after: str) -> None: assert substitute_variables(before, VARS) == after @pytest.mark.parametrize("text", [ "Strict: ${NOTHING?} is an error", ]) def test_substitute_variables_errors(text: str) -> None: with pytest.raises(CoverageException) as exc_info: substitute_variables(text, VARS) assert text in str(exc_info.value) assert "Variable NOTHING is undefined" in str(exc_info.value) class ImportThirdPartyTest(CoverageTest): """Test import_third_party.""" run_in_temp_dir = False def test_success(self) -> None: # Make sure we don't have pytest in sys.modules before we start. del sys.modules["pytest"] # Import pytest mod, has = import_third_party("pytest") assert has # Yes, it's really pytest: assert mod.__name__ == "pytest" print(dir(mod)) assert all(hasattr(mod, name) for name in ["skip", "mark", "raises", "warns"]) # But it's not in sys.modules: assert "pytest" not in sys.modules def test_failure(self) -> None: _, has = import_third_party("xyzzy") assert not has assert "xyzzy" not in sys.modules HUMAN_DATA = [ ("z1 a2z a01 a2a a3 a1", "a01 a1 a2a a2z a3 z1"), ("a10 a9 a100 a1", "a1 a9 a10 a100"), ("4.0 3.10-win 3.10-mac 3.9-mac 3.9-win", "3.9-mac 3.9-win 3.10-mac 3.10-win 4.0"), ] @pytest.mark.parametrize("words, ordered", HUMAN_DATA) def test_human_sorted(words: str, ordered: str) -> None: assert " ".join(human_sorted(words.split())) == ordered @pytest.mark.parametrize("words, ordered", HUMAN_DATA) def test_human_sorted_items(words: str, ordered: str) -> None: keys = words.split() # Check that we never try to compare the values in the items human_sorted_items([(k, object()) for k in keys]) items = [(k, 1) for k in keys] + [(k, 2) for k in keys] okeys = ordered.split() oitems = [(k, v) for k in okeys for v in [1, 2]] assert human_sorted_items(items) == oitems assert human_sorted_items(items, reverse=True) == oitems[::-1] def test_stdout_link_tty() -> None: with mock.patch.object(sys.stdout, "isatty", lambda:True): link = stdout_link("some text", "some url") assert link == "\033]8;;some url\asome text\033]8;;\a" def test_stdout_link_not_tty() -> None: # Without mocking isatty, it reports False in a pytest suite. assert stdout_link("some text", "some url") == "some text" def test_stdout_link_with_fake_stdout() -> None: # If stdout is another object, we should still be ok. 
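# ----------------------------------------------------------------------
# For reference: the escape sequence asserted in test_stdout_link_tty is
# the OSC 8 terminal-hyperlink convention -- "\033]8;;URL\a" opens a
# link, the visible text follows, and "\033]8;;\a" closes it.  A tiny
# sketch of the same wrapping (stdout_link itself is the helper under
# test; the URL is an arbitrary example):
link = "\033]8;;https://example.com\aexample\033]8;;\a"
print(link)   # renders as a clickable "example" in terminals that support OSC 8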
with mock.patch.object(sys, "stdout", object()): link = stdout_link("some text", "some url") assert link == "some text" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_mixins.py0000644000175100001770000000566300000000000020223 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests of code in tests/mixins.py""" from __future__ import annotations import pytest from coverage.misc import import_local_file from tests.mixins import TempDirMixin, RestoreModulesMixin class TempDirMixinTest(TempDirMixin): """Test the methods in TempDirMixin.""" def file_text(self, fname: str) -> str: """Return the text read from a file.""" with open(fname, "rb") as f: return f.read().decode('ascii') def test_make_file(self) -> None: # A simple file. self.make_file("fooey.boo", "Hello there") assert self.file_text("fooey.boo") == "Hello there" # A file in a sub-directory self.make_file("sub/another.txt", "Another") assert self.file_text("sub/another.txt") == "Another" # A second file in that sub-directory self.make_file("sub/second.txt", "Second") assert self.file_text("sub/second.txt") == "Second" # A deeper directory self.make_file("sub/deeper/evenmore/third.txt") assert self.file_text("sub/deeper/evenmore/third.txt") == "" # Dedenting self.make_file("dedented.txt", """\ Hello Bye """) assert self.file_text("dedented.txt") == "Hello\nBye\n" def test_make_file_newline(self) -> None: self.make_file("unix.txt", "Hello\n") assert self.file_text("unix.txt") == "Hello\n" self.make_file("dos.txt", "Hello\n", newline="\r\n") assert self.file_text("dos.txt") == "Hello\r\n" self.make_file("mac.txt", "Hello\n", newline="\r") assert self.file_text("mac.txt") == "Hello\r" def test_make_file_non_ascii(self) -> None: self.make_file("unicode.txt", "tablo: ยซฯ„ฮฑะ‘โ„“ฯƒยป") with open("unicode.txt", "rb") as f: text = f.read() assert text == b"tablo: \xc2\xab\xcf\x84\xce\xb1\xd0\x91\xe2\x84\x93\xcf\x83\xc2\xbb" def test_make_bytes_file(self) -> None: self.make_file("binary.dat", bytes=b"\x99\x33\x66hello\0") with open("binary.dat", "rb") as f: data = f.read() assert data == b"\x99\x33\x66hello\0" class RestoreModulessMixinTest(TempDirMixin, RestoreModulesMixin): """Tests of SysPathModulesMixin.""" @pytest.mark.parametrize("val", [17, 42]) def test_module_independence(self, val: int) -> None: self.make_file("xyzzy.py", f"A = {val}") import xyzzy # pylint: disable=import-error assert xyzzy.A == val def test_cleanup_and_reimport(self) -> None: self.make_file("xyzzy.py", "A = 17") xyzzy = import_local_file("xyzzy") assert xyzzy.A == 17 self.clean_local_file_imports() self.make_file("xyzzy.py", "A = 42") xyzzy = import_local_file("xyzzy") assert xyzzy.A == 42 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_numbits.py0000644000175100001770000001331700000000000020370 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.numbits""" from __future__ import annotations import json import sqlite3 from typing import Iterable from hypothesis import example, given, settings from hypothesis.strategies import sets, integers from coverage import env from coverage.numbits import ( 
nums_to_numbits, numbits_to_nums, numbits_union, numbits_intersection, numbits_any_intersection, num_in_numbits, register_sqlite_functions, ) from tests.coveragetest import CoverageTest # Hypothesis-generated line number data line_numbers = integers(min_value=1, max_value=9999) line_number_sets = sets(line_numbers) # When coverage-testing ourselves, hypothesis complains about a test being # flaky because the first run exceeds the deadline (and fails), and the second # run succeeds. Disable the deadline if we are coverage-testing. default_settings = settings(deadline=400) # milliseconds if env.METACOV: default_settings = settings(default_settings, deadline=None) def good_numbits(numbits: bytes) -> None: """Assert that numbits is good.""" # It shouldn't end with a zero byte, that should have been trimmed off. assert (not numbits) or (numbits[-1] != 0) class NumbitsOpTest(CoverageTest): """Tests of the numbits operations in numbits.py.""" run_in_temp_dir = False @given(line_number_sets) @settings(default_settings) def test_conversion(self, nums: Iterable[int]) -> None: numbits = nums_to_numbits(nums) good_numbits(numbits) nums2 = numbits_to_nums(numbits) assert nums == set(nums2) @given(line_number_sets, line_number_sets) @settings(default_settings) def test_union(self, nums1: set[int], nums2: set[int]) -> None: nb1 = nums_to_numbits(nums1) good_numbits(nb1) nb2 = nums_to_numbits(nums2) good_numbits(nb2) nbu = numbits_union(nb1, nb2) good_numbits(nbu) union = numbits_to_nums(nbu) assert nums1 | nums2 == set(union) @given(line_number_sets, line_number_sets) @settings(default_settings) def test_intersection(self, nums1: set[int], nums2: set[int]) -> None: nb1 = nums_to_numbits(nums1) good_numbits(nb1) nb2 = nums_to_numbits(nums2) good_numbits(nb2) nbi = numbits_intersection(nb1, nb2) good_numbits(nbi) intersection = numbits_to_nums(nbi) assert nums1 & nums2 == set(intersection) @given(line_number_sets, line_number_sets) @settings(default_settings) def test_any_intersection(self, nums1: set[int], nums2: set[int]) -> None: nb1 = nums_to_numbits(nums1) good_numbits(nb1) nb2 = nums_to_numbits(nums2) good_numbits(nb2) inter = numbits_any_intersection(nb1, nb2) expect = bool(nums1 & nums2) assert expect == bool(inter) @given(line_numbers, line_number_sets) @settings(default_settings) @example(152, {144}) def test_num_in_numbits(self, num: int, nums: Iterable[int]) -> None: numbits = nums_to_numbits(nums) good_numbits(numbits) is_in = num_in_numbits(num, numbits) assert (num in nums) == is_in class NumbitsSqliteFunctionTest(CoverageTest): """Tests of the SQLite integration for numbits functions.""" run_in_temp_dir = False def setUp(self) -> None: super().setUp() conn = sqlite3.connect(":memory:") register_sqlite_functions(conn) self.cursor = conn.cursor() self.cursor.execute("create table data (id int, numbits blob)") self.cursor.executemany( "insert into data (id, numbits) values (?, ?)", [ (i, nums_to_numbits(range(i, 100, i))) for i in range(1, 11) ], ) self.addCleanup(self.cursor.close) def test_numbits_union(self) -> None: res = self.cursor.execute( "select numbits_union(" + "(select numbits from data where id = 7)," + "(select numbits from data where id = 9)" + ")", ) expected = [ 7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49, 54, 56, 63, 70, 72, 77, 81, 84, 90, 91, 98, 99, ] answer = numbits_to_nums(list(res)[0][0]) assert expected == answer def test_numbits_intersection(self) -> None: res = self.cursor.execute( "select numbits_intersection(" + "(select numbits from data where id = 7)," + 
"(select numbits from data where id = 9)" + ")", ) answer = numbits_to_nums(list(res)[0][0]) assert [63] == answer def test_numbits_any_intersection(self) -> None: res = self.cursor.execute( "select numbits_any_intersection(?, ?)", (nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5])), ) answer = [any_inter for (any_inter,) in res] assert [1] == answer res = self.cursor.execute( "select numbits_any_intersection(?, ?)", (nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9])), ) answer = [any_inter for (any_inter,) in res] assert [0] == answer def test_num_in_numbits(self) -> None: res = self.cursor.execute("select id, num_in_numbits(12, numbits) from data order by id") answer = [is_in for (id, is_in) in res] assert [1, 1, 1, 1, 0, 1, 0, 0, 0, 0] == answer def test_numbits_to_nums(self) -> None: res = self.cursor.execute("select numbits_to_nums(?)", [nums_to_numbits([1, 2, 3])]) assert [1, 2, 3] == json.loads(res.fetchone()[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_oddball.py0000644000175100001770000005420400000000000020310 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Oddball cases for testing coverage.py""" from __future__ import annotations import os.path import re import sys from flaky import flaky import pytest import coverage from coverage import env from coverage.data import sorted_lines from coverage.files import abs_file from coverage.misc import import_local_file from tests import osinfo, testenv from tests.coveragetest import CoverageTest from tests.helpers import swallow_warnings class ThreadingTest(CoverageTest): """Tests of the threading support.""" def test_threading(self) -> None: self.check_coverage("""\ import threading def fromMainThread(): return "called from main thread" def fromOtherThread(): return "called from other thread" def neverCalled(): return "no one calls me" other = threading.Thread(target=fromOtherThread) other.start() fromMainThread() other.join() """, [1, 3, 4, 6, 7, 9, 10, 12, 13, 14, 15], "10", ) def test_thread_run(self) -> None: self.check_coverage("""\ import threading class TestThread(threading.Thread): def run(self): self.a = 5 self.do_work() self.a = 7 def do_work(self): self.a = 10 thd = TestThread() thd.start() thd.join() """, [1, 3, 4, 5, 6, 7, 9, 10, 12, 13, 14], "", ) class RecursionTest(CoverageTest): """Check what happens when recursive code gets near limits.""" def test_short_recursion(self) -> None: # We can definitely get close to 500 stack frames. self.check_coverage("""\ def recur(n): if n == 0: return 0 else: return recur(n-1)+1 recur(495) # We can get at least this many stack frames. i = 8 # and this line will be traced """, [1, 2, 3, 5, 7, 8], "", ) def test_long_recursion(self) -> None: # We can't finish a very deep recursion, but we don't crash. with pytest.raises(RuntimeError): with swallow_warnings("Trace function changed, data is likely wrong: None"): self.check_coverage("""\ def recur(n): if n == 0: return 0 else: return recur(n-1)+1 recur(100000) # This is definitely too many frames. """, [1, 2, 3, 5, 7], "", ) def test_long_recursion_recovery(self) -> None: # Test the core of bug 93: https://github.com/nedbat/coveragepy/issues/93 # When recovering from a stack overflow, the Python trace function is # disabled, but the C trace function is not. 
So if we're using a # Python trace function, we won't trace anything after the stack # overflow, and there should be a warning about it. If we're using # the C trace function, only line 3 will be missing, and all else # will be traced. self.make_file("recur.py", """\ import sys #; sys.setrecursionlimit(70) def recur(n): if n == 0: return 0 # never hit else: return recur(n-1)+1 try: recur(100000) # This is definitely too many frames. except RuntimeError: i = 11 i = 12 """) cov = coverage.Coverage() with swallow_warnings("Trace function changed, data is likely wrong: None"): self.start_import_stop(cov, "recur") assert cov._collector is not None pytrace = (cov._collector.tracer_name() == "PyTracer") expected_missing = [4] if pytrace: # pragma: no metacov expected_missing += [10, 11, 12] _, statements, missing, _ = cov.analysis("recur.py") assert statements == [1, 2, 3, 4, 6, 8, 9, 10, 11, 12] assert expected_missing == missing # Get a warning about the stackoverflow effect on the tracing function. if pytrace and not env.METACOV: # pragma: no metacov assert len(cov._warnings) == 1 assert re.fullmatch( r"Trace function changed, data is likely wrong: None != " + r">", cov._warnings[0], ) else: assert not cov._warnings class MemoryLeakTest(CoverageTest): """Attempt the impossible: test that memory doesn't leak. Note: this test is truly unusual, and has had a colorful history. See for example: https://github.com/nedbat/coveragepy/issues/186 It may still fail occasionally, especially on PyPy. """ @flaky # type: ignore[misc] @pytest.mark.skipif(not testenv.C_TRACER, reason="Only the C tracer has refcounting issues") def test_for_leaks(self) -> None: # Our original bad memory leak only happened on line numbers > 255, so # make a code object with more lines than that. Ugly string mumbo # jumbo to get 300 blank lines at the beginning.. code = """\ # blank line\n""" * 300 + """\ def once(x): # line 301 if x % 100 == 0: raise Exception("100!") elif x % 2: return 10 else: # line 306 return 11 i = 0 # Portable loop without alloc'ing memory. while i < ITERS: try: once(i) except: pass i += 1 # line 315 """ lines = list(range(301, 315)) lines.remove(306) # Line 306 is the "else". # This is a non-deterministic test, so try it a few times, and fail it # only if it predominantly fails. fails = 0 for _ in range(10): ram_0 = osinfo.process_ram() self.check_coverage(code.replace("ITERS", "10"), lines, "") ram_10 = osinfo.process_ram() self.check_coverage(code.replace("ITERS", "10000"), lines, "") ram_10k = osinfo.process_ram() # Running the code 10k times shouldn't grow the ram much more than # running it 10 times. ram_growth = (ram_10k - ram_10) - (ram_10 - ram_0) if ram_growth > 100000: fails += 1 # pragma: only failure if fails > 8: pytest.fail("RAM grew by %d" % (ram_growth)) # pragma: only failure class MemoryFumblingTest(CoverageTest): """Test that we properly manage the None refcount.""" @pytest.mark.skipif(not testenv.C_TRACER, reason="Only the C tracer has refcounting issues") def test_dropping_none(self) -> None: # pragma: not covered # TODO: Mark this so it will only be run sometimes. pytest.skip("This is too expensive for now (30s)") # Start and stop coverage thousands of times to flush out bad # reference counting, maybe. 
_ = "this is just here to put a type comment on" # type: ignore[unreachable] self.make_file("the_code.py", """\ import random def f(): if random.random() > .5: x = 1 else: x = 2 """) self.make_file("main.py", """\ import coverage import sys from the_code import f for i in range(10000): cov = coverage.Coverage(branch=True) cov.start() f() cov.stop() cov.erase() print("Final None refcount: %d" % (sys.getrefcount(None))) """) status, out = self.run_command_status("python main.py") assert status == 0 assert "Final None refcount" in out assert "Fatal" not in out class PyexpatTest(CoverageTest): """Pyexpat screws up tracing. Make sure we've counter-defended properly.""" def test_pyexpat(self) -> None: # pyexpat calls the trace function explicitly (inexplicably), and does # it wrong for exceptions. Parsing a DOCTYPE for some reason throws # an exception internally, and triggers its wrong behavior. This test # checks that our fake PyTrace_RETURN hack in tracer.c works. It will # also detect if the pyexpat bug is fixed unbeknownst to us, meaning # we'd see two RETURNs where there should only be one. self.make_file("trydom.py", """\ import xml.dom.minidom XML = '''\\ ''' def foo(): dom = xml.dom.minidom.parseString(XML) assert len(dom.getElementsByTagName('child')) == 2 a = 11 foo() """) self.make_file("outer.py", "\n"*100 + "import trydom\na = 102\n") cov = coverage.Coverage() cov.erase() # Import the Python file, executing it. self.start_import_stop(cov, "outer") _, statements, missing, _ = cov.analysis("trydom.py") assert statements == [1, 3, 8, 9, 10, 11, 13] assert missing == [] _, statements, missing, _ = cov.analysis("outer.py") assert statements == [101, 102] assert missing == [] # Make sure pyexpat isn't recorded as a source file. # https://github.com/nedbat/coveragepy/issues/419 files = cov.get_data().measured_files() msg = f"Pyexpat.c is in the measured files!: {files!r}:" assert not any(f.endswith("pyexpat.c") for f in files), msg class ExceptionTest(CoverageTest): """I suspect different versions of Python deal with exceptions differently in the trace function. """ def test_exception(self) -> None: # Python 2.3's trace function doesn't get called with "return" if the # scope is exiting due to an exception. This confounds our trace # function which relies on scope announcements to track which files to # trace. # # This test is designed to sniff this out. Each function in the call # stack is in a different file, to try to trip up the tracer. Each # file has active lines in a different range so we'll see if the lines # get attributed to the wrong file. self.make_file("oops.py", """\ def oops(args): a = 2 raise Exception("oops") a = 4 """) self.make_file("fly.py", "\n"*100 + """\ def fly(calls): a = 2 calls[0](calls[1:]) a = 4 """) self.make_file("catch.py", "\n"*200 + """\ def catch(calls): try: a = 3 calls[0](calls[1:]) a = 5 except: a = 7 """) self.make_file("doit.py", "\n"*300 + """\ def doit(calls): try: calls[0](calls[1:]) except: a = 5 """) # Import all the modules before starting coverage, so the def lines # won't be in all the results. for mod in "oops fly catch doit".split(): import_local_file(mod) # Each run nests the functions differently to get different # combinations of catching exceptions and letting them fly. 
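# ----------------------------------------------------------------------
# A minimal sketch of the data-inspection calls the runs below depend on
# (sorted_lines is the same helper this module imports; "mymod" is a
# hypothetical module to measure).
import coverage
from coverage.data import sorted_lines

cov = coverage.Coverage()
with cov.collect():
    import mymod  # hypothetical
data = cov.get_data()
for path in data.measured_files():       # absolute paths of everything measured
    print(path, sorted_lines(data, path))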
runs = [ ("doit fly oops", { 'doit.py': [302, 303, 304, 305], 'fly.py': [102, 103], 'oops.py': [2, 3], }), ("doit catch oops", { 'doit.py': [302, 303], 'catch.py': [202, 203, 204, 206, 207], 'oops.py': [2, 3], }), ("doit fly catch oops", { 'doit.py': [302, 303], 'fly.py': [102, 103, 104], 'catch.py': [202, 203, 204, 206, 207], 'oops.py': [2, 3], }), ("doit catch fly oops", { 'doit.py': [302, 303], 'catch.py': [202, 203, 204, 206, 207], 'fly.py': [102, 103], 'oops.py': [2, 3], }), ] for callnames, lines_expected in runs: # Make the list of functions we'll call for this test. callnames_list = callnames.split() calls = [getattr(sys.modules[cn], cn) for cn in callnames_list] cov = coverage.Coverage() with cov.collect(): # Call our list of functions: invoke the first, with the rest as # an argument. calls[0](calls[1:]) # Clean the line data and compare to expected results. # The file names are absolute, so keep just the base. clean_lines = {} data = cov.get_data() for callname in callnames_list: filename = callname + ".py" clean_lines[filename] = sorted_lines(data, abs_file(filename)) assert clean_lines == lines_expected class DoctestTest(CoverageTest): """Tests invoked with doctest should measure properly.""" def test_doctest(self) -> None: # Doctests used to be traced, with their line numbers credited to the # file they were in. Below, one of the doctests has four lines (1-4), # which would incorrectly claim that lines 1-4 of the file were # executed. In this file, line 2 is not executed. self.make_file("the_doctest.py", '''\ if "x" in "abc": print("hello") def return_arg_or_void(arg): """If is None, return "Void"; otherwise return >>> return_arg_or_void(None) 'Void' >>> return_arg_or_void("arg") 'arg' >>> return_arg_or_void("None") 'None' >>> if "x" in "xyz": # line 1 ... if "a" in "aswed": # line 2 ... if "a" in "abc": # line 3 ... return_arg_or_void(12) # line 4 12 """ if arg is None: return "Void" else: return arg import doctest, sys doctest.testmod(sys.modules[__name__]) # we're not __main__ :( ''') cov = coverage.Coverage() self.start_import_stop(cov, "the_doctest") data = cov.get_data() assert len(data.measured_files()) == 1 lines = sorted_lines(data, data.measured_files().pop()) assert lines == [1, 3, 18, 19, 21, 23, 24] class GettraceTest(CoverageTest): """Tests that we work properly with `sys.gettrace()`.""" def test_round_trip_in_untraced_function(self) -> None: # https://github.com/nedbat/coveragepy/issues/575 self.make_file("main.py", """import sample""") self.make_file("sample.py", """\ from swap import swap_it def doit(): print(3) swap_it() print(5) def doit_soon(): print(7) doit() print(9) print(10) doit_soon() print(12) """) self.make_file("swap.py", """\ import sys def swap_it(): sys.settrace(sys.gettrace()) """) # Use --source=sample to prevent measurement of swap.py. 
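# ----------------------------------------------------------------------
# For reference, the measurement-filtering arguments used throughout
# these tests (all appear in the surrounding code; constructing the
# objects does not start measurement):
import coverage

cov_by_source = coverage.Coverage(source=["sample"])   # measure only this package/directory
cov_by_pattern = coverage.Coverage(include=["./*"])    # measure files matching the patterns
# omit= can also be given at report time, e.g. cov.html_report(omit=["m1.py", "m2.py"])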
cov = coverage.Coverage(source=["sample"]) self.start_import_stop(cov, "main") assert self.stdout() == "10\n7\n3\n5\n9\n12\n" _, statements, missing, _ = cov.analysis("sample.py") assert statements == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] assert missing == [] def test_setting_new_trace_function(self) -> None: # https://github.com/nedbat/coveragepy/issues/436 if testenv.SETTRACE_CORE: missing = "5-7, 13-14" else: missing = "5-7" self.check_coverage('''\ import os.path import sys def tracer(frame, event, arg): filename = os.path.basename(frame.f_code.co_filename) # 5 print(f"{event}: {filename} @ {frame.f_lineno}") # 6 return tracer # 7 def begin(): sys.settrace(tracer) def collect(): t = sys.gettrace() # 13 assert t is tracer, t # 14 def test_unsets_trace() -> None: begin() collect() old = sys.gettrace() test_unsets_trace() sys.settrace(old) a = 21 b = 22 ''', lines=[1, 2, 4, 5, 6, 7, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 23, 24], missing=missing, ) assert self.last_module_name is not None out = self.stdout().replace(self.last_module_name, "coverage_test") expected = ( "call: coverage_test.py @ 12\n" + "line: coverage_test.py @ 13\n" + "line: coverage_test.py @ 14\n" + "return: coverage_test.py @ 14\n" ) assert expected == out @pytest.mark.expensive @pytest.mark.skipif(env.METACOV, reason="Can't set trace functions during meta-coverage") def test_atexit_gettrace(self) -> None: # This is not a test of coverage at all, but of our understanding # of this edge-case behavior in various Pythons. self.make_file("atexit_gettrace.py", """\ import atexit, sys def trace_function(frame, event, arg): return trace_function sys.settrace(trace_function) def show_trace_function(): tfn = sys.gettrace() if tfn is not None: tfn = tfn.__name__ print(tfn) atexit.register(show_trace_function) # This will show what the trace function is at the end of the program. """) status, out = self.run_command_status("python atexit_gettrace.py") assert status == 0 if env.PYPY and env.PYPYVERSION >= (5, 4): # Newer PyPy clears the trace function before atexit runs. assert out == "None\n" else: # Other Pythons leave the trace function in place. assert out == "trace_function\n" class ExecTest(CoverageTest): """Tests of exec.""" def test_correct_filename(self) -> None: # https://github.com/nedbat/coveragepy/issues/380 # Bug was that exec'd files would have their lines attributed to the # calling file. Make two files, both with ~30 lines, but no lines in # common. Line 30 in to_exec.py was recorded as line 30 in main.py, # but now it's fixed. 
:) self.make_file("to_exec.py", """\ \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n print("var is {}".format(var)) # line 31 """) self.make_file("main.py", """\ namespace = {'var': 17} with open("to_exec.py") as to_exec_py: code = compile(to_exec_py.read(), 'to_exec.py', 'exec') exec(code, globals(), namespace) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n print("done") # line 35 """) cov = coverage.Coverage() self.start_import_stop(cov, "main") _, statements, missing, _ = cov.analysis("main.py") assert statements == [1, 2, 3, 4, 35] assert missing == [] _, statements, missing, _ = cov.analysis("to_exec.py") assert statements == [31] assert missing == [] def test_unencodable_filename(self) -> None: # https://github.com/nedbat/coveragepy/issues/891 self.make_file("bug891.py", r"""exec(compile("pass", "\udcff.py", "exec"))""") cov = coverage.Coverage() self.start_import_stop(cov, "bug891") # Saving would fail trying to encode \udcff.py cov.save() files = [os.path.basename(f) for f in cov.get_data().measured_files()] assert "bug891.py" in files class MockingProtectionTest(CoverageTest): """Tests about protecting ourselves from aggressive mocking. https://github.com/nedbat/coveragepy/issues/416 """ def test_os_path_exists(self) -> None: # To see if this test still detects the problem, change isolate_module # in misc.py to simply return its argument. It should fail with a # StopIteration error. self.make_file("bug416.py", """\ import os.path from unittest import mock @mock.patch('os.path.exists') def test_path_exists(mock_exists): mock_exists.side_effect = [17] print("in test") import bug416a print(bug416a.foo) print(os.path.exists(".")) test_path_exists() """) self.make_file("bug416a.py", """\ print("bug416a.py") foo = 23 """) import py_compile py_compile.compile("bug416a.py") out = self.run_command("coverage run bug416.py") assert out == "in test\nbug416a.py\n23\n17\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_parser.py0000644000175100001770000007623600000000000020214 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.py's code parsing.""" from __future__ import annotations import textwrap import pytest from coverage import env from coverage.exceptions import NotPython from coverage.parser import PythonParser from tests.coveragetest import CoverageTest from tests.helpers import arcz_to_arcs, xfail_pypy38 class PythonParserTestBase(CoverageTest): """Tests for coverage.py's Python code parsing.""" run_in_temp_dir = False def parse_text(self, text: str, exclude: str = "nocover") -> PythonParser: """Parse `text` as source, and return the `PythonParser` used.""" text = textwrap.dedent(text) parser = PythonParser(text=text, exclude=exclude) parser.parse_source() return parser class PythonParserTest(PythonParserTestBase): """Tests of coverage.parser.""" def test_exit_counts(self) -> None: parser = self.parse_text("""\ # check some basic branch counting class Foo: def foo(self, a): if a: return 5 else: return 7 class Bar: pass """) assert parser.exit_counts() == { 2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1, } def test_generator_exit_counts(self) -> None: # https://github.com/nedbat/coveragepy/issues/324 parser = self.parse_text("""\ def gen(input): for n in inp: yield (i * 2 for i in range(n)) list(gen([1,2,3])) """) assert 
parser.exit_counts() == { 1:1, # def -> list 2:2, # for -> yield; for -> exit 3:2, # yield -> for; genexp exit 5:1, # list -> exit } def test_try_except(self) -> None: parser = self.parse_text("""\ try: a = 2 except ValueError: a = 4 except ZeroDivideError: a = 6 except: a = 8 b = 9 """) assert parser.exit_counts() == { 1: 1, 2:1, 3:2, 4:1, 5:2, 6:1, 7:1, 8:1, 9:1, } def test_excluded_classes(self) -> None: parser = self.parse_text("""\ class Foo: def __init__(self): pass if len([]): # nocover class Bar: pass """) assert parser.exit_counts() == { 1:0, 2:1, 3:1, } def test_missing_branch_to_excluded_code(self) -> None: parser = self.parse_text("""\ if fooey: a = 2 else: # nocover a = 4 b = 5 """) assert parser.exit_counts() == { 1:1, 2:1, 5:1 } parser = self.parse_text("""\ def foo(): if fooey: a = 3 else: a = 5 b = 6 """) assert parser.exit_counts() == { 1:1, 2:2, 3:1, 5:1, 6:1 } parser = self.parse_text("""\ def foo(): if fooey: a = 3 else: # nocover a = 5 b = 6 """) assert parser.exit_counts() == { 1:1, 2:1, 3:1, 6:1 } def test_indentation_error(self) -> None: msg = ( "Couldn't parse '' as Python source: " + "'unindent does not match any outer indentation level.*' at line 3" ) with pytest.raises(NotPython, match=msg): _ = self.parse_text("""\ 0 spaces 2 1 """) def test_token_error(self) -> None: submsgs = [ r"EOF in multi-line string", # before 3.12.0b1 r"unterminated triple-quoted string literal .detected at line 1.", # after 3.12.0b1 ] msg = ( r"Couldn't parse '' as Python source: '" + r"(" + "|".join(submsgs) + ")" + r"' at line 1" ) with pytest.raises(NotPython, match=msg): _ = self.parse_text("'''") def test_empty_decorated_function(self) -> None: parser = self.parse_text("""\ def decorator(func): return func @decorator def foo(self): '''Docstring''' @decorator def bar(self): pass """) expected_statements = {1, 2, 4, 5, 8, 9, 10} expected_arcs = set(arcz_to_arcs(".1 14 45 58 89 9. .2 2. -8A A-8")) expected_exits = {1: 1, 2: 1, 4: 1, 5: 1, 8: 1, 9: 1, 10: 1} if env.PYBEHAVIOR.docstring_only_function: # 3.7 changed how functions with only docstrings are numbered. 
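# For reference: arcz_to_arcs decodes space-separated character pairs, where "."
# means -1, digits stand for themselves, letters continue from A=10, and a leading
# "-" negates the character after it.  So "-46 6-4" becomes [(-4, 6), (6, -4)];
# negative numbers mark arcs into and out of a code object rather than line jumps.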
expected_arcs.update(set(arcz_to_arcs("-46 6-4"))) expected_exits.update({6: 1}) if env.PYBEHAVIOR.trace_decorator_line_again: expected_arcs.update(set(arcz_to_arcs("54 98"))) expected_exits.update({9: 2, 5: 2}) assert expected_statements == parser.statements assert expected_arcs == parser.arcs() assert expected_exits == parser.exit_counts() def test_fuzzed_double_parse(self) -> None: # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=50381 # The second parse used to raise `TypeError: 'NoneType' object is not iterable` msg = ( r"(EOF in multi-line statement)" # before 3.12.0b1 + r"|(unmatched ']')" # after 3.12.0b1 ) with pytest.raises(NotPython, match=msg): self.parse_text("]") with pytest.raises(NotPython, match=msg): self.parse_text("]") class ExclusionParserTest(PythonParserTestBase): """Tests for the exclusion code in PythonParser.""" def test_simple(self) -> None: parser = self.parse_text("""\ a = 1; b = 2 if len([]): a = 4 # nocover """, ) assert parser.statements == {1,3} def test_excluding_if_suite(self) -> None: parser = self.parse_text("""\ a = 1; b = 2 if len([]): # nocover a = 4 b = 5 c = 6 assert a == 1 and b == 2 """, ) assert parser.statements == {1,7} def test_excluding_if_but_not_else_suite(self) -> None: parser = self.parse_text("""\ a = 1; b = 2 if len([]): # nocover a = 4 b = 5 c = 6 else: a = 8 b = 9 assert a == 8 and b == 9 """, ) assert parser.statements == {1,8,9,10} def test_excluding_else_suite(self) -> None: parser = self.parse_text("""\ a = 1; b = 2 if 1==1: a = 4 b = 5 c = 6 else: # nocover a = 8 b = 9 assert a == 4 and b == 5 and c == 6 """, ) assert parser.statements == {1,3,4,5,6,10} parser = self.parse_text("""\ a = 1; b = 2 if 1==1: a = 4 b = 5 c = 6 # Lots of comments to confuse the else handler. # more. else: # nocover # Comments here too. 
a = 8 b = 9 assert a == 4 and b == 5 and c == 6 """, ) assert parser.statements == {1,3,4,5,6,17} def test_excluding_oneline_if(self) -> None: parser = self.parse_text("""\ def foo(): a = 2 if len([]): x = 3 # nocover b = 4 foo() """, ) assert parser.statements == {1,2,4,6} def test_excluding_a_colon_not_a_suite(self) -> None: parser = self.parse_text("""\ def foo(): l = list(range(10)) a = l[:3] # nocover b = 4 foo() """, ) assert parser.statements == {1,2,4,6} def test_excluding_for_suite(self) -> None: parser = self.parse_text("""\ a = 0 for i in [1,2,3,4,5]: # nocover a += i assert a == 15 """, ) assert parser.statements == {1,4} parser = self.parse_text("""\ a = 0 for i in [1, 2,3,4, 5]: # nocover a += i assert a == 15 """, ) assert parser.statements == {1,6} parser = self.parse_text("""\ a = 0 for i in [1,2,3,4,5 ]: # nocover a += i break a = 99 assert a == 1 """, ) assert parser.statements == {1,7} def test_excluding_for_else(self) -> None: parser = self.parse_text("""\ a = 0 for i in range(5): a += i+1 break else: # nocover a = 123 assert a == 1 """, ) assert parser.statements == {1,2,3,4,7} def test_excluding_while(self) -> None: parser = self.parse_text("""\ a = 3; b = 0 while a*b: # nocover b += 1 break assert a == 3 and b == 0 """, ) assert parser.statements == {1,5} parser = self.parse_text("""\ a = 3; b = 0 while ( a*b ): # nocover b += 1 break assert a == 3 and b == 0 """, ) assert parser.statements == {1,7} def test_excluding_while_else(self) -> None: parser = self.parse_text("""\ a = 3; b = 0 while a: b += 1 break else: # nocover b = 123 assert a == 3 and b == 1 """, ) assert parser.statements == {1,2,3,4,7} def test_excluding_try_except(self) -> None: parser = self.parse_text("""\ a = 0 try: a = 1 except: # nocover a = 99 assert a == 1 """, ) assert parser.statements == {1,2,3,6} parser = self.parse_text("""\ a = 0 try: a = 1 raise Exception("foo") except: a = 99 assert a == 99 """, ) assert parser.statements == {1,2,3,4,5,6,7} parser = self.parse_text("""\ a = 0 try: a = 1 raise Exception("foo") except ImportError: # nocover a = 99 except: a = 123 assert a == 123 """, ) assert parser.statements == {1,2,3,4,7,8,9} def test_excluding_if_pass(self) -> None: # From a comment on the coverage.py page by Michael McNeil Forbes: parser = self.parse_text("""\ def f(): if False: # pragma: nocover pass # This line still reported as missing if False: # pragma: nocover x = 1 # Now it is skipped. 
f() """, ) assert parser.statements == {1,7} def test_excluding_function(self) -> None: parser = self.parse_text("""\ def fn(foo): # nocover a = 1 b = 2 c = 3 x = 1 assert x == 1 """, ) assert parser.statements == {6,7} parser = self.parse_text("""\ a = 0 def very_long_function_to_exclude_name(very_long_argument1, very_long_argument2): pass assert a == 0 """, exclude="function_to_exclude", ) assert parser.statements == {1,5} parser = self.parse_text("""\ a = 0 def very_long_function_to_exclude_name( very_long_argument1, very_long_argument2 ): pass assert a == 0 """, exclude="function_to_exclude", ) assert parser.statements == {1,7} parser = self.parse_text("""\ def my_func( super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass """, exclude="my_func", ) assert parser.statements == set() parser = self.parse_text("""\ def my_func( super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass """, exclude="my_func_2", ) assert parser.statements == {1,5} parser = self.parse_text("""\ def my_func ( super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass def my_func_2 (super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass """, exclude="my_func_2", ) assert parser.statements == {1,5} parser = self.parse_text("""\ def my_func ( super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass def my_func_2 (super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass """, exclude="my_func", ) assert parser.statements == set() parser = self.parse_text("""\ def my_func \ ( super_long_input_argument_0=0, super_long_input_argument_1=1 ): pass def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass """, exclude="my_func_2", ) assert parser.statements == {1,5} parser = self.parse_text("""\ def my_func \ ( super_long_input_argument_0=0, super_long_input_argument_1=1 ): pass def my_func_2(super_long_input_argument_0=0, super_long_input_argument_1=1, super_long_input_argument_2=2): pass """, exclude="my_func", ) assert parser.statements == set() def test_excluding_bug1713(self) -> None: if env.PYVERSION >= (3, 10): parser = self.parse_text("""\ print("1") def hello_3(a): # pragma: nocover match a: case ("5" | "6"): print("7") case "8": print("9") print("11") """, ) assert parser.statements == {1, 11} parser = self.parse_text("""\ print("1") def hello_3(a): # nocover if ("4" or "5"): print("6") else: print("8") print("10") """, ) assert parser.statements == {1, 10} parser = self.parse_text("""\ print(1) def func(a, b): if a == 4: # nocover func5() if b: print(7) func8() print(10) """, ) assert parser.statements == {1, 3, 10} parser = self.parse_text("""\ class Foo: # pragma: nocover def greet(self): print("hello world") """, ) assert parser.statements == set() def test_excluding_method(self) -> None: parser = self.parse_text("""\ class Fooey: def __init__(self): self.a = 1 def foo(self): # nocover return self.a x = Fooey() assert x.a == 1 """, ) assert parser.statements == {1,2,3,8,9} parser = self.parse_text("""\ class Fooey: def __init__(self): self.a = 1 def very_long_method_to_exclude_name( 
very_long_argument1, very_long_argument2 ): pass x = Fooey() assert x.a == 1 """, exclude="method_to_exclude", ) assert parser.statements == {1,2,3,11,12} def test_excluding_class(self) -> None: parser = self.parse_text("""\ class Fooey: # nocover def __init__(self): self.a = 1 def foo(self): return self.a x = 1 assert x == 1 """, ) assert parser.statements == {8,9} def test_excludes_non_ascii(self) -> None: parser = self.parse_text("""\ # coding: utf-8 a = 1; b = 2 if len([]): a = 5 # โœ˜cover """, exclude="โœ˜cover", ) assert parser.statements == {2, 4} def test_formfeed(self) -> None: # https://github.com/nedbat/coveragepy/issues/461 parser = self.parse_text("""\ x = 1 assert len([]) == 0, ( "This won't happen %s" % ("hello",) ) \f x = 6 assert len([]) == 0, ( "This won't happen %s" % ("hello",) ) """, exclude="assert", ) assert parser.statements == {1, 6} @xfail_pypy38 def test_decorator_pragmas(self) -> None: parser = self.parse_text("""\ # 1 @foo(3) # nocover @bar def func(x, y=5): return 6 class Foo: # this is the only statement. '''9''' @foo # nocover def __init__(self): '''12''' return 13 @foo( # nocover 16, 17, ) def meth(self): return 20 @foo( # nocover 23 ) def func(x=25): return 26 """) raw_statements = {3, 4, 5, 6, 8, 9, 10, 11, 13, 15, 16, 17, 19, 20, 22, 23, 25, 26} assert parser.raw_statements == raw_statements assert parser.statements == {8} @xfail_pypy38 def test_decorator_pragmas_with_colons(self) -> None: # A colon in a decorator expression would confuse the parser, # ending the exclusion of the decorated function. parser = self.parse_text("""\ @decorate(X) # nocover @decorate("Hello"[2]) def f(): x = 4 @decorate(X) # nocover @decorate("Hello"[:7]) def g(): x = 9 """) raw_statements = {1, 2, 3, 4, 6, 7, 8, 9} assert parser.raw_statements == raw_statements assert parser.statements == set() def test_class_decorator_pragmas(self) -> None: parser = self.parse_text("""\ class Foo(object): def __init__(self): self.x = 3 @foo # nocover class Bar(object): def __init__(self): self.x = 8 """) assert parser.raw_statements == {1, 2, 3, 5, 6, 7, 8} assert parser.statements == {1, 2, 3} class ParserMissingArcDescriptionTest(PythonParserTestBase): """Tests for PythonParser.missing_arc_description.""" def test_missing_arc_description(self) -> None: # This code is never run, so the actual values don't matter. 
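# A negative end point in these arcs means "out of a code object" rather than "to
# a line": (6, -5) below, for example, is the arc from line 6 back out of func5,
# whose definition starts on line 5.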
parser = self.parse_text("""\ if x: print(2) print(3) def func5(): for x in range(6): if x == 7: break def func10(): while something(11): thing(12) more_stuff(13) """) expected = "line 1 didn't jump to line 2, because the condition on line 1 was never true" assert expected == parser.missing_arc_description(1, 2) expected = "line 1 didn't jump to line 3, because the condition on line 1 was never false" assert expected == parser.missing_arc_description(1, 3) expected = ( "line 6 didn't return from function 'func5', " + "because the loop on line 6 didn't complete" ) assert expected == parser.missing_arc_description(6, -5) expected = "line 6 didn't jump to line 7, because the loop on line 6 never started" assert expected == parser.missing_arc_description(6, 7) expected = "line 11 didn't jump to line 12, because the condition on line 11 was never true" assert expected == parser.missing_arc_description(11, 12) expected = ( "line 11 didn't jump to line 13, " + "because the condition on line 11 was never false" ) assert expected == parser.missing_arc_description(11, 13) def test_missing_arc_descriptions_for_small_callables(self) -> None: parser = self.parse_text("""\ callables = [ lambda: 2, (x for x in range(3)), {x:1 for x in range(4)}, {x for x in range(5)}, ] x = 7 """) expected = "line 2 didn't finish the lambda on line 2" assert expected == parser.missing_arc_description(2, -2) expected = "line 3 didn't finish the generator expression on line 3" assert expected == parser.missing_arc_description(3, -3) if env.PYBEHAVIOR.comprehensions_are_functions: expected = "line 4 didn't finish the dictionary comprehension on line 4" assert expected == parser.missing_arc_description(4, -4) expected = "line 5 didn't finish the set comprehension on line 5" assert expected == parser.missing_arc_description(5, -5) def test_missing_arc_descriptions_for_exceptions(self) -> None: parser = self.parse_text("""\ try: pass except ZeroDivideError: print("whoops") except ValueError: print("yikes") """) expected = ( "line 3 didn't jump to line 4, " + "because the exception caught by line 3 didn't happen" ) assert expected == parser.missing_arc_description(3, 4) expected = ( "line 5 didn't jump to line 6, " + "because the exception caught by line 5 didn't happen" ) assert expected == parser.missing_arc_description(5, 6) def test_missing_arc_descriptions_for_finally(self) -> None: parser = self.parse_text("""\ def function(): for i in range(2): try: if something(4): break elif something(6): x = 7 else: if something(9): continue else: continue if also_this(13): return 14 else: raise Exception(16) finally: this_thing(18) that_thing(19) """) if env.PYBEHAVIOR.finally_jumps_back: expected = "line 18 didn't jump to line 5, because the break on line 5 wasn't executed" assert expected == parser.missing_arc_description(18, 5) expected = "line 5 didn't jump to line 19, because the break on line 5 wasn't executed" assert expected == parser.missing_arc_description(5, 19) expected = ( "line 18 didn't jump to line 10, " + "because the continue on line 10 wasn't executed" ) assert expected == parser.missing_arc_description(18, 10) expected = ( "line 10 didn't jump to line 2, " + "because the continue on line 10 wasn't executed" ) assert expected == parser.missing_arc_description(10, 2) expected = ( "line 18 didn't jump to line 14, " + "because the return on line 14 wasn't executed" ) assert expected == parser.missing_arc_description(18, 14) expected = ( "line 14 didn't return from function 'function', " + "because the return on line 
14 wasn't executed" ) assert expected == parser.missing_arc_description(14, -1) expected = ( "line 18 didn't except from function 'function', " + "because the raise on line 16 wasn't executed" ) assert expected == parser.missing_arc_description(18, -1) else: expected = ( "line 18 didn't jump to line 19, " + "because the break on line 5 wasn't executed" ) assert expected == parser.missing_arc_description(18, 19) expected = ( "line 18 didn't jump to line 2, " + "because the continue on line 10 wasn't executed" + " or " + "the continue on line 12 wasn't executed" ) assert expected == parser.missing_arc_description(18, 2) expected = ( "line 18 didn't except from function 'function', " + "because the raise on line 16 wasn't executed" + " or " + "line 18 didn't return from function 'function', " + "because the return on line 14 wasn't executed" ) assert expected == parser.missing_arc_description(18, -1) def test_missing_arc_descriptions_bug460(self) -> None: parser = self.parse_text("""\ x = 1 d = { 3: lambda: [], 4: lambda: [], } x = 6 """) assert parser.missing_arc_description(2, -3) == "line 3 didn't finish the lambda on line 3" @pytest.mark.skipif(not env.PYBEHAVIOR.match_case, reason="Match-case is new in 3.10") def test_match_case(self) -> None: parser = self.parse_text("""\ match command.split(): case ["go", direction] if direction in "nesw": # 2 match = f"go: {direction}" case ["go", _]: # 4 match = "no go" print(match) # 6 """) assert parser.missing_arc_description(2, 3) == ( "line 2 didn't jump to line 3, because the pattern on line 2 never matched" ) assert parser.missing_arc_description(2, 4) == ( "line 2 didn't jump to line 4, because the pattern on line 2 always matched" ) assert parser.missing_arc_description(4, 6) == ( "line 4 didn't jump to line 6, because the pattern on line 4 always matched" ) class ParserFileTest(CoverageTest): """Tests for coverage.py's code parsing from files.""" def parse_file(self, filename: str) -> PythonParser: """Parse `text` as source, and return the `PythonParser` used.""" parser = PythonParser(filename=filename, exclude="nocover") parser.parse_source() return parser @pytest.mark.parametrize("slug, newline", [ ("unix", "\n"), ("dos", "\r\n"), ("mac", "\r"), ]) def test_line_endings(self, slug: str, newline: str) -> None: text = """\ # check some basic branch counting class Foo: def foo(self, a): if a: return 5 else: return 7 class Bar: pass """ counts = { 2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1 } fname = slug + ".py" self.make_file(fname, text, newline=newline) parser = self.parse_file(fname) assert parser.exit_counts() == counts, f"Wrong for {fname!r}" def test_encoding(self) -> None: self.make_file("encoded.py", """\ coverage = "\xe7\xf6v\xear\xe3g\xe9" """) parser = self.parse_file("encoded.py") assert parser.exit_counts() == {1: 1} def test_missing_line_ending(self) -> None: # Test that the set of statements is the same even if a final # multi-line statement has no final newline. # https://github.com/nedbat/coveragepy/issues/293 self.make_file("normal.py", """\ out, err = subprocess.Popen( [sys.executable, '-c', 'pass'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() """) parser = self.parse_file("normal.py") assert parser.statements == {1} self.make_file("abrupt.py", """\ out, err = subprocess.Popen( [sys.executable, '-c', 'pass'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()""") # no final newline. # Double-check that some test helper wasn't being helpful. 
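# (that is, confirm nothing quietly appended a final newline to abrupt.py)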
with open("abrupt.py") as f: assert f.read()[-1] == ")" parser = self.parse_file("abrupt.py") assert parser.statements == {1} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_phystokens.py0000644000175100001770000001751200000000000021117 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.py's improved tokenizer.""" from __future__ import annotations import os.path import re import sys import textwrap import warnings import pytest from coverage import env from coverage.phystokens import source_token_lines, source_encoding from coverage.python import get_python_source from tests.coveragetest import CoverageTest, TESTS_DIR # A simple program and its token stream. SIMPLE = """\ # yay! def foo(): say('two = %d' % 2) """ SIMPLE_TOKENS = [ [('com', "# yay!")], [('key', 'def'), ('ws', ' '), ('nam', 'foo'), ('op', '('), ('op', ')'), ('op', ':')], [('ws', ' '), ('nam', 'say'), ('op', '('), ('str', "'two = %d'"), ('ws', ' '), ('op', '%'), ('ws', ' '), ('num', '2'), ('op', ')')], ] # Mixed-white-space program, and its token stream. MIXED_WS = """\ def hello(): a="Hello world!" \tb="indented" """ MIXED_WS_TOKENS = [ [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ('op', ')'), ('op', ':')], [('ws', ' '), ('nam', 'a'), ('op', '='), ('str', '"Hello world!"')], [('ws', ' '), ('nam', 'b'), ('op', '='), ('str', '"indented"')], ] # https://github.com/nedbat/coveragepy/issues/822 BUG_822 = """\ print( "Message 1" ) array = [ 1,2,3,4, # 4 numbers \\ 5,6,7 ] # 3 numbers print( "Message 2" ) """ class PhysTokensTest(CoverageTest): """Tests for coverage.py's improved tokenizer.""" run_in_temp_dir = False def check_tokenization(self, source: str) -> None: """Tokenize `source`, then put it back together, should be the same.""" tokenized = "" for line in source_token_lines(source): text = "".join(t for _, t in line) tokenized += text + "\n" # source_token_lines doesn't preserve trailing spaces, so trim all that # before comparing. source = source.replace('\r\n', '\n') source = re.sub(r"(?m)[ \t]+$", "", source) tokenized = re.sub(r"(?m)[ \t]+$", "", tokenized) assert source == tokenized def check_file_tokenization(self, fname: str) -> None: """Use the contents of `fname` for `check_tokenization`.""" self.check_tokenization(get_python_source(fname)) def test_simple(self) -> None: assert list(source_token_lines(SIMPLE)) == SIMPLE_TOKENS self.check_tokenization(SIMPLE) def test_missing_final_newline(self) -> None: # We can tokenize source that is missing the final newline. assert list(source_token_lines(SIMPLE.rstrip())) == SIMPLE_TOKENS def test_tab_indentation(self) -> None: # Mixed tabs and spaces... assert list(source_token_lines(MIXED_WS)) == MIXED_WS_TOKENS def test_bug_822(self) -> None: self.check_tokenization(BUG_822) def test_tokenize_real_file(self) -> None: # Check the tokenization of a real file (large, btw). real_file = os.path.join(TESTS_DIR, "test_coverage.py") self.check_file_tokenization(real_file) @pytest.mark.parametrize("fname", [ "stress_phystoken.tok", "stress_phystoken_dos.tok", ]) def test_stress(self, fname: str) -> None: # Check the tokenization of the stress-test files. # And check that those files haven't been incorrectly "fixed". 
with warnings.catch_warnings(): warnings.filterwarnings("ignore", message=r".*invalid escape sequence") stress = os.path.join(TESTS_DIR, fname) self.check_file_tokenization(stress) with open(stress) as fstress: assert re.search(r"(?m) $", fstress.read()), f"{stress} needs a trailing space." @pytest.mark.skipif(not env.PYBEHAVIOR.soft_keywords, reason="Soft keywords are new in Python 3.10") class SoftKeywordTest(CoverageTest): """Tests the tokenizer handling soft keywords.""" run_in_temp_dir = False def test_soft_keywords_match_case(self) -> None: source = textwrap.dedent("""\ match re.match(something): case ["what"]: match = case("hello") case [_]: match("hello") match another.thing: case 1: pass class case(): pass def match(): global case """) tokens = list(source_token_lines(source)) assert tokens[0][0] == ("key", "match") assert tokens[0][4] == ("nam", "match") assert tokens[1][1] == ("key", "case") assert tokens[2][1] == ("nam", "match") assert tokens[2][5] == ("nam", "case") assert tokens[3][1] == ("key", "case") assert tokens[4][1] == ("nam", "match") assert tokens[5][1] == ("key", "match") assert tokens[6][1] == ("key", "case") assert tokens[9][2] == ("nam", "case") assert tokens[10][2] == ("nam", "match") assert tokens[11][3] == ("nam", "case") @pytest.mark.skipif(sys.version_info < (3, 12), reason="type is a soft keyword in 3.12") def test_soft_keyword_type(self) -> None: source = textwrap.dedent("""\ type Point = tuple[float, float] type(int) """) tokens = list(source_token_lines(source)) assert tokens[0][0] == ("key", "type") assert tokens[1][0] == ("nam", "type") # The default source file encoding. DEF_ENCODING = "utf-8" ENCODING_DECLARATION_SOURCES = [ # Various forms from http://www.python.org/dev/peps/pep-0263/ (1, b"# coding=cp850\n\n", "cp850"), (1, b"# coding=latin-1\n", "iso-8859-1"), (1, b"# coding=iso-latin-1\n", "iso-8859-1"), (1, b"#!/usr/bin/python\n# -*- coding: cp850 -*-\n", "cp850"), (1, b"#!/usr/bin/python\n# vim: set fileencoding=cp850:\n", "cp850"), (1, b"# This Python file uses this encoding: cp850\n", "cp850"), (1, b"# This file uses a different encoding:\n# coding: cp850\n", "cp850"), (1, b"\n# coding=cp850\n\n", "cp850"), (2, b"# -*- coding:cp850 -*-\n# vim: fileencoding=cp850\n", "cp850"), ] class SourceEncodingTest(CoverageTest): """Tests of source_encoding() for detecting encodings.""" run_in_temp_dir = False def test_detect_source_encoding(self) -> None: for _, source, expected in ENCODING_DECLARATION_SOURCES: assert source_encoding(source) == expected, f"Wrong encoding in {source!r}" def test_detect_source_encoding_not_in_comment(self) -> None: # Should not detect anything here source = b'def parse(src, encoding=None):\n pass' assert source_encoding(source) == DEF_ENCODING def test_dont_detect_source_encoding_on_third_line(self) -> None: # A coding declaration doesn't count on the third line. source = b"\n\n# coding=cp850\n\n" assert source_encoding(source) == DEF_ENCODING def test_detect_source_encoding_of_empty_file(self) -> None: # An important edge case. assert source_encoding(b"") == DEF_ENCODING def test_bom(self) -> None: # A BOM means utf-8. source = b"\xEF\xBB\xBFtext = 'hello'\n" assert source_encoding(source) == 'utf-8-sig' def test_bom_with_encoding(self) -> None: source = b"\xEF\xBB\xBF# coding: utf-8\ntext = 'hello'\n" assert source_encoding(source) == 'utf-8-sig' def test_bom_is_wrong(self) -> None: # A BOM with an explicit non-utf8 encoding is an error. 
source = b"\xEF\xBB\xBF# coding: cp850\n" with pytest.raises(SyntaxError, match="encoding problem: utf-8"): source_encoding(source) def test_unknown_encoding(self) -> None: source = b"# coding: klingon\n" with pytest.raises(SyntaxError, match="unknown encoding: klingon"): source_encoding(source) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_plugins.py0000644000175100001770000012610500000000000020370 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for plugins.""" from __future__ import annotations import inspect import io import math import os.path from typing import Any from xml.etree import ElementTree import pytest import coverage from coverage import Coverage from coverage.control import Plugins from coverage.data import line_counts, sorted_lines from coverage.exceptions import CoverageWarning, NoSource, PluginError from coverage.misc import import_local_file from coverage.types import TConfigSectionOut, TLineNo, TPluginConfig import coverage.plugin from tests import testenv from tests.coveragetest import CoverageTest from tests.helpers import CheckUniqueFilenames, swallow_warnings class NullConfig(TPluginConfig): """A plugin configure thing when we don't really need one.""" def get_plugin_options(self, plugin: str) -> TConfigSectionOut: return {} class FakeConfig(TPluginConfig): """A fake config for use in tests.""" def __init__(self, plugin: str, options: dict[str, Any]) -> None: self.plugin = plugin self.options = options self.asked_for: list[str] = [] def get_plugin_options(self, plugin: str) -> TConfigSectionOut: """Just return the options for `plugin` if this is the right module.""" self.asked_for.append(plugin) if plugin == self.plugin: return self.options else: return {} class LoadPluginsTest(CoverageTest): """Test Plugins.load_plugins directly.""" def test_implicit_boolean(self) -> None: self.make_file("plugin1.py", """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): pass def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) config = FakeConfig("plugin1", {}) plugins = Plugins.load_plugins([], config) assert not plugins plugins = Plugins.load_plugins(["plugin1"], config) assert plugins def test_importing_and_configuring(self) -> None: self.make_file("plugin1.py", """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): def __init__(self, options): self.options = options self.this_is = "me" def coverage_init(reg, options): reg.add_file_tracer(Plugin(options)) """) config = FakeConfig("plugin1", {'a': 'hello'}) plugins = list(Plugins.load_plugins(["plugin1"], config)) assert len(plugins) == 1 assert plugins[0].this_is == "me" # type: ignore assert plugins[0].options == {'a': 'hello'} # type: ignore assert config.asked_for == ['plugin1'] def test_importing_and_configuring_more_than_one(self) -> None: self.make_file("plugin1.py", """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): def __init__(self, options): self.options = options self.this_is = "me" def coverage_init(reg, options): reg.add_file_tracer(Plugin(options)) """) self.make_file("plugin2.py", """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): def __init__(self, options): self.options = options def coverage_init(reg, options): reg.add_file_tracer(Plugin(options)) """) config = FakeConfig("plugin1", {'a': 
'hello'}) plugins = list(Plugins.load_plugins(["plugin1", "plugin2"], config)) assert len(plugins) == 2 assert plugins[0].this_is == "me" # type: ignore assert plugins[0].options == {'a': 'hello'} # type: ignore assert plugins[1].options == {} # type: ignore assert config.asked_for == ['plugin1', 'plugin2'] # The order matters... config = FakeConfig("plugin1", {'a': 'second'}) plugins = list(Plugins.load_plugins(["plugin2", "plugin1"], config)) assert len(plugins) == 2 assert plugins[0].options == {} # type: ignore assert plugins[1].this_is == "me" # type: ignore assert plugins[1].options == {'a': 'second'} # type: ignore def test_cant_import(self) -> None: with pytest.raises(ImportError, match="No module named '?plugin_not_there'?"): _ = Plugins.load_plugins(["plugin_not_there"], NullConfig()) def test_plugin_must_define_coverage_init(self) -> None: self.make_file("no_plugin.py", """\ from coverage import CoveragePlugin Nothing = 0 """) msg_pat = "Plugin module 'no_plugin' didn't define a coverage_init function" with pytest.raises(PluginError, match=msg_pat): list(Plugins.load_plugins(["no_plugin"], NullConfig())) class PluginTest(CoverageTest): """Test plugins through the Coverage class.""" def test_plugin_imported(self) -> None: # Prove that a plugin will be imported. self.make_file("my_plugin.py", """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): pass def coverage_init(reg, options): reg.add_noop(Plugin()) with open("evidence.out", "w") as f: f.write("we are here!") """) self.assert_doesnt_exist("evidence.out") cov = coverage.Coverage() cov.set_option("run:plugins", ["my_plugin"]) cov.start() cov.stop() # pragma: nested with open("evidence.out") as f: assert f.read() == "we are here!" def test_missing_plugin_raises_import_error(self) -> None: # Prove that a missing plugin will raise an ImportError. with pytest.raises(ImportError, match="No module named '?does_not_exist_woijwoicweo'?"): cov = coverage.Coverage() cov.set_option("run:plugins", ["does_not_exist_woijwoicweo"]) cov.start() cov.stop() def test_bad_plugin_isnt_hidden(self) -> None: # Prove that a plugin with an error in it will raise the error. 
self.make_file("plugin_over_zero.py", "1/0") with pytest.raises(ZeroDivisionError): cov = coverage.Coverage() cov.set_option("run:plugins", ["plugin_over_zero"]) cov.start() cov.stop() def test_plugin_sys_info(self) -> None: self.make_file("plugin_sys_info.py", """\ import coverage class Plugin(coverage.CoveragePlugin): def sys_info(self): return [("hello", "world")] def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) debug_out = io.StringIO() cov = coverage.Coverage(debug=["sys"]) cov._debug_file = debug_out cov.set_option("run:plugins", ["plugin_sys_info"]) with swallow_warnings( r"Plugin file tracers \(plugin_sys_info.Plugin\) aren't supported with .*", ): cov.start() cov.stop() # pragma: nested out_lines = [line.strip() for line in debug_out.getvalue().splitlines()] if testenv.C_TRACER: assert 'plugins.file_tracers: plugin_sys_info.Plugin' in out_lines else: assert 'plugins.file_tracers: plugin_sys_info.Plugin (disabled)' in out_lines assert 'plugins.configurers: -none-' in out_lines expected_end = [ "-- sys: plugin_sys_info.Plugin -------------------------------", "hello: world", "-- end -------------------------------------------------------", ] assert expected_end == out_lines[-len(expected_end):] def test_plugin_with_no_sys_info(self) -> None: self.make_file("plugin_no_sys_info.py", """\ import coverage class Plugin(coverage.CoveragePlugin): pass def coverage_init(reg, options): reg.add_configurer(Plugin()) """) debug_out = io.StringIO() cov = coverage.Coverage(debug=["sys"]) cov._debug_file = debug_out cov.set_option("run:plugins", ["plugin_no_sys_info"]) cov.start() cov.stop() # pragma: nested out_lines = [line.strip() for line in debug_out.getvalue().splitlines()] assert 'plugins.file_tracers: -none-' in out_lines assert 'plugins.configurers: plugin_no_sys_info.Plugin' in out_lines expected_end = [ "-- sys: plugin_no_sys_info.Plugin ----------------------------", "-- end -------------------------------------------------------", ] assert expected_end == out_lines[-len(expected_end):] def test_local_files_are_importable(self) -> None: self.make_file("importing_plugin.py", """\ from coverage import CoveragePlugin import local_module class MyPlugin(CoveragePlugin): pass def coverage_init(reg, options): reg.add_noop(MyPlugin()) """) self.make_file("local_module.py", "CONST = 1") self.make_file(".coveragerc", """\ [run] plugins = importing_plugin """) self.make_file("main_file.py", "print('MAIN')") out = self.run_command("coverage run main_file.py") assert out == "MAIN\n" out = self.run_command("coverage html -q") # sneak in a test of -q assert out == "" @pytest.mark.skipif(testenv.PLUGINS, reason="This core doesn't support plugins.") class PluginWarningOnPyTracerTest(CoverageTest): """Test that we get a controlled exception when plugins aren't supported.""" def test_exception_if_plugins_on_pytracer(self) -> None: self.make_file("simple.py", "a = 1") cov = coverage.Coverage() cov.set_option("run:plugins", ["tests.plugin1"]) if testenv.PY_TRACER: core = "PyTracer" elif testenv.SYS_MON: core = "SysMonitor" expected_warnings = [ fr"Plugin file tracers \(tests.plugin1.Plugin\) aren't supported with {core}", ] with self.assert_warnings(cov, expected_warnings): self.start_import_stop(cov, "simple") @pytest.mark.skipif(not testenv.PLUGINS, reason="Plugins are not supported with this core.") class FileTracerTest(CoverageTest): """Tests of plugins that implement file_tracer.""" class GoodFileTracerTest(FileTracerTest): """Tests of file tracer plugin happy paths.""" def 
test_plugin1(self) -> None: self.make_file("simple.py", """\ import try_xyz a = 1 b = 2 """) self.make_file("try_xyz.py", """\ c = 3 d = 4 """) cov = coverage.Coverage() CheckUniqueFilenames.hook(cov, '_should_trace') CheckUniqueFilenames.hook(cov, '_check_include_omit_etc') cov.set_option("run:plugins", ["tests.plugin1"]) # Import the Python file, executing it. self.start_import_stop(cov, "simple") _, statements, missing, _ = cov.analysis("simple.py") assert statements == [1, 2, 3] assert missing == [] zzfile = os.path.abspath(os.path.join("/src", "try_ABC.zz")) _, statements, _, _ = cov.analysis(zzfile) assert statements == [105, 106, 107, 205, 206, 207] def make_render_and_caller(self) -> None: """Make the render.py and caller.py files we need.""" # plugin2 emulates a dynamic tracing plugin: the caller's locals # are examined to determine the source file and line number. # The plugin is in tests/plugin2.py. self.make_file("render.py", """\ def render(filename, linenum): # This function emulates a template renderer. The plugin # will examine the `filename` and `linenum` locals to # determine the source file and line number. fiddle_around = 1 # not used, just chaff. return "[{} @ {}]".format(filename, linenum) def helper(x): # This function is here just to show that not all code in # this file will be part of the dynamic tracing. return x+1 """) self.make_file("caller.py", """\ import sys from render import helper, render assert render("foo_7.html", 4) == "[foo_7.html @ 4]" # Render foo_7.html again to try the CheckUniqueFilenames asserts. render("foo_7.html", 4) assert helper(42) == 43 assert render("bar_4.html", 2) == "[bar_4.html @ 2]" assert helper(76) == 77 # quux_5.html will be omitted from the results. assert render("quux_5.html", 3) == "[quux_5.html @ 3]" """) # will try to read the actual source files, so make some # source files. def lines(n: int) -> str: """Make a string with n lines of text.""" return "".join("line %d\n" % i for i in range(n)) self.make_file("bar_4.html", lines(4)) self.make_file("foo_7.html", lines(7)) def test_plugin2(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(omit=["*quux*"]) CheckUniqueFilenames.hook(cov, '_should_trace') CheckUniqueFilenames.hook(cov, '_check_include_omit_etc') cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") # The way plugin2 works, a file named foo_7.html will be claimed to # have 7 lines in it. If render() was called with line number 4, # then the plugin will claim that lines 4 and 5 were executed. _, statements, missing, _ = cov.analysis("foo_7.html") assert statements == [1, 2, 3, 4, 5, 6, 7] assert missing == [1, 2, 3, 6, 7] assert "foo_7.html" in line_counts(cov.get_data()) _, statements, missing, _ = cov.analysis("bar_4.html") assert statements == [1, 2, 3, 4] assert missing == [1, 4] assert "bar_4.html" in line_counts(cov.get_data()) assert "quux_5.html" not in line_counts(cov.get_data()) def test_plugin2_with_branch(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(branch=True, omit=["*quux*"]) CheckUniqueFilenames.hook(cov, '_should_trace') CheckUniqueFilenames.hook(cov, '_check_include_omit_etc') cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") # The way plugin2 works, a file named foo_7.html will be claimed to # have 7 lines in it. If render() was called with line number 4, # then the plugin will claim that lines 4 and 5 were executed. 
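# With only lines 4 and 5 counted as executed, lines 1-3 and 6-7 of foo_7.html
# are missing: 2 of 7 statements covered, which matches the 29% figure the report
# tests below expect for this file.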
analysis = cov._analyze("foo_7.html") assert analysis.statements == {1, 2, 3, 4, 5, 6, 7} # Plugins don't do branch coverage yet. assert analysis.has_arcs() is True assert analysis.arc_possibilities() == [] assert analysis.missing == {1, 2, 3, 6, 7} def test_plugin2_with_text_report(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(branch=True, omit=["*quux*"]) cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") repout = io.StringIO() total = cov.report(file=repout, include=["*.html"], omit=["uni*.html"], show_missing=True) report = repout.getvalue().splitlines() expected = [ 'Name Stmts Miss Branch BrPart Cover Missing', '--------------------------------------------------------', 'bar_4.html 4 2 0 0 50% 1, 4', 'foo_7.html 7 5 0 0 29% 1-3, 6-7', '--------------------------------------------------------', 'TOTAL 11 7 0 0 36%', ] assert expected == report assert math.isclose(total, 4 / 11 * 100) def test_plugin2_with_html_report(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(branch=True, omit=["*quux*"]) cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") total = cov.html_report(include=["*.html"], omit=["uni*.html"]) assert math.isclose(total, 4 / 11 * 100) self.assert_exists("htmlcov/index.html") self.assert_exists("htmlcov/bar_4_html.html") self.assert_exists("htmlcov/foo_7_html.html") def test_plugin2_with_xml_report(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(branch=True, omit=["*quux*"]) cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") total = cov.xml_report(include=["*.html"], omit=["uni*.html"]) assert math.isclose(total, 4 / 11 * 100) dom = ElementTree.parse("coverage.xml") classes = {} for elt in dom.findall(".//class"): classes[elt.get('name')] = elt assert classes['bar_4.html'].attrib == { 'branch-rate': '1', 'complexity': '0', 'filename': 'bar_4.html', 'line-rate': '0.5', 'name': 'bar_4.html', } assert classes['foo_7.html'].attrib == { 'branch-rate': '1', 'complexity': '0', 'filename': 'foo_7.html', 'line-rate': '0.2857', 'name': 'foo_7.html', } def test_defer_to_python(self) -> None: # A plugin that measures, but then wants built-in python reporting. self.make_file("fairly_odd_plugin.py", """\ # A plugin that claims all the odd lines are executed, and none of # the even lines, and then punts reporting off to the built-in # Python reporting. 
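# Returning the string "python" from file_reporter(), as this plugin does, asks
# coverage.py to fall back to its standard Python file reporter for that file.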
import coverage.plugin class Plugin(coverage.CoveragePlugin): def file_tracer(self, filename): return OddTracer(filename) def file_reporter(self, filename): return "python" class OddTracer(coverage.plugin.FileTracer): def __init__(self, filename): self.filename = filename def source_filename(self): return self.filename def line_number_range(self, frame): lineno = frame.f_lineno if lineno % 2: return (lineno, lineno) else: return (-1, -1) def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.make_file("unsuspecting.py", """\ a = 1 b = 2 c = 3 d = 4 e = 5 f = 6 """) cov = coverage.Coverage(include=["unsuspecting.py"]) cov.set_option("run:plugins", ["fairly_odd_plugin"]) self.start_import_stop(cov, "unsuspecting") repout = io.StringIO() total = cov.report(file=repout, show_missing=True) report = repout.getvalue().splitlines() expected = [ 'Name Stmts Miss Cover Missing', '-----------------------------------------------', 'unsuspecting.py 6 3 50% 2, 4, 6', '-----------------------------------------------', 'TOTAL 6 3 50%', ] assert expected == report assert total == 50 def test_find_unexecuted(self) -> None: self.make_file("unexecuted_plugin.py", """\ import os import coverage.plugin class Plugin(coverage.CoveragePlugin): def file_tracer(self, filename): if filename.endswith("foo.py"): return MyTracer(filename) def file_reporter(self, filename): return MyReporter(filename) def find_executable_files(self, src_dir): # Check that src_dir is the right value files = os.listdir(src_dir) assert "foo.py" in files assert "unexecuted_plugin.py" in files return ["chimera.py"] class MyTracer(coverage.plugin.FileTracer): def __init__(self, filename): self.filename = filename def source_filename(self): return self.filename def line_number_range(self, frame): return (999, 999) class MyReporter(coverage.FileReporter): def lines(self): return {99, 999, 9999} def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.make_file("foo.py", "a = 1") cov = coverage.Coverage(source=['.']) cov.set_option("run:plugins", ["unexecuted_plugin"]) self.start_import_stop(cov, "foo") # The file we executed claims to have run line 999. _, statements, missing, _ = cov.analysis("foo.py") assert statements == [99, 999, 9999] assert missing == [99, 9999] # The completely missing file is in the results. _, statements, missing, _ = cov.analysis("chimera.py") assert statements == [99, 999, 9999] assert missing == [99, 999, 9999] # But completely new filenames are not in the results. assert len(cov.get_data().measured_files()) == 3 with pytest.raises(NoSource): cov.analysis("fictional.py") class BadFileTracerTest(FileTracerTest): """Test error handling around file tracer plugins.""" def run_plugin(self, module_name: str) -> Coverage: """Run a plugin with the given module_name. Uses a few fixed Python files. Returns the Coverage object. """ self.make_file("simple.py", """\ import other, another a = other.f(2) b = other.f(3) c = another.g(4) d = another.g(5) """) # The names of these files are important: some plugins apply themselves # to "*other.py". self.make_file("other.py", """\ def f(x): return x+1 """) self.make_file("another.py", """\ def g(x): return x-1 """) cov = coverage.Coverage() cov.set_option("run:plugins", [module_name]) self.start_import_stop(cov, "simple") cov.save() # pytest-cov does a save after stop, so we'll do it too. 
return cov def run_bad_plugin( self, module_name: str, plugin_name: str, our_error: bool = True, excmsg: str | None = None, excmsgs: list[str] | None = None, ) -> None: """Run a file, and see that the plugin failed. `module_name` and `plugin_name` is the module and name of the plugin to use. `our_error` is True if the error reported to the user will be an explicit error in our test code, marked with an '# Oh noes!' comment. `excmsg`, if provided, is text that must appear in the stderr. `excmsgs`, if provided, is a list of messages, one of which must appear in the stderr. The plugin will be disabled, and we check that a warning is output explaining why. """ with pytest.warns(Warning) as warns: self.run_plugin(module_name) stderr = self.stderr() stderr += "".join(str(w.message) for w in warns) if our_error: # The exception we're causing should only appear once. assert stderr.count("# Oh noes!") == 1 # There should be a warning explaining what's happening, but only one. # The message can be in two forms: # Disabling plug-in '...' due to previous exception # or: # Disabling plug-in '...' due to an exception: print([str(w) for w in warns.list]) warnings = [w for w in warns.list if issubclass(w.category, CoverageWarning)] assert len(warnings) == 1 warnmsg = str(warnings[0].message) assert f"Disabling plug-in '{module_name}.{plugin_name}' due to " in warnmsg if excmsg: assert excmsg in stderr if excmsgs: found_exc = any(em in stderr for em in excmsgs) # pragma: part covered assert found_exc, f"expected one of {excmsgs} in stderr" def test_file_tracer_has_no_file_tracer_method(self) -> None: self.make_file("bad_plugin.py", """\ class Plugin(object): pass def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin("bad_plugin", "Plugin", our_error=False) def test_file_tracer_has_inherited_sourcefilename_method(self) -> None: self.make_file("bad_plugin.py", """\ import coverage class Plugin(coverage.CoveragePlugin): def file_tracer(self, filename): # Just grab everything. return FileTracer() class FileTracer(coverage.FileTracer): pass def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin( "bad_plugin", "Plugin", our_error=False, excmsg="Class 'bad_plugin.FileTracer' needs to implement source_filename()", ) def test_plugin_has_inherited_filereporter_method(self) -> None: self.make_file("bad_plugin.py", """\ import coverage class Plugin(coverage.CoveragePlugin): def file_tracer(self, filename): # Just grab everything. return FileTracer() class FileTracer(coverage.FileTracer): def source_filename(self): return "foo.xxx" def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) cov = self.run_plugin("bad_plugin") expected_msg = "Plugin 'bad_plugin.Plugin' needs to implement file_reporter()" with pytest.raises(NotImplementedError, match=expected_msg): cov.report() def test_file_tracer_fails(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): 17/0 # Oh noes! def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin("bad_plugin", "Plugin") def test_file_tracer_fails_eventually(self) -> None: # Django coverage plugin can report on a few files and then fail. 
# https://github.com/nedbat/coveragepy/issues/1011 self.make_file("bad_plugin.py", """\ import os.path import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def __init__(self): self.calls = 0 def file_tracer(self, filename): print(filename) self.calls += 1 if self.calls <= 2: return FileTracer(filename) else: 17/0 # Oh noes! class FileTracer(coverage.FileTracer): def __init__(self, filename): self.filename = filename def source_filename(self): return os.path.basename(self.filename).replace(".py", ".foo") def line_number_range(self, frame): return -1, -1 def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin("bad_plugin", "Plugin") def test_file_tracer_returns_wrong(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): return 3.14159 def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin( "bad_plugin", "Plugin", our_error=False, excmsg="'float' object has no attribute", ) def test_has_dynamic_source_filename_fails(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): return BadFileTracer() class BadFileTracer(coverage.plugin.FileTracer): def has_dynamic_source_filename(self): 23/0 # Oh noes! def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin("bad_plugin", "Plugin") def test_source_filename_fails(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): return BadFileTracer() class BadFileTracer(coverage.plugin.FileTracer): def source_filename(self): 42/0 # Oh noes! def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin("bad_plugin", "Plugin") def test_source_filename_returns_wrong(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): return BadFileTracer() class BadFileTracer(coverage.plugin.FileTracer): def source_filename(self): return 17.3 def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin( "bad_plugin", "Plugin", our_error=False, excmsgs=[ "expected str, bytes or os.PathLike object, not float", "'float' object has no attribute", "object of type 'float' has no len()", "'float' object is unsubscriptable", ], ) def test_dynamic_source_filename_fails(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): if filename.endswith("other.py"): return BadFileTracer() class BadFileTracer(coverage.plugin.FileTracer): def has_dynamic_source_filename(self): return True def dynamic_source_filename(self, filename, frame): 101/0 # Oh noes! 
def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin("bad_plugin", "Plugin") def test_line_number_range_raises_error(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): if filename.endswith("other.py"): return BadFileTracer() class BadFileTracer(coverage.plugin.FileTracer): def source_filename(self): return "something.foo" def line_number_range(self, frame): raise Exception("borked!") def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin( "bad_plugin", "Plugin", our_error=False, excmsg="borked!", ) def test_line_number_range_returns_non_tuple(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): if filename.endswith("other.py"): return BadFileTracer() class BadFileTracer(coverage.plugin.FileTracer): def source_filename(self): return "something.foo" def line_number_range(self, frame): return 42.23 def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin( "bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple", ) def test_line_number_range_returns_triple(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): if filename.endswith("other.py"): return BadFileTracer() class BadFileTracer(coverage.plugin.FileTracer): def source_filename(self): return "something.foo" def line_number_range(self, frame): return (1, 2, 3) def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin( "bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple", ) def test_line_number_range_returns_pair_of_strings(self) -> None: self.make_file("bad_plugin.py", """\ import coverage.plugin class Plugin(coverage.plugin.CoveragePlugin): def file_tracer(self, filename): if filename.endswith("other.py"): return BadFileTracer() class BadFileTracer(coverage.plugin.FileTracer): def source_filename(self): return "something.foo" def line_number_range(self, frame): return ("5", "7") def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """) self.run_bad_plugin( "bad_plugin", "Plugin", our_error=False, excmsgs=[ "an integer is required", "cannot be interpreted as an integer", ], ) class ConfigurerPluginTest(CoverageTest): """Test configuring plugins.""" run_in_temp_dir = False def test_configurer_plugin(self) -> None: cov = coverage.Coverage() cov.set_option("run:plugins", ["tests.plugin_config"]) cov.start() cov.stop() # pragma: nested excluded = cov.get_option("report:exclude_lines") assert isinstance(excluded, list) assert "pragma: custom" in excluded assert "pragma: or whatever" in excluded @pytest.mark.skipif(not testenv.DYN_CONTEXTS, reason="No dynamic contexts with this core") class DynamicContextPluginTest(CoverageTest): """Tests of plugins that implement `dynamic_context`.""" def make_plugin_capitalized_testnames(self, filename: str) -> None: """Create a dynamic context plugin that capitalizes the part after 'test_'.""" self.make_file(filename, """\ from coverage import CoveragePlugin class Plugin(CoveragePlugin): def dynamic_context(self, frame): name = frame.f_code.co_name if name.startswith(("test_", "doctest_")): parts = name.split("_", 1) return "%s:%s" % (parts[0], parts[1].upper()) return None def coverage_init(reg, options): 
                reg.add_dynamic_context(Plugin())
            """)

    def make_plugin_track_render(self, filename: str) -> None:
        """Make a dynamic context plugin that tracks 'render_' functions."""
        self.make_file(filename, """\
            from coverage import CoveragePlugin

            class Plugin(CoveragePlugin):
                def dynamic_context(self, frame):
                    name = frame.f_code.co_name
                    if name.startswith("render_"):
                        return 'renderer:' + name[7:]
                    return None

            def coverage_init(reg, options):
                reg.add_dynamic_context(Plugin())
            """)

    def make_test_files(self) -> None:
        """Make some files to use while testing dynamic context plugins."""
        self.make_file("rendering.py", """\
            def html_tag(tag, content):
                return f'<{tag}>{content}</{tag}>'

            def render_paragraph(text):
                return html_tag('p', text)

            def render_span(text):
                return html_tag('span', text)

            def render_bold(text):
                return html_tag('b', text)
            """)

        self.make_file("testsuite.py", """\
            import rendering

            def test_html_tag() -> None:
                assert rendering.html_tag('b', 'hello') == '<b>hello</b>'

            def doctest_html_tag():
                assert eval('''
                    rendering.html_tag('i', 'text') == '<i>text</i>'
                    '''.strip())

            def test_renderers() -> None:
                assert rendering.render_paragraph('hello') == '<p>hello</p>
    ' assert rendering.render_bold('wide') == 'wide' assert rendering.render_span('world') == 'world' def build_full_html(): html = '%s' % ( rendering.render_paragraph( rendering.render_span('hello'))) return html """) def run_all_functions(self, cov: Coverage, suite_name: str) -> None: # pragma: nested """Run all functions in `suite_name` under coverage.""" cov.start() suite = import_local_file(suite_name) try: # Call all functions in this module for name in dir(suite): variable = getattr(suite, name) if inspect.isfunction(variable): variable() finally: cov.stop() def test_plugin_standalone(self) -> None: self.make_plugin_capitalized_testnames('plugin_tests.py') self.make_test_files() # Enable dynamic context plugin cov = coverage.Coverage() cov.set_option("run:plugins", ['plugin_tests']) # Run the tests self.run_all_functions(cov, 'testsuite') # Labeled coverage is collected data = cov.get_data() filenames = self.get_measured_filenames(data) expected = ['', 'doctest:HTML_TAG', 'test:HTML_TAG', 'test:RENDERERS'] assert expected == sorted(data.measured_contexts()) data.set_query_context("doctest:HTML_TAG") assert [2] == sorted_lines(data, filenames['rendering.py']) data.set_query_context("test:HTML_TAG") assert [2] == sorted_lines(data, filenames['rendering.py']) data.set_query_context("test:RENDERERS") assert [2, 5, 8, 11] == sorted_lines(data, filenames['rendering.py']) def test_static_context(self) -> None: self.make_plugin_capitalized_testnames('plugin_tests.py') self.make_test_files() # Enable dynamic context plugin for coverage with named context cov = coverage.Coverage(context='mytests') cov.set_option("run:plugins", ['plugin_tests']) # Run the tests self.run_all_functions(cov, 'testsuite') # Static context prefix is preserved data = cov.get_data() expected = [ 'mytests', 'mytests|doctest:HTML_TAG', 'mytests|test:HTML_TAG', 'mytests|test:RENDERERS', ] assert expected == sorted(data.measured_contexts()) def test_plugin_with_test_function(self) -> None: self.make_plugin_capitalized_testnames('plugin_tests.py') self.make_test_files() # Enable both a plugin and test_function dynamic context cov = coverage.Coverage() cov.set_option("run:plugins", ['plugin_tests']) cov.set_option("run:dynamic_context", "test_function") # Run the tests self.run_all_functions(cov, 'testsuite') # test_function takes precedence over plugins - only # functions that are not labeled by test_function are # labeled by plugin_tests. data = cov.get_data() filenames = self.get_measured_filenames(data) expected = [ '', 'doctest:HTML_TAG', 'testsuite.test_html_tag', 'testsuite.test_renderers', ] assert expected == sorted(data.measured_contexts()) def assert_context_lines(context: str, lines: list[TLineNo]) -> None: data.set_query_context(context) assert lines == sorted_lines(data, filenames['rendering.py']) assert_context_lines("doctest:HTML_TAG", [2]) assert_context_lines("testsuite.test_html_tag", [2]) assert_context_lines("testsuite.test_renderers", [2, 5, 8, 11]) def test_multiple_plugins(self) -> None: self.make_plugin_capitalized_testnames('plugin_tests.py') self.make_plugin_track_render('plugin_renderers.py') self.make_test_files() # Enable two plugins cov = coverage.Coverage() cov.set_option("run:plugins", ['plugin_renderers', 'plugin_tests']) self.run_all_functions(cov, 'testsuite') # It is important to note, that line 11 (render_bold function) is never # labeled as renderer:bold context, because it is only called from # test_renderers function - so it already falls under test:RENDERERS # context. 
# # render_paragraph and render_span (lines 5, 8) are directly called by # testsuite.build_full_html, so they get labeled by renderers plugin. data = cov.get_data() filenames = self.get_measured_filenames(data) expected = [ '', 'doctest:HTML_TAG', 'renderer:paragraph', 'renderer:span', 'test:HTML_TAG', 'test:RENDERERS', ] assert expected == sorted(data.measured_contexts()) def assert_context_lines(context: str, lines: list[TLineNo]) -> None: data.set_query_context(context) assert lines == sorted_lines(data, filenames['rendering.py']) assert_context_lines("test:HTML_TAG", [2]) assert_context_lines("test:RENDERERS", [2, 5, 8, 11]) assert_context_lines("doctest:HTML_TAG", [2]) assert_context_lines("renderer:paragraph", [2, 5]) assert_context_lines("renderer:span", [2, 8]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_process.py0000644000175100001770000014262500000000000020372 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for process behavior of coverage.py.""" from __future__ import annotations import csv import glob import os import os.path import platform import re import stat import sys import textwrap from pathlib import Path from typing import Any import pytest import coverage from coverage import env from coverage.data import line_counts from coverage.files import abs_file, python_reported_file from tests import testenv from tests.coveragetest import CoverageTest, TESTS_DIR from tests.helpers import re_line, re_lines, re_lines_text class ProcessTest(CoverageTest): """Tests of the per-process behavior of coverage.py.""" def test_save_on_exit(self) -> None: self.make_file("mycode.py", """\ h = "Hello" w = "world" """) self.assert_doesnt_exist(".coverage") self.run_command("coverage run mycode.py") self.assert_exists(".coverage") def test_tests_dir_is_importable(self) -> None: # Checks that we can import modules from the tests directory at all! self.make_file("mycode.py", """\ import covmod1 import covmodzip1 a = 1 print('done') """) self.assert_doesnt_exist(".coverage") self.add_test_modules_to_pythonpath() out = self.run_command("coverage run mycode.py") self.assert_exists(".coverage") assert out == 'done\n' def test_coverage_run_envvar_is_in_coveragerun(self) -> None: # Test that we are setting COVERAGE_RUN when we run. self.make_file("envornot.py", """\ import os print(os.getenv("COVERAGE_RUN", "nope")) """) self.del_environ("COVERAGE_RUN") # Regular Python doesn't have the environment variable. out = self.run_command("python envornot.py") assert out == "nope\n" self.del_environ("COVERAGE_RUN") # But `coverage run` does have it. out = self.run_command("coverage run envornot.py") assert out == "true\n" def make_b_or_c_py(self) -> None: """Create b_or_c.py, used in a few of these tests.""" # "b_or_c.py b" will run 6 lines. # "b_or_c.py c" will run 7 lines. # Together, they run 8 lines. 
self.make_file("b_or_c.py", """\ import sys a = 2 if sys.argv[1] == 'b': b = 4 else: c = 6 c2 = 7 d = 8 print('done') """) def test_append_data(self) -> None: self.make_b_or_c_py() out = self.run_command("coverage run b_or_c.py b") assert out == 'done\n' self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) out = self.run_command("coverage run --append b_or_c.py c") assert out == 'done\n' self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) # Read the coverage file and see that b_or_c.py has all 8 lines # executed. data = coverage.CoverageData() data.read() assert line_counts(data)['b_or_c.py'] == 8 def test_append_data_with_different_file(self) -> None: self.make_b_or_c_py() self.make_file(".coveragerc", """\ [run] data_file = .mycovdata """) out = self.run_command("coverage run b_or_c.py b") assert out == 'done\n' self.assert_doesnt_exist(".coverage") self.assert_exists(".mycovdata") out = self.run_command("coverage run --append b_or_c.py c") assert out == 'done\n' self.assert_doesnt_exist(".coverage") self.assert_exists(".mycovdata") # Read the coverage file and see that b_or_c.py has all 8 lines # executed. data = coverage.CoverageData(".mycovdata") data.read() assert line_counts(data)['b_or_c.py'] == 8 def test_append_can_create_a_data_file(self) -> None: self.make_b_or_c_py() out = self.run_command("coverage run --append b_or_c.py b") assert out == 'done\n' self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) # Read the coverage file and see that b_or_c.py has only 6 lines # executed. data = coverage.CoverageData() data.read() assert line_counts(data)['b_or_c.py'] == 6 def test_combine_with_rc(self) -> None: self.make_b_or_c_py() self.make_file(".coveragerc", """\ [run] source = . parallel = true """) out = self.run_command("coverage run b_or_c.py b") assert out == 'done\n' self.assert_doesnt_exist(".coverage") out = self.run_command("coverage run b_or_c.py c") assert out == 'done\n' self.assert_doesnt_exist(".coverage") # After two runs, there should be two .coverage.machine.123 files. self.assert_file_count(".coverage.*", 2) # Combine the parallel coverage data files into .coverage . self.run_command("coverage combine") self.assert_exists(".coverage") self.assert_exists(".coveragerc") # After combining, there should be only the .coverage file. self.assert_file_count(".coverage.*", 0) # Read the coverage file and see that b_or_c.py has all 8 lines # executed. data = coverage.CoverageData() data.read() assert line_counts(data)['b_or_c.py'] == 8 # Reporting should still work even with the .rc file out = self.run_command("coverage report") assert out == textwrap.dedent("""\ Name Stmts Miss Cover ------------------------------- b_or_c.py 8 0 100% ------------------------------- TOTAL 8 0 100% """) def test_combine_with_aliases(self) -> None: self.make_file("d1/x.py", """\ a = 1 b = 2 print(f"{a} {b}") """) self.make_file("d2/x.py", """\ # 1 # 2 # 3 c = 4 d = 5 print(f"{c} {d}") """) self.make_file(".coveragerc", """\ [run] source = . parallel = True [paths] source = src */d1 */d2 """) out = self.run_command("coverage run " + os.path.normpath("d1/x.py")) assert out == '1 2\n' out = self.run_command("coverage run " + os.path.normpath("d2/x.py")) assert out == '4 5\n' self.assert_file_count(".coverage.*", 2) self.make_file("src/x.py", "") self.run_command("coverage combine") self.assert_exists(".coverage") # After combining, there should be only the .coverage file. 
self.assert_file_count(".coverage.*", 0) # Read the coverage data file and see that the two different x.py # files have been combined together. data = coverage.CoverageData() data.read() summary = line_counts(data, fullpath=True) assert len(summary) == 1 actual = abs_file(list(summary.keys())[0]) expected = abs_file('src/x.py') assert expected == actual assert list(summary.values())[0] == 6 def test_erase_parallel(self) -> None: self.make_file(".coveragerc", """\ [run] data_file = data.dat parallel = True """) self.make_file("data.dat") self.make_file("data.dat.fooey") self.make_file("data.dat.gooey") self.make_file(".coverage") self.run_command("coverage erase") self.assert_doesnt_exist("data.dat") self.assert_doesnt_exist("data.dat.fooey") self.assert_doesnt_exist("data.dat.gooey") self.assert_exists(".coverage") def test_missing_source_file(self) -> None: # Check what happens if the source is missing when reporting happens. self.make_file("fleeting.py", """\ s = 'goodbye, cruel world!' """) self.run_command("coverage run fleeting.py") os.remove("fleeting.py") out = self.run_command("coverage html -d htmlcov") assert re.search("No source for code: '.*fleeting.py'", out) assert "Traceback" not in out # It happens that the code paths are different for *.py and other # files, so try again with no extension. self.make_file("fleeting", """\ s = 'goodbye, cruel world!' """) self.run_command("coverage run fleeting") os.remove("fleeting") status, out = self.run_command_status("coverage html -d htmlcov") assert re.search("No source for code: '.*fleeting'", out) assert "Traceback" not in out assert status == 1 def test_running_missing_file(self) -> None: status, out = self.run_command_status("coverage run xyzzy.py") assert re.search("No file to run: .*xyzzy.py", out) assert "raceback" not in out assert "rror" not in out assert status == 1 def test_code_throws(self) -> None: self.make_file("throw.py", """\ class MyException(Exception): pass def f1(): raise MyException("hey!") def f2(): f1() f2() """) # The important thing is for "coverage run" and "python" to report the # same traceback. status, out = self.run_command_status("coverage run throw.py") out2 = self.run_command("python throw.py") if env.PYPY: # PyPy has an extra frame in the traceback for some reason out2 = re_lines_text("toplevel", out2, match=False) assert out == out2 # But also make sure that the output is what we expect. path = python_reported_file('throw.py') msg = f'File "{re.escape(path)}", line 8, in f2' assert re.search(msg, out) assert 'raise MyException("hey!")' in out assert status == 1 def test_code_exits(self) -> None: self.make_file("exit.py", """\ import sys def f1(): print("about to exit..") sys.exit(17) def f2(): f1() f2() """) # The important thing is for "coverage run" and "python" to have the # same output. No traceback. 
status, out = self.run_command_status("coverage run exit.py") status2, out2 = self.run_command_status("python exit.py") assert out == out2 assert out == "about to exit..\n" assert status == status2 assert status == 17 def test_code_exits_no_arg(self) -> None: self.make_file("exit_none.py", """\ import sys def f1(): print("about to exit quietly..") sys.exit() f1() """) status, out = self.run_command_status("coverage run exit_none.py") status2, out2 = self.run_command_status("python exit_none.py") assert out == out2 assert out == "about to exit quietly..\n" assert status == status2 assert status == 0 @pytest.mark.skipif(not hasattr(os, "fork"), reason="Can't test os.fork, it doesn't exist.") def test_fork(self) -> None: self.make_file("fork.py", """\ import os print(f"parent,{os.getpid()}", flush=True) ret = os.fork() if ret == 0: print(f"child,{os.getpid()}", flush=True) else: os.waitpid(ret, 0) """) total_lines = 6 self.set_environ("COVERAGE_DEBUG_FILE", "debug.out") out = self.run_command("coverage run --debug=pid,process,trace -p fork.py") pids = {key:int(pid) for key, pid in csv.reader(out.splitlines())} assert set(pids) == {"parent", "child"} self.assert_doesnt_exist(".coverage") # After running the forking program, there should be two # .coverage.machine.pid.randomword files. The pids should match our # processes, and the files should have different random words at the # end of the file name. self.assert_file_count(".coverage.*", 2) data_files = glob.glob(".coverage.*") filepids = {int(name.split(".")[-2]) for name in data_files} assert filepids == set(pids.values()) suffixes = {name.split(".")[-1] for name in data_files} assert len(suffixes) == 2, f"Same random suffix: {data_files}" # Each data file should have a subset of the lines. for data_file in data_files: data = coverage.CoverageData(data_file) data.read() assert line_counts(data)["fork.py"] < total_lines # Combine the parallel coverage data files into a .coverage file. # After combining, there should be only the .coverage file. self.run_command("coverage combine") self.assert_exists(".coverage") self.assert_file_count(".coverage.*", 0) data = coverage.CoverageData() data.read() assert line_counts(data)["fork.py"] == total_lines debug_text = Path("debug.out").read_text() ppid = pids["parent"] cpid = pids["child"] assert ppid != cpid plines = re_lines(fr"{ppid}\.[0-9a-f]+: New process: pid={ppid}, executable", debug_text) assert len(plines) == 1 clines = re_lines(fr"{cpid}\.[0-9a-f]+: New process: forked {ppid} -> {cpid}", debug_text) assert len(clines) == 1 reported_pids = {line.split(".")[0] for line in debug_text.splitlines()} assert len(reported_pids) == 2 def test_warnings_during_reporting(self) -> None: # While fixing issue #224, the warnings were being printed far too # often. Make sure they're not any more. self.make_file("hello.py", """\ import sys, os, the_other print("Hello") """) self.make_file("the_other.py", """\ print("What?") """) self.make_file(".coveragerc", """\ [run] source = . xyzzy """) self.run_command("coverage run hello.py") out = self.run_command("coverage html") assert out.count("Module xyzzy was never imported.") == 0 def test_warns_if_never_run(self) -> None: # Note: the name of the function can't have "warning" in it, or the # absolute path of the file will have "warning" in it, and an assertion # will fail. 
out = self.run_command("coverage run i_dont_exist.py") path = python_reported_file('i_dont_exist.py') assert f"No file to run: '{path}'" in out assert "warning" not in out assert "Exception" not in out out = self.run_command("coverage run -m no_such_module") assert ( ("No module named no_such_module" in out) or ("No module named 'no_such_module'" in out) ) assert "warning" not in out assert "Exception" not in out @pytest.mark.skipif(env.METACOV, reason="Can't test tracers changing during metacoverage") def test_warnings_trace_function_changed_with_threads(self) -> None: # https://github.com/nedbat/coveragepy/issues/164 self.make_file("bug164.py", """\ import threading import time class MyThread (threading.Thread): def run(self): print("Hello") thr = MyThread() thr.start() thr.join() """) out = self.run_command("coverage run --timid bug164.py") assert "Hello\n" in out assert "warning" not in out @pytest.mark.skipif(env.METACOV, reason="Can't test tracers changing during metacoverage") def test_warning_trace_function_changed(self) -> None: self.make_file("settrace.py", """\ import sys print("Hello") sys.settrace(None) print("Goodbye") """) out = self.run_command("coverage run --timid settrace.py") assert "Hello\n" in out assert "Goodbye\n" in out assert "Trace function changed" in out # When meta-coverage testing, this test doesn't work, because it finds # coverage.py's own trace function. @pytest.mark.skipif(env.METACOV, reason="Can't test timid during coverage measurement.") def test_timid(self) -> None: # Test that the --timid command line argument properly swaps the tracer # function for a simpler one. # # This is complicated by the fact that the tests are run twice for each # version: once with a compiled C-based trace function, and once without # it, to also test the Python trace function. So this test has to examine # an environment variable set in igor.py to know whether to expect to see # the C trace function or not. self.make_file("showtrace.py", """\ # Show the current frame's trace function, so that we can test what the # command-line options do to the trace function used. import inspect # Show what the trace function is. If a C-based function is used, then f_trace # may be None. trace_fn = inspect.currentframe().f_trace if trace_fn is None: trace_name = "None" else: # Get the name of the tracer class. try: trace_name = trace_fn.__self__.__class__.__name__ except AttributeError: # A C-based function could also manifest as an f_trace value # which doesn't have __self__. trace_name = trace_fn.__class__.__name__ print(trace_name) """) # When running without coverage, no trace function py_out = self.run_command("python showtrace.py") assert py_out == "None\n" cov_out = self.run_command("coverage run showtrace.py") if testenv.C_TRACER: # If the C trace function is being tested, then regular running should have # the C function, which registers itself as f_trace. assert cov_out == "CTracer\n" elif testenv.SYS_MON: assert cov_out == "None\n" else: # If the Python trace function is being tested, then regular running will # also show the Python function. assert cov_out == "PyTracer\n" # When running timidly, the trace function is always Python. 
timid_out = self.run_command("coverage run --timid showtrace.py") assert timid_out == "PyTracer\n" def test_warn_preimported(self) -> None: self.make_file("hello.py", """\ import goodbye import coverage cov = coverage.Coverage(include=["good*"], check_preimported=True) cov.start() print(goodbye.f()) cov.stop() """) self.make_file("goodbye.py", """\ def f(): return "Goodbye!" """) goodbye_path = os.path.abspath("goodbye.py") out = self.run_command("python hello.py") assert "Goodbye!" in out msg = ( f"CoverageWarning: Already imported a file that will be measured: {goodbye_path} " + "(already-imported)" ) assert msg in out # Pypy passes locally, but fails in CI? Perhaps the version of macOS is # significant? https://foss.heptapod.net/pypy/pypy/-/issues/3074 @pytest.mark.skipif(env.PYPY, reason="PyPy is unreliable with this test") def test_lang_c(self) -> None: # LANG=C forces getfilesystemencoding on Linux to 'ascii', which causes # failures with non-ascii file names. We don't want to make a real file # with strange characters, though, because that gets the test runners # tangled up. This will isolate the concerns to the coverage.py code. # https://github.com/nedbat/coveragepy/issues/533 self.make_file("weird_file.py", r""" globs = {} code = "a = 1\nb = 2\n" exec(compile(code, "wut\xe9\xea\xeb\xec\x01\x02.py", 'exec'), globs) print(globs['a']) print(globs['b']) """) self.set_environ("LANG", "C") out = self.run_command("coverage run weird_file.py") assert out == "1\n2\n" def test_deprecation_warnings(self) -> None: # Test that coverage doesn't trigger deprecation warnings. # https://github.com/nedbat/coveragepy/issues/305 self.make_file("allok.py", """\ import warnings warnings.simplefilter('default') import coverage print("No warnings!") """) # Some of our testing infrastructure can issue warnings. # Turn it all off for the sub-process. self.del_environ("COVERAGE_TESTING") out = self.run_command("python allok.py") assert out == "No warnings!\n" def test_run_twice(self) -> None: # https://github.com/nedbat/coveragepy/issues/353 self.make_file("foo.py", """\ def foo(): pass """) self.make_file("run_twice.py", """\ import sys import coverage for i in [1, 2]: sys.stderr.write(f"Run {i}\\n") inst = coverage.Coverage(source=['foo']) inst.load() inst.start() import foo inst.stop() inst.save() """) out = self.run_command("python run_twice.py") # Remove the file location and source line from the warning. out = re.sub(r"(?m)^[\\/\w.:~_-]+:\d+: CoverageWarning: ", "f:d: CoverageWarning: ", out) out = re.sub(r"(?m)^\s+self.warn.*$\n", "", out) expected = ( "Run 1\n" + "Run 2\n" + "f:d: CoverageWarning: Module foo was previously imported, but not measured " + "(module-not-measured)\n" ) assert expected == out def test_module_name(self) -> None: # https://github.com/nedbat/coveragepy/issues/478 # Make sure help doesn't show a silly command name when run as a # module, like it used to: # $ python -m coverage # Code coverage for Python. Use '__main__.py help' for help. out = self.run_command("python -m coverage") assert "Use 'coverage help' for help" in out TRY_EXECFILE = os.path.join(os.path.dirname(__file__), "modules/process_test/try_execfile.py") class EnvironmentTest(CoverageTest): """Tests using try_execfile.py to test the execution environment.""" def assert_tryexecfile_output(self, expected: str, actual: str) -> None: """Assert that the output we got is a successful run of try_execfile.py. `expected` and `actual` must be the same. """ # First, is this even credible try_execfile.py output? 
assert '"DATA": "xyzzy"' in actual assert actual == expected def test_coverage_run_is_like_python(self) -> None: with open(TRY_EXECFILE) as f: self.make_file("run_me.py", f.read()) expected = self.run_command("python run_me.py") actual = self.run_command("coverage run run_me.py") self.assert_tryexecfile_output(expected, actual) def test_coverage_run_far_away_is_like_python(self) -> None: with open(TRY_EXECFILE) as f: self.make_file("sub/overthere/prog.py", f.read()) expected = self.run_command("python sub/overthere/prog.py") actual = self.run_command("coverage run sub/overthere/prog.py") self.assert_tryexecfile_output(expected, actual) @pytest.mark.skipif(not env.WINDOWS, reason="This is about Windows paths") def test_coverage_run_far_away_is_like_python_windows(self) -> None: with open(TRY_EXECFILE) as f: self.make_file("sub/overthere/prog.py", f.read()) expected = self.run_command("python sub\\overthere\\prog.py") actual = self.run_command("coverage run sub\\overthere\\prog.py") self.assert_tryexecfile_output(expected, actual) def test_coverage_run_dashm_is_like_python_dashm(self) -> None: self.add_test_modules_to_pythonpath() expected = self.run_command("python -m process_test.try_execfile") actual = self.run_command("coverage run -m process_test.try_execfile") self.assert_tryexecfile_output(expected, actual) def test_coverage_run_dir_is_like_python_dir(self) -> None: with open(TRY_EXECFILE) as f: self.make_file("with_main/__main__.py", f.read()) expected = self.run_command("python with_main") actual = self.run_command("coverage run with_main") self.assert_tryexecfile_output(expected, actual) def test_coverage_run_dashm_dir_no_init_is_like_python(self) -> None: with open(TRY_EXECFILE) as f: self.make_file("with_main/__main__.py", f.read()) expected = self.run_command("python -m with_main") actual = self.run_command("coverage run -m with_main") self.assert_tryexecfile_output(expected, actual) def test_coverage_run_dashm_dir_with_init_is_like_python(self) -> None: with open(TRY_EXECFILE) as f: self.make_file("with_main/__main__.py", f.read()) self.make_file("with_main/__init__.py", "") expected = self.run_command("python -m with_main") actual = self.run_command("coverage run -m with_main") self.assert_tryexecfile_output(expected, actual) def test_coverage_run_dashm_equal_to_doubledashsource(self) -> None: """regression test for #328 When imported by -m, a module's __name__ is __main__, but we need the --source machinery to know and respect the original name. """ self.add_test_modules_to_pythonpath() expected = self.run_command("python -m process_test.try_execfile") actual = self.run_command( "coverage run --source process_test.try_execfile -m process_test.try_execfile", ) self.assert_tryexecfile_output(expected, actual) def test_coverage_run_dashm_superset_of_doubledashsource(self) -> None: """Edge case: --source foo -m foo.bar""" # Ugh: without this config file, we'll get a warning about # CoverageWarning: Module process_test was previously imported, # but not measured (module-not-measured) # # This is because process_test/__init__.py is imported while looking # for process_test.try_execfile. That import happens while setting # sys.path before start() is called. 
self.make_file(".coveragerc", """\ [run] disable_warnings = module-not-measured """) self.add_test_modules_to_pythonpath() expected = self.run_command("python -m process_test.try_execfile") actual = self.run_command( "coverage run --source process_test -m process_test.try_execfile", ) self.assert_tryexecfile_output(expected, actual) st, out = self.run_command_status("coverage report") assert st == 0 assert self.line_count(out) == 6, out def test_coverage_run_script_imports_doubledashsource(self) -> None: # This file imports try_execfile, which compiles it to .pyc, so the # first run will have __file__ == "try_execfile.py" and the second will # have __file__ == "try_execfile.pyc", which throws off the comparison. # Setting dont_write_bytecode True stops the compilation to .pyc and # keeps the test working. self.make_file("myscript", """\ import sys; sys.dont_write_bytecode = True import process_test.try_execfile """) self.add_test_modules_to_pythonpath() expected = self.run_command("python myscript") actual = self.run_command("coverage run --source process_test myscript") self.assert_tryexecfile_output(expected, actual) st, out = self.run_command_status("coverage report") assert st == 0 assert self.line_count(out) == 6, out def test_coverage_run_dashm_is_like_python_dashm_off_path(self) -> None: # https://github.com/nedbat/coveragepy/issues/242 self.make_file("sub/__init__.py", "") with open(TRY_EXECFILE) as f: self.make_file("sub/run_me.py", f.read()) expected = self.run_command("python -m sub.run_me") actual = self.run_command("coverage run -m sub.run_me") self.assert_tryexecfile_output(expected, actual) def test_coverage_run_dashm_is_like_python_dashm_with__main__207(self) -> None: # https://github.com/nedbat/coveragepy/issues/207 self.make_file("package/__init__.py", "print('init')") self.make_file("package/__main__.py", "print('main')") expected = self.run_command("python -m package") actual = self.run_command("coverage run -m package") assert expected == actual def test_coverage_zip_is_like_python(self) -> None: # Test running coverage from a zip file itself. Some environments # (windows?) zip up the coverage main to be used as the coverage # command. with open(TRY_EXECFILE) as f: self.make_file("run_me.py", f.read()) expected = self.run_command("python run_me.py") cov_main = os.path.join(TESTS_DIR, "covmain.zip") actual = self.run_command(f"python {cov_main} run run_me.py") self.assert_tryexecfile_output(expected, actual) def test_coverage_custom_script(self) -> None: # https://github.com/nedbat/coveragepy/issues/678 # If sys.path[0] isn't the Python default, then coverage.py won't # fiddle with it. self.make_file("a/b/c/thing.py", """\ SOMETHING = "hello-xyzzy" """) abc = os.path.abspath("a/b/c") self.make_file("run_coverage.py", f"""\ import sys sys.path[0:0] = [ r'{abc}', '/Users/somebody/temp/something/eggs/something-4.5.1-py2.7-xxx-10.13-x86_64.egg', ] import coverage.cmdline if __name__ == '__main__': sys.exit(coverage.cmdline.main()) """) self.make_file("how_is_it.py", """\ import pprint, sys pprint.pprint(sys.path) import thing print(thing.SOMETHING) """) # If this test fails, it will be with "can't import thing". 
out = self.run_command("python run_coverage.py run how_is_it.py") assert "hello-xyzzy" in out out = self.run_command("python -m run_coverage run how_is_it.py") assert "hello-xyzzy" in out @pytest.mark.skipif(env.WINDOWS, reason="Windows can't make symlinks") @pytest.mark.skipif( platform.python_version().endswith("+"), reason="setuptools barfs on dev versions: https://github.com/pypa/packaging/issues/678", # https://github.com/nedbat/coveragepy/issues/1556 ) def test_bug_862(self) -> None: # This used to simulate how pyenv and pyenv-virtualenv create the # coverage executable. Now the code shows how venv does it. self.make_file("elsewhere/bin/fake-coverage", f"""\ #!{sys.executable} import re import sys from coverage.cmdline import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\\.pyw|\\.exe)?$', '', sys.argv[0]) sys.exit(main()) """) os.chmod("elsewhere/bin/fake-coverage", stat.S_IREAD | stat.S_IEXEC) os.symlink("elsewhere", "somewhere") self.make_file("foo.py", "print('inside foo')") self.make_file("bar.py", "import foo") out = self.run_command("somewhere/bin/fake-coverage run bar.py") assert "inside foo\n" == out def test_bug_909(self) -> None: # https://github.com/nedbat/coveragepy/issues/909 # The __init__ files were being imported before measurement started, # so the line in __init__.py was being marked as missed, and there were # warnings about measured files being imported before start. self.make_file("proj/__init__.py", "print('Init')") self.make_file("proj/thecode.py", "print('The code')") self.make_file("proj/tests/__init__.py", "") self.make_file("proj/tests/test_it.py", "import proj.thecode") expected = "Init\nThe code\n" actual = self.run_command("coverage run --source=proj -m proj.tests.test_it") assert expected == actual report = self.run_command("coverage report -m") # Name Stmts Miss Cover Missing # ------------------------------------------------------ # proj/__init__.py 1 0 100% # proj/tests/__init__.py 0 0 100% # proj/tests/test_it.py 1 0 100% # proj/thecode.py 1 0 100% # ------------------------------------------------------ # TOTAL 3 0 100% squeezed = self.squeezed_lines(report) assert squeezed[2].replace("\\", "/") == "proj/__init__.py 1 0 100%" class ExcepthookTest(CoverageTest): """Tests of sys.excepthook support.""" # TODO: do we need these as process tests if we have test_execfile.py:RunFileTest? def test_excepthook(self) -> None: self.make_file("excepthook.py", """\ import sys def excepthook(*args): print('in excepthook') if maybe == 2: print('definitely') sys.excepthook = excepthook maybe = 1 raise RuntimeError('Error Outside') """) cov_st, cov_out = self.run_command_status("coverage run excepthook.py") py_st, py_out = self.run_command_status("python excepthook.py") assert cov_st == py_st assert cov_st == 1 assert "in excepthook" in py_out assert cov_out == py_out # Read the coverage file and see that excepthook.py has 7 lines # executed. 
data = coverage.CoverageData() data.read() assert line_counts(data)['excepthook.py'] == 7 @pytest.mark.skipif(not env.CPYTHON, reason="non-CPython handles excepthook exits differently, punt for now.", ) def test_excepthook_exit(self) -> None: self.make_file("excepthook_exit.py", """\ import sys def excepthook(*args): print('in excepthook') sys.exit(0) sys.excepthook = excepthook raise RuntimeError('Error Outside') """) cov_st, cov_out = self.run_command_status("coverage run excepthook_exit.py") py_st, py_out = self.run_command_status("python excepthook_exit.py") assert cov_st == py_st assert cov_st == 0 assert py_out == "in excepthook\n" assert cov_out == py_out @pytest.mark.skipif(env.PYPY, reason="PyPy handles excepthook throws differently.") def test_excepthook_throw(self) -> None: self.make_file("excepthook_throw.py", """\ import sys def excepthook(*args): # Write this message to stderr so that we don't have to deal # with interleaved stdout/stderr comparisons in the assertions # in the test. sys.stderr.write('in excepthook\\n') raise RuntimeError('Error Inside') sys.excepthook = excepthook raise RuntimeError('Error Outside') """) cov_st, cov_out = self.run_command_status("coverage run excepthook_throw.py") py_st, py_out = self.run_command_status("python excepthook_throw.py") assert cov_st == py_st assert cov_st == 1 assert "in excepthook" in py_out assert cov_out == py_out class AliasedCommandTest(CoverageTest): """Tests of the version-specific command aliases.""" run_in_temp_dir = False def test_major_version_works(self) -> None: # "coverage3" works on py3 cmd = "coverage%d" % sys.version_info[0] out = self.run_command(cmd) assert "Code coverage for Python" in out def test_wrong_alias_doesnt_work(self) -> None: # "coverage2" doesn't work on py3 assert sys.version_info[0] == 3 # Let us know when Python 4 is out... badcmd = "coverage2" out = self.run_command(badcmd) assert "Code coverage for Python" not in out def test_specific_alias_works(self) -> None: # "coverage-3.9" works on py3.9 cmd = "coverage-%d.%d" % sys.version_info[:2] out = self.run_command(cmd) assert "Code coverage for Python" in out @pytest.mark.parametrize("cmd", [ "coverage", "coverage%d" % sys.version_info[0], "coverage-%d.%d" % sys.version_info[:2], ]) def test_aliases_used_in_messages(self, cmd: str) -> None: out = self.run_command(f"{cmd} foobar") assert "Unknown command: 'foobar'" in out assert f"Use '{cmd} help' for help" in out class PydocTest(CoverageTest): """Test that pydoc can get our information.""" run_in_temp_dir = False def assert_pydoc_ok(self, name: str, thing: Any) -> None: """Check that pydoc of `name` finds the docstring from `thing`.""" # Run pydoc. out = self.run_command("python -m pydoc " + name) # It should say "Help on..", and not have a traceback assert out.startswith("Help on ") assert "Traceback" not in out # All of the lines in the docstring should be there somewhere. for line in thing.__doc__.splitlines(): assert line.strip() in out def test_pydoc_coverage(self) -> None: self.assert_pydoc_ok("coverage", coverage) def test_pydoc_coverage_coverage(self) -> None: self.assert_pydoc_ok("coverage.Coverage", coverage.Coverage) class FailUnderTest(CoverageTest): """Tests of the --fail-under switch.""" def setUp(self) -> None: super().setUp() self.make_file("forty_two_plus.py", """\ # I have 42.857% (3/7) coverage! 
a = 1 b = 2 if a > 3: b = 4 c = 5 d = 6 e = 7 """) self.make_data_file(lines={abs_file("forty_two_plus.py"): [2, 3, 4]}) def test_report_43_is_ok(self) -> None: st, out = self.run_command_status("coverage report --fail-under=43") assert st == 0 assert self.last_line_squeezed(out) == "TOTAL 7 4 43%" def test_report_43_is_not_ok(self) -> None: st, out = self.run_command_status("coverage report --fail-under=44") assert st == 2 expected = "Coverage failure: total of 43 is less than fail-under=44" assert expected == self.last_line_squeezed(out) def test_report_42p86_is_not_ok(self) -> None: self.make_file(".coveragerc", "[report]\nprecision = 2") st, out = self.run_command_status("coverage report --fail-under=42.88") assert st == 2 expected = "Coverage failure: total of 42.86 is less than fail-under=42.88" assert expected == self.last_line_squeezed(out) def test_report_99p9_is_not_ok(self) -> None: # A file with 99.9% coverage: self.make_file("ninety_nine_plus.py", "a = 1\n" + "b = 2\n" * 2000 + "if a > 3:\n" + " c = 4\n", ) self.make_data_file(lines={abs_file("ninety_nine_plus.py"): range(1, 2002)}) st, out = self.run_command_status("coverage report --fail-under=100") assert st == 2 expected = "Coverage failure: total of 99 is less than fail-under=100" assert expected == self.last_line_squeezed(out) class CoverageCoreTest(CoverageTest): """Test that cores are chosen correctly.""" # This doesn't test failure modes, only successful requests. try: from coverage.tracer import CTracer has_ctracer = True except ImportError: has_ctracer = False def test_core_default(self) -> None: self.del_environ("COVERAGE_TEST_CORES") self.del_environ("COVERAGE_CORE") self.make_file("numbers.py", "print(123, 456)") out = self.run_command("coverage run --debug=sys numbers.py") assert out.endswith("123 456\n") core = re_line(r" core:", out).strip() if self.has_ctracer: assert core == "core: CTracer" else: assert core == "core: PyTracer" @pytest.mark.skipif(not has_ctracer, reason="No CTracer to request") def test_core_request_ctrace(self) -> None: self.del_environ("COVERAGE_TEST_CORES") self.set_environ("COVERAGE_CORE", "ctrace") self.make_file("numbers.py", "print(123, 456)") out = self.run_command("coverage run --debug=sys numbers.py") assert out.endswith("123 456\n") core = re_line(r" core:", out).strip() assert core == "core: CTracer" def test_core_request_pytrace(self) -> None: self.del_environ("COVERAGE_TEST_CORES") self.set_environ("COVERAGE_CORE", "pytrace") self.make_file("numbers.py", "print(123, 456)") out = self.run_command("coverage run --debug=sys numbers.py") assert out.endswith("123 456\n") core = re_line(r" core:", out).strip() assert core == "core: PyTracer" def test_core_request_sysmon(self) -> None: self.del_environ("COVERAGE_TEST_CORES") self.set_environ("COVERAGE_CORE", "sysmon") self.make_file("numbers.py", "print(123, 456)") out = self.run_command("coverage run --debug=sys numbers.py") assert out.endswith("123 456\n") core = re_line(r" core:", out).strip() warns = re_lines(r"CoverageWarning: sys.monitoring isn't available", out) if env.PYBEHAVIOR.pep669: assert core == "core: SysMonitor" assert not warns else: assert core in ("core: CTracer", "core: PyTracer") assert warns class FailUnderNoFilesTest(CoverageTest): """Test that nothing to report results in an error exit status.""" def test_report(self) -> None: self.make_file(".coveragerc", "[report]\nfail_under = 99\n") st, out = self.run_command_status("coverage report") assert 'No data to report.' 
in out assert st == 1 class FailUnderEmptyFilesTest(CoverageTest): """Test that empty files produce the proper fail_under exit status.""" def test_report(self) -> None: self.make_file(".coveragerc", "[report]\nfail_under = 99\n") self.make_file("empty.py", "") st, _ = self.run_command_status("coverage run empty.py") assert st == 0 st, _ = self.run_command_status("coverage report") # An empty file is marked as 100% covered, so this is ok. assert st == 0 @pytest.mark.skipif(env.WINDOWS, reason="Windows can't delete the directory in use.") class YankedDirectoryTest(CoverageTest): """Tests of what happens when the current directory is deleted.""" BUG_806 = """\ import os import sys import tempfile tmpdir = tempfile.mkdtemp() os.chdir(tmpdir) os.rmdir(tmpdir) print(sys.argv[1]) """ def test_removing_directory(self) -> None: self.make_file("bug806.py", self.BUG_806) out = self.run_command("coverage run bug806.py noerror") assert out == "noerror\n" def test_removing_directory_with_error(self) -> None: self.make_file("bug806.py", self.BUG_806) out = self.run_command("coverage run bug806.py") path = python_reported_file('bug806.py') # Python 3.11 adds an extra line to the traceback. # Check that the lines we expect are there. lines = textwrap.dedent(f"""\ Traceback (most recent call last): File "{path}", line 8, in print(sys.argv[1]) IndexError: list index out of range """).splitlines(keepends=True) assert all(line in out for line in lines) @pytest.mark.skipif(env.METACOV, reason="Can't test sub-process pth file during metacoverage") class ProcessStartupTest(CoverageTest): """Test that we can measure coverage in sub-processes.""" def setUp(self) -> None: super().setUp() # Main will run sub.py self.make_file("main.py", """\ import os, os.path, sys ex = os.path.basename(sys.executable) os.system(ex + " sub.py") """) # sub.py will write a few lines. self.make_file("sub.py", """\ f = open("out.txt", "w") f.write("Hello, world!\\n") f.close() """) def test_subprocess_with_pth_files(self) -> None: # An existing data file should not be read when a subprocess gets # measured automatically. Create the data file here with bogus data in # it. 
data = coverage.CoverageData(".mycovdata") data.add_lines({os.path.abspath('sub.py'): range(100)}) data.write() self.make_file("coverage.ini", """\ [run] data_file = .mycovdata """) self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") import main # pylint: disable=unused-import, import-error with open("out.txt") as f: assert f.read() == "Hello, world!\n" # Read the data from .coverage self.assert_exists(".mycovdata") data = coverage.CoverageData(".mycovdata") data.read() assert line_counts(data)['sub.py'] == 3 def test_subprocess_with_pth_files_and_parallel(self) -> None: # https://github.com/nedbat/coveragepy/issues/492 self.make_file("coverage.ini", """\ [run] parallel = true """) self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") self.run_command("coverage run main.py") with open("out.txt") as f: assert f.read() == "Hello, world!\n" self.run_command("coverage combine") # assert that the combined .coverage data file is correct self.assert_exists(".coverage") data = coverage.CoverageData() data.read() assert line_counts(data)['sub.py'] == 3 # assert that there are *no* extra data files left over after a combine data_files = glob.glob(os.getcwd() + '/.coverage*') msg = ( "Expected only .coverage after combine, looks like there are " + f"extra data files that were not cleaned up: {data_files!r}" ) assert len(data_files) == 1, msg class ProcessStartupWithSourceTest(CoverageTest): """Show that we can configure {[run]source} during process-level coverage. There are three interesting variables, for a total of eight tests: 1. -m versus a simple script argument (for example, `python myscript`), 2. filtering for the top-level (main.py) or second-level (sub.py) module, and 3. whether the files are in a package or not. """ @pytest.mark.parametrize("dashm", ["-m", ""]) @pytest.mark.parametrize("package", ["pkg", ""]) @pytest.mark.parametrize("source", ["main", "sub"]) def test_pth_and_source_work_together(self, dashm: str, package: str, source: str) -> None: """Run the test for a particular combination of factors. The arguments are all strings: * `dashm`: Either "" (run the program as a file) or "-m" (run the program as a module). * `package`: Either "" (put the source at the top level) or a package name to use to hold the source. * `source`: Either "main" or "sub", which file to use as the ``--source`` argument. """ def fullname(modname: str) -> str: """What is the full module name for `modname` for this test?""" if package and dashm: return '.'.join((package, modname)) else: return modname def path(basename: str) -> str: """Where should `basename` be created for this test?""" return os.path.join(package, basename) # Main will run sub.py. self.make_file(path("main.py"), """\ import %s a = 2 b = 3 """ % fullname('sub')) if package: self.make_file(path("__init__.py"), "") # sub.py will write a few lines. self.make_file(path("sub.py"), """\ f = open("out.txt", "w") f.write("Hello, world!") f.close() """) self.make_file("coverage.ini", """\ [run] source = %s """ % fullname(source)) self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") if dashm: cmd = "python -m %s" % fullname('main') else: cmd = "python %s" % path('main.py') self.run_command(cmd) with open("out.txt") as f: assert f.read() == "Hello, world!" 
# Read the data from .coverage self.assert_exists(".coverage") data = coverage.CoverageData() data.read() summary = line_counts(data) assert summary[source + '.py'] == 3 assert len(summary) == 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_python.py0000644000175100001770000000404500000000000020226 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests of coverage/python.py""" from __future__ import annotations import pathlib import sys import pytest from coverage import env from coverage.python import get_zip_bytes, source_for_file from tests.coveragetest import CoverageTest from tests.helpers import os_sep class GetZipBytesTest(CoverageTest): """Tests of `get_zip_bytes`.""" run_in_temp_dir = False @pytest.mark.parametrize( "encoding", ["utf-8", "gb2312", "hebrew", "shift_jis", "cp1252"], ) def test_get_encoded_zip_files(self, encoding: str) -> None: # See igor.py, do_zipmods, for the text of these files. zip_file = "tests/zipmods.zip" sys.path.append(zip_file) # So we can import the files. filename = zip_file + "/encoded_" + encoding + ".py" filename = os_sep(filename) zip_data = get_zip_bytes(filename) assert zip_data is not None zip_text = zip_data.decode(encoding) assert 'All OK' in zip_text # Run the code to see that we really got it encoded properly. mod = __import__("encoded_"+encoding) assert mod.encoding == encoding def test_source_for_file(tmp_path: pathlib.Path) -> None: src = str(tmp_path / "a.py") assert source_for_file(src) == src assert source_for_file(src + 'c') == src assert source_for_file(src + 'o') == src unknown = src + 'FOO' assert source_for_file(unknown) == unknown @pytest.mark.skipif(not env.WINDOWS, reason="not windows") def test_source_for_file_windows(tmp_path: pathlib.Path) -> None: a_py = tmp_path / "a.py" src = str(a_py) # On windows if a pyw exists, it is an acceptable source path_windows = tmp_path / "a.pyw" path_windows.write_text("") assert str(path_windows) == source_for_file(src + 'c') # If both pyw and py exist, py is preferred a_py.write_text("") assert source_for_file(src + 'c') == src ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_report.py0000644000175100001770000012417200000000000020224 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Test text-based summary reporting for coverage.py""" from __future__ import annotations import glob import io import math import os import os.path import py_compile import re import pytest import coverage from coverage import env from coverage.control import Coverage from coverage.data import CoverageData from coverage.exceptions import ConfigError, NoDataError, NotPython from coverage.files import abs_file from coverage.report import SummaryReporter from coverage.types import TConfigValueIn from tests.coveragetest import CoverageTest, TESTS_DIR, UsingModulesMixin from tests.helpers import assert_coverage_warnings class SummaryTest(UsingModulesMixin, CoverageTest): """Tests of the text summary reporting for coverage.py.""" def make_mycode(self) -> None: """Make the mycode.py file when needed.""" self.make_file("mycode.py", """\ import covmod1 import covmodzip1 a = 
1 print('done') """) def test_report(self) -> None: self.make_mycode() cov = coverage.Coverage() self.start_import_stop(cov, "mycode") assert self.stdout() == 'done\n' report = self.get_report(cov) # Name Stmts Miss Cover # ------------------------------------------------------------------ # c:/ned/coverage/tests/modules/covmod1.py 2 0 100% # c:/ned/coverage/tests/zipmods.zip/covmodzip1.py 2 0 100% # mycode.py 4 0 100% # ------------------------------------------------------------------ # TOTAL 8 0 100% assert "/coverage/__init__/" not in report assert "/tests/modules/covmod1.py " in report assert "/tests/zipmods.zip/covmodzip1.py " in report assert "mycode.py " in report assert self.last_line_squeezed(report) == "TOTAL 8 0 100%" def test_report_just_one(self) -> None: # Try reporting just one module self.make_mycode() cov = coverage.Coverage() self.start_import_stop(cov, "mycode") report = self.get_report(cov, morfs=["mycode.py"]) # Name Stmts Miss Cover # ------------------------------- # mycode.py 4 0 100% # ------------------------------- # TOTAL 4 0 100% assert self.line_count(report) == 5 assert "/coverage/" not in report assert "/tests/modules/covmod1.py " not in report assert "/tests/zipmods.zip/covmodzip1.py " not in report assert "mycode.py " in report assert self.last_line_squeezed(report) == "TOTAL 4 0 100%" def test_report_wildcard(self) -> None: # Try reporting using wildcards to get the modules. self.make_mycode() self.add_test_modules_to_pythonpath() # Wildcard is handled by shell or cmdline.py, so use real commands self.run_command("coverage run mycode.py") report = self.report_from_command("coverage report my*.py") # Name Stmts Miss Cover # ------------------------------- # mycode.py 4 0 100% # ------------------------------- # TOTAL 4 0 100% assert self.line_count(report) == 5 assert "/coverage/" not in report assert "/tests/modules/covmod1.py " not in report assert "/tests/zipmods.zip/covmodzip1.py " not in report assert "mycode.py " in report assert self.last_line_squeezed(report) == "TOTAL 4 0 100%" def test_report_omitting(self) -> None: # Try reporting while omitting some modules self.make_mycode() cov = coverage.Coverage() self.start_import_stop(cov, "mycode") report = self.get_report(cov, omit=[f"{TESTS_DIR}/*", "*/site-packages/*"]) # Name Stmts Miss Cover # ------------------------------- # mycode.py 4 0 100% # ------------------------------- # TOTAL 4 0 100% assert self.line_count(report) == 5 assert "/coverage/" not in report assert "/tests/modules/covmod1.py " not in report assert "/tests/zipmods.zip/covmodzip1.py " not in report assert "mycode.py " in report assert self.last_line_squeezed(report) == "TOTAL 4 0 100%" def test_report_including(self) -> None: # Try reporting while including some modules self.make_mycode() cov = coverage.Coverage() self.start_import_stop(cov, "mycode") report = self.get_report(cov, include=["mycode*"]) # Name Stmts Miss Cover # ------------------------------- # mycode.py 4 0 100% # ------------------------------- # TOTAL 4 0 100% assert self.line_count(report) == 5 assert "/coverage/" not in report assert "/tests/modules/covmod1.py " not in report assert "/tests/zipmods.zip/covmodzip1.py " not in report assert "mycode.py " in report assert self.last_line_squeezed(report) == "TOTAL 4 0 100%" def test_report_include_relative_files_and_path(self) -> None: """ Test that when relative_files is True and a relative path to a module is included, coverage is reported for the module. 
Ref: https://github.com/nedbat/coveragepy/issues/1604 """ self.make_mycode() self.make_file(".coveragerc", """\ [run] relative_files = true """) self.make_file("submodule/mycode.py", "import mycode") cov = coverage.Coverage() self.start_import_stop(cov, "submodule/mycode") report = self.get_report(cov, include="submodule/mycode.py") # Name Stmts Miss Cover # --------------------------------------- # submodule/mycode.py 1 0 100% # --------------------------------------- # TOTAL 1 0 100% assert "submodule/mycode.py " in report assert self.last_line_squeezed(report) == "TOTAL 1 0 100%" def test_report_include_relative_files_and_wildcard_path(self) -> None: self.make_mycode() self.make_file(".coveragerc", """\ [run] relative_files = true """) self.make_file("submodule/mycode.py", "import nested.submodule.mycode") self.make_file("nested/submodule/mycode.py", "import mycode") cov = coverage.Coverage() self.start_import_stop(cov, "submodule/mycode") report = self.get_report(cov, include="*/submodule/mycode.py") # Name Stmts Miss Cover # ------------------------------------------------- # nested/submodule/mycode.py 1 0 100% # submodule/mycode.py 1 0 100% # ------------------------------------------------- # TOTAL 2 0 100% reported_files = [line.split()[0] for line in report.splitlines()[2:4]] assert reported_files == [ "nested/submodule/mycode.py", "submodule/mycode.py", ] def test_omit_files_here(self) -> None: # https://github.com/nedbat/coveragepy/issues/1407 self.make_file("foo.py", "") self.make_file("bar/bar.py", "") self.make_file("tests/test_baz.py", """\ def test_foo(): assert True test_foo() """) self.run_command("coverage run --source=. --omit='./*.py' -m tests.test_baz") report = self.report_from_command("coverage report") # Name Stmts Miss Cover # --------------------------------------- # tests/test_baz.py 3 0 100% # --------------------------------------- # TOTAL 3 0 100% assert self.line_count(report) == 5 assert "foo" not in report assert "bar" not in report assert "tests/test_baz.py" in report assert self.last_line_squeezed(report) == "TOTAL 3 0 100%" def test_run_source_vs_report_include(self) -> None: # https://github.com/nedbat/coveragepy/issues/621 self.make_file(".coveragerc", """\ [run] source = . [report] include = mod/*,tests/* """) # It should be OK to use that configuration. cov = coverage.Coverage() with self.assert_warnings(cov, []): with cov.collect(): pass def test_run_omit_vs_report_omit(self) -> None: # https://github.com/nedbat/coveragepy/issues/622 # report:omit shouldn't clobber run:omit. self.make_mycode() self.make_file(".coveragerc", """\ [run] omit = */covmodzip1.py [report] omit = */covmod1.py """) self.add_test_modules_to_pythonpath() self.run_command("coverage run mycode.py") # Read the data written, to see that the right files have been omitted from running. 
covdata = CoverageData() covdata.read() files = [os.path.basename(p) for p in covdata.measured_files()] assert "covmod1.py" in files assert "covmodzip1.py" not in files def test_report_branches(self) -> None: self.make_file("mybranch.py", """\ def branch(x): if x: print("x") return x branch(1) """) cov = coverage.Coverage(source=["."], branch=True) self.start_import_stop(cov, "mybranch") assert self.stdout() == 'x\n' report = self.get_report(cov) # Name Stmts Miss Branch BrPart Cover # ----------------------------------------------- # mybranch.py 5 0 2 1 86% # ----------------------------------------------- # TOTAL 5 0 2 1 86% assert self.line_count(report) == 5 assert "mybranch.py " in report assert self.last_line_squeezed(report) == "TOTAL 5 0 2 1 86%" def test_report_show_missing(self) -> None: self.make_file("mymissing.py", """\ def missing(x, y): if x: print("x") return x if y: print("y") try: print("z") 1/0 print("Never!") except ZeroDivisionError: pass return x missing(0, 1) """) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "mymissing") assert self.stdout() == 'y\nz\n' report = self.get_report(cov, show_missing=True) # Name Stmts Miss Cover Missing # -------------------------------------------- # mymissing.py 14 3 79% 3-4, 10 # -------------------------------------------- # TOTAL 14 3 79% assert self.line_count(report) == 5 squeezed = self.squeezed_lines(report) assert squeezed[2] == "mymissing.py 14 3 79% 3-4, 10" assert squeezed[4] == "TOTAL 14 3 79%" def test_report_show_missing_branches(self) -> None: self.make_file("mybranch.py", """\ def branch(x, y): if x: print("x") if y: print("y") branch(1, 1) """) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "mybranch") assert self.stdout() == 'x\ny\n' report = self.get_report(cov, show_missing=True) # Name Stmts Miss Branch BrPart Cover Missing # ---------------------------------------------------------- # mybranch.py 6 0 4 2 80% 2->4, 4->exit # ---------------------------------------------------------- # TOTAL 6 0 4 2 80% assert self.line_count(report) == 5 squeezed = self.squeezed_lines(report) assert squeezed[2] == "mybranch.py 6 0 4 2 80% 2->4, 4->exit" assert squeezed[4] == "TOTAL 6 0 4 2 80%" def test_report_show_missing_branches_and_lines(self) -> None: self.make_file("main.py", """\ import mybranch """) self.make_file("mybranch.py", """\ def branch(x, y, z): if x: print("x") if y: print("y") if z: if x and y: print("z") return x branch(1, 1, 0) """) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "main") assert self.stdout() == 'x\ny\n' report_lines = self.get_report(cov, squeeze=False, show_missing=True).splitlines() expected = [ 'Name Stmts Miss Branch BrPart Cover Missing', '---------------------------------------------------------', 'main.py 1 0 0 0 100%', 'mybranch.py 10 2 8 3 61% 2->4, 4->6, 7-8', '---------------------------------------------------------', 'TOTAL 11 2 8 3 63%', ] assert expected == report_lines def test_report_skip_covered_no_branches(self) -> None: self.make_file("main.py", """\ import not_covered def normal(): print("z") normal() """) self.make_file("not_covered.py", """\ def not_covered(): print("n") """) # --fail-under is handled by cmdline.py, use real commands. 
out = self.run_command("coverage run main.py") assert out == "z\n" report = self.report_from_command("coverage report --skip-covered --fail-under=70") # Name Stmts Miss Cover # ------------------------------------ # not_covered.py 2 1 50% # ------------------------------------ # TOTAL 6 1 83% # # 1 file skipped due to complete coverage. assert self.line_count(report) == 7, report squeezed = self.squeezed_lines(report) assert squeezed[2] == "not_covered.py 2 1 50%" assert squeezed[4] == "TOTAL 6 1 83%" assert squeezed[6] == "1 file skipped due to complete coverage." assert self.last_command_status == 0 def test_report_skip_covered_branches(self) -> None: self.make_file("main.py", """\ import not_covered, covered def normal(z): if z: print("z") normal(True) normal(False) """) self.make_file("not_covered.py", """\ def not_covered(n): if n: print("n") not_covered(True) """) self.make_file("covered.py", """\ def foo(): pass foo() """) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "main") assert self.stdout() == "n\nz\n" report = self.get_report(cov, skip_covered=True) # Name Stmts Miss Branch BrPart Cover # -------------------------------------------------- # not_covered.py 4 0 2 1 83% # -------------------------------------------------- # TOTAL 13 0 4 1 94% # # 2 files skipped due to complete coverage. assert self.line_count(report) == 7, report squeezed = self.squeezed_lines(report) assert squeezed[2] == "not_covered.py 4 0 2 1 83%" assert squeezed[4] == "TOTAL 13 0 4 1 94%" assert squeezed[6] == "2 files skipped due to complete coverage." def test_report_skip_covered_branches_with_totals(self) -> None: self.make_file("main.py", """\ import not_covered import also_not_run def normal(z): if z: print("z") normal(True) normal(False) """) self.make_file("not_covered.py", """\ def not_covered(n): if n: print("n") not_covered(True) """) self.make_file("also_not_run.py", """\ def does_not_appear_in_this_film(ni): print("Ni!") """) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "main") assert self.stdout() == "n\nz\n" report = self.get_report(cov, skip_covered=True) # Name Stmts Miss Branch BrPart Cover # -------------------------------------------------- # also_not_run.py 2 1 0 0 50% # not_covered.py 4 0 2 1 83% # -------------------------------------------------- # TOTAL 13 1 4 1 88% # # 1 file skipped due to complete coverage. assert self.line_count(report) == 8, report squeezed = self.squeezed_lines(report) assert squeezed[2] == "also_not_run.py 2 1 0 0 50%" assert squeezed[3] == "not_covered.py 4 0 2 1 83%" assert squeezed[5] == "TOTAL 13 1 4 1 88%" assert squeezed[7] == "1 file skipped due to complete coverage." def test_report_skip_covered_all_files_covered(self) -> None: self.make_file("main.py", """\ def foo(): pass foo() """) cov = coverage.Coverage(source=["."], branch=True) self.start_import_stop(cov, "main") assert self.stdout() == "" report = self.get_report(cov, skip_covered=True) # Name Stmts Miss Branch BrPart Cover # ----------------------------------------- # TOTAL 3 0 0 0 100% # # 1 file skipped due to complete coverage. assert self.line_count(report) == 5, report squeezed = self.squeezed_lines(report) assert squeezed[4] == "1 file skipped due to complete coverage." 
report = self.get_report(cov, squeeze=False, skip_covered=True, output_format="markdown") # | Name | Stmts | Miss | Branch | BrPart | Cover | # |---------- | -------: | -------: | -------: | -------: | -------: | # | **TOTAL** | **3** | **0** | **0** | **0** | **100%** | # # 1 file skipped due to complete coverage. assert self.line_count(report) == 5, report assert report.split("\n")[0] == ( '| Name | Stmts | Miss | Branch | BrPart | Cover |' ) assert report.split("\n")[1] == ( '|---------- | -------: | -------: | -------: | -------: | -------: |' ) assert report.split("\n")[2] == ( '| **TOTAL** | **3** | **0** | **0** | **0** | **100%** |' ) squeezed = self.squeezed_lines(report) assert squeezed[4] == "1 file skipped due to complete coverage." total = self.get_report(cov, output_format="total", skip_covered=True) assert total == "100\n" def test_report_skip_covered_longfilename(self) -> None: self.make_file("long_______________filename.py", """\ def foo(): pass foo() """) cov = coverage.Coverage(source=["."], branch=True) self.start_import_stop(cov, "long_______________filename") assert self.stdout() == "" report = self.get_report(cov, squeeze=False, skip_covered=True) # Name Stmts Miss Branch BrPart Cover # ----------------------------------------- # TOTAL 3 0 0 0 100% # # 1 file skipped due to complete coverage. assert self.line_count(report) == 5, report lines = self.report_lines(report) assert lines[0] == "Name Stmts Miss Branch BrPart Cover" squeezed = self.squeezed_lines(report) assert squeezed[4] == "1 file skipped due to complete coverage." def test_report_skip_covered_no_data(self) -> None: cov = coverage.Coverage() cov.load() with pytest.raises(NoDataError, match="No data to report."): self.get_report(cov, skip_covered=True) self.assert_doesnt_exist(".coverage") def test_report_skip_empty(self) -> None: self.make_file("main.py", """\ import submodule def normal(): print("z") normal() """) self.make_file("submodule/__init__.py", "") cov = coverage.Coverage() self.start_import_stop(cov, "main") assert self.stdout() == "z\n" report = self.get_report(cov, skip_empty=True) # Name Stmts Miss Cover # ------------------------------------ # main.py 4 0 100% # ------------------------------------ # TOTAL 4 0 100% # # 1 empty file skipped. assert self.line_count(report) == 7, report squeezed = self.squeezed_lines(report) assert squeezed[2] == "main.py 4 0 100%" assert squeezed[4] == "TOTAL 4 0 100%" assert squeezed[6] == "1 empty file skipped." def test_report_skip_empty_no_data(self) -> None: self.make_file("__init__.py", "") cov = coverage.Coverage() self.start_import_stop(cov, "__init__") assert self.stdout() == "" report = self.get_report(cov, skip_empty=True) # Name Stmts Miss Cover # ------------------------------------ # TOTAL 0 0 100% # # 1 empty file skipped. assert self.line_count(report) == 5, report assert report.split("\n")[2] == "TOTAL 0 0 100%" assert report.split("\n")[4] == "1 empty file skipped." 
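# --- Illustrative sketch: hedged usage example, not part of coverage.py's ----
# own test suite.  The tests above drive Coverage.report() through its text,
# markdown, and "total" output formats with skip_covered/skip_empty.  The
# sketch below assumes a .coverage data file has already been written for a
# hypothetical project; it only shows the reporting calls those tests exercise.
def _example_report_usage() -> None:
    """Demo only: render a text report and print the bare total percentage."""
    import coverage

    cov = coverage.Coverage()
    cov.load()  # read the existing .coverage data file
    # report() writes the text table to stdout and returns the total
    # coverage percentage as a float.
    total = cov.report(skip_covered=True, skip_empty=True, show_missing=True)
    # The "total" output format used in the tests prints this same number,
    # rounded to the configured precision.
    print(f"total coverage: {total:.2f}%")
# ------------------------------------------------------------------------------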
def test_report_precision(self) -> None: self.make_file(".coveragerc", """\ [report] precision = 3 omit = */site-packages/* """) self.make_file("main.py", """\ import not_covered, covered def normal(z): if z: print("z") normal(True) normal(False) """) self.make_file("not_covered.py", """\ def not_covered(n): if n: print("n") not_covered(True) """) self.make_file("covered.py", """\ def foo(): pass foo() """) cov = coverage.Coverage(branch=True) self.start_import_stop(cov, "main") assert self.stdout() == "n\nz\n" report = self.get_report(cov, squeeze=False) # Name Stmts Miss Branch BrPart Cover # ------------------------------------------------------ # covered.py 3 0 0 0 100.000% # main.py 6 0 2 0 100.000% # not_covered.py 4 0 2 1 83.333% # ------------------------------------------------------ # TOTAL 13 0 4 1 94.118% assert self.line_count(report) == 7, report squeezed = self.squeezed_lines(report) assert squeezed[2] == "covered.py 3 0 0 0 100.000%" assert squeezed[4] == "not_covered.py 4 0 2 1 83.333%" assert squeezed[6] == "TOTAL 13 0 4 1 94.118%" def test_report_precision_all_zero(self) -> None: self.make_file("not_covered.py", """\ def not_covered(n): if n: print("n") """) self.make_file("empty.py", "") cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "empty") report = self.get_report(cov, precision=6, squeeze=False) # Name Stmts Miss Cover # ----------------------------------------- # empty.py 0 0 100.000000% # not_covered.py 3 3 0.000000% # ----------------------------------------- # TOTAL 3 3 0.000000% assert self.line_count(report) == 6, report assert "empty.py 0 0 100.000000%" in report assert "not_covered.py 3 3 0.000000%" in report assert "TOTAL 3 3 0.000000%" in report def test_dotpy_not_python(self) -> None: # We run a .py file, and when reporting, we can't parse it as Python. # We should get an error message in the report. self.make_data_file(lines={"mycode.py": [1]}) self.make_file("mycode.py", "This isn't python at all!") cov = coverage.Coverage() cov.load() msg = r"Couldn't parse '.*[/\\]mycode.py' as Python source: '.*' at line 1" with pytest.raises(NotPython, match=msg): self.get_report(cov, morfs=["mycode.py"]) def test_accented_directory(self) -> None: # Make a file with a non-ascii character in the directory name. self.make_file("\xe2/accented.py", "print('accented')") self.make_data_file(lines={abs_file("\xe2/accented.py"): [1]}) report_expected = ( "Name Stmts Miss Cover\n" + "-----------------------------------\n" + "\xe2/accented.py 1 0 100%\n" + "-----------------------------------\n" + "TOTAL 1 0 100%\n" ) cov = coverage.Coverage() cov.load() output = self.get_report(cov, squeeze=False) assert output == report_expected def test_accenteddotpy_not_python(self) -> None: # We run a .py file with a non-ascii name, and when reporting, we can't # parse it as Python. We should get an error message in the report. self.make_data_file(lines={"accented\xe2.py": [1]}) self.make_file("accented\xe2.py", "This isn't python at all!") cov = coverage.Coverage() cov.load() msg = r"Couldn't parse '.*[/\\]accented\xe2.py' as Python source: '.*' at line 1" with pytest.raises(NotPython, match=msg): self.get_report(cov, morfs=["accented\xe2.py"]) def test_dotpy_not_python_ignored(self) -> None: # We run a .py file, and when reporting, we can't parse it as Python, # but we've said to ignore errors, so there's no error reported, # though we still get a warning. 
self.make_file("mycode.py", "This isn't python at all!") self.make_data_file(lines={"mycode.py": [1]}) cov = coverage.Coverage() cov.load() with pytest.raises(NoDataError, match="No data to report."): with pytest.warns(Warning) as warns: self.get_report(cov, morfs=["mycode.py"], ignore_errors=True) assert_coverage_warnings( warns, re.compile(r"Couldn't parse Python file '.*[/\\]mycode.py' \(couldnt-parse\)"), ) def test_dothtml_not_python(self) -> None: # We run a .html file, and when reporting, we can't parse it as # Python. Since it wasn't .py, no error is reported. # Pretend to run an html file. self.make_file("mycode.html", "
This isn't python at all!
    ") self.make_data_file(lines={"mycode.html": [1]}) cov = coverage.Coverage() cov.load() with pytest.raises(NoDataError, match="No data to report."): self.get_report(cov, morfs=["mycode.html"]) def test_report_no_extension(self) -> None: self.make_file("xxx", """\ # This is a python file though it doesn't look like it, like a main script. a = b = c = d = 0 a = 3 b = 4 if not b: c = 6 d = 7 print(f"xxx: {a} {b} {c} {d}") """) self.make_data_file(lines={abs_file("xxx"): [2, 3, 4, 5, 7, 8]}) cov = coverage.Coverage() cov.load() report = self.get_report(cov) assert self.last_line_squeezed(report) == "TOTAL 7 1 86%" def test_report_with_chdir(self) -> None: self.make_file("chdir.py", """\ import os print("Line One") os.chdir("subdir") print("Line Two") print(open("something").read()) """) self.make_file("subdir/something", "hello") out = self.run_command("coverage run --source=. chdir.py") assert out == "Line One\nLine Two\nhello\n" report = self.report_from_command("coverage report") assert self.last_line_squeezed(report) == "TOTAL 5 0 100%" report = self.report_from_command("coverage report --format=markdown") assert self.last_line_squeezed(report) == "| **TOTAL** | **5** | **0** | **100%** |" def test_bug_156_file_not_run_should_be_zero(self) -> None: # https://github.com/nedbat/coveragepy/issues/156 self.make_file("mybranch.py", """\ def branch(x): if x: print("x") return x branch(1) """) self.make_file("main.py", """\ print("y") """) cov = coverage.Coverage(branch=True, source=["."]) self.start_import_stop(cov, "main") report = self.get_report(cov).splitlines() assert "mybranch.py 5 5 2 0 0%" in report def run_TheCode_and_report_it(self) -> str: """A helper for the next few tests.""" cov = coverage.Coverage() self.start_import_stop(cov, "TheCode") return self.get_report(cov) def test_bug_203_mixed_case_listed_twice_with_rc(self) -> None: self.make_file("TheCode.py", "a = 1\n") self.make_file(".coveragerc", "[run]\nsource = .\n") report = self.run_TheCode_and_report_it() assert "TheCode" in report assert "thecode" not in report def test_bug_203_mixed_case_listed_twice(self) -> None: self.make_file("TheCode.py", "a = 1\n") report = self.run_TheCode_and_report_it() assert "TheCode" in report assert "thecode" not in report @pytest.mark.skipif(not env.WINDOWS, reason=".pyw files are only on Windows.") def test_pyw_files(self) -> None: # https://github.com/nedbat/coveragepy/issues/261 self.make_file("start.pyw", """\ import mod print("In start.pyw") """) self.make_file("mod.pyw", """\ print("In mod.pyw") """) cov = coverage.Coverage() # start_import_stop can't import the .pyw file, so use the long form. with cov.collect(): import start # pylint: disable=import-error, unused-import report = self.get_report(cov) assert "NoSource" not in report report_lines = report.splitlines() assert "start.pyw 2 0 100%" in report_lines assert "mod.pyw 1 0 100%" in report_lines def test_tracing_pyc_file(self) -> None: # Create two Python files. self.make_file("mod.py", "a = 1\n") self.make_file("main.py", "import mod\n") # Make one into a .pyc. py_compile.compile("mod.py") # Run the program. 
cov = coverage.Coverage() self.start_import_stop(cov, "main") report_lines = self.get_report(cov).splitlines() assert "mod.py 1 0 100%" in report_lines report = self.get_report(cov, squeeze=False, output_format="markdown") assert report.split("\n")[3] == "| mod.py | 1 | 0 | 100% |" assert report.split("\n")[4] == "| **TOTAL** | **2** | **0** | **100%** |" def test_missing_py_file_during_run(self) -> None: # Create two Python files. self.make_file("mod.py", "a = 1\n") self.make_file("main.py", "import mod\n") # Make one into a .pyc, and remove the .py. py_compile.compile("mod.py") os.remove("mod.py") # Python 3 puts the .pyc files in a __pycache__ directory, and will # not import from there without source. It will import a .pyc from # the source location though. pycs = glob.glob("__pycache__/mod.*.pyc") assert len(pycs) == 1 os.rename(pycs[0], "mod.pyc") # Run the program. cov = coverage.Coverage() self.start_import_stop(cov, "main") # Put back the missing Python file. self.make_file("mod.py", "a = 1\n") report = self.get_report(cov).splitlines() assert "mod.py 1 0 100%" in report def test_empty_files(self) -> None: # Shows that empty files like __init__.py are listed as having zero # statements, not one statement. cov = coverage.Coverage(branch=True) with cov.collect(): import usepkgs # pylint: disable=import-error, unused-import report = self.get_report(cov) assert "tests/modules/pkg1/__init__.py 1 0 0 0 100%" in report assert "tests/modules/pkg2/__init__.py 0 0 0 0 100%" in report report = self.get_report(cov, squeeze=False, output_format="markdown") # get_report() escapes backslash so we expect forward slash escaped # underscore assert "tests/modules/pkg1//_/_init/_/_.py " in report assert "| 1 | 0 | 0 | 0 | 100% |" in report assert "tests/modules/pkg2//_/_init/_/_.py " in report assert "| 0 | 0 | 0 | 0 | 100% |" in report def test_markdown_with_missing(self) -> None: self.make_file("mymissing.py", """\ def missing(x, y): if x: print("x") return x if y: print("y") try: print("z") 1/0 print("Never!") except ZeroDivisionError: pass return x missing(0, 1) """) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "mymissing") assert self.stdout() == 'y\nz\n' report = self.get_report(cov, squeeze=False, output_format="markdown", show_missing=True) # | Name | Stmts | Miss | Cover | Missing | # |------------- | -------: | -------: | ------: | --------: | # | mymissing.py | 14 | 3 | 79% | 3-4, 10 | # | **TOTAL** | **14** | **3** | **79%** | | assert self.line_count(report) == 4 report_lines = report.split("\n") assert report_lines[2] == "| mymissing.py | 14 | 3 | 79% | 3-4, 10 |" assert report_lines[3] == "| **TOTAL** | **14** | **3** | **79%** | |" assert self.get_report(cov, output_format="total") == "79\n" assert self.get_report(cov, output_format="total", precision=2) == "78.57\n" assert self.get_report(cov, output_format="total", precision=4) == "78.5714\n" def test_bug_1524(self) -> None: self.make_file("bug1524.py", """\ class Mine: @property def thing(self) -> int: return 17 print(Mine().thing) """) cov = coverage.Coverage() self.start_import_stop(cov, "bug1524") assert self.stdout() == "17\n" report = self.get_report(cov) report_lines = report.splitlines() assert report_lines[2] == "bug1524.py 5 0 100%" class ReportingReturnValueTest(CoverageTest): """Tests of reporting functions returning values.""" def run_coverage(self) -> Coverage: """Run coverage on doit.py and return the coverage object.""" self.make_file("doit.py", """\ a = 1 b = 2 c = 3 d = 4 if a > 10: f = 6 g = 7 
""") cov = coverage.Coverage() self.start_import_stop(cov, "doit") return cov def test_report(self) -> None: cov = self.run_coverage() val = cov.report(include="*/doit.py") assert math.isclose(val, 6 / 7 * 100) def test_html(self) -> None: cov = self.run_coverage() val = cov.html_report(include="*/doit.py") assert math.isclose(val, 6 / 7 * 100) def test_xml(self) -> None: cov = self.run_coverage() val = cov.xml_report(include="*/doit.py") assert math.isclose(val, 6 / 7 * 100) class SummaryReporterConfigurationTest(CoverageTest): """Tests of SummaryReporter.""" def make_rigged_file(self, filename: str, stmts: int, miss: int) -> None: """Create a file that will have specific results. `stmts` and `miss` are ints, the number of statements, and missed statements that should result. """ run = stmts - miss - 1 dont_run = miss source = "" source += "a = 1\n" * run source += "if a == 99:\n" source += " a = 2\n" * dont_run self.make_file(filename, source) def get_summary_text(self, *options: tuple[str, TConfigValueIn]) -> str: """Get text output from the SummaryReporter. The arguments are tuples: (name, value) for Coverage.set_option. """ self.make_rigged_file("file1.py", 339, 155) self.make_rigged_file("file2.py", 13, 3) self.make_rigged_file("file10.py", 234, 228) self.make_file("doit.py", "import file1, file2, file10") cov = Coverage(source=["."], omit=["doit.py"]) self.start_import_stop(cov, "doit") for name, value in options: cov.set_option(name, value) printer = SummaryReporter(cov) destination = io.StringIO() printer.report([], destination) return destination.getvalue() def test_test_data(self) -> None: # We use our own test files as test data. Check that our assumptions # about them are still valid. We want the three columns of numbers to # sort in three different orders. report = self.get_summary_text() # Name Stmts Miss Cover # ------------------------------ # file1.py 339 155 54% # file2.py 13 3 77% # file10.py 234 228 3% # ------------------------------ # TOTAL 586 386 34% lines = report.splitlines()[2:-2] assert len(lines) == 3 nums = [list(map(int, l.replace('%', '').split()[1:])) for l in lines] # [ # [339, 155, 54], # [ 13, 3, 77], # [234, 228, 3] # ] assert nums[1][0] < nums[2][0] < nums[0][0] assert nums[1][1] < nums[0][1] < nums[2][1] assert nums[2][2] < nums[0][2] < nums[1][2] def test_defaults(self) -> None: """Run the report with no configuration options.""" report = self.get_summary_text() assert 'Missing' not in report assert 'Branch' not in report def test_print_missing(self) -> None: """Run the report printing the missing lines.""" report = self.get_summary_text(('report:show_missing', True)) assert 'Missing' in report assert 'Branch' not in report def assert_ordering(self, text: str, *words: str) -> None: """Assert that the `words` appear in order in `text`.""" indexes = list(map(text.find, words)) assert -1 not in indexes msg = f"The words {words!r} don't appear in order in {text!r}" assert indexes == sorted(indexes), msg def test_default_sort_report(self) -> None: # Sort the text report by the default (Name) column. report = self.get_summary_text() self.assert_ordering(report, "file1.py", "file2.py", "file10.py") def test_sort_report_by_name(self) -> None: # Sort the text report explicitly by the Name column. report = self.get_summary_text(('report:sort', 'Name')) self.assert_ordering(report, "file1.py", "file2.py", "file10.py") def test_sort_report_by_stmts(self) -> None: # Sort the text report by the Stmts column. 
report = self.get_summary_text(('report:sort', 'Stmts')) self.assert_ordering(report, "file2.py", "file10.py", "file1.py") def test_sort_report_by_missing(self) -> None: # Sort the text report by the Missing column. report = self.get_summary_text(('report:sort', 'Miss')) self.assert_ordering(report, "file2.py", "file1.py", "file10.py") def test_sort_report_by_cover(self) -> None: # Sort the text report by the Cover column. report = self.get_summary_text(('report:sort', 'Cover')) self.assert_ordering(report, "file10.py", "file1.py", "file2.py") def test_sort_report_by_cover_plus(self) -> None: # Sort the text report by the Cover column, including the explicit + sign. report = self.get_summary_text(('report:sort', '+Cover')) self.assert_ordering(report, "file10.py", "file1.py", "file2.py") def test_sort_report_by_cover_reversed(self) -> None: # Sort the text report by the Cover column reversed. report = self.get_summary_text(('report:sort', '-Cover')) self.assert_ordering(report, "file2.py", "file1.py", "file10.py") def test_sort_report_by_invalid_option(self) -> None: # Sort the text report by a nonsense column. msg = "Invalid sorting option: 'Xyzzy'" with pytest.raises(ConfigError, match=msg): self.get_summary_text(('report:sort', 'Xyzzy')) def test_report_with_invalid_format(self) -> None: # Ask for an invalid format. msg = "Unknown report format choice: 'xyzzy'" with pytest.raises(ConfigError, match=msg): self.get_summary_text(('report:format', 'xyzzy')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_report_common.py0000644000175100001770000002432000000000000021566 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests of behavior common to all reporting.""" from __future__ import annotations import textwrap import coverage from coverage.files import abs_file from tests.coveragetest import CoverageTest from tests.goldtest import contains, doesnt_contain from tests.helpers import arcz_to_arcs, os_sep class ReportMapsPathsTest(CoverageTest): """Check that reporting implicitly maps paths.""" def make_files(self, data: str, settings: bool = False) -> None: """Create the test files we need for line coverage.""" src = """\ if VER == 1: print("line 2") if VER == 2: print("line 4") if VER == 3: print("line 6") """ self.make_file("src/program.py", src) self.make_file("ver1/program.py", src) self.make_file("ver2/program.py", src) if data == "line": self.make_data_file( lines={ abs_file("ver1/program.py"): [1, 2, 3, 5], abs_file("ver2/program.py"): [1, 3, 4, 5], }, ) else: self.make_data_file( arcs={ abs_file("ver1/program.py"): arcz_to_arcs(".1 12 23 35 5."), abs_file("ver2/program.py"): arcz_to_arcs(".1 13 34 45 5."), }, ) if settings: self.make_file(".coveragerc", """\ [paths] source = src ver1 ver2 """) def test_map_paths_during_line_report_without_setting(self) -> None: self.make_files(data="line") cov = coverage.Coverage() cov.load() cov.report(show_missing=True) expected = textwrap.dedent(os_sep("""\ Name Stmts Miss Cover Missing ----------------------------------------------- ver1/program.py 6 2 67% 4, 6 ver2/program.py 6 2 67% 2, 6 ----------------------------------------------- TOTAL 12 4 67% """)) assert expected == self.stdout() def test_map_paths_during_line_report(self) -> None: self.make_files(data="line", settings=True) cov = coverage.Coverage() cov.load() 
cov.report(show_missing=True) expected = textwrap.dedent(os_sep("""\ Name Stmts Miss Cover Missing ---------------------------------------------- src/program.py 6 1 83% 6 ---------------------------------------------- TOTAL 6 1 83% """)) assert expected == self.stdout() def test_map_paths_during_branch_report_without_setting(self) -> None: self.make_files(data="arcs") cov = coverage.Coverage(branch=True) cov.load() cov.report(show_missing=True) expected = textwrap.dedent(os_sep("""\ Name Stmts Miss Branch BrPart Cover Missing ------------------------------------------------------------- ver1/program.py 6 2 6 3 58% 1->3, 4, 6 ver2/program.py 6 2 6 3 58% 2, 3->5, 6 ------------------------------------------------------------- TOTAL 12 4 12 6 58% """)) assert expected == self.stdout() def test_map_paths_during_branch_report(self) -> None: self.make_files(data="arcs", settings=True) cov = coverage.Coverage(branch=True) cov.load() cov.report(show_missing=True) expected = textwrap.dedent(os_sep("""\ Name Stmts Miss Branch BrPart Cover Missing ------------------------------------------------------------ src/program.py 6 1 6 1 83% 6 ------------------------------------------------------------ TOTAL 6 1 6 1 83% """)) assert expected == self.stdout() def test_map_paths_during_annotate(self) -> None: self.make_files(data="line", settings=True) cov = coverage.Coverage() cov.load() cov.annotate() self.assert_exists(os_sep("src/program.py,cover")) self.assert_doesnt_exist(os_sep("ver1/program.py,cover")) self.assert_doesnt_exist(os_sep("ver2/program.py,cover")) def test_map_paths_during_html_report(self) -> None: self.make_files(data="line", settings=True) cov = coverage.Coverage() cov.load() cov.html_report() contains("htmlcov/index.html", os_sep("src/program.py")) doesnt_contain("htmlcov/index.html", os_sep("ver1/program.py"), os_sep("ver2/program.py")) def test_map_paths_during_xml_report(self) -> None: self.make_files(data="line", settings=True) cov = coverage.Coverage() cov.load() cov.xml_report() contains("coverage.xml", "src/program.py") doesnt_contain("coverage.xml", "ver1/program.py", "ver2/program.py") def test_map_paths_during_json_report(self) -> None: self.make_files(data="line", settings=True) cov = coverage.Coverage() cov.load() cov.json_report() def os_sepj(s: str) -> str: return os_sep(s).replace("\\", r"\\") contains("coverage.json", os_sepj("src/program.py")) doesnt_contain("coverage.json", os_sepj("ver1/program.py"), os_sepj("ver2/program.py")) def test_map_paths_during_lcov_report(self) -> None: self.make_files(data="line", settings=True) cov = coverage.Coverage() cov.load() cov.lcov_report() contains("coverage.lcov", os_sep("src/program.py")) doesnt_contain("coverage.lcov", os_sep("ver1/program.py"), os_sep("ver2/program.py")) class ReportWithJinjaTest(CoverageTest): """Tests of Jinja-like behavior. Jinja2 compiles a template into Python code, and then runs the Python code to render the template. But during rendering, it uses the template name (for example, "template.j2") as the file name, not the Python code file name. Then during reporting, we will try to parse template.j2 as Python code. If the file can be parsed, it's included in the report (as a Python file!). If it can't be parsed, then it's not included in the report. These tests confirm that code doesn't raise an exception (as reported in #1553), and that the current (incorrect) behavior remains stable. Ideally, good.j2 wouldn't be listed at all, since we can't report on it accurately. 
See https://github.com/nedbat/coveragepy/issues/1553 for more detail, and https://github.com/nedbat/coveragepy/issues/1623 for an issue about this behavior. """ def make_files(self) -> None: """Create test files: two Jinja templates, and data from rendering them.""" # A Jinja2 file that is syntactically acceptable Python (though it wont run). self.make_file("good.j2", """\ {{ data }} line2 line3 """) # A Jinja2 file that is a Python syntax error. self.make_file("bad.j2", """\ This is data: {{ data }}. line 2 line 3 """) self.make_data_file( lines={ abs_file("good.j2"): [1, 3, 5, 7, 9], abs_file("bad.j2"): [1, 3, 5, 7, 9], }, ) def test_report(self) -> None: self.make_files() cov = coverage.Coverage() cov.load() cov.report(show_missing=True) expected = textwrap.dedent("""\ Name Stmts Miss Cover Missing --------------------------------------- good.j2 3 1 67% 2 --------------------------------------- TOTAL 3 1 67% """) assert expected == self.stdout() def test_html(self) -> None: self.make_files() cov = coverage.Coverage() cov.load() cov.html_report() contains("htmlcov/index.html", """\ good.j2 3 1 0 67% """, ) doesnt_contain("htmlcov/index.html", "bad.j2") def test_xml(self) -> None: self.make_files() cov = coverage.Coverage() cov.load() cov.xml_report() contains("coverage.xml", 'filename="good.j2"') contains("coverage.xml", '', '', '', ) doesnt_contain("coverage.xml", 'filename="bad.j2"') doesnt_contain("coverage.xml", ' None: self.make_files() cov = coverage.Coverage() cov.load() cov.json_report() contains("coverage.json", # Notice the .json report claims lines in good.j2 executed that # don't even exist in good.j2... '"files": {"good.j2": {"executed_lines": [1, 3, 5, 7, 9], ' + '"summary": {"covered_lines": 2, "num_statements": 3', ) doesnt_contain("coverage.json", "bad.j2") def test_lcov(self) -> None: self.make_files() cov = coverage.Coverage() cov.load() cov.lcov_report() with open("coverage.lcov") as lcov: actual = lcov.read() expected = textwrap.dedent("""\ TN: SF:good.j2 DA:1,1,FHs1rDakj9p/NAzMCu3Kgw DA:3,1,DGOyp8LEgI+3CcdFYw9uKQ DA:2,0,5iUbzxp9w7peeTPjJbvmBQ LF:3 LH:2 end_of_record """) assert expected == actual ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_report_core.py0000644000175100001770000000436000000000000021230 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for helpers in report.py""" from __future__ import annotations from typing import IO, Iterable import pytest from coverage.exceptions import CoverageException from coverage.report_core import render_report from coverage.types import TMorf from tests.coveragetest import CoverageTest class FakeReporter: """A fake implementation of a one-file reporter.""" report_type = "fake report file" def __init__(self, output: str = "", error: type[Exception] | None = None) -> None: self.output = output self.error = error self.morfs: Iterable[TMorf] | None = None def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: """Fake.""" self.morfs = morfs outfile.write(self.output) if self.error: raise self.error("You asked for it!") return 17.25 class RenderReportTest(CoverageTest): """Tests of render_report.""" def test_stdout(self) -> None: fake = FakeReporter(output="Hello!\n") msgs: list[str] = [] res = render_report("-", fake, [pytest, "coverage"], msgs.append) assert res == 17.25 
assert fake.morfs == [pytest, "coverage"] assert self.stdout() == "Hello!\n" assert not msgs def test_file(self) -> None: fake = FakeReporter(output="Grรฉรจtings!\n") msgs: list[str] = [] res = render_report("output.txt", fake, [], msgs.append) assert res == 17.25 assert self.stdout() == "" with open("output.txt", "rb") as f: assert f.read().rstrip() == b"Gr\xc3\xa9\xc3\xa8tings!" assert msgs == ["Wrote fake report file to output.txt"] @pytest.mark.parametrize("error", [CoverageException, ZeroDivisionError]) def test_exception(self, error: type[Exception]) -> None: fake = FakeReporter(error=error) msgs: list[str] = [] with pytest.raises(error, match="You asked for it!"): render_report("output.txt", fake, [], msgs.append) assert self.stdout() == "" self.assert_doesnt_exist("output.txt") assert not msgs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_results.py0000644000175100001770000001345400000000000020412 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.py's results analysis.""" from __future__ import annotations import math from typing import Iterable, cast import pytest from coverage.exceptions import ConfigError from coverage.results import format_lines, Numbers, should_fail_under from coverage.types import TLineNo from tests.coveragetest import CoverageTest class NumbersTest(CoverageTest): """Tests for coverage.py's numeric measurement summaries.""" run_in_temp_dir = False def test_basic(self) -> None: n1 = Numbers(n_files=1, n_statements=200, n_missing=20) assert n1.n_statements == 200 assert n1.n_executed == 180 assert n1.n_missing == 20 assert n1.pc_covered == 90 def test_addition(self) -> None: n1 = Numbers(n_files=1, n_statements=200, n_missing=20) n2 = Numbers(n_files=1, n_statements=10, n_missing=8) n3 = n1 + n2 assert n3.n_files == 2 assert n3.n_statements == 210 assert n3.n_executed == 182 assert n3.n_missing == 28 assert math.isclose(n3.pc_covered, 86.666666666) def test_sum(self) -> None: n1 = Numbers(n_files=1, n_statements=200, n_missing=20) n2 = Numbers(n_files=1, n_statements=10, n_missing=8) n3 = cast(Numbers, sum([n1, n2])) assert n3.n_files == 2 assert n3.n_statements == 210 assert n3.n_executed == 182 assert n3.n_missing == 28 assert math.isclose(n3.pc_covered, 86.666666666) @pytest.mark.parametrize("kwargs, res", [ (dict(n_files=1, n_statements=1000, n_missing=0), "100"), (dict(n_files=1, n_statements=1000, n_missing=1), "99"), (dict(n_files=1, n_statements=1000, n_missing=999), "1"), (dict(n_files=1, n_statements=1000, n_missing=1000), "0"), (dict(precision=1, n_files=1, n_statements=10000, n_missing=0), "100.0"), (dict(precision=1, n_files=1, n_statements=10000, n_missing=1), "99.9"), (dict(precision=1, n_files=1, n_statements=10000, n_missing=9999), "0.1"), (dict(precision=1, n_files=1, n_statements=10000, n_missing=10000), "0.0"), ]) def test_pc_covered_str(self, kwargs: dict[str, int], res: str) -> None: assert Numbers(**kwargs).pc_covered_str == res @pytest.mark.parametrize("prec, pc, res", [ (0, 47.87, "48"), (1, 47.87, "47.9"), (0, 99.995, "99"), (2, 99.99995, "99.99"), ]) def test_display_covered(self, prec: int, pc: float, res: str) -> None: assert Numbers(precision=prec).display_covered(pc) == res @pytest.mark.parametrize("prec, width", [ (0, 3), # 100 (1, 5), # 100.0 (4, 8), # 100.0000 ]) def 
test_pc_str_width(self, prec: int, width: int) -> None: assert Numbers(precision=prec).pc_str_width() == width def test_covered_ratio(self) -> None: n = Numbers(n_files=1, n_statements=200, n_missing=47) assert n.ratio_covered == (153, 200) n = Numbers( n_files=1, n_statements=200, n_missing=47, n_branches=10, n_missing_branches=3, n_partial_branches=1000, ) assert n.ratio_covered == (160, 210) @pytest.mark.parametrize("total, fail_under, precision, result", [ # fail_under==0 means anything is fine! (0, 0, 0, False), (0.001, 0, 0, False), # very small fail_under is possible to fail. (0.001, 0.01, 0, True), # Rounding should work properly. (42.1, 42, 0, False), (42.1, 43, 0, True), (42.857, 42, 0, False), (42.857, 43, 0, False), (42.857, 44, 0, True), (42.857, 42.856, 3, False), (42.857, 42.858, 3, True), # If you don't specify precision, your fail-under is rounded. (42.857, 42.856, 0, False), # Values near 100 should only be treated as 100 if they are 100. (99.8, 100, 0, True), (100.0, 100, 0, False), (99.8, 99.7, 1, False), (99.88, 99.90, 2, True), (99.999, 100, 1, True), (99.999, 100, 2, True), (99.999, 100, 3, True), ]) def test_should_fail_under(total: float, fail_under: float, precision: int, result: bool) -> None: assert should_fail_under(float(total), float(fail_under), precision) == result def test_should_fail_under_invalid_value() -> None: with pytest.raises(ConfigError, match=r"fail_under=101"): should_fail_under(100.0, 101, 0) @pytest.mark.parametrize("statements, lines, result", [ ({1,2,3,4,5,10,11,12,13,14}, {1,2,5,10,11,13,14}, "1-2, 5-11, 13-14"), ([1,2,3,4,5,10,11,12,13,14,98,99], [1,2,5,10,11,13,14,99], "1-2, 5-11, 13-14, 99"), ([1,2,3,4,98,99,100,101,102,103,104], [1,2,99,102,103,104], "1-2, 99, 102-104"), ([17], [17], "17"), ([90,91,92,93,94,95], [90,91,92,93,94,95], "90-95"), ([1, 2, 3, 4, 5], [], ""), ([1, 2, 3, 4, 5], [4], "4"), ]) def test_format_lines( statements: Iterable[TLineNo], lines: Iterable[TLineNo], result: str, ) -> None: assert format_lines(statements, lines) == result @pytest.mark.parametrize("statements, lines, arcs, result", [ ( {1,2,3,4,5,10,11,12,13,14}, {1,2,5,10,11,13,14}, (), "1-2, 5-11, 13-14", ), ( [1,2,3,4,5,10,11,12,13,14,98,99], [1,2,5,10,11,13,14,99], [(3, [4]), (5, [10, 11]), (98, [100, -1])], "1-2, 3->4, 5-11, 13-14, 98->100, 98->exit, 99", ), ( [1,2,3,4,98,99,100,101,102,103,104], [1,2,99,102,103,104], [(3, [4]), (104, [-1])], "1-2, 3->4, 99, 102-104", ), ]) def test_format_lines_with_arcs( statements: Iterable[TLineNo], lines: Iterable[TLineNo], arcs: Iterable[tuple[TLineNo, list[TLineNo]]], result: str, ) -> None: assert format_lines(statements, lines, arcs) == result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_setup.py0000644000175100001770000000337400000000000020051 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests of miscellaneous stuff.""" from __future__ import annotations import sys from typing import List, cast import coverage from tests.coveragetest import CoverageTest class SetupPyTest(CoverageTest): """Tests of setup.py""" run_in_temp_dir = False def setUp(self) -> None: super().setUp() # Force the most restrictive interpretation. 
self.set_environ('LC_ALL', 'C') def test_metadata(self) -> None: status, output = self.run_command_status( "python setup.py --description --version --url --author", ) assert status == 0 out = output.splitlines() assert "measurement" in out[0] assert coverage.__version__ == out[1] assert "github.com/nedbat/coveragepy" in out[2] assert "Ned Batchelder" in out[3] def test_more_metadata(self) -> None: # Let's be sure we pick up our own setup.py # CoverageTest restores the original sys.path for us. sys.path.insert(0, '') from setup import setup_args classifiers = cast(List[str], setup_args['classifiers']) assert len(classifiers) > 7 assert classifiers[-1].startswith("Development Status ::") assert "Programming Language :: Python :: %d" % sys.version_info[:1] in classifiers assert "Programming Language :: Python :: %d.%d" % sys.version_info[:2] in classifiers long_description = cast(str, setup_args['long_description']).splitlines() assert len(long_description) > 7 assert long_description[0].strip() != "" assert long_description[-1].strip() != "" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_sqlitedb.py0000644000175100001770000001136500000000000020517 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.sqlitedb""" from __future__ import annotations from typing import NoReturn from unittest import mock import pytest import coverage.sqlitedb from coverage.exceptions import DataError from coverage.sqlitedb import SqliteDb from tests.coveragetest import CoverageTest from tests.helpers import DebugControlString, FailingProxy DB_INIT = """\ create table name (first text, last text); insert into name (first, last) values ("pablo", "picasso"); """ class SqliteDbTest(CoverageTest): """Tests of tricky parts of SqliteDb.""" def test_error_reporting(self) -> None: msg = "Couldn't use data file 'test.db': no such table: bar" with SqliteDb("test.db", DebugControlString(options=["sql"])) as db: with pytest.raises(DataError, match=msg): with db.execute("select foo from bar"): # Entering the context manager raises the error, this line doesn't run: pass # pragma: not covered def test_retry_execute(self) -> None: with SqliteDb("test.db", DebugControlString(options=["sql"])) as db: db.executescript(DB_INIT) proxy = FailingProxy(db.con, "execute", [Exception("WUT")]) with mock.patch.object(db, "con", proxy): with db.execute("select first from name order by 1") as cur: assert list(cur) == [("pablo",)] def test_retry_execute_failure(self) -> None: with SqliteDb("test.db", DebugControlString(options=["sql"])) as db: db.executescript(DB_INIT) proxy = FailingProxy(db.con, "execute", [Exception("WUT"), RuntimeError("Fake")]) with mock.patch.object(db, "con", proxy): with pytest.raises(RuntimeError, match="Fake"): with db.execute("select first from name order by 1"): # Entering the context manager raises the error, this line doesn't run: pass # pragma: not covered def test_retry_executemany_void(self) -> None: with SqliteDb("test.db", DebugControlString(options=["sql"])) as db: db.executescript(DB_INIT) proxy = FailingProxy(db.con, "executemany", [Exception("WUT")]) with mock.patch.object(db, "con", proxy): db.executemany_void( "insert into name (first, last) values (?, ?)", [("vincent", "van gogh")], ) with db.execute("select first from name order by 1") as cur: assert list(cur) 
== [("pablo",), ("vincent",)] def test_retry_executemany_void_failure(self) -> None: with SqliteDb("test.db", DebugControlString(options=["sql"])) as db: db.executescript(DB_INIT) proxy = FailingProxy(db.con, "executemany", [Exception("WUT"), RuntimeError("Fake")]) with mock.patch.object(db, "con", proxy): with pytest.raises(RuntimeError, match="Fake"): db.executemany_void( "insert into name (first, last) values (?, ?)", [("vincent", "van gogh")], ) def test_open_fails_on_bad_db(self) -> None: self.make_file("bad.db", "boogers") def fake_failing_open(filename: str, mode: str) -> NoReturn: assert (filename, mode) == ("bad.db", "rb") raise RuntimeError("No you can't!") with mock.patch.object(coverage.sqlitedb, "open", fake_failing_open): msg = "Couldn't use data file 'bad.db': file is not a database" with pytest.raises(DataError, match=msg): with SqliteDb("bad.db", DebugControlString(options=["sql"])): pass # pragma: not covered def test_execute_void_can_allow_failure(self) -> None: with SqliteDb("fail.db", DebugControlString(options=["sql"])) as db: db.executescript(DB_INIT) proxy = FailingProxy(db.con, "execute", [Exception("WUT")]) with mock.patch.object(db, "con", proxy): db.execute_void("select x from nosuchtable", fail_ok=True) def test_execute_void_can_refuse_failure(self) -> None: with SqliteDb("fail.db", DebugControlString(options=["sql"])) as db: db.executescript(DB_INIT) proxy = FailingProxy(db.con, "execute", [Exception("WUT")]) with mock.patch.object(db, "con", proxy): msg = "Couldn't use data file 'fail.db': no such table: nosuchtable" with pytest.raises(DataError, match=msg): db.execute_void("select x from nosuchtable", fail_ok=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_templite.py0000644000175100001770000002744300000000000020537 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.templite.""" from __future__ import annotations import re from types import SimpleNamespace from typing import Any, ContextManager import pytest from coverage.templite import Templite, TempliteSyntaxError, TempliteValueError from tests.coveragetest import CoverageTest # pylint: disable=possibly-unused-variable class TempliteTest(CoverageTest): """Tests for Templite.""" run_in_temp_dir = False def try_render( self, text: str, ctx: dict[str, Any] | None = None, result: str | None = None, ) -> None: """Render `text` through `ctx`, and it had better be `result`. Result defaults to None so we can shorten the calls where we expect an exception and never get to the result comparison. """ actual = Templite(text).render(ctx or {}) # If result is None, then an exception should have prevented us getting # to here. assert result is not None assert actual == result def assertSynErr(self, msg: str) -> ContextManager[None]: """Assert that a `TempliteSyntaxError` will happen. A context manager, and the message should be `msg`. """ pat = "^" + re.escape(msg) + "$" return pytest.raises(TempliteSyntaxError, match=pat) # type: ignore def test_passthrough(self) -> None: # Strings without variables are passed through unchanged. assert Templite("Hello").render() == "Hello" assert Templite("Hello, 20% fun time!").render() == "Hello, 20% fun time!" def test_variables(self) -> None: # Variables use {{var}} syntax. 
self.try_render("Hello, {{name}}!", {'name':'Ned'}, "Hello, Ned!") def test_undefined_variables(self) -> None: # Using undefined names is an error. with pytest.raises(Exception, match="'name'"): self.try_render("Hi, {{name}}!") def test_pipes(self) -> None: # Variables can be filtered with pipes. data = { 'name': 'Ned', 'upper': lambda x: x.upper(), 'second': lambda x: x[1], } self.try_render("Hello, {{name|upper}}!", data, "Hello, NED!") # Pipes can be concatenated. self.try_render("Hello, {{name|upper|second}}!", data, "Hello, E!") def test_reusability(self) -> None: # A single Templite can be used more than once with different data. globs = { 'upper': lambda x: x.upper(), 'punct': '!', } template = Templite("This is {{name|upper}}{{punct}}", globs) assert template.render({'name':'Ned'}) == "This is NED!" assert template.render({'name':'Ben'}) == "This is BEN!" def test_attribute(self) -> None: # Variables' attributes can be accessed with dots. obj = SimpleNamespace(a="Ay") self.try_render("{{obj.a}}", locals(), "Ay") obj2 = SimpleNamespace(obj=obj, b="Bee") self.try_render("{{obj2.obj.a}} {{obj2.b}}", locals(), "Ay Bee") def test_member_function(self) -> None: # Variables' member functions can be used, as long as they are nullary. class WithMemberFns(SimpleNamespace): """A class to try out member function access.""" def ditto(self) -> str: """Return twice the .txt attribute.""" return self.txt + self.txt # type: ignore obj = WithMemberFns(txt="Once") self.try_render("{{obj.ditto}}", locals(), "OnceOnce") def test_item_access(self) -> None: # Variables' items can be used. d = {'a':17, 'b':23} self.try_render("{{d.a}} < {{d.b}}", locals(), "17 < 23") def test_loops(self) -> None: # Loops work like in Django. nums = [1,2,3,4] self.try_render( "Look: {% for n in nums %}{{n}}, {% endfor %}done.", locals(), "Look: 1, 2, 3, 4, done.", ) # Loop iterables can be filtered. 
def rev(l: list[int]) -> list[int]: """Return the reverse of `l`.""" l = l[:] l.reverse() return l self.try_render( "Look: {% for n in nums|rev %}{{n}}, {% endfor %}done.", locals(), "Look: 4, 3, 2, 1, done.", ) def test_empty_loops(self) -> None: self.try_render( "Empty: {% for n in nums %}{{n}}, {% endfor %}done.", {'nums':[]}, "Empty: done.", ) def test_multiline_loops(self) -> None: self.try_render( "Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.", {'nums':[1,2,3]}, "Look: \n\n1, \n\n2, \n\n3, \ndone.", ) def test_multiple_loops(self) -> None: self.try_render( "{% for n in nums %}{{n}}{% endfor %} and " + "{% for n in nums %}{{n}}{% endfor %}", {'nums': [1,2,3]}, "123 and 123", ) def test_comments(self) -> None: # Single-line comments work: self.try_render( "Hello, {# Name goes here: #}{{name}}!", {'name':'Ned'}, "Hello, Ned!", ) # and so do multi-line comments: self.try_render( "Hello, {# Name\ngoes\nhere: #}{{name}}!", {'name':'Ned'}, "Hello, Ned!", ) def test_if(self) -> None: self.try_render( "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!", {'ned': 1, 'ben': 0}, "Hi, NED!", ) self.try_render( "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!", {'ned': 0, 'ben': 1}, "Hi, BEN!", ) self.try_render( "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", {'ned': 0, 'ben': 0}, "Hi, !", ) self.try_render( "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", {'ned': 1, 'ben': 0}, "Hi, NED!", ) self.try_render( "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", {'ned': 1, 'ben': 1}, "Hi, NEDBEN!", ) def test_complex_if(self) -> None: class Complex(SimpleNamespace): """A class to try out complex data access.""" def getit(self): # type: ignore """Return it.""" return self.it obj = Complex(it={'x':"Hello", 'y': 0}) self.try_render( "@" + "{% if obj.getit.x %}X{% endif %}" + "{% if obj.getit.y %}Y{% endif %}" + "{% if obj.getit.y|str %}S{% endif %}" + "!", { 'obj': obj, 'str': str }, "@XS!", ) def test_loop_if(self) -> None: self.try_render( "@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!", {'nums': [0,1,2]}, "@0Z1Z2!", ) self.try_render( "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!", {'nums': [0,1,2]}, "X@012!", ) self.try_render( "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!", {'nums': []}, "X!", ) def test_nested_loops(self) -> None: self.try_render( "@" + "{% for n in nums %}" + "{% for a in abc %}{{a}}{{n}}{% endfor %}" + "{% endfor %}" + "!", {'nums': [0,1,2], 'abc': ['a', 'b', 'c']}, "@a0b0c0a1b1c1a2b2c2!", ) def test_whitespace_handling(self) -> None: self.try_render( "@{% for n in nums %}\n" + " {% for a in abc %}{{a}}{{n}}{% endfor %}\n" + "{% endfor %}!\n", {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, "@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n", ) self.try_render( "@{% for n in nums -%}\n" + " {% for a in abc -%}\n" + " {# this disappears completely -#}\n" + " {{a-}}\n" + " {{n -}}\n" + " {{n -}}\n" + " {% endfor %}\n" + "{% endfor %}!\n", {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, "@a00b00c00\na11b11c11\na22b22c22\n!\n", ) self.try_render( "@{% for n in nums -%}\n" + " {{n -}}\n" + " x\n" + "{% endfor %}!\n", {'nums': [0, 1, 2]}, "@0x\n1x\n2x\n!\n", ) self.try_render(" hello ", {}, " hello ") def test_eat_whitespace(self) -> None: self.try_render( "Hey!\n" + "{% joined %}\n" + "@{% for n in nums %}\n" + " {% for a in abc %}\n" + " {# this disappears completely #}\n" + " X\n" + " Y\n" + " {{a}}\n" + " {{n }}\n" + " {% endfor %}\n" + "{% endfor %}!\n" + "{% endjoined %}\n", {'nums': [0, 1, 
2], 'abc': ['a', 'b', 'c']}, "Hey!\n@XYa0XYb0XYc0XYa1XYb1XYc1XYa2XYb2XYc2!\n", ) def test_non_ascii(self) -> None: self.try_render( "{{where}} ollวษฅ", { 'where': 'วษนวษฅส‡' }, "วษนวษฅส‡ ollวษฅ", ) def test_exception_during_evaluation(self) -> None: # TypeError: Couldn't evaluate {{ foo.bar.baz }}: regex = "^Couldn't evaluate None.bar$" with pytest.raises(TempliteValueError, match=regex): self.try_render( "Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there", ) def test_bad_names(self) -> None: with self.assertSynErr("Not a valid name: 'var%&!@'"): self.try_render("Wat: {{ var%&!@ }}") with self.assertSynErr("Not a valid name: 'filter%&!@'"): self.try_render("Wat: {{ foo|filter%&!@ }}") with self.assertSynErr("Not a valid name: '@'"): self.try_render("Wat: {% for @ in x %}{% endfor %}") def test_bogus_tag_syntax(self) -> None: with self.assertSynErr("Don't understand tag: 'bogus'"): self.try_render("Huh: {% bogus %}!!{% endbogus %}??") def test_malformed_if(self) -> None: with self.assertSynErr("Don't understand if: '{% if %}'"): self.try_render("Buh? {% if %}hi!{% endif %}") with self.assertSynErr("Don't understand if: '{% if this or that %}'"): self.try_render("Buh? {% if this or that %}hi!{% endif %}") def test_malformed_for(self) -> None: with self.assertSynErr("Don't understand for: '{% for %}'"): self.try_render("Weird: {% for %}loop{% endfor %}") with self.assertSynErr("Don't understand for: '{% for x from y %}'"): self.try_render("Weird: {% for x from y %}loop{% endfor %}") with self.assertSynErr("Don't understand for: '{% for x, y in z %}'"): self.try_render("Weird: {% for x, y in z %}loop{% endfor %}") def test_bad_nesting(self) -> None: with self.assertSynErr("Unmatched action tag: 'if'"): self.try_render("{% if x %}X") with self.assertSynErr("Mismatched end tag: 'for'"): self.try_render("{% if x %}X{% endfor %}") with self.assertSynErr("Too many ends: '{% endif %}'"): self.try_render("{% if x %}{% endif %}{% endif %}") def test_malformed_end(self) -> None: with self.assertSynErr("Don't understand end: '{% end if %}'"): self.try_render("{% if x %}X{% end if %}") with self.assertSynErr("Don't understand end: '{% endif now %}'"): self.try_render("{% if x %}X{% endif now %}") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_testing.py0000644000175100001770000004265200000000000020370 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests that our test infrastructure is really working!""" from __future__ import annotations import datetime import os import re import sys import warnings import pytest import coverage from coverage.exceptions import CoverageWarning from coverage.files import actual_path from coverage.types import TArc from tests.coveragetest import CoverageTest from tests.helpers import ( CheckUniqueFilenames, FailingProxy, arcs_to_arcz_repr, arcz_to_arcs, assert_count_equal, assert_coverage_warnings, re_lines, re_lines_text, re_line, ) def test_xdist_sys_path_nuttiness_is_fixed() -> None: # See conftest.py:fix_xdist_sys_path assert sys.path[1] != "" assert os.getenv("PYTHONPATH") is None def test_assert_count_equal() -> None: assert_count_equal(set(), set()) assert_count_equal({"a": 1, "b": 2}, ["b", "a"]) with pytest.raises(AssertionError): assert_count_equal({1,2,3}, set()) with pytest.raises(AssertionError): 
assert_count_equal({1,2,3}, {4,5,6}) class CoverageTestTest(CoverageTest): """Test the methods in `CoverageTest`.""" def test_file_exists(self) -> None: self.make_file("whoville.txt", "We are here!") self.assert_exists("whoville.txt") self.assert_doesnt_exist("shadow.txt") msg = "File 'whoville.txt' shouldn't exist" with pytest.raises(AssertionError, match=msg): self.assert_doesnt_exist("whoville.txt") msg = "File 'shadow.txt' should exist" with pytest.raises(AssertionError, match=msg): self.assert_exists("shadow.txt") def test_file_count(self) -> None: self.make_file("abcde.txt", "abcde") self.make_file("axczz.txt", "axczz") self.make_file("afile.txt", "afile") self.assert_file_count("a*.txt", 3) self.assert_file_count("*c*.txt", 2) self.assert_file_count("afile.*", 1) self.assert_file_count("*.q", 0) msg = re.escape( "There should be 13 files matching 'a*.txt', but there are these: " + "['abcde.txt', 'afile.txt', 'axczz.txt']", ) with pytest.raises(AssertionError, match=msg): self.assert_file_count("a*.txt", 13) msg = re.escape( "There should be 12 files matching '*c*.txt', but there are these: " + "['abcde.txt', 'axczz.txt']", ) with pytest.raises(AssertionError, match=msg): self.assert_file_count("*c*.txt", 12) msg = re.escape( "There should be 11 files matching 'afile.*', but there are these: ['afile.txt']", ) with pytest.raises(AssertionError, match=msg): self.assert_file_count("afile.*", 11) msg = re.escape( "There should be 10 files matching '*.q', but there are these: []", ) with pytest.raises(AssertionError, match=msg): self.assert_file_count("*.q", 10) def test_assert_recent_datetime(self) -> None: def now_delta(seconds: int) -> datetime.datetime: """Make a datetime `seconds` seconds from now.""" return datetime.datetime.now() + datetime.timedelta(seconds=seconds) # Default delta is 10 seconds. self.assert_recent_datetime(now_delta(0)) self.assert_recent_datetime(now_delta(-9)) with pytest.raises(AssertionError): self.assert_recent_datetime(now_delta(-11)) with pytest.raises(AssertionError): self.assert_recent_datetime(now_delta(1)) # Delta is settable. self.assert_recent_datetime(now_delta(0), seconds=120) self.assert_recent_datetime(now_delta(-100), seconds=120) with pytest.raises(AssertionError): self.assert_recent_datetime(now_delta(-1000), seconds=120) with pytest.raises(AssertionError): self.assert_recent_datetime(now_delta(1), seconds=120) def test_assert_warnings(self) -> None: cov = coverage.Coverage() # Make a warning, it should catch it properly. with self.assert_warnings(cov, ["Hello there!"]): cov._warn("Hello there!") # The expected warnings are regexes. with self.assert_warnings(cov, ["Hello.*!"]): cov._warn("Hello there!") # There can be a bunch of actual warnings. with self.assert_warnings(cov, ["Hello.*!"]): cov._warn("You there?") cov._warn("Hello there!") # There can be a bunch of expected warnings. with self.assert_warnings(cov, ["Hello.*!", "You"]): cov._warn("You there?") cov._warn("Hello there!") # But if there are a bunch of expected warnings, they have to all happen. warn_regex = r"Didn't find warning 'You' in \['Hello there!'\]" with pytest.raises(AssertionError, match=warn_regex): with self.assert_warnings(cov, ["Hello.*!", "You"]): cov._warn("Hello there!") # Make a different warning than expected, it should raise an assertion. 
warn_regex = r"Didn't find warning 'Not me' in \['Hello there!'\]" with pytest.raises(AssertionError, match=warn_regex): with self.assert_warnings(cov, ["Not me"]): cov._warn("Hello there!") # Try checking a warning that shouldn't appear: happy case. with self.assert_warnings(cov, ["Hi"], not_warnings=["Bye"]): cov._warn("Hi") # But it should fail if the unexpected warning does appear. warn_regex = r"Found warning 'Bye' in \['Hi', 'Bye'\]" with pytest.raises(AssertionError, match=warn_regex): with self.assert_warnings(cov, ["Hi"], not_warnings=["Bye"]): cov._warn("Hi") cov._warn("Bye") # assert_warnings shouldn't hide a real exception. with pytest.raises(ZeroDivisionError, match="oops"): with self.assert_warnings(cov, ["Hello there!"]): raise ZeroDivisionError("oops") def test_assert_no_warnings(self) -> None: cov = coverage.Coverage() # Happy path: no warnings. with self.assert_warnings(cov, []): pass # If you said there would be no warnings, and there were, fail! warn_regex = r"Unexpected warnings: \['Watch out!'\]" with pytest.raises(AssertionError, match=warn_regex): with self.assert_warnings(cov, []): cov._warn("Watch out!") def test_sub_python_is_this_python(self) -> None: # Try it with a Python command. self.set_environ('COV_FOOBAR', 'XYZZY') self.make_file("showme.py", """\ import os, sys print(sys.executable) print(os.__file__) print(os.environ['COV_FOOBAR']) """) out_lines = self.run_command("python showme.py").splitlines() assert actual_path(out_lines[0]) == actual_path(sys.executable) assert out_lines[1] == os.__file__ assert out_lines[2] == 'XYZZY' # Try it with a "coverage debug sys" command. out = self.run_command("coverage debug sys") executable = re_line("executable:", out) executable = executable.split(":", 1)[1].strip() assert _same_python_executable(executable, sys.executable) # "environment: COV_FOOBAR = XYZZY" or "COV_FOOBAR = XYZZY" environ = re_line("COV_FOOBAR", out) _, _, environ = environ.rpartition(":") assert environ.strip() == "COV_FOOBAR = XYZZY" def test_run_command_stdout_stderr(self) -> None: # run_command should give us both stdout and stderr. self.make_file("outputs.py", """\ import sys sys.stderr.write("StdErr\\n") print("StdOut") """) out = self.run_command("python outputs.py") assert "StdOut\n" in out assert "StdErr\n" in out def test_stdout(self) -> None: # stdout is captured. print("This is stdout") print("Line 2") assert self.stdout() == "This is stdout\nLine 2\n" # When we grab stdout(), it's reset. print("Some more") assert self.stdout() == "Some more\n" class CheckUniqueFilenamesTest(CoverageTest): """Tests of CheckUniqueFilenames.""" run_in_temp_dir = False class Stub: """A stand-in for the class we're checking.""" def __init__(self, x: int) -> None: self.x = x def method( self, filename: str, a: int = 17, b: str = "hello", ) -> tuple[int, str, int, str]: """The method we'll wrap, with args to be sure args work.""" return (self.x, filename, a, b) def test_detect_duplicate(self) -> None: stub = self.Stub(23) CheckUniqueFilenames.hook(stub, "method") # Two method calls with different names are fine. assert stub.method("file1") == (23, "file1", 17, "hello") assert stub.method("file2", 1723, b="what") == (23, "file2", 1723, "what") # A duplicate file name trips an assertion. 
with pytest.raises(AssertionError): stub.method("file1") class CheckCoverageTest(CoverageTest): """Tests of the failure assertions in check_coverage.""" CODE = """\ a, b = 1, 1 def oops(x): if x % 2: raise Exception("odd") try: a = 6 oops(1) a = 8 except: b = 10 assert a == 6 and b == 10 """ ARCZ = ".1 12 -23 34 3-2 4-2 25 56 67 78 8B 9A AB B." ARCZ_MISSING = "3-2 78 8B" ARCZ_UNPREDICTED = "79" def test_check_coverage_possible(self) -> None: msg = r"(?s)Possible arcs differ: .*- \(6, 3\).*\+ \(6, 7\)" with pytest.raises(AssertionError, match=msg): self.check_coverage( self.CODE, arcz=self.ARCZ.replace("7", "3"), arcz_missing=self.ARCZ_MISSING, arcz_unpredicted=self.ARCZ_UNPREDICTED, ) def test_check_coverage_missing(self) -> None: msg = r"(?s)Missing arcs differ: .*- \(3, 8\).*\+ \(7, 8\)" with pytest.raises(AssertionError, match=msg): self.check_coverage( self.CODE, arcz=self.ARCZ, arcz_missing=self.ARCZ_MISSING.replace("7", "3"), arcz_unpredicted=self.ARCZ_UNPREDICTED, ) def test_check_coverage_unpredicted(self) -> None: msg = r"(?s)Unpredicted arcs differ: .*- \(3, 9\).*\+ \(7, 9\)" with pytest.raises(AssertionError, match=msg): self.check_coverage( self.CODE, arcz=self.ARCZ, arcz_missing=self.ARCZ_MISSING, arcz_unpredicted=self.ARCZ_UNPREDICTED.replace("7", "3"), ) class ReLinesTest(CoverageTest): """Tests of `re_lines`.""" run_in_temp_dir = False @pytest.mark.parametrize("pat, text, result", [ ("line", "line1\nline2\nline3\n", "line1\nline2\nline3\n"), ("[13]", "line1\nline2\nline3\n", "line1\nline3\n"), ("X", "line1\nline2\nline3\n", ""), ]) def test_re_lines(self, pat: str, text: str, result: str) -> None: assert re_lines_text(pat, text) == result assert re_lines(pat, text) == result.splitlines() @pytest.mark.parametrize("pat, text, result", [ ("line", "line1\nline2\nline3\n", ""), ("[13]", "line1\nline2\nline3\n", "line2\n"), ("X", "line1\nline2\nline3\n", "line1\nline2\nline3\n"), ]) def test_re_lines_inverted(self, pat: str, text: str, result: str) -> None: assert re_lines_text(pat, text, match=False) == result assert re_lines(pat, text, match=False) == result.splitlines() @pytest.mark.parametrize("pat, text, result", [ ("2", "line1\nline2\nline3\n", "line2"), ]) def test_re_line(self, pat: str, text: str, result: str) -> None: assert re_line(pat, text) == result @pytest.mark.parametrize("pat, text", [ ("line", "line1\nline2\nline3\n"), # too many matches ("X", "line1\nline2\nline3\n"), # no matches ]) def test_re_line_bad(self, pat: str, text: str) -> None: with pytest.raises(AssertionError): re_line(pat, text) def _same_python_executable(e1: str, e2: str) -> bool: """Determine if `e1` and `e2` refer to the same Python executable. Either path could include symbolic links. The two paths might not refer to the exact same file, but if they are in the same directory and their numeric suffixes aren't different, they are the same executable. 
""" e1 = os.path.abspath(os.path.realpath(e1)) e2 = os.path.abspath(os.path.realpath(e2)) if os.path.dirname(e1) != os.path.dirname(e2): return False # pragma: only failure e1 = os.path.basename(e1) e2 = os.path.basename(e2) if e1 == "python" or e2 == "python" or e1 == e2: # Python and Python2.3: OK # Python2.3 and Python: OK # Python and Python: OK # Python2.3 and Python2.3: OK return True return False # pragma: only failure class ArczTest(CoverageTest): """Tests of arcz/arcs helpers.""" run_in_temp_dir = False @pytest.mark.parametrize("arcz, arcs", [ (".1 12 2.", [(-1, 1), (1, 2), (2, -1)]), ("-11 12 2-5", [(-1, 1), (1, 2), (2, -5)]), ("-QA CB IT Z-A", [(-26, 10), (12, 11), (18, 29), (35, -10)]), ]) def test_arcz_to_arcs(self, arcz: str, arcs: list[TArc]) -> None: assert arcz_to_arcs(arcz) == arcs @pytest.mark.parametrize("arcs, arcz_repr", [ ([(-1, 1), (1, 2), (2, -1)], "(-1, 1) # .1\n(1, 2) # 12\n(2, -1) # 2.\n"), ([(-1, 1), (1, 2), (2, -5)], "(-1, 1) # .1\n(1, 2) # 12\n(2, -5) # 2-5\n"), ([(-26, 10), (12, 11), (18, 29), (35, -10), (1, 33), (100, 7)], ( "(-26, 10) # -QA\n" + "(12, 11) # CB\n" + "(18, 29) # IT\n" + "(35, -10) # Z-A\n" + "(1, 33) # 1X\n" + "(100, 7) # ?7\n" ), ), ]) def test_arcs_to_arcz_repr(self, arcs: list[TArc], arcz_repr: str) -> None: assert arcs_to_arcz_repr(arcs) == arcz_repr class AssertCoverageWarningsTest(CoverageTest): """Tests of assert_coverage_warnings""" def test_one_warning(self) -> None: with pytest.warns(Warning) as warns: warnings.warn("Hello there", category=CoverageWarning) assert_coverage_warnings(warns, "Hello there") def test_many_warnings(self) -> None: with pytest.warns(Warning) as warns: warnings.warn("The first", category=CoverageWarning) warnings.warn("The second", category=CoverageWarning) warnings.warn("The third", category=CoverageWarning) assert_coverage_warnings(warns, "The first", "The second", "The third") def test_wrong_type(self) -> None: with pytest.warns(Warning) as warns: warnings.warn("Not ours", category=Warning) with pytest.raises(AssertionError): assert_coverage_warnings(warns, "Not ours") def test_wrong_message(self) -> None: with pytest.warns(Warning) as warns: warnings.warn("Goodbye", category=CoverageWarning) with pytest.raises(AssertionError): assert_coverage_warnings(warns, "Hello there") def test_wrong_number_too_many(self) -> None: with pytest.warns(Warning) as warns: warnings.warn("The first", category=CoverageWarning) warnings.warn("The second", category=CoverageWarning) with pytest.raises(AssertionError): assert_coverage_warnings(warns, "The first", "The second", "The third") def test_wrong_number_too_few(self) -> None: with pytest.warns(Warning) as warns: warnings.warn("The first", category=CoverageWarning) warnings.warn("The second", category=CoverageWarning) warnings.warn("The third", category=CoverageWarning) with pytest.raises(AssertionError): assert_coverage_warnings(warns, "The first", "The second") def test_regex_matches(self) -> None: with pytest.warns(Warning) as warns: warnings.warn("The first", category=CoverageWarning) assert_coverage_warnings(warns, re.compile("f?rst")) def test_regex_doesnt_match(self) -> None: with pytest.warns(Warning) as warns: warnings.warn("The first", category=CoverageWarning) with pytest.raises(AssertionError): assert_coverage_warnings(warns, re.compile("second")) def test_failing_proxy() -> None: class Arithmetic: """Sample class to test FailingProxy.""" # pylint: disable=missing-function-docstring def add(self, a, b): # type: ignore[no-untyped-def] return a + b def 
subtract(self, a, b): # type: ignore[no-untyped-def] return a - b proxy = FailingProxy(Arithmetic(), "add", [RuntimeError("First"), RuntimeError("Second")]) # add fails the first time with pytest.raises(RuntimeError, match="First"): proxy.add(1, 2) # subtract always works assert proxy.subtract(10, 3) == 7 # add fails the second time with pytest.raises(RuntimeError, match="Second"): proxy.add(3, 4) # then add starts working assert proxy.add(5, 6) == 11 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_venv.py0000644000175100001770000003254000000000000017664 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests about understanding how third-party code is installed.""" from __future__ import annotations import os import os.path import shutil from pathlib import Path from typing import Iterator, cast import pytest from coverage import env from tests import testenv from tests.coveragetest import CoverageTest, COVERAGE_INSTALL_ARGS from tests.helpers import change_dir, make_file from tests.helpers import re_lines, run_command def run_in_venv(cmd: str) -> str: r"""Run `cmd` in the virtualenv at `venv`. The first word of the command will be adjusted to run it from the venv/bin or venv\Scripts directory. Returns the text output of the command. """ words = cmd.split() if env.WINDOWS: words[0] = fr"venv\Scripts\{words[0]}.exe" else: words[0] = fr"venv/bin/{words[0]}" status, output = run_command(" ".join(words)) # Print the output so if it fails, we can tell what happened. print(output) assert status == 0 return output @pytest.fixture(scope="session", name="venv_world") def venv_world_fixture(tmp_path_factory: pytest.TempPathFactory) -> Path: """Create a virtualenv with a few test packages for VirtualenvTest to use. Returns the directory containing the "venv" virtualenv. """ venv_world = tmp_path_factory.mktemp("venv_world") with change_dir(venv_world): # Create a virtualenv. run_command("python -m venv venv") # A third-party package that installs a few different packages. make_file("third_pkg/third/__init__.py", """\ import fourth def third(x): return 3 * x """) # Use plugin2.py as third.plugin with open(os.path.join(os.path.dirname(__file__), "plugin2.py")) as f: make_file("third_pkg/third/plugin.py", f.read()) # A render function for plugin2 to use for dynamic file names. make_file("third_pkg/third/render.py", """\ def render(filename, linenum): return "HTML: {}@{}".format(filename, linenum) """) # Another package that third can use. make_file("third_pkg/fourth/__init__.py", """\ def fourth(x): return 4 * x """) # Some namespace packages. make_file("third_pkg/nspkg/fifth/__init__.py", """\ def fifth(x): return 5 * x """) # The setup.py to install everything. make_file("third_pkg/setup.py", """\ import setuptools setuptools.setup( name="third", packages=["third", "fourth", "nspkg.fifth"], ) """) # Some namespace packages. make_file("another_pkg/nspkg/sixth/__init__.py", """\ def sixth(x): return 6 * x """) make_file("another_pkg/setup.py", """\ import setuptools setuptools.setup( name="another", packages=["nspkg.sixth"], ) """) # Bug888 code. 
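# Layout sketch of the bug888 fixture built below (inferred from the files it
# creates): two pip-installable projects share the pkgutil-style "testcov"
# namespace package -- an app whose testcov/main.py loads every entry point in
# the "plugins" group, and a plugin that registers testcov.plugin:testp there.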
make_file("bug888/app/setup.py", """\ from setuptools import setup setup( name='testcov', packages=['testcov'], ) """) # https://packaging.python.org/en/latest/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages make_file("bug888/app/testcov/__init__.py", """\ __path__ = __import__('pkgutil').extend_path(__path__, __name__) """) if env.PYVERSION < (3, 10): get_plugins = "entry_points['plugins']" else: get_plugins = "entry_points.select(group='plugins')" make_file("bug888/app/testcov/main.py", f"""\ import importlib.metadata entry_points = importlib.metadata.entry_points() for entry_point in {get_plugins}: entry_point.load()() """) make_file("bug888/plugin/setup.py", """\ from setuptools import setup setup( name='testcov-plugin', packages=['testcov'], entry_points={'plugins': ['testp = testcov.plugin:testp']}, ) """) # https://packaging.python.org/en/latest/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages make_file("bug888/plugin/testcov/__init__.py", """\ __path__ = __import__('pkgutil').extend_path(__path__, __name__) """) make_file("bug888/plugin/testcov/plugin.py", """\ def testp(): print("Plugin here") """) # Install everything. run_in_venv( "python -m pip install " + "./third_pkg " + "-e ./another_pkg " + "-e ./bug888/app -e ./bug888/plugin " + COVERAGE_INSTALL_ARGS, ) shutil.rmtree("third_pkg") return venv_world @pytest.fixture(params=[ "coverage", "python -m coverage", ], name="coverage_command") def coverage_command_fixture(request: pytest.FixtureRequest) -> str: """Parametrized fixture to use multiple forms of "coverage" command.""" return cast(str, request.param) class VirtualenvTest(CoverageTest): """Tests of virtualenv considerations.""" expected_stdout = "33\n110\n198\n1.5\n" @pytest.fixture(autouse=True) def in_venv_world_fixture(self, venv_world: Path) -> Iterator[None]: """For running tests inside venv_world, and cleaning up made files.""" with change_dir(venv_world): self.make_file("myproduct.py", """\ import colorsys import third import nspkg.fifth import nspkg.sixth print(third.third(11)) print(nspkg.fifth.fifth(22)) print(nspkg.sixth.sixth(33)) print(sum(colorsys.rgb_to_hls(1, 0, 0))) """) self.del_environ("COVERAGE_TESTING") # To get realistic behavior self.set_environ("COVERAGE_DEBUG_FILE", "debug_out.txt") self.set_environ("COVERAGE_DEBUG", "trace") yield for fname in os.listdir("."): if fname not in {"venv", "another_pkg", "bug888"}: os.remove(fname) def get_trace_output(self) -> str: """Get the debug output of coverage.py""" with open("debug_out.txt") as f: return f.read() @pytest.mark.parametrize('install_source_in_venv', [True, False]) def test_third_party_venv_isnt_measured( self, coverage_command: str, install_source_in_venv: bool, ) -> None: if install_source_in_venv: make_file("setup.py", """\ import setuptools setuptools.setup( name="myproduct", py_modules = ["myproduct"], ) """) try: run_in_venv("python -m pip install .") finally: shutil.rmtree("build", ignore_errors=True) shutil.rmtree("myproduct.egg-info", ignore_errors=True) # Ensure that coverage doesn't run the non-installed module. os.remove('myproduct.py') out = run_in_venv(coverage_command + " run --source=.,myproduct -m myproduct") else: out = run_in_venv(coverage_command + " run --source=. myproduct.py") # In particular, this warning doesn't appear: # Already imported a file that will be measured: .../coverage/__main__.py assert out == self.expected_stdout # Check that our tracing was accurate. Files are mentioned because # --source refers to a file. 
debug_out = self.get_trace_output() assert re_lines( r"^Not tracing .*\bexecfile.py': inside --source, but is third-party", debug_out, ) assert re_lines(r"^Tracing .*\bmyproduct.py", debug_out) assert re_lines( r"^Not tracing .*\bcolorsys.py': (module 'colorsys' |)?falls outside the --source spec", debug_out, ) out = run_in_venv(coverage_command + " report") assert "myproduct.py" in out assert "third" not in out assert "coverage" not in out assert "colorsys" not in out def test_us_in_venv_isnt_measured(self, coverage_command: str) -> None: out = run_in_venv(coverage_command + " run --source=third myproduct.py") assert out == self.expected_stdout # Check that our tracing was accurate. Modules are mentioned because # --source refers to a module. debug_out = self.get_trace_output() assert re_lines( r"^Not tracing .*\bexecfile.py': " + "module 'coverage.execfile' falls outside the --source spec", debug_out, ) assert re_lines( r"^Not tracing .*\bmyproduct.py': module 'myproduct' falls outside the --source spec", debug_out, ) assert re_lines( r"^Not tracing .*\bcolorsys.py': module 'colorsys' falls outside the --source spec", debug_out, ) out = run_in_venv(coverage_command + " report") assert "myproduct.py" not in out assert "third" in out assert "coverage" not in out assert "colorsys" not in out def test_venv_isnt_measured(self, coverage_command: str) -> None: out = run_in_venv(coverage_command + " run myproduct.py") assert out == self.expected_stdout debug_out = self.get_trace_output() assert re_lines(r"^Not tracing .*\bexecfile.py': is part of coverage.py", debug_out) assert re_lines(r"^Tracing .*\bmyproduct.py", debug_out) assert re_lines(r"^Not tracing .*\bcolorsys.py': is in the stdlib", debug_out) out = run_in_venv(coverage_command + " report") assert "myproduct.py" in out assert "third" not in out assert "coverage" not in out assert "colorsys" not in out @pytest.mark.skipif(not testenv.C_TRACER, reason="No plugins with this core.") def test_venv_with_dynamic_plugin(self, coverage_command: str) -> None: # https://github.com/nedbat/coveragepy/issues/1150 # Django coverage plugin was incorrectly getting warnings: # "Already imported: ... django/template/blah.py" # It happened because coverage imported the plugin, which imported # Django, and then the Django files were reported as traceable. self.make_file(".coveragerc", "[run]\nplugins=third.plugin\n") self.make_file("myrender.py", """\ import third.render print(third.render.render("hello.html", 1723)) """) out = run_in_venv(coverage_command + " run --source=. myrender.py") # The output should not have this warning: # Already imported a file that will be measured: ...third/render.py (already-imported) assert out == "HTML: hello.html@1723\n" def test_installed_namespace_packages(self, coverage_command: str) -> None: # https://github.com/nedbat/coveragepy/issues/1231 # When namespace packages were installed, they were considered # third-party packages. Test that isn't still happening. out = run_in_venv(coverage_command + " run --source=nspkg myproduct.py") # In particular, this warning doesn't appear: # Already imported a file that will be measured: .../coverage/__main__.py assert out == self.expected_stdout # Check that our tracing was accurate. Files are mentioned because # --source refers to a file. 
debug_out = self.get_trace_output() assert re_lines( r"^Not tracing .*\bexecfile.py': " + "module 'coverage.execfile' falls outside the --source spec", debug_out, ) assert re_lines( r"^Not tracing .*\bmyproduct.py': module 'myproduct' falls outside the --source spec", debug_out, ) assert re_lines( r"^Not tracing .*\bcolorsys.py': module 'colorsys' falls outside the --source spec", debug_out, ) out = run_in_venv(coverage_command + " report") # Name Stmts Miss Cover # ------------------------------------------------------------------------------ # another_pkg/nspkg/sixth/__init__.py 2 0 100% # venv/lib/python3.9/site-packages/nspkg/fifth/__init__.py 2 0 100% # ------------------------------------------------------------------------------ # TOTAL 4 0 100% assert "myproduct.py" not in out assert "third" not in out assert "coverage" not in out assert "colorsys" not in out assert "fifth" in out assert "sixth" in out def test_bug_888(self, coverage_command: str) -> None: out = run_in_venv( coverage_command + " run --source=bug888/app,bug888/plugin bug888/app/testcov/main.py", ) # When the test fails, the output includes "Already imported a file that will be measured" assert out == "Plugin here\n" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_version.py0000644000175100001770000000344200000000000020372 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests of version.py.""" from __future__ import annotations import coverage from coverage.version import _make_url, _make_version from tests.coveragetest import CoverageTest class VersionTest(CoverageTest): """Tests of version.py""" run_in_temp_dir = False def test_version_info(self) -> None: # Make sure we didn't screw up the version_info tuple. 
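# (coverage.version_info is modeled on sys.version_info: a 5-tuple of
# (major, minor, micro, releaselevel, serial), e.g. (7, 4, 4, "final", 0)
# for this release -- illustrative value, checked structurally below.)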
assert isinstance(coverage.version_info, tuple) assert [type(d) for d in coverage.version_info] == [int, int, int, str, int] assert coverage.version_info[3] in {'alpha', 'beta', 'candidate', 'final'} def test_make_version(self) -> None: assert _make_version(4, 0, 0, 'alpha') == "4.0.0a0" assert _make_version(4, 0, 0, 'alpha', 1) == "4.0.0a1" assert _make_version(4, 0, 0, 'final') == "4.0.0" assert _make_version(4, 1, 0) == "4.1.0" assert _make_version(4, 1, 2, 'beta', 3) == "4.1.2b3" assert _make_version(4, 1, 2) == "4.1.2" assert _make_version(5, 10, 2, 'candidate', 7) == "5.10.2rc7" assert _make_version(5, 10, 2, 'candidate', 7, 3) == "5.10.2rc7.dev3" def test_make_url(self) -> None: expected = "https://coverage.readthedocs.io/en/4.1.2" assert _make_url(4, 1, 2, 'final') == expected expected = "https://coverage.readthedocs.io/en/4.1.2b3" assert _make_url(4, 1, 2, 'beta', 3) == expected expected = "https://coverage.readthedocs.io/en/4.1.2b3.dev17" assert _make_url(4, 1, 2, 'beta', 3, 17) == expected expected = "https://coverage.readthedocs.io/en/4.1.2.dev17" assert _make_url(4, 1, 2, 'final', 0, 17) == expected ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/test_xml.py0000644000175100001770000005276600000000000017522 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for XML reports from coverage.py.""" from __future__ import annotations import os import os.path import re from typing import Any, Iterator from xml.etree import ElementTree import pytest import coverage from coverage import Coverage, env from coverage.exceptions import NoDataError from coverage.files import abs_file from coverage.misc import import_local_file from tests.coveragetest import CoverageTest from tests.goldtest import compare, gold_path from tests.helpers import assert_coverage_warnings, change_dir class XmlTestHelpers(CoverageTest): """Methods to use from XML tests.""" def run_doit(self) -> Coverage: """Construct a simple sub-package.""" self.make_file("sub/__init__.py") self.make_file("sub/doit.py", "print('doit!')") self.make_file("main.py", "import sub.doit") cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "main") return cov def make_tree(self, width: int, depth: int, curdir: str = ".") -> None: """Make a tree of packages. Makes `width` directories, named d0 .. d{width-1}. Each directory has __init__.py, and `width` files, named f0.py .. f{width-1}.py. Each directory also has `width` sub-directories, in the same fashion, until a depth of `depth` is reached. 
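For example (judging from the loop below), make_tree(width=1, depth=3) should
produce d0/__init__.py, d0/f0.py, d0/d0/__init__.py, and d0/d0/f0.py, with no
files created in the top-level directory itself.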
""" if depth == 0: return def here(p: str) -> str: """A path for `p` in our currently interesting directory.""" return os.path.join(curdir, p) for i in range(width): next_dir = here(f"d{i}") self.make_tree(width, depth-1, next_dir) if curdir != ".": self.make_file(here("__init__.py"), "") for i in range(width): filename = here(f"f{i}.py") self.make_file(filename, f"# {filename}\n") def assert_source( self, xmldom: ElementTree.Element | ElementTree.ElementTree, src: str, ) -> None: """Assert that the XML has a element with `src`.""" src = abs_file(src) elts = xmldom.findall(".//sources/source") assert any(elt.text == src for elt in elts) class XmlTestHelpersTest(XmlTestHelpers, CoverageTest): """Tests of methods in XmlTestHelpers.""" run_in_temp_dir = False def test_assert_source(self) -> None: dom = ElementTree.fromstring("""\ foo {cwd}something {cwd}another """.format(cwd=abs_file(".")+os.sep)) self.assert_source(dom, "something") self.assert_source(dom, "another") with pytest.raises(AssertionError): self.assert_source(dom, "hello") with pytest.raises(AssertionError): self.assert_source(dom, "foo") with pytest.raises(AssertionError): self.assert_source(dom, "thing") class XmlReportTest(XmlTestHelpers, CoverageTest): """Tests of the XML reports from coverage.py.""" def make_mycode_data(self) -> None: """Pretend that we ran mycode.py, so we can report on it.""" self.make_file("mycode.py", "print('hello')\n") self.make_data_file(lines={abs_file("mycode.py"): [1]}) def run_xml_report(self, **kwargs: Any) -> None: """Run xml_report()""" cov = coverage.Coverage() cov.load() cov.xml_report(**kwargs) def test_default_file_placement(self) -> None: self.make_mycode_data() self.run_xml_report() self.assert_exists("coverage.xml") assert self.stdout() == "" def test_argument_affects_xml_placement(self) -> None: self.make_mycode_data() cov = coverage.Coverage(messages=True) cov.load() cov.xml_report(outfile="put_it_there.xml") assert self.stdout() == "Wrote XML report to put_it_there.xml\n" self.assert_doesnt_exist("coverage.xml") self.assert_exists("put_it_there.xml") def test_output_directory_does_not_exist(self) -> None: self.make_mycode_data() self.run_xml_report(outfile="nonexistent/put_it_there.xml") self.assert_doesnt_exist("coverage.xml") self.assert_doesnt_exist("put_it_there.xml") self.assert_exists("nonexistent/put_it_there.xml") def test_config_affects_xml_placement(self) -> None: self.make_mycode_data() self.make_file(".coveragerc", "[xml]\noutput = xml.out\n") self.run_xml_report() self.assert_doesnt_exist("coverage.xml") self.assert_exists("xml.out") def test_no_data(self) -> None: # https://github.com/nedbat/coveragepy/issues/210 with pytest.raises(NoDataError, match="No data to report."): self.run_xml_report() self.assert_doesnt_exist("coverage.xml") self.assert_doesnt_exist(".coverage") def test_no_source(self) -> None: # Written while investigating a bug, might as well keep it. # https://github.com/nedbat/coveragepy/issues/208 self.make_file("innocuous.py", "a = 4") cov = coverage.Coverage() self.start_import_stop(cov, "innocuous") os.remove("innocuous.py") with pytest.warns(Warning) as warns: cov.xml_report(ignore_errors=True) assert_coverage_warnings( warns, re.compile(r"Couldn't parse '.*innocuous.py'. 
\(couldnt-parse\)"), ) self.assert_exists("coverage.xml") def test_filename_format_showing_everything(self) -> None: cov = self.run_doit() cov.xml_report() dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='doit.py']") assert len(elts) == 1 assert elts[0].get('filename') == "sub/doit.py" def test_filename_format_including_filename(self) -> None: cov = self.run_doit() cov.xml_report(["sub/doit.py"]) dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='doit.py']") assert len(elts) == 1 assert elts[0].get('filename') == "sub/doit.py" def test_filename_format_including_module(self) -> None: cov = self.run_doit() import sub.doit # pylint: disable=import-error cov.xml_report([sub.doit]) dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='doit.py']") assert len(elts) == 1 assert elts[0].get('filename') == "sub/doit.py" def test_reporting_on_nothing(self) -> None: # Used to raise a zero division error: # https://github.com/nedbat/coveragepy/issues/250 self.make_file("empty.py", "") cov = coverage.Coverage() empty = self.start_import_stop(cov, "empty") cov.xml_report([empty]) dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='empty.py']") assert len(elts) == 1 assert elts[0].get('filename') == "empty.py" assert elts[0].get('line-rate') == '1' def test_empty_file_is_100_not_0(self) -> None: # https://github.com/nedbat/coveragepy/issues/345 cov = self.run_doit() cov.xml_report() dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='__init__.py']") assert len(elts) == 1 assert elts[0].get('line-rate') == '1' def test_empty_file_is_skipped(self) -> None: cov = self.run_doit() cov.xml_report(skip_empty=True) dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//class[@name='__init__.py']") assert len(elts) == 0 def test_curdir_source(self) -> None: # With no source= option, the XML report should explain that the source # is in the current directory. cov = self.run_doit() cov.xml_report() dom = ElementTree.parse("coverage.xml") self.assert_source(dom, ".") sources = dom.findall(".//source") assert len(sources) == 1 def test_deep_source(self) -> None: # When using source=, the XML report needs to mention those directories # in the elements. # https://github.com/nedbat/coveragepy/issues/439 self.make_file("src/main/foo.py", "a = 1") self.make_file("also/over/there/bar.py", "b = 2") cov = coverage.Coverage(source=["src/main", "also/over/there", "not/really"]) with cov.collect(): mod_foo = import_local_file("foo", "src/main/foo.py") mod_bar = import_local_file("bar", "also/over/there/bar.py") with pytest.warns(Warning) as warns: cov.xml_report([mod_foo, mod_bar]) assert_coverage_warnings( warns, "Module not/really was never imported. 
(module-not-imported)", ) dom = ElementTree.parse("coverage.xml") self.assert_source(dom, "src/main") self.assert_source(dom, "also/over/there") sources = dom.findall(".//source") assert len(sources) == 2 foo_class = dom.findall(".//class[@name='foo.py']") assert len(foo_class) == 1 assert foo_class[0].attrib == { 'branch-rate': '0', 'complexity': '0', 'filename': 'foo.py', 'line-rate': '1', 'name': 'foo.py', } bar_class = dom.findall(".//class[@name='bar.py']") assert len(bar_class) == 1 assert bar_class[0].attrib == { 'branch-rate': '0', 'complexity': '0', 'filename': 'bar.py', 'line-rate': '1', 'name': 'bar.py', } def test_nonascii_directory(self) -> None: # https://github.com/nedbat/coveragepy/issues/573 self.make_file("ํ…Œ์ŠคํŠธ/program.py", "a = 1") with change_dir("ํ…Œ์ŠคํŠธ"): cov = coverage.Coverage() self.start_import_stop(cov, "program") cov.xml_report() def test_accented_dot_py(self) -> None: # Make a file with a non-ascii character in the filename. self.make_file("h\xe2t.py", "print('accented')") self.make_data_file(lines={abs_file("h\xe2t.py"): [1]}) cov = coverage.Coverage() cov.load() cov.xml_report() # The XML report is always UTF8-encoded. with open("coverage.xml", "rb") as xmlf: xml = xmlf.read() assert ' filename="h\xe2t.py"'.encode() in xml assert ' name="h\xe2t.py"'.encode() in xml def test_accented_directory(self) -> None: # Make a file with a non-ascii character in the directory name. self.make_file("\xe2/accented.py", "print('accented')") self.make_data_file(lines={abs_file("\xe2/accented.py"): [1]}) # The XML report is always UTF8-encoded. cov = coverage.Coverage() cov.load() cov.xml_report() with open("coverage.xml", "rb") as xmlf: xml = xmlf.read() assert b' filename="\xc3\xa2/accented.py"' in xml assert b' name="accented.py"' in xml dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//package[@name='รข']") assert len(elts) == 1 assert elts[0].attrib == { "branch-rate": "0", "complexity": "0", "line-rate": "1", "name": "รข", } def test_no_duplicate_packages(self) -> None: self.make_file( "namespace/package/__init__.py", "from . import sample; from . import test; from .subpackage import test", ) self.make_file("namespace/package/sample.py", "print('package.sample')") self.make_file("namespace/package/test.py", "print('package.test')") self.make_file("namespace/package/subpackage/test.py", "print('package.subpackage.test')") # no source path passed to coverage! 
# problem occurs when they are dynamically generated during xml report cov = coverage.Coverage() with cov.collect(): import_local_file("foo", "namespace/package/__init__.py") cov.xml_report() dom = ElementTree.parse("coverage.xml") # only two packages should be present packages = dom.findall(".//package") assert len(packages) == 2 # one of them is namespace.package named_package = dom.findall(".//package[@name='namespace.package']") assert len(named_package) == 1 # the other one namespace.package.subpackage named_sub_package = dom.findall(".//package[@name='namespace.package.subpackage']") assert len(named_sub_package) == 1 def test_bug_1709(self) -> None: # https://github.com/nedbat/coveragepy/issues/1709 self.make_file("main.py", "import x1y, x01y, x001y") self.make_file("x1y.py", "print('x1y')") self.make_file("x01y.py", "print('x01y')") self.make_file("x001y.py", "print('x001y')") cov = coverage.Coverage() self.start_import_stop(cov, "main") assert self.stdout() == "x1y\nx01y\nx001y\n" # This used to raise: # TypeError: '<' not supported between instances of 'Element' and 'Element' cov.xml_report() def unbackslash(v: Any) -> Any: """Find strings in `v`, and replace backslashes with slashes throughout.""" if isinstance(v, (tuple, list)): return [unbackslash(vv) for vv in v] elif isinstance(v, dict): return {k: unbackslash(vv) for k, vv in v.items()} else: assert isinstance(v, str) return v.replace("\\", "/") class XmlPackageStructureTest(XmlTestHelpers, CoverageTest): """Tests about the package structure reported in the coverage.xml file.""" def package_and_class_tags(self, cov: Coverage) -> Iterator[tuple[str, dict[str, Any]]]: """Run an XML report on `cov`, and get the package and class tags.""" cov.xml_report() dom = ElementTree.parse("coverage.xml") for node in dom.iter(): if node.tag in ('package', 'class'): yield (node.tag, {a:v for a,v in node.items() if a in ('name', 'filename')}) def assert_package_and_class_tags(self, cov: Coverage, result: Any) -> None: """Check the XML package and class tags from `cov` match `result`.""" assert unbackslash(list(self.package_and_class_tags(cov))) == unbackslash(result) def test_package_names(self) -> None: self.make_tree(width=1, depth=3) self.make_file("main.py", """\ from d0.d0 import f0 """) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "main") self.assert_package_and_class_tags(cov, [ ('package', {'name': "."}), ('class', {'filename': "main.py", 'name': "main.py"}), ('package', {'name': "d0"}), ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), ('package', {'name': "d0.d0"}), ('class', {'filename': "d0/d0/__init__.py", 'name': "__init__.py"}), ('class', {'filename': "d0/d0/f0.py", 'name': "f0.py"}), ]) def test_package_depth_1(self) -> None: self.make_tree(width=1, depth=4) self.make_file("main.py", """\ from d0.d0 import f0 """) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "main") cov.set_option("xml:package_depth", 1) self.assert_package_and_class_tags(cov, [ ('package', {'name': "."}), ('class', {'filename': "main.py", 'name': "main.py"}), ('package', {'name': "d0"}), ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), ('class', {'filename': "d0/d0/__init__.py", 'name': "d0/__init__.py"}), ('class', {'filename': "d0/d0/d0/__init__.py", 'name': "d0/d0/__init__.py"}), ('class', {'filename': "d0/d0/d0/f0.py", 'name': "d0/d0/f0.py"}), ('class', {'filename': "d0/d0/f0.py", 'name': "d0/f0.py"}), ('class', {'filename': 
"d0/f0.py", 'name': "f0.py"}), ]) def test_package_depth_2(self) -> None: self.make_tree(width=1, depth=4) self.make_file("main.py", """\ from d0.d0 import f0 """) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "main") cov.set_option("xml:package_depth", 2) self.assert_package_and_class_tags(cov, [ ('package', {'name': "."}), ('class', {'filename': "main.py", 'name': "main.py"}), ('package', {'name': "d0"}), ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), ('package', {'name': "d0.d0"}), ('class', {'filename': "d0/d0/__init__.py", 'name': "__init__.py"}), ('class', {'filename': "d0/d0/d0/__init__.py", 'name': "d0/__init__.py"}), ('class', {'filename': "d0/d0/d0/f0.py", 'name': "d0/f0.py"}), ('class', {'filename': "d0/d0/f0.py", 'name': "f0.py"}), ]) def test_package_depth_3(self) -> None: self.make_tree(width=1, depth=4) self.make_file("main.py", """\ from d0.d0 import f0 """) cov = coverage.Coverage(source=["."]) self.start_import_stop(cov, "main") cov.set_option("xml:package_depth", 3) self.assert_package_and_class_tags(cov, [ ('package', {'name': "."}), ('class', {'filename': "main.py", 'name': "main.py"}), ('package', {'name': "d0"}), ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), ('package', {'name': "d0.d0"}), ('class', {'filename': "d0/d0/__init__.py", 'name': "__init__.py"}), ('class', {'filename': "d0/d0/f0.py", 'name': "f0.py"}), ('package', {'name': "d0.d0.d0"}), ('class', {'filename': "d0/d0/d0/__init__.py", 'name': "__init__.py"}), ('class', {'filename': "d0/d0/d0/f0.py", 'name': "f0.py"}), ]) def test_source_prefix(self) -> None: # https://github.com/nedbat/coveragepy/issues/465 # https://github.com/nedbat/coveragepy/issues/526 self.make_file("src/mod.py", "print(17)") cov = coverage.Coverage(source=["src"]) self.start_import_stop(cov, "mod", modfile="src/mod.py") self.assert_package_and_class_tags(cov, [ ('package', {'name': "."}), ('class', {'filename': "mod.py", 'name': "mod.py"}), ]) dom = ElementTree.parse("coverage.xml") self.assert_source(dom, "src") @pytest.mark.parametrize("trail", ["", "/", "\\"]) def test_relative_source(self, trail: str) -> None: if trail == "\\" and not env.WINDOWS: pytest.skip("trailing backslash is only for Windows") self.make_file("src/mod.py", "print(17)") cov = coverage.Coverage(source=[f"src{trail}"]) cov.set_option("run:relative_files", True) self.start_import_stop(cov, "mod", modfile="src/mod.py") cov.xml_report() dom = ElementTree.parse("coverage.xml") elts = dom.findall(".//sources/source") assert [elt.text for elt in elts] == ["src"] def compare_xml(expected: str, actual: str, actual_extra: bool = False) -> None: """Specialized compare function for our XML files.""" source_path = coverage.files.relative_directory().rstrip(r"\/") scrubs=[ (r' timestamp="\d+"', ' timestamp="TIMESTAMP"'), (r' version="[-.\w]+"', ' version="VERSION"'), (r'\s*.*?\s*', '%s' % re.escape(source_path)), (r'/coverage\.readthedocs\.io/?[-.\w/]*', '/coverage.readthedocs.io/VER'), ] compare(expected, actual, scrubs=scrubs, actual_extra=actual_extra) class XmlGoldTest(CoverageTest): """Tests of XML reporting that use gold files.""" def test_a_xml_1(self) -> None: self.make_file("a.py", """\ if 1 < 2: # Needed a < to look at HTML entities. 
a = 3 else: a = 4 """) cov = coverage.Coverage() a = self.start_import_stop(cov, "a") cov.xml_report(a, outfile="coverage.xml") compare_xml(gold_path("xml/x_xml"), ".", actual_extra=True) def test_a_xml_2(self) -> None: self.make_file("a.py", """\ if 1 < 2: # Needed a < to look at HTML entities. a = 3 else: a = 4 """) self.make_file("run_a_xml_2.ini", """\ # Put all the XML output in xml_2 [xml] output = xml_2/coverage.xml """) cov = coverage.Coverage(config_file="run_a_xml_2.ini") a = self.start_import_stop(cov, "a") cov.xml_report(a) compare_xml(gold_path("xml/x_xml"), "xml_2") def test_y_xml_branch(self) -> None: self.make_file("y.py", """\ def choice(x): if x < 2: return 3 else: return 4 assert choice(1) == 3 """) cov = coverage.Coverage(branch=True) y = self.start_import_stop(cov, "y") cov.xml_report(y, outfile="y_xml_branch/coverage.xml") compare_xml(gold_path("xml/y_xml_branch"), "y_xml_branch") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/testenv.py0000644000175100001770000000147600000000000017343 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Environment settings affecting tests.""" from __future__ import annotations import os # Are we testing the C-implemented trace function? C_TRACER = os.getenv("COVERAGE_CORE", "ctrace") == "ctrace" # Are we testing the Python-implemented trace function? PY_TRACER = os.getenv("COVERAGE_CORE", "ctrace") == "pytrace" # Are we testing the sys.monitoring implementation? SYS_MON = os.getenv("COVERAGE_CORE", "ctrace") == "sysmon" # Are we using a settrace function as a core? SETTRACE_CORE = C_TRACER or PY_TRACER # Are plugins supported during these tests? PLUGINS = C_TRACER # Are dynamic contexts supported during these tests? DYN_CONTEXTS = C_TRACER or PY_TRACER ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.0978148 coverage-7.4.4/tests/zipsrc/0000755000175100001770000000000000000000000016603 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1710442639.1658149 coverage-7.4.4/tests/zipsrc/zip1/0000755000175100001770000000000000000000000017466 5ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/zipsrc/zip1/__init__.py0000644000175100001770000000000000000000000021565 0ustar00runnerdocker00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tests/zipsrc/zip1/zip1.py0000644000175100001770000000032600000000000020724 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt # My zip file! lighter = "Zippo" says = "coo-coo cachoo" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1710442632.0 coverage-7.4.4/tox.ini0000644000175100001770000001010600000000000015440 0ustar00runnerdocker00000000000000# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt [tox] # When changing this list, be sure to check the [gh] list below. 
# PYVERSIONS envlist = py3{8,9,10,11,12,13}, pypy3, doc, lint, mypy skip_missing_interpreters = {env:COVERAGE_SKIP_MISSING_INTERPRETERS:True} toxworkdir = {env:TOXWORKDIR:.tox} [testenv] usedevelop = True download = True extras = toml # PYVERSIONS deps = -r requirements/pip.pip -r requirements/pytest.pip py3{8,9,10,11}: -r requirements/light-threads.pip # Windows can't update the pip version with pip running, so use Python # to install things. install_command = python -m pip install -U {opts} {packages} passenv = * setenv = pypy3{,8,9,10}: COVERAGE_TEST_CORES=pytrace # For some tests, we need .pyc files written in the current directory, # so override any local setting. PYTHONPYCACHEPREFIX= # If we ever need a stronger way to suppress warnings: #PYTHONWARNINGS=ignore:removed in Python 3.14; use ast.Constant:DeprecationWarning # Disable CPython's color output PYTHON_COLORS=0 # $set_env.py: COVERAGE_PIP_ARGS - Extra arguments for `pip install` # `--no-build-isolation` will let tox work with no network. commands = # Create tests/zipmods.zip python igor.py zip_mods # Build the C extension and test with the CTracer python setup.py --quiet build_ext --inplace python -m pip install {env:COVERAGE_PIP_ARGS} -q -e . python igor.py test_with_core ctrace {posargs} py3{12,13},anypy: python igor.py test_with_core sysmon {posargs} # Remove the C extension so that we can test the PyTracer python igor.py remove_extension # Test with the PyTracer python igor.py test_with_core pytrace {posargs} [testenv:anypy] # $set_env.py: COVERAGE_ANYPY - The custom Python for "tox -e anypy" # For running against my own builds of CPython, or any other specific Python. basepython = {env:COVERAGE_ANYPY} [testenv:doc] # One of the PYVERSIONS, that's currently supported by Sphinx. Make sure it # matches the `python:version:` in the .readthedocs.yml file, and the # python-version in the `doc` job in the .github/workflows/quality.yml workflow. basepython = python3.11 # Build the docs so we know if they are successful. We build twice: once with # -q to get all warnings, and once with -QW to get a success/fail status # return. 
deps = -r doc/requirements.pip allowlist_externals = make commands = # If this command fails, see the comment at the top of doc/cmd.rst python -m cogapp -cP --check --verbosity=1 doc/*.rst doc8 -q --ignore-path 'doc/_*' doc CHANGES.rst README.rst sphinx-build -b html -aEnqW doc doc/_build/html rst2html.py --strict README.rst doc/_build/trash - sphinx-build -b html -b linkcheck -aEnq doc doc/_build/html - sphinx-build -b html -b linkcheck -aEnQW doc doc/_build/html [testenv:lint] # Minimum of PYVERSIONS basepython = python3.8 deps = -r requirements/dev.pip setenv = {[testenv]setenv} LINTABLE=coverage tests doc ci igor.py setup.py __main__.py commands = python -m tabnanny {env:LINTABLE} # If this command fails, see the comment at the top of doc/cmd.rst python -m cogapp -cP --check --verbosity=1 doc/*.rst python -m cogapp -cP --check --verbosity=1 .github/workflows/*.yml python -m pylint --notes= --ignore-paths 'doc/_build/.*' {env:LINTABLE} check-manifest --ignore 'doc/sample_html/*,.treerc' # If 'build -q' becomes a thing (https://github.com/pypa/build/issues/188), # this can be simplified: python igor.py quietly "python -m build" twine check dist/* [testenv:mypy] basepython = python3.8 deps = -r requirements/mypy.pip setenv = {[testenv]setenv} TYPEABLE=coverage tests commands = # PYVERSIONS mypy --python-version=3.8 --exclude=sysmon {env:TYPEABLE} mypy --python-version=3.12 {env:TYPEABLE} [gh] # https://pypi.org/project/tox-gh/ # PYVERSIONS python = 3.8 = py38 3.9 = py39 3.10 = py310 3.11 = py311 3.12 = py312 3.13 = py313 pypy-3 = pypy3
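# The [gh] mapping above is read by tox-gh in CI: the Python version of the
# GitHub Actions job selects which env list runs, e.g. a 3.12 job runs py312.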